From b9958b520f40fcde7971ea37b556370adca8c1f7 Mon Sep 17 00:00:00 2001
From: Kairo Araujo
Date: Mon, 20 Jan 2025 15:32:48 +0100
Subject: [PATCH 1/2] Add Archivista Storage Backend

---
 config/100-deployment.yaml | 10 +-
 docs/config.md | 186 +-
 go.mod | 32 +-
 go.sum | 46 +-
 pkg/chains/storage/archivista/archivista.go | 217 +
 .../storage/archivista/archivista_test.go | 324 ++
 pkg/chains/storage/archivista/cert.go | 120 +
 pkg/chains/storage/archivista/cert_test.go | 191 +
 pkg/chains/storage/archivista/patch_test.go | 82 +
 pkg/chains/storage/storage.go | 9 +-
 pkg/config/config.go | 77 +-
 vendor/cel.dev/expr/.bazelversion | 4 +
 vendor/cel.dev/expr/.gitignore | 3 +
 vendor/cel.dev/expr/BUILD.bazel | 4 +
 vendor/cel.dev/expr/README.md | 8 +
 vendor/cel.dev/expr/WORKSPACE | 27 +
 vendor/cel.dev/expr/cloudbuild.yaml | 7 +
 vendor/cel.dev/expr/regen_go_proto.sh | 8 +
 vendor/cloud.google.com/go/auth/CHANGES.md | 3 +
 vendor/cloud.google.com/go/auth/auth.go | 22 +
 .../go/auth/credentials/compute.go | 16 +
 .../go/auth/credentials/detect.go | 24 +
 .../go/auth/credentials/filetypes.go | 18 +
 .../go/auth/credentials/idtoken/cache.go | 23 +
 .../go/auth/credentials/idtoken/compute.go | 23 +
 .../go/auth/credentials/idtoken/file.go | 47 +
 .../go/auth/credentials/idtoken/idtoken.go | 6 +
 .../go/auth/credentials/idtoken/validate.go | 49 +
 .../auth/credentials/impersonate/idtoken.go | 112 +
 .../credentials/impersonate/impersonate.go | 57 +
 .../go/auth/credentials/impersonate/user.go | 32 +
 .../internal/externalaccount/aws_provider.go | 33 +
 .../externalaccount/externalaccount.go | 37 +
 .../internal/externalaccount/url_provider.go | 15 +
 .../externalaccountuser.go | 12 +
 .../go/auth/credentials/internal/gdch/gdch.go | 41 +
 .../internal/impersonate/impersonate.go | 18 +
 .../internal/stsexchange/sts_exchange.go | 18 +
 .../go/auth/credentials/selfsignedjwt.go | 28 +
 .../go/auth/grpctransport/directpath.go | 9 +
 .../go/auth/grpctransport/grpctransport.go | 33 +
 .../go/auth/httptransport/httptransport.go | 18 +
 .../go/auth/httptransport/trace.go | 93 +
 .../go/auth/httptransport/transport.go | 24 +
 .../go/auth/internal/compute/compute.go | 5 +
 .../go/auth/internal/internal.go | 43 +
 .../go/auth/internal/jwt/jwt.go | 8 +
 .../go/auth/internal/transport/cba.go | 11 +
 .../go/auth/internal/transport/s2a.go | 29 +
 .../go/auth/internal/transport/transport.go | 3 +
 .../go/auth/oauth2adapt/CHANGES.md | 3 +
 .../cloud.google.com/go/auth/threelegged.go | 19 +
 .../go/compute/metadata/CHANGES.md | 3 +
 .../go/compute/metadata/metadata.go | 41 +
 .../go/kms/apiv1/autokey_admin_client.go | 186 +
 .../go/kms/apiv1/autokey_client.go | 206 +
 .../go/kms/apiv1/auxiliary.go | 28 +
 vendor/cloud.google.com/go/kms/apiv1/doc.go | 50 +
 .../go/kms/apiv1/ekm_client.go | 262 ++
 .../go/kms/apiv1/key_management_client.go | 652 +++
 .../go/kms/apiv1/kmspb/autokey.pb.go | 170 +
 .../go/kms/apiv1/kmspb/autokey_admin.pb.go | 129 +
 .../go/kms/apiv1/kmspb/ekm_service.pb.go | 347 ++
 .../go/kms/apiv1/kmspb/resources.pb.go | 291 ++
 .../go/kms/apiv1/kmspb/service.pb.go | 1060 +++++
 .../go/kms/internal/version.go | 4 +
 vendor/cloud.google.com/go/storage/CHANGES.md | 3 +
 vendor/cloud.google.com/go/storage/client.go | 18 +
 .../go/storage/experimental/experimental.go | 3 +
 .../go/storage/grpc_client.go | 754 ++++
 .../go/storage/grpc_metrics.go | 209 +
 .../go/storage/http_client.go | 37 +
 .../go/storage/internal/apiv2/auxiliary.go | 4 +
 .../storage/internal/apiv2/auxiliary_go123.go | 4 +
 .../go/storage/internal/apiv2/doc.go | 12 +
 .../internal/apiv2/gapic_metadata.json | 6 +
.../go/storage/internal/apiv2/helpers.go | 14 + .../storage/internal/apiv2/storage_client.go | 169 + .../internal/apiv2/storagepb/storage.pb.go | 3798 ++++++++++++++++- .../go/storage/internal/experimental.go | 3 + .../go/storage/internal/version.go | 4 + vendor/cloud.google.com/go/storage/invoke.go | 6 + vendor/cloud.google.com/go/storage/option.go | 13 + vendor/cloud.google.com/go/storage/reader.go | 19 + vendor/cloud.google.com/go/storage/storage.go | 147 + vendor/cloud.google.com/go/storage/writer.go | 6 + vendor/filippo.io/edwards25519/LICENSE | 27 + vendor/filippo.io/edwards25519/README.md | 14 + vendor/filippo.io/edwards25519/doc.go | 20 + .../filippo.io/edwards25519/edwards25519.go | 427 ++ vendor/filippo.io/edwards25519/extra.go | 349 ++ vendor/filippo.io/edwards25519/field/fe.go | 420 ++ .../filippo.io/edwards25519/field/fe_amd64.go | 16 + .../filippo.io/edwards25519/field/fe_amd64.s | 379 ++ .../edwards25519/field/fe_amd64_noasm.go | 12 + .../filippo.io/edwards25519/field/fe_arm64.go | 16 + .../filippo.io/edwards25519/field/fe_arm64.s | 42 + .../edwards25519/field/fe_arm64_noasm.go | 12 + .../filippo.io/edwards25519/field/fe_extra.go | 50 + .../edwards25519/field/fe_generic.go | 266 ++ vendor/filippo.io/edwards25519/scalar.go | 343 ++ vendor/filippo.io/edwards25519/scalar_fiat.go | 1147 +++++ vendor/filippo.io/edwards25519/scalarmult.go | 214 + vendor/filippo.io/edwards25519/tables.go | 129 + vendor/github.com/4meepo/tagalign/.gitignore | 3 + .../4meepo/tagalign/.goreleaser.yml | 8 + vendor/github.com/4meepo/tagalign/options.go | 10 + vendor/github.com/4meepo/tagalign/tagalign.go | 219 + .../Antonboom/nilnil/pkg/analyzer/analyzer.go | 4 + .../azure-sdk-for-go/sdk/azcore/CHANGELOG.md | 3 + .../internal/resource/resource_identifier.go | 3 + .../sdk/azcore/internal/pollers/op/op.go | 25 + .../sdk/azcore/internal/shared/constants.go | 4 + .../sdk/azcore/runtime/pager.go | 3 + .../sdk/azcore/runtime/poller.go | 16 + .../sdk/azidentity/BREAKING_CHANGES.md | 3 + .../sdk/azidentity/CHANGELOG.md | 3 + .../azure-sdk-for-go/sdk/azidentity/README.md | 24 + .../sdk/azidentity/TOKEN_CACHING.MD | 14 + .../sdk/azidentity/TROUBLESHOOTING.md | 6 + .../sdk/azidentity/azidentity.go | 10 + .../sdk/azidentity/azure_cli_credential.go | 6 + .../azure_developer_cli_credential.go | 6 + .../azidentity/chained_token_credential.go | 7 + .../azure-sdk-for-go/sdk/azidentity/ci.yml | 22 + .../sdk/azidentity/confidential_client.go | 4 + .../azidentity/default_azure_credential.go | 17 + .../sdk/azidentity/device_code_credential.go | 5 + .../interactive_browser_credential.go | 5 + .../sdk/azidentity/managed_identity_client.go | 42 + .../sdk/azidentity/public_client.go | 12 + .../sdk/azidentity/test-resources-post.ps1 | 14 + .../sdk/azidentity/version.go | 4 + .../sdk/security/keyvault/azkeys/CHANGELOG.md | 3 + .../sdk/security/keyvault/azkeys/assets.json | 4 + .../sdk/security/keyvault/azkeys/ci.yml | 18 + .../security/keyvault/azkeys/custom_client.go | 3 + .../keyvault/azkeys/platform-matrix.json | 5 + .../sdk/security/keyvault/azkeys/version.go | 4 + .../security/keyvault/internal/CHANGELOG.md | 3 + .../keyvault/internal/challenge_policy.go | 54 + .../keyvault/internal/ci.securitykeyvault.yml | 28 + .../security/keyvault/internal/constants.go | 4 + .../apps/confidential/confidential.go | 13 + .../apps/internal/base/base.go | 12 + .../apps/internal/json/json.go | 58 + .../apps/internal/local/server.go | 7 + .../apps/internal/oauth/oauth.go | 11 + .../internal/oauth/ops/authority/authority.go | 93 + 
.../internal/oauth/ops/internal/comm/comm.go | 19 + .../apps/internal/oauth/resolvers.go | 28 + .../detectors/gcp/app_engine.go | 7 + .../detectors/gcp/detector.go | 28 + .../detectors/gcp/faas.go | 11 + .../detectors/gcp/gce.go | 40 + .../detectors/gcp/gke.go | 16 + .../Masterminds/semver/v3/version.go | 27 + .../go-crypto/internal/byteutil/byteutil.go | 12 + .../ProtonMail/go-crypto/ocb/ocb.go | 65 + .../go-crypto/openpgp/armor/armor.go | 82 + .../go-crypto/openpgp/armor/encode.go | 50 + .../go-crypto/openpgp/canonical_text.go | 9 + .../ProtonMail/go-crypto/openpgp/ecdh/ecdh.go | 10 + .../go-crypto/openpgp/errors/errors.go | 24 + .../openpgp/internal/algorithm/cipher.go | 20 + .../openpgp/internal/ecc/curve_info.go | 18 + .../go-crypto/openpgp/internal/ecc/ed25519.go | 7 + .../go-crypto/openpgp/internal/ecc/ed448.go | 7 + .../go-crypto/openpgp/key_generation.go | 86 + .../ProtonMail/go-crypto/openpgp/keys.go | 92 + .../go-crypto/openpgp/packet/aead_crypter.go | 126 + .../openpgp/packet/aead_encrypted.go | 17 + .../go-crypto/openpgp/packet/compressed.go | 17 + .../go-crypto/openpgp/packet/config.go | 25 + .../go-crypto/openpgp/packet/encrypted_key.go | 156 + .../go-crypto/openpgp/packet/literal.go | 6 + .../openpgp/packet/one_pass_signature.go | 51 + .../go-crypto/openpgp/packet/opaque.go | 8 + .../go-crypto/openpgp/packet/packet.go | 52 + .../go-crypto/openpgp/packet/private_key.go | 232 + .../go-crypto/openpgp/packet/public_key.go | 247 ++ .../go-crypto/openpgp/packet/reader.go | 37 + .../go-crypto/openpgp/packet/signature.go | 417 ++ .../openpgp/packet/symmetric_key_encrypted.go | 71 + .../openpgp/packet/symmetrically_encrypted.go | 3 + .../packet/symmetrically_encrypted_aead.go | 57 + .../packet/symmetrically_encrypted_mdc.go | 18 + .../go-crypto/openpgp/packet/userattribute.go | 8 + .../go-crypto/openpgp/packet/userid.go | 8 + .../ProtonMail/go-crypto/openpgp/read.go | 144 + .../go-crypto/openpgp/read_write_test_data.go | 21 + .../ProtonMail/go-crypto/openpgp/s2k/s2k.go | 31 + .../go-crypto/openpgp/s2k/s2k_cache.go | 4 + .../go-crypto/openpgp/s2k/s2k_config.go | 10 + .../ProtonMail/go-crypto/openpgp/write.go | 58 + .../alecthomas/go-check-sumtype/README.md | 3 + .../alecthomas/go-check-sumtype/check.go | 15 + .../alecthomas/go-check-sumtype/config.go | 3 + .../alecthomas/go-check-sumtype/def.go | 24 + .../aws/aws-sdk-go-v2/aws/config.go | 3 + .../aws-sdk-go-v2/aws/go_module_metadata.go | 4 + .../aws/middleware/user_agent.go | 32 + .../aws-sdk-go-v2/aws/protocol/query/array.go | 38 + .../aws/protocol/query/object.go | 15 + .../aws-sdk-go-v2/aws/protocol/query/value.go | 3 + .../aws/retry/retryable_error.go | 4 + .../aws/aws-sdk-go-v2/config/CHANGELOG.md | 3 + .../aws/aws-sdk-go-v2/config/config.go | 7 + .../aws/aws-sdk-go-v2/config/env_config.go | 179 + .../config/go_module_metadata.go | 4 + .../aws/aws-sdk-go-v2/config/load_options.go | 11 + .../aws/aws-sdk-go-v2/config/provider.go | 3 + .../aws/aws-sdk-go-v2/config/resolve.go | 3 + .../aws/aws-sdk-go-v2/config/shared_config.go | 15 + .../aws-sdk-go-v2/credentials/CHANGELOG.md | 3 + .../credentials/go_module_metadata.go | 4 + .../feature/ec2/imds/CHANGELOG.md | 3 + .../feature/ec2/imds/go_module_metadata.go | 4 + .../internal/configsources/CHANGELOG.md | 3 + .../configsources/go_module_metadata.go | 4 + .../endpoints/awsrulesfn/partitions.json | 6 + .../internal/endpoints/v2/CHANGELOG.md | 3 + .../endpoints/v2/go_module_metadata.go | 4 + .../aws-sdk-go-v2/internal/ini/CHANGELOG.md | 3 + .../internal/ini/go_module_metadata.go | 4 + 
.../internal/accept-encoding/CHANGELOG.md | 3 + .../accept-encoding/go_module_metadata.go | 4 + .../internal/presigned-url/CHANGELOG.md | 3 + .../presigned-url/go_module_metadata.go | 4 + .../aws-sdk-go-v2/service/kms/CHANGELOG.md | 3 + .../aws-sdk-go-v2/service/kms/api_client.go | 4 + .../service/kms/go_module_metadata.go | 4 + .../kms/internal/endpoints/endpoints.go | 6 + .../aws-sdk-go-v2/service/sso/CHANGELOG.md | 3 + .../aws-sdk-go-v2/service/sso/api_client.go | 4 + .../service/sso/go_module_metadata.go | 4 + .../service/ssooidc/CHANGELOG.md | 3 + .../service/ssooidc/api_client.go | 4 + .../service/ssooidc/api_op_CreateToken.go | 30 + .../ssooidc/api_op_CreateTokenWithIAM.go | 12 + .../service/ssooidc/api_op_RegisterClient.go | 10 + .../aws/aws-sdk-go-v2/service/ssooidc/doc.go | 6 + .../service/ssooidc/go_module_metadata.go | 4 + .../aws-sdk-go-v2/service/sts/CHANGELOG.md | 3 + .../aws-sdk-go-v2/service/sts/api_client.go | 4 + .../service/sts/api_op_AssumeRole.go | 62 + .../service/sts/api_op_AssumeRoleWithSAML.go | 25 + .../sts/api_op_AssumeRoleWithWebIdentity.go | 63 + .../service/sts/api_op_GetFederationToken.go | 10 + .../service/sts/api_op_GetSessionToken.go | 11 + .../service/sts/deserializers.go | 6 + .../aws-sdk-go-v2/service/sts/generated.json | 3 + .../service/sts/go_module_metadata.go | 4 + .../sts/internal/endpoints/endpoints.go | 6 + .../aws-sdk-go-v2/service/sts/serializers.go | 6 + .../aws-sdk-go-v2/service/sts/types/errors.go | 12 + .../aws-sdk-go-v2/service/sts/validators.go | 9 + .../aws-sdk-go/aws/session/shared_config.go | 4 + .../github.com/aws/aws-sdk-go/aws/version.go | 4 + .../dynamodb/dynamodbattribute/converter.go | 27 + vendor/github.com/aws/smithy-go/CHANGELOG.md | 3 + .../github.com/aws/smithy-go/CONTRIBUTING.md | 3 + vendor/github.com/aws/smithy-go/Makefile | 3 + .../encoding/httpbinding/path_replace.go | 28 + .../aws/smithy-go/go_module_metadata.go | 4 + .../aws/smithy-go/transport/http/host.go | 4 + .../aws/smithy-go/transport/http/metrics.go | 51 + .../http/middleware_close_response_body.go | 15 + .../aws/smithy-go/transport/http/request.go | 12 + .../github.com/bombsimon/wsl/v4/.golangci.yml | 34 + .../github.com/bombsimon/wsl/v4/analyzer.go | 19 + vendor/github.com/bombsimon/wsl/v4/wsl.go | 24 + .../buildkite/agent/v3/api/artifacts.go | 61 + .../github.com/buildkite/agent/v3/api/auth.go | 37 + .../buildkite/agent/v3/api/client.go | 100 + .../github.com/buildkite/agent/v3/api/oidc.go | 17 + .../buildkite/agent/v3/api/steps.go | 3 + .../buildkite/agent/v3/version/VERSION | 4 + .../buildkite/agent/v3/version/version.go | 3 + .../buildkite/go-pipeline/README.md | 4 + .../go-pipeline/ordered/unmarshal.go | 60 + .../go-pipeline/step_command_matrix.go | 8 + .../buildkite/interpolate/interpolate.go | 31 + .../buildkite/interpolate/parser.go | 65 + vendor/github.com/buildkite/roko/retrier.go | 68 + .../butuzov/ireturn/analyzer/analyzer.go | 33 + .../ireturn/analyzer/internal/config/allow.go | 4 + .../ireturn/analyzer/internal/config/new.go | 4 + .../analyzer/internal/config/reject.go | 4 + .../ireturn/analyzer/internal/types/iface.go | 4 + .../github.com/butuzov/mirror/MIRROR_FUNCS.md | 204 + vendor/github.com/butuzov/mirror/Makefile | 33 + vendor/github.com/butuzov/mirror/analyzer.go | 6 + .../butuzov/mirror/checkers_maphash.go | 35 + .../mirror/internal/checker/checker.go | 12 + .../mirror/internal/checker/violation.go | 12 + vendor/github.com/butuzov/mirror/readme.md | 15 + .../ckaznocha/intrange/.golangci.yml | 31 + 
.../github.com/ckaznocha/intrange/intrange.go | 74 + .../stargz-snapshotter/estargz/build.go | 6 + .../stargz-snapshotter/estargz/testutil.go | 24 + .../github.com/coreos/go-oidc/v3/oidc/oidc.go | 39 + .../curioswitch/go-reassign/.golangci.yml | 15 + .../curioswitch/go-reassign/README.md | 7 + .../go-reassign/internal/analyzer/analyzer.go | 18 + .../cli/cli/config/credentials/file_store.go | 11 + .../client/client.go | 32 + .../credentials/error.go | 16 + .../github.com/edwarnicke/gitoid/.gitignore | 24 + vendor/github.com/edwarnicke/gitoid/LICENSE | 201 + vendor/github.com/edwarnicke/gitoid/README.md | 111 + vendor/github.com/edwarnicke/gitoid/gitoid.go | 215 + .../github.com/edwarnicke/gitoid/options.go | 56 + .../envoy/config/cluster/v3/cluster.pb.go | 743 ++++ .../config/cluster/v3/cluster.pb.validate.go | 3 + .../config/cluster/v3/cluster_vtproto.pb.go | 6 + .../envoy/config/core/v3/protocol.pb.go | 434 ++ .../config/core/v3/protocol.pb.validate.go | 6 + .../config/core/v3/protocol_vtproto.pb.go | 12 + .../config/listener/v3/quic_config.pb.go | 46 + .../listener/v3/quic_config.pb.validate.go | 3 + .../listener/v3/quic_config_vtproto.pb.go | 6 + .../config/route/v3/route_components.pb.go | 1429 +++++++ .../v3/http_connection_manager.pb.go | 17 + .../v3/client_side_weighted_round_robin.pb.go | 42 + ...nt_side_weighted_round_robin_vtproto.pb.go | 6 + .../transport_sockets/tls/v3/common.pb.go | 154 + .../tls/v3/common.pb.validate.go | 4 + .../go-jose/v3/cryptosigner/cryptosigner.go | 147 + vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go | 18 + .../golangci/gofmt/gofmt/golangci.go | 6 + .../golangci-lint/cmd/golangci-lint/main.go | 17 + .../pkg/commands/config_verify.go | 34 + .../golangci-lint/pkg/commands/flagsets.go | 48 + .../golangci-lint/pkg/commands/help.go | 45 + .../golangci-lint/pkg/commands/run.go | 10 + .../golangci-lint/pkg/config/config.go | 25 + .../golangci-lint/pkg/config/issues.go | 9 + .../pkg/config/linters_settings.go | 77 + .../golangci-lint/pkg/config/loader.go | 67 + .../golangci-lint/pkg/config/output.go | 7 + .../golangci/golangci-lint/pkg/config/run.go | 7 + .../golangci-lint/pkg/fsutils/fsutils.go | 4 + .../golangci-lint/pkg/goanalysis/issue.go | 4 + .../golangci-lint/pkg/goanalysis/runner.go | 62 + .../pkg/goanalysis/runner_action.go | 26 + .../pkg/goanalysis/runner_action_cache.go | 44 + .../pkg/goanalysis/runner_base.go | 370 ++ .../pkg/goanalysis/runner_loadingpackage.go | 27 + .../golangci-lint/pkg/goanalysis/runners.go | 14 + .../pkg/goanalysis/runners_cache.go | 8 + .../pkg/golinters/asasalint/asasalint.go | 9 + .../pkg/golinters/bidichk/bidichk.go | 43 + .../pkg/golinters/bodyclose/bodyclose.go | 4 + .../pkg/golinters/cyclop/cyclop.go | 4 + .../pkg/golinters/dogsled/dogsled.go | 102 + .../golangci-lint/pkg/golinters/dupl/dupl.go | 6 + .../pkg/golinters/dupword/dupword.go | 15 + .../pkg/golinters/errcheck/errcheck.go | 10 + .../pkg/golinters/errchkjson/errchkjson.go | 18 + .../pkg/golinters/errorlint/errorlint.go | 27 + .../pkg/golinters/forbidigo/forbidigo.go | 72 + .../pkg/golinters/funlen/funlen.go | 72 + .../golangci-lint/pkg/golinters/gci/gci.go | 241 ++ .../golinters/ginkgolinter/ginkgolinter.go | 15 + .../gochecknoinits/gochecknoinits.go | 67 + .../gochecksumtype/gochecksumtype.go | 6 + .../pkg/golinters/gocritic/gocritic.go | 99 + .../pkg/golinters/godot/godot.go | 77 + .../pkg/golinters/godox/godox.go | 59 + .../pkg/golinters/gofmt/gofmt.go | 75 + .../pkg/golinters/gofumpt/gofumpt.go | 84 + .../pkg/golinters/goheader/goheader.go | 88 + 
.../pkg/golinters/goimports/goimports.go | 70 + .../gomoddirectives/gomoddirectives.go | 13 + .../pkg/golinters/gomodguard/gomodguard.go | 4 + .../pkg/golinters/gosec/gosec.go | 4 + .../golinters/gosmopolitan/gosmopolitan.go | 17 + .../pkg/golinters/govet/govet.go | 22 + .../pkg/golinters/grouper/grouper.go | 10 + .../pkg/golinters/importas/importas.go | 5 + .../pkg/golinters/internal/diff.go | 209 + .../pkg/golinters/internal/util.go | 22 + .../golangci-lint/pkg/golinters/lll/lll.go | 110 + .../pkg/golinters/loggercheck/loggercheck.go | 3 + .../pkg/golinters/maintidx/maintidx.go | 17 + .../pkg/golinters/makezero/makezero.go | 59 + .../pkg/golinters/mirror/mirror.go | 68 + .../pkg/golinters/misspell/misspell.go | 96 + .../pkg/golinters/musttag/musttag.go | 8 + .../pkg/golinters/nestif/nestif.go | 65 + .../pkg/golinters/nilnil/nilnil.go | 14 + .../nolintlint/internal/nolintlint.go | 227 + .../pkg/golinters/nolintlint/nolintlint.go | 84 + .../pkg/golinters/prealloc/prealloc.go | 49 + .../pkg/golinters/protogetter/protogetter.go | 62 + .../pkg/golinters/recvcheck/recvcheck.go | 8 + .../pkg/golinters/revive/revive.go | 24 + .../pkg/golinters/tagalign/tagalign.go | 57 + .../pkg/golinters/tagliatelle/tagliatelle.go | 15 + .../pkg/golinters/testpackage/testpackage.go | 16 + .../pkg/golinters/thelper/thelper.go | 20 + .../pkg/golinters/unparam/unparam.go | 55 + .../golinters/usestdlibvars/usestdlibvars.go | 25 + .../pkg/golinters/whitespace/whitespace.go | 90 + .../pkg/golinters/wrapcheck/wrapcheck.go | 3 + .../golangci-lint/pkg/lint/linter/config.go | 9 + .../pkg/lint/lintersdb/builder_linter.go | 132 + .../golangci/golangci-lint/pkg/lint/runner.go | 10 + .../golangci-lint/pkg/printers/codeclimate.go | 4 + .../golangci-lint/pkg/printers/teamcity.go | 14 + .../golangci-lint/pkg/result/issue.go | 34 + .../pkg/result/processors/fixer.go | 244 ++ .../pkg/result/processors/max_from_linter.go | 4 + .../pkg/result/processors/max_same_issues.go | 4 + .../pkg/result/processors/severity.go | 10 + .../pkg/result/processors/uniq_by_line.go | 8 + vendor/github.com/golangci/modinfo/.gitignore | 1 + .../github.com/golangci/modinfo/.golangci.yml | 157 + vendor/github.com/golangci/modinfo/LICENSE | 674 +++ vendor/github.com/golangci/modinfo/Makefile | 12 + vendor/github.com/golangci/modinfo/module.go | 157 + vendor/github.com/golangci/modinfo/readme.md | 73 + .../github.com/google/cel-go/cel/BUILD.bazel | 7 + vendor/github.com/google/cel-go/cel/decls.go | 25 + vendor/github.com/google/cel-go/cel/env.go | 85 + .../github.com/google/cel-go/cel/inlining.go | 4 + vendor/github.com/google/cel-go/cel/io.go | 126 + .../github.com/google/cel-go/cel/library.go | 72 + .../github.com/google/cel-go/cel/optimizer.go | 35 + .../github.com/google/cel-go/cel/options.go | 33 + .../github.com/google/cel-go/cel/program.go | 23 + .../google/cel-go/checker/BUILD.bazel | 4 + .../google/cel-go/checker/checker.go | 28 + .../github.com/google/cel-go/checker/cost.go | 336 ++ .../google/cel-go/checker/standard.go | 35 + .../google/cel-go/common/BUILD.bazel | 4 + .../google/cel-go/common/ast/BUILD.bazel | 15 + .../google/cel-go/common/ast/ast.go | 34 + .../google/cel-go/common/ast/conversion.go | 41 + .../google/cel-go/common/ast/expr.go | 18 + .../google/cel-go/common/ast/factory.go | 40 + .../google/cel-go/common/ast/navigable.go | 3 + .../cel-go/common/containers/container.go | 9 + .../google/cel-go/common/debug/debug.go | 7 + .../google/cel-go/common/decls/decls.go | 31 + .../github.com/google/cel-go/common/error.go | 18 + 
.../github.com/google/cel-go/common/errors.go | 9 + .../google/cel-go/common/runes/buffer.go | 48 + .../github.com/google/cel-go/common/source.go | 26 + .../google/cel-go/common/stdlib/BUILD.bazel | 8 + .../google/cel-go/common/stdlib/standard.go | 60 + .../google/cel-go/common/types/BUILD.bazel | 6 + .../google/cel-go/common/types/bytes.go | 4 + .../google/cel-go/common/types/err.go | 3 + .../google/cel-go/common/types/list.go | 17 + .../google/cel-go/common/types/map.go | 30 + .../google/cel-go/common/types/null.go | 8 + .../google/cel-go/common/types/object.go | 4 + .../google/cel-go/common/types/pb/type.go | 19 + .../google/cel-go/common/types/provider.go | 3 + .../cel-go/common/types/traits/iterator.go | 3 + .../cel-go/common/types/traits/lister.go | 3 + .../cel-go/common/types/traits/mapper.go | 3 + .../cel-go/common/types/traits/traits.go | 5 + .../google/cel-go/common/types/types.go | 86 + .../google/cel-go/interpreter/activation.go | 41 + .../cel-go/interpreter/attribute_patterns.go | 25 + .../google/cel-go/interpreter/attributes.go | 181 + .../cel-go/interpreter/interpretable.go | 91 + .../google/cel-go/interpreter/planner.go | 27 + .../google/cel-go/interpreter/prune.go | 33 + .../google/cel-go/interpreter/runtimecost.go | 39 + .../github.com/google/cel-go/parser/errors.go | 13 + .../google/cel-go/parser/gen/BUILD.bazel | 4 + .../google/cel-go/parser/gen/CEL.g4 | 20 +- .../google/cel-go/parser/gen/CEL.interp | 15 +- .../google/cel-go/parser/gen/CEL.tokens | 3 + .../google/cel-go/parser/gen/CELLexer.interp | 15 +- .../google/cel-go/parser/gen/CELLexer.tokens | 3 + .../cel-go/parser/gen/cel_base_listener.go | 15 + .../cel-go/parser/gen/cel_base_visitor.go | 15 + .../google/cel-go/parser/gen/cel_lexer.go | 328 ++ .../google/cel-go/parser/gen/cel_listener.go | 24 + .../google/cel-go/parser/gen/cel_parser.go | 3066 ++++++++++++- .../google/cel-go/parser/gen/cel_visitor.go | 21 + .../github.com/google/cel-go/parser/helper.go | 36 + .../github.com/google/cel-go/parser/macro.go | 61 + .../google/cel-go/parser/options.go | 6 + .../github.com/google/cel-go/parser/parser.go | 70 + .../google/cel-go/parser/unescape.go | 69 + .../google/cel-go/parser/unparser.go | 26 + .../certificate-transparency-go/AUTHORS | 7 + .../certificate-transparency-go/CHANGELOG.md | 3 + .../certificate-transparency-go/CONTRIBUTORS | 4 + .../certificate-transparency-go/README.md | 4 + .../internal/redact/redact.go | 12 + .../pkg/authn/keychain.go | 8 + .../go-containerregistry/pkg/name/ref.go | 4 + .../pkg/v1/mutate/mutate.go | 14 + .../pkg/v1/remote/referrers.go | 4 + .../pkg/v1/remote/transport/bearer.go | 38 + .../internal/proto/v2/s2a_go_proto/s2a.pb.go | 156 + .../google/s2a-go/internal/v2/s2av2.go | 21 + .../v2/tlsconfigstore/tlsconfigstore.go | 4 + vendor/github.com/google/s2a-go/s2a.go | 19 + .../github.com/google/s2a-go/s2a_options.go | 63 + .../google/s2a-go/stream/s2a_stream.go | 6 + .../gax-go/v2/.release-please-manifest.json | 4 + .../googleapis/gax-go/v2/CHANGES.md | 3 + .../googleapis/gax-go/v2/internal/version.go | 4 + .../gax-go/v2/internallog/internallog.go | 4 + .../grpc-gateway/v2/runtime/convert.go | 12 + .../grpc-gateway/v2/runtime/errors.go | 3 + .../grpc-gateway/v2/runtime/fieldmask.go | 4 + .../grpc-gateway/v2/runtime/handler.go | 8 + .../grpc-gateway/v2/runtime/marshaler.go | 3 + .../v2/runtime/marshaler_registry.go | 5 + .../grpc-gateway/v2/runtime/proto2_convert.go | 8 + .../grpc-gateway/v2/runtime/query.go | 8 + .../grpc-gateway/v2/utilities/pattern.go | 4 + 
.../v2/utilities/string_array_flag.go | 4 + vendor/github.com/in-toto/archivista/LICENSE | 201 + .../in-toto/archivista/pkg/api/client.go | 126 + .../in-toto/archivista/pkg/api/download.go | 128 + .../in-toto/archivista/pkg/api/graphql.go | 174 + .../in-toto/archivista/pkg/api/structs.go | 90 + .../in-toto/archivista/pkg/api/upload.go | 115 + .../archivista/pkg/http-client/client.go | 216 + .../predicates/provenance/v1/provenance.pb.go | 167 +- .../attestation/go/v1/resource_descriptor.go | 77 +- .../go/v1/resource_descriptor.pb.go | 49 +- .../in-toto/attestation/go/v1/statement.pb.go | 35 +- vendor/github.com/in-toto/go-witness/LICENSE | 201 + .../go-witness/cryptoutil/digestset.go | 292 ++ .../in-toto/go-witness/cryptoutil/dirhash.go | 61 + .../in-toto/go-witness/cryptoutil/ecdsa.go | 85 + .../in-toto/go-witness/cryptoutil/ed25519.go | 83 + .../in-toto/go-witness/cryptoutil/gitoid.go | 85 + .../in-toto/go-witness/cryptoutil/rsa.go | 89 + .../in-toto/go-witness/cryptoutil/signer.go | 121 + .../in-toto/go-witness/cryptoutil/util.go | 201 + .../in-toto/go-witness/cryptoutil/verifier.go | 99 + .../in-toto/go-witness/cryptoutil/x509.go | 173 + .../in-toto/go-witness/dsse/dsse.go | 96 + .../in-toto/go-witness/dsse/sign.go | 115 + .../in-toto/go-witness/dsse/verify.go | 201 + .../github.com/in-toto/go-witness/log/log.go | 94 + .../in-toto/go-witness/log/silent.go | 30 + .../in-toto/go-witness/timestamp/fake.go | 43 + .../in-toto/go-witness/timestamp/timestamp.go | 29 + .../in-toto/go-witness/timestamp/tsp.go | 176 + .../github.com/jjti/go-spancheck/.gitignore | 5 +- .../jjti/go-spancheck/.golangci.yml | 13 + vendor/github.com/jjti/go-spancheck/go.work | 4 + .../github.com/jjti/go-spancheck/go.work.sum | 5 + .../github.com/jjti/go-spancheck/spancheck.go | 20 + vendor/github.com/julz/importas/analyzer.go | 12 + vendor/github.com/julz/importas/config.go | 23 + vendor/github.com/julz/importas/flags.go | 23 + .../ldez/gomoddirectives/.golangci.yml | 53 + .../github.com/ldez/gomoddirectives/LICENSE | 4 + .../ldez/gomoddirectives/gomoddirectives.go | 69 + .../github.com/ldez/gomoddirectives/module.go | 42 + .../github.com/ldez/gomoddirectives/readme.md | 17 + .../github.com/ldez/tagliatelle/.golangci.yml | 65 + vendor/github.com/ldez/tagliatelle/readme.md | 48 + .../ldez/tagliatelle/tagliatelle.go | 121 + .../magiconair/properties/CHANGELOG.md | 205 + .../magiconair/properties/README.md | 39 + .../magiconair/properties/decode.go | 9 + .../github.com/magiconair/properties/load.go | 21 + .../mitchellh/mapstructure/CHANGELOG.md | 3 + .../mitchellh/mapstructure/decode_hooks.go | 4 + .../mitchellh/mapstructure/mapstructure.go | 7 + .../nunnatsa/ginkgolinter/README.md | 58 + .../nunnatsa/ginkgolinter/analyzer.go | 18 + .../github.com/nunnatsa/ginkgolinter/doc.go | 4 + .../internal/expression/actual/actual.go | 14 + .../internal/expression/actual/actualarg.go | 12 + .../internal/expression/expression.go | 32 + .../internal/expression/matcher/matcher.go | 9 + .../internal/expression/value/value.go | 3 + .../internal/ginkgohandler/handling.go | 8 + .../internal/gomegahandler/dothandler.go | 33 + .../internal/gomegahandler/handler.go | 11 + .../internal/gomegahandler/namedhandler.go | 36 + .../internal/rules/asyncfunccallrule.go | 8 + .../internal/rules/asynctimeintervalsrule.go | 4 + .../internal/rules/equaldifferenttypesrule.go | 4 + .../internal/rules/equalnilrule.go | 4 + .../ginkgolinter/internal/rules/havelen0.go | 4 + .../internal/rules/haveoccurredrule.go | 4 + .../internal/rules/nilcomparerule.go | 6 + 
.../internal/rules/succeedrule.go | 4 + .../nunnatsa/ginkgolinter/types/boolean.go | 32 + .../nunnatsa/ginkgolinter/types/config.go | 19 + .../prometheus/common/expfmt/decode.go | 22 + .../prometheus/common/expfmt/encode.go | 45 + .../prometheus/common/expfmt/expfmt.go | 47 + .../common/expfmt/openmetrics_create.go | 17 + .../prometheus/common/expfmt/text_create.go | 8 + .../prometheus/common/expfmt/text_parse.go | 97 + .../prometheus/common/model/alert.go | 14 + .../prometheus/common/model/labels.go | 18 + .../common/model/labelset_string.go | 5 + .../common/model/labelset_string_go120.go | 39 + .../prometheus/common/model/metric.go | 98 + .../prometheus/common/model/silence.go | 30 + .../prometheus/common/model/value_float.go | 7 + .../common/model/value_histogram.go | 15 + .../github.com/raeperd/recvcheck/.gitignore | 5 + vendor/github.com/raeperd/recvcheck/Makefile | 12 + vendor/github.com/raeperd/recvcheck/README.md | 11 + .../github.com/raeperd/recvcheck/analyzer.go | 51 + .../santhosh-tekuri/jsonschema/v5/.gitignore | 4 + .../santhosh-tekuri/jsonschema/v5/.gitmodules | 3 + .../santhosh-tekuri/jsonschema/v5/LICENSE | 175 + .../santhosh-tekuri/jsonschema/v5/README.md | 220 + .../santhosh-tekuri/jsonschema/v5/compiler.go | 812 ++++ .../santhosh-tekuri/jsonschema/v5/content.go | 29 + .../santhosh-tekuri/jsonschema/v5/doc.go | 49 + .../santhosh-tekuri/jsonschema/v5/draft.go | 1454 +++++++ .../santhosh-tekuri/jsonschema/v5/errors.go | 129 + .../jsonschema/v5/extension.go | 116 + .../santhosh-tekuri/jsonschema/v5/format.go | 567 +++ .../jsonschema/v5/httploader/httploader.go | 38 + .../santhosh-tekuri/jsonschema/v5/loader.go | 60 + .../santhosh-tekuri/jsonschema/v5/output.go | 77 + .../santhosh-tekuri/jsonschema/v5/resource.go | 280 ++ .../santhosh-tekuri/jsonschema/v5/schema.go | 900 ++++ .../usestdlibvars/pkg/analyzer/analyzer.go | 48 + .../go-securesystemslib/dsse/verify.go | 4 + .../signerverifier/ecdsa.go | 7 + .../signerverifier/ed25519.go | 3 + .../go-securesystemslib/signerverifier/rsa.go | 9 + .../signerverifier/signerverifier.go | 10 + .../signerverifier/utils.go | 3 + .../cosign/v2/cmd/cosign/cli/fulcio/fulcio.go | 11 + .../cosign/v2/cmd/cosign/cli/options/copy.go | 4 + .../v2/cmd/cosign/cli/options/deprecate.go | 3 + .../v2/cmd/cosign/cli/options/initialize.go | 8 + .../v2/cmd/cosign/cli/options/registry.go | 20 + .../sigstore/cosign/v2/pkg/blob/load.go | 6 + .../cosign/v2/pkg/cosign/git/gitlab/gitlab.go | 4 + .../sigstore/cosign/v2/pkg/cosign/verify.go | 16 + .../cosign/v2/pkg/oci/remote/options.go | 3 + .../gen/pb-go/bundle/v1/sigstore_bundle.pb.go | 174 + .../gen/pb-go/common/v1/sigstore_common.pb.go | 493 +++ .../gen/pb-go/dsse/envelope.pb.go | 84 + .../gen/pb-go/rekor/v1/sigstore_rekor.pb.go | 190 + .../v0.0.1/hashedrekord_v0_0_1_schema.json | 4 + .../pkg/signature/kms/azure/README.md | 14 + .../pkg/signature/kms/azure/client.go | 120 + .../sigstore/pkg/signature/kms/kms.go | 19 + .../pkg/verification/verify.go | 4 + vendor/github.com/spf13/pflag/flag.go | 47 + vendor/github.com/spf13/pflag/ip.go | 3 + vendor/github.com/spf13/pflag/string_array.go | 8 + .../go-spiffe/v2/bundle/jwtbundle/bundle.go | 7 + .../go-spiffe/v2/bundle/jwtbundle/doc.go | 12 + .../v2/bundle/spiffebundle/bundle.go | 7 + .../go-spiffe/v2/bundle/spiffebundle/doc.go | 12 + .../go-spiffe/v2/bundle/x509bundle/doc.go | 12 + .../spiffe/go-spiffe/v2/workloadapi/addr.go | 10 + .../go-spiffe/v2/workloadapi/client_posix.go | 4 + .../v2/workloadapi/client_windows.go | 4 + .../pkg/analyzer/analyzer.go | 18 + 
.../pipeline/pkg/apis/config/default.go | 30 + .../pkg/apis/pipeline/v1/param_types.go | 4 + .../v1alpha1/stepaction_validation.go | 7 + .../pkg/apis/pipeline/v1beta1/param_types.go | 4 + .../apis/pipeline/v1beta1/task_validation.go | 8 + .../pipeline/pkg/substitution/substitution.go | 8 + .../tektoncd/pipeline/test/e2e-tests.sh | 5 + .../tektoncd/pipeline/test/featureflags.go | 4 + .../tektoncd/pipeline/test/presubmit-tests.sh | 21 + .../github.com/tektoncd/plumbing/.gitignore | 5 +- vendor/github.com/tektoncd/plumbing/OWNERS | 11 + .../github.com/tetafro/godot/.goreleaser.yml | 3 + vendor/github.com/tetafro/godot/getters.go | 55 + .../bodyclose/passes/bodyclose/bodyclose.go | 17 + .../wrapcheck/v2/wrapcheck/wrapcheck.go | 7 + vendor/github.com/ultraware/funlen/README.md | 3 + vendor/github.com/ultraware/funlen/main.go | 124 + .../github.com/ultraware/whitespace/README.md | 4 + .../ultraware/whitespace/whitespace.go | 152 + vendor/github.com/uudashr/gocognit/README.md | 31 + vendor/github.com/uudashr/gocognit/doc.go | 4 + .../github.com/uudashr/gocognit/gocognit.go | 148 + vendor/github.com/uudashr/gocognit/recv.go | 3 + .../uudashr/gocognit/recv_pre118.go | 3 + .../github.com/uudashr/iface/opaque/opaque.go | 8 + .../github.com/uudashr/iface/unused/unused.go | 25 + .../vbatts/tar-split/archive/tar/reader.go | 3 + vendor/github.com/xanzy/go-gitlab/.gitignore | 33 + .../github.com/xanzy/go-gitlab/.golangci.yml | 56 + .../xanzy/go-gitlab/CONTRIBUTING.md | 53 + vendor/github.com/xanzy/go-gitlab/LICENSE | 201 + vendor/github.com/xanzy/go-gitlab/Makefile | 22 + vendor/github.com/xanzy/go-gitlab/README.md | 208 + .../xanzy/go-gitlab/access_requests.go | 253 ++ .../github.com/xanzy/go-gitlab/appearance.go | 110 + .../xanzy/go-gitlab/applications.go | 106 + .../xanzy/go-gitlab/audit_events.go | 202 + vendor/github.com/xanzy/go-gitlab/avatar.go | 64 + .../xanzy/go-gitlab/award_emojis.go | 468 ++ vendor/github.com/xanzy/go-gitlab/boards.go | 367 ++ vendor/github.com/xanzy/go-gitlab/branches.go | 252 ++ .../xanzy/go-gitlab/broadcast_messages.go | 191 + .../xanzy/go-gitlab/ci_yml_templates.go | 95 + .../xanzy/go-gitlab/client_options.go | 142 + .../xanzy/go-gitlab/cluster_agents.go | 294 ++ vendor/github.com/xanzy/go-gitlab/commits.go | 610 +++ .../xanzy/go-gitlab/container_registry.go | 311 ++ .../xanzy/go-gitlab/custom_attributes.go | 188 + .../github.com/xanzy/go-gitlab/deploy_keys.go | 275 ++ .../xanzy/go-gitlab/deploy_tokens.go | 290 ++ .../github.com/xanzy/go-gitlab/deployments.go | 260 ++ .../go-gitlab/deployments_merge_requests.go | 53 + .../github.com/xanzy/go-gitlab/discussions.go | 1143 +++++ .../xanzy/go-gitlab/dockerfile_templates.go | 93 + .../xanzy/go-gitlab/dora_metrics.go | 110 + .../github.com/xanzy/go-gitlab/draft_notes.go | 233 + .../xanzy/go-gitlab/environments.go | 238 ++ .../github.com/xanzy/go-gitlab/epic_issues.go | 152 + vendor/github.com/xanzy/go-gitlab/epics.go | 275 ++ .../xanzy/go-gitlab/error_tracking.go | 196 + .../xanzy/go-gitlab/event_parsing.go | 312 ++ .../xanzy/go-gitlab/event_systemhook_types.go | 249 ++ .../xanzy/go-gitlab/event_webhook_types.go | 1265 ++++++ vendor/github.com/xanzy/go-gitlab/events.go | 231 + .../xanzy/go-gitlab/external_status_checks.go | 218 + .../xanzy/go-gitlab/feature_flags.go | 96 + .../xanzy/go-gitlab/freeze_periods.go | 194 + .../xanzy/go-gitlab/generic_packages.go | 158 + .../github.com/xanzy/go-gitlab/geo_nodes.go | 433 ++ .../xanzy/go-gitlab/gitignore_templates.go | 93 + vendor/github.com/xanzy/go-gitlab/gitlab.go | 1049 +++++ 
.../xanzy/go-gitlab/group_access_tokens.go | 199 + .../xanzy/go-gitlab/group_badges.go | 237 + .../xanzy/go-gitlab/group_boards.go | 353 ++ .../xanzy/go-gitlab/group_clusters.go | 217 + .../xanzy/go-gitlab/group_epic_boards.go | 104 + .../github.com/xanzy/go-gitlab/group_hooks.go | 268 ++ .../xanzy/go-gitlab/group_import_export.go | 180 + .../xanzy/go-gitlab/group_iterations.go | 90 + .../xanzy/go-gitlab/group_labels.go | 258 ++ .../xanzy/go-gitlab/group_members.go | 391 ++ .../xanzy/go-gitlab/group_milestones.go | 322 ++ .../go-gitlab/group_protected_environments.go | 281 ++ .../group_repository_storage_move.go | 195 + .../xanzy/go-gitlab/group_serviceaccounts.go | 181 + .../xanzy/go-gitlab/group_ssh_certificates.go | 105 + .../xanzy/go-gitlab/group_variables.go | 218 + .../github.com/xanzy/go-gitlab/group_wikis.go | 204 + vendor/github.com/xanzy/go-gitlab/groups.go | 1177 +++++ vendor/github.com/xanzy/go-gitlab/import.go | 266 ++ .../xanzy/go-gitlab/instance_clusters.go | 153 + .../xanzy/go-gitlab/instance_variables.go | 186 + vendor/github.com/xanzy/go-gitlab/invites.go | 176 + .../github.com/xanzy/go-gitlab/issue_links.go | 186 + vendor/github.com/xanzy/go-gitlab/issues.go | 791 ++++ .../xanzy/go-gitlab/issues_statistics.go | 187 + .../xanzy/go-gitlab/job_token_scope.go | 284 ++ vendor/github.com/xanzy/go-gitlab/jobs.go | 585 +++ vendor/github.com/xanzy/go-gitlab/keys.go | 97 + vendor/github.com/xanzy/go-gitlab/labels.go | 317 ++ vendor/github.com/xanzy/go-gitlab/license.go | 128 + .../xanzy/go-gitlab/license_templates.go | 109 + vendor/github.com/xanzy/go-gitlab/markdown.go | 47 + .../xanzy/go-gitlab/member_roles.go | 144 + .../go-gitlab/merge_request_approvals.go | 440 ++ .../xanzy/go-gitlab/merge_requests.go | 1080 +++++ .../xanzy/go-gitlab/merge_trains.go | 170 + vendor/github.com/xanzy/go-gitlab/metadata.go | 63 + .../github.com/xanzy/go-gitlab/milestones.go | 272 ++ .../github.com/xanzy/go-gitlab/namespaces.go | 174 + vendor/github.com/xanzy/go-gitlab/notes.go | 696 +++ .../xanzy/go-gitlab/notifications.go | 242 ++ vendor/github.com/xanzy/go-gitlab/packages.go | 261 ++ vendor/github.com/xanzy/go-gitlab/pages.go | 45 + .../xanzy/go-gitlab/pages_domains.go | 216 + .../xanzy/go-gitlab/personal_access_tokens.go | 222 + .../xanzy/go-gitlab/pipeline_schedules.go | 385 ++ .../xanzy/go-gitlab/pipeline_triggers.go | 248 ++ .../github.com/xanzy/go-gitlab/pipelines.go | 408 ++ .../github.com/xanzy/go-gitlab/plan_limits.go | 104 + .../xanzy/go-gitlab/project_access_tokens.go | 200 + .../xanzy/go-gitlab/project_badges.go | 230 + .../xanzy/go-gitlab/project_clusters.go | 236 + .../xanzy/go-gitlab/project_feature_flags.go | 246 ++ .../xanzy/go-gitlab/project_import_export.go | 225 + .../xanzy/go-gitlab/project_iterations.go | 90 + .../go-gitlab/project_managed_licenses.go | 188 + .../xanzy/go-gitlab/project_members.go | 238 ++ .../xanzy/go-gitlab/project_mirror.go | 195 + .../project_repository_storage_move.go | 199 + .../xanzy/go-gitlab/project_snippets.go | 209 + .../xanzy/go-gitlab/project_templates.go | 110 + .../xanzy/go-gitlab/project_variables.go | 232 + .../go-gitlab/project_vulnerabilities.go | 150 + vendor/github.com/xanzy/go-gitlab/projects.go | 2263 ++++++++++ .../xanzy/go-gitlab/protected_branches.go | 257 ++ .../xanzy/go-gitlab/protected_environments.go | 282 ++ .../xanzy/go-gitlab/protected_tags.go | 176 + .../xanzy/go-gitlab/releaselinks.go | 201 + vendor/github.com/xanzy/go-gitlab/releases.go | 281 ++ .../xanzy/go-gitlab/repositories.go | 421 ++ 
.../xanzy/go-gitlab/repository_files.go | 385 ++ .../xanzy/go-gitlab/repository_submodules.go | 93 + .../xanzy/go-gitlab/request_options.go | 102 + .../xanzy/go-gitlab/resource_group.go | 165 + .../go-gitlab/resource_iteration_events.go | 122 + .../xanzy/go-gitlab/resource_label_events.go | 220 + .../go-gitlab/resource_milestone_events.go | 155 + .../xanzy/go-gitlab/resource_state_events.go | 154 + .../xanzy/go-gitlab/resource_weight_events.go | 80 + vendor/github.com/xanzy/go-gitlab/runners.go | 597 +++ vendor/github.com/xanzy/go-gitlab/search.go | 359 ++ vendor/github.com/xanzy/go-gitlab/services.go | 2179 ++++++++++ vendor/github.com/xanzy/go-gitlab/settings.go | 965 +++++ .../xanzy/go-gitlab/sidekiq_metrics.go | 157 + .../snippet_repository_storage_move.go | 203 + vendor/github.com/xanzy/go-gitlab/snippets.go | 314 ++ vendor/github.com/xanzy/go-gitlab/strings.go | 93 + .../xanzy/go-gitlab/system_hooks.go | 176 + vendor/github.com/xanzy/go-gitlab/tags.go | 248 ++ .../github.com/xanzy/go-gitlab/time_stats.go | 180 + vendor/github.com/xanzy/go-gitlab/todos.go | 163 + vendor/github.com/xanzy/go-gitlab/topics.go | 222 + vendor/github.com/xanzy/go-gitlab/types.go | 979 +++++ vendor/github.com/xanzy/go-gitlab/users.go | 1591 +++++++ vendor/github.com/xanzy/go-gitlab/validate.go | 154 + vendor/github.com/xanzy/go-gitlab/version.go | 58 + vendor/github.com/xanzy/go-gitlab/wikis.go | 204 + vendor/github.com/zeebo/errs/AUTHORS | 4 + vendor/github.com/zeebo/errs/README.md | 4 + vendor/github.com/zeebo/errs/errs.go | 131 + vendor/github.com/zeebo/errs/group.go | 20 + .../contrib/detectors/gcp/version.go | 4 + .../google.golang.org/grpc/otelgrpc/config.go | 47 + .../grpc/otelgrpc/stats_handler.go | 29 + .../grpc/otelgrpc/version.go | 4 + .../net/http/otelhttp/client.go | 12 + .../net/http/otelhttp/common.go | 10 + .../net/http/otelhttp/handler.go | 41 + .../internal/request/resp_writer_wrapper.go | 8 + .../net/http/otelhttp/internal/semconv/env.go | 92 + .../otelhttp/internal/semconv/httpconv.go | 76 + .../http/otelhttp/internal/semconv/util.go | 14 + .../http/otelhttp/internal/semconv/v1.20.0.go | 70 + .../net/http/otelhttp/transport.go | 81 + .../net/http/otelhttp/version.go | 4 + vendor/go.opentelemetry.io/otel/.gitignore | 11 + vendor/go.opentelemetry.io/otel/.golangci.yml | 35 + vendor/go.opentelemetry.io/otel/CHANGELOG.md | 15 + vendor/go.opentelemetry.io/otel/CODEOWNERS | 6 + .../go.opentelemetry.io/otel/CONTRIBUTING.md | 22 + vendor/go.opentelemetry.io/otel/Makefile | 29 + vendor/go.opentelemetry.io/otel/README.md | 31 + vendor/go.opentelemetry.io/otel/RELEASING.md | 20 + vendor/go.opentelemetry.io/otel/VERSIONING.md | 4 + .../go.opentelemetry.io/otel/attribute/set.go | 42 + .../otel/baggage/baggage.go | 20 + .../go.opentelemetry.io/otel/codes/codes.go | 7 + .../otel/internal/attribute/attribute.go | 36 + .../otel/internal/global/instruments.go | 28 + .../otel/internal/global/meter.go | 206 + .../otel/internal/global/trace.go | 13 + .../otel/internal/rawhelpers.go | 4 + .../otel/metric/asyncfloat64.go | 4 + .../otel/metric/asyncint64.go | 4 + .../otel/metric/instrument.go | 4 + vendor/go.opentelemetry.io/otel/renovate.json | 7 + .../otel/sdk/instrumentation/scope.go | 6 + .../otel/sdk/metric/config.go | 45 + .../otel/sdk/metric/exemplar.go | 52 + .../otel/sdk/metric/instrument.go | 16 + .../metric/internal/aggregate/aggregate.go | 17 + .../sdk/metric/internal/aggregate/exemplar.go | 7 + .../aggregate/exponential_histogram.go | 20 + .../metric/internal/aggregate/histogram.go | 24 + 
.../metric/internal/aggregate/lastvalue.go | 23 + .../otel/sdk/metric/internal/aggregate/sum.go | 34 + .../otel/sdk/metric/internal/exemplar/doc.go | 6 + .../otel/sdk/metric/internal/exemplar/drop.go | 23 + .../sdk/metric/internal/exemplar/exemplar.go | 29 + .../sdk/metric/internal/exemplar/filter.go | 29 + .../internal/exemplar/filtered_reservoir.go | 49 + .../otel/sdk/metric/internal/exemplar/hist.go | 46 + .../otel/sdk/metric/internal/exemplar/rand.go | 191 + .../sdk/metric/internal/exemplar/reservoir.go | 32 + .../sdk/metric/internal/exemplar/storage.go | 95 + .../sdk/metric/internal/exemplar/value.go | 58 + .../otel/sdk/metric/internal/x/x.go | 36 + .../otel/sdk/metric/manual_reader.go | 12 + .../otel/sdk/metric/meter.go | 112 + .../otel/sdk/metric/periodic_reader.go | 12 + .../otel/sdk/metric/pipeline.go | 144 + .../otel/sdk/metric/provider.go | 13 + .../otel/sdk/metric/version.go | 4 + .../otel/sdk/metric/view.go | 8 + .../otel/sdk/resource/auto.go | 61 + .../otel/sdk/resource/builtin.go | 10 + .../otel/sdk/resource/host_id_windows.go | 12 + .../otel/sdk/resource/os_windows.go | 4 + .../go.opentelemetry.io/otel/sdk/version.go | 4 + .../go.opentelemetry.io/otel/trace/config.go | 4 + .../go.opentelemetry.io/otel/trace/context.go | 4 + vendor/go.opentelemetry.io/otel/trace/doc.go | 4 + .../otel/verify_examples.sh | 74 + vendor/go.opentelemetry.io/otel/version.go | 4 + vendor/go.opentelemetry.io/otel/versions.yaml | 26 + vendor/go.step.sm/crypto/LICENSE | 201 + .../crypto/fingerprint/fingerprint.go | 78 + .../crypto/internal/bcrypt_pbkdf/LICENSE | 27 + .../crypto/internal/bcrypt_pbkdf/README | 22 + .../internal/bcrypt_pbkdf/bcrypt_pbkdf.go | 100 + .../go.step.sm/crypto/internal/emoji/emoji.go | 274 ++ vendor/go.step.sm/crypto/internal/utils/io.go | 70 + .../crypto/internal/utils/utfbom/LICENSE | 201 + .../crypto/internal/utils/utfbom/README.md | 66 + .../crypto/internal/utils/utfbom/utfbom.go | 195 + vendor/go.step.sm/crypto/jose/encrypt.go | 135 + vendor/go.step.sm/crypto/jose/generate.go | 204 + vendor/go.step.sm/crypto/jose/options.go | 125 + vendor/go.step.sm/crypto/jose/parse.go | 411 ++ vendor/go.step.sm/crypto/jose/types.go | 310 ++ vendor/go.step.sm/crypto/jose/validate.go | 221 + vendor/go.step.sm/crypto/jose/x25519.go | 66 + .../go.step.sm/crypto/keyutil/fingerprint.go | 74 + vendor/go.step.sm/crypto/keyutil/key.go | 265 ++ vendor/go.step.sm/crypto/pemutil/cosign.go | 79 + vendor/go.step.sm/crypto/pemutil/pem.go | 856 ++++ vendor/go.step.sm/crypto/pemutil/pkcs8.go | 353 ++ vendor/go.step.sm/crypto/pemutil/ssh.go | 299 ++ vendor/go.step.sm/crypto/randutil/random.go | 113 + vendor/go.step.sm/crypto/x25519/x25519.go | 321 ++ vendor/golang.org/x/crypto/pkcs12/crypto.go | 4 + vendor/golang.org/x/crypto/ssh/handshake.go | 14 +- vendor/golang.org/x/crypto/ssh/server.go | 50 +- vendor/golang.org/x/mod/sumdb/dirhash/hash.go | 135 + vendor/golang.org/x/net/http2/config.go | 4 + vendor/golang.org/x/net/http2/config_go124.go | 4 + vendor/golang.org/x/net/http2/transport.go | 19 + vendor/golang.org/x/oauth2/google/default.go | 6 + .../x/oauth2/google/externalaccount/aws.go | 4 + .../google/externalaccount/basecredentials.go | 16 + vendor/golang.org/x/oauth2/oauth2.go | 4 + vendor/golang.org/x/sync/errgroup/errgroup.go | 1 - vendor/golang.org/x/sys/cpu/cpu.go | 3 - vendor/golang.org/x/sys/cpu/cpu_x86.go | 21 +- vendor/golang.org/x/sys/unix/auxv.go | 36 - .../golang.org/x/sys/unix/auxv_unsupported.go | 13 - .../golang.org/x/sys/unix/syscall_solaris.go | 87 - 
vendor/golang.org/x/sys/unix/zerrors_linux.go | 20 +- .../x/sys/unix/zerrors_linux_386.go | 3 - .../x/sys/unix/zerrors_linux_amd64.go | 3 - .../x/sys/unix/zerrors_linux_arm.go | 3 - .../x/sys/unix/zerrors_linux_arm64.go | 4 - .../x/sys/unix/zerrors_linux_loong64.go | 3 - .../x/sys/unix/zerrors_linux_mips.go | 3 - .../x/sys/unix/zerrors_linux_mips64.go | 3 - .../x/sys/unix/zerrors_linux_mips64le.go | 3 - .../x/sys/unix/zerrors_linux_mipsle.go | 3 - .../x/sys/unix/zerrors_linux_ppc.go | 3 - .../x/sys/unix/zerrors_linux_ppc64.go | 3 - .../x/sys/unix/zerrors_linux_ppc64le.go | 3 - .../x/sys/unix/zerrors_linux_riscv64.go | 3 - .../x/sys/unix/zerrors_linux_s390x.go | 3 - .../x/sys/unix/zerrors_linux_sparc64.go | 3 - .../x/sys/unix/zsyscall_solaris_amd64.go | 114 - .../x/sys/unix/zsysnum_linux_386.go | 4 - .../x/sys/unix/zsysnum_linux_amd64.go | 4 - .../x/sys/unix/zsysnum_linux_arm.go | 4 - .../x/sys/unix/zsysnum_linux_arm64.go | 4 - .../x/sys/unix/zsysnum_linux_loong64.go | 4 - .../x/sys/unix/zsysnum_linux_mips.go | 4 - .../x/sys/unix/zsysnum_linux_mips64.go | 4 - .../x/sys/unix/zsysnum_linux_mips64le.go | 4 - .../x/sys/unix/zsysnum_linux_mipsle.go | 4 - .../x/sys/unix/zsysnum_linux_ppc.go | 4 - .../x/sys/unix/zsysnum_linux_ppc64.go | 4 - .../x/sys/unix/zsysnum_linux_ppc64le.go | 4 - .../x/sys/unix/zsysnum_linux_riscv64.go | 4 - .../x/sys/unix/zsysnum_linux_s390x.go | 4 - .../x/sys/unix/zsysnum_linux_sparc64.go | 4 - vendor/golang.org/x/sys/unix/ztypes_linux.go | 6 +- .../x/tools/go/analysis/analysis.go | 10 + .../go/analysis/passes/buildtag/buildtag.go | 33 + .../tools/go/analysis/passes/printf/printf.go | 3 + .../go/analysis/passes/structtag/structtag.go | 4 + .../x/tools/go/ast/inspector/inspector.go | 43 + .../x/tools/go/ast/inspector/iter.go | 8 + .../x/tools/go/ast/inspector/typeof.go | 6 + .../x/tools/go/gcexportdata/gcexportdata.go | 20 + .../x/tools/go/packages/external.go | 14 + .../golang.org/x/tools/go/packages/golist.go | 17 + .../x/tools/go/packages/loadmode_string.go | 6 + .../x/tools/go/packages/packages.go | 67 + vendor/golang.org/x/tools/go/ssa/const.go | 51 + vendor/golang.org/x/tools/go/ssa/dom.go | 3 + vendor/golang.org/x/tools/go/ssa/util.go | 17 + .../x/tools/go/types/typeutil/map.go | 250 ++ .../internal/analysisinternal/analysis.go | 265 ++ .../x/tools/internal/gcimporter/exportdata.go | 91 + .../x/tools/internal/gcimporter/gcimporter.go | 205 + .../x/tools/internal/gcimporter/iimport.go | 8 + .../tools/internal/gcimporter/ureader_yes.go | 12 + .../x/tools/internal/imports/source.go | 4 + .../x/tools/internal/imports/source_env.go | 8 + .../internal/packagesinternal/packages.go | 8 + .../x/tools/internal/stdlib/manifest.go | 227 + .../x/tools/internal/typeparams/common.go | 75 + .../x/tools/internal/typesinternal/recv.go | 3 + .../x/tools/internal/typesinternal/types.go | 3 + .../x/tools/internal/versions/constraint.go | 13 + .../internal/versions/constraint_go121.go | 14 + .../api/googleapi/googleapi.go | 20 + .../iamcredentials/v1/iamcredentials-gen.go | 99 + .../google.golang.org/api/idtoken/idtoken.go | 3 + .../google.golang.org/api/internal/creds.go | 3 + .../api/internal/gensupport/media.go | 3 + .../api/internal/gensupport/resumable.go | 16 + .../api/internal/gensupport/send.go | 6 + .../google.golang.org/api/internal/version.go | 4 + vendor/google.golang.org/api/option/option.go | 9 + .../api/storage/v1/storage-api.json | 11 + .../api/storage/v1/storage-gen.go | 1279 ++++++ .../api/transport/grpc/dial.go | 24 + .../api/transport/http/dial.go | 34 + 
.../http/internal/propagation/http.go | 87 + .../googleapis/api/annotations/client.pb.go | 356 ++ .../googleapis/api/httpbody/httpbody.pb.go | 9 + .../googleapis/api/metric/metric.pb.go | 84 + .../rpc/errdetails/error_details.pb.go | 68 + .../grpc/balancer/balancer.go | 105 + .../grpclb/grpc_lb_v1/load_balancer.pb.go | 225 + .../grpc/balancer/grpclb/grpclb_picker.go | 12 + .../balancer/leastrequest/leastrequest.go | 4 + .../balancer/pickfirst/internal/internal.go | 7 + .../grpc/balancer/pickfirst/pickfirst.go | 4 + .../pickfirst/pickfirstleaf/pickfirstleaf.go | 241 ++ .../grpc/balancer/rls/config.go | 4 + .../grpc/balancer/rls/control_channel.go | 4 + .../rls/internal/adaptive/adaptive.go | 9 + .../grpc/balancer/roundrobin/roundrobin.go | 8 + .../balancer/weightedroundrobin/balancer.go | 405 ++ .../balancer/weightedroundrobin/scheduler.go | 19 + .../weightedroundrobin/weightedroundrobin.go | 3 + .../balancer/weightedtarget/weightedtarget.go | 10 + .../grpc/balancer_wrapper.go | 19 + .../grpc_binarylog_v1/binarylog.pb.go | 201 + vendor/google.golang.org/grpc/clientconn.go | 19 + vendor/google.golang.org/grpc/codec.go | 4 + .../internal/proto/grpc_gcp/altscontext.pb.go | 33 + .../internal/proto/grpc_gcp/handshaker.pb.go | 249 ++ .../grpc_gcp/transport_security_common.pb.go | 57 + .../grpc/credentials/google/google.go | 28 + .../google.golang.org/grpc/credentials/tls.go | 11 + vendor/google.golang.org/grpc/dialoptions.go | 31 + .../grpc/experimental/stats/metricregistry.go | 38 + .../grpc/experimental/stats/metrics.go | 78 + .../grpc/grpclog/internal/loggerv2.go | 74 + .../grpc/health/grpc_health_v1/health.pb.go | 57 + .../grpc/internal/backoff/backoff.go | 4 + .../grpc/internal/envconfig/envconfig.go | 4 + .../grpc/internal/envconfig/xds.go | 3 + .../grpc/internal/hierarchy/hierarchy.go | 6 + .../grpc/internal/internal.go | 13 + .../internal/proto/grpc_lookup_v1/rls.pb.go | 57 + .../proto/grpc_lookup_v1/rls_config.pb.go | 177 + .../internal/resolver/dns/dns_resolver.go | 45 + .../grpc/internal/transport/flowcontrol.go | 11 + .../grpc/internal/transport/handler_server.go | 51 + .../grpc/internal/transport/http2_client.go | 114 + .../grpc/internal/transport/http2_server.go | 111 + .../grpc/internal/transport/transport.go | 377 ++ .../grpc/internal/wrr/random.go | 16 + .../internal/xds/matcher/matcher_header.go | 40 + .../grpc/internal/xds/rbac/matchers.go | 11 + .../grpc/internal/xds/rbac/rbac_engine.go | 3 + .../grpc/mem/buffer_slice.go | 11 + vendor/google.golang.org/grpc/preloader.go | 8 + .../grpc/resolver/resolver.go | 11 + vendor/google.golang.org/grpc/rpc_util.go | 56 + vendor/google.golang.org/grpc/server.go | 136 + .../google.golang.org/grpc/service_config.go | 24 + .../grpc/stats/opentelemetry/LICENSE | 202 + .../stats/opentelemetry/client_metrics.go | 17 + .../grpc/stats/opentelemetry/opentelemetry.go | 59 + .../stats/opentelemetry/server_metrics.go | 14 + vendor/google.golang.org/grpc/stats/stats.go | 79 + vendor/google.golang.org/grpc/stream.go | 113 + vendor/google.golang.org/grpc/version.go | 4 + .../grpc/xds/googledirectpath/googlec2p.go | 28 + .../balancer/clusterimpl/clusterimpl.go | 216 + .../internal/balancer/clusterimpl/picker.go | 18 + .../balancer/clustermanager/clustermanager.go | 13 + .../clusterresolver/clusterresolver.go | 20 + .../balancer/clusterresolver/configbuilder.go | 93 + .../clusterresolver/resource_resolver.go | 8 + .../clusterresolver/resource_resolver_dns.go | 32 + .../balancer/outlierdetection/balancer.go | 116 + .../outlierdetection/subconn_wrapper.go | 
28 + .../internal/balancer/priority/balancer.go | 9 + .../internal/balancer/ringhash/ringhash.go | 4 + .../internal/balancer/wrrlocality/balancer.go | 3 + .../xds/internal/httpfilter/fault/fault.go | 8 + .../grpc/xds/internal/internal.go | 3 + .../xds/internal/resolver/serviceconfig.go | 4 + .../xds/internal/resolver/xds_resolver.go | 17 + .../grpc/xds/internal/xdsclient/authority.go | 597 +++ .../grpc/xds/internal/xdsclient/client.go | 6 + .../grpc/xds/internal/xdsclient/client_new.go | 58 + .../internal/xdsclient/client_refcounted.go | 39 + .../grpc/xds/internal/xdsclient/clientimpl.go | 64 + .../xdsclient/clientimpl_authority.go | 146 + .../xds/internal/xdsclient/clientimpl_dump.go | 7 + .../xdsclient/clientimpl_loadreport.go | 18 + .../internal/xdsclient/clientimpl_watchers.go | 43 + .../internal/xdsclient/internal/internal.go | 5 + .../grpc/xds/internal/xdsclient/logging.go | 7 + .../xdsclient/transport/internal/internal.go | 25 + .../xdsclient/transport/loadreport.go | 259 ++ .../internal/xdsclient/transport/transport.go | 702 +++ .../xdsresource/cluster_resource_type.go | 5 + .../xdsresource/endpoints_resource_type.go | 5 + .../xdsresource/listener_resource_type.go | 5 + .../internal/xdsclient/xdsresource/matcher.go | 12 + .../xdsclient/xdsresource/resource_type.go | 14 + .../xdsresource/route_config_resource_type.go | 5 + .../xdsclient/xdsresource/type_eds.go | 4 + .../xdsclient/xdsresource/unmarshal_eds.go | 14 + .../api/resource/v1alpha3/generated.proto | 2 +- vendor/k8s.io/api/resource/v1alpha3/types.go | 6 +- .../v1alpha3/types_swagger_doc_generated.go | 2 +- .../api/resource/v1beta1/generated.proto | 2 +- vendor/k8s.io/api/resource/v1beta1/types.go | 6 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- vendor/modules.txt | 635 ++- .../release-utils/version/command.go | 6 + .../release-utils/version/version.go | 36 +
 1144 files changed, 108098 insertions(+), 841 deletions(-)
 create mode 100644 pkg/chains/storage/archivista/archivista.go
 create mode 100644 pkg/chains/storage/archivista/archivista_test.go
 create mode 100644 pkg/chains/storage/archivista/cert.go
 create mode 100644 pkg/chains/storage/archivista/cert_test.go
 create mode 100644 pkg/chains/storage/archivista/patch_test.go
 create mode 100644 vendor/cloud.google.com/go/auth/httptransport/trace.go
 create mode 100644 vendor/filippo.io/edwards25519/LICENSE
 create mode 100644 vendor/filippo.io/edwards25519/README.md
 create mode 100644 vendor/filippo.io/edwards25519/doc.go
 create mode 100644 vendor/filippo.io/edwards25519/edwards25519.go
 create mode 100644 vendor/filippo.io/edwards25519/extra.go
 create mode 100644 vendor/filippo.io/edwards25519/field/fe.go
 create mode 100644 vendor/filippo.io/edwards25519/field/fe_amd64.go
 create mode 100644 vendor/filippo.io/edwards25519/field/fe_amd64.s
 create mode 100644 vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go
 create mode 100644 vendor/filippo.io/edwards25519/field/fe_arm64.go
 create mode 100644 vendor/filippo.io/edwards25519/field/fe_arm64.s
 create mode 100644 vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go
 create mode 100644 vendor/filippo.io/edwards25519/field/fe_extra.go
 create mode 100644 vendor/filippo.io/edwards25519/field/fe_generic.go
 create mode 100644 vendor/filippo.io/edwards25519/scalar.go
 create mode 100644 vendor/filippo.io/edwards25519/scalar_fiat.go
 create mode 100644 vendor/filippo.io/edwards25519/scalarmult.go
 create mode 100644 vendor/filippo.io/edwards25519/tables.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/ci.securitykeyvault.yml
 create mode 100644 vendor/github.com/buildkite/agent/v3/api/auth.go
 create mode 100644 vendor/github.com/edwarnicke/gitoid/.gitignore
 create mode 100644 vendor/github.com/edwarnicke/gitoid/LICENSE
 create mode 100644 vendor/github.com/edwarnicke/gitoid/README.md
 create mode 100644 vendor/github.com/edwarnicke/gitoid/gitoid.go
 create mode 100644 vendor/github.com/edwarnicke/gitoid/options.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v3/cryptosigner/cryptosigner.go
 create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_base.go
 create mode 100644 vendor/github.com/golangci/modinfo/.gitignore
 create mode 100644 vendor/github.com/golangci/modinfo/.golangci.yml
 create mode 100644 vendor/github.com/golangci/modinfo/LICENSE
 create mode 100644 vendor/github.com/golangci/modinfo/Makefile
 create mode 100644 vendor/github.com/golangci/modinfo/module.go
 create mode 100644 vendor/github.com/golangci/modinfo/readme.md
 create mode 100644 vendor/github.com/google/cel-go/checker/standard.go
 create mode 100644 vendor/github.com/in-toto/archivista/LICENSE
 create mode 100644 vendor/github.com/in-toto/archivista/pkg/api/client.go
 create mode 100644 vendor/github.com/in-toto/archivista/pkg/api/download.go
 create mode 100644 vendor/github.com/in-toto/archivista/pkg/api/graphql.go
 create mode 100644 vendor/github.com/in-toto/archivista/pkg/api/structs.go
 create mode 100644 vendor/github.com/in-toto/archivista/pkg/api/upload.go
 create mode 100644 vendor/github.com/in-toto/archivista/pkg/http-client/client.go
 create mode 100644 vendor/github.com/in-toto/go-witness/LICENSE
 create mode 100644 vendor/github.com/in-toto/go-witness/cryptoutil/digestset.go
 create mode 100644 vendor/github.com/in-toto/go-witness/cryptoutil/dirhash.go
 create mode 100644 vendor/github.com/in-toto/go-witness/cryptoutil/ecdsa.go
 create mode 100644 vendor/github.com/in-toto/go-witness/cryptoutil/ed25519.go
 create mode 100644 vendor/github.com/in-toto/go-witness/cryptoutil/gitoid.go
 create mode 100644 vendor/github.com/in-toto/go-witness/cryptoutil/rsa.go
 create mode 100644 vendor/github.com/in-toto/go-witness/cryptoutil/signer.go
 create mode 100644 vendor/github.com/in-toto/go-witness/cryptoutil/util.go
 create mode 100644 vendor/github.com/in-toto/go-witness/cryptoutil/verifier.go
 create mode 100644 vendor/github.com/in-toto/go-witness/cryptoutil/x509.go
 create mode 100644 vendor/github.com/in-toto/go-witness/dsse/dsse.go
 create mode 100644 vendor/github.com/in-toto/go-witness/dsse/sign.go
 create mode 100644 vendor/github.com/in-toto/go-witness/dsse/verify.go
 create mode 100644 vendor/github.com/in-toto/go-witness/log/log.go
 create mode 100644 vendor/github.com/in-toto/go-witness/log/silent.go
 create mode 100644 vendor/github.com/in-toto/go-witness/timestamp/fake.go
 create mode 100644 vendor/github.com/in-toto/go-witness/timestamp/timestamp.go
 create mode 100644 vendor/github.com/in-toto/go-witness/timestamp/tsp.go
 create mode 100644 vendor/github.com/magiconair/properties/CHANGELOG.md
 create mode 100644 vendor/github.com/nunnatsa/ginkgolinter/types/boolean.go
 create mode 100644 vendor/github.com/prometheus/common/model/labelset_string_go120.go
 create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitignore
 create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitmodules
 create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/LICENSE
 create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/README.md
 create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/compiler.go
 create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/content.go
 create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/doc.go
 create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/draft.go
 create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/errors.go
 create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/extension.go
 create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/format.go
 create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/httploader/httploader.go
 create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/loader.go
 create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/output.go
 create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/resource.go
 create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/schema.go
 create mode 100644 vendor/github.com/ultraware/funlen/main.go
 create mode 100644 vendor/github.com/xanzy/go-gitlab/.gitignore
 create mode 100644 vendor/github.com/xanzy/go-gitlab/.golangci.yml
 create mode 100644 vendor/github.com/xanzy/go-gitlab/CONTRIBUTING.md
 create mode 100644 vendor/github.com/xanzy/go-gitlab/LICENSE
 create mode 100644 vendor/github.com/xanzy/go-gitlab/Makefile
 create mode 100644 vendor/github.com/xanzy/go-gitlab/README.md
 create mode 100644 vendor/github.com/xanzy/go-gitlab/access_requests.go
 create mode 100644 vendor/github.com/xanzy/go-gitlab/appearance.go
 create mode 100644 vendor/github.com/xanzy/go-gitlab/applications.go
 create mode 100644 vendor/github.com/xanzy/go-gitlab/audit_events.go
 create mode 100644 vendor/github.com/xanzy/go-gitlab/avatar.go
 create mode 100644 vendor/github.com/xanzy/go-gitlab/award_emojis.go
 create mode 100644 vendor/github.com/xanzy/go-gitlab/boards.go
 create mode 100644 vendor/github.com/xanzy/go-gitlab/branches.go
 create mode 100644 vendor/github.com/xanzy/go-gitlab/broadcast_messages.go
 create mode 100644 vendor/github.com/xanzy/go-gitlab/ci_yml_templates.go
 create mode 100644 vendor/github.com/xanzy/go-gitlab/client_options.go
 create mode 100644 vendor/github.com/xanzy/go-gitlab/cluster_agents.go
 create mode 100644 vendor/github.com/xanzy/go-gitlab/commits.go
 create mode 100644 vendor/github.com/xanzy/go-gitlab/container_registry.go
 create mode 100644 vendor/github.com/xanzy/go-gitlab/custom_attributes.go
 create mode 100644 vendor/github.com/xanzy/go-gitlab/deploy_keys.go
 create mode 100644 vendor/github.com/xanzy/go-gitlab/deploy_tokens.go
 create mode 100644 vendor/github.com/xanzy/go-gitlab/deployments.go
 create mode 100644 vendor/github.com/xanzy/go-gitlab/deployments_merge_requests.go
 create mode 100644 vendor/github.com/xanzy/go-gitlab/discussions.go
 create mode 100644 vendor/github.com/xanzy/go-gitlab/dockerfile_templates.go
 create mode 100644 vendor/github.com/xanzy/go-gitlab/dora_metrics.go
 create mode 100644 vendor/github.com/xanzy/go-gitlab/draft_notes.go
 create mode 100644 vendor/github.com/xanzy/go-gitlab/environments.go
 create mode 100644 vendor/github.com/xanzy/go-gitlab/epic_issues.go
 create mode 100644 vendor/github.com/xanzy/go-gitlab/epics.go
 create mode 100644 vendor/github.com/xanzy/go-gitlab/error_tracking.go
 create mode 100644 vendor/github.com/xanzy/go-gitlab/event_parsing.go
 create mode 100644 vendor/github.com/xanzy/go-gitlab/event_systemhook_types.go
 create mode 100644 vendor/github.com/xanzy/go-gitlab/event_webhook_types.go
create mode 100644 vendor/github.com/xanzy/go-gitlab/events.go create mode 100644 vendor/github.com/xanzy/go-gitlab/external_status_checks.go create mode 100644 vendor/github.com/xanzy/go-gitlab/feature_flags.go create mode 100644 vendor/github.com/xanzy/go-gitlab/freeze_periods.go create mode 100644 vendor/github.com/xanzy/go-gitlab/generic_packages.go create mode 100644 vendor/github.com/xanzy/go-gitlab/geo_nodes.go create mode 100644 vendor/github.com/xanzy/go-gitlab/gitignore_templates.go create mode 100644 vendor/github.com/xanzy/go-gitlab/gitlab.go create mode 100644 vendor/github.com/xanzy/go-gitlab/group_access_tokens.go create mode 100644 vendor/github.com/xanzy/go-gitlab/group_badges.go create mode 100644 vendor/github.com/xanzy/go-gitlab/group_boards.go create mode 100644 vendor/github.com/xanzy/go-gitlab/group_clusters.go create mode 100644 vendor/github.com/xanzy/go-gitlab/group_epic_boards.go create mode 100644 vendor/github.com/xanzy/go-gitlab/group_hooks.go create mode 100644 vendor/github.com/xanzy/go-gitlab/group_import_export.go create mode 100644 vendor/github.com/xanzy/go-gitlab/group_iterations.go create mode 100644 vendor/github.com/xanzy/go-gitlab/group_labels.go create mode 100644 vendor/github.com/xanzy/go-gitlab/group_members.go create mode 100644 vendor/github.com/xanzy/go-gitlab/group_milestones.go create mode 100644 vendor/github.com/xanzy/go-gitlab/group_protected_environments.go create mode 100644 vendor/github.com/xanzy/go-gitlab/group_repository_storage_move.go create mode 100644 vendor/github.com/xanzy/go-gitlab/group_serviceaccounts.go create mode 100644 vendor/github.com/xanzy/go-gitlab/group_ssh_certificates.go create mode 100644 vendor/github.com/xanzy/go-gitlab/group_variables.go create mode 100644 vendor/github.com/xanzy/go-gitlab/group_wikis.go create mode 100644 vendor/github.com/xanzy/go-gitlab/groups.go create mode 100644 vendor/github.com/xanzy/go-gitlab/import.go create mode 100644 vendor/github.com/xanzy/go-gitlab/instance_clusters.go create mode 100644 vendor/github.com/xanzy/go-gitlab/instance_variables.go create mode 100644 vendor/github.com/xanzy/go-gitlab/invites.go create mode 100644 vendor/github.com/xanzy/go-gitlab/issue_links.go create mode 100644 vendor/github.com/xanzy/go-gitlab/issues.go create mode 100644 vendor/github.com/xanzy/go-gitlab/issues_statistics.go create mode 100644 vendor/github.com/xanzy/go-gitlab/job_token_scope.go create mode 100644 vendor/github.com/xanzy/go-gitlab/jobs.go create mode 100644 vendor/github.com/xanzy/go-gitlab/keys.go create mode 100644 vendor/github.com/xanzy/go-gitlab/labels.go create mode 100644 vendor/github.com/xanzy/go-gitlab/license.go create mode 100644 vendor/github.com/xanzy/go-gitlab/license_templates.go create mode 100644 vendor/github.com/xanzy/go-gitlab/markdown.go create mode 100644 vendor/github.com/xanzy/go-gitlab/member_roles.go create mode 100644 vendor/github.com/xanzy/go-gitlab/merge_request_approvals.go create mode 100644 vendor/github.com/xanzy/go-gitlab/merge_requests.go create mode 100644 vendor/github.com/xanzy/go-gitlab/merge_trains.go create mode 100644 vendor/github.com/xanzy/go-gitlab/metadata.go create mode 100644 vendor/github.com/xanzy/go-gitlab/milestones.go create mode 100644 vendor/github.com/xanzy/go-gitlab/namespaces.go create mode 100644 vendor/github.com/xanzy/go-gitlab/notes.go create mode 100644 vendor/github.com/xanzy/go-gitlab/notifications.go create mode 100644 vendor/github.com/xanzy/go-gitlab/packages.go create mode 100644 
vendor/github.com/xanzy/go-gitlab/pages.go create mode 100644 vendor/github.com/xanzy/go-gitlab/pages_domains.go create mode 100644 vendor/github.com/xanzy/go-gitlab/personal_access_tokens.go create mode 100644 vendor/github.com/xanzy/go-gitlab/pipeline_schedules.go create mode 100644 vendor/github.com/xanzy/go-gitlab/pipeline_triggers.go create mode 100644 vendor/github.com/xanzy/go-gitlab/pipelines.go create mode 100644 vendor/github.com/xanzy/go-gitlab/plan_limits.go create mode 100644 vendor/github.com/xanzy/go-gitlab/project_access_tokens.go create mode 100644 vendor/github.com/xanzy/go-gitlab/project_badges.go create mode 100644 vendor/github.com/xanzy/go-gitlab/project_clusters.go create mode 100644 vendor/github.com/xanzy/go-gitlab/project_feature_flags.go create mode 100644 vendor/github.com/xanzy/go-gitlab/project_import_export.go create mode 100644 vendor/github.com/xanzy/go-gitlab/project_iterations.go create mode 100644 vendor/github.com/xanzy/go-gitlab/project_managed_licenses.go create mode 100644 vendor/github.com/xanzy/go-gitlab/project_members.go create mode 100644 vendor/github.com/xanzy/go-gitlab/project_mirror.go create mode 100644 vendor/github.com/xanzy/go-gitlab/project_repository_storage_move.go create mode 100644 vendor/github.com/xanzy/go-gitlab/project_snippets.go create mode 100644 vendor/github.com/xanzy/go-gitlab/project_templates.go create mode 100644 vendor/github.com/xanzy/go-gitlab/project_variables.go create mode 100644 vendor/github.com/xanzy/go-gitlab/project_vulnerabilities.go create mode 100644 vendor/github.com/xanzy/go-gitlab/projects.go create mode 100644 vendor/github.com/xanzy/go-gitlab/protected_branches.go create mode 100644 vendor/github.com/xanzy/go-gitlab/protected_environments.go create mode 100644 vendor/github.com/xanzy/go-gitlab/protected_tags.go create mode 100644 vendor/github.com/xanzy/go-gitlab/releaselinks.go create mode 100644 vendor/github.com/xanzy/go-gitlab/releases.go create mode 100644 vendor/github.com/xanzy/go-gitlab/repositories.go create mode 100644 vendor/github.com/xanzy/go-gitlab/repository_files.go create mode 100644 vendor/github.com/xanzy/go-gitlab/repository_submodules.go create mode 100644 vendor/github.com/xanzy/go-gitlab/request_options.go create mode 100644 vendor/github.com/xanzy/go-gitlab/resource_group.go create mode 100644 vendor/github.com/xanzy/go-gitlab/resource_iteration_events.go create mode 100644 vendor/github.com/xanzy/go-gitlab/resource_label_events.go create mode 100644 vendor/github.com/xanzy/go-gitlab/resource_milestone_events.go create mode 100644 vendor/github.com/xanzy/go-gitlab/resource_state_events.go create mode 100644 vendor/github.com/xanzy/go-gitlab/resource_weight_events.go create mode 100644 vendor/github.com/xanzy/go-gitlab/runners.go create mode 100644 vendor/github.com/xanzy/go-gitlab/search.go create mode 100644 vendor/github.com/xanzy/go-gitlab/services.go create mode 100644 vendor/github.com/xanzy/go-gitlab/settings.go create mode 100644 vendor/github.com/xanzy/go-gitlab/sidekiq_metrics.go create mode 100644 vendor/github.com/xanzy/go-gitlab/snippet_repository_storage_move.go create mode 100644 vendor/github.com/xanzy/go-gitlab/snippets.go create mode 100644 vendor/github.com/xanzy/go-gitlab/strings.go create mode 100644 vendor/github.com/xanzy/go-gitlab/system_hooks.go create mode 100644 vendor/github.com/xanzy/go-gitlab/tags.go create mode 100644 vendor/github.com/xanzy/go-gitlab/time_stats.go create mode 100644 vendor/github.com/xanzy/go-gitlab/todos.go create mode 100644 
vendor/github.com/xanzy/go-gitlab/topics.go create mode 100644 vendor/github.com/xanzy/go-gitlab/types.go create mode 100644 vendor/github.com/xanzy/go-gitlab/users.go create mode 100644 vendor/github.com/xanzy/go-gitlab/validate.go create mode 100644 vendor/github.com/xanzy/go-gitlab/version.go create mode 100644 vendor/github.com/xanzy/go-gitlab/wikis.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go create mode 100644 vendor/go.opentelemetry.io/otel/verify_examples.sh create mode 100644 vendor/go.step.sm/crypto/LICENSE create mode 100644 vendor/go.step.sm/crypto/fingerprint/fingerprint.go create mode 100644 vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/LICENSE create mode 100644 vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/README create mode 100644 vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/bcrypt_pbkdf.go create mode 100644 vendor/go.step.sm/crypto/internal/emoji/emoji.go create mode 100644 vendor/go.step.sm/crypto/internal/utils/io.go create mode 100644 vendor/go.step.sm/crypto/internal/utils/utfbom/LICENSE create mode 100644 vendor/go.step.sm/crypto/internal/utils/utfbom/README.md create mode 100644 vendor/go.step.sm/crypto/internal/utils/utfbom/utfbom.go create mode 100644 vendor/go.step.sm/crypto/jose/encrypt.go create mode 100644 vendor/go.step.sm/crypto/jose/generate.go create mode 100644 vendor/go.step.sm/crypto/jose/options.go create mode 100644 vendor/go.step.sm/crypto/jose/parse.go create mode 100644 vendor/go.step.sm/crypto/jose/types.go create mode 100644 vendor/go.step.sm/crypto/jose/validate.go create mode 100644 vendor/go.step.sm/crypto/jose/x25519.go create mode 100644 vendor/go.step.sm/crypto/keyutil/fingerprint.go create mode 100644 vendor/go.step.sm/crypto/keyutil/key.go create mode 100644 vendor/go.step.sm/crypto/pemutil/cosign.go create mode 100644 vendor/go.step.sm/crypto/pemutil/pem.go create mode 100644 vendor/go.step.sm/crypto/pemutil/pkcs8.go create mode 100644 vendor/go.step.sm/crypto/pemutil/ssh.go create mode 100644 vendor/go.step.sm/crypto/randutil/random.go create mode 100644 vendor/go.step.sm/crypto/x25519/x25519.go create mode 100644 vendor/golang.org/x/mod/sumdb/dirhash/hash.go delete mode 100644 vendor/golang.org/x/sys/unix/auxv.go delete mode 100644 vendor/golang.org/x/sys/unix/auxv_unsupported.go create mode 100644 vendor/golang.org/x/tools/internal/versions/constraint.go create mode 100644 vendor/golang.org/x/tools/internal/versions/constraint_go121.go create mode 100644 vendor/google.golang.org/api/transport/http/internal/propagation/http.go create mode 100644 vendor/google.golang.org/grpc/stats/opentelemetry/LICENSE create mode 100644 
vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go
 create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/internal/internal.go
 create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go
 create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go
diff --git a/config/100-deployment.yaml b/config/100-deployment.yaml
index b7a978116e..141e9c6d13 100644
--- a/config/100-deployment.yaml
+++ b/config/100-deployment.yaml
@@ -41,9 +41,13 @@ metadata:
     app.kubernetes.io/part-of: tekton-chains
-# The data can be tweaked at install time, it is commented out
-# because these are the default settings.
-# data:
-#   artifacts.taskrun.format: tekton
-#   artifacts.taskrun.storage: tekton
+# The data can be tweaked at install time. The settings below switch the
+# storage backend to Archivista with in-toto payloads.
+data:
+  artifacts.taskrun.format: in-toto
+  artifacts.taskrun.storage: archivista
+  artifacts.pipelinerun.format: in-toto
+  artifacts.pipelinerun.storage: archivista
+  storage.archivista.url: https://archivista.testifysec.io
+
 # artifacts.taskrun.signer: x509
 # artifacts.oci.storage: oci
 # artifacts.oci.format: simplesigning
diff --git a/docs/config.md b/docs/config.md
index a3eeafa809..53d3420de2 100644
--- a/docs/config.md
+++ b/docs/config.md
@@ -5,126 +5,119 @@ weight: 20
 ---
 -->
 # Chains Configuration
+
 `Chains` works by observing `TaskRun` and `PipelineRun` executions, capturing relevant information, and storing it in a cryptographically-signed format.
-`TaskRuns` and `PipelineRuns` can indicate inputs and outputs which are then captured and surfaced in the `Chains` payload formats, where relevant.
-`Chains` uses the `Results` to _hint_ at the correct inputs and outputs. Check out [slsa-provenance.md](slsa-provenance.md) for more details.
+`TaskRuns` and `PipelineRuns` can indicate inputs and outputs which are then captured and surfaced in the `Chains` payload formats where relevant.
+Chains uses the `Results` to _hint_ at the correct inputs and outputs. Check out [slsa-provenance.md](slsa-provenance.md) for more details.
 
 ## Chains Configuration
-Chains uses a `ConfigMap` called `chains-config` in the `tekton-chains` namespace for configuration.
+Chains uses a `ConfigMap` called `chains-config` in the `tekton-chains` namespace for configuration. Supported keys include:
 
 ### TaskRun Configuration
 
-| Key | Description | Supported Values | Default |
-| :-------------------------- | :----------------------------------------------------------------------------------------------- | :----------------------------------------- | :-------- |
+| Key | Description | Supported Values | Default |
+| :-------------------------- | :----------------------------------------------------------------------------------------------- | :--------------------------------------------------------- | :-------- |
 | `artifacts.taskrun.format` | The format to store `TaskRun` payloads in. | `in-toto`, `slsa/v1`, `slsa/v2alpha3`, `slsa/v2alpha4` | `in-toto` |
-| `artifacts.taskrun.storage` | The storage backend to store `TaskRun` signatures in. Multiple backends can be specified with comma-separated list ("tekton,oci"). To disable the `TaskRun` artifact input an empty string (""). | `tekton`, `oci`, `gcs`, `docdb`, `grafeas` | `tekton` |
-| `artifacts.taskrun.signer` | The signature backend to sign `TaskRun` payloads with.
| `x509`, `kms` | `x509` | +| `artifacts.taskrun.storage` | The storage backend to store `TaskRun` signatures in. Multiple backends can be specified with a comma-separated list (e.g. `"tekton,oci"`). An empty string disables TaskRun artifacts. | `tekton`, `oci`, `gcs`, `docdb`, `grafeas`, `archivista` | `tekton` | +| `artifacts.taskrun.signer` | The signature backend to sign `TaskRun` payloads with. | `x509`, `kms` | `x509` | -> NOTE: -> -> - `slsa/v1` is an alias of `in-toto` for backwards compatibility. -> - `slsa/v2alpha3` corresponds to the slsav1.0 spec. and uses latest [`v1` Tekton Objects](https://tekton.dev/docs/pipelines/pipeline-api/#tekton.dev/v1). Recommended format for new chains users who want the slsav1.0 spec. -> - `slsa/v2alpha4` corresponds to the slsav1.0 spec. and uses latest [`v1` Tekton Objects](https://tekton.dev/docs/pipelines/pipeline-api/#tekton.dev/v1). It reads type-hinted results from [StepActions](https://tekton.dev/docs/pipelines/pipeline-api/#tekton.dev/v1alpha1.StepAction). Recommended format for new chains users who want the slsav1.0 spec. +> **NOTE:** +> - `slsa/v1` is an alias of `in-toto` for backwards compatibility. +> - `slsa/v2alpha3` corresponds to the slsav1.0 spec and uses the latest [`v1` Tekton Objects](https://tekton.dev/docs/pipelines/pipeline-api/#tekton.dev/v1). Recommended for new Chains users who want the slsav1.0 spec. +> - `slsa/v2alpha4` corresponds to the slsav1.0 spec and uses the latest [`v1` Tekton Objects](https://tekton.dev/docs/pipelines/pipeline-api/#tekton.dev/v1). It reads type-hinted results from [StepActions](https://tekton.dev/docs/pipelines/pipeline-api/#tekton.dev/v1alpha1.StepAction). Recommended for new Chains users who want the slsav1.0 spec. ### PipelineRun Configuration -| Key | Description | Supported Values | Default | -| :--------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | :----------------------------------------- | :-------- | +| Key | Description | Supported Values | Default | +| :--------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | :--------------------------------------------------------- | :-------- | | `artifacts.pipelinerun.format` | The format to store `PipelineRun` payloads in. | `in-toto`, `slsa/v1`, `slsa/v2alpha3`, `slsa/v2alpha4` | `in-toto` | -| `artifacts.pipelinerun.storage` | The storage backend to store `PipelineRun` signatures in. Multiple backends can be specified with comma-separated list ("tekton,oci"). To disable the `PipelineRun` artifact input an empty string (""). | `tekton`, `oci`, `gcs`, `docdb`, `grafeas` | `tekton` | -| `artifacts.pipelinerun.signer` | The signature backend to sign `PipelineRun` payloads with. | `x509`, `kms` | `x509` | -| `artifacts.pipelinerun.enable-deep-inspection` | This boolean option will configure whether Chains should inspect child taskruns in order to capture inputs/outputs within a pipelinerun. 
`"false"` means that Chains only checks pipeline level results, whereas `"true"` means Chains inspects both pipeline level and task level results. | `"true"`, `"false"` | `"false"` | - -> NOTE: -> -> - For grafeas storage backend, currently we only support Container Analysis. We will make grafeas server address configurabe within a short time. -> - `slsa/v1` is an alias of `in-toto` for backwards compatibility. -> - `slsa/v2alpha3` corresponds to the slsav1.0 spec. and uses latest [`v1` Tekton Objects](https://tekton.dev/docs/pipelines/pipeline-api/#tekton.dev/v1). Recommended format for new chains users who want the slsav1.0 spec. -> - `slsa/v2alpha4` corresponds to the slsav1.0 spec. and uses latest [`v1` Tekton Objects](https://tekton.dev/docs/pipelines/pipeline-api/#tekton.dev/v1). It reads type-hinted results from [StepActions](https://tekton.dev/docs/pipelines/pipeline-api/#tekton.dev/v1alpha1.StepAction) when `artifacts.pipelinerun.enable-deep-inspection` is set to `true`. Recommended format for new chains users who want the slsav1.0 spec. +| `artifacts.pipelinerun.storage` | The storage backend to store `PipelineRun` signatures in. Multiple backends can be specified with a comma-separated list (e.g. `"tekton,oci"`). An empty string disables PipelineRun artifacts. | `tekton`, `oci`, `gcs`, `docdb`, `grafeas`, `archivista` | `tekton` | +| `artifacts.pipelinerun.signer` | The signature backend to sign `PipelineRun` payloads with. | `x509`, `kms` | `x509` | +| `artifacts.pipelinerun.enable-deep-inspection` | This boolean option configures whether Chains should inspect child TaskRuns to capture inputs/outputs within a PipelineRun. `"false"` means only pipeline-level results are checked, whereas `"true"` means both pipeline and task level results are inspected. | `"true"`, `"false"` | `"false"` | +> **NOTE:** +> - For the Grafeas storage backend, currently only Container Analysis is supported. A configurable Grafeas server address is coming soon. +> - `slsa/v1` is an alias of `in-toto` for backwards compatibility. +> - `slsa/v2alpha3` corresponds to the slsav1.0 spec and uses the latest [`v1` Tekton Objects](https://tekton.dev/docs/pipelines/pipeline-api/#tekton.dev/v1). Recommended for new Chains users who want the slsav1.0 spec. +> - `slsa/v2alpha4` corresponds to the slsav1.0 spec and uses the latest [`v1` Tekton Objects](https://tekton.dev/docs/pipelines/pipeline-api/#tekton.dev/v1). It reads type-hinted results from [StepActions](https://tekton.dev/docs/pipelines/pipeline-api/#tekton.dev/v1alpha1.StepAction) when `artifacts.pipelinerun.enable-deep-inspection` is set to `true`. Recommended for new Chains users who want the slsav1.0 spec. ### OCI Configuration | Key | Description | Supported Values | Default | | :---------------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :----------------------------------------- | :-------------- | | `artifacts.oci.format` | The format to store `OCI` payloads in. | `simplesigning` | `simplesigning` | -| `artifacts.oci.storage` | The storage backend to store `OCI` signatures in. Multiple backends can be specified with comma-separated list ("oci,tekton"). To disable the `OCI` artifact input an empty string (""). | `tekton`, `oci`, `gcs`, `docdb`, `grafeas` | `oci` | +| `artifacts.oci.storage` | The storage backend to store `OCI` signatures in. 
Multiple backends can be specified with a comma-separated list (e.g. `"oci,tekton"`). An empty string disables OCI artifacts. | `tekton`, `oci`, `gcs`, `docdb`, `grafeas` | `oci` | | `artifacts.oci.signer` | The signature backend to sign `OCI` payloads with. | `x509`, `kms` | `x509` | ### KMS Configuration | Key | Description | Supported Values | Default | | :------------------- | :---------------------------------------------------------- | :---------------------------------------------------------------------------------------------------------------------------------------------- | :------ | -| `signers.kms.kmsref` | The URI reference to a KMS service to use in `KMS` signers. | Supported schemes: `gcpkms://`, `awskms://`, `azurekms://`, `hashivault://`. See https://docs.sigstore.dev/cosign/kms_support for more details. | | +| `signers.kms.kmsref` | The URI reference to a KMS service for `KMS` signers. | Supported schemes: `gcpkms://`, `awskms://`, `azurekms://`, `hashivault://`. See [Sigstore KMS Support](https://docs.sigstore.dev/cosign/kms_support) for details. | | ### Storage Configuration -| Key | Description | Supported Values | Default | -|:-------------------------------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------| -| `storage.gcs.bucket` | The GCS bucket for storage | | | -| `storage.oci.repository` | The OCI repo to store OCI signatures and attestation in | If left undefined _and_ one of `artifacts.{oci,taskrun}.storage` includes `oci` storage, attestations will be stored alongside the stored OCI artifact itself. ([example on GCP](../images/attestations-in-artifact-registry.png)) Defining this value results in the OCI bundle stored in the designated location _instead of_ alongside the image. See [cosign documentation](https://github.com/sigstore/cosign#specifying-registry) for additional information. 
| | -| `storage.docdb.url` | The go-cloud URI reference to a docstore collection | `firestore://projects/[PROJECT]/databases/(default)/documents/[COLLECTION]?name_field=name` | | -| `storage.docdb.mongo-server-url` (optional) | The value of MONGO_SERVER_URL env var with the MongoDB connection URI | Example: `mongodb://[USER]:[PASSWORD]@[HOST]:[PORT]/[DATABASE]` | | -| `storage.docdb.mongo-server-url-dir` (optional) | The path of the directory that contains the file named MONGO_SERVER_URL that stores the value of MONGO_SERVER_URL env var | If the file `/mnt/mongo-creds-secret/MONGO_SERVER_URL` has the value of MONGO_SERVER_URL, then set `storage.docdb.mongo-server-url-dir: /mnt/mongo-creds-secret` | | -| `storage.docdb.mongo-server-url-path` (optional) | The path of the file that contains the value of mongo server url | If the file `/mnt/mongo-creds-secret/mongo-server-url` has the value, then set `storage.docdb.mongo-server-url-path: /mnt/mongo-creds-secret/mongo-server-url` | | -| `storage.grafeas.projectid` | The project of where grafeas server is located for storing occurrences | | | -| `storage.grafeas.noteid` (optional) | This field will be used as the prefix part of the note name that will be created. The value of this field must be a string without spaces. (See more details [below](#grafeas).) | | | -| `storage.grafeas.notehint` (optional) | This field is used to set the [human_readable_name](https://github.com/grafeas/grafeas/blob/cd23d4dc1bef740d6d6d90d5007db5c9a2431c41/proto/v1/attestation.proto#L49) field in the Grafeas ATTESTATION note. If it is not provided, the default `This attestation note was generated by Tekton Chains` will be used. | | | +| Key | Description | Supported Values | Default | +| :----------------------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :------- | +| `storage.gcs.bucket` | The GCS bucket for storage. | | | +| `storage.oci.repository` | The OCI repository to store OCI signatures and attestations in. | If undefined and one of `artifacts.{oci,taskrun}.storage` includes OCI storage, attestations will be stored alongside the OCI artifact. Defining this value results in the OCI bundle stored in the designated location instead of alongside the image. See [cosign documentation](https://github.com/sigstore/cosign#specifying-registry) for details. | | +| `storage.docdb.url` | The go-cloud URI reference to a docstore collection. | `firestore://projects/[PROJECT]/databases/(default)/documents/[COLLECTION]?name_field=name` | | +| `storage.docdb.mongo-server-url` (optional) | The MongoDB connection URI, equivalent to the `MONGO_SERVER_URL` environment variable. 
| Example: `mongodb://[USER]:[PASSWORD]@[HOST]:[PORT]/[DATABASE]` | | +| `storage.docdb.mongo-server-url-dir` (optional) | The directory containing a file named `MONGO_SERVER_URL` with the MongoDB connection URI. | If the file `/mnt/mongo-creds-secret/MONGO_SERVER_URL` contains the MongoDB URL, set this to `/mnt/mongo-creds-secret`. | | +| `storage.docdb.mongo-server-url-path` (optional) | The file path that contains the MongoDB connection URI. | For example, if `/mnt/mongo-creds-secret/mongo-server-url` contains the MongoDB URL, set this to `/mnt/mongo-creds-secret/mongo-server-url`. | | +| `storage.grafeas.projectid` | The project where the Grafeas server is located for storing occurrences. | | | +| `storage.grafeas.noteid` (optional) | The prefix for the note name used when creating a Grafeas note. Must be a string without spaces. | | | +| `storage.grafeas.notehint` (optional)| Sets the `human_readable_name` in the Grafeas ATTESTATION note. If not provided, defaults to `This attestation note was generated by Tekton Chains`. | | | +| `storage.archivista.url` | The URL endpoint for the Archivista service. | A valid HTTPS URL pointing to your Archivista instance (e.g. `https://archivista.testifysec.io`). | None | #### docstore -You can read about the go-cloud docstore URI format [here](https://gocloud.dev/howto/docstore/). Tekton Chains supports the following docstore services: - +For details on the go-cloud docstore URI format, see [Go Cloud Docstore](https://gocloud.dev/howto/docstore/). Chains supports the following docstore services: - `firestore` - `dynamodb` - `mongo` #### MongoDB -You can provide MongoDB connection through different options +You can provide a MongoDB connection via multiple options: -* Using MONGO_SERVER_URL Environment Variable - * User can set the MongoDB connection URL in the MONGO_SERVER_URL env var in the Chains deployment +- **Environment Variable:** + Set the connection URL in the `MONGO_SERVER_URL` environment variable in the Chains deployment. -* Using `storage.docdb.mongo-server-url` field in the chains-config configmap - * Alternatively, you can set the connection URL using the `storage.docdb.mongo-server-url` field in the chains-config configmap - * This field overrides the MONGO_SERVER_URL env var +- **ConfigMap Field (`storage.docdb.mongo-server-url`):** + Alternatively, set the connection URL in the `chains-config` ConfigMap. This field overrides the `MONGO_SERVER_URL` environment variable. -* Using `storage.docdb.mongo-server-url-dir` field - * Another option is to set `storage.docdb.mongo-server-url-dir`, which points to a directory containing a file named `MONGO_SERVER_URL` - * The directory path setting takes precedence over both `storage.docdb.mongo-server-url` and the `MONGO_SERVER_URL` env var - * For instance, if `/mnt/mongo-creds-secret/MONGO_SERVER_URL` contains the MongoDB URL, set `storage.docdb.mongo-server-url-dir`: `/mnt/mongo-creds-secret` +- **Directory Field (`storage.docdb.mongo-server-url-dir`):** + Set this to the directory containing a file named `MONGO_SERVER_URL`. This takes precedence over both the ConfigMap field and the environment variable. 
-* Using `storage.docdb.mongo-server-url-path` field - * You can use `storage.docdb.mongo-server-url-path` field in chains-config configmap to directly reference the file containing the MongoDB URL - * This field overrides all others (`mongo-server-url-dir, mongo-server-url, and MONGO_SERVER_URL env var`) - * For instance, if `/mnt/mongo-creds-secret/mongo-server-url` contains the MongoDB URL, then set `storage.docdb.mongo-server-url-path`: `/mnt/mongo-creds-secret/mongo-server-url` +- **File Path Field (`storage.docdb.mongo-server-url-path`):** + Directly reference the file containing the MongoDB URL. This field overrides all others. -**NOTE** :- -* When using `storage.docdb.mongo-server-url-dir` or `storage.docdb.mongo-server-url-path` field, store the value of mongo server url in a secret and mount the secret. When the secret is updated, the new value will be fetched by Tekton Chains controller -* Also using `storage.docdb.mongo-server-url-dir` or `storage.docdb.mongo-server-url-path` field are recommended, using `storage.docdb.mongo-server-url` should be avoided since credentials are stored in a ConfigMap instead of a secret +**NOTE:** +- When using the directory or file path fields, store the MongoDB URL in a secret and mount the secret so that Chains can pick up updates automatically. +- It is recommended to use `storage.docdb.mongo-server-url-dir` or `storage.docdb.mongo-server-url-path` rather than `storage.docdb.mongo-server-url` to avoid storing credentials in a ConfigMap. #### Grafeas -You can read more about Grafeas notes and occurrences [here](https://github.com/grafeas/grafeas/blob/master/docs/grafeas_concepts.md). To create occurrences, we have to create notes first that are used to link occurrences. Two types of occurrences will be created: `ATTESTATION` Occurrence and `BUILD` Occrrence. The configurable `noteid` is used as the prefix of the note name. Under the hood, the suffix `-simplesigning` will be appended for the `ATTESTATION` note, and the suffix `-intoto` will be appended for the `BUILD` note. If the `noteid` field is not configured, `tekton-` will be used as the prefix. +For more information on Grafeas notes and occurrences, see [Grafeas Concepts](https://github.com/grafeas/grafeas/blob/master/docs/grafeas_concepts.md). To create occurrences, a note must first be created. Two types of occurrences are created: +- `ATTESTATION` Occurrence (note suffix: `-simplesigning`) +- `BUILD` Occurrence (note suffix: `-intoto`) +If `storage.grafeas.noteid` is not set, the prefix `tekton-` will be used. ### In-toto Configuration | Key | Description | Supported Values | Default | | :-------------------------- | :--------------------------------------------- | :------------------------------------------------------------------------------ | :---------------------------------- | -| `builder.id` | The builder ID to set for in-toto attestations | | `https://tekton.dev/chains/v2` | -| `builddefinition.buildtype` | The buildType for in-toto attestations | `https://tekton.dev/chains/v2/slsa`, `https://tekton.dev/chains/v2/slsa-tekton` | `https://tekton.dev/chains/v2/slsa` | +| `builder.id` | The builder ID for in-toto attestations. | | `https://tekton.dev/chains/v2` | +| `builddefinition.buildtype` | The buildType for in-toto attestations. 
| `https://tekton.dev/chains/v2/slsa`, `https://tekton.dev/chains/v2/slsa-tekton` | `https://tekton.dev/chains/v2/slsa` | -> NOTE: -> Considerations for the builddefinition.buildtype parameter: -> -> - It is only valid for `slsa/v2alpha3` configurations (see TaskRun or PipelineRun configuration). -> - The parameter can take one of two values: -> - `https://tekton.dev/chains/v2/slsa`: This buildType strictly conforms to the slsav1.0 spec. -> - `https://tekton.dev/chains/v2/slsa-tekton`: This buildType also conforms to the slsav1.0 spec, but adds additional informaton specific to Tekton. This information includes the PipelinRun/TaskRun labels and annotations as internalParameters. It also includes capturing each pipeline task in a PipelinRun under resolvedDependencies. +> **NOTE:** +> - `builddefinition.buildtype` is valid for `slsa/v2alpha3` configurations only. +> - Use `https://tekton.dev/chains/v2/slsa` for strict slsav1.0 compliance. +> - Use `https://tekton.dev/chains/v2/slsa-tekton` for slsav1.0 with additional Tekton-specific details. ### Sigstore Features Configuration @@ -132,10 +125,10 @@ You can read more about Grafeas notes and occurrences [here](https://github.com/ | Key | Description | Supported Values | Default | | :--------------------- | :----------------------------------------------------------------- | :------------------------ | :--------------------------- | -| `transparency.enabled` | Whether to enable automatic binary transparency uploads. | `true`, `false`, `manual` | `false` | -| `transparency.url` | The URL to upload binary transparency attestations to, if enabled. | | `https://rekor.sigstore.dev` | +| `transparency.enabled` | Whether to enable automatic binary transparency uploads. | `true`, `false`, `manual` | `false` | +| `transparency.url` | The URL to upload binary transparency attestations to, if enabled. | | `https://rekor.sigstore.dev` | -**Note**: If `transparency.enabled` is set to `manual`, then only `TaskRuns` and `PipelineRuns` with the following annotation will be uploaded to the transparency log: +**Note:** If `transparency.enabled` is set to `manual`, only TaskRuns and PipelineRuns with the annotation below will be uploaded to the transparency log: ```yaml chains.tekton.dev/transparency-upload: "true" @@ -145,47 +138,50 @@ chains.tekton.dev/transparency-upload: "true" | Key | Description | Supported Values | Default | | :--------------------------------- | :------------------------------------------------------------ | :----------------------------------------- | :------------------------------------------------- | -| `signers.x509.fulcio.enabled` | Whether to enable automatic certificates from fulcio. | `true`, `false` | `false` | -| `signers.x509.fulcio.address` | Fulcio address to request certificate from, if enabled | | `https://fulcio.sigstore.dev` | -| `signers.x509.fulcio.issuer` | Expected OIDC issuer. | | `https://oauth2.sigstore.dev/auth` | -| `signers.x509.fulcio.provider` | Provider to request ID Token from | `google`, `spiffe`, `github`, `filesystem` | Unset, each provider will be attempted. | -| `signers.x509.identity.token.file` | Path to file containing ID Token. | | -| `signers.x509.tuf.mirror.url` | TUF server URL. $TUF_URL/root.json is expected to be present. | | `https://sigstore-tuf-root.storage.googleapis.com` | +| `signers.x509.fulcio.enabled` | Enable automatic certificates from Fulcio. | `true`, `false` | `false` | +| `signers.x509.fulcio.address` | Fulcio address for certificate requests. 
| | `https://fulcio.sigstore.dev` | +| `signers.x509.fulcio.issuer` | Expected OIDC issuer. | | `https://oauth2.sigstore.dev/auth` | +| `signers.x509.fulcio.provider` | Provider for ID Token requests. | `google`, `spiffe`, `github`, `filesystem` | Unset (each provider will be attempted). | +| `signers.x509.identity.token.file` | Path to file containing an ID Token. | | | +| `signers.x509.tuf.mirror.url` | TUF server URL; expects `$TUF_URL/root.json` to be present. | | `https://sigstore-tuf-root.storage.googleapis.com` | #### KMS OIDC and Spire Configuration | Key | Description | Supported Values | Default | | :-------------------------------- | :------------------------------------------------------------------------------------------ | :--------------- | :------ | -| `signers.kms.auth.address` | URI of KMS server (e.g. the value of `VAULT_ADDR`) | | -| `signers.kms.auth.token` | Auth token KMS server (e.g. the value of `VAULT_TOKEN`) | | -| `signers.kms.auth.token-path` | Path to store KMS server Auth token (e.g. `/etc/kms-secrets`) | | -| `signers.kms.auth.oidc.path` | Path used for OIDC authentication (e.g. `jwt` for Vault) | | -| `signers.kms.auth.oidc.role` | Role used for OIDC authentication | | -| `signers.kms.auth.spire.sock` | URI of the Spire socket used for KMS token (e.g. `unix:///tmp/spire-agent/public/api.sock`) | | -| `signers.kms.auth.spire.audience` | Audience for requesting a SVID from Spire | | - -> NOTE: -> -> If `signers.kms.auth.token-path` is set, create a secret and ensure the Chains deployment mounts this secret to -> the path specified by `signers.kms.auth.token-path`. - -> [!IMPORTANT] -> To project the latest token values without needing to recreate the pod, avoid using `subPath` in volume mount. +| `signers.kms.auth.address` | URI of the KMS server (e.g. `VAULT_ADDR`). | | | +| `signers.kms.auth.token` | Authentication token for the KMS server (e.g. `VAULT_TOKEN`). | | | +| `signers.kms.auth.token-path` | File path to store the KMS server Auth token (e.g. `/etc/kms-secrets`). | | | +| `signers.kms.auth.oidc.path` | Path used for OIDC authentication (e.g. `jwt` for Vault). | | | +| `signers.kms.auth.oidc.role` | Role used for OIDC authentication. | | | +| `signers.kms.auth.spire.sock` | URI of the Spire socket for KMS token (e.g. `unix:///tmp/spire-agent/public/api.sock`). | | | +| `signers.kms.auth.spire.audience` | Audience for requesting a SVID from Spire. | | | + +> **NOTE:** +> - If `signers.kms.auth.token-path` is set, create a secret and mount it to the specified path. +> - To project updated token values without recreating pods, avoid using `subPath` in volume mounts. ### Visual Guide: ConfigMap Configuration Options -Refer the diagram below to explore the pictorial representation of signing and storage configuration options, and their usage in the context of chains artifacts. + +Refer to the diagram below to see a pictorial representation of signing and storage configuration options and how they relate to Chains artifacts. ![ConfigMap Configuration Diagram](../images/signing-storage-config-diagram.drawio.svg) ## Namespaces Restrictions in Chains Controller -This feature allows you to specify a list of namespaces for the controller to monitor, providing granular control over its operation. If no namespaces are specified, the controller defaults to monitoring all namespaces. + +Chains can be configured to monitor specific namespaces. If no namespaces are specified, the controller monitors all namespaces. 
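+As a quick sketch (usage details follow below), the flag can be appended to the controller arguments with `kubectl`, assuming the default install where the controller runs as the `tekton-chains-controller` Deployment in the `tekton-chains` namespace and the container already defines an `args` list:
+
+```shell
+kubectl -n tekton-chains patch deployment tekton-chains-controller --type=json \
+  -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--namespace=dev,test"}]'
+```
+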
 ### Usage
-To restrict the Chains Controller to specific namespaces, pass a comma-separated list of namespaces as an argument to the controller using the --namespace flag.
+
+Pass a comma-separated list of namespaces to the controller using the `--namespace` flag.
 
 ### Example
-To restrict the controller to the dev and test namespaces, you would start the controller with the following argument:
+
+To restrict the controller to the `dev` and `test` namespaces, start the controller with:
+
 ```shell
 --namespace=dev,test
 ```
-In this example, the controller will only monitor resources (pipelinesruns and taskruns) within the dev and test namespaces.
+
+In this example, the controller will only monitor TaskRuns and PipelineRuns in the `dev` and `test` namespaces.
diff --git a/go.mod b/go.mod
index b363e6999a..dd6f078de0 100644
--- a/go.mod
+++ b/go.mod
@@ -16,7 +16,9 @@
 	github.com/google/go-licenses v1.6.0
 	github.com/grafeas/grafeas v0.2.3
 	github.com/hashicorp/go-multierror v1.1.1
-	github.com/in-toto/attestation v1.1.1
+	github.com/in-toto/archivista v0.8.0
+	github.com/in-toto/attestation v1.1.0
+	github.com/in-toto/go-witness v0.7.0
 	github.com/in-toto/in-toto-golang v0.9.1-0.20240317085821-8e2966059a09
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/pkg/errors v0.9.1
@@ -37,14 +39,14 @@
 	gocloud.dev v0.40.0
 	gocloud.dev/docstore/mongodocstore v0.40.0
 	gocloud.dev/pubsub/kafkapubsub v0.40.0
-	golang.org/x/crypto v0.33.0
+	golang.org/x/crypto v0.32.0
 	golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f
 	google.golang.org/grpc v1.70.0
 	google.golang.org/protobuf v1.36.4
-	k8s.io/api v0.32.1
-	k8s.io/apimachinery v0.32.1
-	k8s.io/client-go v0.32.1
-	k8s.io/code-generator v0.32.1
+	k8s.io/api v0.32.0
+	k8s.io/apimachinery v0.32.0
+	k8s.io/client-go v0.32.0
+	k8s.io/code-generator v0.32.0
 	knative.dev/pkg v0.0.0-20240416145024-0f34a8815650
 	sigs.k8s.io/yaml v1.4.0
 )
@@ -183,6 +185,7 @@ require (
 	github.com/eapache/go-resiliency v1.7.0 // indirect
 	github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect
 	github.com/eapache/queue v1.1.0 // indirect
+	github.com/edwarnicke/gitoid v0.0.0-20220710194850-1be5bfda1f9d // indirect
 	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
 	github.com/emirpasic/gods v1.18.1 // indirect
 	github.com/envoyproxy/go-control-plane v0.13.1 // indirect
@@ -441,10 +444,10 @@
 	golang.org/x/mod v0.22.0 // indirect
 	golang.org/x/net v0.34.0 // indirect
 	golang.org/x/oauth2 v0.26.0 // indirect
-	golang.org/x/sync v0.11.0 // indirect
-	golang.org/x/sys v0.30.0 // indirect
-	golang.org/x/term v0.29.0 // indirect
-	golang.org/x/text v0.22.0 // indirect
+	golang.org/x/sync v0.10.0 // indirect
+	golang.org/x/sys v0.29.0 // indirect
+	golang.org/x/term v0.28.0 // indirect
+	golang.org/x/text v0.21.0 // indirect
 	golang.org/x/time v0.9.0 // indirect
 	golang.org/x/tools v0.29.0 // indirect
 	golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect
diff 
--git a/go.sum b/go.sum index bed5f65c23..72862c71cb 100644 --- a/go.sum +++ b/go.sum @@ -463,6 +463,8 @@ github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4A github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/edwarnicke/gitoid v0.0.0-20220710194850-1be5bfda1f9d h1:4l+Uq5zFWSagXgGFaKRRVWJrnlzeathyagWgYUltCgY= +github.com/edwarnicke/gitoid v0.0.0-20220710194850-1be5bfda1f9d/go.mod h1:WxWwA3EYuCQjlR5EBUX3uaTS8bh9BOa7BcqVREHQ0uQ= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emicklei/proto v1.13.4 h1:myn1fyf8t7tAqIzV91Tj9qXpvyXXGXk8OS2H6IBSc9g= @@ -863,8 +865,12 @@ github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/in-toto/attestation v1.1.1 h1:QD3d+oATQ0dFsWoNh5oT0udQ3tUrOsZZ0Fc3tSgWbzI= -github.com/in-toto/attestation v1.1.1/go.mod h1:Dcq1zVwA2V7Qin8I7rgOi+i837wEf/mOZwRm047Sjys= +github.com/in-toto/archivista v0.8.0 h1:l8zb28HdKCuzEWWGtOksRCoeibOuyh7DG/1ao6O7/TE= +github.com/in-toto/archivista v0.8.0/go.mod h1:uXFghXRS0PTLeJXsnhnABc40ruDLzjhMSAg0zLgLzGo= +github.com/in-toto/attestation v1.1.0 h1:oRWzfmZPDSctChD0VaQV7MJrywKOzyNrtpENQFq//2Q= +github.com/in-toto/attestation v1.1.0/go.mod h1:DB59ytd3z7cIHgXxwpSX2SABrU6WJUKg/grpdgHVgVs= +github.com/in-toto/go-witness v0.7.0 h1:I48FUCLfyos0uCSlHJoqCJO6HjtxF2f/y65TQVpxd8k= +github.com/in-toto/go-witness v0.7.0/go.mod h1:WZQY96yHqPPYkRcQU7dXl0d3saMKAg9DepWbUVL586E= github.com/in-toto/in-toto-golang v0.9.1-0.20240317085821-8e2966059a09 h1:cwCITdi9pF50CF8uh40qDbkJ/VrEVzx5AoaHP7OPdEo= github.com/in-toto/in-toto-golang v0.9.1-0.20240317085821-8e2966059a09/go.mod h1:yGCBn2JKF1m26FX8GmkcLSOFVjB6khWRxFsHwWIg7hw= github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= @@ -1506,8 +1512,8 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus= -golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= +golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1668,8 +1674,8 @@ golang.org/x/sync v0.3.0/go.mod 
h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= -golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1765,8 +1771,8 @@ golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= -golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1780,8 +1786,8 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU= -golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= +golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1800,8 +1806,8 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= -golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -2137,16 +2143,16 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9
 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
 honnef.co/go/tools v0.5.1 h1:4bH5o3b5ZULQ4UrBmP+63W9r7qIkqJClEA9ko5YKx+I=
 honnef.co/go/tools v0.5.1/go.mod h1:e9irvo83WDG9/irijV44wr3tbhcFeRnfpVlRqVwpzMs=
-k8s.io/api v0.32.1 h1:f562zw9cy+GvXzXf0CKlVQ7yHJVYzLfL6JAS4kOAaOc=
-k8s.io/api v0.32.1/go.mod h1:/Yi/BqkuueW1BgpoePYBRdDYfjPF5sgTr5+YqDZra5k=
+k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE=
+k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0=
 k8s.io/apiextensions-apiserver v0.29.2 h1:UK3xB5lOWSnhaCk0RFZ0LUacPZz9RY4wi/yt2Iu+btg=
 k8s.io/apiextensions-apiserver v0.29.2/go.mod h1:aLfYjpA5p3OwtqNXQFkhJ56TB+spV8Gc4wfMhUA3/b8=
-k8s.io/apimachinery v0.32.1 h1:683ENpaCBjma4CYqsmZyhEzrGz6cjn1MY/X2jB2hkZs=
-k8s.io/apimachinery v0.32.1/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
-k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU=
-k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg=
-k8s.io/code-generator v0.32.1 h1:4lw1kFNDuFYXquTkB7Sl5EwPMUP2yyW9hh6BnFfRZFY=
-k8s.io/code-generator v0.32.1/go.mod h1:zaILfm00CVyP/6/pJMJ3zxRepXkxyDfUV5SNG4CjZI4=
+k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg=
+k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
+k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8=
+k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8=
+k8s.io/code-generator v0.32.0 h1:s0lNN8VSWny8LBz5t5iy7MCdgwdOhdg7vAGVxvS+VWU=
+k8s.io/code-generator v0.32.0/go.mod h1:b7Q7KMZkvsYFy72A79QYjiv4aTz3GvW0f1T3UfhFq4s=
 k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 h1:si3PfKm8dDYxgfbeA6orqrtLkvvIeH8UqffFJDl0bz4=
 k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
 k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
diff --git a/pkg/chains/storage/archivista/archivista.go b/pkg/chains/storage/archivista/archivista.go
new file mode 100644
index 0000000000..86abefaa08
--- /dev/null
+++ b/pkg/chains/storage/archivista/archivista.go
@@ -0,0 +1,217 @@
+package archivista
+
+import (
+	"context"
+	"crypto"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/json"
+	"encoding/pem"
+	"fmt"
+	"net/http"
+	"strings"
+
+	archivistaClient "github.com/in-toto/archivista/pkg/http-client"
+	"github.com/in-toto/go-witness/cryptoutil"
+	"github.com/in-toto/go-witness/dsse"
+	"github.com/tektoncd/chains/pkg/chains/objects"
+	"github.com/tektoncd/chains/pkg/config"
+	tektonv1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
+	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" // used to patch v1beta1 TaskRuns/PipelineRuns
+	tektonclient "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"knative.dev/pkg/logging"
+)
+
+const (
+	StorageBackendArchivista = "archivista"
+)
+
+// generatePublicKeyIDFunc is a package-level variable wrapping the public key ID generation.
+// It allows tests to simulate errors.
+var generatePublicKeyIDFunc = cryptoutil.GeneratePublicKeyID
+
+// buildEnvelope constructs a DSSE envelope from the raw payload, signature, keyID, and certificate chain.
+// If a valid chain is provided, it parses it into a leaf and intermediates; otherwise, certificate data is omitted.
+func buildEnvelope(rawPayload []byte, signature, keyID string, chain string) dsse.Envelope {
+	var leaf []byte
+	var inters [][]byte
+
+	chain = strings.TrimSpace(chain)
+	if chain != "" {
+		var err error
+		leaf, inters, err = parseAndOrderCertificateChain(chain)
+		if err != nil {
+			// Parsing failed; fall back to omitting certificate data.
+			leaf = nil
+			inters = [][]byte{}
+		}
+	}
+	return dsse.Envelope{
+		Payload:     rawPayload,
+		PayloadType: "application/vnd.in-toto+json",
+		Signatures: []dsse.Signature{
+			{
+				KeyID:         keyID,
+				Signature:     []byte(signature),
+				Certificate:   leaf,
+				Intermediates: inters,
+			},
+		},
+	}
+}
+
+// Backend is the interface that all storage backends must implement.
+type Backend interface {
+	StorePayload(ctx context.Context, obj objects.TektonObject, rawPayload []byte, signature string, opts config.StorageOpts) error
+	RetrievePayloads(ctx context.Context, obj objects.TektonObject, opts config.StorageOpts) (map[string]string, error)
+	RetrieveSignatures(ctx context.Context, obj objects.TektonObject, opts config.StorageOpts) (map[string][]string, error)
+	Type() string
+}
+
+// ArchivistaStorage implements the Backend interface for Archivista.
+type ArchivistaStorage struct {
+	client       *archivistaClient.ArchivistaClient
+	url          string
+	cfg          config.ArchivistaStorageConfig
+	tektonClient tektonclient.Interface // Injected Tekton client for patching objects
+}
+
+// NewArchivistaStorage initializes a new ArchivistaStorage backend.
+func NewArchivistaStorage(cfg config.Config, tektonClient tektonclient.Interface) (*ArchivistaStorage, error) {
+	archCfg := cfg.Storage.Archivista
+	if strings.TrimSpace(archCfg.URL) == "" {
+		return nil, fmt.Errorf("missing archivista URL in storage configuration")
+	}
+
+	client, err := archivistaClient.CreateArchivistaClient(&http.Client{}, archCfg.URL)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create Archivista client: %w", err)
+	}
+
+	return &ArchivistaStorage{
+		client:       client,
+		url:          archCfg.URL,
+		cfg:          archCfg,
+		tektonClient: tektonClient,
+	}, nil
+}
+
+// PatchTektonObjectAnnotations patches the Tekton object's annotations with the given key/value pairs
+// in a single patch call.
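+// The marshaled merge-patch body has the shape (values illustrative):
+//
+//	{"metadata":{"annotations":{"chains.tekton.dev/archivista-gitoid":"<gitoid>","chains.tekton.dev/archivista-url":"<url>"}}}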
+func PatchTektonObjectAnnotations(ctx context.Context, obj objects.TektonObject, annotations map[string]string, tektonClient tektonclient.Interface) error {
+	patchData := map[string]interface{}{
+		"metadata": map[string]interface{}{
+			"annotations": annotations,
+		},
+	}
+	patchBytes, err := json.Marshal(patchData)
+	if err != nil {
+		return fmt.Errorf("failed to marshal patch data: %w", err)
+	}
+
+	switch o := obj.GetObject().(type) {
+	case *tektonv1.TaskRun:
+		_, err = tektonClient.TektonV1().TaskRuns(o.Namespace).Patch(ctx, o.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{})
+		return err
+	case *tektonv1.PipelineRun:
+		_, err = tektonClient.TektonV1().PipelineRuns(o.Namespace).Patch(ctx, o.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{})
+		return err
+	case *v1beta1.TaskRun:
+		_, err = tektonClient.TektonV1beta1().TaskRuns(o.Namespace).Patch(ctx, o.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{})
+		return err
+	case *v1beta1.PipelineRun:
+		_, err = tektonClient.TektonV1beta1().PipelineRuns(o.Namespace).Patch(ctx, o.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{})
+		return err
+	default:
+		return fmt.Errorf("unsupported Tekton object type for patching")
+	}
+}
+
+// StorePayload builds a DSSE envelope from the raw payload and signature,
+// logs the envelope, uploads it via the Archivista client API, and patches the
+// Tekton object with the returned gitoid and Archivista URL.
+func (a *ArchivistaStorage) StorePayload(ctx context.Context, obj objects.TektonObject, rawPayload []byte, signature string, opts config.StorageOpts) error {
+	logger := logging.FromContext(ctx)
+
+	// Validate signature.
+	if strings.TrimSpace(signature) == "" {
+		return fmt.Errorf("missing signature")
+	}
+
+	var keyID string
+	certPEM := strings.TrimSpace(opts.Cert)
+	if certPEM != "" {
+		block, _ := pem.Decode([]byte(certPEM))
+		if block != nil {
+			cert, err := x509.ParseCertificate(block.Bytes)
+			if err == nil {
+				// Generate keyID from the public key.
+				keyID, err = generatePublicKeyIDFunc(cert.PublicKey, crypto.SHA256)
+				if err != nil {
+					logger.Errorw("Failed to generate KeyID", "error", err)
+					keyID = ""
+				}
+			} else {
+				logger.Errorw("Failed to parse certificate", "error", err)
+			}
+		} else {
+			logger.Error("Failed to decode certificate PEM")
+		}
+	} // if no certificate is provided, keyID remains blank
+
+	// Attempt to base64-decode the payload for logging.
+	decodedPayload, err := base64.StdEncoding.DecodeString(string(rawPayload))
+	if err != nil {
+		logger.Errorw("Failed to base64 decode payload", "keyID", keyID, "error", err)
+		logger.Infof("Raw payload (not base64 decoded): %s", string(rawPayload))
+	} else {
+		logger.Infof("Decoded payload: %s", string(decodedPayload))
+	}
+
+	env := buildEnvelope(rawPayload, signature, keyID, opts.Chain)
+
+	// Upload the envelope using the Archivista client's Store method.
+	uploadResp, err := a.client.Store(ctx, env)
+	if err != nil {
+		logger.Errorw("Failed to upload DSSE envelope to Archivista", "error", err)
+		return fmt.Errorf("failed to upload envelope to Archivista: %w", err)
+	}
+	logger.Infof("Successfully uploaded DSSE envelope to Archivista, response: %+v", uploadResp)
+
+	// Update the in-memory Tekton object with Archivista annotations.
+	annotations := map[string]string{
+		"chains.tekton.dev/archivista-gitoid": uploadResp.Gitoid,
+		"chains.tekton.dev/archivista-url":    a.url,
+	}
+	obj.SetAnnotations(annotations)
+
+	// Patch the live Tekton object in one call.
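+	// A JSON merge patch only adds or overwrites the keys it names, so any
+	// annotations already present on the object are left intact.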
+ if err := PatchTektonObjectAnnotations(ctx, obj, annotations, a.tektonClient); err != nil { + logger.Errorw("Failed to patch Tekton object with Archivista annotations", "error", err) + return fmt.Errorf("failed to patch Tekton object: %w", err) + } + + return nil +} + +// RetrievePayload is not implemented for Archivista. +func (a *ArchivistaStorage) RetrievePayload(ctx context.Context, key string) ([]byte, []byte, error) { + return nil, nil, fmt.Errorf("RetrievePayload not implemented for Archivista") +} + +// RetrievePayloads is not implemented for Archivista. +func (a *ArchivistaStorage) RetrievePayloads(ctx context.Context, obj objects.TektonObject, opts config.StorageOpts) (map[string]string, error) { + return nil, fmt.Errorf("RetrievePayloads not implemented for Archivista") +} + +// RetrieveSignatures is not implemented for Archivista. +func (a *ArchivistaStorage) RetrieveSignatures(ctx context.Context, obj objects.TektonObject, opts config.StorageOpts) (map[string][]string, error) { + return nil, fmt.Errorf("RetrieveSignatures not implemented for Archivista") +} + +// Type returns the storage backend type. +func (a *ArchivistaStorage) Type() string { + return StorageBackendArchivista +} diff --git a/pkg/chains/storage/archivista/archivista_test.go b/pkg/chains/storage/archivista/archivista_test.go new file mode 100644 index 0000000000..c96ed29e13 --- /dev/null +++ b/pkg/chains/storage/archivista/archivista_test.go @@ -0,0 +1,324 @@ +package archivista + +import ( + "context" + "crypto" + "encoding/base64" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "testing" + "time" + + archivistaClient "github.com/in-toto/archivista/pkg/http-client" + "github.com/in-toto/go-witness/dsse" + "github.com/stretchr/testify/assert" + "github.com/tektoncd/chains/pkg/chains/objects" + "github.com/tektoncd/chains/pkg/config" + v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + fakePipelineClient "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// -------------------------- +// Helper: setupEnv +// -------------------------- + +// setupEnv creates a fresh ArchivistaStorage test environment using a given TaskRun name. +func setupEnv(taskRunName string, cfg config.Config, archClient *archivistaClient.ArchivistaClient) (*ArchivistaStorage, objects.TektonObject, *fakePipelineClient.Clientset) { + tr := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: taskRunName, + Namespace: "default", + }, + } + obj := objects.NewTaskRunObjectV1Beta1(tr) + fakeClient := fakePipelineClient.NewSimpleClientset(tr) + aStorage, err := NewArchivistaStorage(cfg, fakeClient) + if err != nil { + panic("failed to initialize ArchivistaStorage: " + err.Error()) + } + // Override the Archivista client with the provided one. + aStorage.client = archClient + return aStorage, obj, fakeClient +} + +// -------------------------- +// StorePayload Tests +// -------------------------- + +// TestStorePayload_TaskRun tests the basic success path of StorePayload without certificate data. +func TestStorePayload_TaskRun(t *testing.T) { + ctx := context.Background() + + // Create a v1beta1.TaskRun with minimal metadata and a dummy result. 
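+	// Note: setupEnv below seeds its own fake client and TaskRun; this object
+	// mainly illustrates the shape of a TaskRun carrying a result.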
+ tr := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-taskrun", + Namespace: "default", + }, + Status: v1beta1.TaskRunStatus{ + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + TaskRunResults: []v1beta1.TaskRunResult{ + { + Name: "IMAGE_URL", + Value: *v1beta1.NewStructuredValues("mockImage"), + }, + }, + }, + }, + } + fakeClient := fakePipelineClient.NewSimpleClientset(tr) + + // Set up an httptest server to simulate Archivista. + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/upload" { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"gitoid": "fake-gitoid"}`)) + return + } + http.NotFound(w, r) + })) + defer ts.Close() + + httpClient := &http.Client{} + cfg := config.Config{ + Storage: config.StorageConfigs{ + Archivista: config.ArchivistaStorageConfig{ + URL: ts.URL, + }, + }, + } + archClient, err := archivistaClient.CreateArchivistaClient(httpClient, cfg.Storage.Archivista.URL) + if err != nil { + t.Fatalf("failed to create Archivista client: %v", err) + } + + aStorage, obj, fakeClient := setupEnv("test-taskrun", cfg, archClient) + + // Prepare a valid payload. + type mockPayload struct { + A string `json:"a"` + B int `json:"b"` + } + payload := mockPayload{ + A: "foo", + B: 3, + } + payloadBytes, err := json.Marshal(payload) + assert.NoError(t, err, "should marshal payload") + encodedPayload := base64.StdEncoding.EncodeToString(payloadBytes) + signature := "test-signature" + opts := config.StorageOpts{ + ShortKey: "mockpayload", + Cert: "", + Chain: "", + } + + // Call StorePayload. + err = aStorage.StorePayload(ctx, obj, []byte(encodedPayload), signature, opts) + assert.NoError(t, err, "StorePayload should succeed") + + // Retrieve the updated TaskRun. + updated, err := fakeClient.TektonV1beta1().TaskRuns("default").Get(ctx, "test-taskrun", metav1.GetOptions{}) + assert.NoError(t, err, "should retrieve updated TaskRun") + assert.Equal(t, "fake-gitoid", updated.Annotations["chains.tekton.dev/archivista-gitoid"]) + assert.Equal(t, ts.URL, updated.Annotations["chains.tekton.dev/archivista-url"]) +} + +// TestStorePayload_ErrorCases exercises error branches in StorePayload. +func TestStorePayload_ErrorCases(t *testing.T) { + ctx := context.Background() + + // Setup a common httptest server and configuration. 
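+	// All subtests share the server; each one seeds a fresh TaskRun through setupEnv.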
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"gitoid": "fake-gitoid"}`)) + })) + defer ts.Close() + + httpClient := &http.Client{} + cfg := config.Config{ + Storage: config.StorageConfigs{ + Archivista: config.ArchivistaStorageConfig{ + URL: ts.URL, + }, + }, + } + archClient, err := archivistaClient.CreateArchivistaClient(httpClient, cfg.Storage.Archivista.URL) + if err != nil { + t.Fatalf("failed to create Archivista client: %v", err) + } + + setup := func(name string) (*ArchivistaStorage, objects.TektonObject, *fakePipelineClient.Clientset) { + return setupEnv(name, cfg, archClient) + } + + t.Run("missing signature", func(t *testing.T) { + aStorage, obj, _ := setup("missing-sig") + err := aStorage.StorePayload(ctx, obj, []byte("dummy"), "", config.StorageOpts{}) + if err == nil || err.Error() != "missing signature" { + t.Errorf("expected missing signature error, got: %v", err) + } + }) + + t.Run("invalid certificate PEM decode", func(t *testing.T) { + aStorage, obj, fakeClient := setup("invalid-cert-decode") + opts := config.StorageOpts{ + Cert: "invalid pem", + } + payload := base64.StdEncoding.EncodeToString([]byte("dummy")) + err := aStorage.StorePayload(ctx, obj, []byte(payload), "sig", opts) + if err != nil { + t.Errorf("expected success even with invalid cert PEM, got error: %v", err) + } + updated, err := fakeClient.TektonV1beta1().TaskRuns("default").Get(ctx, "invalid-cert-decode", metav1.GetOptions{}) + if err != nil { + t.Errorf("failed to get updated TaskRun: %v", err) + } + if updated.Annotations["chains.tekton.dev/archivista-gitoid"] != "fake-gitoid" { + t.Errorf("unexpected gitoid: %s", updated.Annotations["chains.tekton.dev/archivista-gitoid"]) + } + }) + + t.Run("certificate PEM parse failure", func(t *testing.T) { + aStorage, obj, fakeClient := setup("cert-parse-failure") + opts := config.StorageOpts{ + Cert: "-----BEGIN CERTIFICATE-----\nnotbase64\n-----END CERTIFICATE-----", + } + payload := base64.StdEncoding.EncodeToString([]byte("dummy")) + err := aStorage.StorePayload(ctx, obj, []byte(payload), "sig", opts) + if err != nil { + t.Errorf("expected success even if certificate fails parsing, got error: %v", err) + } + updated, err := fakeClient.TektonV1beta1().TaskRuns("default").Get(ctx, "cert-parse-failure", metav1.GetOptions{}) + if err != nil { + t.Errorf("failed to get updated TaskRun: %v", err) + } + if updated.Annotations["chains.tekton.dev/archivista-gitoid"] != "fake-gitoid" { + t.Errorf("unexpected gitoid: %s", updated.Annotations["chains.tekton.dev/archivista-gitoid"]) + } + }) + + t.Run("payload base64 decode error", func(t *testing.T) { + aStorage, obj, fakeClient := setup("payload-decode-error") + // Provide rawPayload that is not valid base64. + err := aStorage.StorePayload(ctx, obj, []byte("not-base64"), "sig", config.StorageOpts{}) + if err != nil { + t.Errorf("expected success even if base64 decode fails, got error: %v", err) + } + updated, err := fakeClient.TektonV1beta1().TaskRuns("default").Get(ctx, "payload-decode-error", metav1.GetOptions{}) + if err != nil { + t.Errorf("failed to get updated TaskRun: %v", err) + } + if updated.Annotations["chains.tekton.dev/archivista-gitoid"] != "fake-gitoid" { + t.Errorf("unexpected gitoid: %s", updated.Annotations["chains.tekton.dev/archivista-gitoid"]) + } + }) +} + +// TestStorePayload_CertificateSuccess_WithRecordingServer tests the certificate branch. 
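+// of StorePayload, where the keyID is derived from the supplied certificate's public key.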
+// It uses an httptest.Server to record the outgoing DSSE envelope. +func TestStorePayload_CertificateSuccess_WithRecordingServer(t *testing.T) { + ctx := context.Background() + + // Generate a valid certificate. + validCertPEM, _, _ := createCertificate(t, "dummy", "dummy", 123, time.Now(), nil, nil) + + // Override keyID generation to return "test-key-id". + origFunc := generatePublicKeyIDFunc + generatePublicKeyIDFunc = func(pub interface{}, hash crypto.Hash) (string, error) { + return "test-key-id", nil + } + defer func() { generatePublicKeyIDFunc = origFunc }() + + // Create an httptest.Server that records the DSSE envelope. + var recordedBody []byte + recServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + body, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, "failed to read body", http.StatusInternalServerError) + return + } + recordedBody = body + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"gitoid": "fake-gitoid"}`)) + })) + defer recServer.Close() + + archClient, err := archivistaClient.CreateArchivistaClient(&http.Client{}, recServer.URL) + if err != nil { + t.Fatalf("failed to create Archivista client: %v", err) + } + + // Create a minimal TaskRun and wrap it. + tr := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cert-success", + Namespace: "default", + }, + } + tektonObj := objects.NewTaskRunObjectV1Beta1(tr) + + // Build configuration using the recording server's URL. + cfg := config.Config{ + Storage: config.StorageConfigs{ + Archivista: config.ArchivistaStorageConfig{ + URL: recServer.URL, + }, + }, + } + + aStorage, _, _ := setupEnv("cert-success", cfg, archClient) + + // Prepare a valid payload. + payload := []byte("dummy payload") + encodedPayload := base64.StdEncoding.EncodeToString(payload) + signature := "dummy-signature" + opts := config.StorageOpts{ + ShortKey: "dummy", + Cert: validCertPEM, + Chain: "", + } + + // Call StorePayload. + err = aStorage.StorePayload(ctx, tektonObj, []byte(encodedPayload), signature, opts) + if err != nil { + t.Fatalf("StorePayload failed: %v", err) + } + + // Unmarshal the recorded DSSE envelope. + var env dsse.Envelope + if err := json.Unmarshal(recordedBody, &env); err != nil { + t.Fatalf("failed to unmarshal recorded envelope: %v", err) + } + + // Verify the signature's KeyID. + if len(env.Signatures) == 0 { + t.Fatal("expected at least one signature in envelope") + } + if env.Signatures[0].KeyID != "test-key-id" { + t.Errorf("expected keyID 'test-key-id', got %q", env.Signatures[0].KeyID) + } +} + +func TestBuildEnvelope_FallbackOnInvalidChain(t *testing.T) { + // Prepare inputs. + rawPayload := []byte("dummy") + signature := "dummy-sig" + keyID := "dummy-key" + // Provide a non-empty chain that cannot be parsed as valid certificates. + invalidChain := "invalid chain" + + // Call buildEnvelope. + env := buildEnvelope(rawPayload, signature, keyID, invalidChain) + + // Expect that no certificate data was included. 
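+	// (buildEnvelope falls back to a nil leaf and empty intermediates when the chain cannot be parsed.)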
+ if env.Signatures[0].Certificate != nil { + t.Errorf("expected certificate to be nil, got %v", env.Signatures[0].Certificate) + } + if len(env.Signatures[0].Intermediates) != 0 { + t.Errorf("expected intermediates to be empty, got %v", env.Signatures[0].Intermediates) + } +} diff --git a/pkg/chains/storage/archivista/cert.go b/pkg/chains/storage/archivista/cert.go new file mode 100644 index 0000000000..a5969d1a56 --- /dev/null +++ b/pkg/chains/storage/archivista/cert.go @@ -0,0 +1,120 @@ +package archivista + +import ( + "crypto/x509" + "encoding/pem" + "fmt" +) + +// parseAndOrderCertificateChain parses a PEM-encoded certificate chain string, +// validates each "CERTIFICATE" PEM block, and orders them so that the leaf certificate is first, +// followed by intermediates. If no intermediates are present, it simply returns the single certificate. +func parseAndOrderCertificateChain(chain string) (leafCert []byte, intermediates [][]byte, err error) { + if chain == "" { + return nil, nil, fmt.Errorf("empty certificate chain") + } + + data := []byte(chain) + type parsedCert struct { + cert *x509.Certificate + pem []byte + } + var certs []parsedCert + + // Parse all PEM blocks of type "CERTIFICATE" + for { + var block *pem.Block + block, data = pem.Decode(data) + if block == nil { + break + } + if block.Type != "CERTIFICATE" { + continue + } + parsed, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, nil, fmt.Errorf("failed to parse certificate: %w", err) + } + certs = append(certs, parsedCert{ + cert: parsed, + pem: pem.EncodeToMemory(block), + }) + } + + if len(certs) == 0 { + return nil, nil, fmt.Errorf("no valid certificates found in chain") + } + + // If only one certificate is found, return it as the leaf. + if len(certs) == 1 { + return certs[0].pem, nil, nil + } + + // Identify leaf candidates: + // Prefer certificates that are not self-signed and whose Subject isn't used as an Issuer in any other cert. + var leafCandidates []parsedCert + for i, pc := range certs { + selfSigned := pc.cert.Subject.String() == pc.cert.Issuer.String() + if !selfSigned { + used := false + for j, other := range certs { + if i == j { + continue + } + if other.cert.Issuer.String() == pc.cert.Subject.String() { + used = true + break + } + } + if !used { + leafCandidates = append(leafCandidates, pc) + } + } + } + // If no non-self-signed candidate, fall back to self-signed ones. + if len(leafCandidates) == 0 { + leafCandidates = certs + } + + // Choose the best leaf candidate: if multiple, select the one with the most recent NotBefore date. + leaf := leafCandidates[0] + for _, candidate := range leafCandidates[1:] { + if candidate.cert.NotBefore.After(leaf.cert.NotBefore) { + leaf = candidate + } + } + + // Build a map for quick lookup (subject => parsedCert). + subjectMap := make(map[string]parsedCert) + for _, pc := range certs { + subjectMap[pc.cert.Subject.String()] = pc + } + + // Order the chain starting from the leaf. + ordered := []parsedCert{leaf} + used := map[string]bool{leaf.cert.SerialNumber.String(): true} + current := leaf + for { + next, found := subjectMap[current.cert.Issuer.String()] + if !found || used[next.cert.SerialNumber.String()] { + break + } + ordered = append(ordered, next) + used[next.cert.SerialNumber.String()] = true + current = next + } + + leafCert = ordered[0].pem + // The intermediates are any ordered certs after the leaf. 
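+	// For a full chain this yields leaf, then intermediate(s), then root,
+	// as exercised by the ordering tests in cert_test.go.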
+ for i := 1; i < len(ordered); i++ { + intermediates = append(intermediates, ordered[i].pem) + } + // Append any extra certificates not included in the ordering. + for _, pc := range certs { + if !used[pc.cert.SerialNumber.String()] { + intermediates = append(intermediates, pc.pem) + } + } + + return leafCert, intermediates, nil +} diff --git a/pkg/chains/storage/archivista/cert_test.go b/pkg/chains/storage/archivista/cert_test.go new file mode 100644 index 0000000000..744cbecf81 --- /dev/null +++ b/pkg/chains/storage/archivista/cert_test.go @@ -0,0 +1,191 @@ +package archivista + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "math/big" + "strings" + "testing" + "time" +) + +// createCertificate is a helper that generates a PEM-encoded certificate, its parsed form, and a private key. +// If parent and parentKey are nil, the certificate is self-signed. +func createCertificate(t *testing.T, subject, issuer string, serial int64, notBefore time.Time, parent *x509.Certificate, parentKey *rsa.PrivateKey) (string, *x509.Certificate, *rsa.PrivateKey) { + t.Helper() + key, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatalf("failed to generate key: %v", err) + } + template := &x509.Certificate{ + SerialNumber: big.NewInt(serial), + Subject: pkix.Name{ + CommonName: subject, + }, + Issuer: pkix.Name{ + CommonName: issuer, + }, + NotBefore: notBefore, + NotAfter: notBefore.Add(365 * 24 * time.Hour), + KeyUsage: x509.KeyUsageDigitalSignature, + } + // Self-sign if no parent provided. + if parent == nil { + parent = template + parentKey = key + } + certDER, err := x509.CreateCertificate(rand.Reader, template, parent, &key.PublicKey, parentKey) + if err != nil { + t.Fatalf("failed to create certificate: %v", err) + } + certPEM := string(pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: certDER, + })) + parsedCert, err := x509.ParseCertificate(certDER) + if err != nil { + t.Fatalf("failed to parse generated certificate: %v", err) + } + return certPEM, parsedCert, key +} + +func TestParseAndOrderCertificateChain(t *testing.T) { + // Test case 1: empty chain. + t.Run("empty chain", func(t *testing.T) { + _, _, err := parseAndOrderCertificateChain("") + if err == nil || !strings.Contains(err.Error(), "empty certificate chain") { + t.Errorf("expected error for empty chain, got: %v", err) + } + }) + + // Test case 2: no valid certificates found (wrong PEM type). + t.Run("no valid certificates", func(t *testing.T) { + invalid := "-----BEGIN NOT CERTIFICATE-----\nabc\n-----END NOT CERTIFICATE-----" + _, _, err := parseAndOrderCertificateChain(invalid) + if err == nil || !strings.Contains(err.Error(), "no valid certificates found") { + t.Errorf("expected error for no valid certificates, got: %v", err) + } + }) + + // Test case 3: single certificate (self-signed). + t.Run("single certificate", func(t *testing.T) { + notBefore := time.Now().Add(-1 * time.Hour) + certPEM, _, _ := createCertificate(t, "single", "single", 100, notBefore, nil, nil) + leaf, intermediates, err := parseAndOrderCertificateChain(certPEM) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + // Leaf should equal the original cert, with no intermediates. + if string(leaf) != certPEM { + t.Errorf("expected leaf to be the certificate, got different value") + } + if len(intermediates) != 0 { + t.Errorf("expected no intermediates, got %d", len(intermediates)) + } + }) + + // Test case 4: valid chain with leaf, intermediate, and root in random order. 
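+	// The parser must reorder them to leaf, intermediate, root regardless of the input order.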
+ t.Run("valid chain ordering", func(t *testing.T) { + now := time.Now() + // Create root certificate (self-signed). + rootPEM, rootCert, rootKey := createCertificate(t, "root", "root", 1, now.Add(-10*time.Hour), nil, nil) + // Create intermediate certificate signed by root. + intermediatePEM, intermediateCert, intermediateKey := createCertificate(t, "intermediate", "root", 2, now.Add(-5*time.Hour), rootCert, rootKey) + // Create leaf certificate signed by intermediate. + leafPEM, _, _ := createCertificate(t, "leaf", "intermediate", 3, now.Add(-1*time.Hour), intermediateCert, intermediateKey) + // Combine in random order: intermediate, leaf, root. + chain := intermediatePEM + leafPEM + rootPEM + leafOut, intermediates, err := parseAndOrderCertificateChain(chain) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + // Expected leaf is the leaf certificate. + if string(leafOut) != leafPEM { + t.Errorf("expected leaf to be leaf certificate") + } + // Expected intermediates: first should be the intermediate, second the root. + if len(intermediates) < 2 { + t.Errorf("expected at least 2 intermediates, got %d", len(intermediates)) + } else { + if string(intermediates[0]) != intermediatePEM { + t.Errorf("expected first intermediate to be intermediate certificate") + } + if string(intermediates[1]) != rootPEM { + t.Errorf("expected second intermediate to be root certificate") + } + } + }) + + // Test case 5: valid chain with an extra certificate not connected to the chain. + t.Run("chain with extra certificate", func(t *testing.T) { + now := time.Now() + // Create a proper chain: root, intermediate, leaf. + rootPEM, rootCert, rootKey := createCertificate(t, "root", "root", 1, now.Add(-10*time.Hour), nil, nil) + intermediatePEM, intermediateCert, intermediateKey := createCertificate(t, "intermediate", "root", 2, now.Add(-5*time.Hour), rootCert, rootKey) + leafPEM, _, _ := createCertificate(t, "leaf", "intermediate", 3, now.Add(-1*time.Hour), intermediateCert, intermediateKey) + // Create an extra self-signed certificate that doesn't chain. + extraPEM, _, _ := createCertificate(t, "extra", "extra", 4, now.Add(-2*time.Hour), nil, nil) + // Combine in random order: extra, root, leaf, intermediate. + chain := extraPEM + rootPEM + leafPEM + intermediatePEM + leafOut, intermediates, err := parseAndOrderCertificateChain(chain) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + // Expected leaf remains the leaf certificate. + if string(leafOut) != leafPEM { + t.Errorf("expected leaf to be leaf certificate") + } + // Intermediates should include the intermediate and root in order, with the extra appended. + foundIntermediate, foundRoot, foundExtra := false, false, false + for _, interm := range intermediates { + if string(interm) == intermediatePEM { + foundIntermediate = true + } + if string(interm) == rootPEM { + foundRoot = true + } + if string(interm) == extraPEM { + foundExtra = true + } + } + if !foundIntermediate || !foundRoot || !foundExtra { + t.Errorf("expected intermediates to contain intermediate, root, and extra certificates") + } + }) + + // Test case 6: chain with an invalid certificate block. + t.Run("invalid certificate block", func(t *testing.T) { + invalidCert := "-----BEGIN CERTIFICATE-----\ninvalid\n-----END CERTIFICATE-----" + chain := invalidCert + _, _, err := parseAndOrderCertificateChain(chain) + // Since no valid certificate is found, we expect the error to indicate that. 
+ if err == nil || !strings.Contains(err.Error(), "no valid certificates found") { + t.Errorf("expected error for invalid certificate block, got: %v", err) + } + }) + + // Test case 7: multiple self-signed certificates; select the one with the most recent NotBefore. + t.Run("multiple self-signed certificates", func(t *testing.T) { + now := time.Now() + // Create two self-signed certificates with different NotBefore times. + certAPEM, _, _ := createCertificate(t, "A", "A", 10, now.Add(-2*time.Hour), nil, nil) + certBPEM, _, _ := createCertificate(t, "B", "B", 11, now.Add(-1*time.Hour), nil, nil) + // Combine in any order. + chain := certAPEM + certBPEM + leafOut, intermediates, err := parseAndOrderCertificateChain(chain) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + // Expect leaf to be certificate B since its NotBefore is later. + if string(leafOut) != certBPEM { + t.Errorf("expected leaf to be certificate B") + } + // The other certificate should appear as an intermediate. + if len(intermediates) != 1 || string(intermediates[0]) != certAPEM { + t.Errorf("expected certificate A to be intermediate") + } + }) +} diff --git a/pkg/chains/storage/archivista/patch_test.go b/pkg/chains/storage/archivista/patch_test.go new file mode 100644 index 0000000000..badaca4c9c --- /dev/null +++ b/pkg/chains/storage/archivista/patch_test.go @@ -0,0 +1,82 @@ +package archivista + +import ( + "context" + "testing" + + v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + fakePipelineClient "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/tektoncd/chains/pkg/chains/objects" +) + +// --- Supported Branch Tests --- + +// TestPatchTektonObjectAnnotations_TaskRunV1Beta1 exercises the v1beta1.TaskRun branch. +func TestPatchTektonObjectAnnotations_TaskRunV1Beta1(t *testing.T) { + ctx := context.Background() + // Seed the fake client with a v1beta1.TaskRun. + tr := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "taskrun-v1beta1", + Namespace: "default", + }, + } + client := fakePipelineClient.NewSimpleClientset(tr) + // Wrap the TaskRun using the helper. + obj := objects.NewTaskRunObjectV1Beta1(tr) + annotations := map[string]string{"foo": "bar"} + if err := PatchTektonObjectAnnotations(ctx, obj, annotations, client); err != nil { + t.Fatalf("expected no error, got %v", err) + } + if len(client.Actions()) == 0 { + t.Fatalf("expected a patch action, got none") + } +} + +// TestPatchTektonObjectAnnotations_PipelineRunV1Beta1 exercises the v1beta1.PipelineRun branch. +func TestPatchTektonObjectAnnotations_PipelineRunV1Beta1(t *testing.T) { + ctx := context.Background() + // Seed the fake client with a v1beta1.PipelineRun. + pr := &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pipelinerun-v1beta1", + Namespace: "default", + }, + } + client := fakePipelineClient.NewSimpleClientset(pr) + // Wrap the PipelineRun. + obj := objects.NewPipelineRunObjectV1Beta1(pr) + annotations := map[string]string{"foo": "bar"} + if err := PatchTektonObjectAnnotations(ctx, obj, annotations, client); err != nil { + t.Fatalf("expected no error, got %v", err) + } + if len(client.Actions()) == 0 { + t.Fatalf("expected a patch action, got none") + } +} + +// --- Unsupported Branch Test --- + +// unsupportedTaskRun is a type that embeds a v1beta1.TaskRun but is distinct. 
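+// Because a Go type switch matches on the exact dynamic type, a value of this
+// type does not satisfy the *v1beta1.TaskRun case in PatchTektonObjectAnnotations.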
+type unsupportedTaskRun struct { + v1beta1.TaskRun +} + +// unsupportedTektonObject wraps a real TektonObject (from a v1beta1.TaskRun) but +// overrides GetObject() so that it returns an unsupported type. +type unsupportedTektonObject struct { + inner objects.TektonObject +} + +// GetObject returns an unsupported type by wrapping the inner object's underlying TaskRun. +func (u *unsupportedTektonObject) GetObject() interface{} { + // Retrieve the inner object (which should be a *v1beta1.TaskRun). + tr, ok := u.inner.GetObject().(*v1beta1.TaskRun) + if !ok { + return u.inner.GetObject() + } + // Wrap it in an unsupportedTaskRun so the type switch in PatchTektonObjectAnnotations doesn't match. + return &unsupportedTaskRun{*tr} +} diff --git a/pkg/chains/storage/storage.go b/pkg/chains/storage/storage.go index dbb07a37b2..fdb42e7ee5 100644 --- a/pkg/chains/storage/storage.go +++ b/pkg/chains/storage/storage.go @@ -18,6 +18,7 @@ import ( "errors" "github.com/tektoncd/chains/pkg/chains/objects" + "github.com/tektoncd/chains/pkg/chains/storage/archivista" "github.com/tektoncd/chains/pkg/chains/storage/docdb" "github.com/tektoncd/chains/pkg/chains/storage/gcs" "github.com/tektoncd/chains/pkg/chains/storage/grafeas" @@ -93,10 +94,14 @@ func InitializeBackends(ctx context.Context, ps versioned.Interface, kc kubernet return nil, err } backends[backendType] = pubsubBackend + case archivista.StorageBackendArchivista: + archivistaBackend, err := archivista.NewArchivistaStorage(cfg, ps) + if err != nil { + return nil, err + } + backends[backendType] = archivistaBackend } - } - logger.Infof("successfully initialized backends: %v", maps.Keys(backends)) return backends, nil } diff --git a/pkg/config/config.go b/pkg/config/config.go index 4e9cef101a..8a69ade629 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -27,6 +27,7 @@ import ( cm "knative.dev/pkg/configmap" ) +// Config is the overall configuration for Chains. type Config struct { Artifacts ArtifactConfigs Storage StorageConfigs @@ -36,14 +37,14 @@ type Config struct { BuildDefinition BuildDefinitionConfig } -// ArtifactConfigs contains the configuration for how to sign/store/format the signatures for each artifact type +// ArtifactConfigs contains the configuration for how to sign/store/format the signatures for each artifact type. type ArtifactConfigs struct { OCI Artifact PipelineRuns Artifact TaskRuns Artifact } -// Artifact contains the configuration for how to sign/store/format the signatures for a single artifact +// Artifact contains the configuration for how to sign/store/format the signatures for a single artifact. type Artifact struct { Format string StorageBackend sets.Set[string] @@ -51,17 +52,18 @@ type Artifact struct { DeepInspectionEnabled bool } -// StorageConfigs contains the configuration to instantiate different storage providers +// StorageConfigs contains the configuration to instantiate different storage providers. type StorageConfigs struct { - GCS GCSStorageConfig - OCI OCIStorageConfig - Tekton TektonStorageConfig - DocDB DocDBStorageConfig - Grafeas GrafeasConfig - PubSub PubSubStorageConfig + GCS GCSStorageConfig + OCI OCIStorageConfig + Tekton TektonStorageConfig + DocDB DocDBStorageConfig + Grafeas GrafeasConfig + PubSub PubSubStorageConfig + Archivista ArchivistaStorageConfig } -// SignerConfigs contains the configuration to instantiate different signers +// SignerConfigs contains the configuration to instantiate different signers. 
type SignerConfigs struct { X509 X509Signer KMS KMSSigner @@ -89,7 +91,7 @@ type KMSSigner struct { Auth KMSAuth } -// KMSAuth configures authentication to the KMS server +// KMSAuth configures authentication to the KMS server. type KMSAuth struct { Address string Token string @@ -98,13 +100,13 @@ type KMSAuth struct { Spire KMSAuthSpire } -// KMSAuthOIDC configures settings to authenticate with OIDC +// KMSAuthOIDC configures settings to authenticate with OIDC. type KMSAuthOIDC struct { Path string Role string } -// KMSAuthSpire configures settings to get an auth token from spire +// KMSAuthSpire configures settings to get an auth token from spire. type KMSAuthSpire struct { Sock string Audience string @@ -120,6 +122,7 @@ type OCIStorageConfig struct { } type TektonStorageConfig struct { + // Currently no fields. } type DocDBStorageConfig struct { @@ -130,12 +133,12 @@ type DocDBStorageConfig struct { } type GrafeasConfig struct { - // project id that is used to store notes and occurences + // Project id that is used to store notes and occurrences. ProjectID string - // note id used to create a note that an occurrence will be attached to + // Note id used to create a note that an occurrence will be attached to. NoteID string - // NoteHint is used to set the attestation note + // NoteHint is used to set the attestation note. NoteHint string } @@ -155,6 +158,16 @@ type TransparencyConfig struct { URL string } +// ----------------------- New Archivista configuration ----------------------- + +// ArchivistaStorageConfig holds configuration for the Archivista storage backend. +type ArchivistaStorageConfig struct { + // URL is the endpoint for the Archivista service. + URL string `json:"url,omitempty"` +} + +// ----------------------- Constants ----------------------- + const ( taskrunFormatKey = "artifacts.taskrun.format" taskrunStorageKey = "artifacts.taskrun.storage" @@ -177,6 +190,9 @@ const ( docDBMongoServerURLDirKey = "storage.docdb.mongo-server-url-dir" docDBMongoServerURLPathKey = "storage.docdb.mongo-server-url-path" + // New Archivista constant: + archivistaURLKey = "storage.archivista.url" + grafeasProjectIDKey = "storage.grafeas.projectid" grafeasNoteIDKey = "storage.grafeas.noteid" grafeasNoteHint = "storage.grafeas.notehint" @@ -185,8 +201,6 @@ const ( pubsubProvider = "storage.pubsub.provider" pubsubTopic = "storage.pubsub.topic" - // No config for PubSub - In-Memory - // PubSub - Kafka pubsubKafkaBootstrapServer = "storage.pubsub.kafka.bootstrap.servers" @@ -194,8 +208,8 @@ const ( kmsSignerKMSRef = "signers.kms.kmsref" kmsAuthAddress = "signers.kms.auth.address" kmsAuthToken = "signers.kms.auth.token" - kmsAuthOIDCPath = "signers.kms.auth.oidc.path" kmsAuthTokenPath = "signers.kms.auth.token-path" // #nosec G101 + kmsAuthOIDCPath = "signers.kms.auth.oidc.path" kmsAuthOIDCRole = "signers.kms.auth.oidc.role" kmsAuthSpireSock = "signers.kms.auth.spire.sock" kmsAuthSpireAudience = "signers.kms.auth.spire.audience" @@ -220,6 +234,8 @@ const ( ChainsConfig = "chains-config" ) +// ----------------------- Helper functions for parsing ----------------------- + func (artifact *Artifact) Enabled() bool { return !(artifact.StorageBackend.Len() == 1 && artifact.StorageBackend.Has("")) } @@ -268,7 +284,7 @@ func defaultConfig() *Config { } } -// NewConfigFromMap creates a Config from the supplied map +// NewConfigFromMap creates a Config from the supplied map. 
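+// For the new Archivista backend, the relevant ConfigMap keys are, e.g.:
+//
+//	artifacts.taskrun.storage: archivista
+//	storage.archivista.url: https://archivista.example.com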
 func NewConfigFromMap(data map[string]string) (*Config, error) {
 	cfg := defaultConfig()
@@ -276,18 +292,18 @@ func NewConfigFromMap(data map[string]string) (*Config, error) {
 		// Artifact-specific configs
 		// TaskRuns
 		asString(taskrunFormatKey, &cfg.Artifacts.TaskRuns.Format, "in-toto", "slsa/v1", "slsa/v2alpha3", "slsa/v2alpha4"),
-		asStringSet(taskrunStorageKey, &cfg.Artifacts.TaskRuns.StorageBackend, sets.New[string]("tekton", "oci", "gcs", "docdb", "grafeas", "kafka")),
+		asStringSet(taskrunStorageKey, &cfg.Artifacts.TaskRuns.StorageBackend, sets.New[string]("tekton", "oci", "gcs", "docdb", "grafeas", "kafka", "archivista")),
 		asString(taskrunSignerKey, &cfg.Artifacts.TaskRuns.Signer, "x509", "kms"),
 
 		// PipelineRuns
 		asString(pipelinerunFormatKey, &cfg.Artifacts.PipelineRuns.Format, "in-toto", "slsa/v1", "slsa/v2alpha3", "slsa/v2alpha4"),
-		asStringSet(pipelinerunStorageKey, &cfg.Artifacts.PipelineRuns.StorageBackend, sets.New[string]("tekton", "oci", "gcs", "docdb", "grafeas")),
+		asStringSet(pipelinerunStorageKey, &cfg.Artifacts.PipelineRuns.StorageBackend, sets.New[string]("tekton", "oci", "gcs", "docdb", "grafeas", "archivista")),
 		asString(pipelinerunSignerKey, &cfg.Artifacts.PipelineRuns.Signer, "x509", "kms"),
 		asBool(pipelinerunEnableDeepInspectionKey, &cfg.Artifacts.PipelineRuns.DeepInspectionEnabled),
 
 		// OCI
 		asString(ociFormatKey, &cfg.Artifacts.OCI.Format, "simplesigning"),
-		asStringSet(ociStorageKey, &cfg.Artifacts.OCI.StorageBackend, sets.New[string]("tekton", "oci", "gcs", "docdb", "grafeas", "kafka")),
+		asStringSet(ociStorageKey, &cfg.Artifacts.OCI.StorageBackend, sets.New[string]("tekton", "oci", "gcs", "docdb", "grafeas", "kafka", "archivista")),
 		asString(ociSignerKey, &cfg.Artifacts.OCI.Signer, "x509", "kms"),
 
 		// PubSub - General
@@ -305,6 +321,9 @@ func NewConfigFromMap(data map[string]string) (*Config, error) {
 		asString(docDBMongoServerURLKey, &cfg.Storage.DocDB.MongoServerURL),
 		asString(docDBMongoServerURLDirKey, &cfg.Storage.DocDB.MongoServerURLDir),
 		asString(docDBMongoServerURLPathKey, &cfg.Storage.DocDB.MongoServerURLPath),
+
+		asString(archivistaURLKey, &cfg.Storage.Archivista.URL),
+
 		asString(grafeasProjectIDKey, &cfg.Storage.Grafeas.ProjectID),
 		asString(grafeasNoteIDKey, &cfg.Storage.Grafeas.NoteID),
 		asString(grafeasNoteHint, &cfg.Storage.Grafeas.NoteHint),
@@ -342,12 +361,12 @@ func NewConfigFromMap(data map[string]string) (*Config, error) {
 	return cfg, nil
 }
 
-// NewConfigFromConfigMap creates a Config from the supplied ConfigMap
+// NewConfigFromConfigMap creates a Config from the supplied ConfigMap.
 func NewConfigFromConfigMap(configMap *corev1.ConfigMap) (*Config, error) {
 	return NewConfigFromMap(configMap.Data)
 }
 
-// oneOf sets target to true if it maches any of the values
+// oneOf sets target to true if it matches any of the values.
 func oneOf(key string, target *bool, values ...string) cm.ParseFunc {
 	return func(data map[string]string) error {
 		raw, ok := data[key]
@@ -366,8 +385,7 @@ func oneOf(key string, target *bool, values ...string) cm.ParseFunc {
 	}
 }
 
-// allow additional supported values for a "true" decision
-// in additional to the usual ones provided by strconv.ParseBool
+// asBool parses the value at key as a boolean into the target, accepting extra truthy values beyond strconv.ParseBool.
 func asBool(key string, target *bool) cm.ParseFunc {
 	return func(data map[string]string) error {
 		raw, ok := data[key]
@@ -383,8 +401,7 @@ func asBool(key string, target *bool) cm.ParseFunc {
 	}
 }
 
-// asString passes the value at key through into the target, if it exists.
-// TODO(mattmoor): This might be a nice variation on cm.AsString to upstream. +// asString passes the value at key into the target, if it exists. func asString(key string, target *string, values ...string) cm.ParseFunc { return func(data map[string]string) error { raw, ok := data[key] @@ -402,7 +419,7 @@ func asString(key string, target *string, values ...string) cm.ParseFunc { } } -// asStringSet parses the value at key as a sets.Set[string] (split by ',') into the target, if it exists. +// asStringSet parses the value at key as a set (split by ',') into the target, if it exists. func asStringSet(key string, target *sets.Set[string], allowed sets.Set[string]) cm.ParseFunc { return func(data map[string]string) error { if raw, ok := data[key]; ok { diff --git a/vendor/cel.dev/expr/.bazelversion b/vendor/cel.dev/expr/.bazelversion index 26bc914a3b..56b46a3ad2 100644 --- a/vendor/cel.dev/expr/.bazelversion +++ b/vendor/cel.dev/expr/.bazelversion @@ -1,2 +1,6 @@ +<<<<<<< HEAD 7.0.1 +======= +6.4.0 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) # Keep this pinned version in parity with cel-go diff --git a/vendor/cel.dev/expr/.gitignore b/vendor/cel.dev/expr/.gitignore index 0d4fed27c9..0cae1c6dd0 100644 --- a/vendor/cel.dev/expr/.gitignore +++ b/vendor/cel.dev/expr/.gitignore @@ -1,2 +1,5 @@ bazel-* +<<<<<<< HEAD MODULE.bazel.lock +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/cel.dev/expr/BUILD.bazel b/vendor/cel.dev/expr/BUILD.bazel index 37d8adc950..701c29c88c 100644 --- a/vendor/cel.dev/expr/BUILD.bazel +++ b/vendor/cel.dev/expr/BUILD.bazel @@ -16,7 +16,11 @@ go_library( importpath = "cel.dev/expr", visibility = ["//visibility:public"], deps = [ +<<<<<<< HEAD "@org_golang_google_genproto_googleapis_rpc//status:go_default_library", +======= + "//proto/cel/expr:google_rpc_status_go_proto", +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "@org_golang_google_protobuf//reflect/protoreflect", "@org_golang_google_protobuf//runtime/protoimpl", "@org_golang_google_protobuf//types/known/anypb", diff --git a/vendor/cel.dev/expr/README.md b/vendor/cel.dev/expr/README.md index 7930c0b755..542619e17c 100644 --- a/vendor/cel.dev/expr/README.md +++ b/vendor/cel.dev/expr/README.md @@ -33,7 +33,12 @@ The required components of a system that supports CEL are: * The textual representation of an expression as written by a developer. It is of similar syntax to expressions in C/C++/Java/JavaScript +<<<<<<< HEAD * A representation of the program's abstract syntax tree (AST). +======= +* A binary representation of an expression. It is an abstract syntax tree + (AST). +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) * A compiler library that converts the textual representation to the binary representation. This can be done ahead of time (in the control plane) or just before evaluation (in the data plane). @@ -42,6 +47,7 @@ The required components of a system that supports CEL are: * An evaluator library that takes the binary format in the context and produces a result, usually a Boolean. +<<<<<<< HEAD For use cases which require persistence or cross-process communcation, it is highly recommended to serialize the type-checked expression as a protocol buffer. 
The CEL team will maintains canonical protocol buffers for ASTs and @@ -51,6 +57,8 @@ will keep these versions identical and wire-compatible in perpetuity: * [CEL v1alpha1](https://github.com/googleapis/googleapis/tree/master/google/api/expr/v1alpha1) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Example of boolean conditions and object construction: ``` c diff --git a/vendor/cel.dev/expr/WORKSPACE b/vendor/cel.dev/expr/WORKSPACE index b6dc9ed673..441f6e8a0d 100644 --- a/vendor/cel.dev/expr/WORKSPACE +++ b/vendor/cel.dev/expr/WORKSPACE @@ -27,6 +27,7 @@ http_archive( ], ) +<<<<<<< HEAD # googleapis as of 09/16/2024 http_archive( name = "com_google_googleapis", @@ -34,6 +35,15 @@ http_archive( sha256 = "57319889d47578b3c89bf1b3f34888d796a8913d63b32d750a4cd12ed303c4e8", urls = [ "https://github.com/googleapis/googleapis/archive/4082d5e51e8481f6ccc384cacd896f4e78f19dee.tar.gz", +======= +# googleapis as of 05/26/2023 +http_archive( + name = "com_google_googleapis", + strip_prefix = "googleapis-07c27163ac591955d736f3057b1619ece66f5b99", + sha256 = "bd8e735d881fb829751ecb1a77038dda4a8d274c45490cb9fcf004583ee10571", + urls = [ + "https://github.com/googleapis/googleapis/archive/07c27163ac591955d736f3057b1619ece66f5b99.tar.gz", +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ], ) @@ -95,22 +105,39 @@ switched_rules_by_language( # Do *not* call *_dependencies(), etc, yet. See comment at the end. # Generated Google APIs protos for Golang +<<<<<<< HEAD # Generated Google APIs protos for Golang 08/26/2024 +======= +# Generated Google APIs protos for Golang 05/25/2023 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) go_repository( name = "org_golang_google_genproto_googleapis_api", build_file_proto_mode = "disable_global", importpath = "google.golang.org/genproto/googleapis/api", +<<<<<<< HEAD sum = "h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw=", version = "v0.0.0-20240826202546-f6391c0de4c7", ) # Generated Google APIs protos for Golang 08/26/2024 +======= + sum = "h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ=", + version = "v0.0.0-20230525234035-dd9d682886f9", +) + +# Generated Google APIs protos for Golang 05/25/2023 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) go_repository( name = "org_golang_google_genproto_googleapis_rpc", build_file_proto_mode = "disable_global", importpath = "google.golang.org/genproto/googleapis/rpc", +<<<<<<< HEAD sum = "h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs=", version = "v0.0.0-20240826202546-f6391c0de4c7", +======= + sum = "h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM=", + version = "v0.0.0-20230525234030-28d5490b6b19", +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) # gRPC deps diff --git a/vendor/cel.dev/expr/cloudbuild.yaml b/vendor/cel.dev/expr/cloudbuild.yaml index c40881f122..6224a7eeca 100644 --- a/vendor/cel.dev/expr/cloudbuild.yaml +++ b/vendor/cel.dev/expr/cloudbuild.yaml @@ -1,8 +1,15 @@ steps: +<<<<<<< HEAD - name: 'gcr.io/cloud-builders/bazel:7.0.1' entrypoint: bazel args: ['build', '...'] id: bazel-build +======= +- name: 'gcr.io/cloud-builders/bazel:6.4.0' + entrypoint: bazel + args: ['test', '--test_output=errors', '...'] + id: bazel-test +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) waitFor: ['-'] timeout: 15m options: diff --git a/vendor/cel.dev/expr/regen_go_proto.sh b/vendor/cel.dev/expr/regen_go_proto.sh index fdcbb3ce25..3d0fe8217e 100644 --- a/vendor/cel.dev/expr/regen_go_proto.sh +++ b/vendor/cel.dev/expr/regen_go_proto.sh @@ -1,9 +1,17 @@ #!/bin/sh +<<<<<<< 
HEAD bazel build //proto/cel/expr/conformance/... files=($(bazel aquery 'kind(proto, //proto/cel/expr/conformance/...)' | grep Outputs | grep "[.]pb[.]go" | sed 's/Outputs: \[//' | sed 's/\]//' | tr "," "\n")) for src in ${files[@]}; do dst=$(echo $src | sed 's/\(.*\/cel.dev\/expr\/\(.*\)\)/\2/') +======= +bazel build //proto/test/... +files=($(bazel aquery 'kind(proto, //proto/...)' | grep Outputs | grep "[.]pb[.]go" | sed 's/Outputs: \[//' | sed 's/\]//' | tr "," "\n")) +for src in ${files[@]}; +do + dst=$(echo $src | sed 's/\(.*\%\/github.com\/google\/cel-spec\/\(.*\)\)/\2/') +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) echo "copying $dst" $(cp $src $dst) done diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md index 466426c0d8..b01854d82f 100644 --- a/vendor/cloud.google.com/go/auth/CHANGES.md +++ b/vendor/cloud.google.com/go/auth/CHANGES.md @@ -1,5 +1,6 @@ # Changelog +<<<<<<< HEAD ## [0.14.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.13.0...auth/v0.14.0) (2025-01-08) @@ -46,6 +47,8 @@ * **auth:** Ensure endpoints are present in Validator ([#11209](https://github.com/googleapis/google-cloud-go/issues/11209)) ([106cd53](https://github.com/googleapis/google-cloud-go/commit/106cd53309facaef1b8ea78376179f523f6912b9)), refs [#11006](https://github.com/googleapis/google-cloud-go/issues/11006) [#11190](https://github.com/googleapis/google-cloud-go/issues/11190) [#11189](https://github.com/googleapis/google-cloud-go/issues/11189) [#11188](https://github.com/googleapis/google-cloud-go/issues/11188) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## [0.11.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.10.2...auth/v0.11.0) (2024-11-21) diff --git a/vendor/cloud.google.com/go/auth/auth.go b/vendor/cloud.google.com/go/auth/auth.go index cd5e988684..0bf3eeb11e 100644 --- a/vendor/cloud.google.com/go/auth/auth.go +++ b/vendor/cloud.google.com/go/auth/auth.go @@ -24,7 +24,10 @@ import ( "encoding/json" "errors" "fmt" +<<<<<<< HEAD "log/slog" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "net/http" "net/url" "strings" @@ -33,7 +36,10 @@ import ( "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/jwt" +<<<<<<< HEAD "github.com/googleapis/gax-go/v2/internallog" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const ( @@ -492,11 +498,14 @@ type Options2LO struct { // UseIDToken requests that the token returned be an ID token if one is // returned from the server. Optional. UseIDToken bool +<<<<<<< HEAD // Logger is used for debug logging. If provided, logging will be enabled // at the loggers configured level. By default logging is disabled unless // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default // logger will be used. Optional. 
Logger *slog.Logger +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (o *Options2LO) client() *http.Client { @@ -527,13 +536,20 @@ func New2LOTokenProvider(opts *Options2LO) (TokenProvider, error) { if err := opts.validate(); err != nil { return nil, err } +<<<<<<< HEAD return tokenProvider2LO{opts: opts, Client: opts.client(), logger: internallog.New(opts.Logger)}, nil +======= + return tokenProvider2LO{opts: opts, Client: opts.client()}, nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type tokenProvider2LO struct { opts *Options2LO Client *http.Client +<<<<<<< HEAD logger *slog.Logger +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (tp tokenProvider2LO) Token(ctx context.Context) (*Token, error) { @@ -568,12 +584,18 @@ func (tp tokenProvider2LO) Token(ctx context.Context) (*Token, error) { return nil, err } req.Header.Set("Content-Type", "application/x-www-form-urlencoded") +<<<<<<< HEAD tp.logger.DebugContext(ctx, "2LO token request", "request", internallog.HTTPRequest(req, []byte(v.Encode()))) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) resp, body, err := internal.DoRequest(tp.Client, req) if err != nil { return nil, fmt.Errorf("auth: cannot fetch token: %w", err) } +<<<<<<< HEAD tp.logger.DebugContext(ctx, "2LO token response", "response", internallog.HTTPResponse(resp, body)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { return nil, &Error{ Response: resp, diff --git a/vendor/cloud.google.com/go/auth/credentials/compute.go b/vendor/cloud.google.com/go/auth/credentials/compute.go index 8afd0472ea..70c496133a 100644 --- a/vendor/cloud.google.com/go/auth/credentials/compute.go +++ b/vendor/cloud.google.com/go/auth/credentials/compute.go @@ -37,11 +37,16 @@ var ( // computeTokenProvider creates a [cloud.google.com/go/auth.TokenProvider] that // uses the metadata service to retrieve tokens. +<<<<<<< HEAD func computeTokenProvider(opts *DetectOptions, client *metadata.Client) auth.TokenProvider { return auth.NewCachedTokenProvider(&computeProvider{ scopes: opts.Scopes, client: client, }, &auth.CachedTokenProviderOptions{ +======= +func computeTokenProvider(opts *DetectOptions) auth.TokenProvider { + return auth.NewCachedTokenProvider(computeProvider{scopes: opts.Scopes}, &auth.CachedTokenProviderOptions{ +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ExpireEarly: opts.EarlyTokenRefresh, DisableAsyncRefresh: opts.DisableAsyncRefresh, }) @@ -50,7 +55,10 @@ func computeTokenProvider(opts *DetectOptions, client *metadata.Client) auth.Tok // computeProvider fetches tokens from the google cloud metadata service. 
type computeProvider struct { scopes []string +<<<<<<< HEAD client *metadata.Client +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type metadataTokenResp struct { @@ -59,7 +67,11 @@ type metadataTokenResp struct { TokenType string `json:"token_type"` } +<<<<<<< HEAD func (cs *computeProvider) Token(ctx context.Context) (*auth.Token, error) { +======= +func (cs computeProvider) Token(ctx context.Context) (*auth.Token, error) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) tokenURI, err := url.Parse(computeTokenURI) if err != nil { return nil, err @@ -69,7 +81,11 @@ func (cs *computeProvider) Token(ctx context.Context) (*auth.Token, error) { v.Set("scopes", strings.Join(cs.scopes, ",")) tokenURI.RawQuery = v.Encode() } +<<<<<<< HEAD tokenJSON, err := cs.client.GetWithContext(ctx, tokenURI.String()) +======= + tokenJSON, err := metadata.GetWithContext(ctx, tokenURI.String()) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, fmt.Errorf("credentials: cannot fetch token: %w", err) } diff --git a/vendor/cloud.google.com/go/auth/credentials/detect.go b/vendor/cloud.google.com/go/auth/credentials/detect.go index a1b5a93188..4865d7ddeb 100644 --- a/vendor/cloud.google.com/go/auth/credentials/detect.go +++ b/vendor/cloud.google.com/go/auth/credentials/detect.go @@ -19,7 +19,10 @@ import ( "encoding/json" "errors" "fmt" +<<<<<<< HEAD "log/slog" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "net/http" "os" "time" @@ -28,7 +31,10 @@ import ( "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/credsfile" "cloud.google.com/go/compute/metadata" +<<<<<<< HEAD "github.com/googleapis/gax-go/v2/internallog" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const ( @@ -98,6 +104,7 @@ func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) { } if OnGCE() { +<<<<<<< HEAD metadataClient := metadata.NewWithOptions(&metadata.Options{ Logger: opts.logger(), }) @@ -109,6 +116,14 @@ func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) { UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{ MetadataClient: metadataClient, }, +======= + return auth.NewCredentials(&auth.CredentialsOptions{ + TokenProvider: computeTokenProvider(opts), + ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) { + return metadata.ProjectIDWithContext(ctx) + }), + UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{}, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }), nil } @@ -165,11 +180,14 @@ type DetectOptions struct { // The default value is "googleapis.com". This option is ignored for // authentication flows that do not support universe domain. Optional. UniverseDomain string +<<<<<<< HEAD // Logger is used for debug logging. If provided, logging will be enabled // at the loggers configured level. By default logging is disabled unless // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default // logger will be used. Optional. 
-	Logger *slog.Logger
 }
 
 func (o *DetectOptions) validate() error {
@@ -205,10 +223,13 @@ func (o *DetectOptions) client() *http.Client {
 	return internal.DefaultClient()
 }
 
-func (o *DetectOptions) logger() *slog.Logger {
-	return internallog.New(o.Logger)
-}
-
 func readCredentialsFile(filename string, opts *DetectOptions) (*auth.Credentials, error) {
 	b, err := os.ReadFile(filename)
 	if err != nil {
@@ -269,7 +290,10 @@ func clientCredConfigFromJSON(b []byte, opts *DetectOptions) *auth.Options3LO {
 		AuthURL:  c.AuthURI,
 		TokenURL: c.TokenURI,
 		Client:   opts.client(),
-		Logger:   opts.logger(),
 		EarlyTokenExpiry: opts.EarlyTokenRefresh,
 		AuthHandlerOpts:  handleOpts,
 		// TODO(codyoss): refactor this out. We need to add in auto-detection
diff --git a/vendor/cloud.google.com/go/auth/credentials/filetypes.go b/vendor/cloud.google.com/go/auth/credentials/filetypes.go
index e5243e6cfb..91ccfbc6c9 100644
--- a/vendor/cloud.google.com/go/auth/credentials/filetypes.go
+++ b/vendor/cloud.google.com/go/auth/credentials/filetypes.go
@@ -141,7 +141,10 @@ func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions)
 		TokenURL: f.TokenURL,
 		Subject:  opts.Subject,
 		Client:   opts.client(),
-		Logger:   opts.logger(),
 	}
 	if opts2LO.TokenURL == "" {
 		opts2LO.TokenURL = jwtTokenURL
@@ -160,7 +163,10 @@ func handleUserCredential(f *credsfile.UserCredentialsFile, opts *DetectOptions)
 		EarlyTokenExpiry: opts.EarlyTokenRefresh,
 		RefreshToken:     f.RefreshToken,
 		Client:           opts.client(),
-		Logger:           opts.logger(),
 	}
 	return auth.New3LOTokenProvider(opts3LO)
 }
@@ -179,7 +185,10 @@ func handleExternalAccount(f *credsfile.ExternalAccountFile, opts *DetectOptions
 		Scopes:                   opts.scopes(),
 		WorkforcePoolUserProject: f.WorkforcePoolUserProject,
 		Client:                   opts.client(),
-		Logger:                   opts.logger(),
 		IsDefaultClient:          opts.Client == nil,
 	}
 	if f.ServiceAccountImpersonation != nil {
@@ -198,7 +207,10 @@ func handleExternalAccountAuthorizedUser(f *credsfile.ExternalAccountAuthorizedU
 		ClientSecret: f.ClientSecret,
 		Scopes:       opts.scopes(),
 		Client:       opts.client(),
-		Logger:       opts.logger(),
 	}
 	return externalaccountuser.NewTokenProvider(externalOpts)
 }
@@ -218,7 +230,10 @@ func handleImpersonatedServiceAccount(f *credsfile.ImpersonatedServiceAccountFil
 		Tp:        tp,
 		Delegates: f.Delegates,
 		Client:    opts.client(),
-		Logger:    opts.logger(),
 	})
 }
 
@@ -226,6 +241,9 @@ func handleGDCHServiceAccount(f *credsfile.GDCHServiceAccountFile, opts *DetectO
 	return gdch.NewTokenProvider(f, &gdch.Options{
 		STSAudience: opts.STSAudience,
 		Client:      opts.client(),
-		Logger:      opts.logger(),
 	})
 }
diff --git a/vendor/cloud.google.com/go/auth/credentials/idtoken/cache.go b/vendor/cloud.google.com/go/auth/credentials/idtoken/cache.go
index 2fbbdb8072..7574d53f90 100644
--- a/vendor/cloud.google.com/go/auth/credentials/idtoken/cache.go
+++ b/vendor/cloud.google.com/go/auth/credentials/idtoken/cache.go
@@ -18,7 +18,10 @@
 import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"log/slog"
 	"net/http"
 	"strconv"
 	"strings"
@@ -26,7 +29,10 @@ import (
 	"time"
 
 	"cloud.google.com/go/auth/internal"
-	"github.com/googleapis/gax-go/v2/internallog"
 )
 
 type cachingClient struct {
@@ -36,6 +42,7 @@ type cachingClient struct {
 	// If nil, time.Now is used.
 	clock func() time.Time
 
-	mu     sync.Mutex
-	certs  map[string]*cachedResponse
-	logger *slog.Logger
@@ -46,6 +53,16 @@ func newCachingClient(client *http.Client, logger *slog.Logger) *cachingClient {
-		client: client,
-		certs:  make(map[string]*cachedResponse, 2),
-		logger: logger,
+	mu    sync.Mutex
+	certs map[string]*cachedResponse
+}
+
+func newCachingClient(client *http.Client) *cachingClient {
+	return &cachingClient{
+		client: client,
+		certs:  make(map[string]*cachedResponse, 2),
 	}
 }
 
@@ -62,12 +79,18 @@ func (c *cachingClient) getCert(ctx context.Context, url string) (*certResponse,
 	if err != nil {
 		return nil, err
 	}
-	c.logger.DebugContext(ctx, "cert request", "request", internallog.HTTPRequest(req, nil))
 	resp, body, err := internal.DoRequest(c.client, req)
 	if err != nil {
 		return nil, err
 	}
-	c.logger.DebugContext(ctx, "cert response", "response", internallog.HTTPResponse(resp, body))
 	if resp.StatusCode != http.StatusOK {
 		return nil, fmt.Errorf("idtoken: unable to retrieve cert, got status code %d", resp.StatusCode)
 	}
diff --git a/vendor/cloud.google.com/go/auth/credentials/idtoken/compute.go b/vendor/cloud.google.com/go/auth/credentials/idtoken/compute.go
index 86f6cd77ac..a13eeb04f7 100644
--- a/vendor/cloud.google.com/go/auth/credentials/idtoken/compute.go
+++ b/vendor/cloud.google.com/go/auth/credentials/idtoken/compute.go
@@ -23,7 +23,10 @@ import (
 	"cloud.google.com/go/auth"
 	"cloud.google.com/go/auth/internal"
 	"cloud.google.com/go/compute/metadata"
-	"github.com/googleapis/gax-go/v2/internallog"
 )
 
 const identitySuffix = "instance/service-accounts/default/identity"
@@ -35,6 +38,7 @@ func computeCredentials(opts *Options) (*auth.Credentials, error) {
 	if opts.CustomClaims != nil {
 		return nil, fmt.Errorf("idtoken: Options.CustomClaims can't be used with the metadata service, please provide a service account if you would like to use this feature")
 	}
-	metadataClient := metadata.NewWithOptions(&metadata.Options{
-		Logger: internallog.New(opts.Logger),
-	})
@@ -42,27 +46,46 @@ func computeCredentials(opts *Options) (*auth.Credentials, error) {
-		audience: opts.Audience,
-		format:   opts.ComputeTokenFormat,
-		client:   metadataClient,
+	tp := computeIDTokenProvider{
+		audience: opts.Audience,
+		format:   opts.ComputeTokenFormat,
+		client:   *metadata.NewClient(opts.client()),
 	}
 	return auth.NewCredentials(&auth.CredentialsOptions{
 		TokenProvider: auth.NewCachedTokenProvider(tp, &auth.CachedTokenProviderOptions{
 			ExpireEarly: 5 * time.Minute,
 		}),
 		ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) {
-			return metadataClient.ProjectIDWithContext(ctx)
-		}),
-		UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{
-			MetadataClient: metadataClient,
-		},
+			return metadata.ProjectIDWithContext(ctx)
+		}),
+		UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{},
 	}), nil
 }
 
 type computeIDTokenProvider struct {
 	audience string
 	format   ComputeTokenFormat
-	client   *metadata.Client
-}
-
-func (c *computeIDTokenProvider) Token(ctx context.Context) (*auth.Token, error) {
+	client   metadata.Client
+}
+
+func (c computeIDTokenProvider) Token(ctx context.Context) (*auth.Token, error) {
 	v := url.Values{}
 	v.Set("audience", c.audience)
 	if c.format != ComputeTokenFormatStandard {
diff --git a/vendor/cloud.google.com/go/auth/credentials/idtoken/file.go b/vendor/cloud.google.com/go/auth/credentials/idtoken/file.go
index 87fab751fb..06a7b77fb4 100644
--- a/vendor/cloud.google.com/go/auth/credentials/idtoken/file.go
+++ b/vendor/cloud.google.com/go/auth/credentials/idtoken/file.go
@@ -22,10 +22,15 @@ import (
 
 	"cloud.google.com/go/auth"
 	"cloud.google.com/go/auth/credentials/impersonate"
-	intimpersonate "cloud.google.com/go/auth/credentials/internal/impersonate"
-	"cloud.google.com/go/auth/internal"
-	"cloud.google.com/go/auth/internal/credsfile"
-	"github.com/googleapis/gax-go/v2/internallog"
+	"cloud.google.com/go/auth/internal"
+	"cloud.google.com/go/auth/internal/credsfile"
 )
 
 const (
@@ -45,6 +50,7 @@ func credsFromDefault(creds *auth.Credentials, opts *Options) (*auth.Credentials
 		if err != nil {
 			return nil, err
 		}
-		var tp auth.TokenProvider
-		if resolveUniverseDomain(f) == internal.DefaultUniverseDomain {
-			tp, err = new2LOTokenProvider(f, opts)
@@ -63,13 +69,44 @@ func credsFromDefault(creds *auth.Credentials, opts *Options) (*auth.Credentials
-				Audience: opts.Audience,
-			},
-		}
+		opts2LO := &auth.Options2LO{
+			Email:        f.ClientEmail,
+			PrivateKey:   []byte(f.PrivateKey),
+			PrivateKeyID: f.PrivateKeyID,
+			TokenURL:     f.TokenURL,
+			UseIDToken:   true,
+		}
+		if opts2LO.TokenURL == "" {
+			opts2LO.TokenURL = jwtTokenURL
+		}
+
+		var customClaims map[string]interface{}
+		if opts != nil {
+			customClaims = opts.CustomClaims
+		}
+		if customClaims == nil {
+			customClaims = make(map[string]interface{})
+		}
+		customClaims["target_audience"] = opts.Audience
+
+		opts2LO.PrivateClaims = customClaims
+		tp, err := auth.New2LOTokenProvider(opts2LO)
+		if err != nil {
+			return nil, err
 		}
 		tp = auth.NewCachedTokenProvider(tp, nil)
 		return auth.NewCredentials(&auth.CredentialsOptions{
 			TokenProvider: tp,
 			JSON:          b,
-			ProjectIDProvider:      auth.CredentialsPropertyFunc(creds.ProjectID),
-			UniverseDomainProvider: auth.CredentialsPropertyFunc(creds.UniverseDomain),
+			ProjectIDProvider:      internal.StaticCredentialsProperty(f.ProjectID),
+			UniverseDomainProvider: internal.StaticCredentialsProperty(f.UniverseDomain),
 		}), nil
 	case credsfile.ImpersonatedServiceAccountKey, credsfile.ExternalAccountKey:
 		type url struct {
@@ -81,13 +118,20 @@ func credsFromDefault(creds *auth.Credentials, opts *Options) (*auth.Credentials
 		}
 		account := filepath.Base(accountURL.ServiceAccountImpersonationURL)
 		account = strings.Split(account, ":")[0]
+
 		config := impersonate.IDTokenOptions{
 			Audience:        opts.Audience,
 			TargetPrincipal: account,
 			IncludeEmail:    true,
 			Client:          opts.client(),
 			Credentials:     creds,
-			Logger:          internallog.New(opts.Logger),
 		}
 		idTokenCreds, err := impersonate.NewIDTokenCredentials(&config)
 		if err != nil {
@@ -104,6 +148,7 @@ func credsFromDefault(creds *auth.Credentials, opts *Options) (*auth.Credentials
 		return nil, fmt.Errorf("idtoken: unsupported credentials type: %v", t)
 	}
 }
-
-func new2LOTokenProvider(f *credsfile.ServiceAccountFile, opts *Options) (auth.TokenProvider, error) {
-	opts2LO := &auth.Options2LO{
@@ -140,3 +185,5 @@ func resolveUniverseDomain(f *credsfile.ServiceAccountFile) string {
-	}
-	return internal.DefaultUniverseDomain
-}
diff --git a/vendor/cloud.google.com/go/auth/credentials/idtoken/idtoken.go b/vendor/cloud.google.com/go/auth/credentials/idtoken/idtoken.go
index 86db9525df..0e03107bf1 100644
--- a/vendor/cloud.google.com/go/auth/credentials/idtoken/idtoken.go
+++ b/vendor/cloud.google.com/go/auth/credentials/idtoken/idtoken.go
@@ -22,7 +22,10 @@ package idtoken
 
 import (
 	"errors"
-	"log/slog"
 	"net/http"
 	"os"
 
@@ -86,6 +89,7 @@ type Options struct {
 	// when fetching tokens. If provided this should be a fully-authenticated
 	// client. Optional.
 	Client *http.Client
-	// UniverseDomain is the default service domain for a given Cloud universe.
-	// The default value is "googleapis.com". This is the universe domain
-	// configured for the client, which will be compared to the universe domain
-	// that is separately configured for the credentials.
@@ -96,6 +100,8 @@ type Options struct {
-	// enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
-	// logger will be used. Optional.
-	Logger *slog.Logger
 }
 
 func (o *Options) client() *http.Client {
diff --git a/vendor/cloud.google.com/go/auth/credentials/idtoken/validate.go b/vendor/cloud.google.com/go/auth/credentials/idtoken/validate.go
index c8175a6a8e..0d3da7b89e 100644
--- a/vendor/cloud.google.com/go/auth/credentials/idtoken/validate.go
+++ b/vendor/cloud.google.com/go/auth/credentials/idtoken/validate.go
@@ -24,7 +24,10 @@ import (
 	"encoding/base64"
 	"encoding/json"
 	"fmt"
-	"log/slog"
 	"math/big"
 	"net/http"
 	"strings"
@@ -32,6 +35,7 @@ import (
 
 	"cloud.google.com/go/auth/internal"
 	"cloud.google.com/go/auth/internal/jwt"
-	"github.com/googleapis/gax-go/v2/internallog"
-)
@@ -45,6 +49,18 @@ const (
-var (
-	defaultValidator = &Validator{client: newCachingClient(internal.DefaultClient(), internallog.New(nil))}
+)
+
+const (
+	es256KeySize      int    = 32
+	googleIAPCertsURL string = "https://www.gstatic.com/iap/verify/public_key-jwk"
+	googleSACertsURL  string = "https://www.googleapis.com/oauth2/v3/certs"
+)
+
+var (
+	defaultValidator = &Validator{client: newCachingClient(internal.DefaultClient())}
 	// now aliases time.Now for testing.
 	now = time.Now
 )
@@ -71,15 +87,20 @@ type jwk struct {
 
 // Validator provides a way to validate Google ID Tokens
 type Validator struct {
-	client   *cachingClient
-	rs256URL string
-	es256URL string
+	client *cachingClient
 }
 
 // ValidatorOptions provides a way to configure a [Validator].
 type ValidatorOptions struct {
 	// Client used to make requests to the certs URL. Optional.
 	Client *http.Client
-	// Custom certs URL for RS256 JWK to be used. If not provided, the default
-	// Google oauth2 endpoint will be used. Optional.
-	RS256CertsURL string
@@ -91,11 +112,14 @@ type ValidatorOptions struct {
-	// enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
-	// Logger will be used. Optional.
-	Logger *slog.Logger
 }
 
 // NewValidator creates a Validator that uses the options provided to configure
 // a the internal http.Client that will be used to make requests to fetch JWKs.
 func NewValidator(opts *ValidatorOptions) (*Validator, error) {
-	if opts == nil {
-		opts = &ValidatorOptions{}
-	}
@@ -107,6 +131,15 @@ func NewValidator(opts *ValidatorOptions) (*Validator, error) {
-	es256URL := opts.ES256CertsURL
-	logger := internallog.New(opts.Logger)
-	return &Validator{client: newCachingClient(client, logger), rs256URL: rs256URL, es256URL: es256URL}, nil
+	var client *http.Client
+	if opts != nil && opts.Client != nil {
+		client = opts.Client
+	} else {
+		client = internal.DefaultClient()
+	}
+	return &Validator{client: newCachingClient(client)}, nil
 }
 
 // Validate is used to validate the provided idToken with a known Google cert
@@ -161,7 +194,11 @@ func (v *Validator) validate(ctx context.Context, idToken string, audience strin
 		if err := v.validateRS256(ctx, header.KeyID, hashedContent, sig); err != nil {
 			return nil, err
 		}
-	case jwt.HeaderAlgES256:
+	case "ES256":
 		if err := v.validateES256(ctx, header.KeyID, hashedContent, sig); err != nil {
 			return nil, err
 		}
@@ -173,7 +210,11 @@ func (v *Validator) validate(ctx context.Context, idToken string, audience strin
 }
 
 func (v *Validator) validateRS256(ctx context.Context, keyID string, hashedContent []byte, sig []byte) error {
-	certResp, err := v.client.getCert(ctx, v.rs256CertsURL())
+	certResp, err := v.client.getCert(ctx, googleSACertsURL)
 	if err != nil {
 		return err
 	}
@@ -197,6 +238,7 @@ func (v *Validator) validateRS256(ctx context.Context, keyID string, hashedConte
 	return rsa.VerifyPKCS1v15(pk, crypto.SHA256, hashedContent, sig)
 }
 
-func (v *Validator) rs256CertsURL() string {
-	if v.rs256URL == "" {
-		return googleSACertsURL
@@ -206,6 +248,10 @@ func (v *Validator) validateES256(ctx context.Context, keyID string, hashedConte
-	certResp, err := v.client.getCert(ctx, v.es256CertsURL())
+func (v *Validator) validateES256(ctx context.Context, keyID string, hashedContent []byte, sig []byte) error {
+	certResp, err := v.client.getCert(ctx, googleIAPCertsURL)
 	if err != nil {
 		return err
 	}
@@ -235,6 +281,7 @@ func (v *Validator) validateES256(ctx context.Context, keyID string, hashedConte
 	return nil
 }
 
-func (v *Validator) es256CertsURL() string {
-	if v.es256URL == "" {
-		return googleIAPCertsURL
@@ -242,6 +289,8 @@ func (v *Validator) es256CertsURL() string {
-	return v.es256URL
-}
-
 func findMatchingKey(response *certResponse, keyID string) (*jwk, error) {
 	if response == nil {
 		return nil, fmt.Errorf("idtoken: cert response is nil")
diff --git a/vendor/cloud.google.com/go/auth/credentials/impersonate/idtoken.go b/vendor/cloud.google.com/go/auth/credentials/impersonate/idtoken.go
index 6c470822bd..5f873adeaf 100644
--- a/vendor/cloud.google.com/go/auth/credentials/impersonate/idtoken.go
+++ b/vendor/cloud.google.com/go/auth/credentials/impersonate/idtoken.go
@@ -15,6 +15,7 @@
 package impersonate
 
 import (
-	"errors"
-	"log/slog"
-	"net/http"
@@ -25,6 +26,20 @@ import (
-	"cloud.google.com/go/auth/httptransport"
-	"cloud.google.com/go/auth/internal"
-	"github.com/googleapis/gax-go/v2/internallog"
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"time"
+
+	"cloud.google.com/go/auth"
+	"cloud.google.com/go/auth/credentials"
+	"cloud.google.com/go/auth/httptransport"
+	"cloud.google.com/go/auth/internal"
 )
 
 // IDTokenOptions for generating an impersonated ID token.
@@ -53,6 +68,7 @@ type IDTokenOptions struct {
 	// when fetching tokens. If provided this should be a fully-authenticated
 	// client. Optional.
 	Client *http.Client
-	// UniverseDomain is the default service domain for a given Cloud universe.
-	// The default value is "googleapis.com". This is the universe domain
-	// configured for the client, which will be compared to the universe domain
@@ -63,6 +79,8 @@ type IDTokenOptions struct {
-	// enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
-	// logger will be used. Optional.
-	Logger *slog.Logger
 }
 
 func (o *IDTokenOptions) validate() error {
@@ -91,6 +109,7 @@ func NewIDTokenCredentials(opts *IDTokenOptions) (*auth.Credentials, error) {
 	if err := opts.validate(); err != nil {
 		return nil, err
 	}
-	client := opts.Client
-	creds := opts.Credentials
-	logger := internallog.New(opts.Logger)
@@ -101,21 +120,38 @@ func NewIDTokenCredentials(opts *IDTokenOptions) (*auth.Credentials, error) {
-				Scopes:           []string{defaultScope},
-				UseSelfSignedJWT: true,
-				Logger:           logger,
+
+	client := opts.Client
+	creds := opts.Credentials
+	if client == nil {
+		var err error
+		if creds == nil {
+			// TODO: test not signed jwt more
+			creds, err = credentials.DetectDefault(&credentials.DetectOptions{
+				Scopes:           []string{defaultScope},
+				UseSelfSignedJWT: true,
 			})
 			if err != nil {
 				return nil, err
 			}
 		}
 		client, err = httptransport.NewClient(&httptransport.Options{
-			Credentials:    creds,
-			UniverseDomain: opts.UniverseDomain,
-			Logger:         logger,
+			Credentials: creds,
 		})
 		if err != nil {
 			return nil, err
 		}
 	}
-	universeDomainProvider := resolveUniverseDomainProvider(creds)
-	var delegates []string
-	for _, v := range opts.Delegates {
@@ -139,3 +175,79 @@ func NewIDTokenCredentials(opts *IDTokenOptions) (*auth.Credentials, error) {
-		UniverseDomainProvider: universeDomainProvider,
-	}), nil
-}
+	itp := impersonatedIDTokenProvider{
+		client:          client,
+		targetPrincipal: opts.TargetPrincipal,
+		audience:        opts.Audience,
+		includeEmail:    opts.IncludeEmail,
+	}
+	for _, v := range opts.Delegates {
+		itp.delegates = append(itp.delegates, formatIAMServiceAccountName(v))
+	}
+
+	var udp auth.CredentialsPropertyProvider
+	if creds != nil {
+		udp = auth.CredentialsPropertyFunc(creds.UniverseDomain)
+	}
+	return auth.NewCredentials(&auth.CredentialsOptions{
+		TokenProvider: auth.NewCachedTokenProvider(itp, nil),
+		UniverseDomainProvider: udp,
+	}), nil
+}
+
+type generateIDTokenRequest struct {
+	Audience     string   `json:"audience"`
+	IncludeEmail bool     `json:"includeEmail"`
+	Delegates    []string `json:"delegates,omitempty"`
+}
+
+type generateIDTokenResponse struct {
+	Token string `json:"token"`
+}
+
+type impersonatedIDTokenProvider struct {
+	client *http.Client
+
+	targetPrincipal string
+	audience        string
+	includeEmail    bool
+	delegates       []string
+}
+
+func (i impersonatedIDTokenProvider) Token(ctx context.Context) (*auth.Token, error) {
+	genIDTokenReq := generateIDTokenRequest{
+		Audience:     i.audience,
+		IncludeEmail: i.includeEmail,
+		Delegates:    i.delegates,
+	}
+	bodyBytes, err := json.Marshal(genIDTokenReq)
+	if err != nil {
+		return nil, fmt.Errorf("impersonate: unable to marshal request: %w", err)
+	}
+
+	url := fmt.Sprintf("%s/v1/%s:generateIdToken", iamCredentialsEndpoint, formatIAMServiceAccountName(i.targetPrincipal))
+	req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(bodyBytes))
+	if err != nil {
+		return nil, fmt.Errorf("impersonate: unable to create request: %w", err)
+	}
+	req.Header.Set("Content-Type", "application/json")
+	resp, body, err := internal.DoRequest(i.client, req)
+	if err != nil {
+		return nil, fmt.Errorf("impersonate: unable to generate ID token: %w", err)
+	}
+	if c := resp.StatusCode; c < 200 || c > 299 {
+		return nil, fmt.Errorf("impersonate: status code %d: %s", c, body)
+	}
+
+	var generateIDTokenResp generateIDTokenResponse
+	if err := json.Unmarshal(body, &generateIDTokenResp); err != nil {
+		return nil, fmt.Errorf("impersonate: unable to parse response: %w", err)
+	}
+	return &auth.Token{
+		Value: generateIDTokenResp.Token,
+		// Generated ID tokens are good for one hour.
+		Expiry: time.Now().Add(1 * time.Hour),
+	}, nil
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/impersonate/impersonate.go b/vendor/cloud.google.com/go/auth/credentials/impersonate/impersonate.go
index 7d8efd54ef..16d66f8d44 100644
--- a/vendor/cloud.google.com/go/auth/credentials/impersonate/impersonate.go
+++ b/vendor/cloud.google.com/go/auth/credentials/impersonate/impersonate.go
@@ -20,7 +20,10 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"log/slog"
 	"net/http"
 	"strings"
 	"time"
@@ -29,11 +32,18 @@ import (
 	"cloud.google.com/go/auth/credentials"
 	"cloud.google.com/go/auth/httptransport"
 	"cloud.google.com/go/auth/internal"
-	"github.com/googleapis/gax-go/v2/internallog"
 )
 
 var (
 	universeDomainPlaceholder            = "UNIVERSE_DOMAIN"
+	iamCredentialsEndpoint               = "https://iamcredentials.googleapis.com"
 	iamCredentialsUniverseDomainEndpoint = "https://iamcredentials.UNIVERSE_DOMAIN"
 	oauth2Endpoint                       = "https://oauth2.googleapis.com"
 	errMissingTargetPrincipal            = errors.New("impersonate: target service account must be provided")
@@ -66,21 +76,31 @@ func NewCredentials(opts *CredentialsOptions) (*auth.Credentials, error) {
 
 	client := opts.Client
 	creds := opts.Credentials
-	logger := internallog.New(opts.Logger)
 	if client == nil {
 		var err error
 		if creds == nil {
 			creds, err = credentials.DetectDefault(&credentials.DetectOptions{
 				Scopes:           []string{defaultScope},
 				UseSelfSignedJWT: true,
-				Logger:           logger,
 			})
 			if err != nil {
 				return nil, err
 			}
 		}
-		client, err = httptransport.NewClient(transportOpts(opts, creds, logger))
+		client, err = httptransport.NewClient(transportOpts(opts, creds))
 		if err != nil {
 			return nil, err
 		}
@@ -105,10 +125,16 @@ func NewCredentials(opts *CredentialsOptions) (*auth.Credentials, error) {
 		targetPrincipal:        opts.TargetPrincipal,
 		lifetime:               fmt.Sprintf("%.fs", lifetime.Seconds()),
 		universeDomainProvider: universeDomainProvider,
-		logger:                 logger,
-	}
-	for _, v := range opts.Delegates {
-		its.delegates = append(its.delegates, internal.FormatIAMServiceAccountResource(v))
+	}
+	for _, v := range opts.Delegates {
+		its.delegates = append(its.delegates, formatIAMServiceAccountName(v))
 	}
 	its.scopes = make([]string, len(opts.Scopes))
 	copy(its.scopes, opts.Scopes)
@@ -130,10 +156,16 @@ func NewCredentials(opts *CredentialsOptions) (*auth.Credentials, error) {
 // is provided, it will be used in the transport for a validation ensuring that it
 // matches the universe domain in the base credentials. If opts.UniverseDomain
 // is not provided, this validation will be skipped.
-func transportOpts(opts *CredentialsOptions, creds *auth.Credentials, logger *slog.Logger) *httptransport.Options {
-	tOpts := &httptransport.Options{
-		Credentials: creds,
-		Logger:      logger,
+func transportOpts(opts *CredentialsOptions, creds *auth.Credentials) *httptransport.Options {
+	tOpts := &httptransport.Options{
+		Credentials: creds,
 	}
 	if opts.UniverseDomain == "" {
 		tOpts.InternalOptions = &httptransport.InternalOptions{
@@ -191,11 +223,14 @@ type CredentialsOptions struct {
 	// This field has no default value, and only if provided will it be used to
 	// verify the universe domain from the credentials. Optional.
 	UniverseDomain string
-	// Logger is used for debug logging. If provided, logging will be enabled
-	// at the loggers configured level. By default logging is disabled unless
-	// enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
-	// logger will be used. Optional.
-	Logger *slog.Logger
 }
 
 func (o *CredentialsOptions) validate() error {
@@ -214,6 +249,13 @@ func (o *CredentialsOptions) validate() error {
 	return nil
 }
 
+func formatIAMServiceAccountName(name string) string {
+	return fmt.Sprintf("projects/-/serviceAccounts/%s", name)
+}
+
 type generateAccessTokenRequest struct {
 	Delegates []string `json:"delegates,omitempty"`
 	Lifetime  string   `json:"lifetime,omitempty"`
@@ -226,10 +268,15 @@ type generateAccessTokenResponse struct {
 }
 
 type impersonatedTokenProvider struct {
-	client *http.Client
-	// universeDomain is used for endpoint construction.
-	universeDomainProvider auth.CredentialsPropertyProvider
-	logger                 *slog.Logger
+	client                 *http.Client
+	universeDomainProvider auth.CredentialsPropertyProvider
 
 	targetPrincipal string
 	lifetime        string
@@ -253,18 +300,28 @@ func (i impersonatedTokenProvider) Token(ctx context.Context) (*auth.Token, erro
 		return nil, err
 	}
 	endpoint := strings.Replace(iamCredentialsUniverseDomainEndpoint, universeDomainPlaceholder, universeDomain, 1)
-	url := fmt.Sprintf("%s/v1/%s:generateAccessToken", endpoint, internal.FormatIAMServiceAccountResource(i.targetPrincipal))
+	url := fmt.Sprintf("%s/v1/%s:generateAccessToken", endpoint, formatIAMServiceAccountName(i.targetPrincipal))
 	req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(b))
 	if err != nil {
 		return nil, fmt.Errorf("impersonate: unable to create request: %w", err)
 	}
 	req.Header.Set("Content-Type", "application/json")
-	i.logger.DebugContext(ctx, "impersonated token request", "request", internallog.HTTPRequest(req, b))
 	resp, body, err := internal.DoRequest(i.client, req)
 	if err != nil {
 		return nil, fmt.Errorf("impersonate: unable to generate access token: %w", err)
 	}
-	i.logger.DebugContext(ctx, "impersonated token response", "response", internallog.HTTPResponse(resp, body))
 	if c := resp.StatusCode; c < 200 || c > 299 {
 		return nil, fmt.Errorf("impersonate: status code %d: %s", c, body)
 	}
diff --git a/vendor/cloud.google.com/go/auth/credentials/impersonate/user.go b/vendor/cloud.google.com/go/auth/credentials/impersonate/user.go
index e5e1d65028..e611011dfc 100644
--- a/vendor/cloud.google.com/go/auth/credentials/impersonate/user.go
+++ b/vendor/cloud.google.com/go/auth/credentials/impersonate/user.go
@@ -20,7 +20,10 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"log/slog"
 	"net/http"
 	"net/url"
 	"strings"
@@ -28,11 +31,14 @@ import (
 
 	"cloud.google.com/go/auth"
 	"cloud.google.com/go/auth/internal"
-	"github.com/googleapis/gax-go/v2/internallog"
-)
-
-var (
-	iamCredentialsEndpoint = "https://iamcredentials.googleapis.com"
 )
 
 // user provides an auth flow for domain-wide delegation, setting
@@ -47,11 +53,18 @@ func user(opts *CredentialsOptions, client *http.Client, lifetime time.Duration,
 		subject:                opts.Subject,
 		lifetime:               lifetime,
 		universeDomainProvider: universeDomainProvider,
-		logger:                 internallog.New(opts.Logger),
-	}
-	u.delegates = make([]string, len(opts.Delegates))
-	for i, v := range opts.Delegates {
-		u.delegates[i] = internal.FormatIAMServiceAccountResource(v)
+	}
+	u.delegates = make([]string, len(opts.Delegates))
+	for i, v := range opts.Delegates {
+		u.delegates[i] = formatIAMServiceAccountName(v)
 	}
 	u.scopes = make([]string, len(opts.Scopes))
 	copy(u.scopes, opts.Scopes)
@@ -95,7 +108,10 @@ type exchangeTokenResponse struct {
 
 type userTokenProvider struct {
 	client *http.Client
-	logger *slog.Logger
 
 	targetPrincipal string
 	subject         string
@@ -147,18 +163,28 @@ func (u userTokenProvider) signJWT(ctx context.Context) (string, error) {
 	if err != nil {
 		return "", fmt.Errorf("impersonate: unable to marshal request: %w", err)
 	}
-	reqURL := fmt.Sprintf("%s/v1/%s:signJwt", iamCredentialsEndpoint, internal.FormatIAMServiceAccountResource(u.targetPrincipal))
+	reqURL := fmt.Sprintf("%s/v1/%s:signJwt", iamCredentialsEndpoint, formatIAMServiceAccountName(u.targetPrincipal))
 	req, err := http.NewRequestWithContext(ctx, "POST", reqURL, bytes.NewReader(bodyBytes))
 	if err != nil {
 		return "", fmt.Errorf("impersonate: unable to create request: %w", err)
 	}
 	req.Header.Set("Content-Type", "application/json")
-	u.logger.DebugContext(ctx, "impersonated user sign JWT request", "request", internallog.HTTPRequest(req, bodyBytes))
 	resp, body, err := internal.DoRequest(u.client, req)
 	if err != nil {
 		return "", fmt.Errorf("impersonate: unable to sign JWT: %w", err)
 	}
-	u.logger.DebugContext(ctx, "impersonated user sign JWT response", "response", internallog.HTTPResponse(resp, body))
 	if c := resp.StatusCode; c < 200 || c > 299 {
 		return "", fmt.Errorf("impersonate: status code %d: %s", c, body)
 	}
@@ -179,12 +205,18 @@ func (u userTokenProvider) exchangeToken(ctx context.Context, signedJWT string)
 	if err != nil {
 		return nil, err
 	}
-	u.logger.DebugContext(ctx, "impersonated user token exchange request", "request", internallog.HTTPRequest(req, []byte(v.Encode())))
 	resp, body, err := internal.DoRequest(u.client, req)
 	if err != nil {
 		return nil, fmt.Errorf("impersonate: unable to exchange token: %w", err)
 	}
-	u.logger.DebugContext(ctx, "impersonated user token exchange response", "response", internallog.HTTPResponse(resp, body))
 	if c := resp.StatusCode; c < 200 || c > 299 {
 		return nil, fmt.Errorf("impersonate: status code %d: %s", c, body)
 	}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go
index 9ecd1f64bd..a013095f4c 100644
--- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go
@@ -23,7 +23,10 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"log/slog"
 	"net/http"
 	"net/url"
 	"os"
@@ -33,7 +36,10 @@ import (
 	"time"
 
 	"cloud.google.com/go/auth/internal"
-	"github.com/googleapis/gax-go/v2/internallog"
 )
 
 var (
@@ -89,7 +95,10 @@ type awsSubjectProvider struct {
 	reqOpts *RequestOptions
 
 	Client *http.Client
-	logger *slog.Logger
 }
 
 func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error) {
@@ -195,12 +204,18 @@ func (sp *awsSubjectProvider) getAWSSessionToken(ctx context.Context) (string, e
 	}
 	req.Header.Set(awsIMDSv2SessionTTLHeader, awsIMDSv2SessionTTL)
 
-	sp.logger.DebugContext(ctx, "aws session token request", "request", internallog.HTTPRequest(req, nil))
 	resp, body, err := internal.DoRequest(sp.Client, req)
 	if err != nil {
 		return "", err
 	}
-	sp.logger.DebugContext(ctx, "aws session token response", "response", internallog.HTTPResponse(resp, body))
 	if resp.StatusCode != http.StatusOK {
 		return "", fmt.Errorf("credentials: unable to retrieve AWS session token: %s", body)
 	}
@@ -230,12 +245,18 @@ func (sp *awsSubjectProvider) getRegion(ctx context.Context, headers map[string]
 	for name, value := range headers {
 		req.Header.Add(name, value)
 	}
-	sp.logger.DebugContext(ctx, "aws region request", "request", internallog.HTTPRequest(req, nil))
 	resp, body, err := internal.DoRequest(sp.Client, req)
 	if err != nil {
 		return "", err
 	}
-	sp.logger.DebugContext(ctx, "aws region response", "response", internallog.HTTPResponse(resp, body))
 	if resp.StatusCode != http.StatusOK {
 		return "", fmt.Errorf("credentials: unable to retrieve AWS region - %s", body)
 	}
@@ -290,12 +311,18 @@ func (sp *awsSubjectProvider) getMetadataSecurityCredentials(ctx context.Context
 	for name, value := range headers {
 		req.Header.Add(name, value)
 	}
-	sp.logger.DebugContext(ctx, "aws security credential request", "request", internallog.HTTPRequest(req, nil))
 	resp, body, err := internal.DoRequest(sp.Client, req)
 	if err != nil {
 		return result, err
 	}
-	sp.logger.DebugContext(ctx, "aws security credential response", "response", internallog.HTTPResponse(resp, body))
 	if resp.StatusCode != http.StatusOK {
 		return result, fmt.Errorf("credentials: unable to retrieve AWS security credentials - %s", body)
 	}
@@ -317,12 +344,18 @@ func (sp *awsSubjectProvider) getMetadataRoleName(ctx context.Context, headers m
 		req.Header.Add(name, value)
 	}
 
-	sp.logger.DebugContext(ctx, "aws metadata role request", "request", internallog.HTTPRequest(req, nil))
 	resp, body, err := internal.DoRequest(sp.Client, req)
 	if err != nil {
 		return "", err
 	}
-	sp.logger.DebugContext(ctx, "aws metadata role response", "response", internallog.HTTPResponse(resp, body))
 	if resp.StatusCode != http.StatusOK {
 		return "", fmt.Errorf("credentials: unable to retrieve AWS role name - %s", body)
 	}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go
index a822064234..d31747e343 100644
--- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go
@@ -18,7 +18,10 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"log/slog"
 	"net/http"
 	"regexp"
 	"strconv"
@@ -29,7 +32,10 @@ import (
 	"cloud.google.com/go/auth/credentials/internal/impersonate"
 	"cloud.google.com/go/auth/credentials/internal/stsexchange"
 	"cloud.google.com/go/auth/internal/credsfile"
-	"github.com/googleapis/gax-go/v2/internallog"
 )
 
 const (
@@ -106,11 +112,14 @@ type Options struct {
 	// This is important for X509 credentials which should create a new client if the default was used
 	// but should respect a client explicitly passed in by the user.
 	IsDefaultClient bool
-	// Logger is used for debug logging. If provided, logging will be enabled
-	// at the loggers configured level. By default logging is disabled unless
-	// enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
-	// logger will be used. Optional.
-	Logger *slog.Logger
 }
 
 // SubjectTokenProvider can be used to supply a subject token to exchange for a
@@ -231,7 +240,10 @@ func NewTokenProvider(opts *Options) (auth.TokenProvider, error) {
 		return nil, err
 	}
 	opts.resolveTokenURL()
-	logger := internallog.New(opts.Logger)
 	stp, err := newSubjectTokenProvider(opts)
 	if err != nil {
 		return nil, err
@@ -246,7 +258,10 @@ func NewTokenProvider(opts *Options) (auth.TokenProvider, error) {
 		client: client,
 		opts:   opts,
 		stp:    stp,
-		logger: logger,
 	}
 
 	if opts.ServiceAccountImpersonationURL == "" {
@@ -263,7 +278,10 @@ func NewTokenProvider(opts *Options) (auth.TokenProvider, error) {
 		Scopes:               scopes,
 		Tp:                   auth.NewCachedTokenProvider(tp, nil),
 		TokenLifetimeSeconds: opts.ServiceAccountImpersonationLifetimeSeconds,
-		Logger:               logger,
 	})
 	if err != nil {
 		return nil, err
@@ -279,7 +297,10 @@ type subjectTokenProvider interface {
 // tokenProvider is the provider that handles external credentials. It is used to retrieve Tokens.
 type tokenProvider struct {
 	client *http.Client
-	logger *slog.Logger
 	opts   *Options
 	stp    subjectTokenProvider
 }
@@ -321,7 +342,10 @@ func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) {
 		Authentication: clientAuth,
 		Headers:        header,
 		ExtraOpts:      options,
-		Logger:         tp.logger,
 	})
 	if err != nil {
 		return nil, err
@@ -342,14 +366,20 @@ func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) {
 // newSubjectTokenProvider determines the type of credsfile.CredentialSource needed to create a
 // subjectTokenProvider
 func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) {
-	logger := internallog.New(o.Logger)
 	reqOpts := &RequestOptions{Audience: o.Audience, SubjectTokenType: o.SubjectTokenType}
 	if o.AwsSecurityCredentialsProvider != nil {
 		return &awsSubjectProvider{
 			securityCredentialsProvider: o.AwsSecurityCredentialsProvider,
 			TargetResource:              o.Audience,
 			reqOpts:                     reqOpts,
-			logger:                      logger,
 		}, nil
 	} else if o.SubjectTokenProvider != nil {
 		return &programmaticProvider{stp: o.SubjectTokenProvider, opts: reqOpts}, nil
@@ -366,7 +396,10 @@ func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) {
 			CredVerificationURL: o.CredentialSource.URL,
 			TargetResource:      o.Audience,
 			Client:              o.Client,
-			logger:              logger,
 		}
 		if o.CredentialSource.IMDSv2SessionTokenURL != "" {
 			awsProvider.IMDSv2SessionTokenURL = o.CredentialSource.IMDSv2SessionTokenURL
@@ -377,6 +410,7 @@ func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) {
 	} else if o.CredentialSource.File != "" {
 		return &fileSubjectProvider{File: o.CredentialSource.File, Format: o.CredentialSource.Format}, nil
 	} else if o.CredentialSource.URL != "" {
-		return &urlSubjectProvider{
-			URL:     o.CredentialSource.URL,
-			Headers: o.CredentialSource.Headers,
@@ -384,6 +418,9 @@ func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) {
-			Client: o.Client,
-			Logger: logger,
-		}, nil
+		return &urlSubjectProvider{URL: o.CredentialSource.URL, Headers: o.CredentialSource.Headers, Format: o.CredentialSource.Format, Client: o.Client}, nil
 	} else if o.CredentialSource.Executable != nil {
 		ec := o.CredentialSource.Executable
 		if ec.Command == "" {
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go
index 754ecf4fef..c9ee24a31e 100644
--- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go
@@ -19,12 +19,18 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"log/slog"
 	"net/http"
 
 	"cloud.google.com/go/auth/internal"
 	"cloud.google.com/go/auth/internal/credsfile"
-	"github.com/googleapis/gax-go/v2/internallog"
 )
 
 const (
@@ -40,7 +46,10 @@ type urlSubjectProvider struct {
 	Headers map[string]string
 	Format  *credsfile.Format
 	Client  *http.Client
-	Logger  *slog.Logger
 }
 
 func (sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error) {
@@ -52,12 +61,18 @@ func (sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error)
 	for key, val := range sp.Headers {
 		req.Header.Add(key, val)
 	}
-	sp.Logger.DebugContext(ctx, "url subject token request", "request", internallog.HTTPRequest(req, nil))
 	resp, body, err := internal.DoRequest(sp.Client, req)
 	if err != nil {
 		return "", fmt.Errorf("credentials: invalid response when retrieving subject token: %w", err)
 	}
-	sp.Logger.DebugContext(ctx, "url subject token response", "response", internallog.HTTPResponse(resp, body))
 	if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices {
 		return "", fmt.Errorf("credentials: status code %d: %s", c, body)
 	}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go
index ae39206e5f..1010ef6f47 100644
--- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go
@@ -17,14 +17,20 @@ package externalaccountuser
 
 import (
 	"context"
 	"errors"
-	"log/slog"
 	"net/http"
 	"time"
 
 	"cloud.google.com/go/auth"
 	"cloud.google.com/go/auth/credentials/internal/stsexchange"
 	"cloud.google.com/go/auth/internal"
"github.com/googleapis/gax-go/v2/internallog" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // Options stores the configuration for fetching tokens with external authorized @@ -53,8 +59,11 @@ type Options struct { // Client for token request. Client *http.Client +<<<<<<< HEAD // Logger for logging. Logger *slog.Logger +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (c *Options) validate() bool { @@ -94,7 +103,10 @@ func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) { RefreshToken: opts.RefreshToken, Authentication: clientAuth, Headers: headers, +<<<<<<< HEAD Logger: internallog.New(tp.o.Logger), +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }) if err != nil { return nil, err diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go index c2d320fdf4..230795f5e8 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go @@ -16,13 +16,20 @@ package gdch import ( "context" +<<<<<<< HEAD "crypto" +======= + "crypto/rsa" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "crypto/tls" "crypto/x509" "encoding/json" "errors" "fmt" +<<<<<<< HEAD "log/slog" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "net/http" "net/url" "os" @@ -33,7 +40,10 @@ import ( "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/credsfile" "cloud.google.com/go/auth/internal/jwt" +<<<<<<< HEAD "github.com/googleapis/gax-go/v2/internallog" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const ( @@ -53,7 +63,10 @@ var ( type Options struct { STSAudience string Client *http.Client +<<<<<<< HEAD Logger *slog.Logger +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider] from a @@ -65,7 +78,11 @@ func NewTokenProvider(f *credsfile.GDCHServiceAccountFile, o *Options) (auth.Tok if o.STSAudience == "" { return nil, errors.New("credentials: STSAudience must be set for the GDCH auth flows") } +<<<<<<< HEAD signer, err := internal.ParseKey([]byte(f.PrivateKey)) +======= + pk, err := internal.ParseKey([]byte(f.PrivateKey)) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -78,11 +95,18 @@ func NewTokenProvider(f *credsfile.GDCHServiceAccountFile, o *Options) (auth.Tok serviceIdentity: fmt.Sprintf("system:serviceaccount:%s:%s", f.Project, f.Name), tokenURL: f.TokenURL, aud: o.STSAudience, +<<<<<<< HEAD signer: signer, pkID: f.PrivateKeyID, certPool: certPool, client: o.Client, logger: internallog.New(o.Logger), +======= + pk: pk, + pkID: f.PrivateKeyID, + certPool: certPool, + client: o.Client, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return tp, nil } @@ -101,12 +125,19 @@ type gdchProvider struct { serviceIdentity string tokenURL string aud string +<<<<<<< HEAD signer crypto.Signer +======= + pk *rsa.PrivateKey +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) pkID string certPool *x509.CertPool client *http.Client +<<<<<<< HEAD logger *slog.Logger +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) { @@ -125,7 +156,11 @@ func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) { Type: jwt.HeaderType, KeyID: string(g.pkID), } +<<<<<<< HEAD payload, 
-	payload, err := jwt.EncodeJWS(&h, &claims, g.signer)
+	payload, err := jwt.EncodeJWS(&h, &claims, g.pk)
 	if err != nil {
 		return nil, err
 	}
@@ -141,12 +176,18 @@ func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) {
 		return nil, err
 	}
 	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
-	g.logger.DebugContext(ctx, "gdch token request", "request", internallog.HTTPRequest(req, []byte(v.Encode())))
 	resp, body, err := internal.DoRequest(g.client, req)
 	if err != nil {
 		return nil, fmt.Errorf("credentials: cannot fetch token: %w", err)
 	}
-	g.logger.DebugContext(ctx, "gdch token response", "response", internallog.HTTPResponse(resp, body))
 	if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices {
 		return nil, &auth.Error{
 			Response: resp,
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go
index b3a99261fa..9958da8f39 100644
--- a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go
@@ -20,13 +20,19 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"log/slog"
 	"net/http"
 	"time"
 
 	"cloud.google.com/go/auth"
 	"cloud.google.com/go/auth/internal"
-	"github.com/googleapis/gax-go/v2/internallog"
 )
 
 const (
@@ -76,11 +82,14 @@ type Options struct {
 	// Client configures the underlying client used to make network requests
 	// when fetching tokens. Required.
 	Client *http.Client
-	// Logger is used for debug logging. If provided, logging will be enabled
-	// at the loggers configured level. By default logging is disabled unless
-	// enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
-	// logger will be used. Optional.
-	Logger *slog.Logger
 }
 
 func (o *Options) validate() error {
@@ -95,7 +104,10 @@ func (o *Options) validate() error {
 
 // Token performs the exchange to get a temporary service account token to allow access to GCP.
 func (o *Options) Token(ctx context.Context) (*auth.Token, error) {
-	logger := internallog.New(o.Logger)
 	lifetime := defaultTokenLifetime
 	if o.TokenLifetimeSeconds != 0 {
 		lifetime = fmt.Sprintf("%ds", o.TokenLifetimeSeconds)
@@ -117,12 +129,18 @@ func (o *Options) Token(ctx context.Context) (*auth.Token, error) {
 	if err := setAuthHeader(ctx, o.Tp, req); err != nil {
 		return nil, err
 	}
-	logger.DebugContext(ctx, "impersonated token request", "request", internallog.HTTPRequest(req, b))
 	resp, body, err := internal.DoRequest(o.Client, req)
 	if err != nil {
 		return nil, fmt.Errorf("credentials: unable to generate access token: %w", err)
 	}
-	logger.DebugContext(ctx, "impersonated token response", "response", internallog.HTTPResponse(resp, body))
 	if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices {
 		return nil, fmt.Errorf("credentials: status code %d: %s", c, body)
 	}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go
index e1d2b15034..8def2fdfad 100644
--- a/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go
@@ -19,7 +19,10 @@ import (
 	"encoding/base64"
 	"encoding/json"
 	"fmt"
-	"log/slog"
 	"net/http"
 	"net/url"
 	"strconv"
@@ -27,7 +30,10 @@ import (
 
 	"cloud.google.com/go/auth"
 	"cloud.google.com/go/auth/internal"
-	"github.com/googleapis/gax-go/v2/internallog"
 )
 
 const (
@@ -42,7 +48,10 @@ const (
 
 // Options stores the configuration for making an sts exchange request.
 type Options struct {
 	Client         *http.Client
-	Logger         *slog.Logger
 	Endpoint       string
 	Request        *TokenRequest
 	Authentication ClientAuthentication
@@ -83,7 +92,10 @@ func ExchangeToken(ctx context.Context, opts *Options) (*TokenResponse, error) {
 func doRequest(ctx context.Context, opts *Options, data url.Values) (*TokenResponse, error) {
 	opts.Authentication.InjectAuthentication(data, opts.Headers)
 	encodedData := data.Encode()
-	logger := internallog.New(opts.Logger)
 
 	req, err := http.NewRequestWithContext(ctx, "POST", opts.Endpoint, strings.NewReader(encodedData))
 	if err != nil {
@@ -97,12 +109,18 @@ func doRequest(ctx context.Context, opts *Options, data url.Values) (*TokenRespo
 	}
 	req.Header.Set("Content-Length", strconv.Itoa(len(encodedData)))
-	logger.DebugContext(ctx, "sts token request", "request", internallog.HTTPRequest(req, []byte(encodedData)))
 	resp, body, err := internal.DoRequest(opts.Client, req)
 	if err != nil {
 		return nil, fmt.Errorf("credentials: invalid response from Secure Token Server: %w", err)
 	}
-	logger.DebugContext(ctx, "sts token response", "response", internallog.HTTPResponse(resp, body))
 	if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices {
 		return nil, fmt.Errorf("credentials: status code %d: %s", c, body)
 	}
diff --git a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go
index 8d335ccecc..6bf4c3440c 100644
--- a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go
+++ b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go
@@ -16,10 +16,16 @@ package credentials
 
 import (
 	"context"
-	"crypto"
-	"errors"
-	"fmt"
-	"log/slog"
+	"crypto/rsa"
+	"errors"
+	"fmt"
 	"strings"
 	"time"
 
@@ -40,7 +46,11 @@ func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions
 	if len(opts.scopes()) == 0 && opts.Audience == "" {
 		return nil, errors.New("credentials: both scopes and audience are empty")
 	}
-	signer, err := internal.ParseKey([]byte(f.PrivateKey))
+	pk, err := internal.ParseKey([]byte(f.PrivateKey))
 	if err != nil {
 		return nil, fmt.Errorf("credentials: could not parse key: %w", err)
 	}
@@ -48,9 +58,14 @@ func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions
 		email:    f.ClientEmail,
 		audience: opts.Audience,
 		scopes:   opts.scopes(),
-		signer:   signer,
-		pkID:     f.PrivateKeyID,
-		logger:   opts.logger(),
+		pk:       pk,
+		pkID:     f.PrivateKeyID,
 	}, nil
 }
 
@@ -58,9 +73,14 @@ type selfSignedTokenProvider struct {
 	email    string
 	audience string
 	scopes   []string
-	signer   crypto.Signer
-	pkID     string
-	logger   *slog.Logger
+	pk       *rsa.PrivateKey
+	pkID     string
 }
 
 func (tp *selfSignedTokenProvider) Token(context.Context) (*auth.Token, error) {
@@ -80,10 +100,18 @@ func (tp *selfSignedTokenProvider) Token(context.Context) (*auth.Token, error) {
 		Type:  jwt.HeaderType,
 		KeyID: string(tp.pkID),
 	}
-	tok, err := jwt.EncodeJWS(h, c, tp.signer)
-	if err != nil {
return nil, fmt.Errorf("credentials: could not encode JWT: %w", err) } tp.logger.Debug("created self-signed JWT", "token", tok) return &auth.Token{Value: tok, Type: internal.TokenTypeBearer, Expiry: exp}, nil +======= + msg, err := jwt.EncodeJWS(h, c, tp.pk) + if err != nil { + return nil, fmt.Errorf("credentials: could not encode JWT: %w", err) + } + return &auth.Token{Value: msg, Type: internal.TokenTypeBearer, Expiry: exp}, nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go index d781c3e49a..e356fb2db1 100644 --- a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go +++ b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go @@ -66,12 +66,21 @@ func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, o *Options) bool if tok == nil { return false } +<<<<<<< HEAD if tok.MetadataString("auth.google.tokenSource") != "compute-metadata" { return false } if o.InternalOptions != nil && o.InternalOptions.EnableNonDefaultSAForDirectPath { return true } +======= + if o.InternalOptions != nil && o.InternalOptions.EnableNonDefaultSAForDirectPath { + return true + } + if tok.MetadataString("auth.google.tokenSource") != "compute-metadata" { + return false + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if tok.MetadataString("auth.google.serviceAccount") != "default" { return false } diff --git a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go index 95f259037f..d9b5f1cb30 100644 --- a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go +++ b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go @@ -21,7 +21,10 @@ import ( "crypto/tls" "errors" "fmt" +<<<<<<< HEAD "log/slog" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "net/http" "os" "sync" @@ -30,7 +33,11 @@ import ( "cloud.google.com/go/auth/credentials" "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/transport" +<<<<<<< HEAD "github.com/googleapis/gax-go/v2/internallog" +======= + "go.opencensus.io/plugin/ocgrpc" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "google.golang.org/grpc" grpccreds "google.golang.org/grpc/credentials" @@ -118,11 +125,14 @@ type Options struct { // APIKey specifies an API key to be used as the basis for authentication. // If set DetectOpts are ignored. APIKey string +<<<<<<< HEAD // Logger is used for debug logging. If provided, logging will be enabled // at the loggers configured level. By default logging is disabled unless // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default // logger will be used. Optional. Logger *slog.Logger +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // InternalOptions are NOT meant to be set directly by consumers of this // package, they should only be set by generated client code. 
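Since this hunk moves the self-signed JWT path from crypto.Signer back to *rsa.PrivateKey, here is a compilable sketch of the RS256 JWS shape that jwt.EncodeJWS produces under the older API: base64url(header).base64url(claims), signed with RSA PKCS#1 v1.5 over a SHA-256 digest. The key and claim values are illustrative only.

// Sketch of RS256 JWS assembly as done by the rsa.SignPKCS1v15 branch
// of this patch. All identity values below are placeholders.
package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	enc := base64.RawURLEncoding
	head := enc.EncodeToString([]byte(`{"alg":"RS256","typ":"JWT"}`))
	claims := enc.EncodeToString([]byte(`{"iss":"sa@project.iam.gserviceaccount.com","aud":"https://example.googleapis.com/"}`))
	ss := head + "." + claims

	digest := sha256.Sum256([]byte(ss))
	sig, err := rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, digest[:])
	if err != nil {
		panic(err)
	}
	fmt.Println(ss + "." + enc.EncodeToString(sig))
}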
diff --git a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go
index d781c3e49a..e356fb2db1 100644
--- a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go
+++ b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go
@@ -66,12 +66,21 @@ func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, o *Options) bool
 	if tok == nil {
 		return false
 	}
-	if tok.MetadataString("auth.google.tokenSource") != "compute-metadata" {
-		return false
-	}
-	if o.InternalOptions != nil && o.InternalOptions.EnableNonDefaultSAForDirectPath {
-		return true
-	}
+	if o.InternalOptions != nil && o.InternalOptions.EnableNonDefaultSAForDirectPath {
+		return true
+	}
+	if tok.MetadataString("auth.google.tokenSource") != "compute-metadata" {
+		return false
+	}
 	if tok.MetadataString("auth.google.serviceAccount") != "default" {
 		return false
 	}
diff --git a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go
index 95f259037f..d9b5f1cb30 100644
--- a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go
+++ b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go
@@ -21,7 +21,10 @@ import (
 	"crypto/tls"
 	"errors"
 	"fmt"
-	"log/slog"
 	"net/http"
 	"os"
 	"sync"
@@ -30,7 +33,11 @@ import (
 	"cloud.google.com/go/auth/credentials"
 	"cloud.google.com/go/auth/internal"
 	"cloud.google.com/go/auth/internal/transport"
-	"github.com/googleapis/gax-go/v2/internallog"
+	"go.opencensus.io/plugin/ocgrpc"
 	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
 	"google.golang.org/grpc"
 	grpccreds "google.golang.org/grpc/credentials"
@@ -118,11 +125,14 @@ type Options struct {
 	// APIKey specifies an API key to be used as the basis for authentication.
 	// If set DetectOpts are ignored.
 	APIKey string
-	// Logger is used for debug logging. If provided, logging will be enabled
-	// at the loggers configured level. By default logging is disabled unless
-	// enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
-	// logger will be used. Optional.
-	Logger *slog.Logger
 	// InternalOptions are NOT meant to be set directly by consumers of this
 	// package, they should only be set by generated client code.
@@ -138,10 +148,13 @@ func (o *Options) client() *http.Client {
 	return nil
 }

-func (o *Options) logger() *slog.Logger {
-	return internallog.New(o.Logger)
-}
-
 func (o *Options) validate() error {
 	if o == nil {
 		return errors.New("grpctransport: opts required to be non-nil")
@@ -183,9 +196,12 @@ func (o *Options) resolveDetectOptions() *credentials.DetectOptions {
 		do.Client = transport.DefaultHTTPClientWithTLS(tlsConfig)
 		do.TokenURL = credentials.GoogleMTLSTokenURL
 	}
-	if do.Logger == nil {
-		do.Logger = o.logger()
-	}
 	return do
 }

@@ -254,7 +270,10 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er
 		ClientCertProvider: opts.ClientCertProvider,
 		Client:             opts.client(),
 		UniverseDomain:     opts.UniverseDomain,
-		Logger:             opts.logger(),
 	}
 	if io := opts.InternalOptions; io != nil {
 		tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate
@@ -332,6 +351,10 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er
 	// Add tracing, but before the other options, so that clients can override the
 	// gRPC stats handler.
 	// This assumes that gRPC options are processed in order, left to right.
+	grpcOpts = addOCStatsHandler(grpcOpts, opts)
 	grpcOpts = addOpenTelemetryStatsHandler(grpcOpts, opts)
 	grpcOpts = append(grpcOpts, opts.GRPCDialOpts...)

@@ -430,6 +453,16 @@ func (c *grpcCredentialsProvider) RequireTransportSecurity() bool {
 	return c.secure
 }

+func addOCStatsHandler(dialOpts []grpc.DialOption, opts *Options) []grpc.DialOption {
+	if opts.DisableTelemetry {
+		return dialOpts
+	}
+	return append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{}))
+}
+
 func addOpenTelemetryStatsHandler(dialOpts []grpc.DialOption, opts *Options) []grpc.DialOption {
 	if opts.DisableTelemetry {
 		return dialOpts
diff --git a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go
index 5758e85b5d..9962965a9c 100644
--- a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go
+++ b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go
@@ -20,14 +20,20 @@ import (
 	"crypto/tls"
 	"errors"
 	"fmt"
-	"log/slog"
 	"net/http"

 	"cloud.google.com/go/auth"
 	detect "cloud.google.com/go/auth/credentials"
 	"cloud.google.com/go/auth/internal"
 	"cloud.google.com/go/auth/internal/transport"
-	"github.com/googleapis/gax-go/v2/internallog"
 )

 // ClientCertProvider is a function that returns a TLS client certificate to be
@@ -71,11 +77,14 @@ type Options struct {
 	// configured for the client, which will be compared to the universe domain
 	// that is separately configured for the credentials.
 	UniverseDomain string
-	// Logger is used for debug logging. If provided, logging will be enabled
-	// at the loggers configured level. By default logging is disabled unless
-	// enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
-	// logger will be used. Optional.
-	Logger *slog.Logger
 	// InternalOptions are NOT meant to be set directly by consumers of this
 	// package, they should only be set by generated client code.
@@ -108,10 +117,13 @@ func (o *Options) client() *http.Client {
 	return nil
 }

-func (o *Options) logger() *slog.Logger {
-	return internallog.New(o.Logger)
-}
-
 func (o *Options) resolveDetectOptions() *detect.DetectOptions {
 	io := o.InternalOptions
 	// soft-clone these so we are not updating a ref the user holds and may reuse
@@ -136,9 +148,12 @@ func (o *Options) resolveDetectOptions() *detect.DetectOptions {
 		do.Client = transport.DefaultHTTPClientWithTLS(tlsConfig)
 		do.TokenURL = detect.GoogleMTLSTokenURL
 	}
-	if do.Logger == nil {
-		do.Logger = o.logger()
-	}
 	return do
 }

@@ -211,7 +226,10 @@ func NewClient(opts *Options) (*http.Client, error) {
 		ClientCertProvider: opts.ClientCertProvider,
 		Client:             opts.client(),
 		UniverseDomain:     opts.UniverseDomain,
-		Logger:             opts.logger(),
 	}
 	if io := opts.InternalOptions; io != nil {
 		tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate
diff --git a/vendor/cloud.google.com/go/auth/httptransport/trace.go b/vendor/cloud.google.com/go/auth/httptransport/trace.go
new file mode 100644
index 0000000000..467c477c04
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/httptransport/trace.go
@@ -0,0 +1,93 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httptransport
+
+import (
+	"encoding/binary"
+	"encoding/hex"
+	"fmt"
+	"net/http"
+	"strconv"
+	"strings"
+
+	"go.opencensus.io/trace"
+	"go.opencensus.io/trace/propagation"
+)
+
+const (
+	httpHeaderMaxSize = 200
+	cloudTraceHeader  = `X-Cloud-Trace-Context`
+)
+
+// asserts the httpFormat fulfills this foreign interface
+var _ propagation.HTTPFormat = (*httpFormat)(nil)
+
+// httpFormat implements propagation.httpFormat to propagate
+// traces in HTTP headers for Google Cloud Platform and Cloud Trace.
+type httpFormat struct{}
+
+// SpanContextFromRequest extracts a Cloud Trace span context from incoming requests.
+func (f *httpFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
+	h := req.Header.Get(cloudTraceHeader)
+	// See https://cloud.google.com/trace/docs/faq for the header HTTPFormat.
+	// Return if the header is empty or missing, or if the header is unreasonably
+	// large, to avoid making unnecessary copies of a large string.
+	if h == "" || len(h) > httpHeaderMaxSize {
+		return trace.SpanContext{}, false
+	}
+
+	// Parse the trace id field.
+	slash := strings.Index(h, `/`)
+	if slash == -1 {
+		return trace.SpanContext{}, false
+	}
+	tid, h := h[:slash], h[slash+1:]
+
+	buf, err := hex.DecodeString(tid)
+	if err != nil {
+		return trace.SpanContext{}, false
+	}
+	copy(sc.TraceID[:], buf)
+
+	// Parse the span id field.
+	spanstr := h
+	semicolon := strings.Index(h, `;`)
+	if semicolon != -1 {
+		spanstr, h = h[:semicolon], h[semicolon+1:]
+	}
+	sid, err := strconv.ParseUint(spanstr, 10, 64)
+	if err != nil {
+		return trace.SpanContext{}, false
+	}
+	binary.BigEndian.PutUint64(sc.SpanID[:], sid)
+
+	// Parse the options field, options field is optional.
+	if !strings.HasPrefix(h, "o=") {
+		return sc, true
+	}
+	o, err := strconv.ParseUint(h[2:], 10, 32)
+	if err != nil {
+		return trace.SpanContext{}, false
+	}
+	sc.TraceOptions = trace.TraceOptions(o)
+	return sc, true
+}
+
+// SpanContextToRequest modifies the given request to include a Cloud Trace header.
+func (f *httpFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) {
+	sid := binary.BigEndian.Uint64(sc.SpanID[:])
+	header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions))
+	req.Header.Set(cloudTraceHeader, header)
+}
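The new trace.go round-trips the X-Cloud-Trace-Context header, whose wire format is TRACE_ID/SPAN_ID;o=OPTIONS: 32 hex characters of trace ID, a decimal span ID, and optional flags. A small sketch of parsing that format outside the vendored type (the header value is an example):

// Parses the X-Cloud-Trace-Context value format handled above.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	header := "105445aa7843bc8bf206b12000100000/123;o=1"
	traceID, rest, ok := strings.Cut(header, "/")
	if !ok {
		panic("missing span id")
	}
	spanStr, opts, _ := strings.Cut(rest, ";o=")
	spanID, err := strconv.ParseUint(spanStr, 10, 64)
	if err != nil {
		panic(err)
	}
	fmt.Printf("trace=%s span=%d options=%q\n", traceID, spanID, opts)
}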
diff --git a/vendor/cloud.google.com/go/auth/httptransport/transport.go b/vendor/cloud.google.com/go/auth/httptransport/transport.go
index ee215b6dc6..2c51836db2 100644
--- a/vendor/cloud.google.com/go/auth/httptransport/transport.go
+++ b/vendor/cloud.google.com/go/auth/httptransport/transport.go
@@ -27,6 +27,10 @@ import (
 	"cloud.google.com/go/auth/internal"
 	"cloud.google.com/go/auth/internal/transport"
 	"cloud.google.com/go/auth/internal/transport/cert"
+	"go.opencensus.io/plugin/ochttp"
 	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
 	"golang.org/x/net/http2"
 )
@@ -42,7 +46,14 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err
 		headers: headers,
 	}
 	var trans http.RoundTripper = ht
-	trans = addOpenTelemetryTransport(trans, opts)
+	// Give OpenTelemetry precedence over OpenCensus in case user configuration
+	// causes both to write the same header (`X-Cloud-Trace-Context`).
+	trans = addOpenTelemetryTransport(trans, opts)
+	trans = addOCTransport(trans, opts)
 	switch {
 	case opts.DisableAuthentication:
 		// Do nothing.
@@ -175,6 +186,19 @@ func addOpenTelemetryTransport(trans http.RoundTripper, opts *Options) http.Roun
 	return otelhttp.NewTransport(trans)
 }

+func addOCTransport(trans http.RoundTripper, opts *Options) http.RoundTripper {
+	if opts.DisableTelemetry {
+		return trans
+	}
+	return &ochttp.Transport{
+		Base:        trans,
+		Propagation: &httpFormat{},
+	}
+}
+
 type authTransport struct {
 	creds *auth.Credentials
 	base  http.RoundTripper
diff --git a/vendor/cloud.google.com/go/auth/internal/compute/compute.go b/vendor/cloud.google.com/go/auth/internal/compute/compute.go
index 05c7e8bdd4..634b590b4f 100644
--- a/vendor/cloud.google.com/go/auth/internal/compute/compute.go
+++ b/vendor/cloud.google.com/go/auth/internal/compute/compute.go
@@ -31,7 +31,12 @@ var (
 // This is a copy of the gRPC internal googlecloud.OnGCE() func at:
 // https://github.com/grpc/grpc-go/blob/master/internal/googlecloud/googlecloud.go
 // The functionality is similar to the metadata.OnGCE() func at:
 // https://github.com/googleapis/google-cloud-go/blob/main/compute/metadata/metadata.go
+//
 // The difference is that OnComputeEngine() does not perform HTTP or DNS check on the metadata server.
 // In particular, OnComputeEngine() will return false on Serverless.
 func OnComputeEngine() bool {
diff --git a/vendor/cloud.google.com/go/auth/internal/internal.go b/vendor/cloud.google.com/go/auth/internal/internal.go
index 6a8eab6eb9..5a1e2ca845 100644
--- a/vendor/cloud.google.com/go/auth/internal/internal.go
+++ b/vendor/cloud.google.com/go/auth/internal/internal.go
@@ -16,7 +16,11 @@ package internal

 import (
 	"context"
-	"crypto"
+	"crypto/rsa"
 	"crypto/x509"
 	"encoding/json"
 	"encoding/pem"
@@ -72,27 +76,45 @@ func DefaultClient() *http.Client {
 }

 // ParseKey converts the binary contents of a private key file
-// to an crypto.Signer. It detects whether the private key is in a
-// PEM container or not. If so, it extracts the the private key
-// from PEM container before conversion. It only supports PEM
-// containers with no passphrase.
-func ParseKey(key []byte) (crypto.Signer, error) {
+// to an *rsa.PrivateKey. It detects whether the private key is in a
+// PEM container or not. If so, it extracts the private key
+// from the PEM container before conversion. It only supports PEM
+// containers with no passphrase.
+func ParseKey(key []byte) (*rsa.PrivateKey, error) {
 	block, _ := pem.Decode(key)
 	if block != nil {
 		key = block.Bytes
 	}
-	var parsedKey crypto.PrivateKey
-	var err error
-	parsedKey, err = x509.ParsePKCS8PrivateKey(key)
+	parsedKey, err := x509.ParsePKCS8PrivateKey(key)
 	if err != nil {
 		parsedKey, err = x509.ParsePKCS1PrivateKey(key)
 		if err != nil {
 			return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8: %w", err)
 		}
 	}
-	parsed, ok := parsedKey.(crypto.Signer)
-	if !ok {
-		return nil, errors.New("private key is not a signer")
+	parsed, ok := parsedKey.(*rsa.PrivateKey)
+	if !ok {
+		return nil, errors.New("private key is invalid")
 	}
 	return parsed, nil
 }
@@ -181,7 +203,10 @@ func (p StaticProperty) GetProperty(context.Context) (string, error) {
 // ComputeUniverseDomainProvider fetches the credentials universe domain from
 // the google cloud metadata service.
 type ComputeUniverseDomainProvider struct {
-	MetadataClient     *metadata.Client
 	universeDomainOnce sync.Once
 	universeDomain     string
 	universeDomainErr  error
@@ -191,7 +216,11 @@ type ComputeUniverseDomainProvider struct {
 // metadata service.
 func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string, error) {
 	c.universeDomainOnce.Do(func() {
-		c.universeDomain, c.universeDomainErr = getMetadataUniverseDomain(ctx, c.MetadataClient)
+		c.universeDomain, c.universeDomainErr = getMetadataUniverseDomain(ctx)
 	})
 	if c.universeDomainErr != nil {
 		return "", c.universeDomainErr
@@ -200,6 +229,7 @@ func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string
 }

 // httpGetMetadataUniverseDomain is a package var for unit test substitution.
-var httpGetMetadataUniverseDomain = func(ctx context.Context, client *metadata.Client) (string, error) {
-	ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
-	defer cancel()
-	return client.GetWithContext(ctx, "universe/universe-domain")
-}
-
-func getMetadataUniverseDomain(ctx context.Context, client *metadata.Client) (string, error) {
-	universeDomain, err := httpGetMetadataUniverseDomain(ctx, client)
+var httpGetMetadataUniverseDomain = func(ctx context.Context) (string, error) {
+	ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
+	defer cancel()
+	return metadata.GetWithContext(ctx, "universe/universe-domain")
+}
+
+func getMetadataUniverseDomain(ctx context.Context) (string, error) {
+	universeDomain, err := httpGetMetadataUniverseDomain(ctx)
 	if err == nil {
 		return universeDomain, nil
 	}
@@ -217,9 +257,12 @@ func getMetadataUniverseDomain(ctx context.Context, client *metadata.Client) (st
 	}
 	return "", err
 }
-
-// FormatIAMServiceAccountResource sets a service account name in an IAM resource
-// name.
-func FormatIAMServiceAccountResource(name string) string {
-	return fmt.Sprintf("projects/-/serviceAccounts/%s", name)
-}
diff --git a/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go
index 9bd55f510c..a6a65b2f55 100644
--- a/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go
+++ b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go
@@ -111,7 +111,11 @@ func (c *Claims) encode() (string, error) {
 }

 // EncodeJWS encodes the data using the provided key as a JSON web signature.
-func EncodeJWS(header *Header, c *Claims, signer crypto.Signer) (string, error) {
+func EncodeJWS(header *Header, c *Claims, key *rsa.PrivateKey) (string, error) {
 	head, err := header.encode()
 	if err != nil {
 		return "", err
@@ -123,7 +127,11 @@ func EncodeJWS(header *Header, c *Claims, signer crypto.Signer) (string, error)
 	ss := fmt.Sprintf("%s.%s", head, claims)
 	h := sha256.New()
 	h.Write([]byte(ss))
-	sig, err := signer.Sign(rand.Reader, h.Sum(nil), crypto.SHA256)
+	sig, err := rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil))
 	if err != nil {
 		return "", err
 	}
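To make the ParseKey change above concrete, this is the parsing strategy in isolation: strip an optional PEM envelope, try PKCS#8 first, fall back to PKCS#1, and require an RSA key in this version of the library. The round-trip in main is just a self-test, not part of the vendored code.

// Self-contained sketch of the PKCS#8-then-PKCS#1 fallback used by ParseKey.
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"errors"
	"fmt"
)

func parseRSAKey(data []byte) (*rsa.PrivateKey, error) {
	if block, _ := pem.Decode(data); block != nil {
		data = block.Bytes // unwrap the PEM container
	}
	parsed, err := x509.ParsePKCS8PrivateKey(data)
	if err != nil {
		// Fall back to the older PKCS#1 encoding.
		return x509.ParsePKCS1PrivateKey(data)
	}
	key, ok := parsed.(*rsa.PrivateKey)
	if !ok {
		return nil, errors.New("private key is not an RSA key")
	}
	return key, nil
}

func main() {
	k, _ := rsa.GenerateKey(rand.Reader, 2048)
	der := x509.MarshalPKCS1PrivateKey(k)
	if _, err := parseRSAKey(der); err != nil {
		panic(err)
	}
	fmt.Println("parsed OK")
}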
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cba.go b/vendor/cloud.google.com/go/auth/internal/transport/cba.go
index 2f922f7dfe..8e49691967 100644
--- a/vendor/cloud.google.com/go/auth/internal/transport/cba.go
+++ b/vendor/cloud.google.com/go/auth/internal/transport/cba.go
@@ -20,7 +20,10 @@ import (
 	"crypto/x509"
 	"errors"
 	"log"
-	"log/slog"
 	"net"
 	"net/http"
 	"net/url"
@@ -64,7 +67,10 @@ type Options struct {
 	UniverseDomain      string
 	EnableDirectPath    bool
 	EnableDirectPathXds bool
-	Logger              *slog.Logger
 }

 // getUniverseDomain returns the default service domain for a given Cloud
@@ -265,8 +271,13 @@ func getTransportConfig(opts *Options) (*transportConfig, error) {
 		return &defaultTransportConfig, nil
 	}

-	s2aAddress := GetS2AAddress(opts.Logger)
-	mtlsS2AAddress := GetMTLSS2AAddress(opts.Logger)
+	s2aAddress := GetS2AAddress()
+	mtlsS2AAddress := GetMTLSS2AAddress()
 	if s2aAddress == "" && mtlsS2AAddress == "" {
 		return &defaultTransportConfig, nil
 	}
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go
index a633099563..85ae91c014 100644
--- a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go
+++ b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go
@@ -19,7 +19,10 @@ import (
 	"encoding/json"
 	"fmt"
 	"log"
-	"log/slog"
 	"os"
 	"strconv"
 	"sync"
@@ -40,8 +43,13 @@ var (

 // GetS2AAddress returns the S2A address to be reached via plaintext connection.
 // Returns empty string if not set or invalid.
-func GetS2AAddress(logger *slog.Logger) string {
-	getMetadataMTLSAutoConfig(logger)
+func GetS2AAddress() string {
+	getMetadataMTLSAutoConfig()
 	if !mtlsConfiguration.valid() {
 		return ""
 	}
@@ -50,8 +58,13 @@ func GetS2AAddress(logger *slog.Logger) string {

 // GetMTLSS2AAddress returns the S2A address to be reached via MTLS connection.
 // Returns empty string if not set or invalid.
-func GetMTLSS2AAddress(logger *slog.Logger) string {
-	getMetadataMTLSAutoConfig(logger)
+func GetMTLSS2AAddress() string {
+	getMetadataMTLSAutoConfig()
 	if !mtlsConfiguration.valid() {
 		return ""
 	}
@@ -75,16 +88,24 @@ type s2aAddresses struct {
 	MTLSAddress string `json:"mtls_address"`
 }

-func getMetadataMTLSAutoConfig(logger *slog.Logger) {
-	var err error
-	mtlsOnce.Do(func() {
-		mtlsConfiguration, err = queryConfig(logger)
+func getMetadataMTLSAutoConfig() {
+	var err error
+	mtlsOnce.Do(func() {
+		mtlsConfiguration, err = queryConfig()
 		if err != nil {
 			log.Printf("Getting MTLS config failed: %v", err)
 		}
 	})
 }

-var httpGetMetadataMTLSConfig = func(logger *slog.Logger) (string, error) {
-	metadataClient := metadata.NewWithOptions(&metadata.Options{
-		Logger: logger,
-	})
-	return metadataClient.GetWithContext(context.Background(), configEndpointSuffix)
-}
-
-func queryConfig(logger *slog.Logger) (*mtlsConfig, error) {
-	resp, err := httpGetMetadataMTLSConfig(logger)
+var httpGetMetadataMTLSConfig = func() (string, error) {
+	return metadata.GetWithContext(context.Background(), configEndpointSuffix)
+}
+
+func queryConfig() (*mtlsConfig, error) {
+	resp, err := httpGetMetadataMTLSConfig()
 	if err != nil {
 		return nil, fmt.Errorf("querying MTLS config from MDS endpoint failed: %w", err)
 	}
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/transport.go b/vendor/cloud.google.com/go/auth/internal/transport/transport.go
index 992ac40df0..a8cb41247b 100644
--- a/vendor/cloud.google.com/go/auth/internal/transport/transport.go
+++ b/vendor/cloud.google.com/go/auth/internal/transport/transport.go
@@ -49,7 +49,10 @@ func CloneDetectOptions(oldDo *credentials.DetectOptions) *credentials.DetectOpt
 		// These fields are are pointer types that we just want to use exactly
 		// as the user set, copy the ref
 		Client:             oldDo.Client,
-		Logger:             oldDo.Logger,
 		AuthHandlerOptions: oldDo.AuthHandlerOptions,
 	}
diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md
index d9044f1a94..aff47ec173 100644
--- a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md
+++ b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md
@@ -1,5 +1,6 @@
 # Changelog

-## [0.2.7](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.6...auth/oauth2adapt/v0.2.7) (2025-01-09)
-
-
-### Bug Fixes
-
-* **auth/oauth2adapt:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9))
-
 ## [0.2.6](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.5...auth/oauth2adapt/v0.2.6) (2024-11-21)
diff --git a/vendor/cloud.google.com/go/auth/threelegged.go b/vendor/cloud.google.com/go/auth/threelegged.go
index 07804dc162..820b59eec8 100644
--- a/vendor/cloud.google.com/go/auth/threelegged.go
+++ b/vendor/cloud.google.com/go/auth/threelegged.go
@@ -20,7 +20,10 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"log/slog"
 	"mime"
 	"net/http"
 	"net/url"
@@ -29,7 +32,10 @@ import (
 	"time"

 	"cloud.google.com/go/auth/internal"
-	"github.com/googleapis/gax-go/v2/internallog"
 )

 // AuthorizationHandler is a 3-legged-OAuth helper that prompts the user for
@@ -71,11 +77,14 @@ type Options3LO struct {
 	// AuthHandlerOpts provides a set of options for doing a
 	// 3-legged OAuth2 flow with a custom [AuthorizationHandler]. Optional.
 	AuthHandlerOpts *AuthorizationHandlerOptions
-	// Logger is used for debug logging. If provided, logging will be enabled
-	// at the loggers configured level. By default logging is disabled unless
-	// enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
-	// logger will be used. Optional.
-	Logger *slog.Logger
 }

 func (o *Options3LO) validate() error {
@@ -103,10 +112,13 @@ func (o *Options3LO) validate() error {
 	return nil
 }

-func (o *Options3LO) logger() *slog.Logger {
-	return internallog.New(o.Logger)
-}
-
 // PKCEOptions holds parameters to support PKCE.
 type PKCEOptions struct {
 	// Challenge is the un-padded, base64-url-encoded string of the encrypted code verifier.
@@ -304,15 +316,22 @@ func fetchToken(ctx context.Context, o *Options3LO, v url.Values) (*Token, strin
 	if o.AuthStyle == StyleInHeader {
 		req.SetBasicAuth(url.QueryEscape(o.ClientID), url.QueryEscape(o.ClientSecret))
 	}
-	logger := o.logger()
-	logger.DebugContext(ctx, "3LO token request", "request", internallog.HTTPRequest(req, []byte(v.Encode())))
+
 	// Make request
 	resp, body, err := internal.DoRequest(o.client(), req)
 	if err != nil {
 		return nil, refreshToken, err
 	}
-	logger.DebugContext(ctx, "3LO token response", "response", internallog.HTTPResponse(resp, body))
 	failureStatus := resp.StatusCode < 200 || resp.StatusCode > 299
 	tokError := &Error{
 		Response: resp,
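fetchToken above posts the authorization-code grant as a form body, with the client secret either in the form or, for StyleInHeader, in a basic-auth header. A rough standalone sketch of that request shape (endpoint and credentials are placeholders, not values from this patch):

// Shape of the 3-legged OAuth2 code exchange (RFC 6749 section 4.1.3).
// All values below are placeholders.
package main

import (
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

func main() {
	v := url.Values{}
	v.Set("grant_type", "authorization_code")
	v.Set("code", "<authorization-code>")
	v.Set("redirect_uri", "http://localhost:8080/callback")

	req, err := http.NewRequest("POST", "https://oauth2.example.com/token", strings.NewReader(v.Encode()))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	// StyleInHeader places the client credentials in a basic-auth header,
	// query-escaped exactly as fetchToken does above.
	req.SetBasicAuth(url.QueryEscape("client-id"), url.QueryEscape("client-secret"))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}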
diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
index bcfb5d8165..2a8653f2e4 100644
--- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
+++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
@@ -1,5 +1,6 @@
 # Changes

-## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.2...compute/metadata/v0.6.0) (2024-12-13)
-
-
-### Features
-
-* **compute/metadata:** Add debug logging ([#11078](https://github.com/googleapis/google-cloud-go/issues/11078)) ([a816814](https://github.com/googleapis/google-cloud-go/commit/a81681463906e4473570a2f426eb0dc2de64e53f))
-
 ## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.1...compute/metadata/v0.5.2) (2024-09-20)
diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go
index 4c18a383a4..9b0025a67d 100644
--- a/vendor/cloud.google.com/go/compute/metadata/metadata.go
+++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go
@@ -24,7 +24,10 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"log/slog"
 	"net"
 	"net/http"
 	"net/url"
@@ -61,10 +64,14 @@ var (
 	instID  = &cachedValue{k: "instance/id", trim: true}
 )

-var defaultClient = &Client{
-	hc:     newDefaultHTTPClient(),
-	logger: slog.New(noOpHandler{}),
-}
+var defaultClient = &Client{hc: newDefaultHTTPClient()}

 func newDefaultHTTPClient() *http.Client {
 	return &http.Client{
@@ -412,6 +419,7 @@ func strsContains(ss []string, s string) bool {

 // A Client provides metadata.
 type Client struct {
-	hc     *http.Client
-	logger *slog.Logger
-}
-
-// Options for configuring a [Client].
-type Options struct {
-	// Client is the HTTP client used to make requests. Optional.
-	Client *http.Client
-	// Logger is used to log information about HTTP request and responses.
-	// If not provided, nothing will be logged. Optional.
-	Logger *slog.Logger
+	hc *http.Client
 }

 // NewClient returns a Client that can be used to fetch metadata.
 // Returns the client that uses the specified http.Client for HTTP requests.
 // If nil is specified, returns the default client.
 func NewClient(c *http.Client) *Client {
-	return NewWithOptions(&Options{
-		Client: c,
-	})
-}
-
-// NewWithOptions returns a Client that is configured with the provided Options.
-func NewWithOptions(opts *Options) *Client {
-	if opts == nil {
-		return defaultClient
-	}
-	client := opts.Client
-	if client == nil {
-		client = newDefaultHTTPClient()
-	}
-	logger := opts.Logger
-	if logger == nil {
-		logger = slog.New(noOpHandler{})
-	}
-	return &Client{hc: client, logger: logger}
+	if c == nil {
+		return defaultClient
+	}
+	return &Client{hc: c}
 }

 // getETag returns a value from the metadata service as well as the associated ETag.
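With the downgraded API above, callers reach the metadata server either through a Client or the package-level helpers; GetWithContext is what the auth code in this patch uses. A sketch, only meaningful when running on Google Cloud compute platforms:

// Sketch of a metadata lookup with the vendored package's older API.
package main

import (
	"context"
	"fmt"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	if !metadata.OnGCE() {
		fmt.Println("not on GCE; skipping lookup")
		return
	}
	ctx := context.Background()
	id, err := metadata.GetWithContext(ctx, "instance/id")
	if err != nil {
		panic(err)
	}
	fmt.Println("instance id:", id)
}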
@@ -477,14 +495,20 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string
 	req.Header.Set("User-Agent", userAgent)
 	var res *http.Response
 	var reqErr error
-	var body []byte
 	retryer := newRetryer()
 	for {
-		c.logger.DebugContext(ctx, "metadata request", "request", httpRequest(req, nil))
 		res, reqErr = c.hc.Do(req)
 		var code int
 		if res != nil {
 			code = res.StatusCode
-			body, err = io.ReadAll(res.Body)
-			if err != nil {
-				res.Body.Close()
-				return "", "", err
-			}
-			c.logger.DebugContext(ctx, "metadata response", "response", httpResponse(res, body))
-			res.Body.Close()
 		}
 		if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry {
 			if res != nil && res.Body != nil {
@@ -507,6 +533,7 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string
 	if reqErr != nil {
 		return "", "", reqErr
 	}
-	if res.StatusCode == http.StatusNotFound {
-		return "", "", NotDefinedError(suffix)
-	}
-	if res.StatusCode != 200 {
-		return "", "", &Error{Code: res.StatusCode, Message: string(body)}
-	}
-	return string(body), res.Header.Get("Etag"), nil
+	defer res.Body.Close()
+	if res.StatusCode == http.StatusNotFound {
+		return "", "", NotDefinedError(suffix)
+	}
+	all, err := io.ReadAll(res.Body)
+	if err != nil {
+		return "", "", err
+	}
+	if res.StatusCode != 200 {
+		return "", "", &Error{Code: res.StatusCode, Message: string(all)}
+	}
+	return string(all), res.Header.Get("Etag"), nil
 }

 // Get returns a value from the metadata service.
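The getETag loop above retries transient failures with a delay chosen by the retryer. Below is a generic rendition of that control flow; the retry policy and backoff values are illustrative assumptions, not the vendored library's.

// Generic request-retry loop mirroring getETag's structure.
package main

import (
	"fmt"
	"net/http"
	"time"
)

func retryDelay(code int, err error) (time.Duration, bool) {
	if err != nil || code >= 500 || code == http.StatusTooManyRequests {
		return 500 * time.Millisecond, true
	}
	return 0, false
}

func main() {
	req, _ := http.NewRequest("GET", "http://metadata.google.internal/computeMetadata/v1/instance/id", nil)
	req.Header.Set("Metadata-Flavor", "Google")
	for attempt := 0; attempt < 3; attempt++ {
		res, err := http.DefaultClient.Do(req)
		var code int
		if res != nil {
			code = res.StatusCode
			res.Body.Close()
		}
		if delay, retry := retryDelay(code, err); retry {
			time.Sleep(delay)
			continue
		}
		fmt.Println("final status:", code, "err:", err)
		return
	}
	fmt.Println("gave up after retries")
}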
diff --git a/vendor/cloud.google.com/go/kms/apiv1/autokey_admin_client.go b/vendor/cloud.google.com/go/kms/apiv1/autokey_admin_client.go
index 2e3add07f5..bd78a46098 100644
--- a/vendor/cloud.google.com/go/kms/apiv1/autokey_admin_client.go
+++ b/vendor/cloud.google.com/go/kms/apiv1/autokey_admin_client.go
@@ -20,7 +20,11 @@ import (
 	"bytes"
 	"context"
 	"fmt"
-	"log/slog"
+	"io"
 	"math"
 	"net/http"
 	"net/url"
@@ -30,6 +34,10 @@ import (
 	kmspb "cloud.google.com/go/kms/apiv1/kmspb"
 	longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
 	gax "github.com/googleapis/gax-go/v2"
+	"google.golang.org/api/googleapi"
 	"google.golang.org/api/iterator"
 	"google.golang.org/api/option"
 	"google.golang.org/api/option/internaloption"
@@ -311,8 +319,11 @@ type autokeyAdminGRPCClient struct {
 	// The x-goog-* metadata to be sent with each request.
 	xGoogHeaders []string
-
-	logger *slog.Logger
 }

 // NewAutokeyAdminClient creates a new autokey admin client based on gRPC.
@@ -346,7 +357,10 @@ func NewAutokeyAdminClient(ctx context.Context, opts ...option.ClientOption) (*A
 		connPool:           connPool,
 		autokeyAdminClient: kmspb.NewAutokeyAdminClient(connPool),
 		CallOptions:        &client.CallOptions,
-		logger:             internaloption.GetLogger(opts),
 		operationsClient:   longrunningpb.NewOperationsClient(connPool),
 		iamPolicyClient:    iampb.NewIAMPolicyClient(connPool),
 		locationsClient:    locationpb.NewLocationsClient(connPool),
@@ -396,8 +410,11 @@ type autokeyAdminRESTClient struct {
 	// Points back to the CallOptions field of the containing AutokeyAdminClient
 	CallOptions **AutokeyAdminCallOptions
-
-	logger *slog.Logger
 }

 // NewAutokeyAdminRESTClient creates a new autokey admin rest client.
@@ -422,7 +439,10 @@ func NewAutokeyAdminRESTClient(ctx context.Context, opts ...option.ClientOption)
 		endpoint:    endpoint,
 		httpClient:  httpClient,
 		CallOptions: &callOpts,
-		logger:      internaloption.GetLogger(opts),
 	}
 	c.setGoogleClientInfo()

@@ -475,7 +495,11 @@ func (c *autokeyAdminGRPCClient) UpdateAutokeyConfig(ctx context.Context, req *k
 	var resp *kmspb.AutokeyConfig
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error
-		resp, err = executeRPC(ctx, c.autokeyAdminClient.UpdateAutokeyConfig, req, settings.GRPC, c.logger, "UpdateAutokeyConfig")
+		resp, err = c.autokeyAdminClient.UpdateAutokeyConfig(ctx, req, settings.GRPC...)
 		return err
 	}, opts...)
 	if err != nil {
@@ -493,7 +517,11 @@ func (c *autokeyAdminGRPCClient) GetAutokeyConfig(ctx context.Context, req *kmsp
 	var resp *kmspb.AutokeyConfig
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error
-		resp, err = executeRPC(ctx, c.autokeyAdminClient.GetAutokeyConfig, req, settings.GRPC, c.logger, "GetAutokeyConfig")
+		resp, err = c.autokeyAdminClient.GetAutokeyConfig(ctx, req, settings.GRPC...)
 		return err
 	}, opts...)
 	if err != nil {
@@ -511,7 +539,11 @@ func (c *autokeyAdminGRPCClient) ShowEffectiveAutokeyConfig(ctx context.Context,
 	var resp *kmspb.ShowEffectiveAutokeyConfigResponse
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error
-		resp, err = executeRPC(ctx, c.autokeyAdminClient.ShowEffectiveAutokeyConfig, req, settings.GRPC, c.logger, "ShowEffectiveAutokeyConfig")
+		resp, err = c.autokeyAdminClient.ShowEffectiveAutokeyConfig(ctx, req, settings.GRPC...)
 		return err
 	}, opts...)
 	if err != nil {
@@ -529,7 +561,11 @@ func (c *autokeyAdminGRPCClient) GetLocation(ctx context.Context, req *locationp
 	var resp *locationpb.Location
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error
-		resp, err = executeRPC(ctx, c.locationsClient.GetLocation, req, settings.GRPC, c.logger, "GetLocation")
+		resp, err = c.locationsClient.GetLocation(ctx, req, settings.GRPC...)
 		return err
 	}, opts...)
 	if err != nil {
@@ -558,7 +594,11 @@ func (c *autokeyAdminGRPCClient) ListLocations(ctx context.Context, req *locatio
 	}
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error
-		resp, err = executeRPC(ctx, c.locationsClient.ListLocations, req, settings.GRPC, c.logger, "ListLocations")
+		resp, err = c.locationsClient.ListLocations(ctx, req, settings.GRPC...)
 		return err
 	}, opts...)
 	if err != nil {
@@ -593,7 +633,11 @@ func (c *autokeyAdminGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.Ge
 	var resp *iampb.Policy
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error
-		resp, err = executeRPC(ctx, c.iamPolicyClient.GetIamPolicy, req, settings.GRPC, c.logger, "GetIamPolicy")
+		resp, err = c.iamPolicyClient.GetIamPolicy(ctx, req, settings.GRPC...)
 		return err
 	}, opts...)
 	if err != nil {
@@ -611,7 +655,11 @@ func (c *autokeyAdminGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.Se
 	var resp *iampb.Policy
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error
-		resp, err = executeRPC(ctx, c.iamPolicyClient.SetIamPolicy, req, settings.GRPC, c.logger, "SetIamPolicy")
+		resp, err = c.iamPolicyClient.SetIamPolicy(ctx, req, settings.GRPC...)
 		return err
 	}, opts...)
 	if err != nil {
@@ -629,7 +677,11 @@ func (c *autokeyAdminGRPCClient) TestIamPermissions(ctx context.Context, req *ia
 	var resp *iampb.TestIamPermissionsResponse
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error
-		resp, err = executeRPC(ctx, c.iamPolicyClient.TestIamPermissions, req, settings.GRPC, c.logger, "TestIamPermissions")
+		resp, err = c.iamPolicyClient.TestIamPermissions(ctx, req, settings.GRPC...)
 		return err
 	}, opts...)
 	if err != nil {
@@ -647,7 +699,11 @@ func (c *autokeyAdminGRPCClient) GetOperation(ctx context.Context, req *longrunn
 	var resp *longrunningpb.Operation
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error
-		resp, err = executeRPC(ctx, c.operationsClient.GetOperation, req, settings.GRPC, c.logger, "GetOperation")
+		resp, err = c.operationsClient.GetOperation(ctx, req, settings.GRPC...)
 		return err
 	}, opts...)
 	if err != nil {
@@ -709,7 +765,21 @@ func (c *autokeyAdminRESTClient) UpdateAutokeyConfig(ctx context.Context, req *k
 	httpReq = httpReq.WithContext(ctx)
 	httpReq.Header = headers

-	buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "UpdateAutokeyConfig")
+	httpRsp, err := c.httpClient.Do(httpReq)
+	if err != nil {
+		return err
+	}
+	defer httpRsp.Body.Close()
+
+	if err = googleapi.CheckResponse(httpRsp); err != nil {
+		return err
+	}
+
+	buf, err := io.ReadAll(httpRsp.Body)
 	if err != nil {
 		return err
 	}
@@ -760,7 +830,21 @@ func (c *autokeyAdminRESTClient) GetAutokeyConfig(ctx context.Context, req *kmsp
 	httpReq = httpReq.WithContext(ctx)
 	httpReq.Header = headers

-	buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetAutokeyConfig")
+	httpRsp, err := c.httpClient.Do(httpReq)
+	if err != nil {
+		return err
+	}
+	defer httpRsp.Body.Close()
+
+	if err = googleapi.CheckResponse(httpRsp); err != nil {
+		return err
+	}
+
+	buf, err := io.ReadAll(httpRsp.Body)
 	if err != nil {
 		return err
 	}
@@ -810,7 +894,21 @@ func (c *autokeyAdminRESTClient) ShowEffectiveAutokeyConfig(ctx context.Context,
 	httpReq = httpReq.WithContext(ctx)
 	httpReq.Header = headers

-	buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ShowEffectiveAutokeyConfig")
+	httpRsp, err := c.httpClient.Do(httpReq)
+	if err != nil {
+		return err
+	}
+	defer httpRsp.Body.Close()
+
+	if err = googleapi.CheckResponse(httpRsp); err != nil {
+		return err
+	}
+
+	buf, err := io.ReadAll(httpRsp.Body)
 	if err != nil {
 		return err
 	}
@@ -860,7 +958,21 @@ func (c *autokeyAdminRESTClient) GetLocation(ctx context.Context, req *locationp
 	httpReq = httpReq.WithContext(ctx)
 	httpReq.Header = headers

-	buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetLocation")
+	httpRsp, err := c.httpClient.Do(httpReq)
+	if err != nil {
+		return err
+	}
+	defer httpRsp.Body.Close()
+
+	if err = googleapi.CheckResponse(httpRsp); err != nil {
+		return err
+	}
+
+	buf, err := io.ReadAll(httpRsp.Body)
 	if err != nil {
 		return err
 	}
@@ -925,10 +1037,28 @@ func (c *autokeyAdminRESTClient) ListLocations(ctx context.Context, req *locatio
 	}
 	httpReq.Header = headers

-	buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListLocations")
-	if err != nil {
-		return err
-	}
+	httpRsp, err := c.httpClient.Do(httpReq)
+	if err != nil {
+		return err
+	}
+	defer httpRsp.Body.Close()
+
+	if err = googleapi.CheckResponse(httpRsp); err != nil {
+		return err
+	}
+
+	buf, err := io.ReadAll(httpRsp.Body)
+	if err != nil {
+		return err
+	}
+
 	if err := unm.Unmarshal(buf, resp); err != nil {
 		return err
 	}
@@ -995,7 +1125,21 @@ func (c *autokeyAdminRESTClient) GetIamPolicy(ctx context.Context, req *iampb.Ge
 	httpReq = httpReq.WithContext(ctx)
 	httpReq.Header = headers

-	buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetIamPolicy")
+	httpRsp, err := c.httpClient.Do(httpReq)
+	if err != nil {
+		return err
+	}
+	defer httpRsp.Body.Close()
+
+	if err = googleapi.CheckResponse(httpRsp); err != nil {
+		return err
+	}
+
+	buf, err := io.ReadAll(httpRsp.Body)
 	if err != nil {
 		return err
 	}
@@ -1055,7 +1199,21 @@ func (c *autokeyAdminRESTClient) SetIamPolicy(ctx context.Context, req *iampb.Se
 	httpReq = httpReq.WithContext(ctx)
 	httpReq.Header = headers

-	buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "SetIamPolicy")
+	httpRsp, err := c.httpClient.Do(httpReq)
+	if err != nil {
+		return err
+	}
+	defer httpRsp.Body.Close()
+
+	if err = googleapi.CheckResponse(httpRsp); err != nil {
+		return err
+	}
+
+	buf, err := io.ReadAll(httpRsp.Body)
 	if err != nil {
 		return err
 	}
@@ -1117,7 +1275,21 @@ func (c *autokeyAdminRESTClient) TestIamPermissions(ctx context.Context, req *ia
 	httpReq = httpReq.WithContext(ctx)
 	httpReq.Header = headers

-	buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "TestIamPermissions")
+	httpRsp, err := c.httpClient.Do(httpReq)
+	if err != nil {
+		return err
+	}
+	defer httpRsp.Body.Close()
+
+	if err = googleapi.CheckResponse(httpRsp); err != nil {
+		return err
+	}
+
+	buf, err := io.ReadAll(httpRsp.Body)
 	if err != nil {
 		return err
 	}
@@ -1167,7 +1339,21 @@ func (c *autokeyAdminRESTClient) GetOperation(ctx context.Context, req *longrunn
 	httpReq = httpReq.WithContext(ctx)
 	httpReq.Header = headers

-	buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetOperation")
+	httpRsp, err := c.httpClient.Do(httpReq)
+	if err != nil {
+		return err
+	}
+	defer httpRsp.Body.Close()
+
+	if err = googleapi.CheckResponse(httpRsp); err != nil {
+		return err
+	}
+
+	buf, err := io.ReadAll(httpRsp.Body)
 	if err != nil {
 		return err
 	}
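Every REST method in this client now repeats the same four steps: Do, CheckResponse, ReadAll, unmarshal. Factored out for clarity below; the request URL is a placeholder, and googleapi.CheckResponse is the real helper that converts non-2xx replies into a structured *googleapi.Error.

// The Do / CheckResponse / ReadAll sequence the REST methods above repeat.
package main

import (
	"fmt"
	"io"
	"net/http"

	"google.golang.org/api/googleapi"
)

func doJSON(client *http.Client, req *http.Request) ([]byte, error) {
	rsp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer rsp.Body.Close()
	if err := googleapi.CheckResponse(rsp); err != nil {
		return nil, err // *googleapi.Error with code and message
	}
	return io.ReadAll(rsp.Body)
}

func main() {
	req, _ := http.NewRequest("GET", "https://cloudkms.googleapis.com/v1/projects/p/locations/global", nil)
	buf, err := doJSON(http.DefaultClient, req)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	fmt.Println(len(buf), "bytes")
}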
diff --git a/vendor/cloud.google.com/go/kms/apiv1/autokey_client.go b/vendor/cloud.google.com/go/kms/apiv1/autokey_client.go
index d4c7c3bd6d..1a31508dcb 100644
--- a/vendor/cloud.google.com/go/kms/apiv1/autokey_client.go
+++ b/vendor/cloud.google.com/go/kms/apiv1/autokey_client.go
@@ -20,7 +20,11 @@ import (
 	"bytes"
 	"context"
 	"fmt"
-	"log/slog"
+	"io"
 	"math"
 	"net/http"
 	"net/url"
@@ -32,6 +36,10 @@ import (
 	kmspb "cloud.google.com/go/kms/apiv1/kmspb"
 	lroauto "cloud.google.com/go/longrunning/autogen"
 	longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
 	gax "github.com/googleapis/gax-go/v2"
+	"google.golang.org/api/googleapi"
 	"google.golang.org/api/iterator"
 	"google.golang.org/api/option"
 	"google.golang.org/api/option/internaloption"
@@ -229,9 +237,15 @@ func (c *AutokeyClient) Connection() *grpc.ClientConn {
 // CreateKeyHandle creates a new KeyHandle, triggering the
 // provisioning of a new CryptoKey for CMEK
 // use with the given resource type in the configured key project and the same
-// location. GetOperation should
-// be used to resolve the resulting long-running operation and get the
-// resulting KeyHandle and
+// location. GetOperation should be used to resolve
+// the resulting long-running operation and get the resulting
+// KeyHandle and
 // CryptoKey.
 func (c *AutokeyClient) CreateKeyHandle(ctx context.Context, req *kmspb.CreateKeyHandleRequest, opts ...gax.CallOption) (*CreateKeyHandleOperation, error) {
 	return c.internalClient.CreateKeyHandle(ctx, req, opts...)
@@ -320,8 +334,11 @@ type autokeyGRPCClient struct {
 	// The x-goog-* metadata to be sent with each request.
 	xGoogHeaders []string
-
-	logger *slog.Logger
 }

 // NewAutokeyClient creates a new autokey client based on gRPC.
@@ -365,7 +382,10 @@ func NewAutokeyClient(ctx context.Context, opts ...option.ClientOption) (*Autoke
 		connPool:         connPool,
 		autokeyClient:    kmspb.NewAutokeyClient(connPool),
 		CallOptions:      &client.CallOptions,
-		logger:           internaloption.GetLogger(opts),
 		operationsClient: longrunningpb.NewOperationsClient(connPool),
 		iamPolicyClient:  iampb.NewIAMPolicyClient(connPool),
 		locationsClient:  locationpb.NewLocationsClient(connPool),
@@ -431,8 +451,11 @@ type autokeyRESTClient struct {
 	// Points back to the CallOptions field of the containing AutokeyClient
 	CallOptions **AutokeyCallOptions
-
-	logger *slog.Logger
 }

 // NewAutokeyRESTClient creates a new autokey rest client.
@@ -467,7 +490,10 @@ func NewAutokeyRESTClient(ctx context.Context, opts ...option.ClientOption) (*Au
 		endpoint:    endpoint,
 		httpClient:  httpClient,
 		CallOptions: &callOpts,
-		logger:      internaloption.GetLogger(opts),
 	}
 	c.setGoogleClientInfo()

@@ -530,7 +556,11 @@ func (c *autokeyGRPCClient) CreateKeyHandle(ctx context.Context, req *kmspb.Crea
 	var resp *longrunningpb.Operation
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error
-		resp, err = executeRPC(ctx, c.autokeyClient.CreateKeyHandle, req, settings.GRPC, c.logger, "CreateKeyHandle")
+		resp, err = c.autokeyClient.CreateKeyHandle(ctx, req, settings.GRPC...)
 		return err
 	}, opts...)
 	if err != nil {
@@ -550,7 +580,11 @@ func (c *autokeyGRPCClient) GetKeyHandle(ctx context.Context, req *kmspb.GetKeyH
 	var resp *kmspb.KeyHandle
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error
-		resp, err = executeRPC(ctx, c.autokeyClient.GetKeyHandle, req, settings.GRPC, c.logger, "GetKeyHandle")
+		resp, err = c.autokeyClient.GetKeyHandle(ctx, req, settings.GRPC...)
 		return err
 	}, opts...)
 	if err != nil {
@@ -579,7 +613,11 @@ func (c *autokeyGRPCClient) ListKeyHandles(ctx context.Context, req *kmspb.ListK
 	}
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error
-		resp, err = executeRPC(ctx, c.autokeyClient.ListKeyHandles, req, settings.GRPC, c.logger, "ListKeyHandles")
+		resp, err = c.autokeyClient.ListKeyHandles(ctx, req, settings.GRPC...)
 		return err
 	}, opts...)
 	if err != nil {
@@ -614,7 +652,11 @@ func (c *autokeyGRPCClient) GetLocation(ctx context.Context, req *locationpb.Get
 	var resp *locationpb.Location
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error
-		resp, err = executeRPC(ctx, c.locationsClient.GetLocation, req, settings.GRPC, c.logger, "GetLocation")
+		resp, err = c.locationsClient.GetLocation(ctx, req, settings.GRPC...)
 		return err
 	}, opts...)
 	if err != nil {
@@ -643,7 +685,11 @@ func (c *autokeyGRPCClient) ListLocations(ctx context.Context, req *locationpb.L
 	}
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error
-		resp, err = executeRPC(ctx, c.locationsClient.ListLocations, req, settings.GRPC, c.logger, "ListLocations")
+		resp, err = c.locationsClient.ListLocations(ctx, req, settings.GRPC...)
 		return err
 	}, opts...)
 	if err != nil {
@@ -678,7 +724,11 @@ func (c *autokeyGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamP
 	var resp *iampb.Policy
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error
-		resp, err = executeRPC(ctx, c.iamPolicyClient.GetIamPolicy, req, settings.GRPC, c.logger, "GetIamPolicy")
+		resp, err = c.iamPolicyClient.GetIamPolicy(ctx, req, settings.GRPC...)
 		return err
 	}, opts...)
 	if err != nil {
@@ -696,7 +746,11 @@ func (c *autokeyGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamP
 	var resp *iampb.Policy
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error
-		resp, err = executeRPC(ctx, c.iamPolicyClient.SetIamPolicy, req, settings.GRPC, c.logger, "SetIamPolicy")
+		resp, err = c.iamPolicyClient.SetIamPolicy(ctx, req, settings.GRPC...)
 		return err
 	}, opts...)
 	if err != nil {
@@ -714,7 +768,11 @@ func (c *autokeyGRPCClient) TestIamPermissions(ctx context.Context, req *iampb.T
 	var resp *iampb.TestIamPermissionsResponse
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error
-		resp, err = executeRPC(ctx, c.iamPolicyClient.TestIamPermissions, req, settings.GRPC, c.logger, "TestIamPermissions")
+		resp, err = c.iamPolicyClient.TestIamPermissions(ctx, req, settings.GRPC...)
 		return err
 	}, opts...)
 	if err != nil {
@@ -732,7 +790,11 @@ func (c *autokeyGRPCClient) GetOperation(ctx context.Context, req *longrunningpb
 	var resp *longrunningpb.Operation
 	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
 		var err error
-		resp, err = executeRPC(ctx, c.operationsClient.GetOperation, req, settings.GRPC, c.logger, "GetOperation")
+		resp, err = c.operationsClient.GetOperation(ctx, req, settings.GRPC...)
 		return err
 	}, opts...)
 	if err != nil {
@@ -744,9 +806,15 @@ func (c *autokeyGRPCClient) GetOperation(ctx context.Context, req *longrunningpb
 // CreateKeyHandle creates a new KeyHandle, triggering the
 // provisioning of a new CryptoKey for CMEK
 // use with the given resource type in the configured key project and the same
-// location. GetOperation should
-// be used to resolve the resulting long-running operation and get the
-// resulting KeyHandle and
+// location. GetOperation should be used to resolve
+// the resulting long-running operation and get the resulting
+// KeyHandle and
 // CryptoKey.
 func (c *autokeyRESTClient) CreateKeyHandle(ctx context.Context, req *kmspb.CreateKeyHandleRequest, opts ...gax.CallOption) (*CreateKeyHandleOperation, error) {
 	m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
@@ -789,10 +857,28 @@ func (c *autokeyRESTClient) CreateKeyHandle(ctx context.Context, req *kmspb.Crea
 	httpReq = httpReq.WithContext(ctx)
 	httpReq.Header = headers

-	buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "CreateKeyHandle")
-	if err != nil {
-		return err
-	}
+	httpRsp, err := c.httpClient.Do(httpReq)
+	if err != nil {
+		return err
+	}
+	defer httpRsp.Body.Close()
+
+	if err = googleapi.CheckResponse(httpRsp); err != nil {
+		return err
+	}
+
+	buf, err := io.ReadAll(httpRsp.Body)
+	if err != nil {
+		return err
+	}
+
 	if err := unm.Unmarshal(buf, resp); err != nil {
 		return err
 	}
@@ -843,7 +929,21 @@ func (c *autokeyRESTClient) GetKeyHandle(ctx context.Context, req *kmspb.GetKeyH
 	httpReq = httpReq.WithContext(ctx)
 	httpReq.Header = headers

-	buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetKeyHandle")
+	httpRsp, err := c.httpClient.Do(httpReq)
+	if err != nil {
+		return err
+	}
+	defer httpRsp.Body.Close()
+
+	if err = googleapi.CheckResponse(httpRsp); err != nil {
+		return err
+	}
+
+	buf, err := io.ReadAll(httpRsp.Body)
 	if err != nil {
 		return err
 	}
@@ -908,10 +1008,28 @@ func (c *autokeyRESTClient) ListKeyHandles(ctx context.Context, req *kmspb.ListK
 	}
 	httpReq.Header = headers

-	buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListKeyHandles")
-	if err != nil {
-		return err
-	}
+	httpRsp, err := c.httpClient.Do(httpReq)
+	if err != nil {
+		return err
+	}
+	defer httpRsp.Body.Close()
+
+	if err = googleapi.CheckResponse(httpRsp); err != nil {
+		return err
+	}
+
+	buf, err := io.ReadAll(httpRsp.Body)
+	if err != nil {
+		return err
+	}
+
 	if err := unm.Unmarshal(buf, resp); err != nil {
 		return err
 	}
@@ -974,7 +1092,21 @@ func (c *autokeyRESTClient) GetLocation(ctx context.Context, req *locationpb.Get
 	httpReq = httpReq.WithContext(ctx)
 	httpReq.Header = headers

-	buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetLocation")
+	httpRsp, err := c.httpClient.Do(httpReq)
+	if err != nil {
+		return err
+	}
+	defer httpRsp.Body.Close()
+
+	if err = googleapi.CheckResponse(httpRsp); err != nil {
+		return err
+	}
+
+	buf, err := io.ReadAll(httpRsp.Body)
 	if err != nil {
 		return err
 	}
@@ -1039,10 +1171,28 @@ func (c *autokeyRESTClient) ListLocations(ctx context.Context, req *locationpb.L
 	}
 	httpReq.Header = headers

-	buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListLocations")
-	if err != nil {
-		return err
-	}
+	httpRsp, err := c.httpClient.Do(httpReq)
+	if err != nil {
+		return err
+	}
+	defer httpRsp.Body.Close()
+
+	if err = googleapi.CheckResponse(httpRsp); err != nil {
+		return err
+	}
+
+	buf, err := io.ReadAll(httpRsp.Body)
+	if err != nil {
+		return err
+	}
+
 	if err := unm.Unmarshal(buf, resp); err != nil {
 		return err
 	}
@@ -1109,7 +1259,21 @@ func (c *autokeyRESTClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamP
 	httpReq = httpReq.WithContext(ctx)
 	httpReq.Header = headers

-	buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetIamPolicy")
+	httpRsp, err := c.httpClient.Do(httpReq)
+	if err != nil {
+		return err
+	}
+	defer httpRsp.Body.Close()
+
+	if err = googleapi.CheckResponse(httpRsp); err != nil {
+		return err
+	}
+
+	buf, err := io.ReadAll(httpRsp.Body)
 	if err != nil {
 		return err
 	}
@@ -1169,7 +1333,21 @@ func (c *autokeyRESTClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamP
 	httpReq = httpReq.WithContext(ctx)
 	httpReq.Header = headers

-	buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "SetIamPolicy")
+	httpRsp, err := c.httpClient.Do(httpReq)
+	if err != nil {
+		return err
+	}
+	defer httpRsp.Body.Close()
+
+	if err = googleapi.CheckResponse(httpRsp); err != nil {
+		return err
+	}
+
+	buf, err := io.ReadAll(httpRsp.Body)
 	if err != nil {
 		return err
 	}
@@ -1231,7 +1409,21 @@ func (c *autokeyRESTClient) TestIamPermissions(ctx context.Context, req *iampb.T
 	httpReq = httpReq.WithContext(ctx)
 	httpReq.Header = headers

-	buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "TestIamPermissions")
+	httpRsp, err := c.httpClient.Do(httpReq)
+	if err != nil {
+		return err
+	}
+	defer httpRsp.Body.Close()
+
+	if err = googleapi.CheckResponse(httpRsp); err != nil {
+		return err
+	}
+
+	buf, err := io.ReadAll(httpRsp.Body)
 	if err != nil {
 		return err
 	}
@@ -1281,7 +1473,21 @@ func (c *autokeyRESTClient) GetOperation(ctx context.Context, req *longrunningpb
 	httpReq = httpReq.WithContext(ctx)
 	httpReq.Header = headers

-	buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetOperation")
+	httpRsp, err := c.httpClient.Do(httpReq)
+	if err != nil {
+		return err
+	}
+	defer httpRsp.Body.Close()
+
+	if err = googleapi.CheckResponse(httpRsp); err != nil {
+		return err
+	}
+
+	buf, err := io.ReadAll(httpRsp.Body)
 	if err != nil {
 		return err
 	}
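For completeness, the gRPC counterparts of these methods all funnel through gax.Invoke, which re-runs the closure according to the method's retry settings. A minimal sketch, with a stub standing in for a real RPC:

// Minimal gax.Invoke usage; the closure stands in for a generated call
// such as autokeyClient.GetKeyHandle(ctx, req, settings.GRPC...).
package main

import (
	"context"
	"fmt"

	gax "github.com/googleapis/gax-go/v2"
)

func main() {
	ctx := context.Background()
	var resp string
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		resp = "ok" // placeholder for: resp, err = client.SomeRPC(ctx, req, settings.GRPC...)
		return nil
	})
	fmt.Println(resp, err)
}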
func (it *CryptoKeyIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
@@ -158,7 +162,11 @@ type CryptoKeyVersionIterator struct { InternalFetch func(pageSize int, pageToken string) (results []*kmspb.CryptoKeyVersion, nextPageToken string, err error) } // PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details. func (it *CryptoKeyVersionIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
@@ -205,7 +213,11 @@ type EkmConnectionIterator struct { InternalFetch func(pageSize int, pageToken string) (results []*kmspb.EkmConnection, nextPageToken string, err error) } // PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details. func (it *EkmConnectionIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
@@ -252,7 +264,11 @@ type ImportJobIterator struct { InternalFetch func(pageSize int, pageToken string) (results []*kmspb.ImportJob, nextPageToken string, err error) } // PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details. func (it *ImportJobIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
@@ -299,7 +315,11 @@ type KeyHandleIterator struct { InternalFetch func(pageSize int, pageToken string) (results []*kmspb.KeyHandle, nextPageToken string, err error) } // PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details. func (it *KeyHandleIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
@@ -346,7 +366,11 @@ type KeyRingIterator struct { InternalFetch func(pageSize int, pageToken string) (results []*kmspb.KeyRing, nextPageToken string, err error) } // PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details. func (it *KeyRingIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
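All of these iterators satisfy the contract of the google.golang.org/api/iterator package referenced in the comments above; paging follows the usual pattern (the parent resource name is a placeholder):

	it := c.ListCryptoKeys(ctx, &kmspb.ListCryptoKeysRequest{
		Parent: "projects/my-project/locations/us-central1/keyRings/my-ring", // placeholder
	})
	for {
		ck, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		_ = ck // use the CryptoKey
	}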
@@ -393,7 +417,11 @@ type LocationIterator struct { InternalFetch func(pageSize int, pageToken string) (results []*locationpb.Location, nextPageToken string, err error) } // PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details. func (it *LocationIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
diff --git a/vendor/cloud.google.com/go/kms/apiv1/doc.go b/vendor/cloud.google.com/go/kms/apiv1/doc.go index 8e9ced1a3b..52f9925d57 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/doc.go +++ b/vendor/cloud.google.com/go/kms/apiv1/doc.go
@@ -36,7 +36,10 @@ // // To get started with this package, create a client. // // // go get cloud.google.com/go/kms/apiv1@latest // ctx := context.Background() // // This snippet has been automatically generated and should be regarded as a code template only. // // It will require modifications to work: // // - It may require correct/in-range values for request initialization. // // - It may require specifying regional endpoints when creating the service client as shown in: // // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options // c, err := kms.NewAutokeyClient(ctx) // if err != nil { // // TODO: Handle error. // } // defer c.Close()
@@ -55,7 +58,23 @@ // // # Using the Client // // The following is an example of making an API call with the newly created client, mentioned above. // // req := &kmspb.CreateKeyHandleRequest{ // // TODO: Fill request struct fields.
@@ -87,3 +106,34 @@ // [Debugging Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Debugging // [Inspecting errors]: https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors package kms // import "cloud.google.com/go/kms/apiv1"
diff --git a/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go b/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go index 7488bb432c..da180ab529 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go +++ b/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go
@@ -20,7 +20,11 @@ import ( "bytes" "context" "fmt" "log/slog" "math" "net/http" "net/url"
@@ -30,6 +34,10 @@ import ( kmspb "cloud.google.com/go/kms/apiv1/kmspb" longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb" gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/option/internaloption"
@@ -369,8 +377,11 @@ type ekmGRPCClient struct { // The x-goog-* metadata to be sent with each request. xGoogHeaders []string logger *slog.Logger }
@@ -402,7 +413,10 @@ func NewEkmClient(ctx context.Context, opts ...option.ClientOption) (*EkmClient, connPool: connPool, ekmClient: kmspb.NewEkmServiceClient(connPool), CallOptions: &client.CallOptions, logger: internaloption.GetLogger(opts), operationsClient: longrunningpb.NewOperationsClient(connPool), iamPolicyClient: iampb.NewIAMPolicyClient(connPool), locationsClient: locationpb.NewLocationsClient(connPool),
@@ -452,8 +466,11 @@ type ekmRESTClient struct { // Points back to the CallOptions field of the containing EkmClient CallOptions **EkmCallOptions logger *slog.Logger }
@@ -476,7 +493,10 @@ func NewEkmRESTClient(ctx context.Context, opts ...option.ClientOption) (*EkmCli endpoint: endpoint, httpClient: httpClient, CallOptions: &callOpts, logger: internaloption.GetLogger(opts), } c.setGoogleClientInfo()
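On the resolved side every unary call is funneled through an executeRPC helper that logs via the injected *slog.Logger. The helper itself is generated elsewhere in this module; a hypothetical sketch of its shape, assuming "log/slog" and "google.golang.org/grpc" are imported:

	// Hypothetical sketch only; the generated helper may differ.
	func executeRPC[Req, Resp any](ctx context.Context, fn func(context.Context, Req, ...grpc.CallOption) (Resp, error), req Req, opts []grpc.CallOption, logger *slog.Logger, name string) (Resp, error) {
		logger.DebugContext(ctx, "api request", "rpcName", name)
		resp, err := fn(ctx, req, opts...) // the actual RPC
		logger.DebugContext(ctx, "api response", "rpcName", name, "error", err)
		return resp, err
	}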
@@ -540,7 +560,11 @@ func (c *ekmGRPCClient) ListEkmConnections(ctx context.Context, req *kmspb.ListE } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.ekmClient.ListEkmConnections, req, settings.GRPC, c.logger, "ListEkmConnections") return err }, opts...) if err != nil {
@@ -575,7 +599,11 @@ func (c *ekmGRPCClient) GetEkmConnection(ctx context.Context, req *kmspb.GetEkmC var resp *kmspb.EkmConnection err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.ekmClient.GetEkmConnection, req, settings.GRPC, c.logger, "GetEkmConnection") return err }, opts...) if err != nil {
@@ -593,7 +621,11 @@ func (c *ekmGRPCClient) CreateEkmConnection(ctx context.Context, req *kmspb.Crea var resp *kmspb.EkmConnection err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.ekmClient.CreateEkmConnection, req, settings.GRPC, c.logger, "CreateEkmConnection") return err }, opts...) if err != nil {
@@ -611,7 +643,11 @@ func (c *ekmGRPCClient) UpdateEkmConnection(ctx context.Context, req *kmspb.Upda var resp *kmspb.EkmConnection err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.ekmClient.UpdateEkmConnection, req, settings.GRPC, c.logger, "UpdateEkmConnection") return err }, opts...) if err != nil {
@@ -629,7 +665,11 @@ func (c *ekmGRPCClient) GetEkmConfig(ctx context.Context, req *kmspb.GetEkmConfi var resp *kmspb.EkmConfig err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.ekmClient.GetEkmConfig, req, settings.GRPC, c.logger, "GetEkmConfig") return err }, opts...) if err != nil {
@@ -647,7 +687,11 @@ func (c *ekmGRPCClient) UpdateEkmConfig(ctx context.Context, req *kmspb.UpdateEk var resp *kmspb.EkmConfig err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.ekmClient.UpdateEkmConfig, req, settings.GRPC, c.logger, "UpdateEkmConfig") return err }, opts...) if err != nil {
@@ -665,7 +709,11 @@ func (c *ekmGRPCClient) VerifyConnectivity(ctx context.Context, req *kmspb.Verif var resp *kmspb.VerifyConnectivityResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.ekmClient.VerifyConnectivity, req, settings.GRPC, c.logger, "VerifyConnectivity") return err }, opts...) if err != nil {
@@ -683,7 +731,11 @@ func (c *ekmGRPCClient) GetLocation(ctx context.Context, req *locationpb.GetLoca var resp *locationpb.Location err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.locationsClient.GetLocation, req, settings.GRPC, c.logger, "GetLocation") return err }, opts...)
if err != nil {
@@ -712,7 +764,11 @@ func (c *ekmGRPCClient) ListLocations(ctx context.Context, req *locationpb.ListL } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.locationsClient.ListLocations, req, settings.GRPC, c.logger, "ListLocations") return err }, opts...) if err != nil {
@@ -747,7 +803,11 @@ func (c *ekmGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolic var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.iamPolicyClient.GetIamPolicy, req, settings.GRPC, c.logger, "GetIamPolicy") return err }, opts...) if err != nil {
@@ -765,7 +825,11 @@ func (c *ekmGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolic var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.iamPolicyClient.SetIamPolicy, req, settings.GRPC, c.logger, "SetIamPolicy") return err }, opts...) if err != nil {
@@ -783,7 +847,11 @@ func (c *ekmGRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestI var resp *iampb.TestIamPermissionsResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.iamPolicyClient.TestIamPermissions, req, settings.GRPC, c.logger, "TestIamPermissions") return err }, opts...) if err != nil {
@@ -801,7 +869,11 @@ func (c *ekmGRPCClient) GetOperation(ctx context.Context, req *longrunningpb.Get var resp *longrunningpb.Operation err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.operationsClient.GetOperation, req, settings.GRPC, c.logger, "GetOperation") return err }, opts...) if err != nil {
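The IAM wrappers are used like any other call; for example, checking the caller's permissions on a key ring (resource name and permission string are placeholders):

	resp, err := c.TestIamPermissions(ctx, &iampb.TestIamPermissionsRequest{
		Resource:    "projects/my-project/locations/global/keyRings/my-ring", // placeholder
		Permissions: []string{"cloudkms.cryptoKeys.create"},
	})
	if err != nil {
		// TODO: Handle error.
	}
	_ = resp.Permissions // the subset of requested permissions the caller holds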
@@ -861,10 +933,28 @@ func (c *ekmRESTClient) ListEkmConnections(ctx context.Context, req *kmspb.ListE } httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListEkmConnections") if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { return err }
@@ -928,7 +1018,21 @@ func (c *ekmRESTClient) GetEkmConnection(ctx context.Context, req *kmspb.GetEkmC httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetEkmConnection") if err != nil { return err }
@@ -987,7 +1091,21 @@ func (c *ekmRESTClient) CreateEkmConnection(ctx context.Context, req *kmspb.Crea httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "CreateEkmConnection") if err != nil { return err }
@@ -1051,7 +1169,21 @@ func (c *ekmRESTClient) UpdateEkmConnection(ctx context.Context, req *kmspb.Upda httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "UpdateEkmConnection") if err != nil { return err }
@@ -1102,7 +1234,21 @@ func (c *ekmRESTClient) GetEkmConfig(ctx context.Context, req *kmspb.GetEkmConfi httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetEkmConfig") if err != nil { return err }
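The REST path mirrors this: the inline Do/CheckResponse/ReadAll sequence on the right-hand side of these conflicts is exactly what executeHTTPRequest consolidates. A hypothetical sketch of such a helper, assuming "io", "net/http", "log/slog", and "google.golang.org/api/googleapi" are imported (the generated helper presumably also logs the request body passed in):

	// Hypothetical sketch only; the generated helper may differ.
	func executeHTTPRequest(ctx context.Context, client *http.Client, req *http.Request, logger *slog.Logger, body []byte, name string) ([]byte, error) {
		logger.DebugContext(ctx, "api request", "rpcName", name)
		resp, err := client.Do(req)
		if err != nil {
			return nil, err
		}
		defer resp.Body.Close()
		if err := googleapi.CheckResponse(resp); err != nil {
			return nil, err // non-2xx responses surface as *googleapi.Error
		}
		return io.ReadAll(resp.Body)
	}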
@@ -1167,7 +1313,21 @@ func (c *ekmRESTClient) UpdateEkmConfig(ctx context.Context, req *kmspb.UpdateEk httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "UpdateEkmConfig") if err != nil { return err }
@@ -1221,7 +1381,21 @@ func (c *ekmRESTClient) VerifyConnectivity(ctx context.Context, req *kmspb.Verif httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "VerifyConnectivity") if err != nil { return err }
@@ -1271,7 +1445,21 @@ func (c *ekmRESTClient) GetLocation(ctx context.Context, req *locationpb.GetLoca httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetLocation") if err != nil { return err }
@@ -1336,10 +1524,28 @@ func (c *ekmRESTClient) ListLocations(ctx context.Context, req *locationpb.ListL } httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListLocations") if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { return err }
@@ -1406,7 +1612,21 @@ func (c *ekmRESTClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolic httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetIamPolicy") if err != nil { return err }
@@ -1466,7 +1686,21 @@ func (c *ekmRESTClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolic httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "SetIamPolicy") if err != nil { return err }
@@ -1528,7 +1762,21 @@ func (c *ekmRESTClient) TestIamPermissions(ctx context.Context, req *iampb.TestI httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "TestIamPermissions") if err != nil { return err }
@@ -1578,7 +1826,21 @@ func (c *ekmRESTClient) GetOperation(ctx context.Context, req *longrunningpb.Get httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetOperation") if err != nil { return err }
diff --git a/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go b/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go index 07f8e4ba05..af7a60594d 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go +++ b/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go
@@ -20,7 +20,11 @@ import ( "bytes" "context" "fmt" "log/slog" "math" "net/http" "net/url"
@@ -30,6 +34,10 @@ import ( kmspb "cloud.google.com/go/kms/apiv1/kmspb" longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb" gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/option/internaloption"
@@ -1129,8 +1137,11 @@ type keyManagementGRPCClient struct { // The x-goog-* metadata to be sent with each request. xGoogHeaders []string logger *slog.Logger }
@@ -1171,7 +1182,10 @@ func NewKeyManagementClient(ctx context.Context, opts ...option.ClientOption) (* connPool: connPool, keyManagementClient: kmspb.NewKeyManagementServiceClient(connPool), CallOptions: &client.CallOptions, logger: internaloption.GetLogger(opts), operationsClient: longrunningpb.NewOperationsClient(connPool), iamPolicyClient: iampb.NewIAMPolicyClient(connPool), locationsClient: locationpb.NewLocationsClient(connPool),
@@ -1221,8 +1235,11 @@ type keyManagementRESTClient struct { // Points back to the CallOptions field of the containing KeyManagementClient CallOptions **KeyManagementCallOptions logger *slog.Logger }
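Both transports are constructed identically from the caller's perspective:

	ctx := context.Background()
	c, err := kms.NewKeyManagementRESTClient(ctx) // or kms.NewKeyManagementClient for gRPC
	if err != nil {
		// TODO: Handle error.
	}
	defer c.Close()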
// NewKeyManagementRESTClient creates a new key management service rest client.
@@ -1254,7 +1271,10 @@ func NewKeyManagementRESTClient(ctx context.Context, opts ...option.ClientOption endpoint: endpoint, httpClient: httpClient, CallOptions: &callOpts, logger: internaloption.GetLogger(opts), } c.setGoogleClientInfo()
@@ -1318,7 +1338,11 @@ func (c *keyManagementGRPCClient) ListKeyRings(ctx context.Context, req *kmspb.L } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.keyManagementClient.ListKeyRings, req, settings.GRPC, c.logger, "ListKeyRings") return err }, opts...) if err != nil {
@@ -1364,7 +1388,11 @@ func (c *keyManagementGRPCClient) ListCryptoKeys(ctx context.Context, req *kmspb } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.keyManagementClient.ListCryptoKeys, req, settings.GRPC, c.logger, "ListCryptoKeys") return err }, opts...) if err != nil {
@@ -1410,7 +1438,11 @@ func (c *keyManagementGRPCClient) ListCryptoKeyVersions(ctx context.Context, req } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.keyManagementClient.ListCryptoKeyVersions, req, settings.GRPC, c.logger, "ListCryptoKeyVersions") return err }, opts...) if err != nil {
@@ -1456,7 +1488,11 @@ func (c *keyManagementGRPCClient) ListImportJobs(ctx context.Context, req *kmspb } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.keyManagementClient.ListImportJobs, req, settings.GRPC, c.logger, "ListImportJobs") return err }, opts...) if err != nil {
@@ -1491,7 +1527,11 @@ func (c *keyManagementGRPCClient) GetKeyRing(ctx context.Context, req *kmspb.Get var resp *kmspb.KeyRing err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.keyManagementClient.GetKeyRing, req, settings.GRPC, c.logger, "GetKeyRing") return err }, opts...) if err != nil {
@@ -1509,7 +1549,11 @@ func (c *keyManagementGRPCClient) GetCryptoKey(ctx context.Context, req *kmspb.G var resp *kmspb.CryptoKey err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.keyManagementClient.GetCryptoKey, req, settings.GRPC, c.logger, "GetCryptoKey") return err }, opts...)
if err != nil {
@@ -1527,7 +1571,11 @@ func (c *keyManagementGRPCClient) GetCryptoKeyVersion(ctx context.Context, req * var resp *kmspb.CryptoKeyVersion err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.keyManagementClient.GetCryptoKeyVersion, req, settings.GRPC, c.logger, "GetCryptoKeyVersion") return err }, opts...) if err != nil {
@@ -1545,7 +1593,11 @@ func (c *keyManagementGRPCClient) GetPublicKey(ctx context.Context, req *kmspb.G var resp *kmspb.PublicKey err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.keyManagementClient.GetPublicKey, req, settings.GRPC, c.logger, "GetPublicKey") return err }, opts...) if err != nil {
@@ -1563,7 +1615,11 @@ func (c *keyManagementGRPCClient) GetImportJob(ctx context.Context, req *kmspb.G var resp *kmspb.ImportJob err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.keyManagementClient.GetImportJob, req, settings.GRPC, c.logger, "GetImportJob") return err }, opts...) if err != nil {
@@ -1581,7 +1637,11 @@ func (c *keyManagementGRPCClient) CreateKeyRing(ctx context.Context, req *kmspb. var resp *kmspb.KeyRing err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.keyManagementClient.CreateKeyRing, req, settings.GRPC, c.logger, "CreateKeyRing") return err }, opts...) if err != nil {
@@ -1599,7 +1659,11 @@ func (c *keyManagementGRPCClient) CreateCryptoKey(ctx context.Context, req *kmsp var resp *kmspb.CryptoKey err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.keyManagementClient.CreateCryptoKey, req, settings.GRPC, c.logger, "CreateCryptoKey") return err }, opts...) if err != nil {
@@ -1617,7 +1681,11 @@ func (c *keyManagementGRPCClient) CreateCryptoKeyVersion(ctx context.Context, re var resp *kmspb.CryptoKeyVersion err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.keyManagementClient.CreateCryptoKeyVersion, req, settings.GRPC, c.logger, "CreateCryptoKeyVersion") return err }, opts...) if err != nil {
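A typical CreateCryptoKey call against either transport (parent and key ID are placeholders):

	ck, err := c.CreateCryptoKey(ctx, &kmspb.CreateCryptoKeyRequest{
		Parent:      "projects/my-project/locations/us-central1/keyRings/my-ring", // placeholder
		CryptoKeyId: "my-key",                                                     // placeholder
		CryptoKey: &kmspb.CryptoKey{
			Purpose: kmspb.CryptoKey_ENCRYPT_DECRYPT,
		},
	})
	if err != nil {
		// TODO: Handle error.
	}
	_ = ck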
@@ -1635,7 +1703,11 @@ func (c *keyManagementGRPCClient) ImportCryptoKeyVersion(ctx context.Context, re var resp *kmspb.CryptoKeyVersion err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.keyManagementClient.ImportCryptoKeyVersion, req, settings.GRPC, c.logger, "ImportCryptoKeyVersion") return err }, opts...) if err != nil {
@@ -1653,7 +1725,11 @@ func (c *keyManagementGRPCClient) CreateImportJob(ctx context.Context, req *kmsp var resp *kmspb.ImportJob err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.keyManagementClient.CreateImportJob, req, settings.GRPC, c.logger, "CreateImportJob") return err }, opts...) if err != nil {
@@ -1671,7 +1747,11 @@ func (c *keyManagementGRPCClient) UpdateCryptoKey(ctx context.Context, req *kmsp var resp *kmspb.CryptoKey err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.keyManagementClient.UpdateCryptoKey, req, settings.GRPC, c.logger, "UpdateCryptoKey") return err }, opts...) if err != nil {
@@ -1689,7 +1769,11 @@ func (c *keyManagementGRPCClient) UpdateCryptoKeyVersion(ctx context.Context, re var resp *kmspb.CryptoKeyVersion err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.keyManagementClient.UpdateCryptoKeyVersion, req, settings.GRPC, c.logger, "UpdateCryptoKeyVersion") return err }, opts...) if err != nil {
@@ -1707,7 +1791,11 @@ func (c *keyManagementGRPCClient) UpdateCryptoKeyPrimaryVersion(ctx context.Cont var resp *kmspb.CryptoKey err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.keyManagementClient.UpdateCryptoKeyPrimaryVersion, req, settings.GRPC, c.logger, "UpdateCryptoKeyPrimaryVersion") return err }, opts...) if err != nil {
@@ -1725,7 +1813,11 @@ func (c *keyManagementGRPCClient) DestroyCryptoKeyVersion(ctx context.Context, r var resp *kmspb.CryptoKeyVersion err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.keyManagementClient.DestroyCryptoKeyVersion, req, settings.GRPC, c.logger, "DestroyCryptoKeyVersion") return err }, opts...)
if err != nil {
@@ -1743,7 +1835,11 @@ func (c *keyManagementGRPCClient) RestoreCryptoKeyVersion(ctx context.Context, r var resp *kmspb.CryptoKeyVersion err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.keyManagementClient.RestoreCryptoKeyVersion, req, settings.GRPC, c.logger, "RestoreCryptoKeyVersion") return err }, opts...) if err != nil {
@@ -1761,7 +1857,11 @@ func (c *keyManagementGRPCClient) Encrypt(ctx context.Context, req *kmspb.Encryp var resp *kmspb.EncryptResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.keyManagementClient.Encrypt, req, settings.GRPC, c.logger, "Encrypt") return err }, opts...) if err != nil {
@@ -1779,7 +1879,11 @@ func (c *keyManagementGRPCClient) Decrypt(ctx context.Context, req *kmspb.Decryp var resp *kmspb.DecryptResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.keyManagementClient.Decrypt, req, settings.GRPC, c.logger, "Decrypt") return err }, opts...) if err != nil {
@@ -1797,7 +1901,11 @@ func (c *keyManagementGRPCClient) RawEncrypt(ctx context.Context, req *kmspb.Raw var resp *kmspb.RawEncryptResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.keyManagementClient.RawEncrypt, req, settings.GRPC, c.logger, "RawEncrypt") return err }, opts...) if err != nil {
@@ -1815,7 +1923,11 @@ func (c *keyManagementGRPCClient) RawDecrypt(ctx context.Context, req *kmspb.Raw var resp *kmspb.RawDecryptResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.keyManagementClient.RawDecrypt, req, settings.GRPC, c.logger, "RawDecrypt") return err }, opts...) if err != nil {
@@ -1833,7 +1945,11 @@ func (c *keyManagementGRPCClient) AsymmetricSign(ctx context.Context, req *kmspb var resp *kmspb.AsymmetricSignResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.keyManagementClient.AsymmetricSign, req, settings.GRPC, c.logger, "AsymmetricSign") return err }, opts...) if err != nil {
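AsymmetricSign expects a pre-computed digest matching the key version's algorithm; a SHA-256 sketch (key version name is a placeholder, and "crypto/sha256" is assumed imported):

	sum := sha256.Sum256([]byte("data to sign"))
	resp, err := c.AsymmetricSign(ctx, &kmspb.AsymmetricSignRequest{
		Name: "projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key/cryptoKeyVersions/1", // placeholder
		Digest: &kmspb.Digest{
			Digest: &kmspb.Digest_Sha256{Sha256: sum[:]},
		},
	})
	if err != nil {
		// TODO: Handle error.
	}
	_ = resp.Signature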
@@ -1851,7 +1967,11 @@ func (c *keyManagementGRPCClient) AsymmetricDecrypt(ctx context.Context, req *km var resp *kmspb.AsymmetricDecryptResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.keyManagementClient.AsymmetricDecrypt, req, settings.GRPC, c.logger, "AsymmetricDecrypt") return err }, opts...) if err != nil {
@@ -1869,7 +1989,11 @@ func (c *keyManagementGRPCClient) MacSign(ctx context.Context, req *kmspb.MacSig var resp *kmspb.MacSignResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.keyManagementClient.MacSign, req, settings.GRPC, c.logger, "MacSign") return err }, opts...) if err != nil {
@@ -1887,7 +2011,11 @@ func (c *keyManagementGRPCClient) MacVerify(ctx context.Context, req *kmspb.MacV var resp *kmspb.MacVerifyResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.keyManagementClient.MacVerify, req, settings.GRPC, c.logger, "MacVerify") return err }, opts...) if err != nil {
@@ -1905,7 +2033,11 @@ func (c *keyManagementGRPCClient) GenerateRandomBytes(ctx context.Context, req * var resp *kmspb.GenerateRandomBytesResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.keyManagementClient.GenerateRandomBytes, req, settings.GRPC, c.logger, "GenerateRandomBytes") return err }, opts...) if err != nil {
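MacSign and MacVerify pair up as below (the HMAC key version name is a placeholder):

	data := []byte("payload")
	signed, err := c.MacSign(ctx, &kmspb.MacSignRequest{
		Name: "projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-mac-key/cryptoKeyVersions/1", // placeholder
		Data: data,
	})
	if err != nil {
		// TODO: Handle error.
	}
	verified, err := c.MacVerify(ctx, &kmspb.MacVerifyRequest{
		Name: signed.Name,
		Data: data,
		Mac:  signed.Mac,
	})
	if err != nil {
		// TODO: Handle error.
	}
	_ = verified.Success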
@@ -1923,7 +2055,11 @@ func (c *keyManagementGRPCClient) GetLocation(ctx context.Context, req *location var resp *locationpb.Location err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.locationsClient.GetLocation, req, settings.GRPC, c.logger, "GetLocation") return err }, opts...) if err != nil {
@@ -1952,7 +2088,11 @@ func (c *keyManagementGRPCClient) ListLocations(ctx context.Context, req *locati } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.locationsClient.ListLocations, req, settings.GRPC, c.logger, "ListLocations") return err }, opts...) if err != nil {
@@ -1987,7 +2127,11 @@ func (c *keyManagementGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.G var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.iamPolicyClient.GetIamPolicy, req, settings.GRPC, c.logger, "GetIamPolicy") return err }, opts...) if err != nil {
@@ -2005,7 +2149,11 @@ func (c *keyManagementGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.S var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.iamPolicyClient.SetIamPolicy, req, settings.GRPC, c.logger, "SetIamPolicy") return err }, opts...) if err != nil {
@@ -2023,7 +2171,11 @@ func (c *keyManagementGRPCClient) TestIamPermissions(ctx context.Context, req *i var resp *iampb.TestIamPermissionsResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.iamPolicyClient.TestIamPermissions, req, settings.GRPC, c.logger, "TestIamPermissions") return err }, opts...) if err != nil {
@@ -2041,7 +2193,11 @@ func (c *keyManagementGRPCClient) GetOperation(ctx context.Context, req *longrun var resp *longrunningpb.Operation err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.operationsClient.GetOperation, req, settings.GRPC, c.logger, "GetOperation") return err }, opts...) if err != nil {
@@ -2101,10 +2257,28 @@ func (c *keyManagementRESTClient) ListKeyRings(ctx context.Context, req *kmspb.L } httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListKeyRings") if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { return err }
@@ -2188,10 +2362,28 @@ func (c *keyManagementRESTClient) ListCryptoKeys(ctx context.Context, req *kmspb } httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListCryptoKeys") if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { return err }
@@ -2275,10 +2467,28 @@ func (c *keyManagementRESTClient) ListCryptoKeyVersions(ctx context.Context, req } httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListCryptoKeyVersions") if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { return err }
@@ -2359,10 +2569,28 @@ func (c *keyManagementRESTClient) ListImportJobs(ctx context.Context, req *kmspb } httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListImportJobs") if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { return err }
@@ -2425,7 +2653,21 @@ func (c *keyManagementRESTClient) GetKeyRing(ctx context.Context, req *kmspb.Get httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetKeyRing") if err != nil { return err }
@@ -2477,7 +2719,21 @@ func (c *keyManagementRESTClient) GetCryptoKey(ctx context.Context, req *kmspb.G httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetCryptoKey") if err != nil { return err }
@@ -2528,7 +2784,21 @@ func (c *keyManagementRESTClient) GetCryptoKeyVersion(ctx context.Context, req * httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetCryptoKeyVersion") if err != nil { return err }
@@ -2583,7 +2853,21 @@ func (c *keyManagementRESTClient) GetPublicKey(ctx context.Context, req *kmspb.G httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetPublicKey") if err != nil { return err }
@@ -2633,7 +2917,21 @@ func (c *keyManagementRESTClient) GetImportJob(ctx context.Context, req *kmspb.G httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetImportJob") if err != nil { return err }
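GetPublicKey returns the public key PEM-encoded, which callers typically parse with the standard library (key version name is a placeholder; "encoding/pem" and "crypto/x509" are assumed imported):

	pk, err := c.GetPublicKey(ctx, &kmspb.GetPublicKeyRequest{
		Name: "projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key/cryptoKeyVersions/1", // placeholder
	})
	if err != nil {
		// TODO: Handle error.
	}
	block, _ := pem.Decode([]byte(pk.Pem))
	pub, err := x509.ParsePKIXPublicKey(block.Bytes)
	if err != nil {
		// TODO: Handle error.
	}
	_ = pub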
@@ -2692,7 +2990,21 @@ func (c *keyManagementRESTClient) CreateKeyRing(ctx context.Context, req *kmspb. httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "CreateKeyRing") if err != nil { return err }
@@ -2758,7 +3070,21 @@ func (c *keyManagementRESTClient) CreateCryptoKey(ctx context.Context, req *kmsp httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "CreateCryptoKey") if err != nil { return err }
@@ -2820,7 +3146,21 @@ func (c *keyManagementRESTClient) CreateCryptoKeyVersion(ctx context.Context, re httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "CreateCryptoKeyVersion") if err != nil { return err }
@@ -2883,7 +3223,21 @@ func (c *keyManagementRESTClient) ImportCryptoKeyVersion(ctx context.Context, re httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "ImportCryptoKeyVersion") if err != nil { return err }
@@ -2945,7 +3299,21 @@ func (c *keyManagementRESTClient) CreateImportJob(ctx context.Context, req *kmsp httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "CreateImportJob") if err != nil { return err }
@@ -3009,7 +3377,21 @@ func (c *keyManagementRESTClient) UpdateCryptoKey(ctx context.Context, req *kmsp httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "UpdateCryptoKey") if err != nil { return err }
@@ -3084,7 +3466,21 @@ func (c *keyManagementRESTClient) UpdateCryptoKeyVersion(ctx context.Context, re httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "UpdateCryptoKeyVersion") if err != nil { return err }
@@ -3145,7 +3541,21 @@ func (c *keyManagementRESTClient) UpdateCryptoKeyPrimaryVersion(ctx context.Cont httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "UpdateCryptoKeyPrimaryVersion") if err != nil { return err }
@@ -3221,7 +3631,21 @@ func (c *keyManagementRESTClient) DestroyCryptoKeyVersion(ctx context.Context, r httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "DestroyCryptoKeyVersion") if err != nil { return err }
@@ -3285,7 +3709,21 @@ func (c *keyManagementRESTClient) RestoreCryptoKeyVersion(ctx context.Context, r httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "RestoreCryptoKeyVersion") if err != nil { return err }
@@ -3344,7 +3782,21 @@ func (c *keyManagementRESTClient) Encrypt(ctx context.Context, req *kmspb.Encryp httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "Encrypt") if err != nil { return err }
@@ -3403,7 +3855,21 @@ func (c *keyManagementRESTClient) Decrypt(ctx context.Context, req *kmspb.Decryp httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "Decrypt") if err != nil { return err }
@@ -3464,7 +3930,21 @@ func (c *keyManagementRESTClient) RawEncrypt(ctx context.Context, req *kmspb.Raw httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "RawEncrypt") if err != nil { return err }
@@ -3523,7 +4003,21 @@ func (c *keyManagementRESTClient) RawDecrypt(ctx context.Context, req *kmspb.Raw httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "RawDecrypt") if err != nil { return err }
@@ -3583,7 +4077,21 @@ func (c *keyManagementRESTClient) AsymmetricSign(ctx context.Context, req *kmspb httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "AsymmetricSign") if err != nil { return err }
@@ -3643,7 +4151,21 @@ func (c *keyManagementRESTClient) AsymmetricDecrypt(ctx context.Context, req *km httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "AsymmetricDecrypt") if err != nil { return err }
@@ -3701,7 +4223,21 @@ func (c *keyManagementRESTClient) MacSign(ctx context.Context, req *kmspb.MacSig httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "MacSign") if err != nil { return err }
@@ -3760,7 +4296,21 @@ func (c *keyManagementRESTClient) MacVerify(ctx context.Context, req *kmspb.MacV httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "MacVerify") if err != nil { return err }
+4367,21 @@ func (c *keyManagementRESTClient) GenerateRandomBytes(ctx context.Context, req * httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "GenerateRandomBytes") if err != nil { return err } @@ -3867,7 +4431,21 @@ func (c *keyManagementRESTClient) GetLocation(ctx context.Context, req *location httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetLocation") if err != nil { return err } @@ -3932,10 +4510,28 @@ func (c *keyManagementRESTClient) ListLocations(ctx context.Context, req *locati } httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListLocations") if err != nil { return err } if err := unm.Unmarshal(buf, resp); err != nil { return err } @@ -4002,7 +4598,21 @@ func (c *keyManagementRESTClient) GetIamPolicy(ctx context.Context, req *iampb.G httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetIamPolicy") if err != nil { return err } @@ -4062,7 +4672,21 @@ func (c *keyManagementRESTClient) SetIamPolicy(ctx context.Context, req *iampb.S httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "SetIamPolicy") if err != nil { return err } @@ -4124,7 +4748,21 @@ func (c *keyManagementRESTClient) TestIamPermissions(ctx context.Context, req *i httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "TestIamPermissions")
if err != nil { return err } @@ -4174,7 +4812,21 @@ func (c *keyManagementRESTClient) GetOperation(ctx context.Context, req *longrun httpReq = httpReq.WithContext(ctx) httpReq.Header = headers buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetOperation") if err != nil { return err } diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey.pb.go index e4737aca92..05be16d86e 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey.pb.go +++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey.pb.go @@ -14,7 +14,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.35.2 // protoc v4.25.3 // source: google/cloud/kms/v1/autokey.proto @@ -62,9 +66,17 @@ type CreateKeyHandleRequest struct { func (x *CreateKeyHandleRequest) Reset() { *x = CreateKeyHandleRequest{} mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *CreateKeyHandleRequest) String() string { @@ -75,7 +87,11 @@ func (*CreateKeyHandleRequest) ProtoMessage() {} func (x *CreateKeyHandleRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[0] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -125,9 +141,17 @@ type GetKeyHandleRequest struct { func (x *GetKeyHandleRequest) Reset() { *x = GetKeyHandleRequest{} mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *GetKeyHandleRequest) String() string { @@ -138,7 +162,11 @@ func (*GetKeyHandleRequest) ProtoMessage() {} func (x *GetKeyHandleRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[1] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -192,9 +220,17 @@ type KeyHandle struct { func (x *KeyHandle) Reset() { *x = KeyHandle{} mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi)
} func (x *KeyHandle) String() string { @@ -205,7 +241,11 @@ func (*KeyHandle) ProtoMessage() {} func (x *KeyHandle) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[2] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -252,9 +292,17 @@ type CreateKeyHandleMetadata struct { func (x *CreateKeyHandleMetadata) Reset() { *x = CreateKeyHandleMetadata{} mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *CreateKeyHandleMetadata) String() string { @@ -265,7 +313,11 @@ func (*CreateKeyHandleMetadata) ProtoMessage() {} func (x *CreateKeyHandleMetadata) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[3] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -297,8 +349,13 @@ type ListKeyHandlesRequest struct { // [KeyHandles][google.cloud.kms.v1.KeyHandle] can subsequently be obtained by // including the // [ListKeyHandlesResponse.next_page_token][google.cloud.kms.v1.ListKeyHandlesResponse.next_page_token] // in a subsequent request. If unspecified, at most 100 // [KeyHandles][google.cloud.kms.v1.KeyHandle] will be returned. PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` // Optional. Optional pagination token, returned earlier via // [ListKeyHandlesResponse.next_page_token][google.cloud.kms.v1.ListKeyHandlesResponse.next_page_token]. 
@@ -311,9 +368,17 @@ type ListKeyHandlesRequest struct { func (x *ListKeyHandlesRequest) Reset() { *x = ListKeyHandlesRequest{} mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *ListKeyHandlesRequest) String() string { @@ -324,7 +389,11 @@ func (*ListKeyHandlesRequest) ProtoMessage() {} func (x *ListKeyHandlesRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[4] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -384,9 +453,17 @@ type ListKeyHandlesResponse struct { func (x *ListKeyHandlesResponse) Reset() { *x = ListKeyHandlesResponse{} mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *ListKeyHandlesResponse) String() string { @@ -397,7 +474,11 @@ func (*ListKeyHandlesResponse) ProtoMessage() {} func (x *ListKeyHandlesResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[5] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -595,6 +676,83 @@ func file_google_cloud_kms_v1_autokey_proto_init() { if File_google_cloud_kms_v1_autokey_proto != nil { return }
type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ @@ -630,9 +788,15 @@ type AutokeyClient interface { // Creates a new [KeyHandle][google.cloud.kms.v1.KeyHandle], triggering the // provisioning of a new [CryptoKey][google.cloud.kms.v1.CryptoKey] for CMEK // use with the given resource type in the configured key project and the same // location. [GetOperation][google.longrunning.Operations.GetOperation] should // be used to resolve the resulting long-running operation and get the // resulting [KeyHandle][google.cloud.kms.v1.KeyHandle] and // [CryptoKey][google.cloud.kms.v1.CryptoKey]. CreateKeyHandle(ctx context.Context, in *CreateKeyHandleRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) // Returns the [KeyHandle][google.cloud.kms.v1.KeyHandle]. @@ -681,9 +845,15 @@ type AutokeyServer interface { // Creates a new [KeyHandle][google.cloud.kms.v1.KeyHandle], triggering the // provisioning of a new [CryptoKey][google.cloud.kms.v1.CryptoKey] for CMEK // use with the given resource type in the configured key project and the same // location. [GetOperation][google.longrunning.Operations.GetOperation] should // be used to resolve the resulting long-running operation and get the // resulting [KeyHandle][google.cloud.kms.v1.KeyHandle] and // [CryptoKey][google.cloud.kms.v1.CryptoKey]. CreateKeyHandle(context.Context, *CreateKeyHandleRequest) (*longrunningpb.Operation, error) // Returns the [KeyHandle][google.cloud.kms.v1.KeyHandle]. diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey_admin.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey_admin.pb.go index bb1abb0af6..0bd9759bdc 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey_admin.pb.go +++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey_admin.pb.go @@ -14,7 +14,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.35.2 // protoc v4.25.3 // source: google/cloud/kms/v1/autokey_admin.proto @@ -118,9 +122,17 @@ type UpdateAutokeyConfigRequest struct { func (x *UpdateAutokeyConfigRequest) Reset() { *x = UpdateAutokeyConfigRequest{} mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *UpdateAutokeyConfigRequest) String() string { @@ -131,7 +143,11 @@ func (*UpdateAutokeyConfigRequest) ProtoMessage() {} func (x *UpdateAutokeyConfigRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[0] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -174,9 +190,17 @@ type GetAutokeyConfigRequest struct { func (x *GetAutokeyConfigRequest) Reset() { *x = GetAutokeyConfigRequest{} mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *GetAutokeyConfigRequest) String() string { @@ -187,7 +211,11 @@ func (*GetAutokeyConfigRequest) ProtoMessage() {} func (x *GetAutokeyConfigRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[1] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -236,9 +264,17 @@ type AutokeyConfig struct { func (x *AutokeyConfig) Reset() { *x = AutokeyConfig{} mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *AutokeyConfig) String() string { @@ -249,7 +285,11 @@ func (*AutokeyConfig) ProtoMessage() {} func (x *AutokeyConfig) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[2] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -300,9 +340,17 @@ type ShowEffectiveAutokeyConfigRequest struct { func (x *ShowEffectiveAutokeyConfigRequest) Reset() { *x = ShowEffectiveAutokeyConfigRequest{} mi := 
&file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *ShowEffectiveAutokeyConfigRequest) String() string { @@ -313,7 +361,11 @@ func (*ShowEffectiveAutokeyConfigRequest) ProtoMessage() {} func (x *ShowEffectiveAutokeyConfigRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[3] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -349,9 +401,17 @@ type ShowEffectiveAutokeyConfigResponse struct { func (x *ShowEffectiveAutokeyConfigResponse) Reset() { *x = ShowEffectiveAutokeyConfigResponse{} mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *ShowEffectiveAutokeyConfigResponse) String() string { @@ -362,7 +422,11 @@ func (*ShowEffectiveAutokeyConfigResponse) ProtoMessage() {} func (x *ShowEffectiveAutokeyConfigResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[4] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -549,6 +613,71 @@ func file_google_cloud_kms_v1_autokey_admin_proto_init() { if File_google_cloud_kms_v1_autokey_admin_proto != nil { return }
type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go index d0739cca51..8f25eae9d2 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go +++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go @@ -14,7 +14,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.35.2 // protoc v4.25.3 // source: google/cloud/kms/v1/ekm_service.proto @@ -68,7 +72,10 @@ const ( // All [CryptoKeys][google.cloud.kms.v1.CryptoKey] created with this // [EkmConnection][google.cloud.kms.v1.EkmConnection] use EKM-side key // management operations initiated from Cloud KMS. This means that: // // * When a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] // associated with this [EkmConnection][google.cloud.kms.v1.EkmConnection] // is @@ -78,8 +85,12 @@ const ( // external key material. // - Destruction of external key material associated with this // [EkmConnection][google.cloud.kms.v1.EkmConnection] can be requested by // calling // [DestroyCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.DestroyCryptoKeyVersion]. // - Automatic rotation of key material is supported. EkmConnection_CLOUD_KMS EkmConnection_KeyManagementMode = 2 ) @@ -161,9 +172,17 @@ type ListEkmConnectionsRequest struct { func (x *ListEkmConnectionsRequest) Reset() { *x = ListEkmConnectionsRequest{} mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *ListEkmConnectionsRequest) String() string { @@ -174,7 +193,11 @@ func (*ListEkmConnectionsRequest) ProtoMessage() {} func (x *ListEkmConnectionsRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[0] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -244,9 +267,17 @@ type ListEkmConnectionsResponse struct { func (x *ListEkmConnectionsResponse) Reset() { *x = ListEkmConnectionsResponse{} mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *ListEkmConnectionsResponse) String() string { @@ -257,7 +288,11 @@ func (*ListEkmConnectionsResponse) 
ProtoMessage() {} func (x *ListEkmConnectionsResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[1] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -307,9 +342,17 @@ type GetEkmConnectionRequest struct { func (x *GetEkmConnectionRequest) Reset() { *x = GetEkmConnectionRequest{} mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *GetEkmConnectionRequest) String() string { @@ -320,7 +363,11 @@ func (*GetEkmConnectionRequest) ProtoMessage() {} func (x *GetEkmConnectionRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[2] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -363,9 +410,17 @@ type CreateEkmConnectionRequest struct { func (x *CreateEkmConnectionRequest) Reset() { *x = CreateEkmConnectionRequest{} mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *CreateEkmConnectionRequest) String() string { @@ -376,7 +431,11 @@ func (*CreateEkmConnectionRequest) ProtoMessage() {} func (x *CreateEkmConnectionRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[3] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -428,9 +487,17 @@ type UpdateEkmConnectionRequest struct { func (x *UpdateEkmConnectionRequest) Reset() { *x = UpdateEkmConnectionRequest{} mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *UpdateEkmConnectionRequest) String() string { @@ -441,7 +508,11 @@ func (*UpdateEkmConnectionRequest) ProtoMessage() {} func (x *UpdateEkmConnectionRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[4] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == 
nil { ms.StoreMessageInfo(mi) @@ -484,9 +555,17 @@ type GetEkmConfigRequest struct { func (x *GetEkmConfigRequest) Reset() { *x = GetEkmConfigRequest{} mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *GetEkmConfigRequest) String() string { @@ -497,7 +576,11 @@ func (*GetEkmConfigRequest) ProtoMessage() {} func (x *GetEkmConfigRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[5] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -534,9 +617,17 @@ type UpdateEkmConfigRequest struct { func (x *UpdateEkmConfigRequest) Reset() { *x = UpdateEkmConfigRequest{} mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *UpdateEkmConfigRequest) String() string { @@ -547,7 +638,11 @@ func (*UpdateEkmConfigRequest) ProtoMessage() {} func (x *UpdateEkmConfigRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[6] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -612,9 +707,17 @@ type Certificate struct { func (x *Certificate) Reset() { *x = Certificate{} mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *Certificate) String() string { @@ -625,7 +728,11 @@ func (*Certificate) ProtoMessage() {} func (x *Certificate) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[7] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -708,7 +815,11 @@ func (x *Certificate) GetSha256Fingerprint() string { // [CryptoKeys][google.cloud.kms.v1.CryptoKey] and // [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] with a // [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of // [EXTERNAL_VPC][google.cloud.kms.v1.ProtectionLevel.EXTERNAL_VPC], as well as
// performing cryptographic operations using keys created within the // [EkmConnection][google.cloud.kms.v1.EkmConnection]. type EkmConnection struct { @@ -747,9 +858,17 @@ type EkmConnection struct { func (x *EkmConnection) Reset() { *x = EkmConnection{} mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *EkmConnection) String() string { @@ -760,7 +879,11 @@ func (*EkmConnection) ProtoMessage() {} func (x *EkmConnection) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[8] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -822,7 +945,11 @@ func (x *EkmConnection) GetCryptoSpacePath() string { // [CryptoKeys][google.cloud.kms.v1.CryptoKey] and // [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] with a // [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of // [EXTERNAL_VPC][google.cloud.kms.v1.ProtectionLevel.EXTERNAL_VPC] in a given // project and location. type EkmConfig struct { state protoimpl.MessageState @@ -841,9 +968,17 @@ type EkmConfig struct { func (x *EkmConfig) Reset() { *x = EkmConfig{} mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *EkmConfig) String() string { @@ -854,7 +989,11 @@ func (*EkmConfig) ProtoMessage() {} func (x *EkmConfig) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[9] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -897,9 +1036,17 @@ type VerifyConnectivityRequest struct { func (x *VerifyConnectivityRequest) Reset() { *x = VerifyConnectivityRequest{} mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *VerifyConnectivityRequest) String() string { @@ -910,7 +1057,11 @@ func (*VerifyConnectivityRequest) ProtoMessage() {} func (x *VerifyConnectivityRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[10] if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -942,9 +1093,17 @@ type VerifyConnectivityResponse struct { func (x *VerifyConnectivityResponse) Reset() { *x = VerifyConnectivityResponse{} mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *VerifyConnectivityResponse) String() string { @@ -955,7 +1114,11 @@ func (*VerifyConnectivityResponse) ProtoMessage() {} func (x *VerifyConnectivityResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[11] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -999,9 +1162,17 @@ type EkmConnection_ServiceResolver struct { func (x *EkmConnection_ServiceResolver) Reset() { *x = EkmConnection_ServiceResolver{} mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *EkmConnection_ServiceResolver) String() string { @@ -1012,7 +1183,11 @@ func (*EkmConnection_ServiceResolver) ProtoMessage() {} func (x *EkmConnection_ServiceResolver) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[12] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1344,7 +1519,11 @@ var file_google_cloud_kms_v1_ekm_service_proto_rawDesc = []byte{ 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x42, 0x82, 0x02, 0xea, 0x41, 0x7c, 0x0a, 0x27, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x51, 0x70, 0x72, @@ -1358,10 +1537,17 @@ var file_google_cloud_kms_v1_ekm_service_proto_rawDesc = []byte{ 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 
0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6b, 0x6d, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0x3b, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4b, 0x6d, 0x73, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4b, 0x6d, 0x73, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1435,6 +1621,167 @@ func file_google_cloud_kms_v1_ekm_service_proto_init() { if File_google_cloud_kms_v1_ekm_service_proto != nil { return }
type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go index 3f40fa3ef8..d9bd076269 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go +++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go @@ -14,7 +14,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.35.2 // protoc v4.25.3 // source: google/cloud/kms/v1/resources.proto @@ -404,8 +408,11 @@ func (KeyOperationAttestation_AttestationFormat) EnumDescriptor() ([]byte, []int // The suffix following `HMAC_` corresponds to the hash algorithm being used // (eg. SHA256). // // Algorithms beginning with `PQ_` are post-quantum. // // For more information, see [Key purposes and algorithms] // (https://cloud.google.com/kms/docs/algorithms). type CryptoKeyVersion_CryptoKeyVersionAlgorithm int32 @@ -957,9 +964,17 @@ type KeyRing struct { func (x *KeyRing) Reset() { *x = KeyRing{} mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *KeyRing) String() string { @@ -970,7 +985,11 @@ func (*KeyRing) ProtoMessage() {} func (x *KeyRing) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[0] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1081,7 +1100,11 @@ type CryptoKey struct { // where all related cryptographic operations are performed. 
Only applicable // if [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] have a // [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of // [EXTERNAL_VPC][google.cloud.kms.v1.ProtectionLevel.EXTERNAL_VPC], with the // resource name in the format `projects/*/locations/*/ekmConnections/*`. // Note, this list is non-exhaustive and may apply to additional // [ProtectionLevels][google.cloud.kms.v1.ProtectionLevel] in the future. @@ -1099,9 +1122,17 @@ type CryptoKey struct { func (x *CryptoKey) Reset() { *x = CryptoKey{} mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *CryptoKey) String() string { @@ -1112,7 +1143,11 @@ func (*CryptoKey) ProtoMessage() {} func (x *CryptoKey) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[1] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1271,9 +1306,17 @@ type CryptoKeyVersionTemplate struct { func (x *CryptoKeyVersionTemplate) Reset() { *x = CryptoKeyVersionTemplate{} mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *CryptoKeyVersionTemplate) String() string { @@ -1284,7 +1327,11 @@ func (*CryptoKeyVersionTemplate) ProtoMessage() {} func (x *CryptoKeyVersionTemplate) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[2] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1332,9 +1379,17 @@ type KeyOperationAttestation struct { func (x *KeyOperationAttestation) Reset() { *x = KeyOperationAttestation{} mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *KeyOperationAttestation) String() string { @@ -1345,7 +1400,11 @@ func (*KeyOperationAttestation) ProtoMessage() {} func (x *KeyOperationAttestation) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[3] if x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1474,9 +1533,17 @@ type CryptoKeyVersion struct { func (x *CryptoKeyVersion) Reset() { *x = CryptoKeyVersion{} mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *CryptoKeyVersion) String() string { @@ -1487,7 +1554,11 @@ func (*CryptoKeyVersion) ProtoMessage() {} func (x *CryptoKeyVersion) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[4] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1660,9 +1731,17 @@ type PublicKey struct { func (x *PublicKey) Reset() { *x = PublicKey{} mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *PublicKey) String() string { @@ -1673,7 +1752,11 @@ func (*PublicKey) ProtoMessage() {} func (x *PublicKey) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[5] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1807,9 +1890,17 @@ type ImportJob struct { func (x *ImportJob) Reset() { *x = ImportJob{} mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *ImportJob) String() string { @@ -1820,7 +1911,11 @@ func (*ImportJob) ProtoMessage() {} func (x *ImportJob) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[6] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1928,9 +2023,17 @@ type ExternalProtectionLevelOptions struct { func (x *ExternalProtectionLevelOptions) Reset() { *x = ExternalProtectionLevelOptions{} mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi)
} func (x *ExternalProtectionLevelOptions) String() string { @@ -1941,7 +2044,11 @@ func (*ExternalProtectionLevelOptions) ProtoMessage() {} func (x *ExternalProtectionLevelOptions) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[7] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1990,9 +2097,17 @@ type KeyAccessJustificationsPolicy struct { func (x *KeyAccessJustificationsPolicy) Reset() { *x = KeyAccessJustificationsPolicy{} mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *KeyAccessJustificationsPolicy) String() string { @@ -2003,7 +2118,11 @@ func (*KeyAccessJustificationsPolicy) ProtoMessage() {} func (x *KeyAccessJustificationsPolicy) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[8] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2043,9 +2162,17 @@ type KeyOperationAttestation_CertificateChains struct { func (x *KeyOperationAttestation_CertificateChains) Reset() { *x = KeyOperationAttestation_CertificateChains{} mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *KeyOperationAttestation_CertificateChains) String() string { @@ -2056,7 +2183,11 @@ func (*KeyOperationAttestation_CertificateChains) ProtoMessage() {} func (x *KeyOperationAttestation_CertificateChains) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[10] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2110,9 +2241,17 @@ type ImportJob_WrappingPublicKey struct { func (x *ImportJob_WrappingPublicKey) Reset() { *x = ImportJob_WrappingPublicKey{} mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *ImportJob_WrappingPublicKey) String() string { @@ -2123,7 +2262,11 @@ func (*ImportJob_WrappingPublicKey) ProtoMessage() {} func (x *ImportJob_WrappingPublicKey) ProtoReflect() 
protoreflect.Message { mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[11] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2621,16 +2764,27 @@ var file_google_cloud_kms_v1_resources_proto_rawDesc = []byte{ 0x4f, 0x44, 0x55, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x41, 0x4c, 0x45, 0x52, 0x54, 0x10, 0x0a, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x45, 0x52, 0x5f, 0x41, 0x55, 0x54, 0x48, 0x4f, 0x52, 0x49, 0x5a, 0x45, 0x44, 0x5f, 0x57, 0x4f, 0x52, 0x4b, 0x46, 0x4c, 0x4f, 0x57, +<<<<<<< HEAD 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x49, 0x4e, 0x47, 0x10, 0x0b, 0x42, 0x85, 0x01, 0x0a, +======= + 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x49, 0x4e, 0x47, 0x10, 0x0b, 0x42, 0x88, 0x01, 0x0a, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x4b, 0x6d, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6b, 0x6d, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x6b, 0x6d, 0x73, +<<<<<<< HEAD 0x70, 0x62, 0x3b, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4b, 0x6d, 0x73, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4b, 0x6d, 0x73, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +======= + 0x70, 0x62, 0x3b, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4b, 0x6d, 0x73, 0x2e, 0x56, + 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, + 0x5c, 0x4b, 0x6d, 0x73, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var ( @@ -2723,6 +2877,143 @@ func file_google_cloud_kms_v1_resources_proto_init() { if File_google_cloud_kms_v1_resources_proto != nil { return } +<<<<<<< HEAD +======= + if !protoimpl.UnsafeEnabled { + file_google_cloud_kms_v1_resources_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*KeyRing); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_resources_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*CryptoKey); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_resources_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*CryptoKeyVersionTemplate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_resources_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*KeyOperationAttestation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_google_cloud_kms_v1_resources_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*CryptoKeyVersion); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_resources_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*PublicKey); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_resources_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*ImportJob); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_resources_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*ExternalProtectionLevelOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_resources_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*KeyAccessJustificationsPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_resources_proto_msgTypes[10].Exporter = func(v any, i int) any { + switch v := v.(*KeyOperationAttestation_CertificateChains); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_resources_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*ImportJob_WrappingPublicKey); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) file_google_cloud_kms_v1_resources_proto_msgTypes[1].OneofWrappers = []any{ (*CryptoKey_RotationPeriod)(nil), } diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/service.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/service.pb.go index ce28760227..4bee1e82c3 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/kmspb/service.pb.go +++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/service.pb.go @@ -14,7 +14,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: +<<<<<<< HEAD // protoc-gen-go v1.35.2 +======= +// protoc-gen-go v1.34.2 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // protoc v4.25.3 // source: google/cloud/kms/v1/service.proto @@ -78,9 +82,17 @@ type ListKeyRingsRequest struct { func (x *ListKeyRingsRequest) Reset() { *x = ListKeyRingsRequest{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ListKeyRingsRequest) String() string { @@ -91,7 +103,11 @@ func (*ListKeyRingsRequest) ProtoMessage() {} func (x *ListKeyRingsRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[0] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -178,9 +194,17 @@ type ListCryptoKeysRequest struct { func (x *ListCryptoKeysRequest) Reset() { *x = ListCryptoKeysRequest{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ListCryptoKeysRequest) String() string { @@ -191,7 +215,11 @@ func (*ListCryptoKeysRequest) ProtoMessage() {} func (x *ListCryptoKeysRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[1] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -286,9 +314,17 @@ type ListCryptoKeyVersionsRequest struct { func (x *ListCryptoKeyVersionsRequest) Reset() { *x = ListCryptoKeyVersionsRequest{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ListCryptoKeyVersionsRequest) String() string { @@ -299,7 +335,11 @@ func (*ListCryptoKeyVersionsRequest) ProtoMessage() {} func (x *ListCryptoKeyVersionsRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[2] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -391,9 +431,17 @@ type ListImportJobsRequest struct { func (x *ListImportJobsRequest) Reset() { *x = ListImportJobsRequest{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[3] ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ListImportJobsRequest) String() string { @@ -404,7 +452,11 @@ func (*ListImportJobsRequest) ProtoMessage() {} func (x *ListImportJobsRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[3] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -474,9 +526,17 @@ type ListKeyRingsResponse struct { func (x *ListKeyRingsResponse) Reset() { *x = ListKeyRingsResponse{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ListKeyRingsResponse) String() string { @@ -487,7 +547,11 @@ func (*ListKeyRingsResponse) ProtoMessage() {} func (x *ListKeyRingsResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[4] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -543,9 +607,17 @@ type ListCryptoKeysResponse struct { func (x *ListCryptoKeysResponse) Reset() { *x = ListCryptoKeysResponse{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ListCryptoKeysResponse) String() string { @@ -556,7 +628,11 @@ func (*ListCryptoKeysResponse) ProtoMessage() {} func (x *ListCryptoKeysResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[5] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -613,9 +689,17 @@ type ListCryptoKeyVersionsResponse struct { func (x *ListCryptoKeyVersionsResponse) Reset() { *x = ListCryptoKeyVersionsResponse{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ListCryptoKeyVersionsResponse) String() string { @@ -626,7 +710,11 @@ func (*ListCryptoKeyVersionsResponse) ProtoMessage() {} func 
(x *ListCryptoKeyVersionsResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[6] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -682,9 +770,17 @@ type ListImportJobsResponse struct { func (x *ListImportJobsResponse) Reset() { *x = ListImportJobsResponse{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ListImportJobsResponse) String() string { @@ -695,7 +791,11 @@ func (*ListImportJobsResponse) ProtoMessage() {} func (x *ListImportJobsResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[7] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -745,9 +845,17 @@ type GetKeyRingRequest struct { func (x *GetKeyRingRequest) Reset() { *x = GetKeyRingRequest{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *GetKeyRingRequest) String() string { @@ -758,7 +866,11 @@ func (*GetKeyRingRequest) ProtoMessage() {} func (x *GetKeyRingRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[8] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -794,9 +906,17 @@ type GetCryptoKeyRequest struct { func (x *GetCryptoKeyRequest) Reset() { *x = GetCryptoKeyRequest{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *GetCryptoKeyRequest) String() string { @@ -807,7 +927,11 @@ func (*GetCryptoKeyRequest) ProtoMessage() {} func (x *GetCryptoKeyRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[9] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -843,9 +967,17 @@ type GetCryptoKeyVersionRequest struct { func (x *GetCryptoKeyVersionRequest) Reset() { *x = 
GetCryptoKeyVersionRequest{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *GetCryptoKeyVersionRequest) String() string { @@ -856,7 +988,11 @@ func (*GetCryptoKeyVersionRequest) ProtoMessage() {} func (x *GetCryptoKeyVersionRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[10] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -892,9 +1028,17 @@ type GetPublicKeyRequest struct { func (x *GetPublicKeyRequest) Reset() { *x = GetPublicKeyRequest{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *GetPublicKeyRequest) String() string { @@ -905,7 +1049,11 @@ func (*GetPublicKeyRequest) ProtoMessage() {} func (x *GetPublicKeyRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[11] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -941,9 +1089,17 @@ type GetImportJobRequest struct { func (x *GetImportJobRequest) Reset() { *x = GetImportJobRequest{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *GetImportJobRequest) String() string { @@ -954,7 +1110,11 @@ func (*GetImportJobRequest) ProtoMessage() {} func (x *GetImportJobRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[12] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -997,9 +1157,17 @@ type CreateKeyRingRequest struct { func (x *CreateKeyRingRequest) Reset() { *x = CreateKeyRingRequest{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *CreateKeyRingRequest) String() string { @@ 
-1010,7 +1178,11 @@ func (*CreateKeyRingRequest) ProtoMessage() {} func (x *CreateKeyRingRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[13] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1075,9 +1247,17 @@ type CreateCryptoKeyRequest struct { func (x *CreateCryptoKeyRequest) Reset() { *x = CreateCryptoKeyRequest{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *CreateCryptoKeyRequest) String() string { @@ -1088,7 +1268,11 @@ func (*CreateCryptoKeyRequest) ProtoMessage() {} func (x *CreateCryptoKeyRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[14] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1149,9 +1333,17 @@ type CreateCryptoKeyVersionRequest struct { func (x *CreateCryptoKeyVersionRequest) Reset() { *x = CreateCryptoKeyVersionRequest{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *CreateCryptoKeyVersionRequest) String() string { @@ -1162,7 +1354,11 @@ func (*CreateCryptoKeyVersionRequest) ProtoMessage() {} func (x *CreateCryptoKeyVersionRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[15] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1216,9 +1412,13 @@ type ImportCryptoKeyVersionRequest struct { // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion], the // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] must be a child of // [ImportCryptoKeyVersionRequest.parent][google.cloud.kms.v1.ImportCryptoKeyVersionRequest.parent], +<<<<<<< HEAD // have been previously created via // [ImportCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.ImportCryptoKeyVersion], // and be in +======= + // have been previously created via [ImportCryptoKeyVersion][], and be in +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // [DESTROYED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROYED] // or // [IMPORT_FAILED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.IMPORT_FAILED] @@ -1291,9 +1491,17 @@ type ImportCryptoKeyVersionRequest struct { func (x *ImportCryptoKeyVersionRequest) Reset() { *x = ImportCryptoKeyVersionRequest{} +<<<<<<< HEAD mi := 
&file_google_cloud_kms_v1_service_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ImportCryptoKeyVersionRequest) String() string { @@ -1304,7 +1512,11 @@ func (*ImportCryptoKeyVersionRequest) ProtoMessage() {} func (x *ImportCryptoKeyVersionRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[16] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1404,9 +1616,17 @@ type CreateImportJobRequest struct { func (x *CreateImportJobRequest) Reset() { *x = CreateImportJobRequest{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *CreateImportJobRequest) String() string { @@ -1417,7 +1637,11 @@ func (*CreateImportJobRequest) ProtoMessage() {} func (x *CreateImportJobRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[17] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1468,9 +1692,17 @@ type UpdateCryptoKeyRequest struct { func (x *UpdateCryptoKeyRequest) Reset() { *x = UpdateCryptoKeyRequest{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *UpdateCryptoKeyRequest) String() string { @@ -1481,7 +1713,11 @@ func (*UpdateCryptoKeyRequest) ProtoMessage() {} func (x *UpdateCryptoKeyRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[18] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1526,9 +1762,17 @@ type UpdateCryptoKeyVersionRequest struct { func (x *UpdateCryptoKeyVersionRequest) Reset() { *x = UpdateCryptoKeyVersionRequest{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x 
*UpdateCryptoKeyVersionRequest) String() string { @@ -1539,7 +1783,11 @@ func (*UpdateCryptoKeyVersionRequest) ProtoMessage() {} func (x *UpdateCryptoKeyVersionRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[19] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1585,9 +1833,17 @@ type UpdateCryptoKeyPrimaryVersionRequest struct { func (x *UpdateCryptoKeyPrimaryVersionRequest) Reset() { *x = UpdateCryptoKeyPrimaryVersionRequest{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *UpdateCryptoKeyPrimaryVersionRequest) String() string { @@ -1598,7 +1854,11 @@ func (*UpdateCryptoKeyPrimaryVersionRequest) ProtoMessage() {} func (x *UpdateCryptoKeyPrimaryVersionRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[20] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1641,9 +1901,17 @@ type DestroyCryptoKeyVersionRequest struct { func (x *DestroyCryptoKeyVersionRequest) Reset() { *x = DestroyCryptoKeyVersionRequest{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *DestroyCryptoKeyVersionRequest) String() string { @@ -1654,7 +1922,11 @@ func (*DestroyCryptoKeyVersionRequest) ProtoMessage() {} func (x *DestroyCryptoKeyVersionRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[21] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1690,9 +1962,17 @@ type RestoreCryptoKeyVersionRequest struct { func (x *RestoreCryptoKeyVersionRequest) Reset() { *x = RestoreCryptoKeyVersionRequest{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *RestoreCryptoKeyVersionRequest) String() string { @@ -1703,7 +1983,11 @@ func (*RestoreCryptoKeyVersionRequest) ProtoMessage() {} func (x *RestoreCryptoKeyVersionRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[22] 
+<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1810,9 +2094,17 @@ type EncryptRequest struct { func (x *EncryptRequest) Reset() { *x = EncryptRequest{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *EncryptRequest) String() string { @@ -1823,7 +2115,11 @@ func (*EncryptRequest) ProtoMessage() {} func (x *EncryptRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[23] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1934,9 +2230,17 @@ type DecryptRequest struct { func (x *DecryptRequest) Reset() { *x = DecryptRequest{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *DecryptRequest) String() string { @@ -1947,7 +2251,11 @@ func (*DecryptRequest) ProtoMessage() {} func (x *DecryptRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[24] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2093,9 +2401,17 @@ type RawEncryptRequest struct { func (x *RawEncryptRequest) Reset() { *x = RawEncryptRequest{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *RawEncryptRequest) String() string { @@ -2106,7 +2422,11 @@ func (*RawEncryptRequest) ProtoMessage() {} func (x *RawEncryptRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[25] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2248,9 +2568,17 @@ type RawDecryptRequest struct { func (x *RawDecryptRequest) Reset() { *x = RawDecryptRequest{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := 
&file_google_cloud_kms_v1_service_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *RawDecryptRequest) String() string { @@ -2261,7 +2589,11 @@ func (*RawDecryptRequest) ProtoMessage() {} func (x *RawDecryptRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[26] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2400,9 +2732,17 @@ type AsymmetricSignRequest struct { func (x *AsymmetricSignRequest) Reset() { *x = AsymmetricSignRequest{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *AsymmetricSignRequest) String() string { @@ -2413,7 +2753,11 @@ func (*AsymmetricSignRequest) ProtoMessage() {} func (x *AsymmetricSignRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[27] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2502,9 +2846,17 @@ type AsymmetricDecryptRequest struct { func (x *AsymmetricDecryptRequest) Reset() { *x = AsymmetricDecryptRequest{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *AsymmetricDecryptRequest) String() string { @@ -2515,7 +2867,11 @@ func (*AsymmetricDecryptRequest) ProtoMessage() {} func (x *AsymmetricDecryptRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[28] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2587,9 +2943,17 @@ type MacSignRequest struct { func (x *MacSignRequest) Reset() { *x = MacSignRequest{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *MacSignRequest) String() string { @@ -2600,7 +2964,11 @@ func (*MacSignRequest) ProtoMessage() {} func (x *MacSignRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[29] +<<<<<<< HEAD if x != nil { +======= + if 
protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2680,8 +3048,12 @@ type MacVerifyRequest struct { // checksum. [KeyManagementService][google.cloud.kms.v1.KeyManagementService] // will report an error if the checksum verification fails. If you receive a // checksum error, your client should verify that +<<<<<<< HEAD // CRC32C([MacVerifyRequest.mac][google.cloud.kms.v1.MacVerifyRequest.mac]) is // equal to +======= + // CRC32C([MacVerifyRequest.tag][]) is equal to +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // [MacVerifyRequest.mac_crc32c][google.cloud.kms.v1.MacVerifyRequest.mac_crc32c], // and if so, perform a limited number of retries. A persistent mismatch may // indicate an issue in your computation of the CRC32C checksum. Note: This @@ -2694,9 +3066,17 @@ type MacVerifyRequest struct { func (x *MacVerifyRequest) Reset() { *x = MacVerifyRequest{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *MacVerifyRequest) String() string { @@ -2707,7 +3087,11 @@ func (*MacVerifyRequest) ProtoMessage() {} func (x *MacVerifyRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[30] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2779,9 +3163,17 @@ type GenerateRandomBytesRequest struct { func (x *GenerateRandomBytesRequest) Reset() { *x = GenerateRandomBytesRequest{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *GenerateRandomBytesRequest) String() string { @@ -2792,7 +3184,11 @@ func (*GenerateRandomBytesRequest) ProtoMessage() {} func (x *GenerateRandomBytesRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[31] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2894,9 +3290,17 @@ type EncryptResponse struct { func (x *EncryptResponse) Reset() { *x = EncryptResponse{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *EncryptResponse) String() string { @@ -2907,7 
+3311,11 @@ func (*EncryptResponse) ProtoMessage() {} func (x *EncryptResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[32] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3002,9 +3410,17 @@ type DecryptResponse struct { func (x *DecryptResponse) Reset() { *x = DecryptResponse{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *DecryptResponse) String() string { @@ -3015,7 +3431,11 @@ func (*DecryptResponse) ProtoMessage() {} func (x *DecryptResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[33] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3154,9 +3574,17 @@ type RawEncryptResponse struct { func (x *RawEncryptResponse) Reset() { *x = RawEncryptResponse{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *RawEncryptResponse) String() string { @@ -3167,7 +3595,11 @@ func (*RawEncryptResponse) ProtoMessage() {} func (x *RawEncryptResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[34] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3327,9 +3759,17 @@ type RawDecryptResponse struct { func (x *RawDecryptResponse) Reset() { *x = RawDecryptResponse{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *RawDecryptResponse) String() string { @@ -3340,7 +3780,11 @@ func (*RawDecryptResponse) ProtoMessage() {} func (x *RawDecryptResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[35] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3461,9 +3905,17 @@ type AsymmetricSignResponse struct { func (x *AsymmetricSignResponse) Reset() { *x = 
AsymmetricSignResponse{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *AsymmetricSignResponse) String() string { @@ -3474,7 +3926,11 @@ func (*AsymmetricSignResponse) ProtoMessage() {} func (x *AsymmetricSignResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[36] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3577,9 +4033,17 @@ type AsymmetricDecryptResponse struct { func (x *AsymmetricDecryptResponse) Reset() { *x = AsymmetricDecryptResponse{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *AsymmetricDecryptResponse) String() string { @@ -3590,7 +4054,11 @@ func (*AsymmetricDecryptResponse) ProtoMessage() {} func (x *AsymmetricDecryptResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[37] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3682,9 +4150,17 @@ type MacSignResponse struct { func (x *MacSignResponse) Reset() { *x = MacSignResponse{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *MacSignResponse) String() string { @@ -3695,7 +4171,11 @@ func (*MacSignResponse) ProtoMessage() {} func (x *MacSignResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[38] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3805,9 +4285,17 @@ type MacVerifyResponse struct { func (x *MacVerifyResponse) Reset() { *x = MacVerifyResponse{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *MacVerifyResponse) String() string { @@ -3818,7 
+4306,11 @@ func (*MacVerifyResponse) ProtoMessage() {} func (x *MacVerifyResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[39] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3902,9 +4394,17 @@ type GenerateRandomBytesResponse struct { func (x *GenerateRandomBytesResponse) Reset() { *x = GenerateRandomBytesResponse{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *GenerateRandomBytesResponse) String() string { @@ -3915,7 +4415,11 @@ func (*GenerateRandomBytesResponse) ProtoMessage() {} func (x *GenerateRandomBytesResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[40] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3962,9 +4466,17 @@ type Digest struct { func (x *Digest) Reset() { *x = Digest{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Digest) String() string { @@ -3975,7 +4487,11 @@ func (*Digest) ProtoMessage() {} func (x *Digest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[41] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4064,9 +4580,17 @@ type LocationMetadata struct { func (x *LocationMetadata) Reset() { *x = LocationMetadata{} +<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_cloud_kms_v1_service_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *LocationMetadata) String() string { @@ -4077,7 +4601,11 @@ func (*LocationMetadata) ProtoMessage() {} func (x *LocationMetadata) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[42] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5141,16 +5669,27 @@ var file_google_cloud_kms_v1_service_proto_rawDesc = []byte{ 0x6d, 0x2f, 0x61, 0x75, 0x74, 
0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, +<<<<<<< HEAD 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x42, 0x7c, +======= + 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x42, 0x7f, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x08, 0x4b, 0x6d, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6b, 0x6d, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0x3b, 0x6b, 0x6d, 0x73, 0x70, 0x62, +<<<<<<< HEAD 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4b, 0x6d, 0x73, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4b, 0x6d, 0x73, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +======= + 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x4b, 0x6d, 0x73, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4b, 0x6d, 0x73, 0x5c, 0x56, 0x31, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var ( @@ -5341,6 +5880,527 @@ func file_google_cloud_kms_v1_service_proto_init() { return } file_google_cloud_kms_v1_resources_proto_init() +<<<<<<< HEAD +======= + if !protoimpl.UnsafeEnabled { + file_google_cloud_kms_v1_service_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*ListKeyRingsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*ListCryptoKeysRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*ListCryptoKeyVersionsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*ListImportJobsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*ListKeyRingsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_cloud_kms_v1_service_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*ListCryptoKeysResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
 	file_google_cloud_kms_v1_service_proto_msgTypes[16].OneofWrappers = []any{
 		(*ImportCryptoKeyVersionRequest_RsaAesWrappedKey)(nil),
 	}
diff --git a/vendor/cloud.google.com/go/kms/internal/version.go b/vendor/cloud.google.com/go/kms/internal/version.go
index b3799f0518..8bf1733b74 100644
--- a/vendor/cloud.google.com/go/kms/internal/version.go
+++ b/vendor/cloud.google.com/go/kms/internal/version.go
@@ -15,4 +15,4 @@ package internal

 // Version is the current tagged release of the library.
 const Version = "1.20.4"
diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md
index e90454d01a..a82766a95d 100644
--- a/vendor/cloud.google.com/go/storage/CHANGES.md
+++ b/vendor/cloud.google.com/go/storage/CHANGES.md
@@ -1,6 +1,6 @@
 # Changes

 ## [1.50.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.49.0...storage/v1.50.0) (2025-01-09)
@@ -43,6 +43,6 @@
 * **storage:** Monitored resource detection ([#11197](https://github.com/googleapis/google-cloud-go/issues/11197)) ([911bcd8](https://github.com/googleapis/google-cloud-go/commit/911bcd8b1816256482bd52e85da7eaf00c315293))
 * **storage:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9))

 ## [1.48.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.47.0...storage/v1.48.0) (2024-12-05)
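For context before the client.go hunks below: storageClient is the vendored library's transport-agnostic seam, so the HTTP and gRPC implementations sit behind one method set. A minimal sketch of that pattern, with invented names rather than the library's real types:

    package main

    import (
        "context"
        "fmt"
    )

    // storageBackend is a toy stand-in for the storageClient interface: both
    // transports implement one method set, so callers never name a transport.
    type storageBackend interface {
        GetObject(ctx context.Context, bucket, object string) (string, error)
    }

    type httpBackend struct{}

    func (httpBackend) GetObject(_ context.Context, bucket, object string) (string, error) {
        return "http:" + bucket + "/" + object, nil
    }

    type grpcBackend struct{}

    func (grpcBackend) GetObject(_ context.Context, bucket, object string) (string, error) {
        return "grpc:" + bucket + "/" + object, nil
    }

    func main() {
        for _, b := range []storageBackend{httpBackend{}, grpcBackend{}} {
            got, _ := b.GetObject(context.Background(), "bucket", "obj")
            fmt.Println(got) // same call shape regardless of transport
        }
    }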
diff --git a/vendor/cloud.google.com/go/storage/client.go b/vendor/cloud.google.com/go/storage/client.go
index 1ea1d98ce5..550503301f 100644
--- a/vendor/cloud.google.com/go/storage/client.go
+++ b/vendor/cloud.google.com/go/storage/client.go
@@ -62,7 +62,7 @@ type storageClient interface {
 	GetObject(ctx context.Context, params *getObjectParams, opts ...storageOption) (*ObjectAttrs, error)
 	UpdateObject(ctx context.Context, params *updateObjectParams, opts ...storageOption) (*ObjectAttrs, error)
 	RestoreObject(ctx context.Context, params *restoreObjectParams, opts ...storageOption) (*ObjectAttrs, error)
 	MoveObject(ctx context.Context, params *moveObjectParams, opts ...storageOption) (*ObjectAttrs, error)

 	// Default Object ACL methods.

@@ -108,8 +108,8 @@ type storageClient interface {
 	ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (map[string]*Notification, error)
 	CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (*Notification, error)
 	DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) error
 	NewMultiRangeDownloader(ctx context.Context, params *newMultiRangeDownloaderParams, opts ...storageOption) (*MultiRangeDownloader, error)
 }

 // settings contains transport-agnostic configuration for API calls made via
@@ -263,9 +263,9 @@ type openWriterParams struct {
 	// sendCRC32C - see `Writer.SendCRC32C`.
 	// Optional.
 	sendCRC32C bool

 	// append - Write with appendable object semantics.
 	// Optional.
 	append bool

 	// Writer callbacks

@@ -283,6 +283,6 @@ type openWriterParams struct {
 	setObj func(*ObjectAttrs)
 }

 type newMultiRangeDownloaderParams struct {
 	bucket string
 	conds  *Conditions
@@ -292,6 +292,6 @@ type newMultiRangeDownloaderParams struct {
 	handle *ReadHandle
 }

 type newRangeReaderParams struct {
 	bucket string
 	conds  *Conditions
@@ -301,7 +301,7 @@ type newRangeReaderParams struct {
 	object         string
 	offset         int64
 	readCompressed bool // Use accept-encoding: gzip. Only works for HTTP currently.
 	handle         *ReadHandle
 }

 type getObjectParams struct {
@@ -329,6 +329,6 @@ type restoreObjectParams struct {
 	copySourceACL bool
 }

 type moveObjectParams struct {
 	bucket, srcObject, dstObject string
 	srcConds                     *Conditions
@@ -336,6 +336,6 @@ type moveObjectParams struct {
 	encryptionKey []byte
 }

 type composeObjectRequest struct {
 	dstBucket string
 	dstObject destinationObject
diff --git a/vendor/cloud.google.com/go/storage/experimental/experimental.go b/vendor/cloud.google.com/go/storage/experimental/experimental.go
index 5bcc59ad2f..4e1427fe32 100644
--- a/vendor/cloud.google.com/go/storage/experimental/experimental.go
+++ b/vendor/cloud.google.com/go/storage/experimental/experimental.go
@@ -73,6 +73,6 @@ type ReadStallTimeoutConfig struct {
 	// and retried.
 	TargetPercentile float64
 }

 // WithGRPCBidiReads provides an [option.ClientOption] that may be passed to
 // [cloud.google.com/go/storage.NewGRPCClient].
@@ -85,3 +85,3 @@ type ReadStallTimeoutConfig struct {
 func WithGRPCBidiReads() option.ClientOption {
 	return internal.WithGRPCBidiReads.(func() option.ClientOption)()
 }
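The WithGRPCBidiReads option above is documented as an option for storage.NewGRPCClient. A hedged usage sketch, assuming the vendored storage v1.50.x API shown in this diff:

    package main

    import (
        "context"
        "log"

        "cloud.google.com/go/storage"
        "cloud.google.com/go/storage/experimental"
    )

    func main() {
        ctx := context.Background()
        // Opt in to bi-directional gRPC reads, per the WithGRPCBidiReads
        // doc comment in experimental.go above.
        client, err := storage.NewGRPCClient(ctx, experimental.WithGRPCBidiReads())
        if err != nil {
            log.Fatal(err)
        }
        defer client.Close()
    }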
diff --git a/vendor/cloud.google.com/go/storage/grpc_client.go b/vendor/cloud.google.com/go/storage/grpc_client.go
index 2d243bf9fe..b9e176c8f9 100644
--- a/vendor/cloud.google.com/go/storage/grpc_client.go
+++ b/vendor/cloud.google.com/go/storage/grpc_client.go
@@ -24,18 +24,18 @@ import (
 	"log"
 	"net/url"
 	"os"
 	"sync"

 	"cloud.google.com/go/iam/apiv1/iampb"
 	"cloud.google.com/go/internal/trace"
 	gapic "cloud.google.com/go/storage/internal/apiv2"
 	"cloud.google.com/go/storage/internal/apiv2/storagepb"
 	"github.com/googleapis/gax-go/v2"
 	"google.golang.org/api/googleapi"
 	"google.golang.org/api/iterator"
 	"google.golang.org/api/option"
 	"google.golang.org/api/option/internaloption"
 	"google.golang.org/api/transport"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/encoding"
@@ -105,7 +105,7 @@ func defaultGRPCOptions() []option.ClientOption {
 		// Only enable DirectPath when the emulator is not being targeted.
 		defaults = append(defaults,
 			internaloption.EnableDirectPath(true),
 			internaloption.EnableDirectPathXds())
 	}

 	return defaults
@@ -116,6 +116,6 @@ func defaultGRPCOptions() []option.ClientOption {
 type grpcStorageClient struct {
 	raw      *gapic.Client
 	settings *settings
 	config   *storageConfig
 }

@@ -135,6 +135,6 @@ func enableClientMetrics(ctx context.Context, s *settings, config storageConfig) (*metricsContext, error) {
 		return nil, fmt.Errorf("gRPC Metrics: %w", err)
 	}
 	return metricsContext, nil
 }

 // newGRPCStorageClient initializes a new storageClient that uses the gRPC
@@ -167,7 +167,7 @@ func newGRPCStorageClient(ctx context.Context, opts ...storageOption) (storageClient, error) {
 	return &grpcStorageClient{
 		raw:      g,
 		settings: s,
 		config:   &config,
 	}, nil
 }

@@ -682,6 +682,6 @@ func (c *grpcStorageClient) RestoreObject(ctx context.Context, params *restoreObjectParams, opts ...storageOption) (*ObjectAttrs, error) {
 	return attrs, err
 }

 func (c *grpcStorageClient) MoveObject(ctx context.Context, params *moveObjectParams, opts ...storageOption) (*ObjectAttrs, error) {
 	s := callSettings(c.settings, opts...)
 	req := &storagepb.MoveObjectRequest{
@@ -712,6 +712,6 @@ func (c *grpcStorageClient) MoveObject(ctx context.Context, params *moveObjectParams, opts ...storageOption) (*ObjectAttrs, error) {
 	return attrs, err
 }

 // Default Object ACL methods.

 func (c *grpcStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error {
@@ -976,7 +976,7 @@ func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error) {
 	if err := applyCondsProto("Copy destination", defaultGen, req.dstObject.conds, call); err != nil {
 		return nil, err
 	}
 	if err := applySourceCondsProto("Copy source", req.srcObject.gen, req.srcObject.conds, call); err != nil {
 		return nil, err
 	}

@@ -1012,7 +1012,7 @@ func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error) {
 	return r, nil
 }

 // Custom codec to be used for unmarshaling BidiReadObjectResponse messages.
 // This is used to avoid a copy of object data in proto.Unmarshal.
 type bytesCodecV2 struct {
 }
@@ -1036,7 +1036,7 @@ func (bytesCodecV2) Marshal(v any) (mem.BufferSlice, error) {
 	return data, nil
 }

 // Unmarshal is used for data received for BidiReadObjectResponse. We want to preserve
 // the mem.BufferSlice in most cases rather than copying and calling proto.Unmarshal.
 func (bytesCodecV2) Unmarshal(data mem.BufferSlice, v any) error {
 	switch v := v.(type) {
@@ -1057,6 +1057,6 @@ func (bytesCodecV2) Name() string {
 	return ""
 }

 func contextMetadataFromBidiReadObject(req *storagepb.BidiReadObjectRequest) []string {
 	if len(req.GetReadObjectSpec().GetRoutingToken()) > 0 {
 		return []string{"x-goog-request-params", fmt.Sprintf("bucket=%s&routing_token=%s", req.GetReadObjectSpec().GetBucket(), req.GetReadObjectSpec().GetRoutingToken())}
@@ -1490,6 +1490,6 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) {
 	return c.NewRangeReaderReadObject(ctx, params, opts...)
 }

 	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.NewRangeReader")
 	defer func() { trace.EndSpan(ctx, err) }()
@@ -1504,13 +1504,13 @@
 	}
 	b := bucketResourceName(globalProjectAlias, params.bucket)

 	// Create a BidiReadObjectRequest.
 	spec := &storagepb.BidiReadObjectSpec{
 		Bucket: b,
 		Object: params.object,
 		CommonObjectRequestParams: toProtoCommonObjectRequestParams(params.encryptionKey),
 	}
 	if err := applyCondsProto("gRPCReader.NewRangeReader", params.gen, params.conds, spec); err != nil {
 		return nil, err
 	}
@@ -1523,6 +1523,6 @@
 		ReadObjectSpec: spec,
 	}
 	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, contextMetadataFromBidiReadObject(req)...)

 	// Define a function that initiates a Read with offset and length, assuming
 	// we have already read seen bytes.
@@ -1535,6 +1535,6 @@
 		cc, cancel := context.WithCancel(ctx)

 		// BidiReadObject can take multiple ranges, but we just request one in this case.
 		readRange := &storagepb.ReadRange{
 			ReadOffset: params.offset + seen,
@@ -1550,10 +1550,10 @@
 		req.ReadRanges = []*storagepb.ReadRange{readRange}

 		var stream storagepb.Storage_BidiReadObjectClient
 		var err error
 		var decoder *readResponseDecoder

 		err = run(cc, func(ctx context.Context) error {
 			stream, err = c.raw.BidiReadObject(ctx, s.gax...)
 			if err != nil {
 				return err
 			}
@@ -1565,13 +1565,13 @@
 			if err := stream.CloseSend(); err != nil {
 				return err
 			}
 			// Receive the message into databuf as a wire-encoded message so we can
 			// use a custom decoder to avoid an extra copy at the protobuf layer.
 			databufs := mem.BufferSlice{}
 			err := stream.RecvMsg(&databufs)
 			// These types of errors show up on the RecvMsg call, rather than the
 			// initialization of the stream via BidiReadObject above.
 			if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
 				return ErrObjectNotExist
 			}
@@ -1598,21 +1598,21 @@
 		return &readStreamResponse{
 			stream:  stream,
 			decoder: decoder,
 		}, cancel, nil
 	}

 	res, cancel, err := reopen(0)
 	if err != nil {
 		return nil, err
 	}

 	// The first message was Recv'd on stream open, use it to populate the
 	// object metadata and read handle.
 	msg := res.decoder.msg
 	obj := msg.GetMetadata()
 	handle := ReadHandle(msg.GetReadHandle().GetHandle())

 	// This is the size of the entire object, even if only a range was requested.
 	size := obj.GetSize()

@@ -1621,13 +1621,13 @@ var (
 		wantCRC  uint32
 		checkCRC bool
 	)
 	if checksums := obj.GetChecksums(); checksums != nil && checksums.Crc32C != nil {
 		if params.offset == 0 && params.length < 0 {
 			checkCRC = true
 		}
 		wantCRC = checksums.GetCrc32C()
 	}

 	startOffset := params.offset
 	if params.offset < 0 {
 		startOffset = size + params.offset
@@ -1648,6 +1648,6 @@
 	r = &Reader{
 		Attrs: ReaderObjectAttrs{
 			Size:            size,
 			StartOffset:     startOffset,
 			ContentType:     obj.GetContentType(),
 			ContentEncoding: obj.GetContentEncoding(),
 			CacheControl:    obj.GetCacheControl(),
@@ -1656,7 +1656,7 @@
 			Generation: obj.GetGeneration(),
 			CRC32C:     wantCRC,
 		},
 		objectMetadata: &metadata,
 		reader: &gRPCReader{
 			stream: res.stream,
 			reopen: reopen,
@@ -1670,8 +1670,8 @@
 			checkCRC: checkCRC,
 		},
 		checkCRC: checkCRC,
 		handle:   &handle,
 		remain:   remain,
 	}

 	// For a zero-length request, explicitly close the stream and set remaining
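The reader above resumes interrupted range reads by reopening the stream at params.offset plus the bytes already seen, rather than re-reading delivered data. A self-contained sketch of that bookkeeping (toy code, not the library's implementation):

    package main

    import (
        "errors"
        "fmt"
    )

    // fetchRange simulates opening a stream at a given offset; it fails once
    // to exercise the reopen path.
    func fetchRange(data []byte, offset int64, failOnce *bool) ([]byte, error) {
        if *failOnce {
            *failOnce = false
            return data[offset : offset+2], errors.New("transient stream error")
        }
        return data[offset:], nil
    }

    func main() {
        data := []byte("hello, world")
        var seen int64
        var out []byte
        fail := true
        for {
            chunk, err := fetchRange(data, seen, &fail)
            out = append(out, chunk...)
            seen += int64(len(chunk)) // track bytes already delivered
            if err != nil {
                continue // reopen at offset+seen, not from zero
            }
            break
        }
        fmt.Println(string(out)) // full payload, delivered exactly once
    }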
@@ -1685,16 +1685,16 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
 }

 func (c *grpcStorageClient) OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) {
 	var offset int64
 	errorf := params.setError
 	setObj := params.setObj
 	pr, pw := io.Pipe()

 	s := callSettings(c.settings, opts...)

 	// This function reads the data sent to the pipe and sends sets of messages
 	// on the gRPC client-stream as the buffer is filled.
 	go func() {
 		err := func() error {
 			// Unless the user told us the content type, we have to determine it from
 			// the first read.
@@ -1745,6 +1745,6 @@ func (c *grpcStorageClient) OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) {
 		errorf(err)
 		pr.CloseWithError(err)
 		close(params.donec)
 	}()

 	return pw, nil
@@ -1864,6 +1864,6 @@ func setUserProjectMetadata(ctx context.Context, project string) context.Context {
 }

 type readStreamResponse struct {
 	stream  storagepb.Storage_BidiReadObjectClient
 	decoder *readResponseDecoder
 }

@@ -1901,6 +1901,6 @@
 type gRPCReader struct {
 	seen, size int64
 	zeroRange  bool
 	stream     storagepb.Storage_BidiReadObjectClient
 	reopen     func(seen int64) (*readStreamResponse, context.CancelFunc, error)
 	leftovers  []byte
 	currMsg    *readResponseDecoder // decoder for the current message
@@ -2054,6 +2054,6 @@ func (r *gRPCReader) Close() error {
 	if r.cancel != nil {
 		r.cancel()
 	}
 	r.currMsg = nil
 	return nil
 }
@@ -2069,10 +2069,10 @@
 // The last error received is the one that is returned, which could be from
 // an attempt to reopen the stream.
 func (r *gRPCReader) recv() error {
 	databufs := mem.BufferSlice{}
 	err := r.stream.RecvMsg(&databufs)
 	var shouldRetry = ShouldRetry
 	if r.settings.retry != nil && r.settings.retry.shouldRetry != nil {
 		shouldRetry = r.settings.retry.shouldRetry
@@ -2095,6 +2095,6 @@
 // ReadObjectResponse field and subfield numbers.
 const (
 	// Top level fields.
 	metadataField        = protowire.Number(4)
 	objectRangeDataField = protowire.Number(6)
@@ -2106,6 +2106,6 @@
 	// Nested in ObjectRangeData.ChecksummedData
 	checksummedDataContentField = protowire.Number(1)
 	checksummedDataCRC32CField  = protowire.Number(2)
 )

 // readResponseDecoder is a wrapper on the raw message, used to decode one message
@@ -2118,9 +2118,9 @@ type readResponseDecoder struct {
 	currBuf int    // index of the current buffer being processed
 	currOff uint64 // offset in the current buffer
 	// Processed data
 	msg         *storagepb.BidiReadObjectResponse // processed response message with all fields other than object data populated
 	dataOffsets bufferSliceOffsets                // offsets of the object data in the message.
 	done        bool                              // true if the data has been completely read.
 }

 type bufferSliceOffsets struct {
@@ -2401,15 +2401,15 @@ func (d *readResponseDecoder) consumeBytesCopy() ([]byte, error) {
 	return b, nil
 }

 // readFullObjectResponse returns the BidiReadObjectResponse that is encoded in the
 // wire-encoded message buffer b, or an error if the message is invalid.
 // This must be used on the first recv of an object as it may contain all fields
 // of BidiReadObjectResponse, and we use or pass on those fields to the user.
 // This function is essentially identical to proto.Unmarshal, except it aliases
 // the data in the input []byte. If the proto library adds a feature to
 // Unmarshal that does that, this function can be dropped.
 func (d *readResponseDecoder) readFullObjectResponse() error {
 	msg := &storagepb.BidiReadObjectResponse{}

 	// Loop over the entire message, extracting fields as we go. This does not
 	// handle field concatenation, in which the contents of a single field
@@ -2423,6 +2423,6 @@ func (d *readResponseDecoder) readFullObjectResponse() error {
 		// Unmarshal the field according to its type. Only fields that are not
 		// nil will be present.
 		switch {
 		// This is a repeated field, so it can occur more than once. But, for now
 		// we can just take the first range per message since Reader only requests
 		// a single range.
 		case fieldNum == objectRangeDataField && fieldType == protowire.BytesType:
 			// The object data field was found. Initialize the data ranges assuming
 			// exactly one range in the message.
 			msg.ObjectDataRanges = []*storagepb.ObjectRangeData{{ChecksummedData: &storagepb.ChecksummedData{}, ReadRange: &storagepb.ReadRange{}}}
 			bytesFieldLen, err := d.consumeVarint()
 			if err != nil {
 				return fmt.Errorf("consuming bytes: %v", err)
 			}
 			var contentEndOff = d.off + bytesFieldLen
 			for d.off < contentEndOff {
 				gotNum, gotTyp, err := d.consumeTag()
 				if err != nil {
 					return fmt.Errorf("consuming objectRangeData tag: %w", err)
 				}
@@ -2498,11 +2498,11 @@ func (d *readResponseDecoder) readFullObjectResponse() error {
 			buf, err := d.consumeBytesCopy()
 			if err != nil {
 				return fmt.Errorf("invalid BidiReadObjectResponse.Metadata: %v", err)
 			}
 			if err := proto.Unmarshal(buf, msg.Metadata); err != nil {
 				return err
 			}
 		case fieldNum == readHandleField && fieldType == protowire.BytesType:
 			msg.ReadHandle = &storagepb.BidiReadHandle{}
 			buf, err := d.consumeBytesCopy()
@@ -2517,11 +2517,11 @@ func (d *readResponseDecoder) readFullObjectResponse() error {
 		default:
 			err := d.consumeFieldValue(fieldNum, fieldType)
 			if err != nil {
 				return fmt.Errorf("invalid field in BidiReadObjectResponse: %w", err)
 			}
 		}
 	}

 	d.msg = msg
 	return nil
 }
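readFullObjectResponse above walks the raw wire format with protowire instead of proto.Unmarshal so object data can be aliased rather than copied. A minimal, standalone illustration of that tag-walking technique:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protowire"
    )

    func main() {
        // A tiny wire-encoded message: field 1 (varint) = 150.
        b := protowire.AppendTag(nil, 1, protowire.VarintType)
        b = protowire.AppendVarint(b, 150)

        for len(b) > 0 {
            num, typ, n := protowire.ConsumeTag(b)
            if n < 0 {
                panic(protowire.ParseError(n))
            }
            b = b[n:]
            switch typ {
            case protowire.VarintType:
                v, n := protowire.ConsumeVarint(b)
                fmt.Printf("field %d = %d\n", num, v)
                b = b[n:]
            default:
                // Skip any field type we do not care about.
                n := protowire.ConsumeFieldValue(num, typ, b)
                b = b[n:]
            }
        }
    }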
@@ -2541,6 +2541,6 @@ func (r *gRPCReader) reopenStream() error {
 	return nil
 }

 func newGRPCWriter(c *grpcStorageClient, s *settings, params *openWriterParams, r io.Reader) (*gRPCWriter, error) {
 	if params.attrs.Retention != nil {
 		// TO-DO: remove once ObjectRetention is available - see b/308194853
@@ -2554,12 +2554,12 @@ func newGRPCWriter(c *grpcStorageClient, s *settings, params *openWriterParams, r io.Reader) (*gRPCWriter, error) {
 	if params.chunkSize > 0 {
 		size = params.chunkSize
 	}

 	// Round up chunksize to nearest 256KiB
 	if size%googleapi.MinUploadChunkSize != 0 {
 		size += googleapi.MinUploadChunkSize - (size % googleapi.MinUploadChunkSize)
 	}

 	if s.userProject != "" {
 		params.ctx = setUserProjectMetadata(params.ctx, s.userProject)
 	}
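The 256 KiB rounding above matters because resumable-upload chunks are sized in multiples of googleapi.MinUploadChunkSize. The same arithmetic in isolation:

    package main

    import "fmt"

    const minUploadChunkSize = 256 * 1024 // mirrors googleapi.MinUploadChunkSize

    // roundUpChunk rounds size up to the next 256 KiB boundary, exactly as the
    // writer above does for its buffer size.
    func roundUpChunk(size int) int {
        if size%minUploadChunkSize != 0 {
            size += minUploadChunkSize - (size % minUploadChunkSize)
        }
        return size
    }

    func main() {
        fmt.Println(roundUpChunk(1))      // 262144
        fmt.Println(roundUpChunk(262144)) // 262144 (already aligned)
        fmt.Println(roundUpChunk(300000)) // 524288
    }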
@@ -2571,6 +2571,6 @@ func newGRPCWriter(c *grpcStorageClient, s *settings, params *openWriterParams, r io.Reader) (*gRPCWriter, error) {
 	// WriteObject doesn't support the generation condition, so use default.
 	if err := applyCondsProto("WriteObject", defaultGen, params.conds, spec); err != nil {
 		return nil, err
 	}

 	return &gRPCWriter{
@@ -2581,6 +2581,6 @@
 		bucket: params.bucket,
 		attrs:  params.attrs,
 		conds:  params.conds,
 		spec:   spec,
 		encryptionKey: params.encryptionKey,
 		settings:      s,
@@ -2590,6 +2590,6 @@
 		forceEmptyContentType: params.forceEmptyContentType,
 		append:                params.append,
 	}, nil
 }

 // gRPCWriter is a wrapper around the the gRPC client-stream API that manages
@@ -2604,6 +2604,6 @@ type gRPCWriter struct {
 	bucket string
 	attrs  *ObjectAttrs
 	conds  *Conditions
 	spec          *storagepb.WriteObjectSpec
 	encryptionKey []byte
 	settings      *settings
@@ -2781,10 +2781,10 @@ func (w *gRPCWriter) newGRPCResumableBidiWriteBufferSender() (*gRPCResumableBidi
 		stream:   stream,
 		settings: w.settings,
 	}, nil
 }

 // queryProgress is a helper that queries the status of the resumable upload
 // associated with the given upload ID.
 func (s *gRPCResumableBidiWriteBufferSender) queryProgress() (int64, error) {
 	var persistedSize int64
 	err := run(s.ctx, func(ctx context.Context) error {
@@ -2920,11 +2920,11 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (obj
 		}
 	}
 	return
 }

 // read copies the data in the reader to the given buffer and reports how much
 // data was read into the buffer and if there is no more data to read (EOF).
 func (w *gRPCWriter) read() (int, bool, error) {
 	// Set n to -1 to start the Read loop.
 	var n, recvd int = -1, 0
 	var err error
@@ -2948,3 +2948,3 @@ func checkCanceled(err error) error {
 	return err
 }
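The retry paths in this file pause between attempts using gax.Backoff and a context-aware sleep. A small, self-contained sketch of that loop, with illustrative values rather than the library's tuned defaults:

    package main

    import (
        "context"
        "errors"
        "fmt"
        "time"

        "github.com/googleapis/gax-go/v2"
    )

    func main() {
        ctx := context.Background()
        bo := gax.Backoff{Initial: 100 * time.Millisecond, Max: 2 * time.Second, Multiplier: 2}

        attempts := 0
        op := func() error {
            attempts++
            if attempts < 3 {
                return errors.New("transient")
            }
            return nil
        }

        for {
            if err := op(); err == nil {
                break
            }
            // gax.Sleep honors ctx cancellation, like the writer's retry path.
            if err := gax.Sleep(ctx, bo.Pause()); err != nil {
                fmt.Println("cancelled:", err)
                return
            }
        }
        fmt.Println("succeeded after", attempts, "attempts")
    }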
diff --git a/vendor/cloud.google.com/go/storage/grpc_metrics.go b/vendor/cloud.google.com/go/storage/grpc_metrics.go
index f7bebd1def..15ecfd7eca 100644
--- a/vendor/cloud.google.com/go/storage/grpc_metrics.go
+++ b/vendor/cloud.google.com/go/storage/grpc_metrics.go
@@ -16,8 +16,8 @@ package storage

 import (
 	"context"
 	"errors"
 	"fmt"
 	"strings"
 	"time"

@@ -29,8 +29,8 @@ import (
 	"go.opentelemetry.io/otel/sdk/metric/metricdata"
 	"go.opentelemetry.io/otel/sdk/resource"
 	"google.golang.org/api/option"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/experimental/stats"
 	"google.golang.org/grpc/stats/opentelemetry"
 )

@@ -39,6 +39,6 @@ const (
 	metricPrefix = "storage.googleapis.com/client/"
 )

 // Added to help with tests
 type storageMonitoredResource struct {
 	project string
@@ -229,6 +229,6 @@ func (e *exporterLogSuppressor) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error {
 	return nil
 }

 func latencyHistogramBoundaries() []float64 {
 	boundaries := []float64{}
 	boundary := 0.0
@@ -268,6 +268,6 @@ func sizeHistogramBoundaries() []float64 {
 	return boundaries
 }
 func createHistogramView(name string, boundaries []float64) metric.View {
 	return metric.NewView(metric.Instrument{
 		Name: name,
@@ -278,6 +278,6 @@ func createHistogramView(name string, boundaries []float64) metric.View {
 	})
 }

 func metricFormatter(m metricdata.Metrics) string {
 	return metricPrefix + strings.ReplaceAll(string(m.Name), ".", "/")
 }
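The exporterLogSuppressor referenced above exists to surface a metrics-export failure once and then stay quiet, so a misconfigured exporter cannot flood the logs. The same idea reduced to a toy:

    package main

    import (
        "errors"
        "fmt"
    )

    // onceReporter mirrors the suppressor pattern: report the first failure,
    // swallow the repeats.
    type onceReporter struct {
        reported bool
    }

    func (o *onceReporter) report(err error) error {
        if err == nil || o.reported {
            return nil
        }
        o.reported = true
        return err
    }

    func main() {
        r := &onceReporter{}
        for i := 0; i < 3; i++ {
            if err := r.report(errors.New("PermissionDenied")); err != nil {
                fmt.Println("first failure:", err) // printed exactly once
            }
        }
    }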
+// https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric@v1.28.0#Exporter +func (e *exporterLogSuppressor) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error { + if err := e.exporter.Export(ctx, rm); err != nil && !e.emittedFailure { + if strings.Contains(err.Error(), "PermissionDenied") { + e.emittedFailure = true + return fmt.Errorf("gRPC metrics failed due permission issue: %w", err) + } + return err + } + return nil +} + +func (e *exporterLogSuppressor) Temporality(k metric.InstrumentKind) metricdata.Temporality { + return e.exporter.Temporality(k) +} + +func (e *exporterLogSuppressor) Aggregation(k metric.InstrumentKind) metric.Aggregation { + return e.exporter.Aggregation(k) +} + +func (e *exporterLogSuppressor) ForceFlush(ctx context.Context) error { + return e.exporter.ForceFlush(ctx) +} + +func (e *exporterLogSuppressor) Shutdown(ctx context.Context) error { + return e.exporter.Shutdown(ctx) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/cloud.google.com/go/storage/http_client.go b/vendor/cloud.google.com/go/storage/http_client.go index 61b20555f4..8ce372a766 100644 --- a/vendor/cloud.google.com/go/storage/http_client.go +++ b/vendor/cloud.google.com/go/storage/http_client.go @@ -34,6 +34,10 @@ import ( "cloud.google.com/go/iam/apiv1/iampb" "cloud.google.com/go/internal/optional" "cloud.google.com/go/internal/trace" +<<<<<<< HEAD +======= + "github.com/googleapis/gax-go/v2" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/googleapis/gax-go/v2/callctx" "golang.org/x/oauth2/google" "google.golang.org/api/googleapi" @@ -592,6 +596,7 @@ func (c *httpStorageClient) RestoreObject(ctx context.Context, params *restoreOb return newObject(obj), err } +<<<<<<< HEAD func (c *httpStorageClient) MoveObject(ctx context.Context, params *moveObjectParams, opts ...storageOption) (*ObjectAttrs, error) { s := callSettings(c.settings, opts...) req := c.raw.Objects.Move(params.bucket, params.srcObject, params.dstObject).Context(ctx) @@ -617,6 +622,8 @@ func (c *httpStorageClient) MoveObject(ctx context.Context, params *moveObjectPa return newObject(obj), err } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Default Object ACL methods. func (c *httpStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error { @@ -822,7 +829,11 @@ func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec if err := applyConds("Copy destination", defaultGen, req.dstObject.conds, call); err != nil { return nil, err } +<<<<<<< HEAD if err := applySourceConds("Copy source", req.srcObject.gen, req.srcObject.conds, call); err != nil { +======= + if err := applySourceConds(req.srcObject.gen, req.srcObject.conds, call); err != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil, err } if s.userProject != "" { @@ -861,11 +872,14 @@ func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec return r, nil } +<<<<<<< HEAD // NewMultiRangeDownloader is not supported by http client. 
func (c *httpStorageClient) NewMultiRangeDownloader(ctx context.Context, params *newMultiRangeDownloaderParams, opts ...storageOption) (mr *MultiRangeDownloader, err error) { return nil, errMethodNotSupported } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (c *httpStorageClient) NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.NewRangeReader") defer func() { trace.EndSpan(ctx, err) }() @@ -982,10 +996,13 @@ func (c *httpStorageClient) newRangeReaderJSON(ctx context.Context, params *newR } func (c *httpStorageClient) OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) { +<<<<<<< HEAD if params.append { return nil, errors.New("storage: append not supported on HTTP Client; use gRPC") } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) s := callSettings(c.settings, opts...) errorf := params.setError setObj := params.setObj @@ -1059,7 +1076,17 @@ func (c *httpStorageClient) OpenWriter(params *openWriterParams, opts ...storage } if useRetry { if s.retry != nil { +<<<<<<< HEAD call.WithRetry(s.retry.backoff, s.retry.shouldRetry) +======= + bo := &gax.Backoff{} + if s.retry.backoff != nil { + bo.Multiplier = s.retry.backoff.GetMultiplier() + bo.Initial = s.retry.backoff.GetInitial() + bo.Max = s.retry.backoff.GetMax() + } + call.WithRetry(bo, s.retry.shouldRetry) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } else { call.WithRetry(nil, nil) } @@ -1550,6 +1577,7 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen } } +<<<<<<< HEAD metadata := map[string]string{} for key, values := range res.Header { if len(values) > 0 && strings.HasPrefix(key, "X-Goog-Meta-") { @@ -1558,6 +1586,8 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen } } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) attrs := ReaderObjectAttrs{ Size: size, ContentType: res.Header.Get("Content-Type"), @@ -1571,11 +1601,18 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen Decompressed: res.Uncompressed || uncompressedByServer(res), } return &Reader{ +<<<<<<< HEAD Attrs: attrs, objectMetadata: &metadata, size: size, remain: remain, checkCRC: checkCRC, +======= + Attrs: attrs, + size: size, + remain: remain, + checkCRC: checkCRC, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) reader: &httpReader{ reopen: reopen, body: body, diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go index 03c3f8c170..10553cca10 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go @@ -1,4 +1,8 @@ +<<<<<<< HEAD // Copyright 2025 Google LLC +======= +// Copyright 2024 Google LLC +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
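Aside: the exporterLogSuppressor added to grpc_metrics.go above is a plain decorator over the OTel SDK's metric.Exporter. It forwards every method unchanged except Export, where a latched boolean turns repeated credential failures into silence. The same wrap-and-latch shape, reduced to a self-contained sketch with a made-up Sink interface standing in for the real exporter (note the vendored quirk it reproduces: after the first PermissionDenied, all later errors of any kind are swallowed):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// Sink stands in for the wrapped interface (metric.Exporter in the vendored
// code); only the failing method matters for the pattern.
type Sink interface {
	Export(data string) error
}

// suppressingSink mirrors exporterLogSuppressor: it forwards to the inner
// Sink, surfaces the first PermissionDenied failure, and stays quiet after.
type suppressingSink struct {
	inner          Sink
	emittedFailure bool
}

func (s *suppressingSink) Export(data string) error {
	// Same control flow as the vendored Export: once emittedFailure is set,
	// every later error (of any kind) is swallowed with a nil return.
	if err := s.inner.Export(data); err != nil && !s.emittedFailure {
		if strings.Contains(err.Error(), "PermissionDenied") {
			s.emittedFailure = true
			return fmt.Errorf("export failed due to a permission issue: %w", err)
		}
		return err
	}
	return nil
}

// deniedSink always fails, standing in for an exporter running without
// roles/monitoring.metricWriter.
type deniedSink struct{}

func (deniedSink) Export(string) error { return errors.New("rpc error: PermissionDenied") }

func main() {
	s := &suppressingSink{inner: deniedSink{}}
	fmt.Println(s.Export("m1")) // first failure is reported
	fmt.Println(s.Export("m2")) // repeats are suppressed: <nil>
}
```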
diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary_go123.go b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary_go123.go index a51532f60f..61145a195e 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary_go123.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary_go123.go @@ -1,4 +1,8 @@ +<<<<<<< HEAD // Copyright 2025 Google LLC +======= +// Copyright 2024 Google LLC +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go index 502fa56786..e46b441618 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go @@ -1,4 +1,8 @@ +<<<<<<< HEAD // Copyright 2025 Google LLC +======= +// Copyright 2024 Google LLC +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -64,12 +68,20 @@ // // The following is an example of making an API call with the newly created client, mentioned above. // +<<<<<<< HEAD // stream, err := c.BidiReadObject(ctx) +======= +// stream, err := c.BidiWriteObject(ctx) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // if err != nil { // // TODO: Handle error. // } // go func() { +<<<<<<< HEAD // reqs := []*storagepb.BidiReadObjectRequest{ +======= +// reqs := []*storagepb.BidiWriteObjectRequest{ +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // TODO: Create requests. // } // for _, req := range reqs { diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json b/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json index 7e4d99ec91..f89935cc8c 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json @@ -10,11 +10,14 @@ "grpc": { "libraryClient": "Client", "rpcs": { +<<<<<<< HEAD "BidiReadObject": { "methods": [ "BidiReadObject" ] }, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "BidiWriteObject": { "methods": [ "BidiWriteObject" @@ -75,11 +78,14 @@ "LockBucketRetentionPolicy" ] }, +<<<<<<< HEAD "MoveObject": { "methods": [ "MoveObject" ] }, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "QueryWriteStatus": { "methods": [ "QueryWriteStatus" diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/helpers.go b/vendor/cloud.google.com/go/storage/internal/apiv2/helpers.go index 0de9b31f64..dbd521c8c8 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/helpers.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/helpers.go @@ -1,4 +1,8 @@ +<<<<<<< HEAD // Copyright 2025 Google LLC +======= +// Copyright 2024 Google LLC +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
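Both sides of the doc.go conflict above show the same generated calling convention and differ only in which bidi stream they open (BidiReadObject on HEAD, BidiWriteObject on the WIP side). The part the package example elides after "TODO: Create requests." is the usual gRPC bidi loop: send on a goroutine, CloseSend, then Recv until io.EOF. A sketch in the same snippet style as doc.go (assumes ctx, a client c from this package, and an "io" import):

```go
stream, err := c.BidiWriteObject(ctx)
if err != nil {
	// TODO: Handle error.
}
go func() {
	reqs := []*storagepb.BidiWriteObjectRequest{
		// TODO: Create requests.
	}
	for _, req := range reqs {
		if err := stream.Send(req); err != nil {
			// TODO: Handle error.
		}
	}
	if err := stream.CloseSend(); err != nil {
		// TODO: Handle error.
	}
}()
for {
	resp, err := stream.Recv()
	if err == io.EOF {
		break
	}
	if err != nil {
		// TODO: Handle error.
	}
	_ = resp // TODO: Use resp.
}
```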
@@ -18,6 +22,7 @@ package storage import ( "context" +<<<<<<< HEAD "log/slog" "github.com/googleapis/gax-go/v2/internallog/grpclog" @@ -28,6 +33,12 @@ import ( const serviceName = "storage.googleapis.com" +======= + + "google.golang.org/api/option" +) + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // For more information on implementing a client constructor hook, see // https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors. type clientHookParams struct{} @@ -52,6 +63,7 @@ func DefaultAuthScopes() []string { "https://www.googleapis.com/auth/devstorage.read_write", } } +<<<<<<< HEAD func executeRPC[I proto.Message, O proto.Message](ctx context.Context, fn func(context.Context, I, ...grpc.CallOption) (O, error), req I, opts []grpc.CallOption, logger *slog.Logger, rpc string) (O, error) { var zero O @@ -63,3 +75,5 @@ func executeRPC[I proto.Message, O proto.Message](ctx context.Context, fn func(c logger.DebugContext(ctx, "api response", "serviceName", serviceName, "rpcName", rpc, "response", grpclog.ProtoMessageResponse(resp)) return resp, err } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go index 4a50254d89..3e134ae764 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go @@ -1,4 +1,8 @@ +<<<<<<< HEAD // Copyright 2025 Google LLC +======= +// Copyright 2024 Google LLC +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
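The HEAD side of helpers.go above introduces executeRPC, a generic shim over every unary method in storage_client.go: it logs the request, invokes the bound method value, logs the response, and returns a typed zero value on failure. The essential trick is passing c.client.GetBucket (or any method with the func(ctx, req, ...opts) (resp, error) shape) as fn and letting inference fix I and O. A sketch with stand-in types; the greeter below is invented for illustration, and the variadic string options merely mimic grpc.CallOption:

```go
package main

import (
	"context"
	"fmt"
)

// callRPC mirrors the shape of the vendored executeRPC helper: it wraps any
// unary method value and adds one shared spot for logging or instrumentation.
// The real helper constrains I and O to proto.Message; plain `any` keeps this
// sketch self-contained.
func callRPC[I any, O any](ctx context.Context, fn func(context.Context, I, ...string) (O, error), req I, rpc string) (O, error) {
	fmt.Printf("-> %s request\n", rpc) // stands in for logger.DebugContext(...)
	resp, err := fn(ctx, req)
	fmt.Printf("<- %s response\n", rpc)
	return resp, err
}

// greeter is a made-up client with two differently-typed unary methods.
type greeter struct{}

func (greeter) Hello(_ context.Context, name string, _ ...string) (string, error) {
	return "hello " + name, nil
}

func (greeter) Count(_ context.Context, n int, _ ...string) (int, error) {
	return n + 1, nil
}

func main() {
	var g greeter
	// Inference picks I=string,O=string and I=int,O=int respectively, just as
	// the generated client infers them from c.client.GetBucket and friends.
	s, _ := callRPC(context.Background(), g.Hello, "storage", "Hello")
	n, _ := callRPC(context.Background(), g.Count, 41, "Count")
	fmt.Println(s, n)
}
```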
@@ -19,7 +23,10 @@ package storage import ( "context" "fmt" +<<<<<<< HEAD "log/slog" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "math" "net/url" "regexp" @@ -57,7 +64,10 @@ type CallOptions struct { CancelResumableWrite []gax.CallOption GetObject []gax.CallOption ReadObject []gax.CallOption +<<<<<<< HEAD BidiReadObject []gax.CallOption +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) UpdateObject []gax.CallOption WriteObject []gax.CallOption BidiWriteObject []gax.CallOption @@ -65,7 +75,10 @@ type CallOptions struct { RewriteObject []gax.CallOption StartResumableWrite []gax.CallOption QueryWriteStatus []gax.CallOption +<<<<<<< HEAD MoveObject []gax.CallOption +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func defaultGRPCClientOptions() []option.ClientOption { @@ -279,6 +292,7 @@ func defaultCallOptions() *CallOptions { }) }), }, +<<<<<<< HEAD BidiReadObject: []gax.CallOption{ gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ @@ -291,6 +305,8 @@ func defaultCallOptions() *CallOptions { }) }), }, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) UpdateObject: []gax.CallOption{ gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { @@ -380,6 +396,7 @@ func defaultCallOptions() *CallOptions { }) }), }, +<<<<<<< HEAD MoveObject: []gax.CallOption{ gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { @@ -393,6 +410,8 @@ func defaultCallOptions() *CallOptions { }) }), }, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -416,7 +435,10 @@ type internalClient interface { CancelResumableWrite(context.Context, *storagepb.CancelResumableWriteRequest, ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) GetObject(context.Context, *storagepb.GetObjectRequest, ...gax.CallOption) (*storagepb.Object, error) ReadObject(context.Context, *storagepb.ReadObjectRequest, ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error) +<<<<<<< HEAD BidiReadObject(context.Context, ...gax.CallOption) (storagepb.Storage_BidiReadObjectClient, error) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) UpdateObject(context.Context, *storagepb.UpdateObjectRequest, ...gax.CallOption) (*storagepb.Object, error) WriteObject(context.Context, ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) BidiWriteObject(context.Context, ...gax.CallOption) (storagepb.Storage_BidiWriteObjectClient, error) @@ -424,7 +446,10 @@ type internalClient interface { RewriteObject(context.Context, *storagepb.RewriteObjectRequest, ...gax.CallOption) (*storagepb.RewriteResponse, error) StartResumableWrite(context.Context, *storagepb.StartResumableWriteRequest, ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) QueryWriteStatus(context.Context, *storagepb.QueryWriteStatusRequest, ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error) +<<<<<<< HEAD MoveObject(context.Context, *storagepb.MoveObjectRequest, ...gax.CallOption) (*storagepb.Object, error) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Client is a client for interacting with Cloud Storage API. @@ -544,6 +569,7 @@ func (c *Client) ComposeObject(ctx context.Context, req *storagepb.ComposeObject return c.internalClient.ComposeObject(ctx, req, opts...) } +<<<<<<< HEAD // DeleteObject deletes an object and its metadata. 
Deletions are permanent if versioning // is not enabled for the bucket, or if the generation parameter is used, or // if soft delete (at https://cloud.google.com/storage/docs/soft-delete) is not @@ -564,6 +590,14 @@ func (c *Client) ComposeObject(ctx context.Context, req *storagepb.ComposeObject // Requires storage.objects.delete // IAM permission (at https://cloud.google.com/iam/docs/overview#permissions) on // the bucket. +======= +// DeleteObject deletes an object and its metadata. +// +// Deletions are normally permanent when versioning is disabled or whenever +// the generation parameter is used. However, if soft delete is enabled for +// the bucket, deleted objects can be restored using RestoreObject until the +// soft delete retention period has passed. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (c *Client) DeleteObject(ctx context.Context, req *storagepb.DeleteObjectRequest, opts ...gax.CallOption) error { return c.internalClient.DeleteObject(ctx, req, opts...) } @@ -585,6 +619,7 @@ func (c *Client) CancelResumableWrite(ctx context.Context, req *storagepb.Cancel return c.internalClient.CancelResumableWrite(ctx, req, opts...) } +<<<<<<< HEAD // GetObject retrieves object metadata. // // IAM Permissions: @@ -593,10 +628,14 @@ func (c *Client) CancelResumableWrite(ctx context.Context, req *storagepb.Cancel // IAM permission (at https://cloud.google.com/iam/docs/overview#permissions) on // the bucket. To return object ACLs, the authenticated user must also have // the storage.objects.getIamPolicy permission. +======= +// GetObject retrieves an object’s metadata. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (c *Client) GetObject(ctx context.Context, req *storagepb.GetObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { return c.internalClient.GetObject(ctx, req, opts...) } +<<<<<<< HEAD // ReadObject retrieves object data. // // IAM Permissions: @@ -604,10 +643,14 @@ func (c *Client) GetObject(ctx context.Context, req *storagepb.GetObjectRequest, // Requires storage.objects.get // IAM permission (at https://cloud.google.com/iam/docs/overview#permissions) on // the bucket. +======= +// ReadObject reads an object’s data. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (c *Client) ReadObject(ctx context.Context, req *storagepb.ReadObjectRequest, opts ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error) { return c.internalClient.ReadObject(ctx, req, opts...) } +<<<<<<< HEAD // BidiReadObject reads an object’s data. // // This is a bi-directional API with the added support for reading multiple @@ -631,6 +674,8 @@ func (c *Client) BidiReadObject(ctx context.Context, opts ...gax.CallOption) (st return c.internalClient.BidiReadObject(ctx, opts...) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // UpdateObject updates an object’s metadata. // Equivalent to JSON API’s storage.objects.patch. func (c *Client) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { @@ -700,12 +745,15 @@ func (c *Client) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRe // Alternatively, the BidiWriteObject operation may be used to write an // object with controls over flushing and the ability to fetch the ability to // determine the current persisted size. +<<<<<<< HEAD // // IAM Permissions: // // Requires storage.objects.create // IAM permission (at https://cloud.google.com/iam/docs/overview#permissions) on // the bucket. 
+======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (c *Client) WriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) { return c.internalClient.WriteObject(ctx, opts...) } @@ -730,6 +778,7 @@ func (c *Client) BidiWriteObject(ctx context.Context, opts ...gax.CallOption) (s } // ListObjects retrieves a list of objects matching the criteria. +<<<<<<< HEAD // // IAM Permissions: // @@ -737,6 +786,8 @@ func (c *Client) BidiWriteObject(ctx context.Context, opts ...gax.CallOption) (s // IAM permission (at https://cloud.google.com/iam/docs/overview#permissions) // to use this method. To return object ACLs, the authenticated user must also // have the storage.objects.getIamPolicy permission. +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (c *Client) ListObjects(ctx context.Context, req *storagepb.ListObjectsRequest, opts ...gax.CallOption) *ObjectIterator { return c.internalClient.ListObjects(ctx, req, opts...) } @@ -747,6 +798,7 @@ func (c *Client) RewriteObject(ctx context.Context, req *storagepb.RewriteObject return c.internalClient.RewriteObject(ctx, req, opts...) } +<<<<<<< HEAD // StartResumableWrite starts a resumable write operation. This // method is part of the Resumable // upload (at https://cloud.google.com/storage/docs/resumable-uploads) feature. @@ -760,10 +812,16 @@ func (c *Client) RewriteObject(ctx context.Context, req *storagepb.RewriteObject // Requires storage.objects.create // IAM permission (at https://cloud.google.com/iam/docs/overview#permissions) on // the bucket. +======= +// StartResumableWrite starts a resumable write. How long the write operation remains valid, and +// what happens when the write operation becomes invalid, are +// service-dependent. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (c *Client) StartResumableWrite(ctx context.Context, req *storagepb.StartResumableWriteRequest, opts ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) { return c.internalClient.StartResumableWrite(ctx, req, opts...) } +<<<<<<< HEAD // QueryWriteStatus determines the persisted_size of an object that is being written. This // method is part of the resumable // upload (at https://cloud.google.com/storage/docs/resumable-uploads) feature. @@ -780,16 +838,33 @@ func (c *Client) StartResumableWrite(ctx context.Context, req *storagepb.StartRe // time to determine how much data has been logged for this object. // For any sequence of QueryWriteStatus() calls for a given // object name, the sequence of returned persisted_size values are +======= +// QueryWriteStatus determines the persisted_size for an object that is being written, which +// can then be used as the write_offset for the next Write() call. +// +// If the object does not exist (i.e., the object has been deleted, or the +// first Write() has not yet reached the service), this method returns the +// error NOT_FOUND. +// +// The client may call QueryWriteStatus() at any time to determine how +// much data has been processed for this object. This is useful if the +// client is buffering data and needs to know which data can be safely +// evicted. For any sequence of QueryWriteStatus() calls for a given +// object name, the sequence of returned persisted_size values will be +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // non-decreasing. 
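Read together, the StartResumableWrite and QueryWriteStatus comments above define the resume handshake: the first call mints an upload ID, and the second later reports persisted_size, which is non-decreasing and becomes the write_offset for the next write. A sketch of that handshake against this client in the document's own snippet style (error handling abbreviated; the bucket resource name is a placeholder, and the intervening WriteObject stream is assumed to have died mid-upload):

```go
// Mint a resumable session; the spec carries the destination object.
start, err := c.StartResumableWrite(ctx, &storagepb.StartResumableWriteRequest{
	WriteObjectSpec: &storagepb.WriteObjectSpec{
		Resource: &storagepb.Object{
			Bucket: "projects/_/buckets/my-bucket", // placeholder bucket
			Name:   "my-object",
		},
	},
})
if err != nil {
	// TODO: Handle error.
}

// ... WriteObject traffic, then the connection drops mid-upload ...

// Ask how much actually landed. persisted_size is non-decreasing, so
// everything below it is durable and the upload resumes from that offset.
status, err := c.QueryWriteStatus(ctx, &storagepb.QueryWriteStatusRequest{
	UploadId: start.GetUploadId(),
})
if err != nil {
	// TODO: Handle error (NOT_FOUND: the session expired or never started).
}
resumeOffset := status.GetPersistedSize() // next WriteObjectRequest.WriteOffset
_ = resumeOffset
```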
func (c *Client) QueryWriteStatus(ctx context.Context, req *storagepb.QueryWriteStatusRequest, opts ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error) { return c.internalClient.QueryWriteStatus(ctx, req, opts...) } +<<<<<<< HEAD // MoveObject moves the source object to the destination object in the same bucket. func (c *Client) MoveObject(ctx context.Context, req *storagepb.MoveObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { return c.internalClient.MoveObject(ctx, req, opts...) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // gRPCClient is a client for interacting with Cloud Storage API over gRPC transport. // // Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. @@ -805,8 +880,11 @@ type gRPCClient struct { // The x-goog-* metadata to be sent with each request. xGoogHeaders []string +<<<<<<< HEAD logger *slog.Logger +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // NewClient creates a new storage client based on gRPC. @@ -854,7 +932,10 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error connPool: connPool, client: storagepb.NewStorageClient(connPool), CallOptions: &client.CallOptions, +<<<<<<< HEAD logger: internaloption.GetLogger(opts), +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } c.setGoogleClientInfo() @@ -905,7 +986,11 @@ func (c *gRPCClient) DeleteBucket(ctx context.Context, req *storagepb.DeleteBuck opts = append((*c.CallOptions).DeleteBucket[0:len((*c.CallOptions).DeleteBucket):len((*c.CallOptions).DeleteBucket)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error +<<<<<<< HEAD _, err = executeRPC(ctx, c.client.DeleteBucket, req, settings.GRPC, c.logger, "DeleteBucket") +======= + _, err = c.client.DeleteBucket(ctx, req, settings.GRPC...) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) return err @@ -929,7 +1014,11 @@ func (c *gRPCClient) GetBucket(ctx context.Context, req *storagepb.GetBucketRequ var resp *storagepb.Bucket err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error +<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.GetBucket, req, settings.GRPC, c.logger, "GetBucket") +======= + resp, err = c.client.GetBucket(ctx, req, settings.GRPC...) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -959,7 +1048,11 @@ func (c *gRPCClient) CreateBucket(ctx context.Context, req *storagepb.CreateBuck var resp *storagepb.Bucket err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error +<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.CreateBucket, req, settings.GRPC, c.logger, "CreateBucket") +======= + resp, err = c.client.CreateBucket(ctx, req, settings.GRPC...) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -997,7 +1090,11 @@ func (c *gRPCClient) ListBuckets(ctx context.Context, req *storagepb.ListBuckets } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error +<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.ListBuckets, req, settings.GRPC, c.logger, "ListBuckets") +======= + resp, err = c.client.ListBuckets(ctx, req, settings.GRPC...) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) 
if err != nil { @@ -1041,7 +1138,11 @@ func (c *gRPCClient) LockBucketRetentionPolicy(ctx context.Context, req *storage var resp *storagepb.Bucket err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error +<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.LockBucketRetentionPolicy, req, settings.GRPC, c.logger, "LockBucketRetentionPolicy") +======= + resp, err = c.client.LockBucketRetentionPolicy(ctx, req, settings.GRPC...) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1068,7 +1169,11 @@ func (c *gRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRe var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error +<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.GetIamPolicy, req, settings.GRPC, c.logger, "GetIamPolicy") +======= + resp, err = c.client.GetIamPolicy(ctx, req, settings.GRPC...) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1095,7 +1200,11 @@ func (c *gRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRe var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error +<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.SetIamPolicy, req, settings.GRPC, c.logger, "SetIamPolicy") +======= + resp, err = c.client.SetIamPolicy(ctx, req, settings.GRPC...) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1128,7 +1237,11 @@ func (c *gRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamP var resp *iampb.TestIamPermissionsResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error +<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.TestIamPermissions, req, settings.GRPC, c.logger, "TestIamPermissions") +======= + resp, err = c.client.TestIamPermissions(ctx, req, settings.GRPC...) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1155,7 +1268,11 @@ func (c *gRPCClient) UpdateBucket(ctx context.Context, req *storagepb.UpdateBuck var resp *storagepb.Bucket err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error +<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.UpdateBucket, req, settings.GRPC, c.logger, "UpdateBucket") +======= + resp, err = c.client.UpdateBucket(ctx, req, settings.GRPC...) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1182,7 +1299,11 @@ func (c *gRPCClient) ComposeObject(ctx context.Context, req *storagepb.ComposeOb var resp *storagepb.Object err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error +<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.ComposeObject, req, settings.GRPC, c.logger, "ComposeObject") +======= + resp, err = c.client.ComposeObject(ctx, req, settings.GRPC...) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1208,7 +1329,11 @@ func (c *gRPCClient) DeleteObject(ctx context.Context, req *storagepb.DeleteObje opts = append((*c.CallOptions).DeleteObject[0:len((*c.CallOptions).DeleteObject):len((*c.CallOptions).DeleteObject)], opts...) 
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error +<<<<<<< HEAD _, err = executeRPC(ctx, c.client.DeleteObject, req, settings.GRPC, c.logger, "DeleteObject") +======= + _, err = c.client.DeleteObject(ctx, req, settings.GRPC...) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) return err @@ -1232,7 +1357,11 @@ func (c *gRPCClient) RestoreObject(ctx context.Context, req *storagepb.RestoreOb var resp *storagepb.Object err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error +<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.RestoreObject, req, settings.GRPC, c.logger, "RestoreObject") +======= + resp, err = c.client.RestoreObject(ctx, req, settings.GRPC...) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1259,7 +1388,11 @@ func (c *gRPCClient) CancelResumableWrite(ctx context.Context, req *storagepb.Ca var resp *storagepb.CancelResumableWriteResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error +<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.CancelResumableWrite, req, settings.GRPC, c.logger, "CancelResumableWrite") +======= + resp, err = c.client.CancelResumableWrite(ctx, req, settings.GRPC...) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1286,7 +1419,11 @@ func (c *gRPCClient) GetObject(ctx context.Context, req *storagepb.GetObjectRequ var resp *storagepb.Object err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error +<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.GetObject, req, settings.GRPC, c.logger, "GetObject") +======= + resp, err = c.client.GetObject(ctx, req, settings.GRPC...) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1313,6 +1450,7 @@ func (c *gRPCClient) ReadObject(ctx context.Context, req *storagepb.ReadObjectRe var resp storagepb.Storage_ReadObjectClient err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error +<<<<<<< HEAD c.logger.DebugContext(ctx, "api streaming client request", "serviceName", serviceName, "rpcName", "ReadObject") resp, err = c.client.ReadObject(ctx, req, settings.GRPC...) c.logger.DebugContext(ctx, "api streaming client response", "serviceName", serviceName, "rpcName", "ReadObject") @@ -1333,6 +1471,9 @@ func (c *gRPCClient) BidiReadObject(ctx context.Context, opts ...gax.CallOption) c.logger.DebugContext(ctx, "api streaming client request", "serviceName", serviceName, "rpcName", "BidiReadObject") resp, err = c.client.BidiReadObject(ctx, settings.GRPC...) c.logger.DebugContext(ctx, "api streaming client response", "serviceName", serviceName, "rpcName", "BidiReadObject") +======= + resp, err = c.client.ReadObject(ctx, req, settings.GRPC...) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1359,7 +1500,11 @@ func (c *gRPCClient) UpdateObject(ctx context.Context, req *storagepb.UpdateObje var resp *storagepb.Object err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error +<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.UpdateObject, req, settings.GRPC, c.logger, "UpdateObject") +======= + resp, err = c.client.UpdateObject(ctx, req, settings.GRPC...) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) 
if err != nil { @@ -1374,9 +1519,13 @@ func (c *gRPCClient) WriteObject(ctx context.Context, opts ...gax.CallOption) (s opts = append((*c.CallOptions).WriteObject[0:len((*c.CallOptions).WriteObject):len((*c.CallOptions).WriteObject)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error +<<<<<<< HEAD c.logger.DebugContext(ctx, "api streaming client request", "serviceName", serviceName, "rpcName", "WriteObject") resp, err = c.client.WriteObject(ctx, settings.GRPC...) c.logger.DebugContext(ctx, "api streaming client response", "serviceName", serviceName, "rpcName", "WriteObject") +======= + resp, err = c.client.WriteObject(ctx, settings.GRPC...) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1391,9 +1540,13 @@ func (c *gRPCClient) BidiWriteObject(ctx context.Context, opts ...gax.CallOption opts = append((*c.CallOptions).BidiWriteObject[0:len((*c.CallOptions).BidiWriteObject):len((*c.CallOptions).BidiWriteObject)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error +<<<<<<< HEAD c.logger.DebugContext(ctx, "api streaming client request", "serviceName", serviceName, "rpcName", "BidiWriteObject") resp, err = c.client.BidiWriteObject(ctx, settings.GRPC...) c.logger.DebugContext(ctx, "api streaming client response", "serviceName", serviceName, "rpcName", "BidiWriteObject") +======= + resp, err = c.client.BidiWriteObject(ctx, settings.GRPC...) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1431,7 +1584,11 @@ func (c *gRPCClient) ListObjects(ctx context.Context, req *storagepb.ListObjects } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error +<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.ListObjects, req, settings.GRPC, c.logger, "ListObjects") +======= + resp, err = c.client.ListObjects(ctx, req, settings.GRPC...) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1478,7 +1635,11 @@ func (c *gRPCClient) RewriteObject(ctx context.Context, req *storagepb.RewriteOb var resp *storagepb.RewriteResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error +<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.RewriteObject, req, settings.GRPC, c.logger, "RewriteObject") +======= + resp, err = c.client.RewriteObject(ctx, req, settings.GRPC...) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1505,7 +1666,11 @@ func (c *gRPCClient) StartResumableWrite(ctx context.Context, req *storagepb.Sta var resp *storagepb.StartResumableWriteResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error +<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.StartResumableWrite, req, settings.GRPC, c.logger, "StartResumableWrite") +======= + resp, err = c.client.StartResumableWrite(ctx, req, settings.GRPC...) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) 
if err != nil { @@ -1532,6 +1697,7 @@ func (c *gRPCClient) QueryWriteStatus(ctx context.Context, req *storagepb.QueryW var resp *storagepb.QueryWriteStatusResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error +<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.QueryWriteStatus, req, settings.GRPC, c.logger, "QueryWriteStatus") return err }, opts...) @@ -1560,6 +1726,9 @@ func (c *gRPCClient) MoveObject(ctx context.Context, req *storagepb.MoveObjectRe err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.client.MoveObject, req, settings.GRPC, c.logger, "MoveObject") +======= + resp, err = c.client.QueryWriteStatus(ctx, req, settings.GRPC...) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go index 7f286f3549..14243c9322 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go @@ -14,7 +14,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: +<<<<<<< HEAD // protoc-gen-go v1.35.2 +======= +// protoc-gen-go v1.34.2 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // protoc v4.25.3 // source: google/storage/v2/storage.proto @@ -27,11 +31,18 @@ import ( iampb "cloud.google.com/go/iam/apiv1/iampb" _ "google.golang.org/genproto/googleapis/api/annotations" +<<<<<<< HEAD status "google.golang.org/genproto/googleapis/rpc/status" date "google.golang.org/genproto/googleapis/type/date" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status1 "google.golang.org/grpc/status" +======= + date "google.golang.org/genproto/googleapis/type/date" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" durationpb "google.golang.org/protobuf/types/known/durationpb" @@ -178,7 +189,11 @@ func (x ServiceConstants_Values) Number() protoreflect.EnumNumber { // Deprecated: Use ServiceConstants_Values.Descriptor instead. func (ServiceConstants_Values) EnumDescriptor() ([]byte, []int) { +<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{42, 0} +======= + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{29, 0} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Request message for DeleteBucket. 
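Every unary method body above repeats one two-step convention: clone the per-method defaults with a full slice expression, then run the RPC inside gax.Invoke so the retryer carried in settings decides on reattempts. The full slice expression s[0:len(s):len(s)] is what makes append((*c.CallOptions).GetBucket[...], opts...) safe: capping capacity at length forces append to copy instead of scribbling on the shared defaults. A self-contained demonstration of the difference:

```go
package main

import "fmt"

func main() {
	defaults := make([]string, 2, 4) // shared per-method defaults with spare capacity
	defaults[0], defaults[1] = "timeout=60s", "retry=idempotent"

	// Plain append may reuse defaults' backing array: a later append from
	// another call site can overwrite what this one added.
	unsafe := append(defaults, "caller-A")

	// The three-index slice caps capacity at the length, forcing append to
	// copy — exactly as in append((*c.CallOptions).GetBucket[0:len:len], opts...).
	safe := append(defaults[0:len(defaults):len(defaults)], "caller-B")

	fmt.Println(unsafe[2], safe[2]) // caller-A caller-B

	// A second append through the uncapped slice clobbers caller-A's entry
	// in the shared backing array; the capped copy is unaffected.
	_ = append(defaults, "caller-C")
	fmt.Println(unsafe[2], safe[2]) // caller-C caller-B
}
```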
@@ -198,9 +213,17 @@ type DeleteBucketRequest struct { func (x *DeleteBucketRequest) Reset() { *x = DeleteBucketRequest{} +<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *DeleteBucketRequest) String() string { @@ -211,7 +234,11 @@ func (*DeleteBucketRequest) ProtoMessage() {} func (x *DeleteBucketRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[0] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -269,9 +296,17 @@ type GetBucketRequest struct { func (x *GetBucketRequest) Reset() { *x = GetBucketRequest{} +<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *GetBucketRequest) String() string { @@ -282,7 +317,11 @@ func (*GetBucketRequest) ProtoMessage() {} func (x *GetBucketRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[1] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -357,9 +396,17 @@ type CreateBucketRequest struct { func (x *CreateBucketRequest) Reset() { *x = CreateBucketRequest{} +<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *CreateBucketRequest) String() string { @@ -370,7 +417,11 @@ func (*CreateBucketRequest) ProtoMessage() {} func (x *CreateBucketRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[2] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -447,9 +498,17 @@ type ListBucketsRequest struct { func (x *ListBucketsRequest) Reset() { *x = ListBucketsRequest{} +<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ListBucketsRequest) String() string 
{ @@ -460,7 +519,11 @@ func (*ListBucketsRequest) ProtoMessage() {} func (x *ListBucketsRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[3] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -525,9 +588,17 @@ type ListBucketsResponse struct { func (x *ListBucketsResponse) Reset() { *x = ListBucketsResponse{} +<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ListBucketsResponse) String() string { @@ -538,7 +609,11 @@ func (*ListBucketsResponse) ProtoMessage() {} func (x *ListBucketsResponse) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[4] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -582,9 +657,17 @@ type LockBucketRetentionPolicyRequest struct { func (x *LockBucketRetentionPolicyRequest) Reset() { *x = LockBucketRetentionPolicyRequest{} +<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *LockBucketRetentionPolicyRequest) String() string { @@ -595,7 +678,11 @@ func (*LockBucketRetentionPolicyRequest) ProtoMessage() {} func (x *LockBucketRetentionPolicyRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[5] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -660,9 +747,17 @@ type UpdateBucketRequest struct { func (x *UpdateBucketRequest) Reset() { *x = UpdateBucketRequest{} +<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *UpdateBucketRequest) String() string { @@ -673,7 +768,11 @@ func (*UpdateBucketRequest) ProtoMessage() {} func (x *UpdateBucketRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[6] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -765,9 +864,17 @@ 
type ComposeObjectRequest struct { func (x *ComposeObjectRequest) Reset() { *x = ComposeObjectRequest{} +<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ComposeObjectRequest) String() string { @@ -778,7 +885,11 @@ func (*ComposeObjectRequest) ProtoMessage() {} func (x *ComposeObjectRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[7] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -886,9 +997,17 @@ type DeleteObjectRequest struct { func (x *DeleteObjectRequest) Reset() { *x = DeleteObjectRequest{} +<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *DeleteObjectRequest) String() string { @@ -899,7 +1018,11 @@ func (*DeleteObjectRequest) ProtoMessage() {} func (x *DeleteObjectRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[8] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1014,9 +1137,17 @@ type RestoreObjectRequest struct { func (x *RestoreObjectRequest) Reset() { *x = RestoreObjectRequest{} +<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *RestoreObjectRequest) String() string { @@ -1027,7 +1158,11 @@ func (*RestoreObjectRequest) ProtoMessage() {} func (x *RestoreObjectRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[9] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1126,9 +1261,17 @@ type CancelResumableWriteRequest struct { func (x *CancelResumableWriteRequest) Reset() { *x = CancelResumableWriteRequest{} +<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } 
func (x *CancelResumableWriteRequest) String() string { @@ -1139,7 +1282,11 @@ func (*CancelResumableWriteRequest) ProtoMessage() {} func (x *CancelResumableWriteRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[10] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1171,9 +1318,17 @@ type CancelResumableWriteResponse struct { func (x *CancelResumableWriteResponse) Reset() { *x = CancelResumableWriteResponse{} +<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *CancelResumableWriteResponse) String() string { @@ -1184,7 +1339,11 @@ func (*CancelResumableWriteResponse) ProtoMessage() {} func (x *CancelResumableWriteResponse) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[11] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1257,9 +1416,17 @@ type ReadObjectRequest struct { func (x *ReadObjectRequest) Reset() { *x = ReadObjectRequest{} +<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ReadObjectRequest) String() string { @@ -1270,7 +1437,11 @@ func (*ReadObjectRequest) ProtoMessage() {} func (x *ReadObjectRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[12] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1409,9 +1580,17 @@ type GetObjectRequest struct { func (x *GetObjectRequest) Reset() { *x = GetObjectRequest{} +<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *GetObjectRequest) String() string { @@ -1422,7 +1601,11 @@ func (*GetObjectRequest) ProtoMessage() {} func (x *GetObjectRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[13] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { 
ms.StoreMessageInfo(mi) @@ -1540,9 +1723,17 @@ type ReadObjectResponse struct { func (x *ReadObjectResponse) Reset() { *x = ReadObjectResponse{} +<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ReadObjectResponse) String() string { @@ -1553,7 +1744,11 @@ func (*ReadObjectResponse) ProtoMessage() {} func (x *ReadObjectResponse) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[14] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1596,6 +1791,7 @@ func (x *ReadObjectResponse) GetMetadata() *Object { return nil } +<<<<<<< HEAD // Describes the object to read in a BidiReadObject request. type BidiReadObjectSpec struct { state protoimpl.MessageState @@ -2372,6 +2568,8 @@ func (x *BidiWriteHandle) GetHandle() []byte { return nil } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Describes an attempt to insert an object, possibly over multiple requests. type WriteObjectSpec struct { state protoimpl.MessageState @@ -2407,16 +2605,27 @@ type WriteObjectSpec struct { // you must start the upload over from scratch, this time sending the correct // number of bytes. ObjectSize *int64 `protobuf:"varint,8,opt,name=object_size,json=objectSize,proto3,oneof" json:"object_size,omitempty"` +<<<<<<< HEAD // If true, the object will be created in appendable mode. // This field may only be set when using BidiWriteObject. Appendable *bool `protobuf:"varint,9,opt,name=appendable,proto3,oneof" json:"appendable,omitempty"` +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *WriteObjectSpec) Reset() { *x = WriteObjectSpec{} +<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *WriteObjectSpec) String() string { @@ -2426,8 +2635,13 @@ func (x *WriteObjectSpec) String() string { func (*WriteObjectSpec) ProtoMessage() {} func (x *WriteObjectSpec) ProtoReflect() protoreflect.Message { +<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[26] if x != nil { +======= + mi := &file_google_storage_v2_storage_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2439,7 +2653,11 @@ func (x *WriteObjectSpec) ProtoReflect() protoreflect.Message { // Deprecated: Use WriteObjectSpec.ProtoReflect.Descriptor instead. 
func (*WriteObjectSpec) Descriptor() ([]byte, []int) { +<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{26} +======= + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{15} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *WriteObjectSpec) GetResource() *Object { @@ -2491,6 +2709,7 @@ func (x *WriteObjectSpec) GetObjectSize() int64 { return 0 } +<<<<<<< HEAD func (x *WriteObjectSpec) GetAppendable() bool { if x != nil && x.Appendable != nil { return *x.Appendable @@ -2498,6 +2717,8 @@ func (x *WriteObjectSpec) GetAppendable() bool { return false } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Request message for WriteObject. type WriteObjectRequest struct { state protoimpl.MessageState @@ -2549,9 +2770,17 @@ type WriteObjectRequest struct { func (x *WriteObjectRequest) Reset() { *x = WriteObjectRequest{} +<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *WriteObjectRequest) String() string { @@ -2561,8 +2790,13 @@ func (x *WriteObjectRequest) String() string { func (*WriteObjectRequest) ProtoMessage() {} func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message { +<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[27] if x != nil { +======= + mi := &file_google_storage_v2_storage_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2574,7 +2808,11 @@ func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use WriteObjectRequest.ProtoReflect.Descriptor instead. 
func (*WriteObjectRequest) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{27} } func (m *WriteObjectRequest) GetFirstMessage() isWriteObjectRequest_FirstMessage { @@ -2689,9 +2927,17 @@ type WriteObjectResponse struct { func (x *WriteObjectResponse) Reset() { *x = WriteObjectResponse{} mi := &file_google_storage_v2_storage_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *WriteObjectResponse) String() string { @@ -2701,8 +2947,13 @@ func (x *WriteObjectResponse) String() string { func (*WriteObjectResponse) ProtoMessage() {} func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[28] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2714,7 +2965,11 @@ func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use WriteObjectResponse.ProtoReflect.Descriptor instead. func (*WriteObjectResponse) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{28} } func (m *WriteObjectResponse) GetWriteStatus() isWriteObjectResponse_WriteStatus { @@ -2758,6 +3013,7 @@ func (*WriteObjectResponse_PersistedSize) isWriteObjectResponse_WriteStatus() {} func (*WriteObjectResponse_Resource) isWriteObjectResponse_WriteStatus() {} // Describes an attempt to append to an object, possibly over multiple requests. type AppendObjectSpec struct { state protoimpl.MessageState @@ -2863,6 +3119,8 @@ func (x *AppendObjectSpec) GetWriteHandle() *BidiWriteHandle { return nil } // Request message for BidiWriteObject. type BidiWriteObjectRequest struct { state protoimpl.MessageState @@ -2875,7 +3133,10 @@ type BidiWriteObjectRequest struct { // // *BidiWriteObjectRequest_UploadId // *BidiWriteObjectRequest_WriteObjectSpec // *BidiWriteObjectRequest_AppendObjectSpec FirstMessage isBidiWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"` // Required. The offset from the beginning of the object at which the data // should be written. @@ -2899,8 +3160,12 @@ type BidiWriteObjectRequest struct { Data isBidiWriteObjectRequest_Data `protobuf_oneof:"data"` // Checksums for the complete object. If the checksums computed by the service // don't match the specified checksums the call will fail. May only be // provided in the first request or the // last request (with finish_write set).
ObjectChecksums *ObjectChecksums `protobuf:"bytes,6,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` // For each BidiWriteObjectRequest where state_lookup is `true` or the client // closes the stream, the service will send a BidiWriteObjectResponse @@ -2931,9 +3196,17 @@ type BidiWriteObjectRequest struct { func (x *BidiWriteObjectRequest) Reset() { *x = BidiWriteObjectRequest{} mi := &file_google_storage_v2_storage_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *BidiWriteObjectRequest) String() string { @@ -2943,8 +3216,13 @@ func (x *BidiWriteObjectRequest) String() string { func (*BidiWriteObjectRequest) ProtoMessage() {} func (x *BidiWriteObjectRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[30] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2956,7 +3234,11 @@ func (x *BidiWriteObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use BidiWriteObjectRequest.ProtoReflect.Descriptor instead. func (*BidiWriteObjectRequest) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30} } func (m *BidiWriteObjectRequest) GetFirstMessage() isBidiWriteObjectRequest_FirstMessage { @@ -2980,6 +3262,7 @@ func (x *BidiWriteObjectRequest) GetWriteObjectSpec() *WriteObjectSpec { return nil } func (x *BidiWriteObjectRequest) GetAppendObjectSpec() *AppendObjectSpec { if x, ok := x.GetFirstMessage().(*BidiWriteObjectRequest_AppendObjectSpec); ok { return x.AppendObjectSpec @@ -2987,6 +3270,8 @@ func (x *BidiWriteObjectRequest) GetAppendObjectSpec() *AppendObjectSpec { return nil } func (x *BidiWriteObjectRequest) GetWriteOffset() int64 { if x != nil { return x.WriteOffset @@ -3059,17 +3344,23 @@ type BidiWriteObjectRequest_WriteObjectSpec struct { WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,2,opt,name=write_object_spec,json=writeObjectSpec,proto3,oneof"` } type BidiWriteObjectRequest_AppendObjectSpec struct { // For appendable uploads. Describes the object to append to.
AppendObjectSpec *AppendObjectSpec `protobuf:"bytes,11,opt,name=append_object_spec,json=appendObjectSpec,proto3,oneof"` } func (*BidiWriteObjectRequest_UploadId) isBidiWriteObjectRequest_FirstMessage() {} func (*BidiWriteObjectRequest_WriteObjectSpec) isBidiWriteObjectRequest_FirstMessage() {} func (*BidiWriteObjectRequest_AppendObjectSpec) isBidiWriteObjectRequest_FirstMessage() {} type isBidiWriteObjectRequest_Data interface { isBidiWriteObjectRequest_Data() } @@ -3095,17 +3386,28 @@ type BidiWriteObjectResponse struct { // *BidiWriteObjectResponse_PersistedSize // *BidiWriteObjectResponse_Resource WriteStatus isBidiWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"` // An optional write handle that will periodically be present in response // messages. Clients should save it for later use in establishing a new stream // if a connection is interrupted. WriteHandle *BidiWriteHandle `protobuf:"bytes,3,opt,name=write_handle,json=writeHandle,proto3,oneof" json:"write_handle,omitempty"` } func (x *BidiWriteObjectResponse) Reset() { *x = BidiWriteObjectResponse{} mi := &file_google_storage_v2_storage_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *BidiWriteObjectResponse) String() string { @@ -3115,8 +3417,13 @@ func (x *BidiWriteObjectResponse) String() string { func (*BidiWriteObjectResponse) ProtoMessage() {} func (x *BidiWriteObjectResponse) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[31] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3128,7 +3435,11 @@ func (x *BidiWriteObjectResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use BidiWriteObjectResponse.ProtoReflect.Descriptor instead.
func (*BidiWriteObjectResponse) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{31} } func (m *BidiWriteObjectResponse) GetWriteStatus() isBidiWriteObjectResponse_WriteStatus { @@ -3152,6 +3463,7 @@ func (x *BidiWriteObjectResponse) GetResource() *Object { return nil } func (x *BidiWriteObjectResponse) GetWriteHandle() *BidiWriteHandle { if x != nil { return x.WriteHandle @@ -3159,6 +3471,8 @@ func (x *BidiWriteObjectResponse) GetWriteHandle() *BidiWriteHandle { return nil } type isBidiWriteObjectResponse_WriteStatus interface { isBidiWriteObjectResponse_WriteStatus() } @@ -3243,9 +3557,17 @@ type ListObjectsRequest struct { func (x *ListObjectsRequest) Reset() { *x = ListObjectsRequest{} mi := &file_google_storage_v2_storage_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *ListObjectsRequest) String() string { @@ -3255,8 +3577,13 @@ func (x *ListObjectsRequest) String() string { func (*ListObjectsRequest) ProtoMessage() {} func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[32] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3268,7 +3595,11 @@ func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListObjectsRequest.ProtoReflect.Descriptor instead.
func (*ListObjectsRequest) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{32} } func (x *ListObjectsRequest) GetParent() string { @@ -3377,9 +3708,17 @@ type QueryWriteStatusRequest struct { func (x *QueryWriteStatusRequest) Reset() { *x = QueryWriteStatusRequest{} mi := &file_google_storage_v2_storage_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *QueryWriteStatusRequest) String() string { @@ -3389,8 +3728,13 @@ func (x *QueryWriteStatusRequest) String() string { func (*QueryWriteStatusRequest) ProtoMessage() {} func (x *QueryWriteStatusRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[33] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3402,7 +3746,11 @@ func (x *QueryWriteStatusRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryWriteStatusRequest.ProtoReflect.Descriptor instead. func (*QueryWriteStatusRequest) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{33} } func (x *QueryWriteStatusRequest) GetUploadId() string { @@ -3436,9 +3784,17 @@ type QueryWriteStatusResponse struct { func (x *QueryWriteStatusResponse) Reset() { *x = QueryWriteStatusResponse{} mi := &file_google_storage_v2_storage_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *QueryWriteStatusResponse) String() string { @@ -3448,8 +3804,13 @@ func (x *QueryWriteStatusResponse) String() string { func (*QueryWriteStatusResponse) ProtoMessage() {} func (x *QueryWriteStatusResponse) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[34] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3461,7 +3822,11 @@ func (x *QueryWriteStatusResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryWriteStatusResponse.ProtoReflect.Descriptor instead.
func (*QueryWriteStatusResponse) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{34} } func (m *QueryWriteStatusResponse) GetWriteStatus() isQueryWriteStatusResponse_WriteStatus { @@ -3618,9 +3983,17 @@ type RewriteObjectRequest struct { func (x *RewriteObjectRequest) Reset() { *x = RewriteObjectRequest{} mi := &file_google_storage_v2_storage_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *RewriteObjectRequest) String() string { @@ -3630,8 +4003,13 @@ func (x *RewriteObjectRequest) String() string { func (*RewriteObjectRequest) ProtoMessage() {} func (x *RewriteObjectRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[35] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3643,7 +4021,11 @@ func (x *RewriteObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RewriteObjectRequest.ProtoReflect.Descriptor instead. func (*RewriteObjectRequest) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{35} } func (x *RewriteObjectRequest) GetDestinationName() string { @@ -3832,9 +4214,17 @@ type RewriteResponse struct { func (x *RewriteResponse) Reset() { *x = RewriteResponse{} mi := &file_google_storage_v2_storage_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *RewriteResponse) String() string { @@ -3844,8 +4234,13 @@ func (x *RewriteResponse) String() string { func (*RewriteResponse) ProtoMessage() {} func (x *RewriteResponse) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[36] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3857,7 +4252,11 @@ func (x *RewriteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RewriteResponse.ProtoReflect.Descriptor instead.
func (*RewriteResponse) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{36} } func (x *RewriteResponse) GetTotalBytesRewritten() int64 { @@ -3895,6 +4294,7 @@ func (x *RewriteResponse) GetResource() *Object { return nil } // Request message for MoveObject. type MoveObjectRequest struct { state protoimpl.MessageState @@ -4064,12 +4464,15 @@ func (x *MoveObjectRequest) GetIfMetagenerationNotMatch() int64 { return 0 } // Request message StartResumableWrite. type StartResumableWriteRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Required. Contains the information necessary to start a resumable write. WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,1,opt,name=write_object_spec,json=writeObjectSpec,proto3" json:"write_object_spec,omitempty"` // A set of parameters common to Storage API requests related to an object. @@ -4079,14 +4482,33 @@ type StartResumableWriteRequest struct { // initiating a resumable upload with`StartResumableWriteRequest` or when // completing a write with `WriteObjectRequest` with // `finish_write` set to `true`.
ObjectChecksums *ObjectChecksums `protobuf:"bytes,5,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` } func (x *StartResumableWriteRequest) Reset() { *x = StartResumableWriteRequest{} mi := &file_google_storage_v2_storage_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *StartResumableWriteRequest) String() string { @@ -4096,8 +4518,13 @@ func (x *StartResumableWriteRequest) String() string { func (*StartResumableWriteRequest) ProtoMessage() {} func (x *StartResumableWriteRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[38] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4109,7 +4536,11 @@ func (x *StartResumableWriteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StartResumableWriteRequest.ProtoReflect.Descriptor instead. func (*StartResumableWriteRequest) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38} } func (x *StartResumableWriteRequest) GetWriteObjectSpec() *WriteObjectSpec { @@ -4139,19 +4570,32 @@ type StartResumableWriteResponse struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // A unique identifier for the initiated resumable write operation. // As the ID grants write access, you should keep it confidential during // the upload to prevent unauthorized access and data tampering during your // upload. This ID should be included in subsequent `WriteObject` requests to // upload the object data.
UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"` } func (x *StartResumableWriteResponse) Reset() { *x = StartResumableWriteResponse{} mi := &file_google_storage_v2_storage_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *StartResumableWriteResponse) String() string { @@ -4161,8 +4605,13 @@ func (x *StartResumableWriteResponse) String() string { func (*StartResumableWriteResponse) ProtoMessage() {} func (x *StartResumableWriteResponse) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[39] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4174,7 +4623,11 @@ func (x *StartResumableWriteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StartResumableWriteResponse.ProtoReflect.Descriptor instead. func (*StartResumableWriteResponse) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39} } func (x *StartResumableWriteResponse) GetUploadId() string { @@ -4230,9 +4683,17 @@ type UpdateObjectRequest struct { func (x *UpdateObjectRequest) Reset() { *x = UpdateObjectRequest{} mi := &file_google_storage_v2_storage_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *UpdateObjectRequest) String() string { @@ -4242,8 +4703,13 @@ func (x *UpdateObjectRequest) String() string { func (*UpdateObjectRequest) ProtoMessage() {} func (x *UpdateObjectRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[40] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4255,7 +4721,11 @@ func (x *UpdateObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateObjectRequest.ProtoReflect.Descriptor instead.
func (*UpdateObjectRequest) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40} } func (x *UpdateObjectRequest) GetObject() *Object { @@ -4333,9 +4803,17 @@ type CommonObjectRequestParams struct { func (x *CommonObjectRequestParams) Reset() { *x = CommonObjectRequestParams{} mi := &file_google_storage_v2_storage_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *CommonObjectRequestParams) String() string { @@ -4345,8 +4823,13 @@ func (x *CommonObjectRequestParams) String() string { func (*CommonObjectRequestParams) ProtoMessage() {} func (x *CommonObjectRequestParams) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[41] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4358,7 +4841,11 @@ func (x *CommonObjectRequestParams) ProtoReflect() protoreflect.Message { // Deprecated: Use CommonObjectRequestParams.ProtoReflect.Descriptor instead. func (*CommonObjectRequestParams) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{41} } func (x *CommonObjectRequestParams) GetEncryptionAlgorithm() string { @@ -4391,9 +4878,17 @@ type ServiceConstants struct { func (x *ServiceConstants) Reset() { *x = ServiceConstants{} mi := &file_google_storage_v2_storage_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *ServiceConstants) String() string { @@ -4403,8 +4898,13 @@ func (x *ServiceConstants) String() string { func (*ServiceConstants) ProtoMessage() {} func (x *ServiceConstants) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[42] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4416,7 +4916,11 @@ func (x *ServiceConstants) ProtoReflect() protoreflect.Message { // Deprecated: Use ServiceConstants.ProtoReflect.Descriptor instead.
func (*ServiceConstants) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{42} } // A bucket. @@ -4550,9 +5054,17 @@ type Bucket struct { func (x *Bucket) Reset() { *x = Bucket{} mi := &file_google_storage_v2_storage_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *Bucket) String() string { @@ -4562,8 +5074,13 @@ func (x *Bucket) String() string { func (*Bucket) ProtoMessage() {} func (x *Bucket) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[43] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4575,7 +5092,11 @@ func (x *Bucket) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket.ProtoReflect.Descriptor instead. func (*Bucket) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43} } func (x *Bucket) GetName() string { @@ -4837,9 +5358,17 @@ type BucketAccessControl struct { func (x *BucketAccessControl) Reset() { *x = BucketAccessControl{} mi := &file_google_storage_v2_storage_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *BucketAccessControl) String() string { @@ -4849,8 +5378,13 @@ func (x *BucketAccessControl) String() string { func (*BucketAccessControl) ProtoMessage() {} func (x *BucketAccessControl) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[44] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4862,7 +5396,11 @@ func (x *BucketAccessControl) ProtoReflect() protoreflect.Message { // Deprecated: Use BucketAccessControl.ProtoReflect.Descriptor instead.
func (*BucketAccessControl) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{44} } func (x *BucketAccessControl) GetRole() string { @@ -4943,9 +5481,17 @@ type ChecksummedData struct { func (x *ChecksummedData) Reset() { *x = ChecksummedData{} mi := &file_google_storage_v2_storage_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *ChecksummedData) String() string { @@ -4955,8 +5501,13 @@ func (x *ChecksummedData) String() string { func (*ChecksummedData) ProtoMessage() {} func (x *ChecksummedData) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[45] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4968,7 +5519,11 @@ func (x *ChecksummedData) ProtoReflect() protoreflect.Message { // Deprecated: Use ChecksummedData.ProtoReflect.Descriptor instead. func (*ChecksummedData) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{45} } func (x *ChecksummedData) GetContent() []byte { @@ -5008,9 +5563,17 @@ type ObjectChecksums struct { func (x *ObjectChecksums) Reset() { *x = ObjectChecksums{} mi := &file_google_storage_v2_storage_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *ObjectChecksums) String() string { @@ -5020,8 +5583,13 @@ func (x *ObjectChecksums) String() string { func (*ObjectChecksums) ProtoMessage() {} func (x *ObjectChecksums) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[46] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5033,7 +5601,11 @@ func (x *ObjectChecksums) ProtoReflect() protoreflect.Message { // Deprecated: Use ObjectChecksums.ProtoReflect.Descriptor instead.
func (*ObjectChecksums) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{46} } func (x *ObjectChecksums) GetCrc32C() uint32 { @@ -5066,9 +5638,17 @@ type CustomerEncryption struct { func (x *CustomerEncryption) Reset() { *x = CustomerEncryption{} mi := &file_google_storage_v2_storage_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *CustomerEncryption) String() string { @@ -5078,8 +5658,13 @@ func (x *CustomerEncryption) String() string { func (*CustomerEncryption) ProtoMessage() {} func (x *CustomerEncryption) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[47] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5091,7 +5676,11 @@ func (x *CustomerEncryption) ProtoReflect() protoreflect.Message { // Deprecated: Use CustomerEncryption.ProtoReflect.Descriptor instead. func (*CustomerEncryption) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{47} } func (x *CustomerEncryption) GetEncryptionAlgorithm() string { @@ -5167,8 +5756,11 @@ type Object struct { // Output only. If this object is noncurrent, this is the time when the object // became noncurrent. DeleteTime *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=delete_time,json=deleteTime,proto3" json:"delete_time,omitempty"` // Output only. The time when the object was finalized. FinalizeTime *timestamppb.Timestamp `protobuf:"bytes,36,opt,name=finalize_time,json=finalizeTime,proto3" json:"finalize_time,omitempty"` // Content-Type of the object data, matching // [https://tools.ietf.org/html/rfc7231#section-3.1.1.5][RFC 7231 §3.1.1.5].
// If an object is stored without a Content-Type, it is served as @@ -5248,9 +5840,17 @@ type Object struct { func (x *Object) Reset() { *x = Object{} mi := &file_google_storage_v2_storage_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *Object) String() string { @@ -5260,8 +5860,13 @@ func (x *Object) String() string { func (*Object) ProtoMessage() {} func (x *Object) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[48] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5273,7 +5878,11 @@ func (x *Object) ProtoReflect() protoreflect.Message { // Deprecated: Use Object.ProtoReflect.Descriptor instead. func (*Object) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{48} } func (x *Object) GetName() string { @@ -5374,6 +5983,7 @@ func (x *Object) GetDeleteTime() *timestamppb.Timestamp { return nil } func (x *Object) GetFinalizeTime() *timestamppb.Timestamp { if x != nil { return x.FinalizeTime @@ -5381,6 +5991,8 @@ func (x *Object) GetFinalizeTime() *timestamppb.Timestamp { return nil } func (x *Object) GetContentType() string { if x != nil { return x.ContentType @@ -5545,9 +6157,17 @@ type ObjectAccessControl struct { func (x *ObjectAccessControl) Reset() { *x = ObjectAccessControl{} mi := &file_google_storage_v2_storage_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *ObjectAccessControl) String() string { @@ -5557,8 +6177,13 @@ func (x *ObjectAccessControl) String() string { func (*ObjectAccessControl) ProtoMessage() {} func (x *ObjectAccessControl) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[49] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5570,7 +6195,11 @@ func (x *ObjectAccessControl) ProtoReflect() protoreflect.Message { // Deprecated: Use ObjectAccessControl.ProtoReflect.Descriptor instead.
func (*ObjectAccessControl) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{49} } func (x *ObjectAccessControl) GetRole() string { @@ -5654,9 +6283,17 @@ type ListObjectsResponse struct { func (x *ListObjectsResponse) Reset() { *x = ListObjectsResponse{} mi := &file_google_storage_v2_storage_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *ListObjectsResponse) String() string { @@ -5666,8 +6303,13 @@ func (x *ListObjectsResponse) String() string { func (*ListObjectsResponse) ProtoMessage() {} func (x *ListObjectsResponse) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[50] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5679,7 +6321,11 @@ func (x *ListObjectsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListObjectsResponse.ProtoReflect.Descriptor instead. func (*ListObjectsResponse) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{50} } func (x *ListObjectsResponse) GetObjects() []*Object { @@ -5717,9 +6363,17 @@ type ProjectTeam struct { func (x *ProjectTeam) Reset() { *x = ProjectTeam{} mi := &file_google_storage_v2_storage_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *ProjectTeam) String() string { @@ -5729,8 +6383,13 @@ func (x *ProjectTeam) String() string { func (*ProjectTeam) ProtoMessage() {} func (x *ProjectTeam) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[51] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5742,7 +6401,11 @@ func (x *ProjectTeam) ProtoReflect() protoreflect.Message { // Deprecated: Use ProjectTeam.ProtoReflect.Descriptor instead.
func (*ProjectTeam) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{51} } func (x *ProjectTeam) GetProjectNumber() string { @@ -5773,9 +6436,17 @@ type Owner struct { func (x *Owner) Reset() { *x = Owner{} mi := &file_google_storage_v2_storage_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *Owner) String() string { @@ -5785,8 +6456,13 @@ func (x *Owner) String() string { func (*Owner) ProtoMessage() {} func (x *Owner) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[52] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5798,7 +6474,11 @@ func (x *Owner) ProtoReflect() protoreflect.Message { // Deprecated: Use Owner.ProtoReflect.Descriptor instead. func (*Owner) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{52} } func (x *Owner) GetEntity() string { @@ -5831,9 +6511,17 @@ type ContentRange struct { func (x *ContentRange) Reset() { *x = ContentRange{} mi := &file_google_storage_v2_storage_proto_msgTypes[53] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *ContentRange) String() string { @@ -5843,8 +6531,13 @@ func (x *ContentRange) String() string { func (*ContentRange) ProtoMessage() {} func (x *ContentRange) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[53] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5856,7 +6549,11 @@ func (x *ContentRange) ProtoReflect() protoreflect.Message { // Deprecated: Use ContentRange.ProtoReflect.Descriptor instead.
func (*ContentRange) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{53} } func (x *ContentRange) GetStart() int64 { @@ -5897,9 +6594,17 @@ type ComposeObjectRequest_SourceObject struct { func (x *ComposeObjectRequest_SourceObject) Reset() { *x = ComposeObjectRequest_SourceObject{} mi := &file_google_storage_v2_storage_proto_msgTypes[54] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *ComposeObjectRequest_SourceObject) String() string { @@ -5909,8 +6614,13 @@ func (x *ComposeObjectRequest_SourceObject) String() string { func (*ComposeObjectRequest_SourceObject) ProtoMessage() {} func (x *ComposeObjectRequest_SourceObject) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[54] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5960,9 +6670,17 @@ type ComposeObjectRequest_SourceObject_ObjectPreconditions struct { func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) Reset() { *x = ComposeObjectRequest_SourceObject_ObjectPreconditions{} mi := &file_google_storage_v2_storage_proto_msgTypes[55] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) String() string { @@ -5972,8 +6690,13 @@ func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) String() string func (*ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoMessage() {} func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[55] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6007,9 +6730,17 @@ type Bucket_Billing struct { func (x *Bucket_Billing) Reset() { *x = Bucket_Billing{} mi := &file_google_storage_v2_storage_proto_msgTypes[56] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *Bucket_Billing) String() string { @@ -6019,8 +6750,13 @@ func (x *Bucket_Billing) String() string { func (*Bucket_Billing) ProtoMessage() {} func (x
*Bucket_Billing) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[56] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6032,7 +6768,11 @@ func (x *Bucket_Billing) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Billing.ProtoReflect.Descriptor instead. func (*Bucket_Billing) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 0} } func (x *Bucket_Billing) GetRequesterPays() bool { @@ -6071,9 +6811,17 @@ type Bucket_Cors struct { func (x *Bucket_Cors) Reset() { *x = Bucket_Cors{} mi := &file_google_storage_v2_storage_proto_msgTypes[57] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *Bucket_Cors) String() string { @@ -6083,8 +6831,13 @@ func (x *Bucket_Cors) String() string { func (*Bucket_Cors) ProtoMessage() {} func (x *Bucket_Cors) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[57] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6096,7 +6849,11 @@ func (x *Bucket_Cors) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Cors.ProtoReflect.Descriptor instead.
func (*Bucket_Cors) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 1} } func (x *Bucket_Cors) GetOrigin() []string { @@ -6140,9 +6897,17 @@ type Bucket_Encryption struct { func (x *Bucket_Encryption) Reset() { *x = Bucket_Encryption{} mi := &file_google_storage_v2_storage_proto_msgTypes[58] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *Bucket_Encryption) String() string { @@ -6152,8 +6917,13 @@ func (x *Bucket_Encryption) String() string { func (*Bucket_Encryption) ProtoMessage() {} func (x *Bucket_Encryption) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[58] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6165,7 +6935,11 @@ func (x *Bucket_Encryption) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Encryption.ProtoReflect.Descriptor instead. func (*Bucket_Encryption) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 2} } func (x *Bucket_Encryption) GetDefaultKmsKey() string { @@ -6190,9 +6964,17 @@ type Bucket_IamConfig struct { func (x *Bucket_IamConfig) Reset() { *x = Bucket_IamConfig{} mi := &file_google_storage_v2_storage_proto_msgTypes[59] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *Bucket_IamConfig) String() string { @@ -6202,8 +6984,13 @@ func (x *Bucket_IamConfig) String() string { func (*Bucket_IamConfig) ProtoMessage() {} func (x *Bucket_IamConfig) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[59] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6215,7 +7002,11 @@ func (x *Bucket_IamConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_IamConfig.ProtoReflect.Descriptor instead.
func (*Bucket_IamConfig) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 3} } func (x *Bucket_IamConfig) GetUniformBucketLevelAccess() *Bucket_IamConfig_UniformBucketLevelAccess { @@ -6246,9 +7037,17 @@ type Bucket_Lifecycle struct { func (x *Bucket_Lifecycle) Reset() { *x = Bucket_Lifecycle{} mi := &file_google_storage_v2_storage_proto_msgTypes[60] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *Bucket_Lifecycle) String() string { @@ -6258,8 +7057,13 @@ func (x *Bucket_Lifecycle) String() string { func (*Bucket_Lifecycle) ProtoMessage() {} func (x *Bucket_Lifecycle) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[60] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6271,7 +7075,11 @@ func (x *Bucket_Lifecycle) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Lifecycle.ProtoReflect.Descriptor instead. func (*Bucket_Lifecycle) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4} } func (x *Bucket_Lifecycle) GetRule() []*Bucket_Lifecycle_Rule { @@ -6296,9 +7104,17 @@ type Bucket_Logging struct { func (x *Bucket_Logging) Reset() { *x = Bucket_Logging{} mi := &file_google_storage_v2_storage_proto_msgTypes[61] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *Bucket_Logging) String() string { @@ -6308,8 +7124,13 @@ func (x *Bucket_Logging) String() string { func (*Bucket_Logging) ProtoMessage() {} func (x *Bucket_Logging) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[61] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6321,7 +7142,11 @@ func (x *Bucket_Logging) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Logging.ProtoReflect.Descriptor instead.
 func (*Bucket_Logging) Descriptor() ([]byte, []int) {
-	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 5}
+	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 5}
 }
 
 func (x *Bucket_Logging) GetLogBucket() string {
@@ -6359,9 +7184,17 @@ type Bucket_RetentionPolicy struct {
 
 func (x *Bucket_RetentionPolicy) Reset() {
 	*x = Bucket_RetentionPolicy{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_storage_v2_storage_proto_msgTypes[49]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_storage_v2_storage_proto_msgTypes[62]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Bucket_RetentionPolicy) String() string {
@@ -6371,8 +7204,13 @@ func (x *Bucket_RetentionPolicy) String() string {
 func (*Bucket_RetentionPolicy) ProtoMessage() {}
 
 func (x *Bucket_RetentionPolicy) ProtoReflect() protoreflect.Message {
-	mi := &file_google_storage_v2_storage_proto_msgTypes[49]
-	if protoimpl.UnsafeEnabled && x != nil {
+	mi := &file_google_storage_v2_storage_proto_msgTypes[62]
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -6384,7 +7222,11 @@ func (x *Bucket_RetentionPolicy) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use Bucket_RetentionPolicy.ProtoReflect.Descriptor instead.
 func (*Bucket_RetentionPolicy) Descriptor() ([]byte, []int) {
-	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 6}
+	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 6}
 }
 
 func (x *Bucket_RetentionPolicy) GetEffectiveTime() *timestamppb.Timestamp {
@@ -6424,9 +7266,17 @@ type Bucket_SoftDeletePolicy struct {
 
 func (x *Bucket_SoftDeletePolicy) Reset() {
 	*x = Bucket_SoftDeletePolicy{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_storage_v2_storage_proto_msgTypes[50]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_storage_v2_storage_proto_msgTypes[63]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Bucket_SoftDeletePolicy) String() string {
@@ -6436,8 +7286,13 @@ func (x *Bucket_SoftDeletePolicy) String() string {
 func (*Bucket_SoftDeletePolicy) ProtoMessage() {}
 
 func (x *Bucket_SoftDeletePolicy) ProtoReflect() protoreflect.Message {
-	mi := &file_google_storage_v2_storage_proto_msgTypes[50]
-	if protoimpl.UnsafeEnabled && x != nil {
+	mi := &file_google_storage_v2_storage_proto_msgTypes[63]
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -6449,7 +7304,11 @@ func (x *Bucket_SoftDeletePolicy) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use Bucket_SoftDeletePolicy.ProtoReflect.Descriptor instead.
 func (*Bucket_SoftDeletePolicy) Descriptor() ([]byte, []int) {
-	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 7}
+	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 7}
 }
 
 func (x *Bucket_SoftDeletePolicy) GetRetentionDuration() *durationpb.Duration {
@@ -6480,9 +7339,17 @@ type Bucket_Versioning struct {
 
 func (x *Bucket_Versioning) Reset() {
 	*x = Bucket_Versioning{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_storage_v2_storage_proto_msgTypes[51]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_storage_v2_storage_proto_msgTypes[64]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Bucket_Versioning) String() string {
@@ -6492,8 +7359,13 @@ func (x *Bucket_Versioning) String() string {
 func (*Bucket_Versioning) ProtoMessage() {}
 
 func (x *Bucket_Versioning) ProtoReflect() protoreflect.Message {
-	mi := &file_google_storage_v2_storage_proto_msgTypes[51]
-	if protoimpl.UnsafeEnabled && x != nil {
+	mi := &file_google_storage_v2_storage_proto_msgTypes[64]
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -6505,7 +7377,11 @@ func (x *Bucket_Versioning) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use Bucket_Versioning.ProtoReflect.Descriptor instead.
 func (*Bucket_Versioning) Descriptor() ([]byte, []int) {
-	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 8}
+	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 8}
 }
 
 func (x *Bucket_Versioning) GetEnabled() bool {
@@ -6538,9 +7414,17 @@ type Bucket_Website struct {
 
 func (x *Bucket_Website) Reset() {
 	*x = Bucket_Website{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_storage_v2_storage_proto_msgTypes[52]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_storage_v2_storage_proto_msgTypes[65]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Bucket_Website) String() string {
@@ -6550,8 +7434,13 @@ func (x *Bucket_Website) String() string {
 func (*Bucket_Website) ProtoMessage() {}
 
 func (x *Bucket_Website) ProtoReflect() protoreflect.Message {
-	mi := &file_google_storage_v2_storage_proto_msgTypes[52]
-	if protoimpl.UnsafeEnabled && x != nil {
+	mi := &file_google_storage_v2_storage_proto_msgTypes[65]
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -6563,7 +7452,11 @@ func (x *Bucket_Website) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use Bucket_Website.ProtoReflect.Descriptor instead.
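 //
 // Editorial sketch of the shared helper these Descriptor methods call
 // (assuming standard protoc-gen-go output for this file): the raw
 // descriptor is gzip-compressed once, lazily, and cached:
 //
 //	func file_google_storage_v2_storage_proto_rawDescGZIP() []byte {
 //		file_google_storage_v2_storage_proto_rawDescOnce.Do(func() {
 //			file_google_storage_v2_storage_proto_rawDescData =
 //				protoimpl.X.CompressGZIP(file_google_storage_v2_storage_proto_rawDescData)
 //		})
 //		return file_google_storage_v2_storage_proto_rawDescData
 //	}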
 func (*Bucket_Website) Descriptor() ([]byte, []int) {
-	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 9}
+	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 9}
 }
 
 func (x *Bucket_Website) GetMainPageSuffix() string {
@@ -6594,9 +7487,17 @@ type Bucket_CustomPlacementConfig struct {
 
 func (x *Bucket_CustomPlacementConfig) Reset() {
 	*x = Bucket_CustomPlacementConfig{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_storage_v2_storage_proto_msgTypes[53]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_storage_v2_storage_proto_msgTypes[66]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Bucket_CustomPlacementConfig) String() string {
@@ -6606,8 +7507,13 @@ func (x *Bucket_CustomPlacementConfig) String() string {
 func (*Bucket_CustomPlacementConfig) ProtoMessage() {}
 
 func (x *Bucket_CustomPlacementConfig) ProtoReflect() protoreflect.Message {
-	mi := &file_google_storage_v2_storage_proto_msgTypes[53]
-	if protoimpl.UnsafeEnabled && x != nil {
+	mi := &file_google_storage_v2_storage_proto_msgTypes[66]
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -6619,7 +7525,11 @@ func (x *Bucket_CustomPlacementConfig) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use Bucket_CustomPlacementConfig.ProtoReflect.Descriptor instead.
 func (*Bucket_CustomPlacementConfig) Descriptor() ([]byte, []int) {
-	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 10}
+	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 10}
 }
 
 func (x *Bucket_CustomPlacementConfig) GetDataLocations() []string {
@@ -6653,9 +7563,17 @@ type Bucket_Autoclass struct {
 
 func (x *Bucket_Autoclass) Reset() {
 	*x = Bucket_Autoclass{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_storage_v2_storage_proto_msgTypes[54]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_storage_v2_storage_proto_msgTypes[67]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Bucket_Autoclass) String() string {
@@ -6665,8 +7583,13 @@ func (x *Bucket_Autoclass) String() string {
 func (*Bucket_Autoclass) ProtoMessage() {}
 
 func (x *Bucket_Autoclass) ProtoReflect() protoreflect.Message {
-	mi := &file_google_storage_v2_storage_proto_msgTypes[54]
-	if protoimpl.UnsafeEnabled && x != nil {
+	mi := &file_google_storage_v2_storage_proto_msgTypes[67]
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -6678,7 +7601,11 @@ func (x *Bucket_Autoclass) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use Bucket_Autoclass.ProtoReflect.Descriptor instead.
 func (*Bucket_Autoclass) Descriptor() ([]byte, []int) {
-	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 11}
+	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 11}
 }
 
 func (x *Bucket_Autoclass) GetEnabled() bool {
@@ -6721,9 +7648,17 @@ type Bucket_HierarchicalNamespace struct {
 
 func (x *Bucket_HierarchicalNamespace) Reset() {
 	*x = Bucket_HierarchicalNamespace{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_storage_v2_storage_proto_msgTypes[55]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_storage_v2_storage_proto_msgTypes[68]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Bucket_HierarchicalNamespace) String() string {
@@ -6733,8 +7668,13 @@ func (x *Bucket_HierarchicalNamespace) String() string {
 func (*Bucket_HierarchicalNamespace) ProtoMessage() {}
 
 func (x *Bucket_HierarchicalNamespace) ProtoReflect() protoreflect.Message {
-	mi := &file_google_storage_v2_storage_proto_msgTypes[55]
-	if protoimpl.UnsafeEnabled && x != nil {
+	mi := &file_google_storage_v2_storage_proto_msgTypes[68]
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -6746,7 +7686,11 @@ func (x *Bucket_HierarchicalNamespace) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use Bucket_HierarchicalNamespace.ProtoReflect.Descriptor instead.
 func (*Bucket_HierarchicalNamespace) Descriptor() ([]byte, []int) {
-	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 12}
+	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 12}
 }
 
 func (x *Bucket_HierarchicalNamespace) GetEnabled() bool {
@@ -6774,9 +7718,17 @@ type Bucket_IamConfig_UniformBucketLevelAccess struct {
 
 func (x *Bucket_IamConfig_UniformBucketLevelAccess) Reset() {
 	*x = Bucket_IamConfig_UniformBucketLevelAccess{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_storage_v2_storage_proto_msgTypes[57]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_storage_v2_storage_proto_msgTypes[70]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Bucket_IamConfig_UniformBucketLevelAccess) String() string {
@@ -6786,8 +7738,13 @@ func (x *Bucket_IamConfig_UniformBucketLevelAccess) String() string {
 func (*Bucket_IamConfig_UniformBucketLevelAccess) ProtoMessage() {}
 
 func (x *Bucket_IamConfig_UniformBucketLevelAccess) ProtoReflect() protoreflect.Message {
-	mi := &file_google_storage_v2_storage_proto_msgTypes[57]
-	if protoimpl.UnsafeEnabled && x != nil {
+	mi := &file_google_storage_v2_storage_proto_msgTypes[70]
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -6799,7 +7756,11 @@ func (x *Bucket_IamConfig_UniformBucketLevelAccess) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use Bucket_IamConfig_UniformBucketLevelAccess.ProtoReflect.Descriptor instead.
 func (*Bucket_IamConfig_UniformBucketLevelAccess) Descriptor() ([]byte, []int) {
-	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 3, 0}
+	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 3, 0}
 }
 
 func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetEnabled() bool {
@@ -6831,9 +7792,17 @@ type Bucket_Lifecycle_Rule struct {
 
 func (x *Bucket_Lifecycle_Rule) Reset() {
 	*x = Bucket_Lifecycle_Rule{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_storage_v2_storage_proto_msgTypes[58]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_storage_v2_storage_proto_msgTypes[71]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Bucket_Lifecycle_Rule) String() string {
@@ -6843,8 +7812,13 @@ func (x *Bucket_Lifecycle_Rule) String() string {
 func (*Bucket_Lifecycle_Rule) ProtoMessage() {}
 
 func (x *Bucket_Lifecycle_Rule) ProtoReflect() protoreflect.Message {
-	mi := &file_google_storage_v2_storage_proto_msgTypes[58]
-	if protoimpl.UnsafeEnabled && x != nil {
+	mi := &file_google_storage_v2_storage_proto_msgTypes[71]
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -6856,7 +7830,11 @@ func (x *Bucket_Lifecycle_Rule) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use Bucket_Lifecycle_Rule.ProtoReflect.Descriptor instead.
 func (*Bucket_Lifecycle_Rule) Descriptor() ([]byte, []int) {
-	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 4, 0}
+	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0}
 }
 
 func (x *Bucket_Lifecycle_Rule) GetAction() *Bucket_Lifecycle_Rule_Action {
@@ -6889,9 +7867,17 @@ type Bucket_Lifecycle_Rule_Action struct {
 
 func (x *Bucket_Lifecycle_Rule_Action) Reset() {
 	*x = Bucket_Lifecycle_Rule_Action{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_storage_v2_storage_proto_msgTypes[59]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_storage_v2_storage_proto_msgTypes[72]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Bucket_Lifecycle_Rule_Action) String() string {
@@ -6901,8 +7887,13 @@ func (x *Bucket_Lifecycle_Rule_Action) String() string {
 func (*Bucket_Lifecycle_Rule_Action) ProtoMessage() {}
 
 func (x *Bucket_Lifecycle_Rule_Action) ProtoReflect() protoreflect.Message {
-	mi := &file_google_storage_v2_storage_proto_msgTypes[59]
-	if protoimpl.UnsafeEnabled && x != nil {
+	mi := &file_google_storage_v2_storage_proto_msgTypes[72]
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -6914,7 +7905,11 @@ func (x *Bucket_Lifecycle_Rule_Action) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use Bucket_Lifecycle_Rule_Action.ProtoReflect.Descriptor instead.
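 //
 // Editorial note, not generator output: the Reset/ProtoReflect pairs above
 // eagerly store and then lazily reload the per-type MessageInfo used by
 // protobuf reflection. The previously vendored copy gated this behind
 // protoimpl.UnsafeEnabled; newer protoc-gen-go output drops that switch,
 // which is the shape of every hunk in this region of the dependency bump.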
 func (*Bucket_Lifecycle_Rule_Action) Descriptor() ([]byte, []int) {
-	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 4, 0, 0}
+	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0, 0}
 }
 
 func (x *Bucket_Lifecycle_Rule_Action) GetType() string {
@@ -6985,9 +7980,17 @@ type Bucket_Lifecycle_Rule_Condition struct {
 
 func (x *Bucket_Lifecycle_Rule_Condition) Reset() {
 	*x = Bucket_Lifecycle_Rule_Condition{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_storage_v2_storage_proto_msgTypes[60]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_storage_v2_storage_proto_msgTypes[73]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Bucket_Lifecycle_Rule_Condition) String() string {
@@ -6997,8 +8000,13 @@ func (x *Bucket_Lifecycle_Rule_Condition) String() string {
 func (*Bucket_Lifecycle_Rule_Condition) ProtoMessage() {}
 
 func (x *Bucket_Lifecycle_Rule_Condition) ProtoReflect() protoreflect.Message {
-	mi := &file_google_storage_v2_storage_proto_msgTypes[60]
-	if protoimpl.UnsafeEnabled && x != nil {
+	mi := &file_google_storage_v2_storage_proto_msgTypes[73]
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -7010,7 +8018,11 @@ func (x *Bucket_Lifecycle_Rule_Condition) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use Bucket_Lifecycle_Rule_Condition.ProtoReflect.Descriptor instead.
 func (*Bucket_Lifecycle_Rule_Condition) Descriptor() ([]byte, []int) {
-	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 4, 0, 1}
+	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0, 1}
 }
 
 func (x *Bucket_Lifecycle_Rule_Condition) GetAgeDays() int32 {
@@ -7115,6 +8127,7 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{
 	0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f,
 	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f,
 	0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
-	// [serialized descriptor bytes from the previously vendored revision of
-	// google/storage/v2/storage.proto]
+	// [regenerated serialized descriptor bytes for google/storage/v2/storage.proto,
+	// covering the updated bucket and object request, response, and read/write
+	// messages; this wire data is emitted by protoc and is not hand-edited]
0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, + 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x8e, 0x06, 0x0a, 0x10, + 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, + 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0c, + 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x08, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x01, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x14, 0x69, 0x66, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, + 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, + 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, + 0x73, 0x6b, 0x18, 0x0a, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x05, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, + 0x88, 0x01, 0x01, 0x12, 0x28, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, + 0x0c, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x0f, 0x0a, + 0x0d, 0x5f, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x42, 0x16, + 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, + 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, + 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaf, 0x02, 0x0a, + 0x12, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, + 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, + 0x61, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, + 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, + 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, + 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, + 0x73, 0x12, 0x44, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, + 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x8c, + 0x04, 0x0a, 0x0f, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, + 0x65, 0x63, 0x12, 0x3a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, + 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, + 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, + 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, + 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, + 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x0a, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, + 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, + 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, + 0x0c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x22, 0xf8, 0x03, + 0x0a, 0x12, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, + 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 
0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, + 0x65, 0x63, 0x48, 0x00, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, + 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x4f, 0x0a, + 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, 0x0f, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, + 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, + 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x21, 0x0a, + 0x0c, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, + 0x0f, 0x0a, 0x0d, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x87, 0x01, 0x0a, 0x13, 0x57, 0x72, 0x69, + 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, @@ -7860,6 +9317,7 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, +<<<<<<< HEAD 0x75, 0x73, 0x22, 0xb5, 0x0e, 0x0a, 0x14, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, 0x12, 0x31, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, @@ -8037,6 +9495,218 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{ 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x07, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, +======= + 0x75, 0x73, 0x22, 0xb5, 0x04, 0x0a, 0x16, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, + 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, + 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x0f, 0x77, + 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x26, + 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, + 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, + 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, + 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, + 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x75, + 0x73, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x12, + 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, + 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, + 
0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x8b, 0x01, 0x0a, 0x17, 0x42, + 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, + 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, + 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, + 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, + 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xe3, 0x04, 0x0a, 0x12, 0x4c, 0x69, 0x73, + 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, + 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, + 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, + 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x3c, 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, + 0x75, 0x64, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x6e, + 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1a, + 0x0a, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, + 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, + 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x12, 0x34, 0x0a, 0x13, 0x6c, 0x65, 0x78, 0x69, + 0x63, 0x6f, 0x67, 0x72, 
0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x12, 0x6c, 0x65, 0x78, 0x69, + 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x30, + 0x0a, 0x11, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, + 0x65, 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, + 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x45, 0x6e, 0x64, + 0x12, 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, + 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x73, 0x6f, 0x66, + 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x6e, 0x63, 0x6c, + 0x75, 0x64, 0x65, 0x5f, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x61, 0x73, 0x5f, 0x70, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, + 0x41, 0x01, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x46, 0x6f, 0x6c, 0x64, 0x65, + 0x72, 0x73, 0x41, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x12, 0x22, 0x0a, 0x0a, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x47, 0x6c, 0x6f, 0x62, + 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaa, + 0x01, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x6d, 0x0a, 0x1c, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, + 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, + 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, + 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xb5, 0x0e, 0x0a, 0x14, 0x52, + 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 
0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x42, 0x06, 0xe0, + 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x57, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x19, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x11, 0x64, 0x65, + 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, + 0x56, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, + 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, + 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, + 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x3c, 0x0a, 0x1a, + 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, + 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, + 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 
0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, + 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, + 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, + 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, + 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, + 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x40, 0x0a, 0x1a, + 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, + 0x48, 0x04, 0x52, 0x17, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x47, + 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x48, 0x05, 0x52, 0x1a, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x48, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x06, 0x52, 0x1b, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, + 0x01, 0x12, 0x4f, 0x0a, 0x22, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x48, 0x07, 0x52, + 0x1e, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, + 0x01, 0x01, 0x12, 0x3e, 0x0a, 0x1c, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, + 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x61, + 0x6c, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x50, 0x65, 0x72, 0x43, 0x61, + 
0x6c, 0x6c, 0x12, 0x47, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, + 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x63, 0x6f, + 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x46, 0x0a, 0x20, 0x63, + 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, + 0x15, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1c, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x12, 0x53, 0x0a, 0x27, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, + 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x16, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, + 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x1d, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, + 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, + 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 
0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, @@ -8045,6 +9715,7 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, +<<<<<<< HEAD 0x63, 0x68, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, @@ -8914,6 +10585,860 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{ 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +======= + 0x63, 0x68, 0x22, 0xd6, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, + 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xaf, 0x02, 0x0a, 0x1a, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, + 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, + 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 
0x6d, 0x6f, 0x6e, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, + 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, + 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x22, 0x3a, 0x0a, + 0x1b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, + 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, + 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0x87, 0x05, 0x0a, 0x13, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x36, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, + 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, + 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, + 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, + 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, + 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, + 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, + 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, + 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, + 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x22, 0xbf, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, + 0x69, 0x74, 0x68, 0x6d, 0x12, 0x30, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x12, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, + 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x65, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, + 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xca, 0x05, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x73, 0x22, 0xb5, 0x05, 0x0a, 0x06, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x12, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x53, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, + 0x14, 0x4d, 0x41, 0x58, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, + 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x1c, 0x0a, 0x15, 0x4d, 0x41, + 0x58, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, + 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x19, 0x0a, 0x12, 0x4d, 0x41, 0x58, 0x5f, + 0x4f, 0x42, 0x4a, 0x45, 0x43, 
0x54, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x4d, 0x42, 0x10, 0x80, + 0x80, 0xc0, 0x02, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, + 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, + 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x08, 0x12, 0x2a, + 0x0a, 0x25, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, + 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x56, 0x41, 0x4c, 0x55, + 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x20, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, + 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, + 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, + 0x45, 0x53, 0x10, 0x80, 0x40, 0x12, 0x2a, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x42, 0x55, 0x43, + 0x4b, 0x45, 0x54, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, + 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0xa0, + 0x01, 0x12, 0x27, 0x0a, 0x23, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, + 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x53, 0x5f, 0x50, 0x45, + 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x22, 0x0a, 0x1e, 0x4d, 0x41, + 0x58, 0x5f, 0x4c, 0x49, 0x46, 0x45, 0x43, 0x59, 0x43, 0x4c, 0x45, 0x5f, 0x52, 0x55, 0x4c, 0x45, + 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x26, + 0x0a, 0x22, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, + 0x55, 0x54, 0x45, 0x53, 0x10, 0x05, 0x12, 0x31, 0x0a, 0x2c, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, + 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, + 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x4b, 0x45, 0x59, 0x5f, + 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, 0x02, 0x12, 0x33, 0x0a, 0x2e, 0x4d, 0x41, 0x58, + 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, + 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x56, + 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, 0x08, 0x12, 0x1c, + 0x0a, 0x18, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x45, 0x4e, 0x54, + 0x52, 0x49, 0x45, 0x53, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, 0x40, 0x12, 0x1f, 0x0a, 0x1b, + 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, + 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x3f, 0x12, 0x1f, 0x0a, + 0x1a, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, + 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x01, 0x12, 0x2e, + 0x0a, 0x29, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x49, 0x44, 0x53, + 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x4f, 0x42, 0x4a, 0x45, + 0x43, 0x54, 0x53, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0xe8, 0x07, 0x12, 0x1e, + 0x0a, 0x1a, 0x53, 0x50, 0x4c, 0x49, 0x54, 0x5f, 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, 0x4d, 0x41, + 0x58, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x44, 
0x41, 0x59, 0x53, 0x10, 0x0e, 0x1a, 0x02, + 0x10, 0x01, 0x22, 0x86, 0x24, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x17, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x08, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, + 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x4d, 0x0a, 0x07, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, + 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, + 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, + 0x6c, 0x61, 0x73, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x70, 0x6f, 0x18, + 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x70, 0x6f, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, + 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, + 0x03, 0x61, 0x63, 0x6c, 0x12, 0x54, 0x0a, 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x10, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x09, 0x6c, 0x69, + 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, + 0x6c, 0x65, 0x52, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, 0x40, 0x0a, + 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 
0x0b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, + 0x32, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x6f, 0x72, 0x73, 0x52, 0x04, 0x63, + 0x6f, 0x72, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, + 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x3d, + 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3b, 0x0a, + 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, + 0x65, 0x52, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, + 0x12, 0x3b, 0x0a, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x12, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x6f, 0x67, + 0x67, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x33, 0x0a, + 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, + 0x65, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 
0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x6e, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x07, 0x62, 0x69, 0x6c, 0x6c, + 0x69, 0x6e, 0x67, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x62, 0x69, + 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x54, 0x0a, 0x10, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x74, 0x65, 0x6e, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x42, 0x0a, 0x0a, 0x69, + 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x69, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x23, 0x0a, 0x0d, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x7a, 0x73, + 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, + 0x73, 0x50, 0x7a, 0x73, 0x12, 0x67, 0x0a, 0x17, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, + 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, + 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, + 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x6f, + 0x63, 0x6c, 0x61, 0x73, 0x73, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, + 0x12, 0x6b, 0x0a, 0x16, 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x20, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x48, 0x69, 0x65, 0x72, + 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x15, 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, + 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x5d, 0x0a, + 0x12, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x18, 0x1f, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x53, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x73, 0x6f, 0x66, 0x74, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x30, 0x0a, 0x07, + 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x72, 0x50, 0x61, 0x79, 0x73, 0x1a, 0x87, + 0x01, 0x0a, 0x04, 0x43, 0x6f, 0x72, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, + 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, + 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x41, 0x67, + 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x1a, 0x5c, 0x0a, 0x0a, 0x45, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x1a, 0xb1, 0x02, 0x0a, 0x09, 0x49, 0x61, 0x6d, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x7b, 0x0a, 0x1b, 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x5f, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x55, + 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x18, 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x12, 0x38, 0x0a, 0x18, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x16, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x50, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x18, 0x55, + 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, 
0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x12, 0x37, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0xdb, 0x07, 0x0a, 0x09, 0x4c, + 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, 0x3c, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, + 0x52, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x1a, 0x8f, 0x07, 0x0a, 0x04, 0x52, 0x75, 0x6c, 0x65, 0x12, + 0x47, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, + 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x50, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, + 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x41, 0x0a, 0x06, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x1a, 0xa8, 0x05, + 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x08, 0x61, + 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, + 0x07, 0x61, 0x67, 0x65, 0x44, 0x61, 0x79, 0x73, 0x88, 0x01, 0x01, 0x12, 0x38, 0x0a, 0x0e, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, + 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x1c, 0x0a, 0x07, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x06, 0x69, 0x73, 0x4c, 0x69, 0x76, 0x65, + 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x12, 0x6e, 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x48, + 0x02, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x4e, 0x65, 0x77, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x32, 0x0a, 0x15, 0x6d, 0x61, 0x74, 
0x63, 0x68, 0x65, + 0x73, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x16, 0x64, 0x61, + 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x48, 0x03, 0x52, 0x13, 0x64, 0x61, + 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, + 0x65, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, + 0x61, 0x74, 0x65, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x42, + 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x40, 0x0a, 0x1a, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, + 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x48, 0x04, 0x52, 0x17, 0x64, 0x61, 0x79, + 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, + 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x16, 0x6e, 0x6f, 0x6e, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, + 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x14, 0x6e, 0x6f, 0x6e, 0x63, + 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, + 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, + 0x69, 0x78, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x73, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x42, 0x0b, + 0x0a, 0x09, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, + 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, + 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x19, + 0x0a, 0x17, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x64, 0x61, + 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, + 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x54, 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x67, + 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6c, 0x6f, 0x67, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x6f, 0x67, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, + 0x6f, 0x67, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x1a, 0xbb, + 0x01, 
0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0e, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, + 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x6b, + 0x65, 0x64, 0x12, 0x48, 0x0a, 0x12, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, + 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xd3, 0x01, 0x0a, + 0x10, 0x53, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x12, 0x4d, 0x0a, 0x12, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, + 0x12, 0x46, 0x0a, 0x0e, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x48, 0x01, 0x52, 0x0d, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x72, 0x65, 0x74, + 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x11, 0x0a, 0x0f, 0x5f, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x1a, 0x26, 0x0a, 0x0a, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, + 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x59, 0x0a, 0x07, 0x57, 0x65, + 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x70, 0x61, + 0x67, 0x65, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0e, 0x6d, 0x61, 0x69, 0x6e, 0x50, 0x61, 0x67, 0x65, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, + 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x70, 0x61, 0x67, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, + 0x64, 0x50, 0x61, 0x67, 0x65, 0x1a, 0x3e, 0x0a, 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, + 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, + 0x0a, 0x0e, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 
0x09, 0x52, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x4c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xd6, 0x02, 0x0a, 0x09, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, + 0x61, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x40, 0x0a, + 0x0b, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, + 0x39, 0x0a, 0x16, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x14, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x88, 0x01, 0x01, 0x12, 0x70, 0x0a, 0x22, 0x74, 0x65, + 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, + 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x48, 0x01, 0x52, 0x1e, 0x74, 0x65, 0x72, 0x6d, 0x69, + 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x42, 0x19, 0x0a, 0x17, + 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x74, 0x65, 0x72, 0x6d, + 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, + 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x36, + 0x0a, 0x15, 0x48, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x3a, 0x58, 0xea, 0x41, 0x55, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x2a, 0x07, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x73, 0x32, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x97, 0x02, 0x0a, 0x13, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 
0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, + 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, + 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, + 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, + 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, + 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x5a, 0x0a, 0x0f, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, + 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x05, 0xe0, 0x41, 0x01, 0x08, 0x01, + 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, + 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, + 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, + 0x63, 0x22, 0x54, 0x0a, 0x0f, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x73, 0x75, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, + 0x01, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x64, 0x35, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x64, 0x35, 0x48, 0x61, 0x73, 0x68, 0x42, 0x09, 0x0a, 0x07, + 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0x71, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x0a, + 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, + 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, + 0x12, 0x28, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x53, + 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xf7, 0x0d, 0x0a, 0x06, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 
0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, + 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, + 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a, + 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, + 0x67, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x23, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x48, 0x00, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, + 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, + 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x2f, 0x0a, 0x13, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x64, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x44, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, + 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x61, + 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x64, 0x65, + 0x6c, 
0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, + 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2c, 0x0a, + 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, + 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x09, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, + 0x6d, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, + 0x6d, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, + 0x12, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, + 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x5a, 0x0a, 0x19, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x16, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x54, 0x69, 0x6d, + 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x68, + 0x6f, 0x6c, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x74, 0x65, 0x6d, 0x70, 0x6f, + 0x72, 0x61, 0x72, 0x79, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x4e, 0x0a, 0x15, 0x72, 0x65, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x13, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, + 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x18, 
0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d, 0x0a, + 0x10, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, + 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x05, + 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, + 0x72, 0x12, 0x56, 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x65, 0x6e, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, + 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x10, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, + 0x03, 0x48, 0x02, 0x52, 0x0e, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, + 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x4e, 0x0a, 0x10, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x64, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, + 0x03, 0x48, 0x03, 0x52, 0x0e, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, + 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, + 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x73, + 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 
0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x42, + 0x13, 0x0a, 0x11, 0x5f, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, + 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, + 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, + 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x03, 0x52, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, + 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, + 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, + 0x61, 0x69, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, + 0x6d, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x8e, + 0x01, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, + 0x48, 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x25, + 0x0a, 0x0e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x22, 0x3c, 0x0a, 0x05, 0x4f, 0x77, 0x6e, + 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x08, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x5f, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, + 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, + 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, + 0x74, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x32, 0x8c, 0x1c, 0x0a, 0x07, 0x53, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, + 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x6f, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, + 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xab, 0x01, 0x0a, 0x0c, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x58, 0xda, + 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, + 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x1e, 0x0a, 0x0e, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x85, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, + 0x2e, 
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, + 0x93, 0x01, 0x0a, 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, + 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x33, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x26, 0xda, + 0x41, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, + 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x75, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, + 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x22, 0x2a, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x8a, 0xd3, 0xe4, + 0x93, 0x02, 0x19, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, + 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x7c, 0x0a, 0x0c, + 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, + 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, + 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x31, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x8a, 0xd3, 0xe4, 0x93, 0x02, + 0x19, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x96, 0x02, 0x0a, 0x12, 0x54, + 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, + 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, + 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 
0x65, 0x22, 0xaa, 0x01, 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x8c, 0x01, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, + 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0x3b, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x12, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, + 0x7d, 0x2f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x46, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, + 0x2f, 0x2a, 0x2a, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37, 0xda, 0x41, 0x12, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, + 0xe4, 0x93, 0x02, 0x1c, 0x12, 0x1a, 0x0a, 0x0b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, + 0x12, 0x7e, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x29, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x12, 0x21, 0x0a, + 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, + 0x12, 0x98, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, + 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 
0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, + 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x8d, 0x01, 0x0a, 0x0d, + 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x22, 0x38, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, + 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, + 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xba, 0x01, 0x0a, 0x14, + 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, + 0x72, 0x69, 0x74, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, + 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, + 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, + 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, + 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x95, 0x01, 0x0a, 0x09, 0x47, 0x65, 0x74, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, + 0x12, 0xa5, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 
0x64, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x48, 0xda, 0x41, + 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, + 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, + 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x30, 0x01, 0x12, 0x8c, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x39, 0xda, 0x41, + 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, + 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x60, 0x0a, 0x0b, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x6e, 0x0a, 0x0f, 0x42, 0x69, 0x64, + 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x29, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, + 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x84, 0x01, 0x0a, 0x0b, 0x4c, 0x69, + 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, + 0x12, 
0x98, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x3a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x0f, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, + 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x13, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, + 0x69, 0x74, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, + 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, + 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, 0x0a, 0x21, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x2e, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, + 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, + 0x10, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, + 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, + 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x1a, 0xa7, 0x02, + 0xca, 0x41, 0x16, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x8a, 0x02, 0x68, 0x74, 0x74, + 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2d, 0x70, 0x6c, 
0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, + 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, + 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2c, 0x68, 0x74, 0x74, + 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, + 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, + 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, + 0x64, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0xe2, 0x01, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, + 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, + 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, + 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x0c, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, + 0x6f, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x70, 0x62, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var ( @@ -8929,7 +11454,11 @@ func file_google_storage_v2_storage_proto_rawDescGZIP() []byte { } var file_google_storage_v2_storage_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +<<<<<<< HEAD var file_google_storage_v2_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 75) +======= +var file_google_storage_v2_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 62) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var file_google_storage_v2_storage_proto_goTypes = []any{ (ServiceConstants_Values)(0), // 0: google.storage.v2.ServiceConstants.Values (*DeleteBucketRequest)(nil), // 1: google.storage.v2.DeleteBucketRequest @@ -8947,6 
+11476,7 @@ var file_google_storage_v2_storage_proto_goTypes = []any{ (*ReadObjectRequest)(nil), // 13: google.storage.v2.ReadObjectRequest (*GetObjectRequest)(nil), // 14: google.storage.v2.GetObjectRequest (*ReadObjectResponse)(nil), // 15: google.storage.v2.ReadObjectResponse +<<<<<<< HEAD (*BidiReadObjectSpec)(nil), // 16: google.storage.v2.BidiReadObjectSpec (*BidiReadObjectRequest)(nil), // 17: google.storage.v2.BidiReadObjectRequest (*BidiReadObjectResponse)(nil), // 18: google.storage.v2.BidiReadObjectResponse @@ -9185,6 +11715,210 @@ var file_google_storage_v2_storage_proto_depIdxs = []int32{ 112, // [112:112] is the sub-list for extension type_name 112, // [112:112] is the sub-list for extension extendee 0, // [0:112] is the sub-list for field type_name +======= + (*WriteObjectSpec)(nil), // 16: google.storage.v2.WriteObjectSpec + (*WriteObjectRequest)(nil), // 17: google.storage.v2.WriteObjectRequest + (*WriteObjectResponse)(nil), // 18: google.storage.v2.WriteObjectResponse + (*BidiWriteObjectRequest)(nil), // 19: google.storage.v2.BidiWriteObjectRequest + (*BidiWriteObjectResponse)(nil), // 20: google.storage.v2.BidiWriteObjectResponse + (*ListObjectsRequest)(nil), // 21: google.storage.v2.ListObjectsRequest + (*QueryWriteStatusRequest)(nil), // 22: google.storage.v2.QueryWriteStatusRequest + (*QueryWriteStatusResponse)(nil), // 23: google.storage.v2.QueryWriteStatusResponse + (*RewriteObjectRequest)(nil), // 24: google.storage.v2.RewriteObjectRequest + (*RewriteResponse)(nil), // 25: google.storage.v2.RewriteResponse + (*StartResumableWriteRequest)(nil), // 26: google.storage.v2.StartResumableWriteRequest + (*StartResumableWriteResponse)(nil), // 27: google.storage.v2.StartResumableWriteResponse + (*UpdateObjectRequest)(nil), // 28: google.storage.v2.UpdateObjectRequest + (*CommonObjectRequestParams)(nil), // 29: google.storage.v2.CommonObjectRequestParams + (*ServiceConstants)(nil), // 30: google.storage.v2.ServiceConstants + (*Bucket)(nil), // 31: google.storage.v2.Bucket + (*BucketAccessControl)(nil), // 32: google.storage.v2.BucketAccessControl + (*ChecksummedData)(nil), // 33: google.storage.v2.ChecksummedData + (*ObjectChecksums)(nil), // 34: google.storage.v2.ObjectChecksums + (*CustomerEncryption)(nil), // 35: google.storage.v2.CustomerEncryption + (*Object)(nil), // 36: google.storage.v2.Object + (*ObjectAccessControl)(nil), // 37: google.storage.v2.ObjectAccessControl + (*ListObjectsResponse)(nil), // 38: google.storage.v2.ListObjectsResponse + (*ProjectTeam)(nil), // 39: google.storage.v2.ProjectTeam + (*Owner)(nil), // 40: google.storage.v2.Owner + (*ContentRange)(nil), // 41: google.storage.v2.ContentRange + (*ComposeObjectRequest_SourceObject)(nil), // 42: google.storage.v2.ComposeObjectRequest.SourceObject + (*ComposeObjectRequest_SourceObject_ObjectPreconditions)(nil), // 43: google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + (*Bucket_Billing)(nil), // 44: google.storage.v2.Bucket.Billing + (*Bucket_Cors)(nil), // 45: google.storage.v2.Bucket.Cors + (*Bucket_Encryption)(nil), // 46: google.storage.v2.Bucket.Encryption + (*Bucket_IamConfig)(nil), // 47: google.storage.v2.Bucket.IamConfig + (*Bucket_Lifecycle)(nil), // 48: google.storage.v2.Bucket.Lifecycle + (*Bucket_Logging)(nil), // 49: google.storage.v2.Bucket.Logging + (*Bucket_RetentionPolicy)(nil), // 50: google.storage.v2.Bucket.RetentionPolicy + (*Bucket_SoftDeletePolicy)(nil), // 51: google.storage.v2.Bucket.SoftDeletePolicy + (*Bucket_Versioning)(nil), // 52: 
google.storage.v2.Bucket.Versioning + (*Bucket_Website)(nil), // 53: google.storage.v2.Bucket.Website + (*Bucket_CustomPlacementConfig)(nil), // 54: google.storage.v2.Bucket.CustomPlacementConfig + (*Bucket_Autoclass)(nil), // 55: google.storage.v2.Bucket.Autoclass + (*Bucket_HierarchicalNamespace)(nil), // 56: google.storage.v2.Bucket.HierarchicalNamespace + nil, // 57: google.storage.v2.Bucket.LabelsEntry + (*Bucket_IamConfig_UniformBucketLevelAccess)(nil), // 58: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + (*Bucket_Lifecycle_Rule)(nil), // 59: google.storage.v2.Bucket.Lifecycle.Rule + (*Bucket_Lifecycle_Rule_Action)(nil), // 60: google.storage.v2.Bucket.Lifecycle.Rule.Action + (*Bucket_Lifecycle_Rule_Condition)(nil), // 61: google.storage.v2.Bucket.Lifecycle.Rule.Condition + nil, // 62: google.storage.v2.Object.MetadataEntry + (*fieldmaskpb.FieldMask)(nil), // 63: google.protobuf.FieldMask + (*timestamppb.Timestamp)(nil), // 64: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 65: google.protobuf.Duration + (*date.Date)(nil), // 66: google.type.Date + (*iampb.GetIamPolicyRequest)(nil), // 67: google.iam.v1.GetIamPolicyRequest + (*iampb.SetIamPolicyRequest)(nil), // 68: google.iam.v1.SetIamPolicyRequest + (*iampb.TestIamPermissionsRequest)(nil), // 69: google.iam.v1.TestIamPermissionsRequest + (*emptypb.Empty)(nil), // 70: google.protobuf.Empty + (*iampb.Policy)(nil), // 71: google.iam.v1.Policy + (*iampb.TestIamPermissionsResponse)(nil), // 72: google.iam.v1.TestIamPermissionsResponse +} +var file_google_storage_v2_storage_proto_depIdxs = []int32{ + 63, // 0: google.storage.v2.GetBucketRequest.read_mask:type_name -> google.protobuf.FieldMask + 31, // 1: google.storage.v2.CreateBucketRequest.bucket:type_name -> google.storage.v2.Bucket + 63, // 2: google.storage.v2.ListBucketsRequest.read_mask:type_name -> google.protobuf.FieldMask + 31, // 3: google.storage.v2.ListBucketsResponse.buckets:type_name -> google.storage.v2.Bucket + 31, // 4: google.storage.v2.UpdateBucketRequest.bucket:type_name -> google.storage.v2.Bucket + 63, // 5: google.storage.v2.UpdateBucketRequest.update_mask:type_name -> google.protobuf.FieldMask + 36, // 6: google.storage.v2.ComposeObjectRequest.destination:type_name -> google.storage.v2.Object + 42, // 7: google.storage.v2.ComposeObjectRequest.source_objects:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject + 29, // 8: google.storage.v2.ComposeObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 34, // 9: google.storage.v2.ComposeObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 29, // 10: google.storage.v2.DeleteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 29, // 11: google.storage.v2.RestoreObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 29, // 12: google.storage.v2.ReadObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 63, // 13: google.storage.v2.ReadObjectRequest.read_mask:type_name -> google.protobuf.FieldMask + 29, // 14: google.storage.v2.GetObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 63, // 15: google.storage.v2.GetObjectRequest.read_mask:type_name -> google.protobuf.FieldMask + 33, // 16: google.storage.v2.ReadObjectResponse.checksummed_data:type_name -> google.storage.v2.ChecksummedData + 34, // 17: 
google.storage.v2.ReadObjectResponse.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 41, // 18: google.storage.v2.ReadObjectResponse.content_range:type_name -> google.storage.v2.ContentRange + 36, // 19: google.storage.v2.ReadObjectResponse.metadata:type_name -> google.storage.v2.Object + 36, // 20: google.storage.v2.WriteObjectSpec.resource:type_name -> google.storage.v2.Object + 16, // 21: google.storage.v2.WriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec + 33, // 22: google.storage.v2.WriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData + 34, // 23: google.storage.v2.WriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 29, // 24: google.storage.v2.WriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 36, // 25: google.storage.v2.WriteObjectResponse.resource:type_name -> google.storage.v2.Object + 16, // 26: google.storage.v2.BidiWriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec + 33, // 27: google.storage.v2.BidiWriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData + 34, // 28: google.storage.v2.BidiWriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 29, // 29: google.storage.v2.BidiWriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 36, // 30: google.storage.v2.BidiWriteObjectResponse.resource:type_name -> google.storage.v2.Object + 63, // 31: google.storage.v2.ListObjectsRequest.read_mask:type_name -> google.protobuf.FieldMask + 29, // 32: google.storage.v2.QueryWriteStatusRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 36, // 33: google.storage.v2.QueryWriteStatusResponse.resource:type_name -> google.storage.v2.Object + 36, // 34: google.storage.v2.RewriteObjectRequest.destination:type_name -> google.storage.v2.Object + 29, // 35: google.storage.v2.RewriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 34, // 36: google.storage.v2.RewriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 36, // 37: google.storage.v2.RewriteResponse.resource:type_name -> google.storage.v2.Object + 16, // 38: google.storage.v2.StartResumableWriteRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec + 29, // 39: google.storage.v2.StartResumableWriteRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 34, // 40: google.storage.v2.StartResumableWriteRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 36, // 41: google.storage.v2.UpdateObjectRequest.object:type_name -> google.storage.v2.Object + 63, // 42: google.storage.v2.UpdateObjectRequest.update_mask:type_name -> google.protobuf.FieldMask + 29, // 43: google.storage.v2.UpdateObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 32, // 44: google.storage.v2.Bucket.acl:type_name -> google.storage.v2.BucketAccessControl + 37, // 45: google.storage.v2.Bucket.default_object_acl:type_name -> google.storage.v2.ObjectAccessControl + 48, // 46: google.storage.v2.Bucket.lifecycle:type_name -> google.storage.v2.Bucket.Lifecycle + 64, // 47: google.storage.v2.Bucket.create_time:type_name -> google.protobuf.Timestamp + 45, // 48: google.storage.v2.Bucket.cors:type_name -> 
google.storage.v2.Bucket.Cors + 64, // 49: google.storage.v2.Bucket.update_time:type_name -> google.protobuf.Timestamp + 57, // 50: google.storage.v2.Bucket.labels:type_name -> google.storage.v2.Bucket.LabelsEntry + 53, // 51: google.storage.v2.Bucket.website:type_name -> google.storage.v2.Bucket.Website + 52, // 52: google.storage.v2.Bucket.versioning:type_name -> google.storage.v2.Bucket.Versioning + 49, // 53: google.storage.v2.Bucket.logging:type_name -> google.storage.v2.Bucket.Logging + 40, // 54: google.storage.v2.Bucket.owner:type_name -> google.storage.v2.Owner + 46, // 55: google.storage.v2.Bucket.encryption:type_name -> google.storage.v2.Bucket.Encryption + 44, // 56: google.storage.v2.Bucket.billing:type_name -> google.storage.v2.Bucket.Billing + 50, // 57: google.storage.v2.Bucket.retention_policy:type_name -> google.storage.v2.Bucket.RetentionPolicy + 47, // 58: google.storage.v2.Bucket.iam_config:type_name -> google.storage.v2.Bucket.IamConfig + 54, // 59: google.storage.v2.Bucket.custom_placement_config:type_name -> google.storage.v2.Bucket.CustomPlacementConfig + 55, // 60: google.storage.v2.Bucket.autoclass:type_name -> google.storage.v2.Bucket.Autoclass + 56, // 61: google.storage.v2.Bucket.hierarchical_namespace:type_name -> google.storage.v2.Bucket.HierarchicalNamespace + 51, // 62: google.storage.v2.Bucket.soft_delete_policy:type_name -> google.storage.v2.Bucket.SoftDeletePolicy + 39, // 63: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam + 37, // 64: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl + 64, // 65: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp + 64, // 66: google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp + 34, // 67: google.storage.v2.Object.checksums:type_name -> google.storage.v2.ObjectChecksums + 64, // 68: google.storage.v2.Object.update_time:type_name -> google.protobuf.Timestamp + 64, // 69: google.storage.v2.Object.update_storage_class_time:type_name -> google.protobuf.Timestamp + 64, // 70: google.storage.v2.Object.retention_expire_time:type_name -> google.protobuf.Timestamp + 62, // 71: google.storage.v2.Object.metadata:type_name -> google.storage.v2.Object.MetadataEntry + 40, // 72: google.storage.v2.Object.owner:type_name -> google.storage.v2.Owner + 35, // 73: google.storage.v2.Object.customer_encryption:type_name -> google.storage.v2.CustomerEncryption + 64, // 74: google.storage.v2.Object.custom_time:type_name -> google.protobuf.Timestamp + 64, // 75: google.storage.v2.Object.soft_delete_time:type_name -> google.protobuf.Timestamp + 64, // 76: google.storage.v2.Object.hard_delete_time:type_name -> google.protobuf.Timestamp + 39, // 77: google.storage.v2.ObjectAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam + 36, // 78: google.storage.v2.ListObjectsResponse.objects:type_name -> google.storage.v2.Object + 43, // 79: google.storage.v2.ComposeObjectRequest.SourceObject.object_preconditions:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + 58, // 80: google.storage.v2.Bucket.IamConfig.uniform_bucket_level_access:type_name -> google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + 59, // 81: google.storage.v2.Bucket.Lifecycle.rule:type_name -> google.storage.v2.Bucket.Lifecycle.Rule + 64, // 82: google.storage.v2.Bucket.RetentionPolicy.effective_time:type_name -> google.protobuf.Timestamp + 65, // 83: 
google.storage.v2.Bucket.RetentionPolicy.retention_duration:type_name -> google.protobuf.Duration + 65, // 84: google.storage.v2.Bucket.SoftDeletePolicy.retention_duration:type_name -> google.protobuf.Duration + 64, // 85: google.storage.v2.Bucket.SoftDeletePolicy.effective_time:type_name -> google.protobuf.Timestamp + 64, // 86: google.storage.v2.Bucket.Autoclass.toggle_time:type_name -> google.protobuf.Timestamp + 64, // 87: google.storage.v2.Bucket.Autoclass.terminal_storage_class_update_time:type_name -> google.protobuf.Timestamp + 64, // 88: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.lock_time:type_name -> google.protobuf.Timestamp + 60, // 89: google.storage.v2.Bucket.Lifecycle.Rule.action:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Action + 61, // 90: google.storage.v2.Bucket.Lifecycle.Rule.condition:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Condition + 66, // 91: google.storage.v2.Bucket.Lifecycle.Rule.Condition.created_before:type_name -> google.type.Date + 66, // 92: google.storage.v2.Bucket.Lifecycle.Rule.Condition.custom_time_before:type_name -> google.type.Date + 66, // 93: google.storage.v2.Bucket.Lifecycle.Rule.Condition.noncurrent_time_before:type_name -> google.type.Date + 1, // 94: google.storage.v2.Storage.DeleteBucket:input_type -> google.storage.v2.DeleteBucketRequest + 2, // 95: google.storage.v2.Storage.GetBucket:input_type -> google.storage.v2.GetBucketRequest + 3, // 96: google.storage.v2.Storage.CreateBucket:input_type -> google.storage.v2.CreateBucketRequest + 4, // 97: google.storage.v2.Storage.ListBuckets:input_type -> google.storage.v2.ListBucketsRequest + 6, // 98: google.storage.v2.Storage.LockBucketRetentionPolicy:input_type -> google.storage.v2.LockBucketRetentionPolicyRequest + 67, // 99: google.storage.v2.Storage.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest + 68, // 100: google.storage.v2.Storage.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest + 69, // 101: google.storage.v2.Storage.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest + 7, // 102: google.storage.v2.Storage.UpdateBucket:input_type -> google.storage.v2.UpdateBucketRequest + 8, // 103: google.storage.v2.Storage.ComposeObject:input_type -> google.storage.v2.ComposeObjectRequest + 9, // 104: google.storage.v2.Storage.DeleteObject:input_type -> google.storage.v2.DeleteObjectRequest + 10, // 105: google.storage.v2.Storage.RestoreObject:input_type -> google.storage.v2.RestoreObjectRequest + 11, // 106: google.storage.v2.Storage.CancelResumableWrite:input_type -> google.storage.v2.CancelResumableWriteRequest + 14, // 107: google.storage.v2.Storage.GetObject:input_type -> google.storage.v2.GetObjectRequest + 13, // 108: google.storage.v2.Storage.ReadObject:input_type -> google.storage.v2.ReadObjectRequest + 28, // 109: google.storage.v2.Storage.UpdateObject:input_type -> google.storage.v2.UpdateObjectRequest + 17, // 110: google.storage.v2.Storage.WriteObject:input_type -> google.storage.v2.WriteObjectRequest + 19, // 111: google.storage.v2.Storage.BidiWriteObject:input_type -> google.storage.v2.BidiWriteObjectRequest + 21, // 112: google.storage.v2.Storage.ListObjects:input_type -> google.storage.v2.ListObjectsRequest + 24, // 113: google.storage.v2.Storage.RewriteObject:input_type -> google.storage.v2.RewriteObjectRequest + 26, // 114: google.storage.v2.Storage.StartResumableWrite:input_type -> google.storage.v2.StartResumableWriteRequest + 22, // 115: google.storage.v2.Storage.QueryWriteStatus:input_type -> 
google.storage.v2.QueryWriteStatusRequest + 70, // 116: google.storage.v2.Storage.DeleteBucket:output_type -> google.protobuf.Empty + 31, // 117: google.storage.v2.Storage.GetBucket:output_type -> google.storage.v2.Bucket + 31, // 118: google.storage.v2.Storage.CreateBucket:output_type -> google.storage.v2.Bucket + 5, // 119: google.storage.v2.Storage.ListBuckets:output_type -> google.storage.v2.ListBucketsResponse + 31, // 120: google.storage.v2.Storage.LockBucketRetentionPolicy:output_type -> google.storage.v2.Bucket + 71, // 121: google.storage.v2.Storage.GetIamPolicy:output_type -> google.iam.v1.Policy + 71, // 122: google.storage.v2.Storage.SetIamPolicy:output_type -> google.iam.v1.Policy + 72, // 123: google.storage.v2.Storage.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse + 31, // 124: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket + 36, // 125: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object + 70, // 126: google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty + 36, // 127: google.storage.v2.Storage.RestoreObject:output_type -> google.storage.v2.Object + 12, // 128: google.storage.v2.Storage.CancelResumableWrite:output_type -> google.storage.v2.CancelResumableWriteResponse + 36, // 129: google.storage.v2.Storage.GetObject:output_type -> google.storage.v2.Object + 15, // 130: google.storage.v2.Storage.ReadObject:output_type -> google.storage.v2.ReadObjectResponse + 36, // 131: google.storage.v2.Storage.UpdateObject:output_type -> google.storage.v2.Object + 18, // 132: google.storage.v2.Storage.WriteObject:output_type -> google.storage.v2.WriteObjectResponse + 20, // 133: google.storage.v2.Storage.BidiWriteObject:output_type -> google.storage.v2.BidiWriteObjectResponse + 38, // 134: google.storage.v2.Storage.ListObjects:output_type -> google.storage.v2.ListObjectsResponse + 25, // 135: google.storage.v2.Storage.RewriteObject:output_type -> google.storage.v2.RewriteResponse + 27, // 136: google.storage.v2.Storage.StartResumableWrite:output_type -> google.storage.v2.StartResumableWriteResponse + 23, // 137: google.storage.v2.Storage.QueryWriteStatus:output_type -> google.storage.v2.QueryWriteStatusResponse + 116, // [116:138] is the sub-list for method output_type + 94, // [94:116] is the sub-list for method input_type + 94, // [94:94] is the sub-list for extension type_name + 94, // [94:94] is the sub-list for extension extendee + 0, // [0:94] is the sub-list for field type_name +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func init() { file_google_storage_v2_storage_proto_init() } @@ -9192,6 +11926,731 @@ func file_google_storage_v2_storage_proto_init() { if File_google_storage_v2_storage_proto != nil { return } +<<<<<<< HEAD +======= + if !protoimpl.UnsafeEnabled { + file_google_storage_v2_storage_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*DeleteBucketRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*GetBucketRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*CreateBucketRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache 
+ case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*ListBucketsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*ListBucketsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*LockBucketRetentionPolicyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*UpdateBucketRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*ComposeObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*DeleteObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*RestoreObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[10].Exporter = func(v any, i int) any { + switch v := v.(*CancelResumableWriteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*CancelResumableWriteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[12].Exporter = func(v any, i int) any { + switch v := v.(*ReadObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[13].Exporter = func(v any, i int) any { + switch v := v.(*GetObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[14].Exporter = func(v any, i int) any { + switch v := v.(*ReadObjectResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[15].Exporter = func(v any, i int) any { + switch v := v.(*WriteObjectSpec); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[16].Exporter = func(v any, i int) any { + 
switch v := v.(*WriteObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[17].Exporter = func(v any, i int) any { + switch v := v.(*WriteObjectResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[18].Exporter = func(v any, i int) any { + switch v := v.(*BidiWriteObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[19].Exporter = func(v any, i int) any { + switch v := v.(*BidiWriteObjectResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[20].Exporter = func(v any, i int) any { + switch v := v.(*ListObjectsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[21].Exporter = func(v any, i int) any { + switch v := v.(*QueryWriteStatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[22].Exporter = func(v any, i int) any { + switch v := v.(*QueryWriteStatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[23].Exporter = func(v any, i int) any { + switch v := v.(*RewriteObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[24].Exporter = func(v any, i int) any { + switch v := v.(*RewriteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[25].Exporter = func(v any, i int) any { + switch v := v.(*StartResumableWriteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[26].Exporter = func(v any, i int) any { + switch v := v.(*StartResumableWriteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[27].Exporter = func(v any, i int) any { + switch v := v.(*UpdateObjectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[28].Exporter = func(v any, i int) any { + switch v := v.(*CommonObjectRequestParams); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[29].Exporter = func(v any, i int) any { + switch v := v.(*ServiceConstants); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[30].Exporter = func(v any, i int) any { + switch v := v.(*Bucket); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[31].Exporter = func(v any, i int) any { + switch v := v.(*BucketAccessControl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[32].Exporter = func(v any, i int) any { + switch v := v.(*ChecksummedData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[33].Exporter = func(v any, i int) any { + switch v := v.(*ObjectChecksums); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[34].Exporter = func(v any, i int) any { + switch v := v.(*CustomerEncryption); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[35].Exporter = func(v any, i int) any { + switch v := v.(*Object); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[36].Exporter = func(v any, i int) any { + switch v := v.(*ObjectAccessControl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[37].Exporter = func(v any, i int) any { + switch v := v.(*ListObjectsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[38].Exporter = func(v any, i int) any { + switch v := v.(*ProjectTeam); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[39].Exporter = func(v any, i int) any { + switch v := v.(*Owner); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[40].Exporter = func(v any, i int) any { + switch v := v.(*ContentRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[41].Exporter = func(v any, i int) any { + switch v := v.(*ComposeObjectRequest_SourceObject); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[42].Exporter = func(v any, i int) any { + switch v := v.(*ComposeObjectRequest_SourceObject_ObjectPreconditions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[43].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_Billing); i { + case 0: + return &v.state + 
case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[44].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_Cors); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[45].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_Encryption); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[46].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_IamConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[47].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_Lifecycle); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[48].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_Logging); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[49].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_RetentionPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[50].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_SoftDeletePolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[51].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_Versioning); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[52].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_Website); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[53].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_CustomPlacementConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[54].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_Autoclass); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[55].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_HierarchicalNamespace); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[57].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_IamConfig_UniformBucketLevelAccess); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[58].Exporter 
= func(v any, i int) any { + switch v := v.(*Bucket_Lifecycle_Rule); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[59].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_Lifecycle_Rule_Action); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[60].Exporter = func(v any, i int) any { + switch v := v.(*Bucket_Lifecycle_Rule_Condition); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) file_google_storage_v2_storage_proto_msgTypes[0].OneofWrappers = []any{} file_google_storage_v2_storage_proto_msgTypes[1].OneofWrappers = []any{} file_google_storage_v2_storage_proto_msgTypes[3].OneofWrappers = []any{} @@ -9202,14 +12661,19 @@ func file_google_storage_v2_storage_proto_init() { file_google_storage_v2_storage_proto_msgTypes[12].OneofWrappers = []any{} file_google_storage_v2_storage_proto_msgTypes[13].OneofWrappers = []any{} file_google_storage_v2_storage_proto_msgTypes[15].OneofWrappers = []any{} +<<<<<<< HEAD file_google_storage_v2_storage_proto_msgTypes[18].OneofWrappers = []any{} file_google_storage_v2_storage_proto_msgTypes[19].OneofWrappers = []any{} file_google_storage_v2_storage_proto_msgTypes[26].OneofWrappers = []any{} file_google_storage_v2_storage_proto_msgTypes[27].OneofWrappers = []any{ +======= + file_google_storage_v2_storage_proto_msgTypes[16].OneofWrappers = []any{ +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) (*WriteObjectRequest_UploadId)(nil), (*WriteObjectRequest_WriteObjectSpec)(nil), (*WriteObjectRequest_ChecksummedData)(nil), } +<<<<<<< HEAD file_google_storage_v2_storage_proto_msgTypes[28].OneofWrappers = []any{ (*WriteObjectResponse_PersistedSize)(nil), (*WriteObjectResponse_Resource)(nil), @@ -9240,13 +12704,46 @@ func file_google_storage_v2_storage_proto_init() { file_google_storage_v2_storage_proto_msgTypes[63].OneofWrappers = []any{} file_google_storage_v2_storage_proto_msgTypes[67].OneofWrappers = []any{} file_google_storage_v2_storage_proto_msgTypes[73].OneofWrappers = []any{} +======= + file_google_storage_v2_storage_proto_msgTypes[17].OneofWrappers = []any{ + (*WriteObjectResponse_PersistedSize)(nil), + (*WriteObjectResponse_Resource)(nil), + } + file_google_storage_v2_storage_proto_msgTypes[18].OneofWrappers = []any{ + (*BidiWriteObjectRequest_UploadId)(nil), + (*BidiWriteObjectRequest_WriteObjectSpec)(nil), + (*BidiWriteObjectRequest_ChecksummedData)(nil), + } + file_google_storage_v2_storage_proto_msgTypes[19].OneofWrappers = []any{ + (*BidiWriteObjectResponse_PersistedSize)(nil), + (*BidiWriteObjectResponse_Resource)(nil), + } + file_google_storage_v2_storage_proto_msgTypes[20].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[22].OneofWrappers = []any{ + (*QueryWriteStatusResponse_PersistedSize)(nil), + (*QueryWriteStatusResponse_Resource)(nil), + } + file_google_storage_v2_storage_proto_msgTypes[23].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[27].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[32].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[33].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[35].OneofWrappers = 
[]any{} + file_google_storage_v2_storage_proto_msgTypes[42].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[50].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[54].OneofWrappers = []any{} + file_google_storage_v2_storage_proto_msgTypes[60].OneofWrappers = []any{} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_storage_v2_storage_proto_rawDesc, NumEnums: 1, +<<<<<<< HEAD NumMessages: 75, +======= + NumMessages: 62, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) NumExtensions: 0, NumServices: 1, }, @@ -9304,6 +12801,7 @@ type StorageClient interface { // Concatenates a list of existing objects into a new object in the same // bucket. ComposeObject(ctx context.Context, in *ComposeObjectRequest, opts ...grpc.CallOption) (*Object, error) +<<<<<<< HEAD // Deletes an object and its metadata. Deletions are permanent if versioning // is not enabled for the bucket, or if the generation parameter is used, or // if [soft delete](https://cloud.google.com/storage/docs/soft-delete) is not @@ -9324,6 +12822,14 @@ type StorageClient interface { // Requires `storage.objects.delete` // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) on // the bucket. +======= + // Deletes an object and its metadata. + // + // Deletions are normally permanent when versioning is disabled or whenever + // the generation parameter is used. However, if soft delete is enabled for + // the bucket, deleted objects can be restored using RestoreObject until the + // soft delete retention period has passed. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // Restores a soft-deleted object. RestoreObject(ctx context.Context, in *RestoreObjectRequest, opts ...grpc.CallOption) (*Object, error) @@ -9336,6 +12842,7 @@ type StorageClient interface { // they could either complete before the cancellation or fail if the // cancellation completes first. CancelResumableWrite(ctx context.Context, in *CancelResumableWriteRequest, opts ...grpc.CallOption) (*CancelResumableWriteResponse, error) +<<<<<<< HEAD // Retrieves object metadata. // // **IAM Permissions**: @@ -9373,6 +12880,12 @@ type StorageClient interface { // This API is currently in preview and is not yet available for general // use. BidiReadObject(ctx context.Context, opts ...grpc.CallOption) (Storage_BidiReadObjectClient, error) +======= + // Retrieves an object's metadata. + GetObject(ctx context.Context, in *GetObjectRequest, opts ...grpc.CallOption) (*Object, error) + // Reads an object's data. + ReadObject(ctx context.Context, in *ReadObjectRequest, opts ...grpc.CallOption) (Storage_ReadObjectClient, error) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Updates an object's metadata. // Equivalent to JSON API's storage.objects.patch. UpdateObject(ctx context.Context, in *UpdateObjectRequest, opts ...grpc.CallOption) (*Object, error) @@ -9429,18 +12942,25 @@ type StorageClient interface { // whether the service views the object as complete. 
// // Attempting to resume an already finalized object will result in an OK +<<<<<<< HEAD // status, with a `WriteObjectResponse` containing the finalized object's +======= + // status, with a WriteObjectResponse containing the finalized object's +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // metadata. // // Alternatively, the BidiWriteObject operation may be used to write an // object with controls over flushing and the ability to fetch the ability to // determine the current persisted size. +<<<<<<< HEAD // // **IAM Permissions**: // // Requires `storage.objects.create` // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) on // the bucket. +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_WriteObjectClient, error) // Stores a new object and metadata. // @@ -9459,6 +12979,7 @@ type StorageClient interface { // always be sent to the client, regardless of the value of `state_lookup`. BidiWriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_BidiWriteObjectClient, error) // Retrieves a list of objects matching the criteria. +<<<<<<< HEAD // // **IAM Permissions**: // @@ -9466,10 +12987,13 @@ type StorageClient interface { // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) // to use this method. To return object ACLs, the authenticated user must also // have the `storage.objects.getIamPolicy` permission. +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ListObjects(ctx context.Context, in *ListObjectsRequest, opts ...grpc.CallOption) (*ListObjectsResponse, error) // Rewrites a source object to a destination object. Optionally overrides // metadata. RewriteObject(ctx context.Context, in *RewriteObjectRequest, opts ...grpc.CallOption) (*RewriteResponse, error) +<<<<<<< HEAD // Starts a resumable write operation. This // method is part of the [Resumable // upload](https://cloud.google.com/storage/docs/resumable-uploads) feature. @@ -9504,6 +13028,26 @@ type StorageClient interface { QueryWriteStatus(ctx context.Context, in *QueryWriteStatusRequest, opts ...grpc.CallOption) (*QueryWriteStatusResponse, error) // Moves the source object to the destination object in the same bucket. MoveObject(ctx context.Context, in *MoveObjectRequest, opts ...grpc.CallOption) (*Object, error) +======= + // Starts a resumable write. How long the write operation remains valid, and + // what happens when the write operation becomes invalid, are + // service-dependent. + StartResumableWrite(ctx context.Context, in *StartResumableWriteRequest, opts ...grpc.CallOption) (*StartResumableWriteResponse, error) + // Determines the `persisted_size` for an object that is being written, which + // can then be used as the `write_offset` for the next `Write()` call. + // + // If the object does not exist (i.e., the object has been deleted, or the + // first `Write()` has not yet reached the service), this method returns the + // error `NOT_FOUND`. + // + // The client **may** call `QueryWriteStatus()` at any time to determine how + // much data has been processed for this object. This is useful if the + // client is buffering data and needs to know which data can be safely + // evicted. For any sequence of `QueryWriteStatus()` calls for a given + // object name, the sequence of returned `persisted_size` values will be + // non-decreasing. 
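The QueryWriteStatus contract spelled out above (persisted_size never decreases, and doubles as the write_offset for the next Write call) is easiest to see in a resume sequence. Below is a minimal sketch against the raw generated client, assuming an authenticated *grpc.ClientConn and an uploadID obtained from an earlier StartResumableWrite call; resumeUpload and data are illustrative names, and real applications would normally go through the high-level cloud.google.com/go/storage package rather than this internal one.

import (
	"context"

	storagepb "cloud.google.com/go/storage/internal/apiv2/storagepb"
	"google.golang.org/grpc"
)

// resumeUpload re-sends the unpersisted tail of data and finalizes the upload.
func resumeUpload(ctx context.Context, conn *grpc.ClientConn, uploadID string, data []byte) (*storagepb.WriteObjectResponse, error) {
	client := storagepb.NewStorageClient(conn)

	// persisted_size is non-decreasing across calls and is the safe offset to
	// resume from. (A finalized upload reports a resource instead; that case
	// is omitted here for brevity.)
	st, err := client.QueryWriteStatus(ctx, &storagepb.QueryWriteStatusRequest{UploadId: uploadID})
	if err != nil {
		return nil, err
	}
	offset := st.GetPersistedSize()

	stream, err := client.WriteObject(ctx)
	if err != nil {
		return nil, err
	}
	if err := stream.Send(&storagepb.WriteObjectRequest{
		FirstMessage: &storagepb.WriteObjectRequest_UploadId{UploadId: uploadID},
		WriteOffset:  offset,
		Data: &storagepb.WriteObjectRequest_ChecksummedData{
			ChecksummedData: &storagepb.ChecksummedData{Content: data[offset:]},
		},
		FinishWrite: true, // single final chunk, so no 256 KiB alignment concerns
	}); err != nil {
		return nil, err
	}
	return stream.CloseAndRecv()
}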
+ QueryWriteStatus(ctx context.Context, in *QueryWriteStatusRequest, opts ...grpc.CallOption) (*QueryWriteStatusResponse, error) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type storageClient struct { @@ -9672,6 +13216,7 @@ func (x *storageReadObjectClient) Recv() (*ReadObjectResponse, error) { return m, nil } +<<<<<<< HEAD func (c *storageClient) BidiReadObject(ctx context.Context, opts ...grpc.CallOption) (Storage_BidiReadObjectClient, error) { stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[1], "/google.storage.v2.Storage/BidiReadObject", opts...) if err != nil { @@ -9703,6 +13248,8 @@ func (x *storageBidiReadObjectClient) Recv() (*BidiReadObjectResponse, error) { return m, nil } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (c *storageClient) UpdateObject(ctx context.Context, in *UpdateObjectRequest, opts ...grpc.CallOption) (*Object, error) { out := new(Object) err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/UpdateObject", in, out, opts...) @@ -9713,7 +13260,11 @@ func (c *storageClient) UpdateObject(ctx context.Context, in *UpdateObjectReques } func (c *storageClient) WriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_WriteObjectClient, error) { +<<<<<<< HEAD stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[2], "/google.storage.v2.Storage/WriteObject", opts...) +======= + stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[1], "/google.storage.v2.Storage/WriteObject", opts...) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -9747,7 +13298,11 @@ func (x *storageWriteObjectClient) CloseAndRecv() (*WriteObjectResponse, error) } func (c *storageClient) BidiWriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_BidiWriteObjectClient, error) { +<<<<<<< HEAD stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[3], "/google.storage.v2.Storage/BidiWriteObject", opts...) +======= + stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[2], "/google.storage.v2.Storage/BidiWriteObject", opts...) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -9813,6 +13368,7 @@ func (c *storageClient) QueryWriteStatus(ctx context.Context, in *QueryWriteStat return out, nil } +<<<<<<< HEAD func (c *storageClient) MoveObject(ctx context.Context, in *MoveObjectRequest, opts ...grpc.CallOption) (*Object, error) { out := new(Object) err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/MoveObject", in, out, opts...) @@ -9822,6 +13378,8 @@ func (c *storageClient) MoveObject(ctx context.Context, in *MoveObjectRequest, o return out, nil } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // StorageServer is the server API for Storage service. type StorageServer interface { // Permanently deletes an empty bucket. @@ -9855,6 +13413,7 @@ type StorageServer interface { // Concatenates a list of existing objects into a new object in the same // bucket. ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) +<<<<<<< HEAD // Deletes an object and its metadata. Deletions are permanent if versioning // is not enabled for the bucket, or if the generation parameter is used, or // if [soft delete](https://cloud.google.com/storage/docs/soft-delete) is not @@ -9875,6 +13434,14 @@ type StorageServer interface { // Requires `storage.objects.delete` // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) on // the bucket. 
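Both sides of the DeleteObject comment conflict describe the same soft-delete interplay: deleting the live object without a generation is reversible while the bucket's soft delete retention period lasts, after which the deletion becomes permanent. A hedged sketch of that round trip with the generated client (bucket and object names are placeholders; the v2 API addresses buckets as projects/_/buckets/{bucket}):

// deleteThenRestore soft-deletes the live object and restores it. gen is the
// object's generation captured beforehand (for example from GetObject), which
// RestoreObject needs to identify the soft-deleted copy.
func deleteThenRestore(ctx context.Context, client storagepb.StorageClient, gen int64) (*storagepb.Object, error) {
	const bucket, object = "projects/_/buckets/example-bucket", "example-object"

	// No Generation is set here, so this deletes the live version; with soft
	// delete enabled on the bucket, the data is retained rather than destroyed.
	if _, err := client.DeleteObject(ctx, &storagepb.DeleteObjectRequest{
		Bucket: bucket,
		Object: object,
	}); err != nil {
		return nil, err
	}

	// Succeeds only while the soft delete retention period is still running;
	// afterwards the deletion is permanent and this returns NotFound.
	return client.RestoreObject(ctx, &storagepb.RestoreObjectRequest{
		Bucket:     bucket,
		Object:     object,
		Generation: gen,
	})
}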
+======= + // Deletes an object and its metadata. + // + // Deletions are normally permanent when versioning is disabled or whenever + // the generation parameter is used. However, if soft delete is enabled for + // the bucket, deleted objects can be restored using RestoreObject until the + // soft delete retention period has passed. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) DeleteObject(context.Context, *DeleteObjectRequest) (*emptypb.Empty, error) // Restores a soft-deleted object. RestoreObject(context.Context, *RestoreObjectRequest) (*Object, error) @@ -9887,6 +13454,7 @@ type StorageServer interface { // they could either complete before the cancellation or fail if the // cancellation completes first. CancelResumableWrite(context.Context, *CancelResumableWriteRequest) (*CancelResumableWriteResponse, error) +<<<<<<< HEAD // Retrieves object metadata. // // **IAM Permissions**: @@ -9924,6 +13492,12 @@ type StorageServer interface { // This API is currently in preview and is not yet available for general // use. BidiReadObject(Storage_BidiReadObjectServer) error +======= + // Retrieves an object's metadata. + GetObject(context.Context, *GetObjectRequest) (*Object, error) + // Reads an object's data. + ReadObject(*ReadObjectRequest, Storage_ReadObjectServer) error +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Updates an object's metadata. // Equivalent to JSON API's storage.objects.patch. UpdateObject(context.Context, *UpdateObjectRequest) (*Object, error) @@ -9980,18 +13554,25 @@ type StorageServer interface { // whether the service views the object as complete. // // Attempting to resume an already finalized object will result in an OK +<<<<<<< HEAD // status, with a `WriteObjectResponse` containing the finalized object's +======= + // status, with a WriteObjectResponse containing the finalized object's +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // metadata. // // Alternatively, the BidiWriteObject operation may be used to write an // object with controls over flushing and the ability to fetch the ability to // determine the current persisted size. +<<<<<<< HEAD // // **IAM Permissions**: // // Requires `storage.objects.create` // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) on // the bucket. +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WriteObject(Storage_WriteObjectServer) error // Stores a new object and metadata. // @@ -10010,6 +13591,7 @@ type StorageServer interface { // always be sent to the client, regardless of the value of `state_lookup`. BidiWriteObject(Storage_BidiWriteObjectServer) error // Retrieves a list of objects matching the criteria. +<<<<<<< HEAD // // **IAM Permissions**: // @@ -10017,10 +13599,13 @@ type StorageServer interface { // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) // to use this method. To return object ACLs, the authenticated user must also // have the `storage.objects.getIamPolicy` permission. +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ListObjects(context.Context, *ListObjectsRequest) (*ListObjectsResponse, error) // Rewrites a source object to a destination object. Optionally overrides // metadata. RewriteObject(context.Context, *RewriteObjectRequest) (*RewriteResponse, error) +<<<<<<< HEAD // Starts a resumable write operation. This // method is part of the [Resumable // upload](https://cloud.google.com/storage/docs/resumable-uploads) feature. 
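Few implementers satisfy the whole StorageServer interface directly; the intended pattern, using the UnimplementedStorageServer type defined a little further down, is to embed it and override only what is needed. A short sketch for a test fake (fakeStorage and its canned response are illustrative, not part of this patch):

// fakeStorage answers GetObject and reports Unimplemented for every other
// method. Embedding UnimplementedStorageServer keeps this type compiling even
// if new methods are added to the Storage service later.
type fakeStorage struct {
	storagepb.UnimplementedStorageServer
}

func (f *fakeStorage) GetObject(ctx context.Context, req *storagepb.GetObjectRequest) (*storagepb.Object, error) {
	return &storagepb.Object{
		Name:   req.GetObject(),
		Bucket: req.GetBucket(),
	}, nil
}

Registration goes through the helper that appears below: storagepb.RegisterStorageServer(grpcServer, &fakeStorage{}).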
@@ -10055,6 +13640,26 @@ type StorageServer interface { QueryWriteStatus(context.Context, *QueryWriteStatusRequest) (*QueryWriteStatusResponse, error) // Moves the source object to the destination object in the same bucket. MoveObject(context.Context, *MoveObjectRequest) (*Object, error) +======= + // Starts a resumable write. How long the write operation remains valid, and + // what happens when the write operation becomes invalid, are + // service-dependent. + StartResumableWrite(context.Context, *StartResumableWriteRequest) (*StartResumableWriteResponse, error) + // Determines the `persisted_size` for an object that is being written, which + // can then be used as the `write_offset` for the next `Write()` call. + // + // If the object does not exist (i.e., the object has been deleted, or the + // first `Write()` has not yet reached the service), this method returns the + // error `NOT_FOUND`. + // + // The client **may** call `QueryWriteStatus()` at any time to determine how + // much data has been processed for this object. This is useful if the + // client is buffering data and needs to know which data can be safely + // evicted. For any sequence of `QueryWriteStatus()` calls for a given + // object name, the sequence of returned `persisted_size` values will be + // non-decreasing. + QueryWriteStatus(context.Context, *QueryWriteStatusRequest) (*QueryWriteStatusResponse, error) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // UnimplementedStorageServer can be embedded to have forward compatible implementations. @@ -10062,6 +13667,7 @@ type UnimplementedStorageServer struct { } func (*UnimplementedStorageServer) DeleteBucket(context.Context, *DeleteBucketRequest) (*emptypb.Empty, error) { +<<<<<<< HEAD return nil, status1.Errorf(codes.Unimplemented, "method DeleteBucket not implemented") } func (*UnimplementedStorageServer) GetBucket(context.Context, *GetBucketRequest) (*Bucket, error) { @@ -10132,6 +13738,72 @@ func (*UnimplementedStorageServer) QueryWriteStatus(context.Context, *QueryWrite } func (*UnimplementedStorageServer) MoveObject(context.Context, *MoveObjectRequest) (*Object, error) { return nil, status1.Errorf(codes.Unimplemented, "method MoveObject not implemented") +======= + return nil, status.Errorf(codes.Unimplemented, "method DeleteBucket not implemented") +} +func (*UnimplementedStorageServer) GetBucket(context.Context, *GetBucketRequest) (*Bucket, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetBucket not implemented") +} +func (*UnimplementedStorageServer) CreateBucket(context.Context, *CreateBucketRequest) (*Bucket, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateBucket not implemented") +} +func (*UnimplementedStorageServer) ListBuckets(context.Context, *ListBucketsRequest) (*ListBucketsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListBuckets not implemented") +} +func (*UnimplementedStorageServer) LockBucketRetentionPolicy(context.Context, *LockBucketRetentionPolicyRequest) (*Bucket, error) { + return nil, status.Errorf(codes.Unimplemented, "method LockBucketRetentionPolicy not implemented") +} +func (*UnimplementedStorageServer) GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetIamPolicy not implemented") +} +func (*UnimplementedStorageServer) SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error) { + return nil, status.Errorf(codes.Unimplemented, "method 
SetIamPolicy not implemented") +} +func (*UnimplementedStorageServer) TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TestIamPermissions not implemented") +} +func (*UnimplementedStorageServer) UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateBucket not implemented") +} +func (*UnimplementedStorageServer) ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) { + return nil, status.Errorf(codes.Unimplemented, "method ComposeObject not implemented") +} +func (*UnimplementedStorageServer) DeleteObject(context.Context, *DeleteObjectRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteObject not implemented") +} +func (*UnimplementedStorageServer) RestoreObject(context.Context, *RestoreObjectRequest) (*Object, error) { + return nil, status.Errorf(codes.Unimplemented, "method RestoreObject not implemented") +} +func (*UnimplementedStorageServer) CancelResumableWrite(context.Context, *CancelResumableWriteRequest) (*CancelResumableWriteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CancelResumableWrite not implemented") +} +func (*UnimplementedStorageServer) GetObject(context.Context, *GetObjectRequest) (*Object, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetObject not implemented") +} +func (*UnimplementedStorageServer) ReadObject(*ReadObjectRequest, Storage_ReadObjectServer) error { + return status.Errorf(codes.Unimplemented, "method ReadObject not implemented") +} +func (*UnimplementedStorageServer) UpdateObject(context.Context, *UpdateObjectRequest) (*Object, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateObject not implemented") +} +func (*UnimplementedStorageServer) WriteObject(Storage_WriteObjectServer) error { + return status.Errorf(codes.Unimplemented, "method WriteObject not implemented") +} +func (*UnimplementedStorageServer) BidiWriteObject(Storage_BidiWriteObjectServer) error { + return status.Errorf(codes.Unimplemented, "method BidiWriteObject not implemented") +} +func (*UnimplementedStorageServer) ListObjects(context.Context, *ListObjectsRequest) (*ListObjectsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListObjects not implemented") +} +func (*UnimplementedStorageServer) RewriteObject(context.Context, *RewriteObjectRequest) (*RewriteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RewriteObject not implemented") +} +func (*UnimplementedStorageServer) StartResumableWrite(context.Context, *StartResumableWriteRequest) (*StartResumableWriteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StartResumableWrite not implemented") +} +func (*UnimplementedStorageServer) QueryWriteStatus(context.Context, *QueryWriteStatusRequest) (*QueryWriteStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method QueryWriteStatus not implemented") +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func RegisterStorageServer(s *grpc.Server, srv StorageServer) { @@ -10411,6 +14083,7 @@ func (x *storageReadObjectServer) Send(m *ReadObjectResponse) error { return x.ServerStream.SendMsg(m) } +<<<<<<< HEAD func _Storage_BidiReadObject_Handler(srv interface{}, stream grpc.ServerStream) error { return 
srv.(StorageServer).BidiReadObject(&storageBidiReadObjectServer{stream}) } @@ -10437,6 +14110,8 @@ func (x *storageBidiReadObjectServer) Recv() (*BidiReadObjectRequest, error) { return m, nil } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func _Storage_UpdateObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(UpdateObjectRequest) if err := dec(in); err != nil { @@ -10579,6 +14254,7 @@ func _Storage_QueryWriteStatus_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +<<<<<<< HEAD func _Storage_MoveObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(MoveObjectRequest) if err := dec(in); err != nil { @@ -10597,6 +14273,8 @@ func _Storage_MoveObject_Handler(srv interface{}, ctx context.Context, dec func( return interceptor(ctx, in, info, handler) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _Storage_serviceDesc = grpc.ServiceDesc{ ServiceName: "google.storage.v2.Storage", HandlerType: (*StorageServer)(nil), @@ -10677,10 +14355,13 @@ var _Storage_serviceDesc = grpc.ServiceDesc{ MethodName: "QueryWriteStatus", Handler: _Storage_QueryWriteStatus_Handler, }, +<<<<<<< HEAD { MethodName: "MoveObject", Handler: _Storage_MoveObject_Handler, }, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }, Streams: []grpc.StreamDesc{ { @@ -10689,12 +14370,15 @@ var _Storage_serviceDesc = grpc.ServiceDesc{ ServerStreams: true, }, { +<<<<<<< HEAD StreamName: "BidiReadObject", Handler: _Storage_BidiReadObject_Handler, ServerStreams: true, ClientStreams: true, }, { +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) StreamName: "WriteObject", Handler: _Storage_WriteObject_Handler, ClientStreams: true, diff --git a/vendor/cloud.google.com/go/storage/internal/experimental.go b/vendor/cloud.google.com/go/storage/internal/experimental.go index 2fd5111fb3..1483c46f5c 100644 --- a/vendor/cloud.google.com/go/storage/internal/experimental.go +++ b/vendor/cloud.google.com/go/storage/internal/experimental.go @@ -29,8 +29,11 @@ var ( // WithReadStallTimeout is a function which is implemented by storage package. // It takes ReadStallTimeoutConfig as inputs and returns a option.ClientOption. WithReadStallTimeout any // func (*ReadStallTimeoutConfig) option.ClientOption +<<<<<<< HEAD // WithGRPCBidiReads is a function which is implemented by the storage package. // It sets the gRPC client to use the BidiReadObject API for downloads. WithGRPCBidiReads any // func() option.ClientOption +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) diff --git a/vendor/cloud.google.com/go/storage/internal/version.go b/vendor/cloud.google.com/go/storage/internal/version.go index ba56cacd8e..b923404b74 100644 --- a/vendor/cloud.google.com/go/storage/internal/version.go +++ b/vendor/cloud.google.com/go/storage/internal/version.go @@ -15,4 +15,8 @@ package internal // Version is the current tagged release of the library. 
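
The UnimplementedStorageServer boilerplate in the storage.pb.go hunks above is the usual gRPC forward-compatibility hook: embed it, and every RPC you do not override answers with codes.Unimplemented, so regenerating the service with new methods cannot break the build. A sketch (the fakeStorage type and its trivial GetObject body are illustrative, not part of this patch):

```go
package fake

import (
	"context"

	"cloud.google.com/go/storage/internal/apiv2/storagepb"
	"google.golang.org/grpc"
)

// fakeStorage implements only GetObject; every other Storage RPC falls
// through to the embedded UnimplementedStorageServer.
type fakeStorage struct {
	storagepb.UnimplementedStorageServer
}

func (s *fakeStorage) GetObject(ctx context.Context, req *storagepb.GetObjectRequest) (*storagepb.Object, error) {
	// Echo back a minimal Object for the requested bucket/name.
	return &storagepb.Object{Bucket: req.GetBucket(), Name: req.GetObject()}, nil
}

func Register(g *grpc.Server) {
	storagepb.RegisterStorageServer(g, &fakeStorage{})
}
```
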
+<<<<<<< HEAD const Version = "1.50.0" +======= +const Version = "1.48.0" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go index 99783f3df4..985310ae82 100644 --- a/vendor/cloud.google.com/go/storage/invoke.go +++ b/vendor/cloud.google.com/go/storage/invoke.go @@ -58,9 +58,15 @@ func run(ctx context.Context, call func(ctx context.Context) error, retry *retry } bo := gax.Backoff{} if retry.backoff != nil { +<<<<<<< HEAD bo.Multiplier = retry.backoff.Multiplier bo.Initial = retry.backoff.Initial bo.Max = retry.backoff.Max +======= + bo.Multiplier = retry.backoff.GetMultiplier() + bo.Initial = retry.backoff.GetInitial() + bo.Max = retry.backoff.GetMax() +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var errorFunc func(err error) bool = ShouldRetry if retry.shouldRetry != nil { diff --git a/vendor/cloud.google.com/go/storage/option.go b/vendor/cloud.google.com/go/storage/option.go index 16d57644aa..467719b3a0 100644 --- a/vendor/cloud.google.com/go/storage/option.go +++ b/vendor/cloud.google.com/go/storage/option.go @@ -40,7 +40,10 @@ func init() { storageinternal.WithMetricExporter = withMetricExporter storageinternal.WithMetricInterval = withMetricInterval storageinternal.WithReadStallTimeout = withReadStallTimeout +<<<<<<< HEAD storageinternal.WithGRPCBidiReads = withGRPCBidiReads +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // getDynamicReadReqIncreaseRateFromEnv returns the value set in the env variable. @@ -80,9 +83,13 @@ type storageConfig struct { disableClientMetrics bool metricExporter *metric.Exporter metricInterval time.Duration +<<<<<<< HEAD manualReader *metric.ManualReader readStallTimeoutConfig *experimental.ReadStallTimeoutConfig grpcBidiReads bool +======= + readStallTimeoutConfig *experimental.ReadStallTimeoutConfig +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // newStorageConfig generates a new storageConfig with all the given @@ -195,6 +202,7 @@ func (w *withMetricExporterConfig) ApplyStorageOpt(c *storageConfig) { c.metricExporter = w.metricExporter } +<<<<<<< HEAD type withTestMetricReaderConfig struct { internaloption.EmbeddableAdapter // reader override @@ -209,6 +217,8 @@ func (w *withTestMetricReaderConfig) ApplyStorageOpt(c *storageConfig) { c.manualReader = w.metricReader } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // WithReadStallTimeout is an option that may be passed to [NewClient]. // It enables the client to retry the stalled read request, happens as part of // storage.Reader creation. 
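
In the invoke.go hunk above, the WIP side reads the backoff parameters through Get* accessors instead of the gax.Backoff fields directly, anticipating the backoff interface introduced further down in storage.go. Either way the values originate from the public retry options; a small sketch of setting them (client construction elided):

```go
package main

import (
	"time"

	"cloud.google.com/go/storage"
	"github.com/googleapis/gax-go/v2"
)

func configureRetry(client *storage.Client) {
	// These are the fields the run() retry loop copies into its gax.Backoff.
	client.SetRetry(
		storage.WithBackoff(gax.Backoff{
			Initial:    2 * time.Second,  // first pause
			Max:        30 * time.Second, // cap on any pause
			Multiplier: 3,                // growth factor between pauses
		}),
		storage.WithPolicy(storage.RetryAlways),
	)
}
```
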
As the name suggest, timeout is adjusted dynamically @@ -242,6 +252,7 @@ type withReadStallTimeoutConfig struct { func (wrstc *withReadStallTimeoutConfig) ApplyStorageOpt(config *storageConfig) { config.readStallTimeoutConfig = wrstc.readStallTimeoutConfig } +<<<<<<< HEAD func withGRPCBidiReads() option.ClientOption { return &withGRPCBidiReadsConfig{} @@ -254,3 +265,5 @@ type withGRPCBidiReadsConfig struct { func (w *withGRPCBidiReadsConfig) ApplyStorageOpt(config *storageConfig) { config.grpcBidiReads = true } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go index 6b14fd1dce..7bcb66af70 100644 --- a/vendor/cloud.google.com/go/storage/reader.go +++ b/vendor/cloud.google.com/go/storage/reader.go @@ -22,7 +22,10 @@ import ( "io/ioutil" "net/http" "strings" +<<<<<<< HEAD "sync" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "time" "cloud.google.com/go/internal/trace" @@ -141,7 +144,10 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) encryptionKey: o.encryptionKey, conds: o.conds, readCompressed: o.readCompressed, +<<<<<<< HEAD handle: &o.readHandle, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } r, err = o.c.tc.NewRangeReader(ctx, params, opts...) @@ -157,6 +163,7 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) return r, err } +<<<<<<< HEAD // NewMultiRangeDownloader creates a multi-range reader for an object. // Must be called on a gRPC client created using [NewGRPCClient]. // @@ -200,6 +207,8 @@ func (o *ObjectHandle) NewMultiRangeDownloader(ctx context.Context) (mrd *MultiR return r, err } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // decompressiveTranscoding returns true if the request was served decompressed // and different than its original storage form. This happens when the "Content-Encoding" // header is "gzip". @@ -267,16 +276,23 @@ var emptyBody = ioutil.NopCloser(strings.NewReader("")) // the stored CRC, returning an error from Read if there is a mismatch. This integrity check // is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding. type Reader struct { +<<<<<<< HEAD Attrs ReaderObjectAttrs objectMetadata *map[string]string +======= + Attrs ReaderObjectAttrs +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) seen, remain, size int64 checkCRC bool // Did we check the CRC? This is now only used by tests. reader io.ReadCloser ctx context.Context +<<<<<<< HEAD mu sync.Mutex handle *ReadHandle +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Close closes the Reader. It must be called when done reading. @@ -347,6 +363,7 @@ func (r *Reader) CacheControl() string { func (r *Reader) LastModified() (time.Time, error) { return r.Attrs.LastModified, nil } +<<<<<<< HEAD // Metadata returns user-provided metadata, in key/value pairs. 
// @@ -439,3 +456,5 @@ func (mrd *MultiRangeDownloader) Wait() { func (mrd *MultiRangeDownloader) GetHandle() []byte { return mrd.reader.getHandle() } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go index 9c40ca1b47..e43104e4a4 100644 --- a/vendor/cloud.google.com/go/storage/storage.go +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -72,8 +72,13 @@ var ( // errMethodNotSupported indicates that the method called is not currently supported by the client. // TODO: Export this error when launching the transport-agnostic client. errMethodNotSupported = errors.New("storage: method is not currently supported") +<<<<<<< HEAD // errSignedURLMethodNotValid indicates that given HTTP method is not valid. errSignedURLMethodNotValid = fmt.Errorf("storage: HTTP method should be one of %v", reflect.ValueOf(signedURLMethods).MapKeys()) +======= + // errMethodNotValid indicates that given HTTP method is not valid. + errMethodNotValid = fmt.Errorf("storage: HTTP method should be one of %v", reflect.ValueOf(signedURLMethods).MapKeys()) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) var userAgent = fmt.Sprintf("gcloud-golang-storage/%s", internal.Version) @@ -689,7 +694,11 @@ func validateOptions(opts *SignedURLOptions, now time.Time) error { } opts.Method = strings.ToUpper(opts.Method) if _, ok := signedURLMethods[opts.Method]; !ok { +<<<<<<< HEAD return errSignedURLMethodNotValid +======= + return errMethodNotValid +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if opts.Expires.IsZero() { return errors.New("storage: missing required expires option") @@ -937,9 +946,12 @@ func signedURLV2(bucket, name string, opts *SignedURLOptions) (string, error) { return u.String(), nil } +<<<<<<< HEAD // ReadHandle associated with the object. This is periodically refreshed. type ReadHandle []byte +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // ObjectHandle provides operations on an object in a Google Cloud Storage bucket. // Use BucketHandle.Object to get a handle. type ObjectHandle struct { @@ -955,6 +967,7 @@ type ObjectHandle struct { retry *retryConfig overrideRetention *bool softDeleted bool +<<<<<<< HEAD readHandle ReadHandle } @@ -972,6 +985,8 @@ func (o *ObjectHandle) ReadHandle(r ReadHandle) *ObjectHandle { o2 := *o o2.readHandle = r return &o2 +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // ACL provides access to the object's access control list. @@ -1176,6 +1191,7 @@ func (o *ObjectHandle) Restore(ctx context.Context, opts *RestoreOptions) (*Obje }, sOpts...) } +<<<<<<< HEAD // Move changes the name of the object to the destination name. // It can only be used to rename an object within the same bucket. The // bucket must have [HierarchicalNamespace] enabled to use this method. @@ -1208,6 +1224,8 @@ type MoveObjectDestination struct { Conditions *Conditions } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // NewWriter returns a storage Writer that writes to the GCS object // associated with this ObjectHandle. // @@ -2107,6 +2125,7 @@ func applyConds(method string, gen int64, conds *Conditions, call interface{}) e return nil } +<<<<<<< HEAD // applySourceConds modifies the provided call using the conditions in conds. // call is something that quacks like a *raw.WhateverCall. 
// This is specifically for calls like Rewrite and Move which have a source and destination @@ -2117,15 +2136,25 @@ func applySourceConds(method string, gen int64, conds *Conditions, call interfac if !setSourceGeneration(cval, gen) { return fmt.Errorf("storage: %s: source generation not supported", method) } +======= +func applySourceConds(gen int64, conds *Conditions, call *raw.ObjectsRewriteCall) error { + if gen >= 0 { + call.SourceGeneration(gen) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if conds == nil { return nil } +<<<<<<< HEAD if err := conds.validate(method); err != nil { +======= + if err := conds.validate("CopyTo source"); err != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err } switch { case conds.GenerationMatch != 0: +<<<<<<< HEAD if !setIfSourceGenerationMatch(cval, conds.GenerationMatch) { return fmt.Errorf("storage: %s: ifSourceGenerationMatch not supported", method) } @@ -2147,10 +2176,24 @@ func applySourceConds(method string, gen int64, conds *Conditions, call interfac if !setIfSourceMetagenerationNotMatch(cval, conds.MetagenerationNotMatch) { return fmt.Errorf("storage: %s: ifSourceMetagenerationNotMatch not supported", method) } +======= + call.IfSourceGenerationMatch(conds.GenerationMatch) + case conds.GenerationNotMatch != 0: + call.IfSourceGenerationNotMatch(conds.GenerationNotMatch) + case conds.DoesNotExist: + call.IfSourceGenerationMatch(0) + } + switch { + case conds.MetagenerationMatch != 0: + call.IfSourceMetagenerationMatch(conds.MetagenerationMatch) + case conds.MetagenerationNotMatch != 0: + call.IfSourceMetagenerationNotMatch(conds.MetagenerationNotMatch) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return nil } +<<<<<<< HEAD // applySourceCondsProto validates and attempts to set the conditions on a protobuf // message using protobuf reflection. This is specifically for RPCs which have separate // preconditions for source and destination objects (e.g. Rewrite and Move). 
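
Both variants of applySourceConds above translate the caller's Conditions into ifSource* preconditions on the rewrite call. Through the public API those conditions hang off the source ObjectHandle; a hedged sketch with placeholder bucket and object names:

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func copyPinned(ctx context.Context, client *storage.Client, gen int64) {
	// Pin the source to a known generation; applySourceConds turns these
	// into sourceGeneration / ifSourceGenerationMatch on the rewrite call.
	src := client.Bucket("src-bucket").Object("src-object").
		Generation(gen).
		If(storage.Conditions{GenerationMatch: gen})

	// Require that the destination does not exist yet.
	dst := client.Bucket("dst-bucket").Object("dst-object").
		If(storage.Conditions{DoesNotExist: true})

	if _, err := dst.CopierFrom(src).Run(ctx); err != nil {
		log.Fatal(err)
	}
}
```
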
@@ -2161,10 +2204,16 @@ func applySourceCondsProto(method string, gen int64, conds *Conditions, msg prot if !setConditionProtoField(rmsg, "source_generation", gen) { return fmt.Errorf("storage: %s: generation not supported", method) } +======= +func applySourceCondsProto(gen int64, conds *Conditions, call *storagepb.RewriteObjectRequest) error { + if gen >= 0 { + call.SourceGeneration = gen +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if conds == nil { return nil } +<<<<<<< HEAD if err := conds.validate(method); err != nil { return err } @@ -2192,6 +2241,24 @@ func applySourceCondsProto(method string, gen int64, conds *Conditions, msg prot if !setConditionProtoField(rmsg, "if_source_metageneration_not_match", conds.MetagenerationNotMatch) { return fmt.Errorf("storage: %s: ifSourceMetagenerationNotMatch not supported", method) } +======= + if err := conds.validate("CopyTo source"); err != nil { + return err + } + switch { + case conds.GenerationMatch != 0: + call.IfSourceGenerationMatch = proto.Int64(conds.GenerationMatch) + case conds.GenerationNotMatch != 0: + call.IfSourceGenerationNotMatch = proto.Int64(conds.GenerationNotMatch) + case conds.DoesNotExist: + call.IfSourceGenerationMatch = proto.Int64(0) + } + switch { + case conds.MetagenerationMatch != 0: + call.IfSourceMetagenerationMatch = proto.Int64(conds.MetagenerationMatch) + case conds.MetagenerationNotMatch != 0: + call.IfSourceMetagenerationNotMatch = proto.Int64(conds.MetagenerationNotMatch) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return nil } @@ -2230,6 +2297,7 @@ func setIfMetagenerationNotMatch(cval reflect.Value, value interface{}) bool { return setCondition(cval.MethodByName("IfMetagenerationNotMatch"), value) } +<<<<<<< HEAD // More methods to set source object precondition fields (used by Rewrite and Move APIs). 
func setSourceGeneration(cval reflect.Value, value interface{}) bool { return setCondition(cval.MethodByName("SourceGeneration"), value) @@ -2251,6 +2319,8 @@ func setIfSourceMetagenerationNotMatch(cval reflect.Value, value interface{}) bo return setCondition(cval.MethodByName("IfSourceMetagenerationNotMatch"), value) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func setCondition(setter reflect.Value, value interface{}) bool { if setter.IsValid() { setter.Call([]reflect.Value{reflect.ValueOf(value)}) @@ -2324,7 +2394,11 @@ type withBackoff struct { } func (wb *withBackoff) apply(config *retryConfig) { +<<<<<<< HEAD config.backoff = &wb.backoff +======= + config.backoff = gaxBackoffFromStruct(&wb.backoff) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // WithMaxAttempts configures the maximum number of times an API call can be made @@ -2415,8 +2489,63 @@ func (wef *withErrorFunc) apply(config *retryConfig) { config.shouldRetry = wef.shouldRetry } +<<<<<<< HEAD type retryConfig struct { backoff *gax.Backoff +======= +type backoff interface { + Pause() time.Duration + + SetInitial(time.Duration) + SetMax(time.Duration) + SetMultiplier(float64) + + GetInitial() time.Duration + GetMax() time.Duration + GetMultiplier() float64 +} + +func gaxBackoffFromStruct(bo *gax.Backoff) *gaxBackoff { + if bo == nil { + return nil + } + b := &gaxBackoff{} + b.Backoff = *bo + return b +} + +// gaxBackoff is a gax.Backoff that implements the backoff interface +type gaxBackoff struct { + gax.Backoff +} + +func (b *gaxBackoff) SetInitial(i time.Duration) { + b.Initial = i +} + +func (b *gaxBackoff) SetMax(m time.Duration) { + b.Max = m +} + +func (b *gaxBackoff) SetMultiplier(m float64) { + b.Multiplier = m +} + +func (b *gaxBackoff) GetInitial() time.Duration { + return b.Initial +} + +func (b *gaxBackoff) GetMax() time.Duration { + return b.Max +} + +func (b *gaxBackoff) GetMultiplier() float64 { + return b.Multiplier +} + +type retryConfig struct { + backoff backoff +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) policy RetryPolicy shouldRetry func(err error) bool maxAttempts *int @@ -2426,6 +2555,7 @@ func (r *retryConfig) clone() *retryConfig { if r == nil { return nil } +<<<<<<< HEAD var bo *gax.Backoff if r.backoff != nil { @@ -2438,10 +2568,27 @@ func (r *retryConfig) clone() *retryConfig { return &retryConfig{ backoff: bo, +======= + newConfig := &retryConfig{ + backoff: nil, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) policy: r.policy, shouldRetry: r.shouldRetry, maxAttempts: r.maxAttempts, } +<<<<<<< HEAD +======= + + if r.backoff != nil { + bo := &gaxBackoff{} + bo.Initial = r.backoff.GetInitial() + bo.Max = r.backoff.GetMax() + bo.Multiplier = r.backoff.GetMultiplier() + newConfig.backoff = bo + } + + return newConfig +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // composeSourceObj wraps a *raw.ComposeRequestSourceObjects, but adds the methods diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go index ae8f6a6392..c0c280693d 100644 --- a/vendor/cloud.google.com/go/storage/writer.go +++ b/vendor/cloud.google.com/go/storage/writer.go @@ -102,6 +102,7 @@ type Writer struct { // is provided, then gax.DetermineContentType is called to sniff the type. ForceEmptyContentType bool +<<<<<<< HEAD // Append is a parameter to indicate whether the writer should use appendable // object semantics for the new object generation. 
Appendable objects are // visible on the first Write() call, and can be appended to until they are @@ -111,6 +112,8 @@ type Writer struct { // yet available for general use. Append bool +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // ProgressFunc can be used to monitor the progress of a large write // operation. If ProgressFunc is not nil and writing requires multiple // calls to the underlying service (see @@ -212,7 +215,10 @@ func (w *Writer) openWriter() (err error) { conds: w.o.conds, encryptionKey: w.o.encryptionKey, sendCRC32C: w.SendCRC32C, +<<<<<<< HEAD append: w.Append, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) donec: w.donec, setError: w.error, progress: w.progress, diff --git a/vendor/filippo.io/edwards25519/LICENSE b/vendor/filippo.io/edwards25519/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/filippo.io/edwards25519/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/filippo.io/edwards25519/README.md b/vendor/filippo.io/edwards25519/README.md new file mode 100644 index 0000000000..24e2457d87 --- /dev/null +++ b/vendor/filippo.io/edwards25519/README.md @@ -0,0 +1,14 @@ +# filippo.io/edwards25519 + +``` +import "filippo.io/edwards25519" +``` + +This library implements the edwards25519 elliptic curve, exposing the necessary APIs to build a wide array of higher-level primitives. +Read the docs at [pkg.go.dev/filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519). + +The code is originally derived from Adam Langley's internal implementation in the Go standard library, and includes George Tankersley's [performance improvements](https://golang.org/cl/71950). It was then further developed by Henry de Valence for use in ristretto255, and was finally [merged back into the Go standard library](https://golang.org/cl/276272) as of Go 1.17. It now tracks the upstream codebase and extends it with additional functionality. 
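
For orientation, a small example of the API this vendored package exposes (a sketch; error handling abbreviated):

```go
package main

import (
	"crypto/rand"
	"fmt"

	"filippo.io/edwards25519"
)

func main() {
	// Derive a uniform scalar by reducing 64 random bytes mod the group order.
	b := make([]byte, 64)
	if _, err := rand.Read(b); err != nil {
		panic(err)
	}
	s, err := edwards25519.NewScalar().SetUniformBytes(b)
	if err != nil {
		panic(err)
	}

	// P = s*B, then check that P - P is the identity element.
	p := new(edwards25519.Point).ScalarBaseMult(s)
	q := new(edwards25519.Point).Subtract(p, p)
	fmt.Println(q.Equal(edwards25519.NewIdentityPoint()) == 1) // true
	fmt.Printf("%x\n", p.Bytes())                              // canonical 32-byte encoding
}
```
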
+ +Most users don't need this package, and should instead use `crypto/ed25519` for signatures, `golang.org/x/crypto/curve25519` for Diffie-Hellman, or `github.com/gtank/ristretto255` for prime order group logic. However, for anyone currently using a fork of `crypto/internal/edwards25519`/`crypto/ed25519/internal/edwards25519` or `github.com/agl/edwards25519`, this package should be a safer, faster, and more powerful alternative. + +Since this package is meant to curb proliferation of edwards25519 implementations in the Go ecosystem, it welcomes requests for new APIs or reviewable performance improvements. diff --git a/vendor/filippo.io/edwards25519/doc.go b/vendor/filippo.io/edwards25519/doc.go new file mode 100644 index 0000000000..ab6aaebc0f --- /dev/null +++ b/vendor/filippo.io/edwards25519/doc.go @@ -0,0 +1,20 @@ +// Copyright (c) 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package edwards25519 implements group logic for the twisted Edwards curve +// +// -x^2 + y^2 = 1 + -(121665/121666)*x^2*y^2 +// +// This is better known as the Edwards curve equivalent to Curve25519, and is +// the curve used by the Ed25519 signature scheme. +// +// Most users don't need this package, and should instead use crypto/ed25519 for +// signatures, golang.org/x/crypto/curve25519 for Diffie-Hellman, or +// github.com/gtank/ristretto255 for prime order group logic. +// +// However, developers who do need to interact with low-level edwards25519 +// operations can use this package, which is an extended version of +// crypto/internal/edwards25519 from the standard library repackaged as +// an importable module. +package edwards25519 diff --git a/vendor/filippo.io/edwards25519/edwards25519.go b/vendor/filippo.io/edwards25519/edwards25519.go new file mode 100644 index 0000000000..a744da2c6d --- /dev/null +++ b/vendor/filippo.io/edwards25519/edwards25519.go @@ -0,0 +1,427 @@ +// Copyright (c) 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edwards25519 + +import ( + "errors" + + "filippo.io/edwards25519/field" +) + +// Point types. + +type projP1xP1 struct { + X, Y, Z, T field.Element +} + +type projP2 struct { + X, Y, Z field.Element +} + +// Point represents a point on the edwards25519 curve. +// +// This type works similarly to math/big.Int, and all arguments and receivers +// are allowed to alias. +// +// The zero value is NOT valid, and it may be used only as a receiver. +type Point struct { + // Make the type not comparable (i.e. used with == or as a map key), as + // equivalent points can be represented by different Go values. + _ incomparable + + // The point is internally represented in extended coordinates (X, Y, Z, T) + // where x = X/Z, y = Y/Z, and xy = T/Z per https://eprint.iacr.org/2008/522. + x, y, z, t field.Element +} + +type incomparable [0]func() + +func checkInitialized(points ...*Point) { + for _, p := range points { + if p.x == (field.Element{}) && p.y == (field.Element{}) { + panic("edwards25519: use of uninitialized Point") + } + } +} + +type projCached struct { + YplusX, YminusX, Z, T2d field.Element +} + +type affineCached struct { + YplusX, YminusX, T2d field.Element +} + +// Constructors. + +func (v *projP2) Zero() *projP2 { + v.X.Zero() + v.Y.One() + v.Z.One() + return v +} + +// identity is the point at infinity. 
+var identity, _ = new(Point).SetBytes([]byte{ + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}) + +// NewIdentityPoint returns a new Point set to the identity. +func NewIdentityPoint() *Point { + return new(Point).Set(identity) +} + +// generator is the canonical curve basepoint. See TestGenerator for the +// correspondence of this encoding with the values in RFC 8032. +var generator, _ = new(Point).SetBytes([]byte{ + 0x58, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66}) + +// NewGeneratorPoint returns a new Point set to the canonical generator. +func NewGeneratorPoint() *Point { + return new(Point).Set(generator) +} + +func (v *projCached) Zero() *projCached { + v.YplusX.One() + v.YminusX.One() + v.Z.One() + v.T2d.Zero() + return v +} + +func (v *affineCached) Zero() *affineCached { + v.YplusX.One() + v.YminusX.One() + v.T2d.Zero() + return v +} + +// Assignments. + +// Set sets v = u, and returns v. +func (v *Point) Set(u *Point) *Point { + *v = *u + return v +} + +// Encoding. + +// Bytes returns the canonical 32-byte encoding of v, according to RFC 8032, +// Section 5.1.2. +func (v *Point) Bytes() []byte { + // This function is outlined to make the allocations inline in the caller + // rather than happen on the heap. + var buf [32]byte + return v.bytes(&buf) +} + +func (v *Point) bytes(buf *[32]byte) []byte { + checkInitialized(v) + + var zInv, x, y field.Element + zInv.Invert(&v.z) // zInv = 1 / Z + x.Multiply(&v.x, &zInv) // x = X / Z + y.Multiply(&v.y, &zInv) // y = Y / Z + + out := copyFieldElement(buf, &y) + out[31] |= byte(x.IsNegative() << 7) + return out +} + +var feOne = new(field.Element).One() + +// SetBytes sets v = x, where x is a 32-byte encoding of v. If x does not +// represent a valid point on the curve, SetBytes returns nil and an error and +// the receiver is unchanged. Otherwise, SetBytes returns v. +// +// Note that SetBytes accepts all non-canonical encodings of valid points. +// That is, it follows decoding rules that match most implementations in +// the ecosystem rather than RFC 8032. +func (v *Point) SetBytes(x []byte) (*Point, error) { + // Specifically, the non-canonical encodings that are accepted are + // 1) the ones where the field element is not reduced (see the + // (*field.Element).SetBytes docs) and + // 2) the ones where the x-coordinate is zero and the sign bit is set. + // + // Read more at https://hdevalence.ca/blog/2020-10-04-its-25519am, + // specifically the "Canonical A, R" section. + + y, err := new(field.Element).SetBytes(x) + if err != nil { + return nil, errors.New("edwards25519: invalid point encoding length") + } + + // -x² + y² = 1 + dx²y² + // x² + dx²y² = x²(dy² + 1) = y² - 1 + // x² = (y² - 1) / (dy² + 1) + + // u = y² - 1 + y2 := new(field.Element).Square(y) + u := new(field.Element).Subtract(y2, feOne) + + // v = dy² + 1 + vv := new(field.Element).Multiply(y2, d) + vv = vv.Add(vv, feOne) + + // x = +√(u/v) + xx, wasSquare := new(field.Element).SqrtRatio(u, vv) + if wasSquare == 0 { + return nil, errors.New("edwards25519: invalid point encoding") + } + + // Select the negative square root if the sign bit is set. 
+ xxNeg := new(field.Element).Negate(xx) + xx = xx.Select(xxNeg, xx, int(x[31]>>7)) + + v.x.Set(xx) + v.y.Set(y) + v.z.One() + v.t.Multiply(xx, y) // xy = T / Z + + return v, nil +} + +func copyFieldElement(buf *[32]byte, v *field.Element) []byte { + copy(buf[:], v.Bytes()) + return buf[:] +} + +// Conversions. + +func (v *projP2) FromP1xP1(p *projP1xP1) *projP2 { + v.X.Multiply(&p.X, &p.T) + v.Y.Multiply(&p.Y, &p.Z) + v.Z.Multiply(&p.Z, &p.T) + return v +} + +func (v *projP2) FromP3(p *Point) *projP2 { + v.X.Set(&p.x) + v.Y.Set(&p.y) + v.Z.Set(&p.z) + return v +} + +func (v *Point) fromP1xP1(p *projP1xP1) *Point { + v.x.Multiply(&p.X, &p.T) + v.y.Multiply(&p.Y, &p.Z) + v.z.Multiply(&p.Z, &p.T) + v.t.Multiply(&p.X, &p.Y) + return v +} + +func (v *Point) fromP2(p *projP2) *Point { + v.x.Multiply(&p.X, &p.Z) + v.y.Multiply(&p.Y, &p.Z) + v.z.Square(&p.Z) + v.t.Multiply(&p.X, &p.Y) + return v +} + +// d is a constant in the curve equation. +var d, _ = new(field.Element).SetBytes([]byte{ + 0xa3, 0x78, 0x59, 0x13, 0xca, 0x4d, 0xeb, 0x75, + 0xab, 0xd8, 0x41, 0x41, 0x4d, 0x0a, 0x70, 0x00, + 0x98, 0xe8, 0x79, 0x77, 0x79, 0x40, 0xc7, 0x8c, + 0x73, 0xfe, 0x6f, 0x2b, 0xee, 0x6c, 0x03, 0x52}) +var d2 = new(field.Element).Add(d, d) + +func (v *projCached) FromP3(p *Point) *projCached { + v.YplusX.Add(&p.y, &p.x) + v.YminusX.Subtract(&p.y, &p.x) + v.Z.Set(&p.z) + v.T2d.Multiply(&p.t, d2) + return v +} + +func (v *affineCached) FromP3(p *Point) *affineCached { + v.YplusX.Add(&p.y, &p.x) + v.YminusX.Subtract(&p.y, &p.x) + v.T2d.Multiply(&p.t, d2) + + var invZ field.Element + invZ.Invert(&p.z) + v.YplusX.Multiply(&v.YplusX, &invZ) + v.YminusX.Multiply(&v.YminusX, &invZ) + v.T2d.Multiply(&v.T2d, &invZ) + return v +} + +// (Re)addition and subtraction. + +// Add sets v = p + q, and returns v. +func (v *Point) Add(p, q *Point) *Point { + checkInitialized(p, q) + qCached := new(projCached).FromP3(q) + result := new(projP1xP1).Add(p, qCached) + return v.fromP1xP1(result) +} + +// Subtract sets v = p - q, and returns v. 
+func (v *Point) Subtract(p, q *Point) *Point { + checkInitialized(p, q) + qCached := new(projCached).FromP3(q) + result := new(projP1xP1).Sub(p, qCached) + return v.fromP1xP1(result) +} + +func (v *projP1xP1) Add(p *Point, q *projCached) *projP1xP1 { + var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element + + YplusX.Add(&p.y, &p.x) + YminusX.Subtract(&p.y, &p.x) + + PP.Multiply(&YplusX, &q.YplusX) + MM.Multiply(&YminusX, &q.YminusX) + TT2d.Multiply(&p.t, &q.T2d) + ZZ2.Multiply(&p.z, &q.Z) + + ZZ2.Add(&ZZ2, &ZZ2) + + v.X.Subtract(&PP, &MM) + v.Y.Add(&PP, &MM) + v.Z.Add(&ZZ2, &TT2d) + v.T.Subtract(&ZZ2, &TT2d) + return v +} + +func (v *projP1xP1) Sub(p *Point, q *projCached) *projP1xP1 { + var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element + + YplusX.Add(&p.y, &p.x) + YminusX.Subtract(&p.y, &p.x) + + PP.Multiply(&YplusX, &q.YminusX) // flipped sign + MM.Multiply(&YminusX, &q.YplusX) // flipped sign + TT2d.Multiply(&p.t, &q.T2d) + ZZ2.Multiply(&p.z, &q.Z) + + ZZ2.Add(&ZZ2, &ZZ2) + + v.X.Subtract(&PP, &MM) + v.Y.Add(&PP, &MM) + v.Z.Subtract(&ZZ2, &TT2d) // flipped sign + v.T.Add(&ZZ2, &TT2d) // flipped sign + return v +} + +func (v *projP1xP1) AddAffine(p *Point, q *affineCached) *projP1xP1 { + var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element + + YplusX.Add(&p.y, &p.x) + YminusX.Subtract(&p.y, &p.x) + + PP.Multiply(&YplusX, &q.YplusX) + MM.Multiply(&YminusX, &q.YminusX) + TT2d.Multiply(&p.t, &q.T2d) + + Z2.Add(&p.z, &p.z) + + v.X.Subtract(&PP, &MM) + v.Y.Add(&PP, &MM) + v.Z.Add(&Z2, &TT2d) + v.T.Subtract(&Z2, &TT2d) + return v +} + +func (v *projP1xP1) SubAffine(p *Point, q *affineCached) *projP1xP1 { + var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element + + YplusX.Add(&p.y, &p.x) + YminusX.Subtract(&p.y, &p.x) + + PP.Multiply(&YplusX, &q.YminusX) // flipped sign + MM.Multiply(&YminusX, &q.YplusX) // flipped sign + TT2d.Multiply(&p.t, &q.T2d) + + Z2.Add(&p.z, &p.z) + + v.X.Subtract(&PP, &MM) + v.Y.Add(&PP, &MM) + v.Z.Subtract(&Z2, &TT2d) // flipped sign + v.T.Add(&Z2, &TT2d) // flipped sign + return v +} + +// Doubling. + +func (v *projP1xP1) Double(p *projP2) *projP1xP1 { + var XX, YY, ZZ2, XplusYsq field.Element + + XX.Square(&p.X) + YY.Square(&p.Y) + ZZ2.Square(&p.Z) + ZZ2.Add(&ZZ2, &ZZ2) + XplusYsq.Add(&p.X, &p.Y) + XplusYsq.Square(&XplusYsq) + + v.Y.Add(&YY, &XX) + v.Z.Subtract(&YY, &XX) + + v.X.Subtract(&XplusYsq, &v.Y) + v.T.Subtract(&ZZ2, &v.Z) + return v +} + +// Negation. + +// Negate sets v = -p, and returns v. +func (v *Point) Negate(p *Point) *Point { + checkInitialized(p) + v.x.Negate(&p.x) + v.y.Set(&p.y) + v.z.Set(&p.z) + v.t.Negate(&p.t) + return v +} + +// Equal returns 1 if v is equivalent to u, and 0 otherwise. +func (v *Point) Equal(u *Point) int { + checkInitialized(v, u) + + var t1, t2, t3, t4 field.Element + t1.Multiply(&v.x, &u.z) + t2.Multiply(&u.x, &v.z) + t3.Multiply(&v.y, &u.z) + t4.Multiply(&u.y, &v.z) + + return t1.Equal(&t2) & t3.Equal(&t4) +} + +// Constant-time operations + +// Select sets v to a if cond == 1 and to b if cond == 0. +func (v *projCached) Select(a, b *projCached, cond int) *projCached { + v.YplusX.Select(&a.YplusX, &b.YplusX, cond) + v.YminusX.Select(&a.YminusX, &b.YminusX, cond) + v.Z.Select(&a.Z, &b.Z, cond) + v.T2d.Select(&a.T2d, &b.T2d, cond) + return v +} + +// Select sets v to a if cond == 1 and to b if cond == 0. 
+func (v *affineCached) Select(a, b *affineCached, cond int) *affineCached { + v.YplusX.Select(&a.YplusX, &b.YplusX, cond) + v.YminusX.Select(&a.YminusX, &b.YminusX, cond) + v.T2d.Select(&a.T2d, &b.T2d, cond) + return v +} + +// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0. +func (v *projCached) CondNeg(cond int) *projCached { + v.YplusX.Swap(&v.YminusX, cond) + v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond) + return v +} + +// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0. +func (v *affineCached) CondNeg(cond int) *affineCached { + v.YplusX.Swap(&v.YminusX, cond) + v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond) + return v +} diff --git a/vendor/filippo.io/edwards25519/extra.go b/vendor/filippo.io/edwards25519/extra.go new file mode 100644 index 0000000000..d152d68ff4 --- /dev/null +++ b/vendor/filippo.io/edwards25519/extra.go @@ -0,0 +1,349 @@ +// Copyright (c) 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edwards25519 + +// This file contains additional functionality that is not included in the +// upstream crypto/internal/edwards25519 package. + +import ( + "errors" + + "filippo.io/edwards25519/field" +) + +// ExtendedCoordinates returns v in extended coordinates (X:Y:Z:T) where +// x = X/Z, y = Y/Z, and xy = T/Z as in https://eprint.iacr.org/2008/522. +func (v *Point) ExtendedCoordinates() (X, Y, Z, T *field.Element) { + // This function is outlined to make the allocations inline in the caller + // rather than happen on the heap. Don't change the style without making + // sure it doesn't increase the inliner cost. + var e [4]field.Element + X, Y, Z, T = v.extendedCoordinates(&e) + return +} + +func (v *Point) extendedCoordinates(e *[4]field.Element) (X, Y, Z, T *field.Element) { + checkInitialized(v) + X = e[0].Set(&v.x) + Y = e[1].Set(&v.y) + Z = e[2].Set(&v.z) + T = e[3].Set(&v.t) + return +} + +// SetExtendedCoordinates sets v = (X:Y:Z:T) in extended coordinates where +// x = X/Z, y = Y/Z, and xy = T/Z as in https://eprint.iacr.org/2008/522. +// +// If the coordinates are invalid or don't represent a valid point on the curve, +// SetExtendedCoordinates returns nil and an error and the receiver is +// unchanged. Otherwise, SetExtendedCoordinates returns v. +func (v *Point) SetExtendedCoordinates(X, Y, Z, T *field.Element) (*Point, error) { + if !isOnCurve(X, Y, Z, T) { + return nil, errors.New("edwards25519: invalid point coordinates") + } + v.x.Set(X) + v.y.Set(Y) + v.z.Set(Z) + v.t.Set(T) + return v, nil +} + +func isOnCurve(X, Y, Z, T *field.Element) bool { + var lhs, rhs field.Element + XX := new(field.Element).Square(X) + YY := new(field.Element).Square(Y) + ZZ := new(field.Element).Square(Z) + TT := new(field.Element).Square(T) + // -x² + y² = 1 + dx²y² + // -(X/Z)² + (Y/Z)² = 1 + d(T/Z)² + // -X² + Y² = Z² + dT² + lhs.Subtract(YY, XX) + rhs.Multiply(d, TT).Add(&rhs, ZZ) + if lhs.Equal(&rhs) != 1 { + return false + } + // xy = T/Z + // XY/Z² = T/Z + // XY = TZ + lhs.Multiply(X, Y) + rhs.Multiply(T, Z) + return lhs.Equal(&rhs) == 1 +} + +// BytesMontgomery converts v to a point on the birationally-equivalent +// Curve25519 Montgomery curve, and returns its canonical 32 bytes encoding +// according to RFC 7748. +// +// Note that BytesMontgomery only encodes the u-coordinate, so v and -v encode +// to the same value. 
If v is the identity point, BytesMontgomery returns 32 +// zero bytes, analogously to the X25519 function. +// +// The lack of an inverse operation (such as SetMontgomeryBytes) is deliberate: +// while every valid edwards25519 point has a unique u-coordinate Montgomery +// encoding, X25519 accepts inputs on the quadratic twist, which don't correspond +// to any edwards25519 point, and every other X25519 input corresponds to two +// edwards25519 points. +func (v *Point) BytesMontgomery() []byte { + // This function is outlined to make the allocations inline in the caller + // rather than happen on the heap. + var buf [32]byte + return v.bytesMontgomery(&buf) +} + +func (v *Point) bytesMontgomery(buf *[32]byte) []byte { + checkInitialized(v) + + // RFC 7748, Section 4.1 provides the bilinear map to calculate the + // Montgomery u-coordinate + // + // u = (1 + y) / (1 - y) + // + // where y = Y / Z. + + var y, recip, u field.Element + + y.Multiply(&v.y, y.Invert(&v.z)) // y = Y / Z + recip.Invert(recip.Subtract(feOne, &y)) // r = 1/(1 - y) + u.Multiply(u.Add(feOne, &y), &recip) // u = (1 + y)*r + + return copyFieldElement(buf, &u) +} + +// MultByCofactor sets v = 8 * p, and returns v. +func (v *Point) MultByCofactor(p *Point) *Point { + checkInitialized(p) + result := projP1xP1{} + pp := (&projP2{}).FromP3(p) + result.Double(pp) + pp.FromP1xP1(&result) + result.Double(pp) + pp.FromP1xP1(&result) + result.Double(pp) + return v.fromP1xP1(&result) +} + +// Given k > 0, set s = s**(2*i). +func (s *Scalar) pow2k(k int) { + for i := 0; i < k; i++ { + s.Multiply(s, s) + } +} + +// Invert sets s to the inverse of a nonzero scalar v, and returns s. +// +// If t is zero, Invert returns zero. +func (s *Scalar) Invert(t *Scalar) *Scalar { + // Uses a hardcoded sliding window of width 4. + var table [8]Scalar + var tt Scalar + tt.Multiply(t, t) + table[0] = *t + for i := 0; i < 7; i++ { + table[i+1].Multiply(&table[i], &tt) + } + // Now table = [t**1, t**3, t**5, t**7, t**9, t**11, t**13, t**15] + // so t**k = t[k/2] for odd k + + // To compute the sliding window digits, use the following Sage script: + + // sage: import itertools + // sage: def sliding_window(w,k): + // ....: digits = [] + // ....: while k > 0: + // ....: if k % 2 == 1: + // ....: kmod = k % (2**w) + // ....: digits.append(kmod) + // ....: k = k - kmod + // ....: else: + // ....: digits.append(0) + // ....: k = k // 2 + // ....: return digits + + // Now we can compute s roughly as follows: + + // sage: s = 1 + // sage: for coeff in reversed(sliding_window(4,l-2)): + // ....: s = s*s + // ....: if coeff > 0 : + // ....: s = s*t**coeff + + // This works on one bit at a time, with many runs of zeros. + // The digits can be collapsed into [(count, coeff)] as follows: + + // sage: [(len(list(group)),d) for d,group in itertools.groupby(sliding_window(4,l-2))] + + // Entries of the form (k, 0) turn into pow2k(k) + // Entries of the form (1, coeff) turn into a squaring and then a table lookup. + // We can fold the squaring into the previous pow2k(k) as pow2k(k+1). 
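+ // Concretely, the first steps below read: the initial assignment applies
+ // the most significant digit (coefficient 1, so table[1/2] = t**1), the
+ // pow2k(127 + 1) plays a run of 127 zero digits with the next digit's
+ // squaring folded in, and the following Multiply applies that digit's
+ // coefficient.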
+ + *s = table[1/2] + s.pow2k(127 + 1) + s.Multiply(s, &table[1/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[9/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[11/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[13/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[15/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[7/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[15/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[5/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[1/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[15/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[15/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[7/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[3/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[11/2]) + s.pow2k(5 + 1) + s.Multiply(s, &table[11/2]) + s.pow2k(9 + 1) + s.Multiply(s, &table[9/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[3/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[3/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[3/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[9/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[7/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[3/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[13/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[7/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[9/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[15/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[11/2]) + + return s +} + +// MultiScalarMult sets v = sum(scalars[i] * points[i]), and returns v. +// +// Execution time depends only on the lengths of the two slices, which must match. +func (v *Point) MultiScalarMult(scalars []*Scalar, points []*Point) *Point { + if len(scalars) != len(points) { + panic("edwards25519: called MultiScalarMult with different size inputs") + } + checkInitialized(points...) + + // Proceed as in the single-base case, but share doublings + // between each point in the multiscalar equation. + + // Build lookup tables for each point + tables := make([]projLookupTable, len(points)) + for i := range tables { + tables[i].FromP3(points[i]) + } + // Compute signed radix-16 digits for each scalar + digits := make([][64]int8, len(scalars)) + for i := range digits { + digits[i] = scalars[i].signedRadix16() + } + + // Unwrap first loop iteration to save computing 16*identity + multiple := &projCached{} + tmp1 := &projP1xP1{} + tmp2 := &projP2{} + // Lookup-and-add the appropriate multiple of each input point + for j := range tables { + tables[j].SelectInto(multiple, digits[j][63]) + tmp1.Add(v, multiple) // tmp1 = v + x_(j,63)*Q in P1xP1 coords + v.fromP1xP1(tmp1) // update v + } + tmp2.FromP3(v) // set up tmp2 = v in P2 coords for next iteration + for i := 62; i >= 0; i-- { + tmp1.Double(tmp2) // tmp1 = 2*(prev) in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 2*(prev) in P2 coords + tmp1.Double(tmp2) // tmp1 = 4*(prev) in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 4*(prev) in P2 coords + tmp1.Double(tmp2) // tmp1 = 8*(prev) in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 8*(prev) in P2 coords + tmp1.Double(tmp2) // tmp1 = 16*(prev) in P1xP1 coords + v.fromP1xP1(tmp1) // v = 16*(prev) in P3 coords + // Lookup-and-add the appropriate multiple of each input point + for j := range tables { + tables[j].SelectInto(multiple, digits[j][i]) + tmp1.Add(v, multiple) // tmp1 = v + x_(j,i)*Q in P1xP1 coords + v.fromP1xP1(tmp1) // update v + } + tmp2.FromP3(v) // set up tmp2 = v in P2 coords for next iteration + } + return v +} + +// VarTimeMultiScalarMult sets v = sum(scalars[i] * points[i]), and returns v. +// +// Execution time depends on the inputs. 
+func (v *Point) VarTimeMultiScalarMult(scalars []*Scalar, points []*Point) *Point { + if len(scalars) != len(points) { + panic("edwards25519: called VarTimeMultiScalarMult with different size inputs") + } + checkInitialized(points...) + + // Generalize double-base NAF computation to arbitrary sizes. + // Here all the points are dynamic, so we only use the smaller + // tables. + + // Build lookup tables for each point + tables := make([]nafLookupTable5, len(points)) + for i := range tables { + tables[i].FromP3(points[i]) + } + // Compute a NAF for each scalar + nafs := make([][256]int8, len(scalars)) + for i := range nafs { + nafs[i] = scalars[i].nonAdjacentForm(5) + } + + multiple := &projCached{} + tmp1 := &projP1xP1{} + tmp2 := &projP2{} + tmp2.Zero() + + // Move from high to low bits, doubling the accumulator + // at each iteration and checking whether there is a nonzero + // coefficient to look up a multiple of. + // + // Skip trying to find the first nonzero coefficent, because + // searching might be more work than a few extra doublings. + for i := 255; i >= 0; i-- { + tmp1.Double(tmp2) + + for j := range nafs { + if nafs[j][i] > 0 { + v.fromP1xP1(tmp1) + tables[j].SelectInto(multiple, nafs[j][i]) + tmp1.Add(v, multiple) + } else if nafs[j][i] < 0 { + v.fromP1xP1(tmp1) + tables[j].SelectInto(multiple, -nafs[j][i]) + tmp1.Sub(v, multiple) + } + } + + tmp2.FromP1xP1(tmp1) + } + + v.fromP2(tmp2) + return v +} diff --git a/vendor/filippo.io/edwards25519/field/fe.go b/vendor/filippo.io/edwards25519/field/fe.go new file mode 100644 index 0000000000..5518ef2b90 --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe.go @@ -0,0 +1,420 @@ +// Copyright (c) 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package field implements fast arithmetic modulo 2^255-19. +package field + +import ( + "crypto/subtle" + "encoding/binary" + "errors" + "math/bits" +) + +// Element represents an element of the field GF(2^255-19). Note that this +// is not a cryptographically secure group, and should only be used to interact +// with edwards25519.Point coordinates. +// +// This type works similarly to math/big.Int, and all arguments and receivers +// are allowed to alias. +// +// The zero value is a valid zero element. +type Element struct { + // An element t represents the integer + // t.l0 + t.l1*2^51 + t.l2*2^102 + t.l3*2^153 + t.l4*2^204 + // + // Between operations, all limbs are expected to be lower than 2^52. + l0 uint64 + l1 uint64 + l2 uint64 + l3 uint64 + l4 uint64 +} + +const maskLow51Bits uint64 = (1 << 51) - 1 + +var feZero = &Element{0, 0, 0, 0, 0} + +// Zero sets v = 0, and returns v. +func (v *Element) Zero() *Element { + *v = *feZero + return v +} + +var feOne = &Element{1, 0, 0, 0, 0} + +// One sets v = 1, and returns v. +func (v *Element) One() *Element { + *v = *feOne + return v +} + +// reduce reduces v modulo 2^255 - 19 and returns it. +func (v *Element) reduce() *Element { + v.carryPropagate() + + // After the light reduction we now have a field element representation + // v < 2^255 + 2^13 * 19, but need v < 2^255 - 19. + + // If v >= 2^255 - 19, then v + 19 >= 2^255, which would overflow 2^255 - 1, + // generating a carry. That is, c will be 0 if v < 2^255 - 19, and 1 otherwise. + c := (v.l0 + 19) >> 51 + c = (v.l1 + c) >> 51 + c = (v.l2 + c) >> 51 + c = (v.l3 + c) >> 51 + c = (v.l4 + c) >> 51 + + // If v < 2^255 - 19 and c = 0, this will be a no-op. 
Otherwise, it's + // effectively applying the reduction identity to the carry. + v.l0 += 19 * c + + v.l1 += v.l0 >> 51 + v.l0 = v.l0 & maskLow51Bits + v.l2 += v.l1 >> 51 + v.l1 = v.l1 & maskLow51Bits + v.l3 += v.l2 >> 51 + v.l2 = v.l2 & maskLow51Bits + v.l4 += v.l3 >> 51 + v.l3 = v.l3 & maskLow51Bits + // no additional carry + v.l4 = v.l4 & maskLow51Bits + + return v +} + +// Add sets v = a + b, and returns v. +func (v *Element) Add(a, b *Element) *Element { + v.l0 = a.l0 + b.l0 + v.l1 = a.l1 + b.l1 + v.l2 = a.l2 + b.l2 + v.l3 = a.l3 + b.l3 + v.l4 = a.l4 + b.l4 + // Using the generic implementation here is actually faster than the + // assembly. Probably because the body of this function is so simple that + // the compiler can figure out better optimizations by inlining the carry + // propagation. + return v.carryPropagateGeneric() +} + +// Subtract sets v = a - b, and returns v. +func (v *Element) Subtract(a, b *Element) *Element { + // We first add 2 * p, to guarantee the subtraction won't underflow, and + // then subtract b (which can be up to 2^255 + 2^13 * 19). + v.l0 = (a.l0 + 0xFFFFFFFFFFFDA) - b.l0 + v.l1 = (a.l1 + 0xFFFFFFFFFFFFE) - b.l1 + v.l2 = (a.l2 + 0xFFFFFFFFFFFFE) - b.l2 + v.l3 = (a.l3 + 0xFFFFFFFFFFFFE) - b.l3 + v.l4 = (a.l4 + 0xFFFFFFFFFFFFE) - b.l4 + return v.carryPropagate() +} + +// Negate sets v = -a, and returns v. +func (v *Element) Negate(a *Element) *Element { + return v.Subtract(feZero, a) +} + +// Invert sets v = 1/z mod p, and returns v. +// +// If z == 0, Invert returns v = 0. +func (v *Element) Invert(z *Element) *Element { + // Inversion is implemented as exponentiation with exponent p − 2. It uses the + // same sequence of 255 squarings and 11 multiplications as [Curve25519]. + var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t Element + + z2.Square(z) // 2 + t.Square(&z2) // 4 + t.Square(&t) // 8 + z9.Multiply(&t, z) // 9 + z11.Multiply(&z9, &z2) // 11 + t.Square(&z11) // 22 + z2_5_0.Multiply(&t, &z9) // 31 = 2^5 - 2^0 + + t.Square(&z2_5_0) // 2^6 - 2^1 + for i := 0; i < 4; i++ { + t.Square(&t) // 2^10 - 2^5 + } + z2_10_0.Multiply(&t, &z2_5_0) // 2^10 - 2^0 + + t.Square(&z2_10_0) // 2^11 - 2^1 + for i := 0; i < 9; i++ { + t.Square(&t) // 2^20 - 2^10 + } + z2_20_0.Multiply(&t, &z2_10_0) // 2^20 - 2^0 + + t.Square(&z2_20_0) // 2^21 - 2^1 + for i := 0; i < 19; i++ { + t.Square(&t) // 2^40 - 2^20 + } + t.Multiply(&t, &z2_20_0) // 2^40 - 2^0 + + t.Square(&t) // 2^41 - 2^1 + for i := 0; i < 9; i++ { + t.Square(&t) // 2^50 - 2^10 + } + z2_50_0.Multiply(&t, &z2_10_0) // 2^50 - 2^0 + + t.Square(&z2_50_0) // 2^51 - 2^1 + for i := 0; i < 49; i++ { + t.Square(&t) // 2^100 - 2^50 + } + z2_100_0.Multiply(&t, &z2_50_0) // 2^100 - 2^0 + + t.Square(&z2_100_0) // 2^101 - 2^1 + for i := 0; i < 99; i++ { + t.Square(&t) // 2^200 - 2^100 + } + t.Multiply(&t, &z2_100_0) // 2^200 - 2^0 + + t.Square(&t) // 2^201 - 2^1 + for i := 0; i < 49; i++ { + t.Square(&t) // 2^250 - 2^50 + } + t.Multiply(&t, &z2_50_0) // 2^250 - 2^0 + + t.Square(&t) // 2^251 - 2^1 + t.Square(&t) // 2^252 - 2^2 + t.Square(&t) // 2^253 - 2^3 + t.Square(&t) // 2^254 - 2^4 + t.Square(&t) // 2^255 - 2^5 + + return v.Multiply(&t, &z11) // 2^255 - 21 +} + +// Set sets v = a, and returns v. +func (v *Element) Set(a *Element) *Element { + *v = *a + return v +} + +// SetBytes sets v to x, where x is a 32-byte little-endian encoding. If x is +// not of the right length, SetBytes returns nil and an error, and the +// receiver is unchanged. 
+//
+// Consistent with RFC 7748, the most significant bit (the high bit of the
+// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1)
+// are accepted. Note that this is laxer than specified by RFC 8032, but
+// consistent with most Ed25519 implementations.
+func (v *Element) SetBytes(x []byte) (*Element, error) {
+ if len(x) != 32 {
+ return nil, errors.New("edwards25519: invalid field element input size")
+ }
+
+ // Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51).
+ v.l0 = binary.LittleEndian.Uint64(x[0:8])
+ v.l0 &= maskLow51Bits
+ // Bits 51:102 (bytes 6:14, bits 48:112, shift 3, mask 51).
+ v.l1 = binary.LittleEndian.Uint64(x[6:14]) >> 3
+ v.l1 &= maskLow51Bits
+ // Bits 102:153 (bytes 12:20, bits 96:160, shift 6, mask 51).
+ v.l2 = binary.LittleEndian.Uint64(x[12:20]) >> 6
+ v.l2 &= maskLow51Bits
+ // Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51).
+ v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1
+ v.l3 &= maskLow51Bits
+ // Bits 204:255 (bytes 24:32, bits 192:256, shift 12, mask 51).
+ // Note: not bytes 25:33, shift 4, to avoid overread.
+ v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12
+ v.l4 &= maskLow51Bits
+
+ return v, nil
+}
+
+// Bytes returns the canonical 32-byte little-endian encoding of v.
+func (v *Element) Bytes() []byte {
+ // This function is outlined to make the allocations inline in the caller
+ // rather than happen on the heap.
+ var out [32]byte
+ return v.bytes(&out)
+}
+
+func (v *Element) bytes(out *[32]byte) []byte {
+ t := *v
+ t.reduce()
+
+ var buf [8]byte
+ for i, l := range [5]uint64{t.l0, t.l1, t.l2, t.l3, t.l4} {
+ bitsOffset := i * 51
+ binary.LittleEndian.PutUint64(buf[:], l<<uint(bitsOffset%8))
+
+ for i, bb := range buf {
+ off := bitsOffset/8 + i
+ if off >= len(out) {
+ break
+ }
+ out[off] |= bb
+ }
+ }
+
+ return out[:]
+}
+
+// Equal returns 1 if v and u are equal, and 0 otherwise.
+func (v *Element) Equal(u *Element) int {
+ sa, sv := u.Bytes(), v.Bytes()
+ return subtle.ConstantTimeCompare(sa, sv)
+}
+
+// mask64Bits returns 0xffffffff if cond is 1, and 0 otherwise.
+func mask64Bits(cond int) uint64 { return ^(uint64(cond) - 1) }
+
+// Select sets v to a if cond == 1, and to b if cond == 0.
+func (v *Element) Select(a, b *Element, cond int) *Element {
+ m := mask64Bits(cond)
+ v.l0 = (m & a.l0) | (^m & b.l0)
+ v.l1 = (m & a.l1) | (^m & b.l1)
+ v.l2 = (m & a.l2) | (^m & b.l2)
+ v.l3 = (m & a.l3) | (^m & b.l3)
+ v.l4 = (m & a.l4) | (^m & b.l4)
+ return v
+}
+
+// Swap swaps v and u if cond == 1 or leaves them unchanged if cond == 0, and returns v.
+func (v *Element) Swap(u *Element, cond int) {
+ m := mask64Bits(cond)
+ t := m & (v.l0 ^ u.l0)
+ v.l0 ^= t
+ u.l0 ^= t
+ t = m & (v.l1 ^ u.l1)
+ v.l1 ^= t
+ u.l1 ^= t
+ t = m & (v.l2 ^ u.l2)
+ v.l2 ^= t
+ u.l2 ^= t
+ t = m & (v.l3 ^ u.l3)
+ v.l3 ^= t
+ u.l3 ^= t
+ t = m & (v.l4 ^ u.l4)
+ v.l4 ^= t
+ u.l4 ^= t
+}
+
+// IsNegative returns 1 if v is negative, and 0 otherwise.
+func (v *Element) IsNegative() int {
+ return int(v.Bytes()[0] & 1)
+}
+
+// Absolute sets v to |u|, and returns v.
+func (v *Element) Absolute(u *Element) *Element {
+ return v.Select(new(Element).Negate(u), u, u.IsNegative())
+}
+
+// Multiply sets v = x * y, and returns v.
+func (v *Element) Multiply(x, y *Element) *Element {
+ feMul(v, x, y)
+ return v
+}
+
+// Square sets v = x * x, and returns v.
+func (v *Element) Square(x *Element) *Element {
+ feSquare(v, x)
+ return v
+}
+
+// Mult32 sets v = x * y, and returns v. 
+func (v *Element) Mult32(x *Element, y uint32) *Element {
+	x0lo, x0hi := mul51(x.l0, y)
+	x1lo, x1hi := mul51(x.l1, y)
+	x2lo, x2hi := mul51(x.l2, y)
+	x3lo, x3hi := mul51(x.l3, y)
+	x4lo, x4hi := mul51(x.l4, y)
+	v.l0 = x0lo + 19*x4hi // carried over per the reduction identity
+	v.l1 = x1lo + x0hi
+	v.l2 = x2lo + x1hi
+	v.l3 = x3lo + x2hi
+	v.l4 = x4lo + x3hi
+	// The hi portions are going to be only 32 bits, plus any previous excess,
+	// so we can skip the carry propagation.
+	return v
+}
+
+// mul51 returns lo + hi * 2⁵¹ = a * b.
+func mul51(a uint64, b uint32) (lo uint64, hi uint64) {
+	mh, ml := bits.Mul64(a, uint64(b))
+	lo = ml & maskLow51Bits
+	hi = (mh << 13) | (ml >> 51)
+	return
+}
+
+// Pow22523 sets v = x^((p-5)/8), and returns v. (p-5)/8 is 2^252-3.
+func (v *Element) Pow22523(x *Element) *Element {
+	var t0, t1, t2 Element
+
+	t0.Square(x)             // x^2
+	t1.Square(&t0)           // x^4
+	t1.Square(&t1)           // x^8
+	t1.Multiply(x, &t1)      // x^9
+	t0.Multiply(&t0, &t1)    // x^11
+	t0.Square(&t0)           // x^22
+	t0.Multiply(&t1, &t0)    // x^31
+	t1.Square(&t0)           // x^62
+	for i := 1; i < 5; i++ { // x^992
+		t1.Square(&t1)
+	}
+	t0.Multiply(&t1, &t0)     // x^1023 -> 1023 = 2^10 - 1
+	t1.Square(&t0)            // 2^11 - 2
+	for i := 1; i < 10; i++ { // 2^20 - 2^10
+		t1.Square(&t1)
+	}
+	t1.Multiply(&t1, &t0)     // 2^20 - 1
+	t2.Square(&t1)            // 2^21 - 2
+	for i := 1; i < 20; i++ { // 2^40 - 2^20
+		t2.Square(&t2)
+	}
+	t1.Multiply(&t2, &t1)     // 2^40 - 1
+	t1.Square(&t1)            // 2^41 - 2
+	for i := 1; i < 10; i++ { // 2^50 - 2^10
+		t1.Square(&t1)
+	}
+	t0.Multiply(&t1, &t0)     // 2^50 - 1
+	t1.Square(&t0)            // 2^51 - 2
+	for i := 1; i < 50; i++ { // 2^100 - 2^50
+		t1.Square(&t1)
+	}
+	t1.Multiply(&t1, &t0)      // 2^100 - 1
+	t2.Square(&t1)             // 2^101 - 2
+	for i := 1; i < 100; i++ { // 2^200 - 2^100
+		t2.Square(&t2)
+	}
+	t1.Multiply(&t2, &t1)     // 2^200 - 1
+	t1.Square(&t1)            // 2^201 - 2
+	for i := 1; i < 50; i++ { // 2^250 - 2^50
+		t1.Square(&t1)
+	}
+	t0.Multiply(&t1, &t0)     // 2^250 - 1
+	t0.Square(&t0)            // 2^251 - 2
+	t0.Square(&t0)            // 2^252 - 4
+	return v.Multiply(&t0, x) // 2^252 - 3 -> x^(2^252-3)
+}
+
+// sqrtM1 is 2^((p-1)/4), which squared is equal to -1 by Euler's Criterion.
+var sqrtM1 = &Element{1718705420411056, 234908883556509,
+	2233514472574048, 2117202627021982, 765476049583133}
+
+// SqrtRatio sets r to the non-negative square root of the ratio of u and v.
+//
+// If u/v is square, SqrtRatio returns r and 1. If u/v is not square, SqrtRatio
+// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00,
+// and returns r and 0.
+func (r *Element) SqrtRatio(u, v *Element) (R *Element, wasSquare int) {
+	t0 := new(Element)
+
+	// r = (u * v3) * (u * v7)^((p-5)/8)
+	v2 := new(Element).Square(v)
+	uv3 := new(Element).Multiply(u, t0.Multiply(v2, v))
+	uv7 := new(Element).Multiply(uv3, t0.Square(v2))
+	rr := new(Element).Multiply(uv3, t0.Pow22523(uv7))
+
+	check := new(Element).Multiply(v, t0.Square(rr)) // check = v * r^2
+
+	uNeg := new(Element).Negate(u)
+	correctSignSqrt := check.Equal(u)
+	flippedSignSqrt := check.Equal(uNeg)
+	flippedSignSqrtI := check.Equal(t0.Multiply(uNeg, sqrtM1))
+
+	rPrime := new(Element).Multiply(rr, sqrtM1) // r_prime = SQRT_M1 * r
+	// r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r)
+	rr.Select(rPrime, rr, flippedSignSqrt|flippedSignSqrtI)
+
+	r.Absolute(rr) // Choose the nonnegative square root.
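+
+	// u/v was square exactly when check matched u or -u: in the -u case the
+	// Select above already multiplied r by sqrt(-1), making r a true square
+	// root. A match against -u*sqrt(-1) instead means u/v is not square (r
+	// then holds the Section 4.3 representative), and wasSquare stays 0.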
+ return r, correctSignSqrt | flippedSignSqrt +} diff --git a/vendor/filippo.io/edwards25519/field/fe_amd64.go b/vendor/filippo.io/edwards25519/field/fe_amd64.go new file mode 100644 index 0000000000..edcf163c4e --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe_amd64.go @@ -0,0 +1,16 @@ +// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. + +//go:build amd64 && gc && !purego +// +build amd64,gc,!purego + +package field + +// feMul sets out = a * b. It works like feMulGeneric. +// +//go:noescape +func feMul(out *Element, a *Element, b *Element) + +// feSquare sets out = a * a. It works like feSquareGeneric. +// +//go:noescape +func feSquare(out *Element, a *Element) diff --git a/vendor/filippo.io/edwards25519/field/fe_amd64.s b/vendor/filippo.io/edwards25519/field/fe_amd64.s new file mode 100644 index 0000000000..293f013c94 --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe_amd64.s @@ -0,0 +1,379 @@ +// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. + +//go:build amd64 && gc && !purego +// +build amd64,gc,!purego + +#include "textflag.h" + +// func feMul(out *Element, a *Element, b *Element) +TEXT ·feMul(SB), NOSPLIT, $0-24 + MOVQ a+8(FP), CX + MOVQ b+16(FP), BX + + // r0 = a0×b0 + MOVQ (CX), AX + MULQ (BX) + MOVQ AX, DI + MOVQ DX, SI + + // r0 += 19×a1×b4 + MOVQ 8(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 32(BX) + ADDQ AX, DI + ADCQ DX, SI + + // r0 += 19×a2×b3 + MOVQ 16(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 24(BX) + ADDQ AX, DI + ADCQ DX, SI + + // r0 += 19×a3×b2 + MOVQ 24(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 16(BX) + ADDQ AX, DI + ADCQ DX, SI + + // r0 += 19×a4×b1 + MOVQ 32(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 8(BX) + ADDQ AX, DI + ADCQ DX, SI + + // r1 = a0×b1 + MOVQ (CX), AX + MULQ 8(BX) + MOVQ AX, R9 + MOVQ DX, R8 + + // r1 += a1×b0 + MOVQ 8(CX), AX + MULQ (BX) + ADDQ AX, R9 + ADCQ DX, R8 + + // r1 += 19×a2×b4 + MOVQ 16(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 32(BX) + ADDQ AX, R9 + ADCQ DX, R8 + + // r1 += 19×a3×b3 + MOVQ 24(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 24(BX) + ADDQ AX, R9 + ADCQ DX, R8 + + // r1 += 19×a4×b2 + MOVQ 32(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 16(BX) + ADDQ AX, R9 + ADCQ DX, R8 + + // r2 = a0×b2 + MOVQ (CX), AX + MULQ 16(BX) + MOVQ AX, R11 + MOVQ DX, R10 + + // r2 += a1×b1 + MOVQ 8(CX), AX + MULQ 8(BX) + ADDQ AX, R11 + ADCQ DX, R10 + + // r2 += a2×b0 + MOVQ 16(CX), AX + MULQ (BX) + ADDQ AX, R11 + ADCQ DX, R10 + + // r2 += 19×a3×b4 + MOVQ 24(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 32(BX) + ADDQ AX, R11 + ADCQ DX, R10 + + // r2 += 19×a4×b3 + MOVQ 32(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 24(BX) + ADDQ AX, R11 + ADCQ DX, R10 + + // r3 = a0×b3 + MOVQ (CX), AX + MULQ 24(BX) + MOVQ AX, R13 + MOVQ DX, R12 + + // r3 += a1×b2 + MOVQ 8(CX), AX + MULQ 16(BX) + ADDQ AX, R13 + ADCQ DX, R12 + + // r3 += a2×b1 + MOVQ 16(CX), AX + MULQ 8(BX) + ADDQ AX, R13 + ADCQ DX, R12 + + // r3 += a3×b0 + MOVQ 24(CX), AX + MULQ (BX) + ADDQ AX, R13 + ADCQ DX, R12 + + // r3 += 19×a4×b4 + MOVQ 32(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 32(BX) + ADDQ AX, R13 + ADCQ DX, R12 + + // r4 = a0×b4 + MOVQ (CX), AX + MULQ 32(BX) + MOVQ AX, R15 + MOVQ DX, R14 + + // r4 += a1×b3 + MOVQ 8(CX), AX + MULQ 24(BX) + ADDQ AX, R15 + ADCQ DX, R14 + + // r4 += a2×b2 + MOVQ 16(CX), AX + MULQ 16(BX) + ADDQ AX, R15 + ADCQ DX, R14 + + // r4 += a3×b1 + MOVQ 24(CX), AX + MULQ 8(BX) + ADDQ AX, R15 + ADCQ DX, R14 + + // r4 += a4×b0 + MOVQ 32(CX), AX + MULQ (BX) + ADDQ AX, R15 + 
ADCQ DX, R14 + + // First reduction chain + MOVQ $0x0007ffffffffffff, AX + SHLQ $0x0d, DI, SI + SHLQ $0x0d, R9, R8 + SHLQ $0x0d, R11, R10 + SHLQ $0x0d, R13, R12 + SHLQ $0x0d, R15, R14 + ANDQ AX, DI + IMUL3Q $0x13, R14, R14 + ADDQ R14, DI + ANDQ AX, R9 + ADDQ SI, R9 + ANDQ AX, R11 + ADDQ R8, R11 + ANDQ AX, R13 + ADDQ R10, R13 + ANDQ AX, R15 + ADDQ R12, R15 + + // Second reduction chain (carryPropagate) + MOVQ DI, SI + SHRQ $0x33, SI + MOVQ R9, R8 + SHRQ $0x33, R8 + MOVQ R11, R10 + SHRQ $0x33, R10 + MOVQ R13, R12 + SHRQ $0x33, R12 + MOVQ R15, R14 + SHRQ $0x33, R14 + ANDQ AX, DI + IMUL3Q $0x13, R14, R14 + ADDQ R14, DI + ANDQ AX, R9 + ADDQ SI, R9 + ANDQ AX, R11 + ADDQ R8, R11 + ANDQ AX, R13 + ADDQ R10, R13 + ANDQ AX, R15 + ADDQ R12, R15 + + // Store output + MOVQ out+0(FP), AX + MOVQ DI, (AX) + MOVQ R9, 8(AX) + MOVQ R11, 16(AX) + MOVQ R13, 24(AX) + MOVQ R15, 32(AX) + RET + +// func feSquare(out *Element, a *Element) +TEXT ·feSquare(SB), NOSPLIT, $0-16 + MOVQ a+8(FP), CX + + // r0 = l0×l0 + MOVQ (CX), AX + MULQ (CX) + MOVQ AX, SI + MOVQ DX, BX + + // r0 += 38×l1×l4 + MOVQ 8(CX), AX + IMUL3Q $0x26, AX, AX + MULQ 32(CX) + ADDQ AX, SI + ADCQ DX, BX + + // r0 += 38×l2×l3 + MOVQ 16(CX), AX + IMUL3Q $0x26, AX, AX + MULQ 24(CX) + ADDQ AX, SI + ADCQ DX, BX + + // r1 = 2×l0×l1 + MOVQ (CX), AX + SHLQ $0x01, AX + MULQ 8(CX) + MOVQ AX, R8 + MOVQ DX, DI + + // r1 += 38×l2×l4 + MOVQ 16(CX), AX + IMUL3Q $0x26, AX, AX + MULQ 32(CX) + ADDQ AX, R8 + ADCQ DX, DI + + // r1 += 19×l3×l3 + MOVQ 24(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 24(CX) + ADDQ AX, R8 + ADCQ DX, DI + + // r2 = 2×l0×l2 + MOVQ (CX), AX + SHLQ $0x01, AX + MULQ 16(CX) + MOVQ AX, R10 + MOVQ DX, R9 + + // r2 += l1×l1 + MOVQ 8(CX), AX + MULQ 8(CX) + ADDQ AX, R10 + ADCQ DX, R9 + + // r2 += 38×l3×l4 + MOVQ 24(CX), AX + IMUL3Q $0x26, AX, AX + MULQ 32(CX) + ADDQ AX, R10 + ADCQ DX, R9 + + // r3 = 2×l0×l3 + MOVQ (CX), AX + SHLQ $0x01, AX + MULQ 24(CX) + MOVQ AX, R12 + MOVQ DX, R11 + + // r3 += 2×l1×l2 + MOVQ 8(CX), AX + IMUL3Q $0x02, AX, AX + MULQ 16(CX) + ADDQ AX, R12 + ADCQ DX, R11 + + // r3 += 19×l4×l4 + MOVQ 32(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 32(CX) + ADDQ AX, R12 + ADCQ DX, R11 + + // r4 = 2×l0×l4 + MOVQ (CX), AX + SHLQ $0x01, AX + MULQ 32(CX) + MOVQ AX, R14 + MOVQ DX, R13 + + // r4 += 2×l1×l3 + MOVQ 8(CX), AX + IMUL3Q $0x02, AX, AX + MULQ 24(CX) + ADDQ AX, R14 + ADCQ DX, R13 + + // r4 += l2×l2 + MOVQ 16(CX), AX + MULQ 16(CX) + ADDQ AX, R14 + ADCQ DX, R13 + + // First reduction chain + MOVQ $0x0007ffffffffffff, AX + SHLQ $0x0d, SI, BX + SHLQ $0x0d, R8, DI + SHLQ $0x0d, R10, R9 + SHLQ $0x0d, R12, R11 + SHLQ $0x0d, R14, R13 + ANDQ AX, SI + IMUL3Q $0x13, R13, R13 + ADDQ R13, SI + ANDQ AX, R8 + ADDQ BX, R8 + ANDQ AX, R10 + ADDQ DI, R10 + ANDQ AX, R12 + ADDQ R9, R12 + ANDQ AX, R14 + ADDQ R11, R14 + + // Second reduction chain (carryPropagate) + MOVQ SI, BX + SHRQ $0x33, BX + MOVQ R8, DI + SHRQ $0x33, DI + MOVQ R10, R9 + SHRQ $0x33, R9 + MOVQ R12, R11 + SHRQ $0x33, R11 + MOVQ R14, R13 + SHRQ $0x33, R13 + ANDQ AX, SI + IMUL3Q $0x13, R13, R13 + ADDQ R13, SI + ANDQ AX, R8 + ADDQ BX, R8 + ANDQ AX, R10 + ADDQ DI, R10 + ANDQ AX, R12 + ADDQ R9, R12 + ANDQ AX, R14 + ADDQ R11, R14 + + // Store output + MOVQ out+0(FP), AX + MOVQ SI, (AX) + MOVQ R8, 8(AX) + MOVQ R10, 16(AX) + MOVQ R12, 24(AX) + MOVQ R14, 32(AX) + RET diff --git a/vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go b/vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go new file mode 100644 index 0000000000..ddb6c9b8f7 --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go @@ 
-0,0 +1,12 @@ +// Copyright (c) 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || !gc || purego +// +build !amd64 !gc purego + +package field + +func feMul(v, x, y *Element) { feMulGeneric(v, x, y) } + +func feSquare(v, x *Element) { feSquareGeneric(v, x) } diff --git a/vendor/filippo.io/edwards25519/field/fe_arm64.go b/vendor/filippo.io/edwards25519/field/fe_arm64.go new file mode 100644 index 0000000000..af459ef515 --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe_arm64.go @@ -0,0 +1,16 @@ +// Copyright (c) 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build arm64 && gc && !purego +// +build arm64,gc,!purego + +package field + +//go:noescape +func carryPropagate(v *Element) + +func (v *Element) carryPropagate() *Element { + carryPropagate(v) + return v +} diff --git a/vendor/filippo.io/edwards25519/field/fe_arm64.s b/vendor/filippo.io/edwards25519/field/fe_arm64.s new file mode 100644 index 0000000000..3126a43419 --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe_arm64.s @@ -0,0 +1,42 @@ +// Copyright (c) 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build arm64 && gc && !purego + +#include "textflag.h" + +// carryPropagate works exactly like carryPropagateGeneric and uses the +// same AND, ADD, and LSR+MADD instructions emitted by the compiler, but +// avoids loading R0-R4 twice and uses LDP and STP. +// +// See https://golang.org/issues/43145 for the main compiler issue. +// +// func carryPropagate(v *Element) +TEXT ·carryPropagate(SB),NOFRAME|NOSPLIT,$0-8 + MOVD v+0(FP), R20 + + LDP 0(R20), (R0, R1) + LDP 16(R20), (R2, R3) + MOVD 32(R20), R4 + + AND $0x7ffffffffffff, R0, R10 + AND $0x7ffffffffffff, R1, R11 + AND $0x7ffffffffffff, R2, R12 + AND $0x7ffffffffffff, R3, R13 + AND $0x7ffffffffffff, R4, R14 + + ADD R0>>51, R11, R11 + ADD R1>>51, R12, R12 + ADD R2>>51, R13, R13 + ADD R3>>51, R14, R14 + // R4>>51 * 19 + R10 -> R10 + LSR $51, R4, R21 + MOVD $19, R22 + MADD R22, R10, R21, R10 + + STP (R10, R11), 0(R20) + STP (R12, R13), 16(R20) + MOVD R14, 32(R20) + + RET diff --git a/vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go b/vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go new file mode 100644 index 0000000000..234a5b2e5d --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go @@ -0,0 +1,12 @@ +// Copyright (c) 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !arm64 || !gc || purego +// +build !arm64 !gc purego + +package field + +func (v *Element) carryPropagate() *Element { + return v.carryPropagateGeneric() +} diff --git a/vendor/filippo.io/edwards25519/field/fe_extra.go b/vendor/filippo.io/edwards25519/field/fe_extra.go new file mode 100644 index 0000000000..1ef503b9a2 --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe_extra.go @@ -0,0 +1,50 @@ +// Copyright (c) 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package field + +import "errors" + +// This file contains additional functionality that is not included in the +// upstream crypto/ed25519/edwards25519/field package. 
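+//
+// A minimal usage sketch for the SetWideBytes helper defined below
+// (illustrative only; sha512 is crypto/sha512 and msg is the caller's input,
+// both assumptions of this sketch, not part of this file):
+//
+//	h := sha512.Sum512(msg)
+//	fe, err := new(Element).SetWideBytes(h[:])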
+ +// SetWideBytes sets v to x, where x is a 64-byte little-endian encoding, which +// is reduced modulo the field order. If x is not of the right length, +// SetWideBytes returns nil and an error, and the receiver is unchanged. +// +// SetWideBytes is not necessary to select a uniformly distributed value, and is +// only provided for compatibility: SetBytes can be used instead as the chance +// of bias is less than 2⁻²⁵⁰. +func (v *Element) SetWideBytes(x []byte) (*Element, error) { + if len(x) != 64 { + return nil, errors.New("edwards25519: invalid SetWideBytes input size") + } + + // Split the 64 bytes into two elements, and extract the most significant + // bit of each, which is ignored by SetBytes. + lo, _ := new(Element).SetBytes(x[:32]) + loMSB := uint64(x[31] >> 7) + hi, _ := new(Element).SetBytes(x[32:]) + hiMSB := uint64(x[63] >> 7) + + // The output we want is + // + // v = lo + loMSB * 2²⁵⁵ + hi * 2²⁵⁶ + hiMSB * 2⁵¹¹ + // + // which applying the reduction identity comes out to + // + // v = lo + loMSB * 19 + hi * 2 * 19 + hiMSB * 2 * 19² + // + // l0 will be the sum of a 52 bits value (lo.l0), plus a 5 bits value + // (loMSB * 19), a 6 bits value (hi.l0 * 2 * 19), and a 10 bits value + // (hiMSB * 2 * 19²), so it fits in a uint64. + + v.l0 = lo.l0 + loMSB*19 + hi.l0*2*19 + hiMSB*2*19*19 + v.l1 = lo.l1 + hi.l1*2*19 + v.l2 = lo.l2 + hi.l2*2*19 + v.l3 = lo.l3 + hi.l3*2*19 + v.l4 = lo.l4 + hi.l4*2*19 + + return v.carryPropagate(), nil +} diff --git a/vendor/filippo.io/edwards25519/field/fe_generic.go b/vendor/filippo.io/edwards25519/field/fe_generic.go new file mode 100644 index 0000000000..86f5fd9553 --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe_generic.go @@ -0,0 +1,266 @@ +// Copyright (c) 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package field + +import "math/bits" + +// uint128 holds a 128-bit number as two 64-bit limbs, for use with the +// bits.Mul64 and bits.Add64 intrinsics. +type uint128 struct { + lo, hi uint64 +} + +// mul64 returns a * b. +func mul64(a, b uint64) uint128 { + hi, lo := bits.Mul64(a, b) + return uint128{lo, hi} +} + +// addMul64 returns v + a * b. +func addMul64(v uint128, a, b uint64) uint128 { + hi, lo := bits.Mul64(a, b) + lo, c := bits.Add64(lo, v.lo, 0) + hi, _ = bits.Add64(hi, v.hi, c) + return uint128{lo, hi} +} + +// shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits. +func shiftRightBy51(a uint128) uint64 { + return (a.hi << (64 - 51)) | (a.lo >> 51) +} + +func feMulGeneric(v, a, b *Element) { + a0 := a.l0 + a1 := a.l1 + a2 := a.l2 + a3 := a.l3 + a4 := a.l4 + + b0 := b.l0 + b1 := b.l1 + b2 := b.l2 + b3 := b.l3 + b4 := b.l4 + + // Limb multiplication works like pen-and-paper columnar multiplication, but + // with 51-bit limbs instead of digits. + // + // a4 a3 a2 a1 a0 x + // b4 b3 b2 b1 b0 = + // ------------------------ + // a4b0 a3b0 a2b0 a1b0 a0b0 + + // a4b1 a3b1 a2b1 a1b1 a0b1 + + // a4b2 a3b2 a2b2 a1b2 a0b2 + + // a4b3 a3b3 a2b3 a1b3 a0b3 + + // a4b4 a3b4 a2b4 a1b4 a0b4 = + // ---------------------------------------------- + // r8 r7 r6 r5 r4 r3 r2 r1 r0 + // + // We can then use the reduction identity (a * 2²⁵⁵ + b = a * 19 + b) to + // reduce the limbs that would overflow 255 bits. r5 * 2²⁵⁵ becomes 19 * r5, + // r6 * 2³⁰⁶ becomes 19 * r6 * 2⁵¹, etc. + // + // Reduction can be carried out simultaneously to multiplication. 
For + // example, we do not compute r5: whenever the result of a multiplication + // belongs to r5, like a1b4, we multiply it by 19 and add the result to r0. + // + // a4b0 a3b0 a2b0 a1b0 a0b0 + + // a3b1 a2b1 a1b1 a0b1 19×a4b1 + + // a2b2 a1b2 a0b2 19×a4b2 19×a3b2 + + // a1b3 a0b3 19×a4b3 19×a3b3 19×a2b3 + + // a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4 = + // -------------------------------------- + // r4 r3 r2 r1 r0 + // + // Finally we add up the columns into wide, overlapping limbs. + + a1_19 := a1 * 19 + a2_19 := a2 * 19 + a3_19 := a3 * 19 + a4_19 := a4 * 19 + + // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1) + r0 := mul64(a0, b0) + r0 = addMul64(r0, a1_19, b4) + r0 = addMul64(r0, a2_19, b3) + r0 = addMul64(r0, a3_19, b2) + r0 = addMul64(r0, a4_19, b1) + + // r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2) + r1 := mul64(a0, b1) + r1 = addMul64(r1, a1, b0) + r1 = addMul64(r1, a2_19, b4) + r1 = addMul64(r1, a3_19, b3) + r1 = addMul64(r1, a4_19, b2) + + // r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3) + r2 := mul64(a0, b2) + r2 = addMul64(r2, a1, b1) + r2 = addMul64(r2, a2, b0) + r2 = addMul64(r2, a3_19, b4) + r2 = addMul64(r2, a4_19, b3) + + // r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4 + r3 := mul64(a0, b3) + r3 = addMul64(r3, a1, b2) + r3 = addMul64(r3, a2, b1) + r3 = addMul64(r3, a3, b0) + r3 = addMul64(r3, a4_19, b4) + + // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0 + r4 := mul64(a0, b4) + r4 = addMul64(r4, a1, b3) + r4 = addMul64(r4, a2, b2) + r4 = addMul64(r4, a3, b1) + r4 = addMul64(r4, a4, b0) + + // After the multiplication, we need to reduce (carry) the five coefficients + // to obtain a result with limbs that are at most slightly larger than 2⁵¹, + // to respect the Element invariant. + // + // Overall, the reduction works the same as carryPropagate, except with + // wider inputs: we take the carry for each coefficient by shifting it right + // by 51, and add it to the limb above it. The top carry is multiplied by 19 + // according to the reduction identity and added to the lowest limb. + // + // The largest coefficient (r0) will be at most 111 bits, which guarantees + // that all carries are at most 111 - 51 = 60 bits, which fits in a uint64. + // + // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1) + // r0 < 2⁵²×2⁵² + 19×(2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵²) + // r0 < (1 + 19 × 4) × 2⁵² × 2⁵² + // r0 < 2⁷ × 2⁵² × 2⁵² + // r0 < 2¹¹¹ + // + // Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most + // 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and + // allows us to easily apply the reduction identity. + // + // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0 + // r4 < 5 × 2⁵² × 2⁵² + // r4 < 2¹⁰⁷ + // + + c0 := shiftRightBy51(r0) + c1 := shiftRightBy51(r1) + c2 := shiftRightBy51(r2) + c3 := shiftRightBy51(r3) + c4 := shiftRightBy51(r4) + + rr0 := r0.lo&maskLow51Bits + c4*19 + rr1 := r1.lo&maskLow51Bits + c0 + rr2 := r2.lo&maskLow51Bits + c1 + rr3 := r3.lo&maskLow51Bits + c2 + rr4 := r4.lo&maskLow51Bits + c3 + + // Now all coefficients fit into 64-bit registers but are still too large to + // be passed around as an Element. We therefore do one last carry chain, + // where the carries will be small enough to fit in the wiggle room above 2⁵¹. 
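+	// (Concretely: c4 is at most 2⁵⁶, so c4 * 19 stays below 2⁶¹, and each
+	// rr value is below 2⁵¹ + 2⁶¹, comfortably within a uint64.)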
+ *v = Element{rr0, rr1, rr2, rr3, rr4} + v.carryPropagate() +} + +func feSquareGeneric(v, a *Element) { + l0 := a.l0 + l1 := a.l1 + l2 := a.l2 + l3 := a.l3 + l4 := a.l4 + + // Squaring works precisely like multiplication above, but thanks to its + // symmetry we get to group a few terms together. + // + // l4 l3 l2 l1 l0 x + // l4 l3 l2 l1 l0 = + // ------------------------ + // l4l0 l3l0 l2l0 l1l0 l0l0 + + // l4l1 l3l1 l2l1 l1l1 l0l1 + + // l4l2 l3l2 l2l2 l1l2 l0l2 + + // l4l3 l3l3 l2l3 l1l3 l0l3 + + // l4l4 l3l4 l2l4 l1l4 l0l4 = + // ---------------------------------------------- + // r8 r7 r6 r5 r4 r3 r2 r1 r0 + // + // l4l0 l3l0 l2l0 l1l0 l0l0 + + // l3l1 l2l1 l1l1 l0l1 19×l4l1 + + // l2l2 l1l2 l0l2 19×l4l2 19×l3l2 + + // l1l3 l0l3 19×l4l3 19×l3l3 19×l2l3 + + // l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4 = + // -------------------------------------- + // r4 r3 r2 r1 r0 + // + // With precomputed 2×, 19×, and 2×19× terms, we can compute each limb with + // only three Mul64 and four Add64, instead of five and eight. + + l0_2 := l0 * 2 + l1_2 := l1 * 2 + + l1_38 := l1 * 38 + l2_38 := l2 * 38 + l3_38 := l3 * 38 + + l3_19 := l3 * 19 + l4_19 := l4 * 19 + + // r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3) + r0 := mul64(l0, l0) + r0 = addMul64(r0, l1_38, l4) + r0 = addMul64(r0, l2_38, l3) + + // r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3 + r1 := mul64(l0_2, l1) + r1 = addMul64(r1, l2_38, l4) + r1 = addMul64(r1, l3_19, l3) + + // r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4 + r2 := mul64(l0_2, l2) + r2 = addMul64(r2, l1, l1) + r2 = addMul64(r2, l3_38, l4) + + // r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4 + r3 := mul64(l0_2, l3) + r3 = addMul64(r3, l1_2, l2) + r3 = addMul64(r3, l4_19, l4) + + // r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2 + r4 := mul64(l0_2, l4) + r4 = addMul64(r4, l1_2, l3) + r4 = addMul64(r4, l2, l2) + + c0 := shiftRightBy51(r0) + c1 := shiftRightBy51(r1) + c2 := shiftRightBy51(r2) + c3 := shiftRightBy51(r3) + c4 := shiftRightBy51(r4) + + rr0 := r0.lo&maskLow51Bits + c4*19 + rr1 := r1.lo&maskLow51Bits + c0 + rr2 := r2.lo&maskLow51Bits + c1 + rr3 := r3.lo&maskLow51Bits + c2 + rr4 := r4.lo&maskLow51Bits + c3 + + *v = Element{rr0, rr1, rr2, rr3, rr4} + v.carryPropagate() +} + +// carryPropagateGeneric brings the limbs below 52 bits by applying the reduction +// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry. +func (v *Element) carryPropagateGeneric() *Element { + c0 := v.l0 >> 51 + c1 := v.l1 >> 51 + c2 := v.l2 >> 51 + c3 := v.l3 >> 51 + c4 := v.l4 >> 51 + + // c4 is at most 64 - 51 = 13 bits, so c4*19 is at most 18 bits, and + // the final l0 will be at most 52 bits. Similarly for the rest. + v.l0 = v.l0&maskLow51Bits + c4*19 + v.l1 = v.l1&maskLow51Bits + c0 + v.l2 = v.l2&maskLow51Bits + c1 + v.l3 = v.l3&maskLow51Bits + c2 + v.l4 = v.l4&maskLow51Bits + c3 + + return v +} diff --git a/vendor/filippo.io/edwards25519/scalar.go b/vendor/filippo.io/edwards25519/scalar.go new file mode 100644 index 0000000000..3fd1653877 --- /dev/null +++ b/vendor/filippo.io/edwards25519/scalar.go @@ -0,0 +1,343 @@ +// Copyright (c) 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package edwards25519 + +import ( + "encoding/binary" + "errors" +) + +// A Scalar is an integer modulo +// +// l = 2^252 + 27742317777372353535851937790883648493 +// +// which is the prime order of the edwards25519 group. +// +// This type works similarly to math/big.Int, and all arguments and +// receivers are allowed to alias. +// +// The zero value is a valid zero element. +type Scalar struct { + // s is the scalar in the Montgomery domain, in the format of the + // fiat-crypto implementation. + s fiatScalarMontgomeryDomainFieldElement +} + +// The field implementation in scalar_fiat.go is generated by the fiat-crypto +// project (https://github.com/mit-plv/fiat-crypto) at version v0.0.9 (23d2dbc) +// from a formally verified model. +// +// fiat-crypto code comes under the following license. +// +// Copyright (c) 2015-2020 The fiat-crypto Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// 1. Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// THIS SOFTWARE IS PROVIDED BY the fiat-crypto authors "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Berkeley Software Design, +// Inc. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// + +// NewScalar returns a new zero Scalar. +func NewScalar() *Scalar { + return &Scalar{} +} + +// MultiplyAdd sets s = x * y + z mod l, and returns s. It is equivalent to +// using Multiply and then Add. +func (s *Scalar) MultiplyAdd(x, y, z *Scalar) *Scalar { + // Make a copy of z in case it aliases s. + zCopy := new(Scalar).Set(z) + return s.Multiply(x, y).Add(s, zCopy) +} + +// Add sets s = x + y mod l, and returns s. +func (s *Scalar) Add(x, y *Scalar) *Scalar { + // s = 1 * x + y mod l + fiatScalarAdd(&s.s, &x.s, &y.s) + return s +} + +// Subtract sets s = x - y mod l, and returns s. +func (s *Scalar) Subtract(x, y *Scalar) *Scalar { + // s = -1 * y + x mod l + fiatScalarSub(&s.s, &x.s, &y.s) + return s +} + +// Negate sets s = -x mod l, and returns s. +func (s *Scalar) Negate(x *Scalar) *Scalar { + // s = -1 * x + 0 mod l + fiatScalarOpp(&s.s, &x.s) + return s +} + +// Multiply sets s = x * y mod l, and returns s. +func (s *Scalar) Multiply(x, y *Scalar) *Scalar { + // s = x * y + 0 mod l + fiatScalarMul(&s.s, &x.s, &y.s) + return s +} + +// Set sets s = x, and returns s. +func (s *Scalar) Set(x *Scalar) *Scalar { + *s = *x + return s +} + +// SetUniformBytes sets s = x mod l, where x is a 64-byte little-endian integer. +// If x is not of the right length, SetUniformBytes returns nil and an error, +// and the receiver is unchanged. +// +// SetUniformBytes can be used to set s to a uniformly distributed value given +// 64 uniformly distributed random bytes. 
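+//
+// A minimal sketch (illustrative only; rand is crypto/rand) of deriving a
+// uniformly distributed scalar:
+//
+//	var b [64]byte
+//	if _, err := rand.Read(b[:]); err != nil {
+//		panic(err)
+//	}
+//	s, _ := new(Scalar).SetUniformBytes(b[:])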
+func (s *Scalar) SetUniformBytes(x []byte) (*Scalar, error) { + if len(x) != 64 { + return nil, errors.New("edwards25519: invalid SetUniformBytes input length") + } + + // We have a value x of 512 bits, but our fiatScalarFromBytes function + // expects an input lower than l, which is a little over 252 bits. + // + // Instead of writing a reduction function that operates on wider inputs, we + // can interpret x as the sum of three shorter values a, b, and c. + // + // x = a + b * 2^168 + c * 2^336 mod l + // + // We then precompute 2^168 and 2^336 modulo l, and perform the reduction + // with two multiplications and two additions. + + s.setShortBytes(x[:21]) + t := new(Scalar).setShortBytes(x[21:42]) + s.Add(s, t.Multiply(t, scalarTwo168)) + t.setShortBytes(x[42:]) + s.Add(s, t.Multiply(t, scalarTwo336)) + + return s, nil +} + +// scalarTwo168 and scalarTwo336 are 2^168 and 2^336 modulo l, encoded as a +// fiatScalarMontgomeryDomainFieldElement, which is a little-endian 4-limb value +// in the 2^256 Montgomery domain. +var scalarTwo168 = &Scalar{s: [4]uint64{0x5b8ab432eac74798, 0x38afddd6de59d5d7, + 0xa2c131b399411b7c, 0x6329a7ed9ce5a30}} +var scalarTwo336 = &Scalar{s: [4]uint64{0xbd3d108e2b35ecc5, 0x5c3a3718bdf9c90b, + 0x63aa97a331b4f2ee, 0x3d217f5be65cb5c}} + +// setShortBytes sets s = x mod l, where x is a little-endian integer shorter +// than 32 bytes. +func (s *Scalar) setShortBytes(x []byte) *Scalar { + if len(x) >= 32 { + panic("edwards25519: internal error: setShortBytes called with a long string") + } + var buf [32]byte + copy(buf[:], x) + fiatScalarFromBytes((*[4]uint64)(&s.s), &buf) + fiatScalarToMontgomery(&s.s, (*fiatScalarNonMontgomeryDomainFieldElement)(&s.s)) + return s +} + +// SetCanonicalBytes sets s = x, where x is a 32-byte little-endian encoding of +// s, and returns s. If x is not a canonical encoding of s, SetCanonicalBytes +// returns nil and an error, and the receiver is unchanged. +func (s *Scalar) SetCanonicalBytes(x []byte) (*Scalar, error) { + if len(x) != 32 { + return nil, errors.New("invalid scalar length") + } + if !isReduced(x) { + return nil, errors.New("invalid scalar encoding") + } + + fiatScalarFromBytes((*[4]uint64)(&s.s), (*[32]byte)(x)) + fiatScalarToMontgomery(&s.s, (*fiatScalarNonMontgomeryDomainFieldElement)(&s.s)) + + return s, nil +} + +// scalarMinusOneBytes is l - 1 in little endian. +var scalarMinusOneBytes = [32]byte{236, 211, 245, 92, 26, 99, 18, 88, 214, 156, 247, 162, 222, 249, 222, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16} + +// isReduced returns whether the given scalar in 32-byte little endian encoded +// form is reduced modulo l. +func isReduced(s []byte) bool { + if len(s) != 32 { + return false + } + + for i := len(s) - 1; i >= 0; i-- { + switch { + case s[i] > scalarMinusOneBytes[i]: + return false + case s[i] < scalarMinusOneBytes[i]: + return true + } + } + return true +} + +// SetBytesWithClamping applies the buffer pruning described in RFC 8032, +// Section 5.1.5 (also known as clamping) and sets s to the result. The input +// must be 32 bytes, and it is not modified. If x is not of the right length, +// SetBytesWithClamping returns nil and an error, and the receiver is unchanged. +// +// Note that since Scalar values are always reduced modulo the prime order of +// the curve, the resulting value will not preserve any of the cofactor-clearing +// properties that clamping is meant to provide. It will however work as +// expected as long as it is applied to points on the prime order subgroup, like +// in Ed25519. 
In fact, it is lost to history why RFC 8032 adopted the +// irrelevant RFC 7748 clamping, but it is now required for compatibility. +func (s *Scalar) SetBytesWithClamping(x []byte) (*Scalar, error) { + // The description above omits the purpose of the high bits of the clamping + // for brevity, but those are also lost to reductions, and are also + // irrelevant to edwards25519 as they protect against a specific + // implementation bug that was once observed in a generic Montgomery ladder. + if len(x) != 32 { + return nil, errors.New("edwards25519: invalid SetBytesWithClamping input length") + } + + // We need to use the wide reduction from SetUniformBytes, since clamping + // sets the 2^254 bit, making the value higher than the order. + var wideBytes [64]byte + copy(wideBytes[:], x[:]) + wideBytes[0] &= 248 + wideBytes[31] &= 63 + wideBytes[31] |= 64 + return s.SetUniformBytes(wideBytes[:]) +} + +// Bytes returns the canonical 32-byte little-endian encoding of s. +func (s *Scalar) Bytes() []byte { + // This function is outlined to make the allocations inline in the caller + // rather than happen on the heap. + var encoded [32]byte + return s.bytes(&encoded) +} + +func (s *Scalar) bytes(out *[32]byte) []byte { + var ss fiatScalarNonMontgomeryDomainFieldElement + fiatScalarFromMontgomery(&ss, &s.s) + fiatScalarToBytes(out, (*[4]uint64)(&ss)) + return out[:] +} + +// Equal returns 1 if s and t are equal, and 0 otherwise. +func (s *Scalar) Equal(t *Scalar) int { + var diff fiatScalarMontgomeryDomainFieldElement + fiatScalarSub(&diff, &s.s, &t.s) + var nonzero uint64 + fiatScalarNonzero(&nonzero, (*[4]uint64)(&diff)) + nonzero |= nonzero >> 32 + nonzero |= nonzero >> 16 + nonzero |= nonzero >> 8 + nonzero |= nonzero >> 4 + nonzero |= nonzero >> 2 + nonzero |= nonzero >> 1 + return int(^nonzero) & 1 +} + +// nonAdjacentForm computes a width-w non-adjacent form for this scalar. +// +// w must be between 2 and 8, or nonAdjacentForm will panic. +func (s *Scalar) nonAdjacentForm(w uint) [256]int8 { + // This implementation is adapted from the one + // in curve25519-dalek and is documented there: + // https://github.com/dalek-cryptography/curve25519-dalek/blob/f630041af28e9a405255f98a8a93adca18e4315b/src/scalar.rs#L800-L871 + b := s.Bytes() + if b[31] > 127 { + panic("scalar has high bit set illegally") + } + if w < 2 { + panic("w must be at least 2 by the definition of NAF") + } else if w > 8 { + panic("NAF digits must fit in int8") + } + + var naf [256]int8 + var digits [5]uint64 + + for i := 0; i < 4; i++ { + digits[i] = binary.LittleEndian.Uint64(b[i*8:]) + } + + width := uint64(1 << w) + windowMask := uint64(width - 1) + + pos := uint(0) + carry := uint64(0) + for pos < 256 { + indexU64 := pos / 64 + indexBit := pos % 64 + var bitBuf uint64 + if indexBit < 64-w { + // This window's bits are contained in a single u64 + bitBuf = digits[indexU64] >> indexBit + } else { + // Combine the current 64 bits with bits from the next 64 + bitBuf = (digits[indexU64] >> indexBit) | (digits[1+indexU64] << (64 - indexBit)) + } + + // Add carry into the current window + window := carry + (bitBuf & windowMask) + + if window&1 == 0 { + // If the window value is even, preserve the carry and continue. + // Why is the carry preserved? 
+ // If carry == 0 and window & 1 == 0, + // then the next carry should be 0 + // If carry == 1 and window & 1 == 0, + // then bit_buf & 1 == 1 so the next carry should be 1 + pos += 1 + continue + } + + if window < width/2 { + carry = 0 + naf[pos] = int8(window) + } else { + carry = 1 + naf[pos] = int8(window) - int8(width) + } + + pos += w + } + return naf +} + +func (s *Scalar) signedRadix16() [64]int8 { + b := s.Bytes() + if b[31] > 127 { + panic("scalar has high bit set illegally") + } + + var digits [64]int8 + + // Compute unsigned radix-16 digits: + for i := 0; i < 32; i++ { + digits[2*i] = int8(b[i] & 15) + digits[2*i+1] = int8((b[i] >> 4) & 15) + } + + // Recenter coefficients: + for i := 0; i < 63; i++ { + carry := (digits[i] + 8) >> 4 + digits[i] -= carry << 4 + digits[i+1] += carry + } + + return digits +} diff --git a/vendor/filippo.io/edwards25519/scalar_fiat.go b/vendor/filippo.io/edwards25519/scalar_fiat.go new file mode 100644 index 0000000000..2e5782b605 --- /dev/null +++ b/vendor/filippo.io/edwards25519/scalar_fiat.go @@ -0,0 +1,1147 @@ +// Code generated by Fiat Cryptography. DO NOT EDIT. +// +// Autogenerated: word_by_word_montgomery --lang Go --cmovznz-by-mul --relax-primitive-carry-to-bitwidth 32,64 --public-function-case camelCase --public-type-case camelCase --private-function-case camelCase --private-type-case camelCase --doc-text-before-function-name '' --doc-newline-before-package-declaration --doc-prepend-header 'Code generated by Fiat Cryptography. DO NOT EDIT.' --package-name edwards25519 Scalar 64 '2^252 + 27742317777372353535851937790883648493' mul add sub opp nonzero from_montgomery to_montgomery to_bytes from_bytes +// +// curve description: Scalar +// +// machine_wordsize = 64 (from "64") +// +// requested operations: mul, add, sub, opp, nonzero, from_montgomery, to_montgomery, to_bytes, from_bytes +// +// m = 0x1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed (from "2^252 + 27742317777372353535851937790883648493") +// +// +// +// NOTE: In addition to the bounds specified above each function, all +// +// functions synthesized for this Montgomery arithmetic require the +// +// input to be strictly less than the prime modulus (m), and also +// +// require the input to be in the unique saturated representation. +// +// All functions also ensure that these two properties are true of +// +// return values. 
+// +// +// +// Computed values: +// +// eval z = z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) +// +// bytes_eval z = z[0] + (z[1] << 8) + (z[2] << 16) + (z[3] << 24) + (z[4] << 32) + (z[5] << 40) + (z[6] << 48) + (z[7] << 56) + (z[8] << 64) + (z[9] << 72) + (z[10] << 80) + (z[11] << 88) + (z[12] << 96) + (z[13] << 104) + (z[14] << 112) + (z[15] << 120) + (z[16] << 128) + (z[17] << 136) + (z[18] << 144) + (z[19] << 152) + (z[20] << 160) + (z[21] << 168) + (z[22] << 176) + (z[23] << 184) + (z[24] << 192) + (z[25] << 200) + (z[26] << 208) + (z[27] << 216) + (z[28] << 224) + (z[29] << 232) + (z[30] << 240) + (z[31] << 248) +// +// twos_complement_eval z = let x1 := z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) in +// +// if x1 & (2^256-1) < 2^255 then x1 & (2^256-1) else (x1 & (2^256-1)) - 2^256 + +package edwards25519 + +import "math/bits" + +type fiatScalarUint1 uint64 // We use uint64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927 +type fiatScalarInt1 int64 // We use uint64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927 + +// The type fiatScalarMontgomeryDomainFieldElement is a field element in the Montgomery domain. +// +// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] +type fiatScalarMontgomeryDomainFieldElement [4]uint64 + +// The type fiatScalarNonMontgomeryDomainFieldElement is a field element NOT in the Montgomery domain. +// +// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] +type fiatScalarNonMontgomeryDomainFieldElement [4]uint64 + +// fiatScalarCmovznzU64 is a single-word conditional move. +// +// Postconditions: +// +// out1 = (if arg1 = 0 then arg2 else arg3) +// +// Input Bounds: +// +// arg1: [0x0 ~> 0x1] +// arg2: [0x0 ~> 0xffffffffffffffff] +// arg3: [0x0 ~> 0xffffffffffffffff] +// +// Output Bounds: +// +// out1: [0x0 ~> 0xffffffffffffffff] +func fiatScalarCmovznzU64(out1 *uint64, arg1 fiatScalarUint1, arg2 uint64, arg3 uint64) { + x1 := (uint64(arg1) * 0xffffffffffffffff) + x2 := ((x1 & arg3) | ((^x1) & arg2)) + *out1 = x2 +} + +// fiatScalarMul multiplies two field elements in the Montgomery domain. 
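+// (In this file the Montgomery domain represents a value a as a * 2^256 mod m,
+// so the word-by-word loop below interleaves a schoolbook multiplication with
+// four rounds of Montgomery reduction that divide the product by 2^256 again.)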
+// +// Preconditions: +// +// 0 ≤ eval arg1 < m +// 0 ≤ eval arg2 < m +// +// Postconditions: +// +// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg2)) mod m +// 0 ≤ eval out1 < m +func fiatScalarMul(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) { + x1 := arg1[1] + x2 := arg1[2] + x3 := arg1[3] + x4 := arg1[0] + var x5 uint64 + var x6 uint64 + x6, x5 = bits.Mul64(x4, arg2[3]) + var x7 uint64 + var x8 uint64 + x8, x7 = bits.Mul64(x4, arg2[2]) + var x9 uint64 + var x10 uint64 + x10, x9 = bits.Mul64(x4, arg2[1]) + var x11 uint64 + var x12 uint64 + x12, x11 = bits.Mul64(x4, arg2[0]) + var x13 uint64 + var x14 uint64 + x13, x14 = bits.Add64(x12, x9, uint64(0x0)) + var x15 uint64 + var x16 uint64 + x15, x16 = bits.Add64(x10, x7, uint64(fiatScalarUint1(x14))) + var x17 uint64 + var x18 uint64 + x17, x18 = bits.Add64(x8, x5, uint64(fiatScalarUint1(x16))) + x19 := (uint64(fiatScalarUint1(x18)) + x6) + var x20 uint64 + _, x20 = bits.Mul64(x11, 0xd2b51da312547e1b) + var x22 uint64 + var x23 uint64 + x23, x22 = bits.Mul64(x20, 0x1000000000000000) + var x24 uint64 + var x25 uint64 + x25, x24 = bits.Mul64(x20, 0x14def9dea2f79cd6) + var x26 uint64 + var x27 uint64 + x27, x26 = bits.Mul64(x20, 0x5812631a5cf5d3ed) + var x28 uint64 + var x29 uint64 + x28, x29 = bits.Add64(x27, x24, uint64(0x0)) + x30 := (uint64(fiatScalarUint1(x29)) + x25) + var x32 uint64 + _, x32 = bits.Add64(x11, x26, uint64(0x0)) + var x33 uint64 + var x34 uint64 + x33, x34 = bits.Add64(x13, x28, uint64(fiatScalarUint1(x32))) + var x35 uint64 + var x36 uint64 + x35, x36 = bits.Add64(x15, x30, uint64(fiatScalarUint1(x34))) + var x37 uint64 + var x38 uint64 + x37, x38 = bits.Add64(x17, x22, uint64(fiatScalarUint1(x36))) + var x39 uint64 + var x40 uint64 + x39, x40 = bits.Add64(x19, x23, uint64(fiatScalarUint1(x38))) + var x41 uint64 + var x42 uint64 + x42, x41 = bits.Mul64(x1, arg2[3]) + var x43 uint64 + var x44 uint64 + x44, x43 = bits.Mul64(x1, arg2[2]) + var x45 uint64 + var x46 uint64 + x46, x45 = bits.Mul64(x1, arg2[1]) + var x47 uint64 + var x48 uint64 + x48, x47 = bits.Mul64(x1, arg2[0]) + var x49 uint64 + var x50 uint64 + x49, x50 = bits.Add64(x48, x45, uint64(0x0)) + var x51 uint64 + var x52 uint64 + x51, x52 = bits.Add64(x46, x43, uint64(fiatScalarUint1(x50))) + var x53 uint64 + var x54 uint64 + x53, x54 = bits.Add64(x44, x41, uint64(fiatScalarUint1(x52))) + x55 := (uint64(fiatScalarUint1(x54)) + x42) + var x56 uint64 + var x57 uint64 + x56, x57 = bits.Add64(x33, x47, uint64(0x0)) + var x58 uint64 + var x59 uint64 + x58, x59 = bits.Add64(x35, x49, uint64(fiatScalarUint1(x57))) + var x60 uint64 + var x61 uint64 + x60, x61 = bits.Add64(x37, x51, uint64(fiatScalarUint1(x59))) + var x62 uint64 + var x63 uint64 + x62, x63 = bits.Add64(x39, x53, uint64(fiatScalarUint1(x61))) + var x64 uint64 + var x65 uint64 + x64, x65 = bits.Add64(uint64(fiatScalarUint1(x40)), x55, uint64(fiatScalarUint1(x63))) + var x66 uint64 + _, x66 = bits.Mul64(x56, 0xd2b51da312547e1b) + var x68 uint64 + var x69 uint64 + x69, x68 = bits.Mul64(x66, 0x1000000000000000) + var x70 uint64 + var x71 uint64 + x71, x70 = bits.Mul64(x66, 0x14def9dea2f79cd6) + var x72 uint64 + var x73 uint64 + x73, x72 = bits.Mul64(x66, 0x5812631a5cf5d3ed) + var x74 uint64 + var x75 uint64 + x74, x75 = bits.Add64(x73, x70, uint64(0x0)) + x76 := (uint64(fiatScalarUint1(x75)) + x71) + var x78 uint64 + _, x78 = bits.Add64(x56, x72, uint64(0x0)) + var x79 
uint64 + var x80 uint64 + x79, x80 = bits.Add64(x58, x74, uint64(fiatScalarUint1(x78))) + var x81 uint64 + var x82 uint64 + x81, x82 = bits.Add64(x60, x76, uint64(fiatScalarUint1(x80))) + var x83 uint64 + var x84 uint64 + x83, x84 = bits.Add64(x62, x68, uint64(fiatScalarUint1(x82))) + var x85 uint64 + var x86 uint64 + x85, x86 = bits.Add64(x64, x69, uint64(fiatScalarUint1(x84))) + x87 := (uint64(fiatScalarUint1(x86)) + uint64(fiatScalarUint1(x65))) + var x88 uint64 + var x89 uint64 + x89, x88 = bits.Mul64(x2, arg2[3]) + var x90 uint64 + var x91 uint64 + x91, x90 = bits.Mul64(x2, arg2[2]) + var x92 uint64 + var x93 uint64 + x93, x92 = bits.Mul64(x2, arg2[1]) + var x94 uint64 + var x95 uint64 + x95, x94 = bits.Mul64(x2, arg2[0]) + var x96 uint64 + var x97 uint64 + x96, x97 = bits.Add64(x95, x92, uint64(0x0)) + var x98 uint64 + var x99 uint64 + x98, x99 = bits.Add64(x93, x90, uint64(fiatScalarUint1(x97))) + var x100 uint64 + var x101 uint64 + x100, x101 = bits.Add64(x91, x88, uint64(fiatScalarUint1(x99))) + x102 := (uint64(fiatScalarUint1(x101)) + x89) + var x103 uint64 + var x104 uint64 + x103, x104 = bits.Add64(x79, x94, uint64(0x0)) + var x105 uint64 + var x106 uint64 + x105, x106 = bits.Add64(x81, x96, uint64(fiatScalarUint1(x104))) + var x107 uint64 + var x108 uint64 + x107, x108 = bits.Add64(x83, x98, uint64(fiatScalarUint1(x106))) + var x109 uint64 + var x110 uint64 + x109, x110 = bits.Add64(x85, x100, uint64(fiatScalarUint1(x108))) + var x111 uint64 + var x112 uint64 + x111, x112 = bits.Add64(x87, x102, uint64(fiatScalarUint1(x110))) + var x113 uint64 + _, x113 = bits.Mul64(x103, 0xd2b51da312547e1b) + var x115 uint64 + var x116 uint64 + x116, x115 = bits.Mul64(x113, 0x1000000000000000) + var x117 uint64 + var x118 uint64 + x118, x117 = bits.Mul64(x113, 0x14def9dea2f79cd6) + var x119 uint64 + var x120 uint64 + x120, x119 = bits.Mul64(x113, 0x5812631a5cf5d3ed) + var x121 uint64 + var x122 uint64 + x121, x122 = bits.Add64(x120, x117, uint64(0x0)) + x123 := (uint64(fiatScalarUint1(x122)) + x118) + var x125 uint64 + _, x125 = bits.Add64(x103, x119, uint64(0x0)) + var x126 uint64 + var x127 uint64 + x126, x127 = bits.Add64(x105, x121, uint64(fiatScalarUint1(x125))) + var x128 uint64 + var x129 uint64 + x128, x129 = bits.Add64(x107, x123, uint64(fiatScalarUint1(x127))) + var x130 uint64 + var x131 uint64 + x130, x131 = bits.Add64(x109, x115, uint64(fiatScalarUint1(x129))) + var x132 uint64 + var x133 uint64 + x132, x133 = bits.Add64(x111, x116, uint64(fiatScalarUint1(x131))) + x134 := (uint64(fiatScalarUint1(x133)) + uint64(fiatScalarUint1(x112))) + var x135 uint64 + var x136 uint64 + x136, x135 = bits.Mul64(x3, arg2[3]) + var x137 uint64 + var x138 uint64 + x138, x137 = bits.Mul64(x3, arg2[2]) + var x139 uint64 + var x140 uint64 + x140, x139 = bits.Mul64(x3, arg2[1]) + var x141 uint64 + var x142 uint64 + x142, x141 = bits.Mul64(x3, arg2[0]) + var x143 uint64 + var x144 uint64 + x143, x144 = bits.Add64(x142, x139, uint64(0x0)) + var x145 uint64 + var x146 uint64 + x145, x146 = bits.Add64(x140, x137, uint64(fiatScalarUint1(x144))) + var x147 uint64 + var x148 uint64 + x147, x148 = bits.Add64(x138, x135, uint64(fiatScalarUint1(x146))) + x149 := (uint64(fiatScalarUint1(x148)) + x136) + var x150 uint64 + var x151 uint64 + x150, x151 = bits.Add64(x126, x141, uint64(0x0)) + var x152 uint64 + var x153 uint64 + x152, x153 = bits.Add64(x128, x143, uint64(fiatScalarUint1(x151))) + var x154 uint64 + var x155 uint64 + x154, x155 = bits.Add64(x130, x145, uint64(fiatScalarUint1(x153))) + var x156 uint64 + 
var x157 uint64 + x156, x157 = bits.Add64(x132, x147, uint64(fiatScalarUint1(x155))) + var x158 uint64 + var x159 uint64 + x158, x159 = bits.Add64(x134, x149, uint64(fiatScalarUint1(x157))) + var x160 uint64 + _, x160 = bits.Mul64(x150, 0xd2b51da312547e1b) + var x162 uint64 + var x163 uint64 + x163, x162 = bits.Mul64(x160, 0x1000000000000000) + var x164 uint64 + var x165 uint64 + x165, x164 = bits.Mul64(x160, 0x14def9dea2f79cd6) + var x166 uint64 + var x167 uint64 + x167, x166 = bits.Mul64(x160, 0x5812631a5cf5d3ed) + var x168 uint64 + var x169 uint64 + x168, x169 = bits.Add64(x167, x164, uint64(0x0)) + x170 := (uint64(fiatScalarUint1(x169)) + x165) + var x172 uint64 + _, x172 = bits.Add64(x150, x166, uint64(0x0)) + var x173 uint64 + var x174 uint64 + x173, x174 = bits.Add64(x152, x168, uint64(fiatScalarUint1(x172))) + var x175 uint64 + var x176 uint64 + x175, x176 = bits.Add64(x154, x170, uint64(fiatScalarUint1(x174))) + var x177 uint64 + var x178 uint64 + x177, x178 = bits.Add64(x156, x162, uint64(fiatScalarUint1(x176))) + var x179 uint64 + var x180 uint64 + x179, x180 = bits.Add64(x158, x163, uint64(fiatScalarUint1(x178))) + x181 := (uint64(fiatScalarUint1(x180)) + uint64(fiatScalarUint1(x159))) + var x182 uint64 + var x183 uint64 + x182, x183 = bits.Sub64(x173, 0x5812631a5cf5d3ed, uint64(0x0)) + var x184 uint64 + var x185 uint64 + x184, x185 = bits.Sub64(x175, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x183))) + var x186 uint64 + var x187 uint64 + x186, x187 = bits.Sub64(x177, uint64(0x0), uint64(fiatScalarUint1(x185))) + var x188 uint64 + var x189 uint64 + x188, x189 = bits.Sub64(x179, 0x1000000000000000, uint64(fiatScalarUint1(x187))) + var x191 uint64 + _, x191 = bits.Sub64(x181, uint64(0x0), uint64(fiatScalarUint1(x189))) + var x192 uint64 + fiatScalarCmovznzU64(&x192, fiatScalarUint1(x191), x182, x173) + var x193 uint64 + fiatScalarCmovznzU64(&x193, fiatScalarUint1(x191), x184, x175) + var x194 uint64 + fiatScalarCmovznzU64(&x194, fiatScalarUint1(x191), x186, x177) + var x195 uint64 + fiatScalarCmovznzU64(&x195, fiatScalarUint1(x191), x188, x179) + out1[0] = x192 + out1[1] = x193 + out1[2] = x194 + out1[3] = x195 +} + +// fiatScalarAdd adds two field elements in the Montgomery domain. 
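+// (Addition needs no Montgomery reduction: the map a -> a * 2^256 mod m is
+// linear, so only the conditional subtraction of m below is required to bring
+// the sum back into [0, m).)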
+// +// Preconditions: +// +// 0 ≤ eval arg1 < m +// 0 ≤ eval arg2 < m +// +// Postconditions: +// +// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) + eval (from_montgomery arg2)) mod m +// 0 ≤ eval out1 < m +func fiatScalarAdd(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) { + var x1 uint64 + var x2 uint64 + x1, x2 = bits.Add64(arg1[0], arg2[0], uint64(0x0)) + var x3 uint64 + var x4 uint64 + x3, x4 = bits.Add64(arg1[1], arg2[1], uint64(fiatScalarUint1(x2))) + var x5 uint64 + var x6 uint64 + x5, x6 = bits.Add64(arg1[2], arg2[2], uint64(fiatScalarUint1(x4))) + var x7 uint64 + var x8 uint64 + x7, x8 = bits.Add64(arg1[3], arg2[3], uint64(fiatScalarUint1(x6))) + var x9 uint64 + var x10 uint64 + x9, x10 = bits.Sub64(x1, 0x5812631a5cf5d3ed, uint64(0x0)) + var x11 uint64 + var x12 uint64 + x11, x12 = bits.Sub64(x3, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x10))) + var x13 uint64 + var x14 uint64 + x13, x14 = bits.Sub64(x5, uint64(0x0), uint64(fiatScalarUint1(x12))) + var x15 uint64 + var x16 uint64 + x15, x16 = bits.Sub64(x7, 0x1000000000000000, uint64(fiatScalarUint1(x14))) + var x18 uint64 + _, x18 = bits.Sub64(uint64(fiatScalarUint1(x8)), uint64(0x0), uint64(fiatScalarUint1(x16))) + var x19 uint64 + fiatScalarCmovznzU64(&x19, fiatScalarUint1(x18), x9, x1) + var x20 uint64 + fiatScalarCmovznzU64(&x20, fiatScalarUint1(x18), x11, x3) + var x21 uint64 + fiatScalarCmovznzU64(&x21, fiatScalarUint1(x18), x13, x5) + var x22 uint64 + fiatScalarCmovznzU64(&x22, fiatScalarUint1(x18), x15, x7) + out1[0] = x19 + out1[1] = x20 + out1[2] = x21 + out1[3] = x22 +} + +// fiatScalarSub subtracts two field elements in the Montgomery domain. +// +// Preconditions: +// +// 0 ≤ eval arg1 < m +// 0 ≤ eval arg2 < m +// +// Postconditions: +// +// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) - eval (from_montgomery arg2)) mod m +// 0 ≤ eval out1 < m +func fiatScalarSub(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) { + var x1 uint64 + var x2 uint64 + x1, x2 = bits.Sub64(arg1[0], arg2[0], uint64(0x0)) + var x3 uint64 + var x4 uint64 + x3, x4 = bits.Sub64(arg1[1], arg2[1], uint64(fiatScalarUint1(x2))) + var x5 uint64 + var x6 uint64 + x5, x6 = bits.Sub64(arg1[2], arg2[2], uint64(fiatScalarUint1(x4))) + var x7 uint64 + var x8 uint64 + x7, x8 = bits.Sub64(arg1[3], arg2[3], uint64(fiatScalarUint1(x6))) + var x9 uint64 + fiatScalarCmovznzU64(&x9, fiatScalarUint1(x8), uint64(0x0), 0xffffffffffffffff) + var x10 uint64 + var x11 uint64 + x10, x11 = bits.Add64(x1, (x9 & 0x5812631a5cf5d3ed), uint64(0x0)) + var x12 uint64 + var x13 uint64 + x12, x13 = bits.Add64(x3, (x9 & 0x14def9dea2f79cd6), uint64(fiatScalarUint1(x11))) + var x14 uint64 + var x15 uint64 + x14, x15 = bits.Add64(x5, uint64(0x0), uint64(fiatScalarUint1(x13))) + var x16 uint64 + x16, _ = bits.Add64(x7, (x9 & 0x1000000000000000), uint64(fiatScalarUint1(x15))) + out1[0] = x10 + out1[1] = x12 + out1[2] = x14 + out1[3] = x16 +} + +// fiatScalarOpp negates a field element in the Montgomery domain. 
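+// (Negation is computed as 0 - a with borrows, followed by a conditional
+// add-back of m that keeps the result in [0, m).)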
+// +// Preconditions: +// +// 0 ≤ eval arg1 < m +// +// Postconditions: +// +// eval (from_montgomery out1) mod m = -eval (from_montgomery arg1) mod m +// 0 ≤ eval out1 < m +func fiatScalarOpp(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement) { + var x1 uint64 + var x2 uint64 + x1, x2 = bits.Sub64(uint64(0x0), arg1[0], uint64(0x0)) + var x3 uint64 + var x4 uint64 + x3, x4 = bits.Sub64(uint64(0x0), arg1[1], uint64(fiatScalarUint1(x2))) + var x5 uint64 + var x6 uint64 + x5, x6 = bits.Sub64(uint64(0x0), arg1[2], uint64(fiatScalarUint1(x4))) + var x7 uint64 + var x8 uint64 + x7, x8 = bits.Sub64(uint64(0x0), arg1[3], uint64(fiatScalarUint1(x6))) + var x9 uint64 + fiatScalarCmovznzU64(&x9, fiatScalarUint1(x8), uint64(0x0), 0xffffffffffffffff) + var x10 uint64 + var x11 uint64 + x10, x11 = bits.Add64(x1, (x9 & 0x5812631a5cf5d3ed), uint64(0x0)) + var x12 uint64 + var x13 uint64 + x12, x13 = bits.Add64(x3, (x9 & 0x14def9dea2f79cd6), uint64(fiatScalarUint1(x11))) + var x14 uint64 + var x15 uint64 + x14, x15 = bits.Add64(x5, uint64(0x0), uint64(fiatScalarUint1(x13))) + var x16 uint64 + x16, _ = bits.Add64(x7, (x9 & 0x1000000000000000), uint64(fiatScalarUint1(x15))) + out1[0] = x10 + out1[1] = x12 + out1[2] = x14 + out1[3] = x16 +} + +// fiatScalarNonzero outputs a single non-zero word if the input is non-zero and zero otherwise. +// +// Preconditions: +// +// 0 ≤ eval arg1 < m +// +// Postconditions: +// +// out1 = 0 ↔ eval (from_montgomery arg1) mod m = 0 +// +// Input Bounds: +// +// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] +// +// Output Bounds: +// +// out1: [0x0 ~> 0xffffffffffffffff] +func fiatScalarNonzero(out1 *uint64, arg1 *[4]uint64) { + x1 := (arg1[0] | (arg1[1] | (arg1[2] | arg1[3]))) + *out1 = x1 +} + +// fiatScalarFromMontgomery translates a field element out of the Montgomery domain. 
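+// (Leaving the domain amounts to a Montgomery multiplication by 1: each of
+// the four reduction rounds below multiplies the running value by 2^-64
+// mod m, stripping the 2^256 factor introduced by fiatScalarToMontgomery.)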
+// +// Preconditions: +// +// 0 ≤ eval arg1 < m +// +// Postconditions: +// +// eval out1 mod m = (eval arg1 * ((2^64)⁻¹ mod m)^4) mod m +// 0 ≤ eval out1 < m +func fiatScalarFromMontgomery(out1 *fiatScalarNonMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement) { + x1 := arg1[0] + var x2 uint64 + _, x2 = bits.Mul64(x1, 0xd2b51da312547e1b) + var x4 uint64 + var x5 uint64 + x5, x4 = bits.Mul64(x2, 0x1000000000000000) + var x6 uint64 + var x7 uint64 + x7, x6 = bits.Mul64(x2, 0x14def9dea2f79cd6) + var x8 uint64 + var x9 uint64 + x9, x8 = bits.Mul64(x2, 0x5812631a5cf5d3ed) + var x10 uint64 + var x11 uint64 + x10, x11 = bits.Add64(x9, x6, uint64(0x0)) + var x13 uint64 + _, x13 = bits.Add64(x1, x8, uint64(0x0)) + var x14 uint64 + var x15 uint64 + x14, x15 = bits.Add64(uint64(0x0), x10, uint64(fiatScalarUint1(x13))) + var x16 uint64 + var x17 uint64 + x16, x17 = bits.Add64(x14, arg1[1], uint64(0x0)) + var x18 uint64 + _, x18 = bits.Mul64(x16, 0xd2b51da312547e1b) + var x20 uint64 + var x21 uint64 + x21, x20 = bits.Mul64(x18, 0x1000000000000000) + var x22 uint64 + var x23 uint64 + x23, x22 = bits.Mul64(x18, 0x14def9dea2f79cd6) + var x24 uint64 + var x25 uint64 + x25, x24 = bits.Mul64(x18, 0x5812631a5cf5d3ed) + var x26 uint64 + var x27 uint64 + x26, x27 = bits.Add64(x25, x22, uint64(0x0)) + var x29 uint64 + _, x29 = bits.Add64(x16, x24, uint64(0x0)) + var x30 uint64 + var x31 uint64 + x30, x31 = bits.Add64((uint64(fiatScalarUint1(x17)) + (uint64(fiatScalarUint1(x15)) + (uint64(fiatScalarUint1(x11)) + x7))), x26, uint64(fiatScalarUint1(x29))) + var x32 uint64 + var x33 uint64 + x32, x33 = bits.Add64(x4, (uint64(fiatScalarUint1(x27)) + x23), uint64(fiatScalarUint1(x31))) + var x34 uint64 + var x35 uint64 + x34, x35 = bits.Add64(x5, x20, uint64(fiatScalarUint1(x33))) + var x36 uint64 + var x37 uint64 + x36, x37 = bits.Add64(x30, arg1[2], uint64(0x0)) + var x38 uint64 + var x39 uint64 + x38, x39 = bits.Add64(x32, uint64(0x0), uint64(fiatScalarUint1(x37))) + var x40 uint64 + var x41 uint64 + x40, x41 = bits.Add64(x34, uint64(0x0), uint64(fiatScalarUint1(x39))) + var x42 uint64 + _, x42 = bits.Mul64(x36, 0xd2b51da312547e1b) + var x44 uint64 + var x45 uint64 + x45, x44 = bits.Mul64(x42, 0x1000000000000000) + var x46 uint64 + var x47 uint64 + x47, x46 = bits.Mul64(x42, 0x14def9dea2f79cd6) + var x48 uint64 + var x49 uint64 + x49, x48 = bits.Mul64(x42, 0x5812631a5cf5d3ed) + var x50 uint64 + var x51 uint64 + x50, x51 = bits.Add64(x49, x46, uint64(0x0)) + var x53 uint64 + _, x53 = bits.Add64(x36, x48, uint64(0x0)) + var x54 uint64 + var x55 uint64 + x54, x55 = bits.Add64(x38, x50, uint64(fiatScalarUint1(x53))) + var x56 uint64 + var x57 uint64 + x56, x57 = bits.Add64(x40, (uint64(fiatScalarUint1(x51)) + x47), uint64(fiatScalarUint1(x55))) + var x58 uint64 + var x59 uint64 + x58, x59 = bits.Add64((uint64(fiatScalarUint1(x41)) + (uint64(fiatScalarUint1(x35)) + x21)), x44, uint64(fiatScalarUint1(x57))) + var x60 uint64 + var x61 uint64 + x60, x61 = bits.Add64(x54, arg1[3], uint64(0x0)) + var x62 uint64 + var x63 uint64 + x62, x63 = bits.Add64(x56, uint64(0x0), uint64(fiatScalarUint1(x61))) + var x64 uint64 + var x65 uint64 + x64, x65 = bits.Add64(x58, uint64(0x0), uint64(fiatScalarUint1(x63))) + var x66 uint64 + _, x66 = bits.Mul64(x60, 0xd2b51da312547e1b) + var x68 uint64 + var x69 uint64 + x69, x68 = bits.Mul64(x66, 0x1000000000000000) + var x70 uint64 + var x71 uint64 + x71, x70 = bits.Mul64(x66, 0x14def9dea2f79cd6) + var x72 uint64 + var x73 uint64 + x73, x72 = bits.Mul64(x66, 
0x5812631a5cf5d3ed) + var x74 uint64 + var x75 uint64 + x74, x75 = bits.Add64(x73, x70, uint64(0x0)) + var x77 uint64 + _, x77 = bits.Add64(x60, x72, uint64(0x0)) + var x78 uint64 + var x79 uint64 + x78, x79 = bits.Add64(x62, x74, uint64(fiatScalarUint1(x77))) + var x80 uint64 + var x81 uint64 + x80, x81 = bits.Add64(x64, (uint64(fiatScalarUint1(x75)) + x71), uint64(fiatScalarUint1(x79))) + var x82 uint64 + var x83 uint64 + x82, x83 = bits.Add64((uint64(fiatScalarUint1(x65)) + (uint64(fiatScalarUint1(x59)) + x45)), x68, uint64(fiatScalarUint1(x81))) + x84 := (uint64(fiatScalarUint1(x83)) + x69) + var x85 uint64 + var x86 uint64 + x85, x86 = bits.Sub64(x78, 0x5812631a5cf5d3ed, uint64(0x0)) + var x87 uint64 + var x88 uint64 + x87, x88 = bits.Sub64(x80, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x86))) + var x89 uint64 + var x90 uint64 + x89, x90 = bits.Sub64(x82, uint64(0x0), uint64(fiatScalarUint1(x88))) + var x91 uint64 + var x92 uint64 + x91, x92 = bits.Sub64(x84, 0x1000000000000000, uint64(fiatScalarUint1(x90))) + var x94 uint64 + _, x94 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(fiatScalarUint1(x92))) + var x95 uint64 + fiatScalarCmovznzU64(&x95, fiatScalarUint1(x94), x85, x78) + var x96 uint64 + fiatScalarCmovznzU64(&x96, fiatScalarUint1(x94), x87, x80) + var x97 uint64 + fiatScalarCmovznzU64(&x97, fiatScalarUint1(x94), x89, x82) + var x98 uint64 + fiatScalarCmovznzU64(&x98, fiatScalarUint1(x94), x91, x84) + out1[0] = x95 + out1[1] = x96 + out1[2] = x97 + out1[3] = x98 +} + +// fiatScalarToMontgomery translates a field element into the Montgomery domain. +// +// Preconditions: +// +// 0 ≤ eval arg1 < m +// +// Postconditions: +// +// eval (from_montgomery out1) mod m = eval arg1 mod m +// 0 ≤ eval out1 < m +func fiatScalarToMontgomery(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarNonMontgomeryDomainFieldElement) { + x1 := arg1[1] + x2 := arg1[2] + x3 := arg1[3] + x4 := arg1[0] + var x5 uint64 + var x6 uint64 + x6, x5 = bits.Mul64(x4, 0x399411b7c309a3d) + var x7 uint64 + var x8 uint64 + x8, x7 = bits.Mul64(x4, 0xceec73d217f5be65) + var x9 uint64 + var x10 uint64 + x10, x9 = bits.Mul64(x4, 0xd00e1ba768859347) + var x11 uint64 + var x12 uint64 + x12, x11 = bits.Mul64(x4, 0xa40611e3449c0f01) + var x13 uint64 + var x14 uint64 + x13, x14 = bits.Add64(x12, x9, uint64(0x0)) + var x15 uint64 + var x16 uint64 + x15, x16 = bits.Add64(x10, x7, uint64(fiatScalarUint1(x14))) + var x17 uint64 + var x18 uint64 + x17, x18 = bits.Add64(x8, x5, uint64(fiatScalarUint1(x16))) + var x19 uint64 + _, x19 = bits.Mul64(x11, 0xd2b51da312547e1b) + var x21 uint64 + var x22 uint64 + x22, x21 = bits.Mul64(x19, 0x1000000000000000) + var x23 uint64 + var x24 uint64 + x24, x23 = bits.Mul64(x19, 0x14def9dea2f79cd6) + var x25 uint64 + var x26 uint64 + x26, x25 = bits.Mul64(x19, 0x5812631a5cf5d3ed) + var x27 uint64 + var x28 uint64 + x27, x28 = bits.Add64(x26, x23, uint64(0x0)) + var x30 uint64 + _, x30 = bits.Add64(x11, x25, uint64(0x0)) + var x31 uint64 + var x32 uint64 + x31, x32 = bits.Add64(x13, x27, uint64(fiatScalarUint1(x30))) + var x33 uint64 + var x34 uint64 + x33, x34 = bits.Add64(x15, (uint64(fiatScalarUint1(x28)) + x24), uint64(fiatScalarUint1(x32))) + var x35 uint64 + var x36 uint64 + x35, x36 = bits.Add64(x17, x21, uint64(fiatScalarUint1(x34))) + var x37 uint64 + var x38 uint64 + x38, x37 = bits.Mul64(x1, 0x399411b7c309a3d) + var x39 uint64 + var x40 uint64 + x40, x39 = bits.Mul64(x1, 0xceec73d217f5be65) + var x41 uint64 + var x42 uint64 + x42, x41 = bits.Mul64(x1, 0xd00e1ba768859347) + 
var x43 uint64 + var x44 uint64 + x44, x43 = bits.Mul64(x1, 0xa40611e3449c0f01) + var x45 uint64 + var x46 uint64 + x45, x46 = bits.Add64(x44, x41, uint64(0x0)) + var x47 uint64 + var x48 uint64 + x47, x48 = bits.Add64(x42, x39, uint64(fiatScalarUint1(x46))) + var x49 uint64 + var x50 uint64 + x49, x50 = bits.Add64(x40, x37, uint64(fiatScalarUint1(x48))) + var x51 uint64 + var x52 uint64 + x51, x52 = bits.Add64(x31, x43, uint64(0x0)) + var x53 uint64 + var x54 uint64 + x53, x54 = bits.Add64(x33, x45, uint64(fiatScalarUint1(x52))) + var x55 uint64 + var x56 uint64 + x55, x56 = bits.Add64(x35, x47, uint64(fiatScalarUint1(x54))) + var x57 uint64 + var x58 uint64 + x57, x58 = bits.Add64(((uint64(fiatScalarUint1(x36)) + (uint64(fiatScalarUint1(x18)) + x6)) + x22), x49, uint64(fiatScalarUint1(x56))) + var x59 uint64 + _, x59 = bits.Mul64(x51, 0xd2b51da312547e1b) + var x61 uint64 + var x62 uint64 + x62, x61 = bits.Mul64(x59, 0x1000000000000000) + var x63 uint64 + var x64 uint64 + x64, x63 = bits.Mul64(x59, 0x14def9dea2f79cd6) + var x65 uint64 + var x66 uint64 + x66, x65 = bits.Mul64(x59, 0x5812631a5cf5d3ed) + var x67 uint64 + var x68 uint64 + x67, x68 = bits.Add64(x66, x63, uint64(0x0)) + var x70 uint64 + _, x70 = bits.Add64(x51, x65, uint64(0x0)) + var x71 uint64 + var x72 uint64 + x71, x72 = bits.Add64(x53, x67, uint64(fiatScalarUint1(x70))) + var x73 uint64 + var x74 uint64 + x73, x74 = bits.Add64(x55, (uint64(fiatScalarUint1(x68)) + x64), uint64(fiatScalarUint1(x72))) + var x75 uint64 + var x76 uint64 + x75, x76 = bits.Add64(x57, x61, uint64(fiatScalarUint1(x74))) + var x77 uint64 + var x78 uint64 + x78, x77 = bits.Mul64(x2, 0x399411b7c309a3d) + var x79 uint64 + var x80 uint64 + x80, x79 = bits.Mul64(x2, 0xceec73d217f5be65) + var x81 uint64 + var x82 uint64 + x82, x81 = bits.Mul64(x2, 0xd00e1ba768859347) + var x83 uint64 + var x84 uint64 + x84, x83 = bits.Mul64(x2, 0xa40611e3449c0f01) + var x85 uint64 + var x86 uint64 + x85, x86 = bits.Add64(x84, x81, uint64(0x0)) + var x87 uint64 + var x88 uint64 + x87, x88 = bits.Add64(x82, x79, uint64(fiatScalarUint1(x86))) + var x89 uint64 + var x90 uint64 + x89, x90 = bits.Add64(x80, x77, uint64(fiatScalarUint1(x88))) + var x91 uint64 + var x92 uint64 + x91, x92 = bits.Add64(x71, x83, uint64(0x0)) + var x93 uint64 + var x94 uint64 + x93, x94 = bits.Add64(x73, x85, uint64(fiatScalarUint1(x92))) + var x95 uint64 + var x96 uint64 + x95, x96 = bits.Add64(x75, x87, uint64(fiatScalarUint1(x94))) + var x97 uint64 + var x98 uint64 + x97, x98 = bits.Add64(((uint64(fiatScalarUint1(x76)) + (uint64(fiatScalarUint1(x58)) + (uint64(fiatScalarUint1(x50)) + x38))) + x62), x89, uint64(fiatScalarUint1(x96))) + var x99 uint64 + _, x99 = bits.Mul64(x91, 0xd2b51da312547e1b) + var x101 uint64 + var x102 uint64 + x102, x101 = bits.Mul64(x99, 0x1000000000000000) + var x103 uint64 + var x104 uint64 + x104, x103 = bits.Mul64(x99, 0x14def9dea2f79cd6) + var x105 uint64 + var x106 uint64 + x106, x105 = bits.Mul64(x99, 0x5812631a5cf5d3ed) + var x107 uint64 + var x108 uint64 + x107, x108 = bits.Add64(x106, x103, uint64(0x0)) + var x110 uint64 + _, x110 = bits.Add64(x91, x105, uint64(0x0)) + var x111 uint64 + var x112 uint64 + x111, x112 = bits.Add64(x93, x107, uint64(fiatScalarUint1(x110))) + var x113 uint64 + var x114 uint64 + x113, x114 = bits.Add64(x95, (uint64(fiatScalarUint1(x108)) + x104), uint64(fiatScalarUint1(x112))) + var x115 uint64 + var x116 uint64 + x115, x116 = bits.Add64(x97, x101, uint64(fiatScalarUint1(x114))) + var x117 uint64 + var x118 uint64 + x118, x117 = 
bits.Mul64(x3, 0x399411b7c309a3d) + var x119 uint64 + var x120 uint64 + x120, x119 = bits.Mul64(x3, 0xceec73d217f5be65) + var x121 uint64 + var x122 uint64 + x122, x121 = bits.Mul64(x3, 0xd00e1ba768859347) + var x123 uint64 + var x124 uint64 + x124, x123 = bits.Mul64(x3, 0xa40611e3449c0f01) + var x125 uint64 + var x126 uint64 + x125, x126 = bits.Add64(x124, x121, uint64(0x0)) + var x127 uint64 + var x128 uint64 + x127, x128 = bits.Add64(x122, x119, uint64(fiatScalarUint1(x126))) + var x129 uint64 + var x130 uint64 + x129, x130 = bits.Add64(x120, x117, uint64(fiatScalarUint1(x128))) + var x131 uint64 + var x132 uint64 + x131, x132 = bits.Add64(x111, x123, uint64(0x0)) + var x133 uint64 + var x134 uint64 + x133, x134 = bits.Add64(x113, x125, uint64(fiatScalarUint1(x132))) + var x135 uint64 + var x136 uint64 + x135, x136 = bits.Add64(x115, x127, uint64(fiatScalarUint1(x134))) + var x137 uint64 + var x138 uint64 + x137, x138 = bits.Add64(((uint64(fiatScalarUint1(x116)) + (uint64(fiatScalarUint1(x98)) + (uint64(fiatScalarUint1(x90)) + x78))) + x102), x129, uint64(fiatScalarUint1(x136))) + var x139 uint64 + _, x139 = bits.Mul64(x131, 0xd2b51da312547e1b) + var x141 uint64 + var x142 uint64 + x142, x141 = bits.Mul64(x139, 0x1000000000000000) + var x143 uint64 + var x144 uint64 + x144, x143 = bits.Mul64(x139, 0x14def9dea2f79cd6) + var x145 uint64 + var x146 uint64 + x146, x145 = bits.Mul64(x139, 0x5812631a5cf5d3ed) + var x147 uint64 + var x148 uint64 + x147, x148 = bits.Add64(x146, x143, uint64(0x0)) + var x150 uint64 + _, x150 = bits.Add64(x131, x145, uint64(0x0)) + var x151 uint64 + var x152 uint64 + x151, x152 = bits.Add64(x133, x147, uint64(fiatScalarUint1(x150))) + var x153 uint64 + var x154 uint64 + x153, x154 = bits.Add64(x135, (uint64(fiatScalarUint1(x148)) + x144), uint64(fiatScalarUint1(x152))) + var x155 uint64 + var x156 uint64 + x155, x156 = bits.Add64(x137, x141, uint64(fiatScalarUint1(x154))) + x157 := ((uint64(fiatScalarUint1(x156)) + (uint64(fiatScalarUint1(x138)) + (uint64(fiatScalarUint1(x130)) + x118))) + x142) + var x158 uint64 + var x159 uint64 + x158, x159 = bits.Sub64(x151, 0x5812631a5cf5d3ed, uint64(0x0)) + var x160 uint64 + var x161 uint64 + x160, x161 = bits.Sub64(x153, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x159))) + var x162 uint64 + var x163 uint64 + x162, x163 = bits.Sub64(x155, uint64(0x0), uint64(fiatScalarUint1(x161))) + var x164 uint64 + var x165 uint64 + x164, x165 = bits.Sub64(x157, 0x1000000000000000, uint64(fiatScalarUint1(x163))) + var x167 uint64 + _, x167 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(fiatScalarUint1(x165))) + var x168 uint64 + fiatScalarCmovznzU64(&x168, fiatScalarUint1(x167), x158, x151) + var x169 uint64 + fiatScalarCmovznzU64(&x169, fiatScalarUint1(x167), x160, x153) + var x170 uint64 + fiatScalarCmovznzU64(&x170, fiatScalarUint1(x167), x162, x155) + var x171 uint64 + fiatScalarCmovznzU64(&x171, fiatScalarUint1(x167), x164, x157) + out1[0] = x168 + out1[1] = x169 + out1[2] = x170 + out1[3] = x171 +} + +// fiatScalarToBytes serializes a field element NOT in the Montgomery domain to bytes in little-endian order. 
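+// The Montgomery constants in fiatScalarToMontgomery and fiatScalarFromMontgomery
+// above can be cross-checked with math/big (a sketch under the usual Montgomery
+// conventions, not part of the generated code):
+//
+//	R := new(big.Int).Lsh(big.NewInt(1), 256)         // R = 2^256 for four 64-bit limbs
+//	R2 := new(big.Int).Mod(new(big.Int).Mul(R, R), m) // limbs should match the 0x399411b7c309a3d... multiplier
+//	w := new(big.Int).Lsh(big.NewInt(1), 64)
+//	mp := new(big.Int).Sub(w, new(big.Int).ModInverse(m, w)) // should print d2b51da312547e1b
+//
+// fiatScalarToMontgomery is then a Montgomery multiplication by R^2 mod m (so
+// the result is a*R^2*R^-1 = a*R), while fiatScalarFromMontgomery skips the
+// multiply and performs only the four word-by-word reduction rounds, which
+// multiplies by R^-1.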
+// +// Preconditions: +// +// 0 ≤ eval arg1 < m +// +// Postconditions: +// +// out1 = map (λ x, ⌊((eval arg1 mod m) mod 2^(8 * (x + 1))) / 2^(8 * x)⌋) [0..31] +// +// Input Bounds: +// +// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0x1fffffffffffffff]] +// +// Output Bounds: +// +// out1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x1f]] +func fiatScalarToBytes(out1 *[32]uint8, arg1 *[4]uint64) { + x1 := arg1[3] + x2 := arg1[2] + x3 := arg1[1] + x4 := arg1[0] + x5 := (uint8(x4) & 0xff) + x6 := (x4 >> 8) + x7 := (uint8(x6) & 0xff) + x8 := (x6 >> 8) + x9 := (uint8(x8) & 0xff) + x10 := (x8 >> 8) + x11 := (uint8(x10) & 0xff) + x12 := (x10 >> 8) + x13 := (uint8(x12) & 0xff) + x14 := (x12 >> 8) + x15 := (uint8(x14) & 0xff) + x16 := (x14 >> 8) + x17 := (uint8(x16) & 0xff) + x18 := uint8((x16 >> 8)) + x19 := (uint8(x3) & 0xff) + x20 := (x3 >> 8) + x21 := (uint8(x20) & 0xff) + x22 := (x20 >> 8) + x23 := (uint8(x22) & 0xff) + x24 := (x22 >> 8) + x25 := (uint8(x24) & 0xff) + x26 := (x24 >> 8) + x27 := (uint8(x26) & 0xff) + x28 := (x26 >> 8) + x29 := (uint8(x28) & 0xff) + x30 := (x28 >> 8) + x31 := (uint8(x30) & 0xff) + x32 := uint8((x30 >> 8)) + x33 := (uint8(x2) & 0xff) + x34 := (x2 >> 8) + x35 := (uint8(x34) & 0xff) + x36 := (x34 >> 8) + x37 := (uint8(x36) & 0xff) + x38 := (x36 >> 8) + x39 := (uint8(x38) & 0xff) + x40 := (x38 >> 8) + x41 := (uint8(x40) & 0xff) + x42 := (x40 >> 8) + x43 := (uint8(x42) & 0xff) + x44 := (x42 >> 8) + x45 := (uint8(x44) & 0xff) + x46 := uint8((x44 >> 8)) + x47 := (uint8(x1) & 0xff) + x48 := (x1 >> 8) + x49 := (uint8(x48) & 0xff) + x50 := (x48 >> 8) + x51 := (uint8(x50) & 0xff) + x52 := (x50 >> 8) + x53 := (uint8(x52) & 0xff) + x54 := (x52 >> 8) + x55 := (uint8(x54) & 0xff) + x56 := (x54 >> 8) + x57 := (uint8(x56) & 0xff) + x58 := (x56 >> 8) + x59 := (uint8(x58) & 0xff) + x60 := uint8((x58 >> 8)) + out1[0] = x5 + out1[1] = x7 + out1[2] = x9 + out1[3] = x11 + out1[4] = x13 + out1[5] = x15 + out1[6] = x17 + out1[7] = x18 + out1[8] = x19 + out1[9] = x21 + out1[10] = x23 + out1[11] = x25 + out1[12] = x27 + out1[13] = x29 + out1[14] = x31 + out1[15] = x32 + out1[16] = x33 + out1[17] = x35 + out1[18] = x37 + out1[19] = x39 + out1[20] = x41 + out1[21] = x43 + out1[22] = x45 + out1[23] = x46 + out1[24] = x47 + out1[25] = x49 + out1[26] = x51 + out1[27] = x53 + out1[28] = x55 + out1[29] = x57 + out1[30] = x59 + out1[31] = x60 +} + +// fiatScalarFromBytes deserializes a field element NOT in the Montgomery domain from bytes in little-endian order. 
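+// fiatScalarToBytes above and fiatScalarFromBytes below are, behaviorally,
+// just little-endian limb (de)serialization; with encoding/binary the same
+// round trip is (a sketch, assuming the [4]uint64 limb layout used in this file):
+//
+//	var b [32]byte
+//	for i, limb := range limbs { // limbs is a [4]uint64
+//		binary.LittleEndian.PutUint64(b[8*i:], limb)
+//	}
+//	for i := range limbs {
+//		limbs[i] = binary.LittleEndian.Uint64(b[8*i:])
+//	}
+//
+// The generated form spells out every shift and mask so the proof tooling can
+// track the per-byte bounds listed for these functions; note the top byte is
+// bounded by 0x1f because m < 2^253.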
+// +// Preconditions: +// +// 0 ≤ bytes_eval arg1 < m +// +// Postconditions: +// +// eval out1 mod m = bytes_eval arg1 mod m +// 0 ≤ eval out1 < m +// +// Input Bounds: +// +// arg1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x1f]] +// +// Output Bounds: +// +// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0x1fffffffffffffff]] +func fiatScalarFromBytes(out1 *[4]uint64, arg1 *[32]uint8) { + x1 := (uint64(arg1[31]) << 56) + x2 := (uint64(arg1[30]) << 48) + x3 := (uint64(arg1[29]) << 40) + x4 := (uint64(arg1[28]) << 32) + x5 := (uint64(arg1[27]) << 24) + x6 := (uint64(arg1[26]) << 16) + x7 := (uint64(arg1[25]) << 8) + x8 := arg1[24] + x9 := (uint64(arg1[23]) << 56) + x10 := (uint64(arg1[22]) << 48) + x11 := (uint64(arg1[21]) << 40) + x12 := (uint64(arg1[20]) << 32) + x13 := (uint64(arg1[19]) << 24) + x14 := (uint64(arg1[18]) << 16) + x15 := (uint64(arg1[17]) << 8) + x16 := arg1[16] + x17 := (uint64(arg1[15]) << 56) + x18 := (uint64(arg1[14]) << 48) + x19 := (uint64(arg1[13]) << 40) + x20 := (uint64(arg1[12]) << 32) + x21 := (uint64(arg1[11]) << 24) + x22 := (uint64(arg1[10]) << 16) + x23 := (uint64(arg1[9]) << 8) + x24 := arg1[8] + x25 := (uint64(arg1[7]) << 56) + x26 := (uint64(arg1[6]) << 48) + x27 := (uint64(arg1[5]) << 40) + x28 := (uint64(arg1[4]) << 32) + x29 := (uint64(arg1[3]) << 24) + x30 := (uint64(arg1[2]) << 16) + x31 := (uint64(arg1[1]) << 8) + x32 := arg1[0] + x33 := (x31 + uint64(x32)) + x34 := (x30 + x33) + x35 := (x29 + x34) + x36 := (x28 + x35) + x37 := (x27 + x36) + x38 := (x26 + x37) + x39 := (x25 + x38) + x40 := (x23 + uint64(x24)) + x41 := (x22 + x40) + x42 := (x21 + x41) + x43 := (x20 + x42) + x44 := (x19 + x43) + x45 := (x18 + x44) + x46 := (x17 + x45) + x47 := (x15 + uint64(x16)) + x48 := (x14 + x47) + x49 := (x13 + x48) + x50 := (x12 + x49) + x51 := (x11 + x50) + x52 := (x10 + x51) + x53 := (x9 + x52) + x54 := (x7 + uint64(x8)) + x55 := (x6 + x54) + x56 := (x5 + x55) + x57 := (x4 + x56) + x58 := (x3 + x57) + x59 := (x2 + x58) + x60 := (x1 + x59) + out1[0] = x39 + out1[1] = x46 + out1[2] = x53 + out1[3] = x60 +} diff --git a/vendor/filippo.io/edwards25519/scalarmult.go b/vendor/filippo.io/edwards25519/scalarmult.go new file mode 100644 index 0000000000..f7ca3cef99 --- /dev/null +++ b/vendor/filippo.io/edwards25519/scalarmult.go @@ -0,0 +1,214 @@ +// Copyright (c) 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edwards25519 + +import "sync" + +// basepointTable is a set of 32 affineLookupTables, where table i is generated +// from 256i * basepoint. It is precomputed the first time it's used. 
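+// Each of the 32 tables is filled from p and then p is doubled eight times,
+// multiplying it by 2^8 = 256; table i therefore holds small multiples of
+// 256^i * B = 16^(2i) * B, one table per pair of radix-16 scalar digits
+// consumed by ScalarBaseMult below.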
+func basepointTable() *[32]affineLookupTable { + basepointTablePrecomp.initOnce.Do(func() { + p := NewGeneratorPoint() + for i := 0; i < 32; i++ { + basepointTablePrecomp.table[i].FromP3(p) + for j := 0; j < 8; j++ { + p.Add(p, p) + } + } + }) + return &basepointTablePrecomp.table +} + +var basepointTablePrecomp struct { + table [32]affineLookupTable + initOnce sync.Once +} + +// ScalarBaseMult sets v = x * B, where B is the canonical generator, and +// returns v. +// +// The scalar multiplication is done in constant time. +func (v *Point) ScalarBaseMult(x *Scalar) *Point { + basepointTable := basepointTable() + + // Write x = sum(x_i * 16^i) so x*B = sum( B*x_i*16^i ) + // as described in the Ed25519 paper + // + // Group even and odd coefficients + // x*B = x_0*16^0*B + x_2*16^2*B + ... + x_62*16^62*B + // + x_1*16^1*B + x_3*16^3*B + ... + x_63*16^63*B + // x*B = x_0*16^0*B + x_2*16^2*B + ... + x_62*16^62*B + // + 16*( x_1*16^0*B + x_3*16^2*B + ... + x_63*16^62*B) + // + // We use a lookup table for each i to get x_i*16^(2*i)*B + // and do four doublings to multiply by 16. + digits := x.signedRadix16() + + multiple := &affineCached{} + tmp1 := &projP1xP1{} + tmp2 := &projP2{} + + // Accumulate the odd components first + v.Set(NewIdentityPoint()) + for i := 1; i < 64; i += 2 { + basepointTable[i/2].SelectInto(multiple, digits[i]) + tmp1.AddAffine(v, multiple) + v.fromP1xP1(tmp1) + } + + // Multiply by 16 + tmp2.FromP3(v) // tmp2 = v in P2 coords + tmp1.Double(tmp2) // tmp1 = 2*v in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 2*v in P2 coords + tmp1.Double(tmp2) // tmp1 = 4*v in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 4*v in P2 coords + tmp1.Double(tmp2) // tmp1 = 8*v in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 8*v in P2 coords + tmp1.Double(tmp2) // tmp1 = 16*v in P1xP1 coords + v.fromP1xP1(tmp1) // now v = 16*(odd components) + + // Accumulate the even components + for i := 0; i < 64; i += 2 { + basepointTable[i/2].SelectInto(multiple, digits[i]) + tmp1.AddAffine(v, multiple) + v.fromP1xP1(tmp1) + } + + return v +} + +// ScalarMult sets v = x * q, and returns v. +// +// The scalar multiplication is done in constant time. +func (v *Point) ScalarMult(x *Scalar, q *Point) *Point { + checkInitialized(q) + + var table projLookupTable + table.FromP3(q) + + // Write x = sum(x_i * 16^i) + // so x*Q = sum( Q*x_i*16^i ) + // = Q*x_0 + 16*(Q*x_1 + 16*( ... + Q*x_63) ... 
) + // <------compute inside out--------- + // + // We use the lookup table to get the x_i*Q values + // and do four doublings to compute 16*Q + digits := x.signedRadix16() + + // Unwrap first loop iteration to save computing 16*identity + multiple := &projCached{} + tmp1 := &projP1xP1{} + tmp2 := &projP2{} + table.SelectInto(multiple, digits[63]) + + v.Set(NewIdentityPoint()) + tmp1.Add(v, multiple) // tmp1 = x_63*Q in P1xP1 coords + for i := 62; i >= 0; i-- { + tmp2.FromP1xP1(tmp1) // tmp2 = (prev) in P2 coords + tmp1.Double(tmp2) // tmp1 = 2*(prev) in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 2*(prev) in P2 coords + tmp1.Double(tmp2) // tmp1 = 4*(prev) in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 4*(prev) in P2 coords + tmp1.Double(tmp2) // tmp1 = 8*(prev) in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 8*(prev) in P2 coords + tmp1.Double(tmp2) // tmp1 = 16*(prev) in P1xP1 coords + v.fromP1xP1(tmp1) // v = 16*(prev) in P3 coords + table.SelectInto(multiple, digits[i]) + tmp1.Add(v, multiple) // tmp1 = x_i*Q + 16*(prev) in P1xP1 coords + } + v.fromP1xP1(tmp1) + return v +} + +// basepointNafTable is the nafLookupTable8 for the basepoint. +// It is precomputed the first time it's used. +func basepointNafTable() *nafLookupTable8 { + basepointNafTablePrecomp.initOnce.Do(func() { + basepointNafTablePrecomp.table.FromP3(NewGeneratorPoint()) + }) + return &basepointNafTablePrecomp.table +} + +var basepointNafTablePrecomp struct { + table nafLookupTable8 + initOnce sync.Once +} + +// VarTimeDoubleScalarBaseMult sets v = a * A + b * B, where B is the canonical +// generator, and returns v. +// +// Execution time depends on the inputs. +func (v *Point) VarTimeDoubleScalarBaseMult(a *Scalar, A *Point, b *Scalar) *Point { + checkInitialized(A) + + // Similarly to the single variable-base approach, we compute + // digits and use them with a lookup table. However, because + // we are allowed to do variable-time operations, we don't + // need constant-time lookups or constant-time digit + // computations. + // + // So we use a non-adjacent form of some width w instead of + // radix 16. This is like a binary representation (one digit + // for each binary place) but we allow the digits to grow in + // magnitude up to 2^{w-1} so that the nonzero digits are as + // sparse as possible. Intuitively, this "condenses" the + // "mass" of the scalar onto sparse coefficients (meaning + // fewer additions). + + basepointNafTable := basepointNafTable() + var aTable nafLookupTable5 + aTable.FromP3(A) + // Because the basepoint is fixed, we can use a wider NAF + // corresponding to a bigger table. + aNaf := a.nonAdjacentForm(5) + bNaf := b.nonAdjacentForm(8) + + // Find the first nonzero coefficient. + i := 255 + for j := i; j >= 0; j-- { + if aNaf[j] != 0 || bNaf[j] != 0 { + break + } + } + + multA := &projCached{} + multB := &affineCached{} + tmp1 := &projP1xP1{} + tmp2 := &projP2{} + tmp2.Zero() + + // Move from high to low bits, doubling the accumulator + // at each iteration and checking whether there is a nonzero + // coefficient to look up a multiple of. + for ; i >= 0; i-- { + tmp1.Double(tmp2) + + // Only update v if we have a nonzero coeff to add in. 
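+		// tmp1 always carries the doubled accumulator in P1xP1 coordinates;
+		// v is refreshed from it only when a lookup result is about to be
+		// added or subtracted, so iterations where both NAF digits are zero
+		// skip the extra P1xP1 -> P3 conversion.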
+ if aNaf[i] > 0 { + v.fromP1xP1(tmp1) + aTable.SelectInto(multA, aNaf[i]) + tmp1.Add(v, multA) + } else if aNaf[i] < 0 { + v.fromP1xP1(tmp1) + aTable.SelectInto(multA, -aNaf[i]) + tmp1.Sub(v, multA) + } + + if bNaf[i] > 0 { + v.fromP1xP1(tmp1) + basepointNafTable.SelectInto(multB, bNaf[i]) + tmp1.AddAffine(v, multB) + } else if bNaf[i] < 0 { + v.fromP1xP1(tmp1) + basepointNafTable.SelectInto(multB, -bNaf[i]) + tmp1.SubAffine(v, multB) + } + + tmp2.FromP1xP1(tmp1) + } + + v.fromP2(tmp2) + return v +} diff --git a/vendor/filippo.io/edwards25519/tables.go b/vendor/filippo.io/edwards25519/tables.go new file mode 100644 index 0000000000..83234bbc0f --- /dev/null +++ b/vendor/filippo.io/edwards25519/tables.go @@ -0,0 +1,129 @@ +// Copyright (c) 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edwards25519 + +import ( + "crypto/subtle" +) + +// A dynamic lookup table for variable-base, constant-time scalar muls. +type projLookupTable struct { + points [8]projCached +} + +// A precomputed lookup table for fixed-base, constant-time scalar muls. +type affineLookupTable struct { + points [8]affineCached +} + +// A dynamic lookup table for variable-base, variable-time scalar muls. +type nafLookupTable5 struct { + points [8]projCached +} + +// A precomputed lookup table for fixed-base, variable-time scalar muls. +type nafLookupTable8 struct { + points [64]affineCached +} + +// Constructors. + +// Builds a lookup table at runtime. Fast. +func (v *projLookupTable) FromP3(q *Point) { + // Goal: v.points[i] = (i+1)*Q, i.e., Q, 2Q, ..., 8Q + // This allows lookup of -8Q, ..., -Q, 0, Q, ..., 8Q + v.points[0].FromP3(q) + tmpP3 := Point{} + tmpP1xP1 := projP1xP1{} + for i := 0; i < 7; i++ { + // Compute (i+1)*Q as Q + i*Q and convert to a projCached + // This is needlessly complicated because the API has explicit + // receivers instead of creating stack objects and relying on RVO + v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(q, &v.points[i]))) + } +} + +// This is not optimised for speed; fixed-base tables should be precomputed. +func (v *affineLookupTable) FromP3(q *Point) { + // Goal: v.points[i] = (i+1)*Q, i.e., Q, 2Q, ..., 8Q + // This allows lookup of -8Q, ..., -Q, 0, Q, ..., 8Q + v.points[0].FromP3(q) + tmpP3 := Point{} + tmpP1xP1 := projP1xP1{} + for i := 0; i < 7; i++ { + // Compute (i+1)*Q as Q + i*Q and convert to affineCached + v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(q, &v.points[i]))) + } +} + +// Builds a lookup table at runtime. Fast. +func (v *nafLookupTable5) FromP3(q *Point) { + // Goal: v.points[i] = (2*i+1)*Q, i.e., Q, 3Q, 5Q, ..., 15Q + // This allows lookup of -15Q, ..., -3Q, -Q, 0, Q, 3Q, ..., 15Q + v.points[0].FromP3(q) + q2 := Point{} + q2.Add(q, q) + tmpP3 := Point{} + tmpP1xP1 := projP1xP1{} + for i := 0; i < 7; i++ { + v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(&q2, &v.points[i]))) + } +} + +// This is not optimised for speed; fixed-base tables should be precomputed. +func (v *nafLookupTable8) FromP3(q *Point) { + v.points[0].FromP3(q) + q2 := Point{} + q2.Add(q, q) + tmpP3 := Point{} + tmpP1xP1 := projP1xP1{} + for i := 0; i < 63; i++ { + v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(&q2, &v.points[i]))) + } +} + +// Selectors. + +// Set dest to x*Q, where -8 <= x <= 8, in constant time. 
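+// The branchless absolute value in the selectors below relies on x >> 7 being
+// an arithmetic shift on int8: for x = -3, xmask = -1 (all ones), so
+// (x + xmask) ^ xmask = (-4) ^ (-1) = 3, while for x >= 0 xmask = 0 and the
+// expression is the identity. The low bit of xmask then doubles as the sign
+// flag passed to CondNeg after the table scan.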
+func (v *projLookupTable) SelectInto(dest *projCached, x int8) { + // Compute xabs = |x| + xmask := x >> 7 + xabs := uint8((x + xmask) ^ xmask) + + dest.Zero() + for j := 1; j <= 8; j++ { + // Set dest = j*Q if |x| = j + cond := subtle.ConstantTimeByteEq(xabs, uint8(j)) + dest.Select(&v.points[j-1], dest, cond) + } + // Now dest = |x|*Q, conditionally negate to get x*Q + dest.CondNeg(int(xmask & 1)) +} + +// Set dest to x*Q, where -8 <= x <= 8, in constant time. +func (v *affineLookupTable) SelectInto(dest *affineCached, x int8) { + // Compute xabs = |x| + xmask := x >> 7 + xabs := uint8((x + xmask) ^ xmask) + + dest.Zero() + for j := 1; j <= 8; j++ { + // Set dest = j*Q if |x| = j + cond := subtle.ConstantTimeByteEq(xabs, uint8(j)) + dest.Select(&v.points[j-1], dest, cond) + } + // Now dest = |x|*Q, conditionally negate to get x*Q + dest.CondNeg(int(xmask & 1)) +} + +// Given odd x with 0 < x < 2^4, return x*Q (in variable time). +func (v *nafLookupTable5) SelectInto(dest *projCached, x int8) { + *dest = v.points[x/2] +} + +// Given odd x with 0 < x < 2^7, return x*Q (in variable time). +func (v *nafLookupTable8) SelectInto(dest *affineCached, x int8) { + *dest = v.points[x/2] +} diff --git a/vendor/github.com/4meepo/tagalign/.gitignore b/vendor/github.com/4meepo/tagalign/.gitignore index 1c6218ee29..bdff1c364d 100644 --- a/vendor/github.com/4meepo/tagalign/.gitignore +++ b/vendor/github.com/4meepo/tagalign/.gitignore @@ -17,7 +17,10 @@ *.test .vscode +<<<<<<< HEAD .idea/ +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) # Output of the go coverage tool, specifically when used with LiteIDE *.out diff --git a/vendor/github.com/4meepo/tagalign/.goreleaser.yml b/vendor/github.com/4meepo/tagalign/.goreleaser.yml index 37dfec7c88..b413f6eeb5 100644 --- a/vendor/github.com/4meepo/tagalign/.goreleaser.yml +++ b/vendor/github.com/4meepo/tagalign/.goreleaser.yml @@ -1,4 +1,8 @@ +<<<<<<< HEAD version: 2 +======= +--- +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) project_name: tagalign release: @@ -29,4 +33,8 @@ builds: goarch: 386 - goos: freebsd goarch: arm64 +<<<<<<< HEAD main: ./cmd/tagalign/ +======= + main: ./cmd/tagalign/ +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/4meepo/tagalign/options.go b/vendor/github.com/4meepo/tagalign/options.go index 2a78592465..8e1fc5dc3a 100644 --- a/vendor/github.com/4meepo/tagalign/options.go +++ b/vendor/github.com/4meepo/tagalign/options.go @@ -2,6 +2,16 @@ package tagalign type Option func(*Helper) +<<<<<<< HEAD +======= +// WithMode specify the mode of tagalign. +func WithMode(mode Mode) Option { + return func(h *Helper) { + h.mode = mode + } +} + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // WithSort enable tags sort. // fixedOrder specify the order of tags, the other tags will be sorted by name. // Sory is disabled by default. 
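The WithMode option added on the WIP side above pairs with the Mode, Issue, and InlineFix plumbing introduced in tagalign.go below. A minimal integration sketch under that WIP-side API (the wrapper function and how it renders the returned issues are illustrative, not part of this patch):

package integration

import (
	"fmt"

	"github.com/4meepo/tagalign"
	"golang.org/x/tools/go/analysis"
)

// reportTagAlignIssues runs tagalign in golangci-lint mode, where Run returns
// the collected []Issue instead of reporting through the pass, leaving the
// caller to render or auto-apply the inline fixes.
func reportTagAlignIssues(pass *analysis.Pass) {
	issues := tagalign.Run(pass, tagalign.WithMode(tagalign.GolangciLintMode))
	for _, iss := range issues {
		fmt.Printf("%s: %s\n", iss.Pos, iss.Message)
	}
}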
diff --git a/vendor/github.com/4meepo/tagalign/tagalign.go b/vendor/github.com/4meepo/tagalign/tagalign.go index 8161a0aa7f..76b136b399 100644 --- a/vendor/github.com/4meepo/tagalign/tagalign.go +++ b/vendor/github.com/4meepo/tagalign/tagalign.go @@ -1,19 +1,42 @@ package tagalign import ( +<<<<<<< HEAD "cmp" "fmt" "go/ast" "go/token" "reflect" "slices" +======= + "fmt" + "go/ast" + "go/token" + "log" + "reflect" + "sort" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "strconv" "strings" "github.com/fatih/structtag" +<<<<<<< HEAD + "golang.org/x/tools/go/analysis" +) + +======= + "golang.org/x/tools/go/analysis" ) +type Mode int + +const ( + StandaloneMode Mode = iota + GolangciLintMode +) + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type Style int const ( @@ -36,6 +59,7 @@ func NewAnalyzer(options ...Option) *analysis.Analyzer { } } +<<<<<<< HEAD func Run(pass *analysis.Pass, options ...Option) { for _, f := range pass.Files { filename := getFilename(pass.Fset, f) @@ -44,6 +68,13 @@ func Run(pass *analysis.Pass, options ...Option) { } h := &Helper{ +======= +func Run(pass *analysis.Pass, options ...Option) []Issue { + var issues []Issue + for _, f := range pass.Files { + h := &Helper{ + mode: StandaloneMode, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) style: DefaultStyle, align: true, } @@ -58,19 +89,35 @@ func Run(pass *analysis.Pass, options ...Option) { if !h.align && !h.sort { // do nothing +<<<<<<< HEAD return +======= + return nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } ast.Inspect(f, func(n ast.Node) bool { h.find(pass, n) return true }) +<<<<<<< HEAD h.Process(pass) } } type Helper struct { +======= + h.Process(pass) + issues = append(issues, h.issues...) + } + return issues +} + +type Helper struct { + mode Mode + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) style Style align bool // whether enable tags align. @@ -79,6 +126,22 @@ type Helper struct { singleFields []*ast.Field consecutiveFieldsGroups [][]*ast.Field // fields in this group, must be consecutive in struct. +<<<<<<< HEAD +======= + issues []Issue +} + +// Issue is used to integrate with golangci-lint's inline auto fix. 
+type Issue struct { + Pos token.Position + Message string + InlineFix InlineFix +} +type InlineFix struct { + StartCol int // zero-based + Length int + NewString string +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (w *Helper) find(pass *analysis.Pass, n ast.Node) { @@ -138,6 +201,7 @@ func (w *Helper) find(pass *analysis.Pass, n ast.Node) { split() } +<<<<<<< HEAD func (w *Helper) report(pass *analysis.Pass, field *ast.Field, msg, replaceStr string) { pass.Report(analysis.Diagnostic{ Pos: field.Tag.Pos(), @@ -160,6 +224,44 @@ func (w *Helper) report(pass *analysis.Pass, field *ast.Field, msg, replaceStr s //nolint:gocognit,gocyclo,nestif func (w *Helper) Process(pass *analysis.Pass) { +======= +func (w *Helper) report(pass *analysis.Pass, field *ast.Field, startCol int, msg, replaceStr string) { + if w.mode == GolangciLintMode { + iss := Issue{ + Pos: pass.Fset.Position(field.Tag.Pos()), + Message: msg, + InlineFix: InlineFix{ + StartCol: startCol, + Length: len(field.Tag.Value), + NewString: replaceStr, + }, + } + w.issues = append(w.issues, iss) + } + + if w.mode == StandaloneMode { + pass.Report(analysis.Diagnostic{ + Pos: field.Tag.Pos(), + End: field.Tag.End(), + Message: msg, + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: msg, + TextEdits: []analysis.TextEdit{ + { + Pos: field.Tag.Pos(), + End: field.Tag.End(), + NewText: []byte(replaceStr), + }, + }, + }, + }, + }) + } +} + +func (w *Helper) Process(pass *analysis.Pass) { //nolint:gocognit +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // process grouped fields for _, fields := range w.consecutiveFieldsGroups { offsets := make([]int, len(fields)) @@ -185,7 +287,11 @@ func (w *Helper) Process(pass *analysis.Pass) { tag, err := strconv.Unquote(field.Tag.Value) if err != nil { // if tag value is not a valid string, report it directly +<<<<<<< HEAD w.report(pass, field, errTagValueSyntax, field.Tag.Value) +======= + w.report(pass, field, column, errTagValueSyntax, field.Tag.Value) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) fields = removeField(fields, i) continue } @@ -193,7 +299,11 @@ func (w *Helper) Process(pass *analysis.Pass) { tags, err := structtag.Parse(tag) if err != nil { // if tag value is not a valid struct tag, report it directly +<<<<<<< HEAD w.report(pass, field, err.Error(), field.Tag.Value) +======= + w.report(pass, field, column, err.Error(), field.Tag.Value) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) fields = removeField(fields, i) continue } @@ -206,7 +316,11 @@ func (w *Helper) Process(pass *analysis.Pass) { cp[i] = tag } notSortedTagsGroup = append(notSortedTagsGroup, cp) +<<<<<<< HEAD sortTags(w.fixedTagOrder, tags) +======= + sortBy(w.fixedTagOrder, tags) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } for _, t := range tags.Tags() { addKey(t.Key) @@ -217,7 +331,11 @@ func (w *Helper) Process(pass *analysis.Pass) { } if w.sort && StrictStyle == w.style { +<<<<<<< HEAD sortKeys(w.fixedTagOrder, uniqueKeys) +======= + sortAllKeys(w.fixedTagOrder, uniqueKeys) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) maxTagNum = len(uniqueKeys) } @@ -305,26 +423,45 @@ func (w *Helper) Process(pass *analysis.Pass) { msg := "tag is not aligned, should be: " + unquoteTag +<<<<<<< HEAD w.report(pass, field, msg, newTagValue) +======= + w.report(pass, field, offsets[i], msg, newTagValue) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } // process single fields for _, field := range w.singleFields { +<<<<<<< HEAD tag, err := 
strconv.Unquote(field.Tag.Value) if err != nil { w.report(pass, field, errTagValueSyntax, field.Tag.Value) +======= + column := pass.Fset.Position(field.Tag.Pos()).Column - 1 + tag, err := strconv.Unquote(field.Tag.Value) + if err != nil { + w.report(pass, field, column, errTagValueSyntax, field.Tag.Value) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) continue } tags, err := structtag.Parse(tag) if err != nil { +<<<<<<< HEAD w.report(pass, field, err.Error(), field.Tag.Value) +======= + w.report(pass, field, column, err.Error(), field.Tag.Value) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) continue } originalTags := append([]*structtag.Tag(nil), tags.Tags()...) if w.sort { +<<<<<<< HEAD sortTags(w.fixedTagOrder, tags) +======= + sortBy(w.fixedTagOrder, tags) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } newTagValue := fmt.Sprintf("`%s`", tags.String()) @@ -335,6 +472,7 @@ func (w *Helper) Process(pass *analysis.Pass) { msg := "tag is not aligned , should be: " + tags.String() +<<<<<<< HEAD w.report(pass, field, msg, newTagValue) } } @@ -370,12 +508,90 @@ func compareByFixedOrder(fixedOrder []string) func(a, b string) int { return cmp.Compare(oi, oj) } +======= + w.report(pass, field, column, msg, newTagValue) + } +} + +// Issues returns all issues found by the analyzer. +// It is used to integrate with golangci-lint. +func (w *Helper) Issues() []Issue { + log.Println("tagalign 's Issues() should only be called in golangci-lint mode") + return w.issues +} + +// sortBy sorts tags by fixed order. +// If a tag is not in the fixed order, it will be sorted by name. +func sortBy(fixedOrder []string, tags *structtag.Tags) { + // sort by fixed order + sort.Slice(tags.Tags(), func(i, j int) bool { + ti := tags.Tags()[i] + tj := tags.Tags()[j] + + oi := findIndex(fixedOrder, ti.Key) + oj := findIndex(fixedOrder, tj.Key) + + if oi == -1 && oj == -1 { + return ti.Key < tj.Key + } + + if oi == -1 { + return false + } + + if oj == -1 { + return true + } + + return oi < oj + }) +} + +func sortAllKeys(fixedOrder []string, keys []string) { + sort.Slice(keys, func(i, j int) bool { + oi := findIndex(fixedOrder, keys[i]) + oj := findIndex(fixedOrder, keys[j]) + + if oi == -1 && oj == -1 { + return keys[i] < keys[j] + } + + if oi == -1 { + return false + } + + if oj == -1 { + return true + } + + return oi < oj + }) +} + +func findIndex(s []string, e string) int { + for i, a := range s { + if a == e { + return i + } + } + return -1 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func alignFormat(length int) string { return "%" + fmt.Sprintf("-%ds", length) } +<<<<<<< HEAD +======= +func max(a, b int) int { + if a > b { + return a + } + return b +} + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func removeField(fields []*ast.Field, index int) []*ast.Field { if index < 0 || index >= len(fields) { return fields @@ -383,6 +599,7 @@ func removeField(fields []*ast.Field, index int) []*ast.Field { return append(fields[:index], fields[index+1:]...) 
} +<<<<<<< HEAD func getFilename(fset *token.FileSet, file *ast.File) string { filename := fset.PositionFor(file.Pos(), true).Filename @@ -392,3 +609,5 @@ func getFilename(fset *token.FileSet, file *ast.File) string { return filename } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/Antonboom/nilnil/pkg/analyzer/analyzer.go b/vendor/github.com/Antonboom/nilnil/pkg/analyzer/analyzer.go index 703cc1c39f..4adc007698 100644 --- a/vendor/github.com/Antonboom/nilnil/pkg/analyzer/analyzer.go +++ b/vendor/github.com/Antonboom/nilnil/pkg/analyzer/analyzer.go @@ -125,7 +125,11 @@ const ( ) func (n *nilNil) isDangerNilType(t types.Type) (bool, zeroValue) { +<<<<<<< HEAD switch v := types.Unalias(t).(type) { +======= + switch v := t.(type) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) case *types.Pointer: return n.checkedTypes.Contains(ptrType), zeroValueNil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md index cf422304e7..fc09c2324c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -1,5 +1,6 @@ # Release History +<<<<<<< HEAD ## 1.17.0 (2025-01-07) ### Features Added @@ -7,6 +8,8 @@ * Added field `OperationLocationResultPath` to `runtime.NewPollerOptions[T]` for LROs that use the `Operation-Location` pattern. * Support `encoding.TextMarshaler` and `encoding.TextUnmarshaler` interfaces in `arm.ResourceID`. +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## 1.16.0 (2024-10-17) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go index d9a4e36dcc..f8944266f5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go @@ -110,6 +110,7 @@ func (id *ResourceID) String() string { return id.stringValue } +<<<<<<< HEAD // MarshalText returns a textual representation of the ResourceID func (id *ResourceID) MarshalText() ([]byte, error) { return []byte(id.String()), nil @@ -125,6 +126,8 @@ func (id *ResourceID) UnmarshalText(text []byte) error { return nil } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func newResourceID(parent *ResourceID, resourceTypeName string, resourceName string) *ResourceID { id := &ResourceID{} id.init(parent, chooseResourceType(resourceTypeName, parent), resourceName, true) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go index f496331893..836dc1fb75 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go @@ -40,13 +40,20 @@ type Poller[T any] struct { OrigURL string `json:"origURL"` Method string `json:"method"` FinalState pollers.FinalStateVia `json:"finalState"` +<<<<<<< HEAD ResultPath string `json:"resultPath"` +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) CurState string `json:"state"` } // New creates a new Poller from the provided initial response. 
// Pass nil for response to create an empty Poller for rehydration. +<<<<<<< HEAD func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia, resultPath string) (*Poller[T], error) { +======= +func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if resp == nil { log.Write(log.EventLRO, "Resuming Operation-Location poller.") return &Poller[T]{pl: pl}, nil @@ -83,7 +90,10 @@ func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.Fi OrigURL: resp.Request.URL.String(), Method: resp.Request.Method, FinalState: finalState, +<<<<<<< HEAD ResultPath: resultPath, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) CurState: curState, }, nil } @@ -118,6 +128,13 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error { var req *exported.Request var err error +<<<<<<< HEAD +======= + // when the payload is included with the status monitor on + // terminal success it's in the "result" JSON property + payloadPath := "result" + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if p.FinalState == pollers.FinalStateViaLocation && p.LocURL != "" { req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) } else if rl, rlErr := poller.GetResourceLocation(p.resp); rlErr != nil && !errors.Is(rlErr, poller.ErrNoBody) { @@ -136,7 +153,11 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error { // if a final GET request has been created, execute it if req != nil { // no JSON path when making a final GET request +<<<<<<< HEAD p.ResultPath = "" +======= + payloadPath = "" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) resp, err := p.pl.Do(req) if err != nil { return err @@ -144,5 +165,9 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error { p.resp = resp } +<<<<<<< HEAD return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), p.ResultPath, out) +======= + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), payloadPath, out) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go index 44ab00d400..9632da6dc7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -40,5 +40,9 @@ const ( Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. +<<<<<<< HEAD Version = "v1.17.0" +======= + Version = "v1.16.0" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go index c66fc0a90a..ac08baf4c8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go @@ -32,7 +32,10 @@ type PagingHandler[T any] struct { } // Pager provides operations for iterating over paged responses. +<<<<<<< HEAD // Methods on this type are not safe for concurrent use. 
+======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type Pager[T any] struct { current *T handler PagingHandler[T] diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go index 4f90e44743..7aabe6173e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go @@ -50,6 +50,7 @@ const ( // NewPollerOptions contains the optional parameters for NewPoller. type NewPollerOptions[T any] struct { // FinalStateVia contains the final-state-via value for the LRO. +<<<<<<< HEAD // NOTE: used only for Azure-AsyncOperation and Operation-Location LROs. FinalStateVia FinalStateVia @@ -58,6 +59,10 @@ type NewPollerOptions[T any] struct { // NOTE: only used for Operation-Location LROs. OperationLocationResultPath string +======= + FinalStateVia FinalStateVia + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Response contains a preconstructed response type. // The final payload will be unmarshaled into it and returned. Response *T @@ -104,7 +109,11 @@ func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPol opr, err = async.New[T](pl, resp, options.FinalStateVia) } else if op.Applicable(resp) { // op poller must be checked before loc as it can also have a location header +<<<<<<< HEAD opr, err = op.New[T](pl, resp, options.FinalStateVia, options.OperationLocationResultPath) +======= + opr, err = op.New[T](pl, resp, options.FinalStateVia) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } else if loc.Applicable(resp) { opr, err = loc.New[T](pl, resp) } else if body.Applicable(resp) { @@ -178,7 +187,11 @@ func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options } else if loc.CanResume(asJSON) { opr, _ = loc.New[T](pl, nil) } else if op.CanResume(asJSON) { +<<<<<<< HEAD opr, _ = op.New[T](pl, nil, "", "") +======= + opr, _ = op.New[T](pl, nil, "") +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } else { return nil, fmt.Errorf("unhandled poller token %s", string(raw)) } @@ -206,7 +219,10 @@ type PollingHandler[T any] interface { } // Poller encapsulates a long-running operation, providing polling facilities until the operation reaches a terminal state. +<<<<<<< HEAD // Methods on this type are not safe for concurrent use. 
+======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type Poller[T any] struct { op PollingHandler[T] resp *http.Response diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md index 567e6975b1..a5a7421218 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md @@ -1,5 +1,6 @@ # Breaking Changes +<<<<<<< HEAD ## v1.8.0 ### New errors from `NewManagedIdentityCredential` in some environments @@ -10,6 +11,8 @@ * Cloud Shell * Service Fabric +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## v1.6.0 ### Behavioral change to `DefaultAzureCredential` in IMDS managed identity scenarios diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md index 1ffc19a548..9ea7a6344c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md @@ -1,5 +1,6 @@ # Release History +<<<<<<< HEAD ## 1.8.1 (2025-01-15) ### Bugs Fixed @@ -14,6 +15,8 @@ credential after `ManagedIdentityCredential` receives an unexpected response from IMDS, indicating the response is from something else such as a proxy +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## 1.8.0 (2024-10-08) ### Other Changes diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md index c99ce5b2b5..79c86ede61 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md @@ -54,7 +54,21 @@ The `azidentity` module focuses on OAuth authentication with Microsoft Entra ID. ### DefaultAzureCredential +<<<<<<< HEAD `DefaultAzureCredential` simplifies authentication while developing apps that deploy to Azure by combining credentials used in Azure hosting environments with credentials used in local development. For more information, see [DefaultAzureCredential overview][dac_overview]. +======= +`DefaultAzureCredential` simplifies authentication while developing applications that deploy to Azure by combining credentials used in Azure hosting environments and credentials used in local development. In production, it's better to use a specific credential type so authentication is more predictable and easier to debug. `DefaultAzureCredential` attempts to authenticate via the following mechanisms in this order, stopping when one succeeds: + +![DefaultAzureCredential authentication flow](img/mermaidjs/DefaultAzureCredentialAuthFlow.svg) + +1. **Environment** - `DefaultAzureCredential` will read account information specified via [environment variables](#environment-variables) and use it to authenticate. +1. **Workload Identity** - If the app is deployed on Kubernetes with environment variables set by the workload identity webhook, `DefaultAzureCredential` will authenticate the configured identity. +1. **Managed Identity** - If the app is deployed to an Azure host with managed identity enabled, `DefaultAzureCredential` will authenticate with it. +1. **Azure CLI** - If a user or service principal has authenticated via the Azure CLI `az login` command, `DefaultAzureCredential` will authenticate that identity. +1. 
**Azure Developer CLI** - If the developer has authenticated via the Azure Developer CLI `azd auth login` command, the `DefaultAzureCredential` will authenticate with that account. + +> Note: `DefaultAzureCredential` is intended to simplify getting started with the SDK by handling common scenarios with reasonable default behaviors. Developers who want more control or whose scenario isn't served by the default settings should use other credential types. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## Managed Identity @@ -118,10 +132,17 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) ### Credential chains +<<<<<<< HEAD |Credential|Usage|Reference |-|-|- |[DefaultAzureCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential)|Simplified authentication experience for getting started developing Azure apps|[DefaultAzureCredential overview][dac_overview]| |[ChainedTokenCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ChainedTokenCredential)|Define custom authentication flows, composing multiple credentials|[ChainedTokenCredential overview][ctc_overview]| +======= +|Credential|Usage +|-|- +|[DefaultAzureCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential)|Simplified authentication experience for getting started developing Azure apps +|[ChainedTokenCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ChainedTokenCredential)|Define custom authentication flows, composing multiple credentials +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ### Authenticating Azure-Hosted Applications @@ -250,8 +271,11 @@ For more information, see the or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. +<<<<<<< HEAD [ctc_overview]: https://aka.ms/azsdk/go/identity/credential-chains#chainedtokencredential-overview [dac_overview]: https://aka.ms/azsdk/go/identity/credential-chains#defaultazurecredential-overview +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fazidentity%2FREADME.png) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD index 8fc7c64aa3..ef1f96dda2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD @@ -22,6 +22,7 @@ Some credential types support opt-in persistent token caching (see [the below ta Persistent caches are encrypted at rest using a mechanism that depends on the operating system: +<<<<<<< HEAD | Operating system | Encryption facility | | ---------------- | ---------------------------------------------- | | Linux | kernel key retention service (keyctl) | @@ -29,6 +30,15 @@ Persistent caches are encrypted at rest using a mechanism that depends on the op | Windows | Data Protection API (DPAPI) | Persistent caching requires encryption. When the required encryption facility is unuseable, or the application is running on an unsupported OS, the persistent cache constructor returns an error. This doesn't mean that authentication is impossible, only that credentials can't persist authentication data and the application will need to reauthenticate the next time it runs. 
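That failure mode lends itself to a soft fallback in code. A minimal sketch using the `cache.New` constructor and the credential options' `Cache` field described in this guide (the credential type and placeholder IDs are illustrative, not prescribed by the SDK):

package example

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache"
)

func newCredentialWithOptionalPersistence() (*azidentity.ClientSecretCredential, error) {
	opts := &azidentity.ClientSecretCredentialOptions{}
	// cache.New returns an error when the OS encryption facility is
	// unavailable; fall back to the default in-memory cache instead of
	// failing authentication outright.
	if c, err := cache.New(nil); err == nil {
		opts.Cache = c
	} else {
		log.Printf("persistent token cache unavailable, continuing without it: %v", err)
	}
	return azidentity.NewClientSecretCredential("tenant-id", "client-id", "client-secret", opts)
}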
See the package documentation for examples showing how to configure persistent caching and access cached data for [users][user_example] and [service principals][sp_example]. +======= +| Operating system | Encryption facility | +|------------------|---------------------------------------| +| Linux | kernel key retention service (keyctl) | +| macOS | Keychain | +| Windows | Data Protection API (DPAPI) | + +Persistent caching requires encryption. When the required encryption facility is unuseable, or the application is running on an unsupported OS, the persistent cache constructor returns an error. This doesn't mean that authentication is impossible, only that credentials can't persist authentication data and the application will need to reauthenticate the next time it runs. See the [package documentation][example] for example code showing how to configure persistent caching and access cached data. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ### Credentials supporting token caching @@ -37,7 +47,11 @@ The following table indicates the state of in-memory and persistent caching in e **Note:** in-memory caching is enabled by default for every type supporting it. Persistent token caching must be enabled explicitly. See the [package documentation][user_example] for an example showing how to do this for credential types authenticating users. For types that authenticate service principals, set the `Cache` field on the constructor's options as shown in [this example][sp_example]. | Credential | In-memory token caching | Persistent token caching | +<<<<<<< HEAD | ------------------------------ | ------------------------------------------------------------------- | ------------------------ | +======= +|--------------------------------|---------------------------------------------------------------------|--------------------------| +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) | `AzureCLICredential` | Not Supported | Not Supported | | `AzureDeveloperCLICredential` | Not Supported | Not Supported | | `AzurePipelinesCredential` | Supported | Supported | diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md index 9c4b1cd71c..4b5fbcbf4c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md @@ -8,7 +8,10 @@ This troubleshooting guide covers failure investigation techniques, common error - [Permission issues](#permission-issues) - [Find relevant information in errors](#find-relevant-information-in-errors) - [Enable and configure logging](#enable-and-configure-logging) +<<<<<<< HEAD - [Troubleshoot persistent token caching issues](#troubleshoot-persistent-token-caching-issues) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) - [Troubleshoot AzureCLICredential authentication issues](#troubleshoot-azureclicredential-authentication-issues) - [Troubleshoot AzureDeveloperCLICredential authentication issues](#troubleshoot-azuredeveloperclicredential-authentication-issues) - [Troubleshoot AzurePipelinesCredential authentication issues](#troubleshoot-azurepipelinescredential-authentication-issues) @@ -237,6 +240,7 @@ azd auth token --output json --scope https://management.core.windows.net/.defaul | No service connection found with identifier |The `serviceConnectionID` argument to `NewAzurePipelinesCredential` is incorrect| Verify the service connection 
ID. This parameter refers to the `resourceId` of the Azure Service Connection. It can also be found in the query string of the service connection's configuration in Azure DevOps. [Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/library/service-endpoints?view=azure-devops&tabs=yaml) has more information about service connections.| |401 (Unauthorized) response from OIDC endpoint|The `systemAccessToken` argument to `NewAzurePipelinesCredential` is incorrect|Check pipeline configuration. This value comes from the predefined variable `System.AccessToken` [as described in Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#systemaccesstoken).| ## Troubleshoot persistent token caching issues ### macOS @@ -260,6 +264,8 @@ Try `go build` again with `CGO_ENABLED=1`. You may need to install native build macOS prohibits Keychain access from environments without a GUI such as SSH sessions. If your application calls the persistent cache constructor ([cache.New](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache#New)) from an SSH session on a macOS host, you'll see an error like `persistent storage isn't available due to error "User interaction is not allowed. (-25308)"`. This doesn't mean authentication is impossible, only that credentials can't persist data and the application must reauthenticate the next time it runs. ## Get additional help Additional information on ways to reach out for support can be found in [SUPPORT.md](https://github.com/Azure/azure-sdk-for-go/blob/main/SUPPORT.md). diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go index 40a94154c6..3d09bbc60f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go @@ -42,8 +42,11 @@ const ( developerSignOnClientID = "04b07795-8ddb-461a-bbee-02f9e1bf7b46" defaultSuffix = "/.default" scopeLogFmt = "%s.GetToken() acquired a token for scope %q" traceNamespace = "Microsoft.Entra" traceOpGetToken = "GetToken" traceOpAuthenticate = "Authenticate" @@ -105,6 +108,7 @@ func resolveAdditionalTenants(tenants []string) []string { return cp }
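The `scopeLogFmt` constant kept above feeds the SDK's authentication log events. As a hedged illustration of the logging setup the troubleshooting guide's "Enable and configure logging" section refers to — assuming the `azcore/log` listener API at these vendored versions — an application can surface those events like this (illustrative, not part of the patch):

```go
package main

import (
	"fmt"

	azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// Print SDK log messages, limited to authentication events such as
	// "<credential>.GetToken() acquired a token for scope ...".
	azlog.SetListener(func(event azlog.Event, msg string) {
		fmt.Println(msg)
	})
	azlog.SetEvents(azidentity.EventAuthentication)

	cred, err := azidentity.NewAzureCLICredential(nil)
	if err != nil {
		panic(err)
	}
	_ = cred // subsequent GetToken calls emit log lines via the listener
}
```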
// resolveTenant returns the correct tenant for a token request, or "" when the calling credential doesn't // have an explicitly configured tenant and the caller didn't specify a tenant for the token request. // @@ -115,6 +119,9 @@ func resolveAdditionalTenants(tenants []string) []string { // - credName: name of the calling credential type; for error messages // - additionalTenants: optional allow list of tenants the credential may acquire tokens from in // addition to defaultTenant i.e., the credential's AdditionallyAllowedTenants option func resolveTenant(defaultTenant, specified, credName string, additionalTenants []string) (string, error) { if specified == "" || specified == defaultTenant { return defaultTenant, nil @@ -130,6 +137,7 @@ func resolveTenant(defaultTenant, specified, credName string, additionalTenants return specified, nil } } if len(additionalTenants) == 0 { switch defaultTenant { case "", organizationsTenantID: @@ -141,6 +149,8 @@ func resolveTenant(defaultTenant, specified, credName string, additionalTenants return specified, nil } } return "", fmt.Errorf(`%s isn't configured to acquire tokens for tenant %q. To enable acquiring tokens for this tenant add it to the AdditionallyAllowedTenants on the credential options, or add "*" to allow acquiring tokens for any tenant`, credName, specified) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go index e2f371cfd8..bfd5c63dc9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go @@ -30,9 +30,15 @@ type azTokenProvider func(ctx context.Context, scopes []string, tenant, subscrip // AzureCLICredentialOptions contains optional parameters for AzureCLICredential. type AzureCLICredentialOptions struct { // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant. AdditionallyAllowedTenants []string // Subscription is the name or ID of a subscription. Set this to acquire tokens for an account other diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go index 46d0b55192..9a2974c094 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go @@ -30,9 +30,15 @@ type azdTokenProvider func(ctx context.Context, scopes []string, tenant string) // AzureDeveloperCLICredentialOptions contains optional parameters for AzureDeveloperCLICredential. type AzureDeveloperCLICredentialOptions struct { // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to // TenantID.
When TenantID is empty, this option has no effect and the credential will authenticate to // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant. AdditionallyAllowedTenants []string // TenantID identifies the tenant the credential should authenticate in. Defaults to the azd environment, diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go index 82342a0254..27bf43409e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go @@ -27,10 +27,14 @@ type ChainedTokenCredentialOptions struct { } // ChainedTokenCredential links together multiple credentials and tries them sequentially when authenticating. By default, // it tries all the credentials until one authenticates, after which it always uses that credential. For more information, // see [ChainedTokenCredential overview]. // // [ChainedTokenCredential overview]: https://aka.ms/azsdk/go/identity/credential-chains#chainedtokencredential-overview type ChainedTokenCredential struct { cond *sync.Cond iterating bool @@ -49,9 +53,12 @@ func NewChainedTokenCredential(sources []azcore.TokenCredential, options *Chaine if source == nil { // cannot have a nil credential in the chain or else the application will panic when GetToken() is called on nil return nil, errors.New("sources cannot contain nil") } if mc, ok := source.(*ManagedIdentityCredential); ok { mc.mic.chained = true } } cp := make([]azcore.TokenCredential, len(sources)) copy(cp, sources) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml index c3af0cdc2d..acc445e053 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml @@ -26,16 +26,38 @@ extends: parameters: CloudConfig: Public: SubscriptionConfigurations: - $(sub-config-identity-test-resources) EnableRaceDetector: true Location: westus2 RunLiveTests: true ServiceDirectory: azidentity UsePipelineProxy: false ${{ if endsWith(variables['Build.DefinitionName'], 'weekly') }}: PersistOidcToken: true
MatrixConfigs: - Name: managed_identity_matrix GenerateVMJobs: true diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go index 92f508094d..386fa08fbf 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go @@ -115,7 +115,11 @@ func (c *confidentialClient) GetToken(ctx context.Context, tro policy.TokenReque err = newAuthenticationFailedErrorFromMSAL(c.name, err) } } else { msg := fmt.Sprintf(scopeLogFmt, c.name, strings.Join(ar.GrantedScopes, ", ")) log.Write(EventAuthentication, msg) } return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go index 14af271f6a..7b4de159e8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go @@ -23,6 +23,7 @@ type DefaultAzureCredentialOptions struct { // to credential types that authenticate via external tools such as the Azure CLI. azcore.ClientOptions // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant. @@ -30,12 +31,21 @@ type DefaultAzureCredentialOptions struct { // AZURE_ADDITIONALLY_ALLOWED_TENANTS. AdditionallyAllowedTenants []string // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool // TenantID sets the default tenant for authentication via the Azure CLI and workload identity.
TenantID string } @@ -43,7 +53,11 @@ type DefaultAzureCredentialOptions struct { // DefaultAzureCredential simplifies authentication while developing applications that deploy to Azure by // combining credentials used in Azure hosting environments and credentials used in local development. In // production, it's better to use a specific credential type so authentication is more predictable and easier // to debug. For more information, see [DefaultAzureCredential overview]. // // DefaultAzureCredential attempts to authenticate with each of these credential types, in the following order, // stopping when one provides a token: @@ -59,8 +73,11 @@ type DefaultAzureCredentialOptions struct { // Consult the documentation for these credential types for more information on how they authenticate. // Once a credential has successfully authenticated, DefaultAzureCredential will use that credential for // every subsequent authentication. // // [DefaultAzureCredential overview]: https://aka.ms/azsdk/go/identity/credential-chains#defaultazurecredential-overview type DefaultAzureCredential struct { chain *ChainedTokenCredential }
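As a hedged usage sketch of the type defined above — standard azidentity API at these vendored versions; the management scope is only an example, and this is illustrative rather than part of the patch:

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// DefaultAzureCredential builds the chain described above (environment,
	// workload identity, managed identity, Azure CLI, azd) and reuses whichever
	// credential first returns a token.
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	tk, err := cred.GetToken(context.TODO(), policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("token expires at %s", tk.ExpiresOn)
}
```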
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go index 53ae9767f4..fe96597bf6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go @@ -21,9 +21,14 @@ const credNameDeviceCode = "DeviceCodeCredential" type DeviceCodeCredentialOptions struct { azcore.ClientOptions // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant. AdditionallyAllowedTenants []string // AuthenticationRecord returned by a call to a credential's Authenticate method. Set this option diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go index ec89de9b5b..0147e69d31 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go @@ -20,9 +20,14 @@ const credNameBrowser = "InteractiveBrowserCredential" type InteractiveBrowserCredentialOptions struct { azcore.ClientOptions // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant. AdditionallyAllowedTenants []string // AuthenticationRecord returned by a call to a credential's Authenticate method. Set this option diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go index cc07fd7015..031f2f685a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go @@ -65,9 +65,12 @@ type managedIdentityClient struct { id ManagedIDKind msiType msiType probeIMDS bool // chained indicates whether the client is part of a credential chain. If true, the client will return // a credentialUnavailableError instead of an AuthenticationFailedError for an unexpected IMDS response. chained bool } // arcKeyDirectory returns the directory expected to contain Azure Arc keys @@ -147,7 +150,11 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag if _, ok := os.LookupEnv(identityHeader); ok { if _, ok := os.LookupEnv(identityServerThumbprint); ok { if options.ID != nil { return nil, errors.New("the Service Fabric API doesn't support specifying a user-assigned identity at runtime. The identity is determined by cluster resource configuration. See https://aka.ms/servicefabricmi") } env = "Service Fabric" c.endpoint = endpoint @@ -218,7 +225,10 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi // no need to synchronize around this value because it's true only when DefaultAzureCredential constructed the client, // and in that case ChainedTokenCredential.GetToken synchronizes goroutines that would execute this block if c.probeIMDS { // send a malformed request (no Metadata header) to IMDS to determine whether the endpoint is available cx, cancel := context.WithTimeout(ctx, imdsProbeTimeout) defer cancel() cx = policy.WithRetryOptions(cx, policy.RetryOptions{MaxRetries: -1}) @@ -226,14 +236,32 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi if err != nil { return azcore.AccessToken{}, fmt.Errorf("failed to create IMDS probe request: %s", err) } if _, err = c.azClient.Pipeline().Do(req); err != nil { msg := err.Error() if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { msg = "managed identity timed out.
See https://aka.ms/azsdk/go/identity/troubleshoot#dac for more information" } return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, msg) } // send normal token requests from now on because something responded c.probeIMDS = false } @@ -248,6 +276,7 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi } if azruntime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { tk, err := c.createAccessToken(resp) if err != nil && c.chained && c.msiType == msiTypeIMDS { // failure to unmarshal a 2xx implies the response is from something other than IMDS such as a proxy listening at @@ -255,14 +284,20 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi err = newCredentialUnavailableError(credNameManagedIdentity, err.Error()) } return tk, err } if c.msiType == msiTypeIMDS { switch resp.StatusCode { case http.StatusBadRequest: if id != nil { // return authenticationFailedError, halting any encompassing credential chain, // because the explicit user-assigned identity implies the developer expected this to work return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp) } msg := "failed to authenticate a system assigned identity" @@ -278,6 +313,7 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, fmt.Sprintf("unexpected response %q", string(body))) } } if c.chained { // the response may be from something other than IMDS, for example a proxy returning // 404.
Return credentialUnavailableError so credential chains continue to their @@ -285,6 +321,8 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi err = newAuthenticationFailedError(credNameManagedIdentity, "", resp) return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, err.Error()) } } return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "", resp) @@ -299,7 +337,11 @@ func (c *managedIdentityClient) createAccessToken(res *http.Response) (azcore.Ac ExpiresOn interface{} `json:"expires_on,omitempty"` // the value returned in this field varies between a number and a date string }{} if err := azruntime.UnmarshalAsJSON(res, &value); err != nil { return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "Unexpected response content", res) } if value.ExpiresIn != "" { expiresIn, err := json.Number(value.ExpiresIn).Int64() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go index ef5e4d7212..5493cdbb4f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go @@ -154,7 +154,16 @@ func (p *publicClient) GetToken(ctx context.Context, tro policy.TokenRequestOpti if p.opts.DisableAutomaticAuthentication { return azcore.AccessToken{}, newAuthenticationRequiredError(p.name, tro) } return p.reqToken(ctx, client, tro) } // reqToken requests a token from the MSAL public client. It's separate from GetToken() to enable Authenticate() to bypass the cache.
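The `chained` flag and the credentialUnavailable conversions in the hunks above exist so a chain can move past a managed identity endpoint that isn't actually IMDS. A hedged sketch of composing such a chain with the public azidentity API (illustrative, not part of the patch):

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// Managed identity first; when IMDS is unavailable the credential reports
	// credentialUnavailable and the chain falls through to the Azure CLI.
	mi, err := azidentity.NewManagedIdentityCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	cli, err := azidentity.NewAzureCLICredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	chain, err := azidentity.NewChainedTokenCredential([]azcore.TokenCredential{mi, cli}, nil)
	if err != nil {
		log.Fatal(err)
	}
	tk, err := chain.GetToken(context.TODO(), policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("token expires at %s", tk.ExpiresOn)
}
```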
@@ -237,8 +246,11 @@ func (p *publicClient) newMSALClient(enableCAE bool) (msalPublicClient, error) { func (p *publicClient) token(ar public.AuthResult, err error) (azcore.AccessToken, error) { if err == nil { msg := fmt.Sprintf(scopeLogFmt, p.name, strings.Join(ar.GrantedScopes, ", ")) log.Write(EventAuthentication, msg) p.record, err = newAuthenticationRecord(ar) } else { err = newAuthenticationFailedErrorFromMSAL(p.name, err) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 index efa8c6d3eb..93766f2171 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 @@ -7,10 +7,13 @@ param ( [hashtable] $AdditionalParameters = @{}, [hashtable] $DeploymentOutputs, [Parameter(Mandatory = $true)] [ValidateNotNullOrEmpty()] [string] $SubscriptionId, [Parameter(ParameterSetName = 'Provisioner', Mandatory = $true)] [ValidateNotNullOrEmpty()] [string] $TenantId, @@ -19,10 +22,13 @@ param ( [ValidatePattern('^[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$')] [string] $TestApplicationId, [Parameter(Mandatory = $true)] [ValidateNotNullOrEmpty()] [string] $Environment, # Captures any arguments from eng/New-TestResources.ps1 not declared here (no parameter errors). [Parameter(ValueFromRemainingArguments = $true)] $RemainingArguments @@ -36,9 +42,14 @@ if ($CI) { Write-Host "Skipping post-provisioning script because resources weren't deployed" return } az cloud set -n $Environment az login --federated-token $env:ARM_OIDC_TOKEN --service-principal -t $TenantId -u $TestApplicationId az account set --subscription $SubscriptionId } Write-Host "Building container" @@ -71,9 +82,12 @@ $aciName = "azidentity-test" az container create -g $rg -n $aciName --image $image ` --acr-identity $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) ` --assign-identity [system] $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) ` --cpu 1 ` --memory 1.0 ` --os-type Linux ` --role "Storage Blob Data Reader" ` --scope $($DeploymentOutputs['AZIDENTITY_STORAGE_ID']) ` -e AZIDENTITY_STORAGE_NAME=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME']) ` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go index 88c1078a72..5e8d2be4dc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go @@ -14,5 +14,9 @@ const ( module = "github.com/Azure/azure-sdk-for-go/sdk/" + component // Version is the semantic version (see http://semver.org) of this module.
version = "v1.8.1" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/CHANGELOG.md index d99490438c..63e8b52886 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/CHANGELOG.md @@ -1,5 +1,6 @@ # Release History ## 1.3.0 (2024-11-06) ### Features Added @@ -12,6 +13,8 @@ * Client requests tokens from the Vault's tenant, overriding any credential default (thanks @francescomari) ## 1.1.0 (2024-02-13) ### Other Changes diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/assets.json index 27217d4ff7..908b7c6bb0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/assets.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/assets.json @@ -2,5 +2,9 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/security/keyvault/azkeys", "Tag": "go/security/keyvault/azkeys_d53919c433" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/ci.yml index 75586518a6..15ff7bf9da 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/ci.yml +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/ci.yml @@ -21,13 +21,19 @@ pr: include: - sdk/security/keyvault/azkeys extends: template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml parameters: TimeoutInMinutes: 120 ServiceDirectory: 'security/keyvault/azkeys' RunLiveTests: true UsePipelineProxy: false SupportedClouds: 'Public,UsGov,China' CloudConfig: Public: @@ -45,3 +51,15 @@ extends: Path: sdk/security/keyvault/azkeys/platform-matrix.json Selection: sparse GenerateVMJobs: true
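For orientation before the azkeys client changes that follow: a hedged sketch of constructing the client these vendored files implement (the vault URL and key name are hypothetical placeholders; illustrative, not part of the patch):

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// NewClient only configures the pipeline; the Key Vault challenge policy
	// touched later in this patch acquires a token on the first request.
	client, err := azkeys.NewClient("https://my-vault.vault.azure.net", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	resp, err := client.GetKey(context.TODO(), "my-key", "", nil) // "" = latest version
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("key ID: %s", *resp.Key.KID)
}
```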
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/custom_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/custom_client.go index 350ef41c42..60303f1f30 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/custom_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/custom_client.go @@ -38,10 +38,13 @@ func NewClient(vaultURL string, credential azcore.TokenCredential, options *Clie }, ) azcoreClient, err := azcore.NewClient(moduleName, version, runtime.PipelineOptions{ APIVersion: runtime.APIVersionOptions{ Location: runtime.APIVersionLocationQueryParam, Name: "api-version", }, PerRetry: []policy.Policy{authPolicy}, Tracing: runtime.TracingOptions{ Namespace: "Microsoft.KeyVault", diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/platform-matrix.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/platform-matrix.json index a2a6f970f0..6ee85a1f5d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/platform-matrix.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/platform-matrix.json @@ -6,8 +6,13 @@ { "Agent": { "ubuntu-20.04": { "OSVmImage": "env:LINUXVMIMAGE", "Pool": "env:LINUXPOOL" } }, "ArmTemplateParameters": "@{ enableHsm = $true }", diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/version.go index ca94011745..881dd80905 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/version.go @@ -8,5 +8,9 @@ package azkeys const ( moduleName = "github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys" version = "v1.3.0" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/CHANGELOG.md index 873368aa1a..452110f6b4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/CHANGELOG.md @@ -1,5 +1,6 @@ # Release History ## 1.1.0 (2024-10-21) ### Features Added @@ -11,6 +12,8 @@ ### Other Changes * Upgraded dependencies ## 1.0.0 (2023-08-15) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/challenge_policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/challenge_policy.go index 408ae052b3..95899dbc80 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/challenge_policy.go +++
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/challenge_policy.go @@ -17,6 +17,10 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" ) const challengeMatchError = `challenge resource "%s" doesn't match the requested domain. Set DisableChallengeResourceVerification to true in your client options to disable. See https://aka.ms/azsdk/blog/vault-uri for more information` @@ -30,7 +34,13 @@ type KeyVaultChallengePolicyOptions struct { type keyVaultAuthorizer struct { // tro is the policy's authentication parameters. These are discovered from an authentication challenge // elicited ahead of the first client request. tro policy.TokenRequestOptions verifyChallengeResource bool } @@ -55,7 +65,11 @@ func NewKeyVaultChallengePolicy(cred azcore.TokenCredential, opts *KeyVaultChall } func (k *keyVaultAuthorizer) authorize(req *policy.Request, authNZ func(policy.TokenRequestOptions) error) error { if len(k.tro.Scopes) == 0 || k.tro.TenantID == "" { if body := req.Body(); body != nil { // We don't know the scope or tenant ID because we haven't seen a challenge yet. We elicit one now by sending // the request without authorization, first removing its body, if any.
authorizeOnChallenge will reattach the @@ -102,11 +116,36 @@ func parseTenant(url string) string { return tenant } // updateTokenRequestOptions parses authentication parameters from Key Vault's challenge func (k *keyVaultAuthorizer) updateTokenRequestOptions(resp *http.Response, req *http.Request) error { authHeader := resp.Header.Get("WWW-Authenticate") if authHeader == "" { return errors.New("response has no WWW-Authenticate header for challenge authentication") } // Strip down to auth and resource @@ -126,7 +165,11 @@ func (k *keyVaultAuthorizer) updateTokenRequestOptions(resp *http.Response, req } } k.tro.TenantID = parseTenant(vals["authorization"]) scope := "" if v, ok := vals["scope"]; ok { scope = v @@ -134,16 +177,27 @@ func (k *keyVaultAuthorizer) updateTokenRequestOptions(resp *http.Response, req scope = v } if scope == "" { return errors.New("could not find a valid resource in the WWW-Authenticate header") } if k.verifyChallengeResource { // the challenge resource's host must match the requested vault's host parsed, err := url.Parse(scope) if err != nil { return fmt.Errorf("invalid challenge resource %q: %v", scope, err) } if !strings.HasSuffix(req.URL.Host, "."+parsed.Host) { return fmt.Errorf(challengeMatchError, scope) } } if !strings.HasSuffix(scope, "/.default") { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/ci.securitykeyvault.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/ci.securitykeyvault.yml new file mode 100644 index 0000000000..2f8b8e1a87 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/ci.securitykeyvault.yml @@ -0,0 +1,28 @@ +# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file.
+trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/security/keyvault/internal + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/security/keyvault/internal + +stages: +- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + ServiceDirectory: 'security/keyvault/internal' + RunLiveTests: false diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/constants.go index 5a037978fa..c5b083b854 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/constants.go @@ -7,5 +7,9 @@ package internal const ( version = "v1.1.0" //nolint ) diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go index 57d0e2777e..14c7c62239 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go @@ -18,8 +18,11 @@ import ( "encoding/pem" "errors" "fmt" "os" "strings" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base" @@ -317,21 +320,31 @@ func New(authority, clientID string, cred Credential, options ...Option) (Client if err != nil { return Client{}, err } autoEnabledRegion := os.Getenv("MSAL_FORCE_REGION") opts := clientOptions{ authority: authority, // if the caller specified a token provider, it will handle all details of authentication, using Client only as a token cache disableInstanceDiscovery: cred.tokenProvider != nil, httpClient: shared.DefaultClient, azureRegion: autoEnabledRegion, } for _, o := range options { o(&opts) } if strings.EqualFold(opts.azureRegion, "DisableMsalForceRegion") { opts.azureRegion = "" } baseOpts := []base.Option{ base.WithCacheAccessor(opts.accessor), base.WithClientCapabilities(opts.capabilities), diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go index e473d1267d..7a3a03fb6d 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go @@ -89,6 +89,7 @@ type AuthResult struct { ExpiresOn time.Time GrantedScopes []string DeclinedScopes []string Metadata AuthResultMetadata } @@ -106,6 +107,10 @@ const ( Cache TokenSource = 2 ) // AuthResultFromStorage creates an AuthResult from a storage token
response (which is generated from the cache). func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResult, error) { if err := storageTokenResponse.AccessToken.Validate(); err != nil { @@ -124,6 +129,7 @@ func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResu return AuthResult{}, fmt.Errorf("problem decoding JWT token: %w", err) } } return AuthResult{ Account: account, IDToken: idToken, @@ -135,6 +141,9 @@ func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResu TokenSource: Cache, }, }, nil } // NewAuthResult creates an AuthResult. @@ -148,9 +157,12 @@ func NewAuthResult(tokenResponse accesstokens.TokenResponse, account shared.Acco AccessToken: tokenResponse.AccessToken, ExpiresOn: tokenResponse.ExpiresOn.T, GrantedScopes: tokenResponse.GrantedScopes.Slice, Metadata: AuthResultMetadata{ TokenSource: IdentityProvider, }, }, nil } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go index 2134e57c9e..3ae7ffec84 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go @@ -18,6 +18,13 @@ import ( ) const addField = "AdditionalFields" var ( leftBrace = []byte("{")[0] @@ -102,6 +109,7 @@ func delimIs(got json.Token, want rune) bool { // hasMarshalJSON will determine if the value or a pointer to this value has // the MarshalJSON method. func hasMarshalJSON(v reflect.Value) bool { ok := false if _, ok = v.Interface().(json.Marshaler); !ok { var i any @@ -113,16 +121,43 @@ func hasMarshalJSON(v reflect.Value) bool { _, ok = i.(json.Marshaler) } return ok }
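The retained implementation above detects `MarshalJSON` by interface assertion on the value and, failing that, on its pointer, rather than looking methods up by name. A small self-contained sketch of that pattern (the `custom` type is hypothetical, not from the vendored code):

```go
package main

import (
	"encoding/json"
	"fmt"
	"reflect"
)

type custom struct{}

// MarshalJSON uses a pointer receiver, so only *custom satisfies json.Marshaler.
func (*custom) MarshalJSON() ([]byte, error) { return []byte(`"custom"`), nil }

// hasMarshaler checks the value itself, then an addressable pointer to it.
func hasMarshaler(v reflect.Value) bool {
	if _, ok := v.Interface().(json.Marshaler); ok {
		return true
	}
	if v.Kind() != reflect.Ptr && v.CanAddr() {
		_, ok := v.Addr().Interface().(json.Marshaler)
		return ok
	}
	return false
}

func main() {
	c := custom{}
	fmt.Println(hasMarshaler(reflect.ValueOf(c)))         // false: the copy isn't addressable
	fmt.Println(hasMarshaler(reflect.ValueOf(&c)))        // true: pointer implements it
	fmt.Println(hasMarshaler(reflect.ValueOf(&c).Elem())) // true: addressable value
}
```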
// callMarshalJSON will call MarshalJSON() method on the value or a pointer to this value. // This will panic if the method is not defined. func callMarshalJSON(v reflect.Value) ([]byte, error) { if marsh, ok := v.Interface().(json.Marshaler); ok { return marsh.MarshalJSON() } if v.Kind() == reflect.Ptr { if marsh, ok := v.Elem().Interface().(json.Marshaler); ok { return marsh.MarshalJSON() } @@ -134,6 +169,20 @@ func callMarshalJSON(v reflect.Value) ([]byte, error) { } } panic(fmt.Sprintf("callMarshalJSON called on type %T that does not have MarshalJSON defined", v.Interface())) } @@ -148,8 +197,17 @@ func hasUnmarshalJSON(v reflect.Value) bool { v = v.Addr() } _, ok := v.Interface().(json.Unmarshaler) return ok } // hasOmitEmpty indicates if the field has instructed us to not output diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go index fda5d7dd33..f2291a2fe7 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go @@ -7,7 +7,10 @@ package local import ( "context" "fmt" "html" "net" "net/http" "strconv" @@ -142,7 +145,11 @@ func (s *Server) handler(w http.ResponseWriter, r *http.Request) { headerErr := q.Get("error") if headerErr != "" { desc := html.EscapeString(q.Get("error_description")) // Note: It is a little weird we handle some errors by not going to the failPage. If they all should, // change this to s.error() and make s.error() write the failPage instead of an error code.
_, _ = w.Write([]byte(fmt.Sprintf(failPage, headerErr, desc))) diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go index e065313444..c139a64e63 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go @@ -10,8 +10,11 @@ import ( "io" "time" "github.com/google/uuid" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported" internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time" @@ -20,6 +23,10 @@ import ( "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs" ) // ResolveEndpointer contains the methods for resolving authority endpoints. @@ -332,7 +339,11 @@ func (t *Client) DeviceCode(ctx context.Context, authParams authority.AuthParams func (t *Client) resolveEndpoint(ctx context.Context, authParams *authority.AuthParams, userPrincipalName string) error { endpoints, err := t.Resolver.ResolveEndpoints(ctx, authParams.AuthorityInfo, userPrincipalName) if err != nil { return fmt.Errorf("unable to resolve an endpoint: %w", err) } authParams.Endpoints = endpoints return nil diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go index c3c4a96fc3..a191ba9f69 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go @@ -23,7 +23,11 @@ import ( const ( authorizationEndpoint = "https://%v/%v/oauth2/v2.0/authorize" aadInstanceDiscoveryEndpoint = "https://%v/common/discovery/instance" tenantDiscoveryEndpointWithRegion = "https://%s.%s/%s/v2.0/.well-known/openid-configuration" regionName = "REGION_NAME" defaultAPIVersion = "2021-10-01" @@ -47,12 +51,22 @@ type jsonCaller interface { } var aadTrustedHostList = map[string]bool{ "login.windows.net": true, // Microsoft Azure Worldwide - Used in validation scenarios where host is not this list "login.partner.microsoftonline.cn": true, // Microsoft Azure China "login.microsoftonline.de": true, // Microsoft Azure Blackforest "login-us.microsoftonline.com": true, // Microsoft Azure US Government - Legacy "login.microsoftonline.us": true, // Microsoft Azure US Government "login.microsoftonline.com": true, // Microsoft Azure Worldwide
} // TrustedHost checks if an AAD host is trusted/valid. @@ -136,12 +150,17 @@ const ( const ( AAD = "MSSTS" ADFS = "ADFS" DSTS = "DSTS" ) // DSTSTenant is referenced throughout multiple files, let us use a const in case we ever need to change it. const DSTSTenant = "7a433bfc-2514-4697-b467-e0933190487f" // AuthenticationScheme is an extensibility mechanism designed to be used only by Azure Arc for proof of possession access tokens. type AuthenticationScheme interface { // Extra parameters that are added to the request to the /token endpoint. @@ -239,6 +258,7 @@ func NewAuthParams(clientID string, authorityInfo Info) AuthParams { // - the client is configured to authenticate only Microsoft accounts via the "consumers" endpoint // - the resulting authority URL is invalid func (p AuthParams) WithTenant(ID string) (AuthParams, error) { if ID == "" || ID == p.AuthorityInfo.Tenant { return p, nil } @@ -259,6 +279,25 @@ func (p AuthParams) WithTenant(ID string) (AuthParams, error) { return p, errors.New("dSTS authority doesn't support tenants") } info, err := NewInfoFromAuthorityURI(authority, p.AuthorityInfo.ValidateAuthority, p.AuthorityInfo.InstanceDiscoveryDisabled) if err == nil { info.Region = p.AuthorityInfo.Region @@ -350,12 +389,17 @@ type Info struct { Host string CanonicalAuthorityURI string AuthorityType string ValidateAuthority bool Tenant string Region string InstanceDiscoveryDisabled bool } // NewInfoFromAuthorityURI creates an AuthorityInfo instance from the authority URL provided.
func NewInfoFromAuthorityURI(authority string, validateAuthority bool, instanceDiscoveryDisabled bool) (Info, error) { @@ -394,13 +438,44 @@ } authorityType = DSTS tenant = DSTSTenant } // u.Host includes the port, if any, which is required for private cloud deployments return Info{ Host: u.Host, CanonicalAuthorityURI: cannonicalAuthority, AuthorityType: authorityType, ValidateAuthority: validateAuthority, Tenant: tenant, InstanceDiscoveryDisabled: instanceDiscoveryDisabled, @@ -544,7 +619,11 @@ func (c Client) AADInstanceDiscovery(ctx context.Context, authorityInfo Info) (I discoveryHost = authorityInfo.Host } endpoint := fmt.Sprintf(aadInstanceDiscoveryEndpoint, discoveryHost) err = c.Comm.JSONCall(ctx, endpoint, http.Header{}, qv, nil, &resp) } return resp, err @@ -562,6 +641,7 @@ func detectRegion(ctx context.Context) string { client := http.Client{ Timeout: time.Duration(2 * time.Second), } req, _ := http.NewRequestWithContext(ctx, http.MethodGet, imdsEndpoint, nil) req.Header.Set("Metadata", "true") resp, err := client.Do(req) @@ -575,6 +655,19 @@ func detectRegion(ctx context.Context) string { return "" } } response, err := io.ReadAll(resp.Body) if err != nil { return "" diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go index d62aac74eb..d431ea97f4 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go @@
-18,11 +18,18 @@ import ( "strings" "time" +<<<<<<< HEAD "github.com/google/uuid" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" customJSON "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version" +======= + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" + customJSON "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version" + "github.com/google/uuid" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // HTTPClient represents an HTTP client. @@ -71,6 +78,7 @@ func (c *Client) JSONCall(ctx context.Context, endpoint string, headers http.Hea unmarshal = customJSON.Unmarshal } +<<<<<<< HEAD req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("%s?%s", endpoint, qv.Encode()), nil) if err != nil { return fmt.Errorf("could not create request: %w", err) @@ -78,6 +86,17 @@ func (c *Client) JSONCall(ctx context.Context, endpoint string, headers http.Hea addStdHeaders(headers) req.Header = headers +======= + u, err := url.Parse(endpoint) + if err != nil { + return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err) + } + u.RawQuery = qv.Encode() + + addStdHeaders(headers) + + req := &http.Request{Method: http.MethodGet, URL: u, Header: headers} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if body != nil { // Note: In case your wondering why we are not gzip encoding.... diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go index 4030ec8d8f..812d17accb 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go @@ -18,6 +18,12 @@ import ( "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" ) +<<<<<<< HEAD +======= +// ADFS is an active directory federation service authority type. 
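
The JSONCall conflict above shows two ways of building the same GET request: the HEAD side glues the encoded query onto the endpoint and uses the context-aware constructor, while the older side parses the URL, sets RawQuery, and assembles an http.Request by hand. A minimal sketch outside MSAL, with placeholder endpoint and query values, showing the two styles produce the same URL:

    package main

    import (
        "context"
        "fmt"
        "net/http"
        "net/url"
    )

    func main() {
        qv := url.Values{"api-version": {"1.1"}}
        endpoint := "https://login.microsoftonline.com/common/discovery/instance"

        // Newer style: context-aware constructor, query appended to the URL string.
        r1, _ := http.NewRequestWithContext(context.Background(), http.MethodGet,
            fmt.Sprintf("%s?%s", endpoint, qv.Encode()), nil)

        // Older style: parse first, set RawQuery, build the Request literal.
        u, _ := url.Parse(endpoint)
        u.RawQuery = qv.Encode()
        r2 := &http.Request{Method: http.MethodGet, URL: u, Header: http.Header{}}

        fmt.Println(r1.URL.String() == r2.URL.String()) // true
    }

The context-aware form additionally lets callers cancel the request, which is the practical reason for the newer style.
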
+const ADFS = "ADFS" + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type cacheEntry struct { Endpoints authority.Endpoints ValidForDomainsInList map[string]bool @@ -48,7 +54,11 @@ func (m *authorityEndpoint) ResolveEndpoints(ctx context.Context, authorityInfo return endpoints, nil } +<<<<<<< HEAD endpoint, err := m.openIDConfigurationEndpoint(ctx, authorityInfo) +======= + endpoint, err := m.openIDConfigurationEndpoint(ctx, authorityInfo, userPrincipalName) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return authority.Endpoints{}, err } @@ -80,7 +90,11 @@ func (m *authorityEndpoint) cachedEndpoints(authorityInfo authority.Info, userPr defer m.mu.Unlock() if cacheEntry, ok := m.cache[authorityInfo.CanonicalAuthorityURI]; ok { +<<<<<<< HEAD if authorityInfo.AuthorityType == authority.ADFS { +======= + if authorityInfo.AuthorityType == ADFS { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) domain, err := adfsDomainFromUpn(userPrincipalName) if err == nil { if _, ok := cacheEntry.ValidForDomainsInList[domain]; ok { @@ -99,7 +113,11 @@ func (m *authorityEndpoint) addCachedEndpoints(authorityInfo authority.Info, use updatedCacheEntry := createcacheEntry(endpoints) +<<<<<<< HEAD if authorityInfo.AuthorityType == authority.ADFS { +======= + if authorityInfo.AuthorityType == ADFS { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Since we're here, we've made a call to the backend. We want to ensure we're caching // the latest values from the server. if cacheEntry, ok := m.cache[authorityInfo.CanonicalAuthorityURI]; ok { @@ -116,12 +134,18 @@ func (m *authorityEndpoint) addCachedEndpoints(authorityInfo authority.Info, use m.cache[authorityInfo.CanonicalAuthorityURI] = updatedCacheEntry } +<<<<<<< HEAD func (m *authorityEndpoint) openIDConfigurationEndpoint(ctx context.Context, authorityInfo authority.Info) (string, error) { if authorityInfo.AuthorityType == authority.ADFS { return fmt.Sprintf("https://%s/adfs/.well-known/openid-configuration", authorityInfo.Host), nil } else if authorityInfo.AuthorityType == authority.DSTS { return fmt.Sprintf("https://%s/dstsv2/%s/v2.0/.well-known/openid-configuration", authorityInfo.Host, authority.DSTSTenant), nil +======= +func (m *authorityEndpoint) openIDConfigurationEndpoint(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (string, error) { + if authorityInfo.Tenant == "adfs" { + return fmt.Sprintf("https://%s/adfs/.well-known/openid-configuration", authorityInfo.Host), nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } else if authorityInfo.ValidateAuthority && !authority.TrustedHost(authorityInfo.Host) { resp, err := m.rest.Authority().AADInstanceDiscovery(ctx, authorityInfo) if err != nil { @@ -134,6 +158,10 @@ func (m *authorityEndpoint) openIDConfigurationEndpoint(ctx context.Context, aut return "", err } return resp.TenantDiscoveryEndpoint, nil +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return authorityInfo.CanonicalAuthorityURI + "v2.0/.well-known/openid-configuration", nil diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/app_engine.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/app_engine.go index 9ce7d96fe4..73d3ecca45 100644 --- a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/app_engine.go +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/app_engine.go @@ 
-14,8 +14,11 @@ package gcp +<<<<<<< HEAD import "context" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) const ( // See https://cloud.google.com/appengine/docs/flexible/python/migrating#modules // for the environment variables available in GAE environments. @@ -69,7 +72,11 @@ func (d *Detector) AppEngineFlexAvailabilityZoneAndRegion() (string, string, err // AppEngineStandardAvailabilityZone returns the zone the app engine service is running in. func (d *Detector) AppEngineStandardAvailabilityZone() (string, error) { +<<<<<<< HEAD return d.metadata.ZoneWithContext(context.TODO()) +======= + return d.metadata.Zone() +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // AppEngineStandardCloudRegion returns the region the app engine service is running in. diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go index 4eac3c74b6..6b0affdbd9 100644 --- a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go @@ -15,10 +15,15 @@ package gcp import ( +<<<<<<< HEAD "context" "errors" "os" "strings" +======= + "errors" + "os" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "cloud.google.com/go/compute/metadata" ) @@ -70,6 +75,7 @@ func (d *Detector) CloudPlatform() Platform { // ProjectID returns the ID of the project in which this program is running. func (d *Detector) ProjectID() (string, error) { +<<<<<<< HEAD // N.B. d.metadata.ProjectIDWithContext(context.TODO()) is cached globally, so if we use it here it's untestable. s, err := d.metadata.GetWithContext(context.TODO(), "project/project-id") return strings.TrimSpace(s), err @@ -80,14 +86,36 @@ func (d *Detector) instanceID() (string, error) { // N.B. d.metadata.InstanceIDWithContext(context.TODO()) is cached globally, so if we use it here it's untestable. s, err := d.metadata.GetWithContext(context.TODO(), "instance/id") return strings.TrimSpace(s), err +======= + return d.metadata.ProjectID() +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Detector collects resource information for all GCP platforms. type Detector struct { +<<<<<<< HEAD metadata *metadata.Client os osProvider } +======= + metadata metadataProvider + os osProvider +} + +// metadataProvider contains the subset of the metadata.Client functions used +// by this resource Detector to allow testing with a fake implementation. +type metadataProvider interface { + ProjectID() (string, error) + InstanceID() (string, error) + Get(string) (string, error) + InstanceName() (string, error) + Hostname() (string, error) + Zone() (string, error) + InstanceAttributeValue(string) (string, error) +} + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // osProvider contains the subset of the os package functions used by. 
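
The older side of the detector.go conflict above keeps a metadataProvider interface precisely so tests can swap in a stub for the live metadata server. A hypothetical fake of that kind, against a trimmed copy of the interface (only four of the listed methods, to keep the sketch short):

    package main

    import "fmt"

    // metadataProvider mirrors a subset of the test seam shown in the
    // old side of the conflict above (assumption: trimmed copy).
    type metadataProvider interface {
        ProjectID() (string, error)
        InstanceID() (string, error)
        Zone() (string, error)
        Get(string) (string, error)
    }

    // fakeMetadata is a hypothetical stub of the kind the interface enables.
    type fakeMetadata struct{}

    func (fakeMetadata) ProjectID() (string, error)  { return "test-project", nil }
    func (fakeMetadata) InstanceID() (string, error) { return "1234567890", nil }
    func (fakeMetadata) Zone() (string, error)       { return "us-central1-a", nil }
    func (fakeMetadata) Get(string) (string, error)  { return "", nil }

    func main() {
        var m metadataProvider = fakeMetadata{}
        id, _ := m.ProjectID()
        fmt.Println(id) // test-project
    }

Either way the goal is the same: keep Detector decoupled from the live metadata server; the HEAD side achieves it instead by calling GetWithContext on a concrete *metadata.Client, which is why the conflict touches every accessor.
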
type osProvider interface { LookupEnv(string) (string, bool) diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/faas.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/faas.go index f137b1fae6..f5c9f408de 100644 --- a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/faas.go +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/faas.go @@ -15,7 +15,10 @@ package gcp import ( +<<<<<<< HEAD "context" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "strings" ) @@ -90,7 +93,11 @@ func (d *Detector) CloudRunJobTaskIndex() (string, error) { // FaaSID returns the instance id of the Cloud Run or Cloud Function. func (d *Detector) FaaSID() (string, error) { +<<<<<<< HEAD return d.instanceID() +======= + return d.metadata.InstanceID() +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // FaaSCloudRegion detects region from the metadata server. @@ -98,7 +105,11 @@ func (d *Detector) FaaSID() (string, error) { // // https://cloud.google.com/run/docs/reference/container-contract#metadata-server func (d *Detector) FaaSCloudRegion() (string, error) { +<<<<<<< HEAD region, err := d.metadata.GetWithContext(context.TODO(), regionMetadataAttr) +======= + region, err := d.metadata.Get(regionMetadataAttr) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return "", err } diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gce.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gce.go index 794cfdf036..27ffa71b31 100644 --- a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gce.go +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gce.go @@ -15,6 +15,7 @@ package gcp import ( +<<<<<<< HEAD "context" "fmt" "regexp" @@ -32,42 +33,78 @@ const createdByInstanceAttr = "created-by" func (d *Detector) onGCE() bool { _, err := d.metadata.GetWithContext(context.TODO(), machineTypeMetadataAttr) +======= + "fmt" + "strings" +) + +// See the available GCE instance metadata: +// https://cloud.google.com/compute/docs/metadata/default-metadata-values#vm_instance_metadata +const machineTypeMetadataAttr = "instance/machine-type" + +func (d *Detector) onGCE() bool { + _, err := d.metadata.Get(machineTypeMetadataAttr) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err == nil } // GCEHostType returns the machine type of the instance on which this program is running. func (d *Detector) GCEHostType() (string, error) { +<<<<<<< HEAD return d.metadata.GetWithContext(context.TODO(), machineTypeMetadataAttr) +======= + return d.metadata.Get(machineTypeMetadataAttr) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // GCEHostID returns the instance ID of the instance on which this program is running. func (d *Detector) GCEHostID() (string, error) { +<<<<<<< HEAD return d.instanceID() +======= + return d.metadata.InstanceID() +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // GCEHostName returns the instance name of the instance on which this program is running. // Recommended to use GCEInstanceName() or GCEInstanceHostname() to more accurately reflect which // value is returned. 
func (d *Detector) GCEHostName() (string, error) { +<<<<<<< HEAD return d.metadata.InstanceNameWithContext(context.TODO()) +======= + return d.metadata.InstanceName() +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // GCEInstanceName returns the instance name of the instance on which this program is running. // This is the value visible in the Cloud Console UI, and the prefix for the default hostname // of the instance as defined by the default internal DNS name (see https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). func (d *Detector) GCEInstanceName() (string, error) { +<<<<<<< HEAD return d.metadata.InstanceNameWithContext(context.TODO()) +======= + return d.metadata.InstanceName() +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // GCEInstanceHostname returns the full value of the default or custom hostname of the instance // on which this program is running. See https://cloud.google.com/compute/docs/instances/custom-hostname-vm. func (d *Detector) GCEInstanceHostname() (string, error) { +<<<<<<< HEAD return d.metadata.HostnameWithContext(context.TODO()) +======= + return d.metadata.Hostname() +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // GCEAvailabilityZoneAndRegion returns the zone and region in which this program is running. func (d *Detector) GCEAvailabilityZoneAndRegion() (string, string, error) { +<<<<<<< HEAD zone, err := d.metadata.ZoneWithContext(context.TODO()) +======= + zone, err := d.metadata.Zone() +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return "", "", err } @@ -80,6 +117,7 @@ func (d *Detector) GCEAvailabilityZoneAndRegion() (string, string, error) { } return zone, strings.Join(splitZone[0:2], "-"), nil } +<<<<<<< HEAD type ManagedInstanceGroup struct { Name string @@ -115,3 +153,5 @@ func (d *Detector) GCEManagedInstanceGroup() (ManagedInstanceGroup, error) { } return mig, nil } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gke.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gke.go index 734d44cc03..1b3cc5f14e 100644 --- a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gke.go +++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gke.go @@ -15,7 +15,10 @@ package gcp import ( +<<<<<<< HEAD "context" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "fmt" "strings" ) @@ -32,6 +35,7 @@ const ( ) func (d *Detector) onGKE() bool { +<<<<<<< HEAD // Check if we are on k8s first _, found := d.os.LookupEnv(k8sServiceHostEnv) if !found { @@ -41,6 +45,10 @@ func (d *Detector) onGKE() bool { // different managed k8s platform. _, err := d.metadata.InstanceAttributeValueWithContext(context.TODO(), clusterLocationMetadataAttr) return err == nil +======= + _, found := d.os.LookupEnv(k8sServiceHostEnv) + return found +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // GKEHostID returns the instance ID of the instance on which this program is running. @@ -50,7 +58,11 @@ func (d *Detector) GKEHostID() (string, error) { // GKEClusterName returns the name if the GKE cluster in which this program is running. 
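
GCEAvailabilityZoneAndRegion in the hunk above derives the region by joining the first two dash-separated segments of the zone, since a GCE zone such as "us-central1-a" is its region plus a final zone letter. That derivation in isolation (helper name mine):

    package main

    import (
        "fmt"
        "strings"
    )

    func regionFromZone(zone string) (string, error) {
        parts := strings.Split(zone, "-")
        if len(parts) != 3 {
            return "", fmt.Errorf("zone %q is not in the expected <region>-<letter> form", zone)
        }
        return strings.Join(parts[:2], "-"), nil
    }

    func main() {
        r, err := regionFromZone("us-central1-a")
        fmt.Println(r, err) // us-central1 <nil>
    }
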
func (d *Detector) GKEClusterName() (string, error) { +<<<<<<< HEAD return d.metadata.InstanceAttributeValueWithContext(context.TODO(), clusterNameMetadataAttr) +======= + return d.metadata.InstanceAttributeValue(clusterNameMetadataAttr) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type LocationType int64 @@ -63,7 +75,11 @@ const ( // GKEAvailabilityZoneOrRegion returns the location of the cluster and whether the cluster is zonal or regional. func (d *Detector) GKEAvailabilityZoneOrRegion() (string, LocationType, error) { +<<<<<<< HEAD clusterLocation, err := d.metadata.InstanceAttributeValueWithContext(context.TODO(), clusterLocationMetadataAttr) +======= + clusterLocation, err := d.metadata.InstanceAttributeValue(clusterLocationMetadataAttr) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return "", UndefinedLocation, err } diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go index 304edc3422..b82475c75a 100644 --- a/vendor/github.com/Masterminds/semver/v3/version.go +++ b/vendor/github.com/Masterminds/semver/v3/version.go @@ -39,11 +39,17 @@ var ( ) // semVerRegex is the regular expression used to parse a semantic version. +<<<<<<< HEAD // This is not the official regex from the semver spec. It has been modified to allow for loose handling // where versions like 2.1 are detected. const semVerRegex string = `v?(0|[1-9]\d*)(?:\.(0|[1-9]\d*))?(?:\.(0|[1-9]\d*))?` + `(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?` + `(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?` +======= +const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Version represents a single semantic version. 
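
The semVerRegex conflict above pits the HEAD side's deliberately loose pattern (its comment notes versions like "2.1" are detected) against the older `v?([0-9]+)(\.[0-9]+)?...` form. A quick check of the HEAD-side core pattern, anchored and with the prerelease/build tails dropped for brevity (my simplification):

    package main

    import (
        "fmt"
        "regexp"
    )

    const loose = `^v?(0|[1-9]\d*)(?:\.(0|[1-9]\d*))?(?:\.(0|[1-9]\d*))?$`

    func main() {
        re := regexp.MustCompile(loose)
        for _, v := range []string{"2.1", "v1.2.3", "01.2"} {
            fmt.Println(v, re.MatchString(v)) // 2.1 true, v1.2.3 true, 01.2 false
        }
    }

Note the `(0|[1-9]\d*)` alternation: it is what rejects leading zeros like "01.2" while still allowing the missing patch segment.
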
type Version struct { @@ -148,8 +154,13 @@ func NewVersion(v string) (*Version, error) { } sv := &Version{ +<<<<<<< HEAD metadata: m[5], pre: m[4], +======= + metadata: m[8], + pre: m[5], +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) original: v, } @@ -160,7 +171,11 @@ func NewVersion(v string) (*Version, error) { } if m[2] != "" { +<<<<<<< HEAD sv.minor, err = strconv.ParseUint(m[2], 10, 64) +======= + sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, fmt.Errorf("Error parsing version segment: %s", err) } @@ -169,7 +184,11 @@ func NewVersion(v string) (*Version, error) { } if m[3] != "" { +<<<<<<< HEAD sv.patch, err = strconv.ParseUint(m[3], 10, 64) +======= + sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, fmt.Errorf("Error parsing version segment: %s", err) } @@ -614,9 +633,13 @@ func containsOnly(s string, comp string) bool { func validatePrerelease(p string) error { eparts := strings.Split(p, ".") for _, p := range eparts { +<<<<<<< HEAD if p == "" { return ErrInvalidMetadata } else if containsOnly(p, num) { +======= + if containsOnly(p, num) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if len(p) > 1 && p[0] == '0' { return ErrSegmentStartsZero } @@ -635,9 +658,13 @@ func validatePrerelease(p string) error { func validateMetadata(m string) error { eparts := strings.Split(m, ".") for _, p := range eparts { +<<<<<<< HEAD if p == "" { return ErrInvalidMetadata } else if !containsOnly(p, allowed) { +======= + if !containsOnly(p, allowed) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ErrInvalidMetadata } } diff --git a/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go b/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go index d558b9bd82..3bd95a182f 100644 --- a/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go +++ b/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go @@ -49,16 +49,28 @@ func ShiftNBytesLeft(dst, x []byte, n int) { dst = append(dst, make([]byte, n/8)...) } +<<<<<<< HEAD // XorBytesMut replaces X with X XOR Y. len(X) must be >= len(Y). func XorBytesMut(X, Y []byte) { for i := 0; i < len(Y); i++ { +======= +// XorBytesMut assumes equal input length, replaces X with X XOR Y +func XorBytesMut(X, Y []byte) { + for i := 0; i < len(X); i++ { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) X[i] ^= Y[i] } } +<<<<<<< HEAD // XorBytes puts X XOR Y into Z. len(Z) and len(X) must be >= len(Y). 
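
The byteutil conflict above is about the iteration bound: the HEAD side loops over len(Y) and documents len(X) >= len(Y), while the older side loops over len(X) and assumes equal lengths (so a shorter Y would panic). A tiny demonstration of why bounding on the shorter argument makes unequal-length XOR safe:

    package main

    import "fmt"

    // xorBytesMut XORs Y into X in place; len(X) must be >= len(Y).
    func xorBytesMut(X, Y []byte) {
        for i := 0; i < len(Y); i++ {
            X[i] ^= Y[i]
        }
    }

    func main() {
        x := []byte{0xff, 0xff, 0xff}
        xorBytesMut(x, []byte{0x0f, 0xf0}) // only the first two bytes change
        fmt.Printf("%x\n", x)              // f00fff
    }
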
func XorBytes(Z, X, Y []byte) { for i := 0; i < len(Y); i++ { +======= +// XorBytes assumes equal input length, puts X XOR Y into Z +func XorBytes(Z, X, Y []byte) { + for i := 0; i < len(X); i++ { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Z[i] = X[i] ^ Y[i] } } diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go b/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go index 24f893017b..f48d80b06e 100644 --- a/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go +++ b/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go @@ -18,9 +18,14 @@ import ( "crypto/cipher" "crypto/subtle" "errors" +<<<<<<< HEAD "math/bits" "github.com/ProtonMail/go-crypto/internal/byteutil" +======= + "github.com/ProtonMail/go-crypto/internal/byteutil" + "math/bits" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) type ocb struct { @@ -109,10 +114,15 @@ func (o *ocb) Seal(dst, nonce, plaintext, adata []byte) []byte { if len(nonce) > o.nonceSize { panic("crypto/ocb: Incorrect nonce length given to OCB") } +<<<<<<< HEAD sep := len(plaintext) ret, out := byteutil.SliceForAppend(dst, sep+o.tagSize) tag := o.crypt(enc, out[:sep], nonce, adata, plaintext) copy(out[sep:], tag) +======= + ret, out := byteutil.SliceForAppend(dst, len(plaintext)+o.tagSize) + o.crypt(enc, out, nonce, adata, plaintext) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret } @@ -124,10 +134,19 @@ func (o *ocb) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) { return nil, ocbError("Ciphertext shorter than tag length") } sep := len(ciphertext) - o.tagSize +<<<<<<< HEAD ret, out := byteutil.SliceForAppend(dst, sep) ciphertextData := ciphertext[:sep] tag := o.crypt(dec, out, nonce, adata, ciphertextData) if subtle.ConstantTimeCompare(tag, ciphertext[sep:]) == 1 { +======= + ret, out := byteutil.SliceForAppend(dst, len(ciphertext)) + ciphertextData := ciphertext[:sep] + tag := ciphertext[sep:] + o.crypt(dec, out, nonce, adata, ciphertextData) + if subtle.ConstantTimeCompare(ret[sep:], tag) == 1 { + ret = ret[:sep] +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } for i := range out { @@ -137,8 +156,12 @@ func (o *ocb) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) { } // On instruction enc (resp. dec), crypt is the encrypt (resp. decrypt) +<<<<<<< HEAD // function. It writes the resulting plain/ciphertext into Y and returns // the tag. +======= +// function. It returns the resulting plain/ciphertext with the tag appended. 
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte { // // Consider X as a sequence of 128-bit blocks @@ -155,7 +178,11 @@ func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte { truncatedNonce := make([]byte, len(nonce)) copy(truncatedNonce, nonce) truncatedNonce[len(truncatedNonce)-1] &= 192 +<<<<<<< HEAD var Ktop []byte +======= + Ktop := make([]byte, blockSize) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if bytes.Equal(truncatedNonce, o.reusableKtop.noncePrefix) { Ktop = o.reusableKtop.Ktop } else { @@ -195,6 +222,7 @@ func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte { byteutil.XorBytesMut(offset, o.mask.L[bits.TrailingZeros(uint(i+1))]) blockX := X[i*blockSize : (i+1)*blockSize] blockY := Y[i*blockSize : (i+1)*blockSize] +<<<<<<< HEAD switch instruction { case enc: byteutil.XorBytesMut(checksum, blockX) @@ -203,6 +231,15 @@ func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte { byteutil.XorBytesMut(blockY, offset) case dec: byteutil.XorBytes(blockY, blockX, offset) +======= + byteutil.XorBytes(blockY, blockX, offset) + switch instruction { + case enc: + o.block.Encrypt(blockY, blockY) + byteutil.XorBytesMut(blockY, offset) + byteutil.XorBytesMut(checksum, blockX) + case dec: +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) o.block.Decrypt(blockY, blockY) byteutil.XorBytesMut(blockY, offset) byteutil.XorBytesMut(checksum, blockY) @@ -218,6 +255,7 @@ func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte { o.block.Encrypt(pad, offset) chunkX := X[blockSize*m:] chunkY := Y[blockSize*m : len(X)] +<<<<<<< HEAD switch instruction { case enc: byteutil.XorBytesMut(checksum, chunkX) @@ -236,6 +274,33 @@ func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte { o.block.Encrypt(tag, tag) byteutil.XorBytesMut(tag, o.hash(adata)) return tag[:o.tagSize] +======= + byteutil.XorBytes(chunkY, chunkX, pad[:len(chunkX)]) + // P_* || bit(1) || zeroes(127) - len(P_*) + switch instruction { + case enc: + paddedY := append(chunkX, byte(128)) + paddedY = append(paddedY, make([]byte, blockSize-len(chunkX)-1)...) + byteutil.XorBytesMut(checksum, paddedY) + case dec: + paddedX := append(chunkY, byte(128)) + paddedX = append(paddedX, make([]byte, blockSize-len(chunkY)-1)...) + byteutil.XorBytesMut(checksum, paddedX) + } + byteutil.XorBytes(tag, checksum, offset) + byteutil.XorBytesMut(tag, o.mask.lDol) + o.block.Encrypt(tag, tag) + byteutil.XorBytesMut(tag, o.hash(adata)) + copy(Y[blockSize*m+len(chunkY):], tag[:o.tagSize]) + } else { + byteutil.XorBytes(tag, checksum, offset) + byteutil.XorBytesMut(tag, o.mask.lDol) + o.block.Encrypt(tag, tag) + byteutil.XorBytesMut(tag, o.hash(adata)) + copy(Y[blockSize*m:], tag[:o.tagSize]) + } + return Y +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // This hash function is used to compute the tag. 
Per design, on empty input it diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go index e0a677f284..dd7b4793ff 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go @@ -23,7 +23,11 @@ import ( // Headers // // base64-encoded Bytes +<<<<<<< HEAD // '=' base64 encoded checksum (optional) not checked anymore +======= +// '=' base64 encoded checksum +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // -----END Type----- // // where Headers is a possibly empty sequence of Key: Value lines. @@ -40,15 +44,47 @@ type Block struct { var ArmorCorrupt error = errors.StructuralError("armor invalid") +<<<<<<< HEAD +======= +const crc24Init = 0xb704ce +const crc24Poly = 0x1864cfb +const crc24Mask = 0xffffff + +// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1 +func crc24(crc uint32, d []byte) uint32 { + for _, b := range d { + crc ^= uint32(b) << 16 + for i := 0; i < 8; i++ { + crc <<= 1 + if crc&0x1000000 != 0 { + crc ^= crc24Poly + } + } + } + return crc +} + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var armorStart = []byte("-----BEGIN ") var armorEnd = []byte("-----END ") var armorEndOfLine = []byte("-----") +<<<<<<< HEAD // lineReader wraps a line based reader. It watches for the end of an armor block type lineReader struct { in *bufio.Reader buf []byte eof bool +======= +// lineReader wraps a line based reader. It watches for the end of an armor +// block and records the expected CRC value. +type lineReader struct { + in *bufio.Reader + buf []byte + eof bool + crc uint32 + crcSet bool +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (l *lineReader) Read(p []byte) (n int, err error) { @@ -77,9 +113,32 @@ func (l *lineReader) Read(p []byte) (n int, err error) { if len(line) == 5 && line[0] == '=' { // This is the checksum line +<<<<<<< HEAD // Don't check the checksum l.eof = true +======= + var expectedBytes [3]byte + var m int + m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:]) + if m != 3 || err != nil { + return + } + l.crc = uint32(expectedBytes[0])<<16 | + uint32(expectedBytes[1])<<8 | + uint32(expectedBytes[2]) + + line, _, err = l.in.ReadLine() + if err != nil && err != io.EOF { + return + } + if !bytes.HasPrefix(line, armorEnd) { + return 0, ArmorCorrupt + } + + l.eof = true + l.crcSet = true +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return 0, io.EOF } @@ -100,14 +159,33 @@ func (l *lineReader) Read(p []byte) (n int, err error) { return } +<<<<<<< HEAD // openpgpReader passes Read calls to the underlying base64 decoder. type openpgpReader struct { lReader *lineReader b64Reader io.Reader +======= +// openpgpReader passes Read calls to the underlying base64 decoder, but keeps +// a running CRC of the resulting data and checks the CRC against the value +// found by the lineReader at EOF. 
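
A worked example of the RFC 4880 section 6.1 CRC-24 that the crc24 routine earlier in this hunk implements, with the same init value and polynomial, plus the base64 "=XXXX" checksum-line formatting that the lineReader parses. The payload is a placeholder; the printed checksum depends on the input.

    package main

    import (
        "encoding/base64"
        "fmt"
    )

    const (
        crc24Init = 0xb704ce
        crc24Poly = 0x1864cfb
        crc24Mask = 0xffffff
    )

    func crc24(crc uint32, d []byte) uint32 {
        for _, b := range d {
            crc ^= uint32(b) << 16
            for i := 0; i < 8; i++ {
                crc <<= 1
                if crc&0x1000000 != 0 {
                    crc ^= crc24Poly
                }
            }
        }
        return crc
    }

    func main() {
        sum := crc24(crc24Init, []byte("hello")) & crc24Mask
        b := []byte{byte(sum >> 16), byte(sum >> 8), byte(sum)}
        // Prints the 5-character armor checksum line: "=" plus 4 base64 chars.
        fmt.Println("=" + base64.StdEncoding.EncodeToString(b))
    }
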
+type openpgpReader struct { + lReader *lineReader + b64Reader io.Reader + currentCRC uint32 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (r *openpgpReader) Read(p []byte) (n int, err error) { n, err = r.b64Reader.Read(p) +<<<<<<< HEAD +======= + r.currentCRC = crc24(r.currentCRC, p[:n]) + + if err == io.EOF && r.lReader.crcSet && r.lReader.crc != uint32(r.currentCRC&crc24Mask) { + return 0, ArmorCorrupt + } + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return } @@ -175,6 +253,10 @@ TryNextBlock: } p.lReader.in = r +<<<<<<< HEAD +======= + p.oReader.currentCRC = crc24Init +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.oReader.lReader = &p.lReader p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader) p.Body = &p.oReader diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go index 550efddf05..fb34c055cc 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go @@ -7,7 +7,10 @@ package armor import ( "encoding/base64" "io" +<<<<<<< HEAD "sort" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) var armorHeaderSep = []byte(": ") @@ -15,6 +18,7 @@ var blockEnd = []byte("\n=") var newline = []byte("\n") var armorEndOfLineOut = []byte("-----\n") +<<<<<<< HEAD const crc24Init = 0xb704ce const crc24Poly = 0x1864cfb @@ -32,6 +36,8 @@ func crc24(crc uint32, d []byte) uint32 { return crc } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // writeSlices writes its arguments to the given Writer. func writeSlices(out io.Writer, slices ...[]byte) (err error) { for _, s := range slices { @@ -117,6 +123,7 @@ func (l *lineBreaker) Close() (err error) { // // encoding -> base64 encoder -> lineBreaker -> out type encoding struct { +<<<<<<< HEAD out io.Writer breaker *lineBreaker b64 io.WriteCloser @@ -129,6 +136,17 @@ func (e *encoding) Write(data []byte) (n int, err error) { if e.crcEnabled { e.crc = crc24(e.crc, data) } +======= + out io.Writer + breaker *lineBreaker + b64 io.WriteCloser + crc uint32 + blockType []byte +} + +func (e *encoding) Write(data []byte) (n int, err error) { + e.crc = crc24(e.crc, data) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return e.b64.Write(data) } @@ -139,6 +157,7 @@ func (e *encoding) Close() (err error) { } e.breaker.Close() +<<<<<<< HEAD if e.crcEnabled { var checksumBytes [3]byte checksumBytes[0] = byte(e.crc >> 16) @@ -154,12 +173,29 @@ func (e *encoding) Close() (err error) { } func encode(out io.Writer, blockType string, headers map[string]string, checksum bool) (w io.WriteCloser, err error) { +======= + var checksumBytes [3]byte + checksumBytes[0] = byte(e.crc >> 16) + checksumBytes[1] = byte(e.crc >> 8) + checksumBytes[2] = byte(e.crc) + + var b64ChecksumBytes [4]byte + base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:]) + + return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine) +} + +// Encode returns a WriteCloser which will encode the data written to it in +// OpenPGP armor. 
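
A usage sketch for the armor Encode API in this hunk; both conflict sides export the same three-argument form, so this compiles against either vendored version of go-crypto (it assumes a module that vendors the library, as this patch does). Whether a trailing "=XXXX" checksum appears in the output depends on which version is in play.

    package main

    import (
        "bytes"
        "fmt"

        "github.com/ProtonMail/go-crypto/openpgp/armor"
    )

    func main() {
        var buf bytes.Buffer
        w, err := armor.Encode(&buf, "PGP MESSAGE", map[string]string{"Comment": "example"})
        if err != nil {
            panic(err)
        }
        if _, err := w.Write([]byte("hello")); err != nil {
            panic(err)
        }
        // Close writes the END line (and the checksum, if enabled).
        if err := w.Close(); err != nil {
            panic(err)
        }
        fmt.Println(buf.String())
    }

A design note visible in the conflict: the HEAD side sorts header keys before writing them, so armored output is deterministic, while the older side ranges over the map and header order can vary run to run.
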
+func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) bType := []byte(blockType) err = writeSlices(out, armorStart, bType, armorEndOfLineOut) if err != nil { return } +<<<<<<< HEAD keys := make([]string, len(headers)) i := 0 for k := range headers { @@ -169,6 +205,10 @@ func encode(out io.Writer, blockType string, headers map[string]string, checksum sort.Strings(keys) for _, k := range keys { err = writeSlices(out, []byte(k), armorHeaderSep, []byte(headers[k]), newline) +======= + for k, v := range headers { + err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return } @@ -180,15 +220,23 @@ func encode(out io.Writer, blockType string, headers map[string]string, checksum } e := &encoding{ +<<<<<<< HEAD out: out, breaker: newLineBreaker(out, 64), blockType: bType, crc: crc24Init, crcEnabled: checksum, +======= + out: out, + breaker: newLineBreaker(out, 64), + crc: crc24Init, + blockType: bType, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker) return e, nil } +<<<<<<< HEAD // Encode returns a WriteCloser which will encode the data written to it in // OpenPGP armor. @@ -204,3 +252,5 @@ func Encode(out io.Writer, blockType string, headers map[string]string) (w io.Wr func EncodeWithChecksumOption(out io.Writer, blockType string, headers map[string]string, doChecksum bool) (w io.WriteCloser, err error) { return encode(out, blockType, headers, doChecksum) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go index 5b40e1375d..3a5f7a6ae4 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go @@ -30,12 +30,17 @@ func writeCanonical(cw io.Writer, buf []byte, s *int) (int, error) { if c == '\r' { *s = 1 } else if c == '\n' { +<<<<<<< HEAD if _, err := cw.Write(buf[start:i]); err != nil { return 0, err } if _, err := cw.Write(newline); err != nil { return 0, err } +======= + cw.Write(buf[start:i]) + cw.Write(newline) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) start = i + 1 } case 1: @@ -43,9 +48,13 @@ func writeCanonical(cw io.Writer, buf []byte, s *int) (int, error) { } } +<<<<<<< HEAD if _, err := cw.Write(buf[start:]); err != nil { return 0, err } +======= + cw.Write(buf[start:]) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return len(buf), nil } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go index db8fb163b6..b59e9e3ce8 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go @@ -163,9 +163,19 @@ func buildKey(pub *PublicKey, zb []byte, curveOID, fingerprint []byte, stripLead if _, err := param.Write([]byte("Anonymous Sender ")); err != nil { return nil, err } +<<<<<<< HEAD if _, err := param.Write(fingerprint[:]); err != nil { return nil, err } +======= + // For v5 keys, the 20 leftmost octets of the fingerprint are used. 
+ if _, err := param.Write(fingerprint[:20]); err != nil { + return nil, err + } + if param.Len()-len(curveOID) != 45 { + return nil, errors.New("ecdh: malformed KDF Param") + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // MB = Hash ( 00 || 00 || 00 || 01 || ZB || Param ); h := pub.KDF.Hash.New() diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go index 0eb3937b39..b978e62223 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go @@ -9,6 +9,7 @@ import ( "strconv" ) +<<<<<<< HEAD var ( // ErrDecryptSessionKeyParsing is a generic error message for parsing errors in decrypted data // to reduce the risk of oracle attacks. @@ -21,6 +22,8 @@ var ( ErrMDCMissing error = SignatureError("MDC packet not found") ) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // A StructuralError is returned when OpenPGP data is found to be syntactically // invalid. type StructuralError string @@ -29,6 +32,7 @@ func (s StructuralError) Error() string { return "openpgp: invalid data: " + string(s) } +<<<<<<< HEAD // A DecryptWithSessionKeyError is returned when a failure occurs when reading from symmetrically decrypted data or // an authentication tag verification fails. // Such an error indicates that the supplied session key is likely wrong or the data got corrupted. @@ -57,6 +61,8 @@ func HandleSensitiveParsingError(err error, decrypted bool) error { return ErrDecryptSessionKeyParsing } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // UnsupportedError indicates that, although the OpenPGP data is valid, it // makes use of currently unimplemented features. 
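
Back in the ecdh hunk above, buildKey derives the message key as MB = Hash(00 || 00 || 00 || 01 || ZB || Param). A generic sketch of that one-step KDF shape; SHA-256 and the inputs here are placeholders standing in for the key's negotiated hash and the real Param bytes (curve OID, algorithm ID, KDF parameters, fingerprint):

    package main

    import (
        "crypto/sha256"
        "fmt"
    )

    func kdf(zb, param []byte) []byte {
        h := sha256.New()
        h.Write([]byte{0x00, 0x00, 0x00, 0x01}) // fixed 32-bit counter
        h.Write(zb)                             // ZB: the ECDH shared secret
        h.Write(param)                          // Param: the KDF context bytes
        return h.Sum(nil)
    }

    func main() {
        fmt.Printf("%x\n", kdf([]byte("shared-secret"), []byte("kdf-param")))
    }
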
type UnsupportedError string @@ -81,6 +87,12 @@ func (b SignatureError) Error() string { return "openpgp: invalid signature: " + string(b) } +<<<<<<< HEAD +======= +var ErrMDCHashMismatch error = SignatureError("MDC hash mismatch") +var ErrMDCMissing error = SignatureError("MDC packet not found") + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type signatureExpiredError int func (se signatureExpiredError) Error() string { @@ -95,6 +107,7 @@ func (ke keyExpiredError) Error() string { return "openpgp: key expired" } +<<<<<<< HEAD var ErrSignatureOlderThanKey error = signatureOlderThanKeyError(0) type signatureOlderThanKeyError int @@ -103,6 +116,8 @@ func (ske signatureOlderThanKeyError) Error() string { return "openpgp: signature is older than the key" } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var ErrKeyExpired error = keyExpiredError(0) type keyIncorrectError int @@ -137,24 +152,30 @@ func (keyRevokedError) Error() string { var ErrKeyRevoked error = keyRevokedError(0) +<<<<<<< HEAD type WeakAlgorithmError string func (e WeakAlgorithmError) Error() string { return "openpgp: weak algorithms are rejected: " + string(e) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type UnknownPacketTypeError uint8 func (upte UnknownPacketTypeError) Error() string { return "openpgp: unknown packet type: " + strconv.Itoa(int(upte)) } +<<<<<<< HEAD type CriticalUnknownPacketTypeError uint8 func (upte CriticalUnknownPacketTypeError) Error() string { return "openpgp: unknown critical packet type: " + strconv.Itoa(int(upte)) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // AEADError indicates that there is a problem when initializing or using a // AEAD instance, configuration struct, nonces or index values. type AEADError string @@ -171,6 +192,7 @@ type ErrDummyPrivateKey string func (dke ErrDummyPrivateKey) Error() string { return "openpgp: s2k GNU dummy key: " + string(dke) } +<<<<<<< HEAD // ErrMalformedMessage results when the packet sequence is incorrect type ErrMalformedMessage string @@ -178,3 +200,5 @@ type ErrMalformedMessage string func (dke ErrMalformedMessage) Error() string { return "openpgp: malformed message " + string(dke) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go index c76a75bcda..a19f26e8b5 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go @@ -51,14 +51,34 @@ func (sk CipherFunction) Id() uint8 { return uint8(sk) } +<<<<<<< HEAD // KeySize returns the key size, in bytes, of cipher. func (cipher CipherFunction) KeySize() int { switch cipher { +======= +var keySizeByID = map[uint8]int{ + TripleDES.Id(): 24, + CAST5.Id(): cast5.KeySize, + AES128.Id(): 16, + AES192.Id(): 24, + AES256.Id(): 32, +} + +// KeySize returns the key size, in bytes, of cipher. 
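
The cipher.go conflict above adds a keySizeByID map alongside the KeySize switch. A toy comparison of the two shapes, with the OpenPGP symmetric-algorithm IDs hard-coded and CAST5 omitted to avoid the extra import (both simplifications are mine):

    package main

    import "fmt"

    var keySizeByID = map[uint8]int{
        2: 24, // TripleDES
        7: 16, // AES128
        8: 24, // AES192
        9: 32, // AES256
    }

    func keySizeSwitch(id uint8) int {
        switch id {
        case 2:
            return 24
        case 7:
            return 16
        case 8:
            return 24
        case 9:
            return 32
        }
        return 0
    }

    func main() {
        for _, id := range []uint8{2, 7, 8, 9} {
            fmt.Println(id, keySizeSwitch(id) == keySizeByID[id]) // all true
        }
    }
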
+func (cipher CipherFunction) KeySize() int { + switch cipher { + case TripleDES: + return 24 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) case CAST5: return cast5.KeySize case AES128: return 16 +<<<<<<< HEAD case AES192, TripleDES: +======= + case AES192: +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return 24 case AES256: return 32 diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go index 0da2d0d852..59afefc085 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go @@ -4,14 +4,20 @@ package ecc import ( "bytes" "crypto/elliptic" +<<<<<<< HEAD +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/ProtonMail/go-crypto/bitcurves" "github.com/ProtonMail/go-crypto/brainpool" "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" ) +<<<<<<< HEAD const Curve25519GenName = "Curve25519" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type CurveInfo struct { GenName string Oid *encoding.OID @@ -45,19 +51,31 @@ var Curves = []CurveInfo{ }, { // Curve25519 +<<<<<<< HEAD GenName: Curve25519GenName, +======= + GenName: "Curve25519", +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0x97, 0x55, 0x01, 0x05, 0x01}), Curve: NewCurve25519(), }, { +<<<<<<< HEAD // x448 +======= + // X448 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) GenName: "Curve448", Oid: encoding.NewOID([]byte{0x2B, 0x65, 0x6F}), Curve: NewX448(), }, { // Ed25519 +<<<<<<< HEAD GenName: Curve25519GenName, +======= + GenName: "Curve25519", +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0xDA, 0x47, 0x0F, 0x01}), Curve: NewEd25519(), }, diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go index 5a4c3a8596..9e67a05ddd 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go @@ -2,7 +2,10 @@ package ecc import ( +<<<<<<< HEAD "bytes" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "crypto/subtle" "io" @@ -91,6 +94,7 @@ func (c *ed25519) GenerateEdDSA(rand io.Reader) (pub, priv []byte, err error) { } func getEd25519Sk(publicKey, privateKey []byte) ed25519lib.PrivateKey { +<<<<<<< HEAD privateKeyCap, privateKeyLen, publicKeyLen := cap(privateKey), len(privateKey), len(publicKey) if privateKeyCap >= privateKeyLen+publicKeyLen && @@ -99,6 +103,9 @@ func getEd25519Sk(publicKey, privateKey []byte) ed25519lib.PrivateKey { } return append(privateKey[:privateKeyLen:privateKeyLen], publicKey...) +======= + return append(privateKey, publicKey...) 
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (c *ed25519) Sign(publicKey, privateKey, message []byte) (sig []byte, err error) { diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go index b6edda7480..9f0df9efc3 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go @@ -2,7 +2,10 @@ package ecc import ( +<<<<<<< HEAD "bytes" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "crypto/subtle" "io" @@ -85,6 +88,7 @@ func (c *ed448) GenerateEdDSA(rand io.Reader) (pub, priv []byte, err error) { } func getEd448Sk(publicKey, privateKey []byte) ed448lib.PrivateKey { +<<<<<<< HEAD privateKeyCap, privateKeyLen, publicKeyLen := cap(privateKey), len(privateKey), len(publicKey) if privateKeyCap >= privateKeyLen+publicKeyLen && @@ -93,6 +97,9 @@ func getEd448Sk(publicKey, privateKey []byte) ed448lib.PrivateKey { } return append(privateKey[:privateKeyLen:privateKeyLen], publicKey...) +======= + return append(privateKey, publicKey...) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (c *ed448) Sign(publicKey, privateKey, message []byte) (sig []byte, err error) { diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go index 77213f66be..0c53a366c9 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go @@ -15,15 +15,21 @@ import ( "github.com/ProtonMail/go-crypto/openpgp/ecdh" "github.com/ProtonMail/go-crypto/openpgp/ecdsa" +<<<<<<< HEAD "github.com/ProtonMail/go-crypto/openpgp/ed25519" "github.com/ProtonMail/go-crypto/openpgp/ed448" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/ProtonMail/go-crypto/openpgp/eddsa" "github.com/ProtonMail/go-crypto/openpgp/errors" "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" "github.com/ProtonMail/go-crypto/openpgp/packet" +<<<<<<< HEAD "github.com/ProtonMail/go-crypto/openpgp/x25519" "github.com/ProtonMail/go-crypto/openpgp/x448" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a @@ -40,10 +46,15 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err return nil, err } primary := packet.NewSignerPrivateKey(creationTime, primaryPrivRaw) +<<<<<<< HEAD if config.V6() { if err := primary.UpgradeToV6(); err != nil { return nil, err } +======= + if config != nil && config.V5Keys { + primary.UpgradeToV5() +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } e := &Entity{ @@ -51,6 +62,7 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err PrivateKey: primary, Identities: make(map[string]*Identity), Subkeys: []Subkey{}, +<<<<<<< HEAD Signatures: []*packet.Signature{}, } @@ -70,6 +82,11 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err } err = e.addUserId(name, comment, email, config, creationTime, keyLifetimeSecs, !config.V6()) +======= + } + + err = e.addUserId(name, comment, email, config, creationTime, keyLifetimeSecs) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -87,6 +104,7 @@ func NewEntity(name, comment, email 
string, config *packet.Config) (*Entity, err func (t *Entity) AddUserId(name, comment, email string, config *packet.Config) error { creationTime := config.Now() keyLifetimeSecs := config.KeyLifetime() +<<<<<<< HEAD return t.addUserId(name, comment, email, config, creationTime, keyLifetimeSecs, !config.V6()) } @@ -95,11 +113,38 @@ func writeKeyProperties(selfSignature *packet.Signature, creationTime time.Time, selfSignature.CreationTime = creationTime selfSignature.KeyLifetimeSecs = &keyLifetimeSecs +======= + return t.addUserId(name, comment, email, config, creationTime, keyLifetimeSecs) +} + +func (t *Entity) addUserId(name, comment, email string, config *packet.Config, creationTime time.Time, keyLifetimeSecs uint32) error { + uid := packet.NewUserId(name, comment, email) + if uid == nil { + return errors.InvalidArgumentError("user id field contained invalid characters") + } + + if _, ok := t.Identities[uid.Id]; ok { + return errors.InvalidArgumentError("user id exist") + } + + primary := t.PrivateKey + + isPrimaryId := len(t.Identities) == 0 + + selfSignature := createSignaturePacket(&primary.PublicKey, packet.SigTypePositiveCert, config) + selfSignature.CreationTime = creationTime + selfSignature.KeyLifetimeSecs = &keyLifetimeSecs + selfSignature.IsPrimaryId = &isPrimaryId +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) selfSignature.FlagsValid = true selfSignature.FlagSign = true selfSignature.FlagCertify = true selfSignature.SEIPDv1 = true // true by default, see 5.8 vs. 5.14 +<<<<<<< HEAD selfSignature.SEIPDv2 = advertiseAead +======= + selfSignature.SEIPDv2 = config.AEAD() != nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Set the PreferredHash for the SelfSignature from the packet.Config. // If it is not the must-implement algorithm from rfc4880bis, append that. @@ -128,6 +173,7 @@ func writeKeyProperties(selfSignature *packet.Signature, creationTime time.Time, selfSignature.PreferredCompression = append(selfSignature.PreferredCompression, uint8(config.Compression())) } +<<<<<<< HEAD if advertiseAead { // Get the preferred AEAD mode from the packet.Config. // If it is not the must-implement algorithm from rfc9580, append that. @@ -166,6 +212,20 @@ func (t *Entity) addUserId(name, comment, email string, config *packet.Config, c } } selfSignature.IsPrimaryId = &isPrimaryId +======= + // And for DefaultMode. 
+ modes := []uint8{uint8(config.AEAD().Mode())} + if config.AEAD().Mode() != packet.AEADModeOCB { + modes = append(modes, uint8(packet.AEADModeOCB)) + } + + // For preferred (AES256, GCM), we'll generate (AES256, GCM), (AES256, OCB), (AES128, GCM), (AES128, OCB) + for _, cipher := range selfSignature.PreferredSymmetric { + for _, mode := range modes { + selfSignature.PreferredCipherSuites = append(selfSignature.PreferredCipherSuites, [2]uint8{cipher, mode}) + } + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // User ID binding signature err := selfSignature.SignUserId(uid.Id, &primary.PublicKey, primary, config) @@ -193,10 +253,15 @@ func (e *Entity) AddSigningSubkey(config *packet.Config) error { } sub := packet.NewSignerPrivateKey(creationTime, subPrivRaw) sub.IsSubkey = true +<<<<<<< HEAD if config.V6() { if err := sub.UpgradeToV6(); err != nil { return err } +======= + if config != nil && config.V5Keys { + sub.UpgradeToV5() +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } subkey := Subkey{ @@ -240,10 +305,15 @@ func (e *Entity) addEncryptionSubkey(config *packet.Config, creationTime time.Ti } sub := packet.NewDecrypterPrivateKey(creationTime, subPrivRaw) sub.IsSubkey = true +<<<<<<< HEAD if config.V6() { if err := sub.UpgradeToV6(); err != nil { return err } +======= + if config != nil && config.V5Keys { + sub.UpgradeToV5() +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } subkey := Subkey{ @@ -281,11 +351,14 @@ func newSigner(config *packet.Config) (signer interface{}, err error) { } return rsa.GenerateKey(config.Random(), bits) case packet.PubKeyAlgoEdDSA: +<<<<<<< HEAD if config.V6() { // Implementations MUST NOT accept or generate v6 key material // using the deprecated OIDs. return nil, errors.InvalidArgumentError("EdDSALegacy cannot be used for v6 keys") } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) curve := ecc.FindEdDSAByGenName(string(config.CurveName())) if curve == nil { return nil, errors.InvalidArgumentError("unsupported curve") @@ -307,6 +380,7 @@ func newSigner(config *packet.Config) (signer interface{}, err error) { return nil, err } return priv, nil +<<<<<<< HEAD case packet.PubKeyAlgoEd25519: priv, err := ed25519.GenerateKey(config.Random()) if err != nil { @@ -319,6 +393,8 @@ func newSigner(config *packet.Config) (signer interface{}, err error) { return nil, err } return priv, nil +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: return nil, errors.InvalidArgumentError("unsupported public key algorithm") } @@ -341,6 +417,7 @@ func newDecrypter(config *packet.Config) (decrypter interface{}, err error) { case packet.PubKeyAlgoEdDSA, packet.PubKeyAlgoECDSA: fallthrough // When passing EdDSA or ECDSA, we generate an ECDH subkey case packet.PubKeyAlgoECDH: +<<<<<<< HEAD if config.V6() && (config.CurveName() == packet.Curve25519 || config.CurveName() == packet.Curve448) { @@ -348,6 +425,8 @@ func newDecrypter(config *packet.Config) (decrypter interface{}, err error) { // using the deprecated OIDs. 
return nil, errors.InvalidArgumentError("ECDH with Curve25519/448 legacy cannot be used for v6 keys") } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var kdf = ecdh.KDF{ Hash: algorithm.SHA512, Cipher: algorithm.AES256, @@ -357,10 +436,13 @@ func newDecrypter(config *packet.Config) (decrypter interface{}, err error) { return nil, errors.InvalidArgumentError("unsupported curve") } return ecdh.GenerateKey(config.Random(), curve, kdf) +<<<<<<< HEAD case packet.PubKeyAlgoEd25519, packet.PubKeyAlgoX25519: // When passing Ed25519, we generate an x25519 subkey return x25519.GenerateKey(config.Random()) case packet.PubKeyAlgoEd448, packet.PubKeyAlgoX448: // When passing Ed448, we generate an x448 subkey return x448.GenerateKey(config.Random()) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: return nil, errors.InvalidArgumentError("unsupported public key algorithm") } @@ -369,7 +451,11 @@ func newDecrypter(config *packet.Config) (decrypter interface{}, err error) { var bigOne = big.NewInt(1) // generateRSAKeyWithPrimes generates a multi-prime RSA keypair of the +<<<<<<< HEAD // given bit size, using the given random source and pre-populated primes. +======= +// given bit size, using the given random source and prepopulated primes. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func generateRSAKeyWithPrimes(random io.Reader, nprimes int, bits int, prepopulatedPrimes []*big.Int) (*rsa.PrivateKey, error) { priv := new(rsa.PrivateKey) priv.E = 65537 diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go index a071353e2e..fc35909aae 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go @@ -6,7 +6,10 @@ package openpgp import ( goerrors "errors" +<<<<<<< HEAD "fmt" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "io" "time" @@ -25,6 +28,7 @@ var PrivateKeyType = "PGP PRIVATE KEY BLOCK" // (which must be a signing key), one or more identities claimed by that key, // and zero or more subkeys, which may be encryption keys. type Entity struct { +<<<<<<< HEAD PrimaryKey *packet.PublicKey PrivateKey *packet.PrivateKey Identities map[string]*Identity // indexed by Identity.Name @@ -32,6 +36,13 @@ type Entity struct { Subkeys []Subkey SelfSignature *packet.Signature // Direct-key self signature of the PrimaryKey (contains primary key properties in v6) Signatures []*packet.Signature // all (potentially unverified) self-signatures, revocations, and third-party signatures +======= + PrimaryKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Identities map[string]*Identity // indexed by Identity.Name + Revocations []*packet.Signature + Subkeys []Subkey +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // An Identity represents an identity claimed by an Entity and zero or more @@ -123,12 +134,21 @@ func shouldPreferIdentity(existingId, potentialNewId *Identity) bool { // given Entity. func (e *Entity) EncryptionKey(now time.Time) (Key, bool) { // Fail to find any encryption key if the... 
+<<<<<<< HEAD primarySelfSignature, primaryIdentity := e.PrimarySelfSignature() if primarySelfSignature == nil || // no self-signature found e.PrimaryKey.KeyExpired(primarySelfSignature, now) || // primary key has expired e.Revoked(now) || // primary key has been revoked primarySelfSignature.SigExpired(now) || // user ID or or direct self-signature has expired (primaryIdentity != nil && primaryIdentity.Revoked(now)) { // user ID has been revoked (for v4 keys) +======= + i := e.PrimaryIdentity() + if e.PrimaryKey.KeyExpired(i.SelfSignature, now) || // primary key has expired + i.SelfSignature == nil || // user ID has no self-signature + i.SelfSignature.SigExpired(now) || // user ID self-signature has expired + e.Revoked(now) || // primary key has been revoked + i.Revoked(now) { // user ID has been revoked +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return Key{}, false } @@ -155,9 +175,15 @@ func (e *Entity) EncryptionKey(now time.Time) (Key, bool) { // If we don't have any subkeys for encryption and the primary key // is marked as OK to encrypt with, then we can use it. +<<<<<<< HEAD if primarySelfSignature.FlagsValid && primarySelfSignature.FlagEncryptCommunications && e.PrimaryKey.PubKeyAlgo.CanEncrypt() { return Key{e, e.PrimaryKey, e.PrivateKey, primarySelfSignature, e.Revocations}, true +======= + if i.SelfSignature.FlagsValid && i.SelfSignature.FlagEncryptCommunications && + e.PrimaryKey.PubKeyAlgo.CanEncrypt() { + return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, e.Revocations}, true +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return Key{}, false @@ -189,12 +215,21 @@ func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) { func (e *Entity) signingKeyByIdUsage(now time.Time, id uint64, flags int) (Key, bool) { // Fail to find any signing key if the... +<<<<<<< HEAD primarySelfSignature, primaryIdentity := e.PrimarySelfSignature() if primarySelfSignature == nil || // no self-signature found e.PrimaryKey.KeyExpired(primarySelfSignature, now) || // primary key has expired e.Revoked(now) || // primary key has been revoked primarySelfSignature.SigExpired(now) || // user ID or direct self-signature has expired (primaryIdentity != nil && primaryIdentity.Revoked(now)) { // user ID has been revoked (for v4 keys) +======= + i := e.PrimaryIdentity() + if e.PrimaryKey.KeyExpired(i.SelfSignature, now) || // primary key has expired + i.SelfSignature == nil || // user ID has no self-signature + i.SelfSignature.SigExpired(now) || // user ID self-signature has expired + e.Revoked(now) || // primary key has been revoked + i.Revoked(now) { // user ID has been revoked +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return Key{}, false } @@ -223,12 +258,21 @@ func (e *Entity) signingKeyByIdUsage(now time.Time, id uint64, flags int) (Key, // If we don't have any subkeys for signing and the primary key // is marked as OK to sign with, then we can use it. 
if primarySelfSignature.FlagsValid &&
(flags&packet.KeyFlagCertify == 0 || primarySelfSignature.FlagCertify) &&
(flags&packet.KeyFlagSign == 0 || primarySelfSignature.FlagSign) &&
e.PrimaryKey.PubKeyAlgo.CanSign() &&
(id == 0 || e.PrimaryKey.KeyId == id) {
return Key{e, e.PrimaryKey, e.PrivateKey, primarySelfSignature, e.Revocations}, true
}

// No keys with a valid Signing Flag or no keys matched the id passed in
@@ -262,7 +306,11 @@ func (e *Entity) EncryptPrivateKeys(passphrase []byte, config *packet.Config) er
var keysToEncrypt []*packet.PrivateKey
// Add entity private key to encrypt.
if e.PrivateKey != nil && !e.PrivateKey.Dummy() && !e.PrivateKey.Encrypted {
keysToEncrypt = append(keysToEncrypt, e.PrivateKey)
}

// Add subkeys to encrypt.
@@ -274,7 +322,11 @@ func (e *Entity) EncryptPrivateKeys(passphrase []byte, config *packet.Config) er
return packet.EncryptPrivateKeys(keysToEncrypt, passphrase, config)
}

// DecryptPrivateKeys decrypts all encrypted keys in the entity with the given passphrase.
// Avoids recomputation of similar s2k key derivations. Public keys and dummy keys are ignored,
// and don't cause an error to be returned.
func (e *Entity) DecryptPrivateKeys(passphrase []byte) error {
@@ -287,7 +339,11 @@ func (e *Entity) DecryptPrivateKeys(passphrase []byte) error {
// Add subkeys to decrypt.
for _, sub := range e.Subkeys {
if sub.PrivateKey != nil && !sub.PrivateKey.Dummy() && sub.PrivateKey.Encrypted {
keysToDecrypt = append(keysToDecrypt, sub.PrivateKey)
}
}
return packet.DecryptPrivateKeys(keysToDecrypt, passphrase)
@@ -321,7 +377,12 @@ type EntityList []*Entity
func (el EntityList) KeysById(id uint64) (keys []Key) {
for _, e := range el {
if e.PrimaryKey.KeyId == id {
selfSig, _ := e.PrimarySelfSignature()
keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig, e.Revocations})
}

@@ -443,6 +504,10 @@ func readToNextPublicKey(packets *packet.Reader) (err error) {
return
} else if err != nil {
if _, ok := err.(errors.UnsupportedError); ok {
continue
}
return
@@ -480,7 +545,10 @@ func ReadEntity(packets *packet.Reader) (*Entity, error) {
}

var revocations []*packet.Signature
var directSignatures []*packet.Signature
EachPacket:
for {
p, err := packets.Next()
@@ -499,7 +567,13 @@ EachPacket:
if pkt.SigType == packet.SigTypeKeyRevocation {
revocations = append(revocations, pkt)
} else if pkt.SigType == packet.SigTypeDirectSignature {
directSignatures = append(directSignatures, pkt)
}
// Else, ignoring the signature as it does not follow anything
// we would know to attach it to.
@@ -522,6 +596,7 @@ EachPacket:
return nil, err
}
default:
// we ignore unknown packets.
}
}

@@ -555,6 +630,14 @@ EachPacket:
}
e.SelfSignature = mainDirectKeySelfSignature
e.Signatures = directSignatures
}

for _, revocation := range revocations {
@@ -699,12 +782,15 @@ func (e *Entity) serializePrivate(w io.Writer, config *packet.Config, reSign boo
return err
}
}
for _, directSignature := range e.Signatures {
err := directSignature.Serialize(w)
if err != nil {
return err
}
}
for _, ident := range e.Identities {
err = ident.UserId.Serialize(w)
if err != nil {
@@ -771,12 +857,15 @@ func (e *Entity) Serialize(w io.Writer) error {
return err
}
}
for _, directSignature := range e.Signatures {
err := directSignature.Serialize(w)
if err != nil {
return err
}
}
for _, ident := range e.Identities {
err = ident.UserId.Serialize(w)
if err != nil {
@@ -879,6 +968,7 @@ func (e *Entity) RevokeSubkey(sk *Subkey, reason packet.ReasonForRevocation, rea
sk.Revocations = append(sk.Revocations, revSig)
return nil
}

func (e *Entity) primaryDirectSignature() *packet.Signature {
return e.SelfSignature
@@ -899,3 +989,5 @@ func (e *Entity) PrimarySelfSignature() (*packet.Signature, *Identity) {
}
return primaryIdentity.SelfSignature, primaryIdentity
}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go
index 5e46046563..de15427397 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go
@@ -3,6 +3,10 @@
package packet

import (
"crypto/cipher"
"encoding/binary"
"io"
@@ -14,11 +18,19 @@ import (
type aeadCrypter struct {
aead           cipher.AEAD
chunkSize      int
nonce          []byte
associatedData []byte     // Chunk-independent associated data
chunkIndex     []byte     // Chunk counter
packetTag      packetType // SEIP packet (v2) or AEAD Encrypted Data packet
bytesProcessed int        // Amount of plaintext bytes encrypted/decrypted
}

// computeNonce takes the incremental index and computes an eXclusive OR with
@@ -26,12 +38,21 @@ type aeadCrypter struct {
// 5.16.1 and 5.16.2). It returns the resulting nonce.
func (wo *aeadCrypter) computeNextNonce() (nonce []byte) {
if wo.packetTag == packetTypeSymmetricallyEncryptedIntegrityProtected {
return wo.nonce
}

nonce = make([]byte, len(wo.nonce))
copy(nonce, wo.nonce)
offset := len(wo.nonce) - 8
for i := 0; i < 8; i++ {
nonce[i+offset] ^= wo.chunkIndex[i]
}
@@ -60,9 +81,14 @@ func (wo *aeadCrypter) incrementIndex() error {
type aeadDecrypter struct {
aeadCrypter           // Embedded ciphertext opener
reader      io.Reader // 'reader' is a partialLengthReader
chunkBytes  []byte
peekedBytes []byte // Used to detect last chunk
buffer      []byte // Buffered decrypted bytes
}

// Read decrypts bytes and reads them into dst. It decrypts when necessary and
@@ -70,14 +96,25 @@ type aeadDecrypter struct {
// and an error.
func (ar *aeadDecrypter) Read(dst []byte) (n int, err error) {
// Return buffered plaintext bytes from previous calls
if len(ar.buffer) > 0 {
n = copy(dst, ar.buffer)
ar.buffer = ar.buffer[n:]
return
}

// Read a chunk
tagLen := ar.aead.Overhead()
copy(ar.chunkBytes, ar.peekedBytes) // Copy bytes peeked in previous chunk or in initialization
bytesRead, errRead := io.ReadFull(ar.reader, ar.chunkBytes[tagLen:])
if errRead != nil && errRead != io.EOF && errRead != io.ErrUnexpectedEOF {
@@ -109,6 +146,42 @@ func (ar *aeadDecrypter) Close() (err error) {
if errChunk != nil {
return errChunk
}
return nil
}

@@ -116,15 +189,31 @@ func (ar *aeadDecrypter) Close() (err error) {
// the underlying plaintext and an error. It accesses peeked bytes from next
// chunk, to identify the last chunk and decrypt/validate accordingly.
func (ar *aeadDecrypter) openChunk(data []byte) ([]byte, error) {
adata := ar.associatedData
if ar.aeadCrypter.packetTag == packetTypeAEADEncrypted {
adata = append(ar.associatedData, ar.chunkIndex...)
}

nonce := ar.computeNextNonce()
plainChunk, err := ar.aead.Open(data[:0:len(data)], nonce, data, adata)
if err != nil {
return nil, errors.ErrAEADTagVerification
}
ar.bytesProcessed += len(plainChunk)
if err = ar.aeadCrypter.incrementIndex(); err != nil {
@@ -149,8 +238,14 @@ func (ar *aeadDecrypter) validateFinalTag(tag []byte) error {
// ... and total number of encrypted octets
adata = append(adata, amountBytes...)
nonce := ar.computeNextNonce()
if _, err := ar.aead.Open(nil, nonce, tag, adata); err != nil {
return errors.ErrAEADTagVerification
}
return nil
}
@@ -160,14 +255,18 @@ func (ar *aeadDecrypter) validateFinalTag(tag []byte) error {
type aeadEncrypter struct {
aeadCrypter                // Embedded plaintext sealer
writer     io.WriteCloser  // 'writer' is a partialLengthWriter
chunkBytes []byte
offset     int
}

// Write encrypts and writes bytes. It encrypts when necessary and buffers extra
// plaintext bytes for next call. When the stream is finished, Close() MUST be
// called to append the final tag.
func (aw *aeadEncrypter) Write(plaintextBytes []byte) (n int, err error) {
for n != len(plaintextBytes) {
copied := copy(aw.chunkBytes[aw.offset:aw.chunkSize], plaintextBytes[n:])
n += copied
@@ -183,6 +282,23 @@ func (aw *aeadEncrypter) Write(plaintextBytes []byte) (n int, err error) {
return n, err
}
aw.offset = 0
}
}
return
@@ -194,8 +310,14 @@ func (aw *aeadEncrypter) Close() (err error) {
// Encrypt and write a chunk if there's buffered data left, or if we haven't
// written any chunks yet.
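// Illustrative sketch (editor's example, not part of the vendored diff): the
// aeadCrypter above derives each chunk's nonce by XOR-ing the big-endian chunk
// counter into the last eight bytes of the packet's initial nonce, while the
// SEIPD v2 path returns the nonce unchanged. The helper below is hypothetical
// and self-contained; it assumes only "encoding/binary".
func deriveChunkNonce(initial []byte, chunkIndex uint64) []byte {
	nonce := make([]byte, len(initial))
	copy(nonce, initial)
	var idx [8]byte
	binary.BigEndian.PutUint64(idx[:], chunkIndex)
	offset := len(nonce) - 8
	for i := 0; i < 8; i++ {
		nonce[i+offset] ^= idx[i] // same XOR as computeNextNonce above
	}
	return nonce
}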
if aw.offset > 0 || aw.bytesProcessed == 0 {
lastEncryptedChunk, err := aw.sealChunk(aw.chunkBytes[:aw.offset])
if err != nil {
return err
}
@@ -241,7 +363,11 @@ func (aw *aeadEncrypter) sealChunk(data []byte) ([]byte, error) {
}

nonce := aw.computeNextNonce()
encrypted := aw.aead.Seal(data[:0], nonce, data, adata)
aw.bytesProcessed += len(data)
if err := aw.aeadCrypter.incrementIndex(); err != nil {
return nil, err
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go
index 583765d87c..e05b86192c 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go
@@ -65,28 +65,45 @@ func (ae *AEADEncrypted) decrypt(key []byte) (io.ReadCloser, error) {
blockCipher := ae.cipher.new(key)
aead := ae.mode.new(blockCipher)
// Carry the first tagLen bytes
chunkSize := decodeAEADChunkSize(ae.chunkSizeByte)
tagLen := ae.mode.TagLength()
chunkBytes := make([]byte, chunkSize+tagLen*2)
peekedBytes := chunkBytes[chunkSize+tagLen:]
n, err := io.ReadFull(ae.Contents, peekedBytes)
if n < tagLen || (err != nil && err != io.EOF) {
return nil, errors.AEADError("Not enough data to decrypt:" + err.Error())
}

return &aeadDecrypter{
aeadCrypter: aeadCrypter{
aead:           aead,
chunkSize:      chunkSize,
nonce:          ae.initialNonce,
associatedData: ae.associatedData(),
chunkIndex:     make([]byte, 8),
packetTag:      packetTypeAEADEncrypted,
},
reader:      ae.Contents,
chunkBytes:  chunkBytes,
peekedBytes: peekedBytes,
}, nil
}

// associatedData for chunks: tag, version, cipher, mode, chunk size byte
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go
index 0bcb38caca..8f09f3fcf8 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go
@@ -8,10 +8,16 @@ import (
"compress/bzip2"
"compress/flate"
"compress/zlib"
"io"
"strconv"

"github.com/ProtonMail/go-crypto/openpgp/errors"
)

// Compressed represents a compressed OpenPGP packet. The decompressed contents
@@ -40,6 +46,7 @@ type CompressionConfig struct {
Level int
}

// decompressionReader ensures that the whole compression packet is read.
type decompressionReader struct {
compressed io.Reader
@@ -71,6 +78,8 @@ func (dr *decompressionReader) Read(data []byte) (n int, err error) {
return n, err
}

func (c *Compressed) parse(r io.Reader) error {
var buf [1]byte
_, err := readFull(r, buf[:])
@@ -82,6 +91,7 @@
case 0:
c.Body = r
case 1:
c.Body = newDecompressionReader(r, flate.NewReader(r))
case 2:
decompressor, err := zlib.NewReader(r)
@@ -91,6 +101,13 @@
c.Body = newDecompressionReader(r, decompressor)
case 3:
c.Body = newDecompressionReader(r, io.NopCloser(bzip2.NewReader(r)))
default:
err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0])))
}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go
index 8bf8e6e51f..801e8b13b5 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go
@@ -14,6 +14,7 @@ import (
"github.com/ProtonMail/go-crypto/openpgp/s2k"
)

var (
defaultRejectPublicKeyAlgorithms = map[PublicKeyAlgorithm]bool{
PubKeyAlgoElGamal: true,
@@ -42,6 +43,8 @@ var (
// by v6 keys, v6 signatures and SEIPDv2 encrypted data, respectively.
var V5Disabled = false

// Config collects a number of parameters along with sensible defaults.
// A nil *Config is valid and results in all default values.
type Config struct {
@@ -101,6 +104,7 @@ type Config struct {
// **Note: using this option may break compatibility with other OpenPGP
// implementations, as well as future versions of this library.**
AEADConfig *AEADConfig
// V6Keys configures version 6 key generation. If false, this package still
// supports version 6 keys, but produces version 4 keys.
V6Keys bool
@@ -111,6 +115,11 @@ type Config struct {
RejectHashAlgorithms        map[crypto.Hash]bool
RejectMessageHashAlgorithms map[crypto.Hash]bool
RejectCurves                map[Curve]bool
// "The validity period of the key. This is the number of seconds after
// the key creation time that the key expires. If this is not present
// or has a value of zero, the key never expires. This is found only on
@@ -139,17 +148,21 @@ type Config struct {
// might be no other way than to tolerate the missing MDC. Setting this flag, allows this
// mode of operation. It should be considered a measure of last resort.
InsecureAllowUnauthenticatedMessages bool
// InsecureAllowDecryptionWithSigningKeys allows decryption with keys marked as signing keys in the v2 API.
// This setting is potentially insecure, but it is needed as some libraries
// ignored key flags when selecting a key for encryption.
// Not relevant for the v1 API, as all keys were allowed in decryption.
InsecureAllowDecryptionWithSigningKeys bool
// KnownNotations is a map of Notation Data names to bools, which controls
// the notation names that are allowed to be present in critical Notation Data
// signature subpackets.
KnownNotations map[string]bool
// SignatureNotations is a list of Notations to be added to any signatures.
SignatureNotations []*Notation
// CheckIntendedRecipients controls, whether the OpenPGP Intended Recipient Fingerprint feature
// should be enabled for encryption and decryption.
// (See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-12.html#name-intended-recipient-fingerpr).
@@ -173,6 +186,8 @@ type Config struct {
// weaknesses in the hash algo, potentially hindering e.g. some chosen-prefix attacks.
// The default behavior, when the config or flag is nil, is to enable the feature.
NonDeterministicSignaturesViaNotation *bool
}

func (c *Config) Random() io.Reader {
@@ -260,7 +275,11 @@
return nil
}
// for backwards compatibility
if c.S2KCount > 0 && c.S2KConfig == nil {
return &s2k.Config{
S2KCount: c.S2KCount,
}
@@ -296,6 +315,7 @@ func (c *Config) AllowUnauthenticatedMessages() bool {
return c.InsecureAllowUnauthenticatedMessages
}

func (c *Config) AllowDecryptionWithSigningKeys() bool {
if c == nil {
return false
@@ -303,6 +323,8 @@
return c.InsecureAllowDecryptionWithSigningKeys
}

func (c *Config) KnownNotation(notationName string) bool {
if c == nil {
return false
@@ -316,6 +338,7 @@ func (c *Config) Notations() []*Notation {
}
return c.SignatureNotations
}

func (c *Config) V6() bool {
if c == nil {
@@ -408,3 +431,5 @@ func (c *Config) RandomizeSignaturesViaNotation() bool {
func BoolPointer(value bool) *bool {
return &value
}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go
index b90bb28911..4d5c8b4fe2 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go
@@ -5,11 +5,17 @@
package packet

import (
"bytes"
"crypto"
"crypto/rsa"
"encoding/binary"
"encoding/hex"
"io"
"math/big"
"strconv"
@@ -18,6 +24,7 @@ import (
"github.com/ProtonMail/go-crypto/openpgp/elgamal"
"github.com/ProtonMail/go-crypto/openpgp/errors"
"github.com/ProtonMail/go-crypto/openpgp/internal/encoding"
"github.com/ProtonMail/go-crypto/openpgp/x25519"
"github.com/ProtonMail/go-crypto/openpgp/x448"
)

@@ -97,6 +104,34 @@ func (e *EncryptedKey) parse(r io.Reader) (err error) {
}
e.Algo = PublicKeyAlgorithm(buf[0])
var cipherFunction byte
switch e.Algo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
e.encryptedMPI1 = new(encoding.MPI)
@@ -123,6 +158,7 @@ func (e *EncryptedKey) parse(r io.Reader) (err error) {
if _, err = e.encryptedMPI2.ReadFrom(r); err != nil {
return
}
case PubKeyAlgoX25519:
e.ephemeralPublicX25519, e.encryptedSession, cipherFunction, err = x25519.DecodeFields(r, e.Version == 6)
if err != nil {
@@ -142,20 +178,40 @@
}
}

_, err = consumeAll(r)
return
}

// Decrypt decrypts an encrypted session key with the given private key. The
// private key must have been decrypted first.
// If config is nil, sensible defaults will be used.
func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error {
if e.Version < 6 && e.KeyId != 0 && e.KeyId != priv.KeyId {
return errors.InvalidArgumentError("cannot decrypt encrypted session key for key id " + strconv.FormatUint(e.KeyId, 16) + " with private key id " + strconv.FormatUint(priv.KeyId, 16))
}
if e.Version == 6 && e.KeyVersion != 0 && !bytes.Equal(e.KeyFingerprint, priv.Fingerprint) {
return errors.InvalidArgumentError("cannot decrypt encrypted session key for key fingerprint " + hex.EncodeToString(e.KeyFingerprint) + " with private key fingerprint " + hex.EncodeToString(priv.Fingerprint))
}
if e.Algo != priv.PubKeyAlgo {
return errors.InvalidArgumentError("cannot decrypt encrypted session key of type " + strconv.Itoa(int(e.Algo)) + " with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo)))
}
@@ -181,6 +237,7 @@
vsG := e.encryptedMPI1.Bytes()
m := e.encryptedMPI2.Bytes()
oid := priv.PublicKey.oid.EncodedBytes()
fp := priv.PublicKey.Fingerprint[:]
if priv.PublicKey.Version == 5 {
// For v5, the fingerprint must be restricted to 20 bytes
@@ -194,10 +251,18 @@
default:
err = errors.InvalidArgumentError("cannot decrypt encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo)))
}
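// Illustrative sketch (editor's example, not part of the vendored diff): the
// session-key material handled here is guarded by an additive 16-bit checksum
// (see encodeChecksumKey further below). A hypothetical standalone encoder for
// the cipher-ID / key / checksum layout:
func encodeSessionKeyBlock(cipherID byte, key []byte) []byte {
	var sum uint16
	for _, b := range key {
		sum += uint16(b) // sum of key octets, mod 65536 by wraparound
	}
	out := make([]byte, 0, 1+len(key)+2)
	out = append(out, cipherID)
	out = append(out, key...)
	return append(out, byte(sum>>8), byte(sum)) // big-endian trailer
}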
if err != nil {
return err
}

var key []byte
switch priv.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH:
@@ -227,11 +292,26 @@ func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error {
return errors.UnsupportedError("unsupported algorithm for decryption")
}
e.Key = key
return nil
}

// Serialize writes the encrypted key packet, e, to w.
func (e *EncryptedKey) Serialize(w io.Writer) error {
var encodedLength int
switch e.Algo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
@@ -244,10 +324,21 @@ func (e *EncryptedKey) Serialize(w io.Writer) error {
encodedLength = x25519.EncodedFieldsLength(e.encryptedSession, e.Version == 6)
case PubKeyAlgoX448:
encodedLength = x448.EncodedFieldsLength(e.encryptedSession, e.Version == 6)
default:
return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo)))
}

packetLen := versionSize /* version */ + keyIdSize /* key id */ + algorithmSize /* algo */ + encodedLength
if e.Version == 6 {
packetLen = versionSize /* version */ + algorithmSize /* algo */ + encodedLength + keyVersionSize /* key version */
@@ -259,10 +350,14 @@
}

err := serializeHeader(w, packetTypeEncryptedKey, packetLen)
if err != nil {
return err
}

_, err = w.Write([]byte{byte(e.Version)})
if err != nil {
return err
@@ -291,6 +386,11 @@
if err != nil {
return err
}

switch e.Algo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
@@ -308,17 +408,21 @@
}
_, err := w.Write(e.encryptedMPI2.EncodedBytes())
return err
case PubKeyAlgoX25519:
err := x25519.EncodeFields(w, e.ephemeralPublicX25519, e.encryptedSession, byte(e.CipherFunc), e.Version == 6)
return err
case PubKeyAlgoX448:
err := x448.EncodeFields(w, e.ephemeralPublicX448, e.encryptedSession, byte(e.CipherFunc), e.Version == 6)
return err
default:
panic("internal error")
}
}

// SerializeEncryptedKeyAEAD serializes an encrypted key packet to w that contains
// key, encrypted to pub.
// If aeadSupported is set, PKESK v6 is used, otherwise v3.
@@ -417,6 +521,31 @@ func SerializeEncryptedKeyAEADwithHiddenOption(w io.Writer, pub *PublicKey, ciph
return serializeEncryptedKeyX25519(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*x25519.PublicKey), keyBlock, byte(cipherFunc), version)
case PubKeyAlgoX448:
return serializeEncryptedKeyX448(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*x448.PublicKey), keyBlock, byte(cipherFunc), version)
case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly:
return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo)))
}
@@ -424,6 +553,7 @@ func SerializeEncryptedKeyAEADwithHiddenOption(w io.Writer, pub *PublicKey, ciph
return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo)))
}

// SerializeEncryptedKey serializes an encrypted key packet to w that contains
// key, encrypted to pub.
// PKESKv6 is used if config.AEAD() is not nil.
@@ -443,13 +573,20 @@
}

func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header []byte, pub *rsa.PublicKey, keyBlock []byte) error {
cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock)
if err != nil {
return errors.InvalidArgumentError("RSA encryption failed: " + err.Error())
}

cipherMPI := encoding.NewMPI(cipherText)
packetLen := len(header) /* header length */ + int(cipherMPI.EncodedLength())

err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
if err != nil {
@@ -463,13 +600,21 @@ func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header []byte, pub *r
return err
}

func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header []byte, pub *elgamal.PublicKey, keyBlock []byte) error {
c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock)
if err != nil {
return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error())
}

packetLen := len(header) /* header length */
packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8
packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8

@@ -488,7 +633,11 @@ func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header []byte, pu
return err
}

func serializeEncryptedKeyECDH(w io.Writer, rand io.Reader, header []byte, pub *ecdh.PublicKey, keyBlock []byte, oid encoding.Field, fingerprint []byte) error {
vsG, c, err := ecdh.Encrypt(rand, pub, keyBlock, oid.EncodedBytes(), fingerprint)
if err != nil {
return errors.InvalidArgumentError("ECDH encryption failed: " + err.Error())
}

g := encoding.NewMPI(vsG)
m := encoding.NewOID(c)
packetLen := len(header) /* header length */
packetLen += int(g.EncodedLength()) + int(m.EncodedLength())

err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
@@ -515,6 +668,7 @@
_, err = w.Write(m.EncodedBytes())
return err
}

func serializeEncryptedKeyX25519(w io.Writer, rand io.Reader, header []byte, pub *x25519.PublicKey, keyBlock []byte, cipherFunc byte, version int) error {
ephemeralPublicX25519, ciphertext, err := x25519.Encrypt(rand, pub, keyBlock)
@@ -582,3 +736,5 @@ func encodeChecksumKey(buffer []byte, key []byte) {
buffer[len(key)] = byte(checksum >> 8)
buffer[len(key)+1] = byte(checksum)
}
diff --git
a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go
index 8a028c8a17..7ebb7ecbb0 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go
@@ -58,9 +58,15 @@ func (l *LiteralData) parse(r io.Reader) (err error) {
// on completion. The fileName is truncated to 255 bytes.
func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) {
var buf [4]byte
buf[0] = 'b'
if !isBinary {
buf[0] = 'u'
}
if len(fileName) > 255 {
fileName = fileName[:255]
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go
index f393c4063b..79f3d9d797 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go
@@ -7,16 +7,24 @@
package packet

import (
"crypto"
"encoding/binary"
"io"
"strconv"

"github.com/ProtonMail/go-crypto/openpgp/errors"
"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
)

// OnePassSignature represents a one-pass signature packet. See RFC 4880,
// section 5.4.
type OnePassSignature struct {
Version int
SigType SignatureType
Hash    crypto.Hash
@@ -38,6 +46,27 @@ func (ops *OnePassSignature) parse(r io.Reader) (err error) {
return errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0])))
}
ops.Version = int(buf[0])

var ok bool
ops.Hash, ok = algorithm.HashIdToHashWithSha1(buf[2])
@@ -47,6 +76,7 @@

ops.SigType = SignatureType(buf[1])
ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3])

if ops.Version == 6 {
// Only for v6, a variable-length field containing the salt
@@ -92,11 +122,16 @@
return
}
ops.IsLast = buf[0] != 0
return
}

// Serialize marshals the given OnePassSignature to w.
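// Illustrative sketch (editor's example, not part of the vendored diff):
// SerializeLiteral above picks the literal-packet format octet ('b' for
// binary, 'u' for UTF-8 text) and truncates the file name to 255 bytes. A
// hypothetical helper assembling those header fields per RFC 4880 section
// 5.9, assuming "encoding/binary":
func literalHeader(isBinary bool, fileName string, epoch uint32) []byte {
	format := byte('u')
	if isBinary {
		format = 'b'
	}
	if len(fileName) > 255 {
		fileName = fileName[:255] // same cap as SerializeLiteral above
	}
	hdr := []byte{format, byte(len(fileName))}
	hdr = append(hdr, fileName...)
	var t [4]byte
	binary.BigEndian.PutUint32(t[:], epoch) // four-octet modification date
	return append(hdr, t[:]...)
}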
func (ops *OnePassSignature) Serialize(w io.Writer) error {
//v3 length 1+1+1+1+8+1 =
packetLength := 13
if ops.Version == 6 {
@@ -110,6 +145,10 @@ func (ops *OnePassSignature) Serialize(w io.Writer) error {

var buf [8]byte
buf[0] = byte(ops.Version)
buf[1] = uint8(ops.SigType)
var ok bool
buf[2], ok = algorithm.HashToHashIdWithSha1(ops.Hash)
@@ -117,6 +156,7 @@
return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash)))
}
buf[3] = uint8(ops.PubKeyAlgo)

_, err := w.Write(buf[:4])
if err != nil {
@@ -153,5 +193,16 @@
}
_, err = w.Write(isLast)
return err
}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go
index cef7c661d3..b05c5d1641 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go
@@ -7,6 +7,10 @@
package packet

import (
"bytes"
"io"

"github.com/ProtonMail/go-crypto/openpgp/errors"
)

@@ -25,7 +29,11 @@ type OpaquePacket struct {
}

func (op *OpaquePacket) parse(r io.Reader) (err error) {
op.Contents, err = io.ReadAll(r)
return
}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go
index 1e92e22c97..26121d3fc1 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go
@@ -311,15 +311,22 @@ const (
packetTypePrivateSubkey                            packetType = 7
packetTypeCompressed                               packetType = 8
packetTypeSymmetricallyEncrypted                   packetType = 9
packetTypeMarker                                   packetType = 10
packetTypeLiteralData                              packetType = 11
packetTypeTrust                                    packetType = 12
packetTypeUserId                                   packetType = 13
packetTypePublicSubkey                             packetType = 14
packetTypeUserAttribute                            packetType = 17
packetTypeSymmetricallyEncryptedIntegrityProtected packetType = 18
packetTypeAEADEncrypted                            packetType = 20
packetPadding                                      packetType = 21
)

// EncryptedDataPacket holds encrypted data. It is currently implemented by
@@ -331,7 +338,11 @@ type EncryptedDataPacket interface {
// Read reads a single OpenPGP packet from the given io.Reader. If there is an
// error parsing a packet, the whole packet is consumed from the input.
func Read(r io.Reader) (p Packet, err error) {
tag, len, contents, err := readHeader(r)
if err != nil {
return
}
@@ -370,6 +381,7 @@ func Read(r io.Reader) (p Packet, err error) {
p = se
case packetTypeAEADEncrypted:
p = new(AEADEncrypted)
case packetPadding:
p = Padding(len)
case packetTypeMarker:
@@ -457,6 +469,10 @@ func ReadWithCheck(r io.Reader, sequence *SequenceVerifier) (p Packet, msgErr er
} else {
err = errors.UnknownPacketTypeError(tag)
}
}
if p != nil {
err = p.parse(contents)
@@ -473,6 +489,7 @@ type SignatureType uint8

const (
SigTypeBinary      SignatureType = 0x00
SigTypeText        SignatureType = 0x01
SigTypeGenericCert SignatureType = 0x10
SigTypePersonaCert SignatureType = 0x11
@@ -484,6 +501,19 @@ const (
SigTypeKeyRevocation           SignatureType = 0x20
SigTypeSubkeyRevocation        SignatureType = 0x28
SigTypeCertificationRevocation SignatureType = 0x30
)

// PublicKeyAlgorithm represents the different public key system specified for
@@ -500,11 +530,14 @@ const (
PubKeyAlgoECDSA PublicKeyAlgorithm = 19
// https://www.ietf.org/archive/id/draft-koch-eddsa-for-openpgp-04.txt
PubKeyAlgoEdDSA PublicKeyAlgorithm = 22
// https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh
PubKeyAlgoX25519  PublicKeyAlgorithm = 25
PubKeyAlgoX448    PublicKeyAlgorithm = 26
PubKeyAlgoEd25519 PublicKeyAlgorithm = 27
PubKeyAlgoEd448   PublicKeyAlgorithm = 28

// Deprecated in RFC 4880, Section 13.5. Use key flags instead.
PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2
@@ -515,7 +548,11 @@ const (
// key of the given type.
func (pka PublicKeyAlgorithm) CanEncrypt() bool {
switch pka {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH, PubKeyAlgoX25519, PubKeyAlgoX448:
return true
}
return false
@@ -525,7 +562,11 @@ func (pka PublicKeyAlgorithm) CanEncrypt() bool {
// sign a message.
func (pka PublicKeyAlgorithm) CanSign() bool {
switch pka {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA, PubKeyAlgoEd25519, PubKeyAlgoEd448:
return true
}
return false
@@ -605,11 +646,14 @@ func (mode AEADMode) TagLength() int {
return algorithm.AEADMode(mode).TagLength()
}

// IsSupported returns true if the aead mode is supported from the library
func (mode AEADMode) IsSupported() bool {
return algorithm.AEADMode(mode).TagLength() > 0
}

// new returns a fresh instance of the given mode.
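// Illustrative sketch (editor's example, not part of the vendored diff): the
// CanEncrypt/CanSign methods above are plain switches over the algorithm ID,
// so capability checks compose cheaply. A hypothetical filter keeping only
// encryption-capable algorithm IDs, assuming the openpgp/packet import:
func encryptionCapable(algos []packet.PublicKeyAlgorithm) []packet.PublicKeyAlgorithm {
	var out []packet.PublicKeyAlgorithm
	for _, a := range algos {
		if a.CanEncrypt() { // true for RSA, ElGamal, ECDH, X25519, X448 per the switch above
			out = append(out, a)
		}
	}
	return out
}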
func (mode AEADMode) new(block cipher.Block) cipher.AEAD {
return algorithm.AEADMode(mode).New(block)
@@ -624,6 +668,7 @@ const (
KeySuperseded  ReasonForRevocation = 1
KeyCompromised ReasonForRevocation = 2
KeyRetired     ReasonForRevocation = 3
UserIDNotValid ReasonForRevocation = 32
Unknown        ReasonForRevocation = 200
)

@@ -635,6 +680,10 @@ func NewReasonForRevocation(value byte) ReasonForRevocation {
return Unknown
}

// Curve is a mapping to supported ECC curves for key generation.
// See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-06.html#name-curve-specific-wire-formats
type Curve string
@@ -656,6 +705,7 @@ type TrustLevel uint8

// TrustAmount represents a trust amount per RFC4880 5.2.3.13
type TrustAmount uint8

const (
// versionSize is the length in bytes of the version value.
@@ -673,3 +723,5 @@ const (
// fingerprintSize is the length in bytes of the key fingerprint.
fingerprintSize = 20
)
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go
index f04e6c6b87..d2dae47441 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go
@@ -9,28 +9,42 @@ import (
"crypto"
"crypto/cipher"
"crypto/dsa"
"crypto/rsa"
"crypto/sha1"
"crypto/sha256"
"crypto/subtle"
"fmt"
"io"
"math/big"
"strconv"
"time"

"github.com/ProtonMail/go-crypto/openpgp/ecdh"
"github.com/ProtonMail/go-crypto/openpgp/ecdsa"
"github.com/ProtonMail/go-crypto/openpgp/ed25519"
"github.com/ProtonMail/go-crypto/openpgp/ed448"
"github.com/ProtonMail/go-crypto/openpgp/eddsa"
"github.com/ProtonMail/go-crypto/openpgp/elgamal"
"github.com/ProtonMail/go-crypto/openpgp/errors"
"github.com/ProtonMail/go-crypto/openpgp/internal/encoding"
"github.com/ProtonMail/go-crypto/openpgp/s2k"
"github.com/ProtonMail/go-crypto/openpgp/x25519"
"github.com/ProtonMail/go-crypto/openpgp/x448"
"golang.org/x/crypto/hkdf"
)

// PrivateKey represents a possibly encrypted private key. See RFC 4880,
@@ -41,6 +55,7 @@ type PrivateKey struct {
encryptedData []byte
cipher        CipherFunction
s2k           func(out, in []byte)
aead          AEADMode // only relevant if S2KAEAD is enabled
// An *{rsa|dsa|elgamal|ecdh|ecdsa|ed25519|ed448}.PrivateKey or
// crypto.Signer/crypto.Decrypter (Decryptor RSA only).
@@ -49,6 +64,16 @@ type PrivateKey struct {

// Type of encryption of the S2K packet
// Allowed values are 0 (Not encrypted), 253 (AEAD), 254 (SHA1), or
// 255 (2-byte checksum)
s2kType S2KType
// Full parameters of the S2K packet
@@ -61,8 +86,11 @@ type S2KType uint8

const (
// S2KNON unencrypt
S2KNON S2KType = 0
// S2KAEAD use authenticated encryption
S2KAEAD S2KType = 253
// S2KSHA1 sha1 sum check
S2KSHA1 S2KType = 254
// S2KCHECKSUM sum check
@@ -111,6 +139,7 @@ func NewECDHPrivateKey(creationTime time.Time, priv *ecdh.PrivateKey) *PrivateKe
return pk
}

func NewX25519PrivateKey(creationTime time.Time, priv *x25519.PrivateKey) *PrivateKey {
pk := new(PrivateKey)
pk.PublicKey = *NewX25519PublicKey(creationTime, &priv.PublicKey)
@@ -139,6 +168,8 @@ func NewEd448PrivateKey(creationTime time.Time, priv *ed448.PrivateKey) *Private
return pk
}

// NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that
// implements RSA, ECDSA or EdDSA.
func NewSignerPrivateKey(creationTime time.Time, signer interface{}) *PrivateKey {
@@ -158,6 +189,7 @@ func NewSignerPrivateKey(creationTime time.Time, signer interface{}) *PrivateKey
pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pubkey.PublicKey)
case eddsa.PrivateKey:
pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pubkey.PublicKey)
case *ed25519.PrivateKey:
pk.PublicKey = *NewEd25519PublicKey(creationTime, &pubkey.PublicKey)
case ed25519.PrivateKey:
@@ -166,6 +198,8 @@ func NewSignerPrivateKey(creationTime time.Time, signer interface{}) *PrivateKey
pk.PublicKey = *NewEd448PublicKey(creationTime, &pubkey.PublicKey)
case ed448.PrivateKey:
pk.PublicKey = *NewEd448PublicKey(creationTime, &pubkey.PublicKey)
default:
panic("openpgp: unknown signer type in NewSignerPrivateKey")
}
@@ -173,7 +207,11 @@ func NewSignerPrivateKey(creationTime time.Time, signer interface{}) *PrivateKey
return pk
}

// NewDecrypterPrivateKey creates a PrivateKey from a *{rsa|elgamal|ecdh|x25519|x448}.PrivateKey.
func NewDecrypterPrivateKey(creationTime time.Time, decrypter interface{}) *PrivateKey {
pk := new(PrivateKey)
switch priv := decrypter.(type) {
@@ -183,10 +221,13 @@ func NewDecrypterPrivateKey(creationTime time.Time, decrypter interface{}) *Priv
pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey)
case *ecdh.PrivateKey:
pk.PublicKey = *NewECDHPublicKey(creationTime, &priv.PublicKey)
case *x25519.PrivateKey:
pk.PublicKey = *NewX25519PublicKey(creationTime, &priv.PublicKey)
case *x448.PrivateKey:
pk.PublicKey = *NewX448PublicKey(creationTime, &priv.PublicKey)
default:
panic("openpgp: unknown decrypter type in NewDecrypterPrivateKey")
}
@@ -200,11 +241,14 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) {
return
}
v5 := pk.PublicKey.Version == 5
v6 := pk.PublicKey.Version == 6
if V5Disabled && v5 {
return errors.UnsupportedError("support for parsing v5 entities is disabled; build with `-tags v5` if needed")
}

var buf [1]byte
_, err = readFull(r, buf[:])
@@ -213,7 +257,11 @@
}
pk.s2kType = S2KType(buf[0])
var optCount [1]byte
if v5 || (v6 && pk.s2kType != S2KNON) {
if _, err = readFull(r, optCount[:]); err != nil {
return
}
@@ -223,9 +271,15 @@
case S2KNON:
pk.s2k = nil
pk.Encrypted = false
case S2KSHA1, S2KCHECKSUM, S2KAEAD:
if (v5 || v6) && pk.s2kType == S2KCHECKSUM {
return errors.StructuralError(fmt.Sprintf("wrong s2k identifier for version %d", pk.Version))
}
_, err = readFull(r, buf[:])
if err != nil {
@@ -235,6 +289,7 @@
if pk.cipher != 0 && !pk.cipher.IsSupported() {
return errors.UnsupportedError("unsupported cipher function in private key")
}
// [Optional] If string-to-key usage octet was 253,
// a one-octet AEAD algorithm.
if pk.s2kType == S2KAEAD {
@@ -258,6 +313,8 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) {
}
}

pk.s2kParams, err = s2k.ParseIntoParams(r)
if err != nil {
return
@@ -265,22 +322,32 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) {
if pk.s2kParams.Dummy() {
return
}
if pk.s2kParams.Mode() == s2k.Argon2S2K && pk.s2kType != S2KAEAD {
return errors.StructuralError("using Argon2 S2K without AEAD is not allowed")
}
if pk.s2kParams.Mode() == s2k.SimpleS2K && pk.Version == 6 {
return errors.StructuralError("using Simple S2K with version 6 keys is not allowed")
}
pk.s2k, err = pk.s2kParams.Function()
if err != nil {
return
}
pk.Encrypted = true
default:
return errors.UnsupportedError("deprecated s2k function in private key")
}

if pk.Encrypted {
var ivSize int
// If the S2K usage octet was 253, the IV is of the size expected by the AEAD mode,
// unless it's a version 5 key, in which case it's the size of the symmetric cipher's block size.
@@ -295,13 +362,23 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) {
return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher)))
}
pk.iv = make([]byte, ivSize)
_, err = readFull(r, pk.iv)
if err != nil {
return
}
if v5 && pk.s2kType == S2KAEAD {
pk.iv = pk.iv[:pk.aead.IvLength()]
}
}

var privateKeyData []byte
@@ -321,7 +398,11 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) {
return
}
} else {
privateKeyData, err = io.ReadAll(r)
if err != nil {
return
}
@@ -330,6 +411,7 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) {
if len(privateKeyData) < 2 {
return errors.StructuralError("truncated private key data")
}
if pk.Version != 6 {
// checksum
var sum uint16
@@ -346,6 +428,18 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) {
// No checksum
return pk.parsePrivateKey(privateKeyData)
}
}

pk.encryptedData = privateKeyData
@@ -377,6 +471,7 @@ func (pk *PrivateKey) Serialize(w io.Writer) (err error) {
optional := bytes.NewBuffer(nil)
if pk.Encrypted || pk.Dummy() {
// [Optional] If string-to-key usage octet was 255, 254, or 253,
// a one-octet symmetric encryption algorithm.
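// Illustrative sketch (editor's example, not part of the vendored diff): the
// parse code above dispatches on the string-to-key usage octet. A hypothetical
// human-readable mapping of the values handled by that switch:
func s2kUsageDescription(usage byte) string {
	switch usage {
	case 0:
		return "not encrypted (S2KNON)"
	case 253:
		return "encrypted with AEAD protection (S2KAEAD)"
	case 254:
		return "encrypted with SHA-1 integrity check (S2KSHA1)"
	case 255:
		return "encrypted with a two-octet checksum (S2KCHECKSUM)"
	default:
		return "unknown or deprecated S2K usage"
	}
}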
+		if _, err = optional.Write([]byte{uint8(pk.cipher)}); err != nil {
@@ -430,6 +525,20 @@ func (pk *PrivateKey) Serialize(w io.Writer) (err error) {
+	if _, err := io.Copy(contents, optional); err != nil {
+		return err
+	}
-		optional.Write([]byte{uint8(pk.cipher)})
-		if err := pk.s2kParams.Serialize(optional); err != nil {
-			return err
-		}
-		if pk.Encrypted {
-			optional.Write(pk.iv)
-		}
-	}
-	if pk.Version == 5 {
-		contents.Write([]byte{uint8(optional.Len())})
-	}
-	io.Copy(contents, optional)

 	if !pk.Dummy() {
 		l := 0
@@ -441,10 +550,15 @@ func (pk *PrivateKey) Serialize(w io.Writer) (err error) {
 			return err
 		}
 		l = buf.Len()
-		checksum := mod64kHash(buf.Bytes())
-		buf.Write([]byte{byte(checksum >> 8), byte(checksum)})
+		if pk.Version != 6 {
+			checksum := mod64kHash(buf.Bytes())
+			buf.Write([]byte{byte(checksum >> 8), byte(checksum)})
+		}
 		priv = buf.Bytes()
 	} else {
 		priv, l = pk.encryptedData, len(pk.encryptedData)
@@ -510,6 +624,7 @@ func serializeECDHPrivateKey(w io.Writer, priv *ecdh.PrivateKey) error {
 	return err
 }

+func serializeX25519PrivateKey(w io.Writer, priv *x25519.PrivateKey) error {
+	_, err := w.Write(priv.Secret)
+	return err
+}
@@ -530,6 +645,8 @@ func serializeEd448PrivateKey(w io.Writer, priv *ed448.PrivateKey) error {
+	return err
+}
+
 // decrypt decrypts an encrypted private key using a decryption key.
 func (pk *PrivateKey) decrypt(decryptionKey []byte) error {
 	if pk.Dummy() {
@@ -538,6 +655,7 @@ func (pk *PrivateKey) decrypt(decryptionKey []byte) error {
 	if !pk.Encrypted {
 		return nil
 	}
+	block := pk.cipher.new(decryptionKey)
+	var data []byte
+	switch pk.s2kType {
@@ -583,6 +701,39 @@ func (pk *PrivateKey) decrypt(decryptionKey []byte) error {
+		}
+	default:
+		return errors.InvalidArgumentError("invalid s2k type")
-
-	block := pk.cipher.new(decryptionKey)
-	cfb := cipher.NewCFBDecrypter(block, pk.iv)
-
-	data := make([]byte, len(pk.encryptedData))
-	cfb.XORKeyStream(data, pk.encryptedData)
-
-	if pk.sha1Checksum {
-		if len(data) < sha1.Size {
-			return errors.StructuralError("truncated private key data")
-		}
-		h := sha1.New()
-		h.Write(data[:len(data)-sha1.Size])
-		sum := h.Sum(nil)
-		if !bytes.Equal(sum, data[len(data)-sha1.Size:]) {
-			return errors.StructuralError("private key checksum failure")
-		}
-		data = data[:len(data)-sha1.Size]
-	} else {
-		if len(data) < 2 {
-			return errors.StructuralError("truncated private key data")
-		}
-		var sum uint16
-		for i := 0; i < len(data)-2; i++ {
-			sum += uint16(data[i])
-		}
-		if data[len(data)-2] != uint8(sum>>8) ||
-			data[len(data)-1] != uint8(sum) {
-			return errors.StructuralError("private key checksum failure")
-		}
-		data = data[:len(data)-2]
 	}

 	err := pk.parsePrivateKey(data)
@@ -598,6 +749,10 @@ func (pk *PrivateKey) decrypt(decryptionKey []byte) error {
 	pk.s2k = nil
 	pk.Encrypted = false
 	pk.encryptedData = nil
-
 	return nil
 }

@@ -613,9 +768,12 @@ func (pk *PrivateKey) decryptWithCache(passphrase []byte, keyCache *s2k.Cache) e
 	if err != nil {
 		return err
 	}
+	if pk.s2kType == S2KAEAD {
+		key = pk.applyHKDF(key)
+	}
 	return pk.decrypt(key)
 }

@@ -630,14 +788,21 @@ func (pk *PrivateKey) Decrypt(passphrase []byte) error {
 	key := make([]byte, pk.cipher.KeySize())
 	pk.s2k(key, passphrase)
+	if pk.s2kType == S2KAEAD {
+		key = pk.applyHKDF(key)
+	}
 	return pk.decrypt(key)
 }

 // DecryptPrivateKeys decrypts all encrypted keys with the given config and passphrase.
 // Avoids recomputation of similar s2k key derivations.
 func DecryptPrivateKeys(keys []*PrivateKey, passphrase []byte) error {
 	// Create a cache to avoid recomputation of key derivations for the same passphrase.
 	s2kCache := &s2k.Cache{}
@@ -653,7 +818,11 @@ func DecryptPrivateKeys(keys []*PrivateKey, passphrase []byte) error {
 }
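For context on the hunk above: DecryptPrivateKeys derives the symmetric key once per distinct S2K parameter set and reuses it through the shared s2k.Cache. A minimal usage sketch, assuming an *openpgp.Entity from this library; the helper name decryptAll is illustrative and not part of the patch:

	// Decrypt a primary key and all subkeys with a single passphrase; the
	// cache inside DecryptPrivateKeys avoids re-deriving identical S2K keys.
	func decryptAll(e *openpgp.Entity, passphrase []byte) error {
		keys := []*packet.PrivateKey{e.PrivateKey}
		for _, sub := range e.Subkeys {
			keys = append(keys, sub.PrivateKey)
		}
		return packet.DecryptPrivateKeys(keys, passphrase)
	}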
 // encrypt encrypts an unencrypted private key.
-func (pk *PrivateKey) encrypt(key []byte, params *s2k.Params, cipherFunction CipherFunction) error {
+func (pk *PrivateKey) encrypt(key []byte, params *s2k.Params, s2kType S2KType, cipherFunction CipherFunction, rand io.Reader) error {
 	if pk.Dummy() {
 		return errors.ErrDummyPrivateKey("dummy key found")
 	}
@@ -664,6 +833,7 @@ func (pk *PrivateKey) encrypt(key []byte, params *s2k.Params, s2kType S2KType, c
 	if len(key) != cipherFunction.KeySize() {
 		return errors.InvalidArgumentError("supplied encryption key has the wrong size")
 	}
+
+	if params.Mode() == s2k.Argon2S2K && s2kType != S2KAEAD {
+		return errors.InvalidArgumentError("using Argon2 S2K without AEAD is not allowed")
@@ -673,6 +843,9 @@ func (pk *PrivateKey) encrypt(key []byte, params *s2k.Params, s2kType S2KType, c
+		return errors.InvalidArgumentError("insecure S2K mode")
+	}
-

 	priv := bytes.NewBuffer(nil)
 	err := pk.serializePrivateKey(priv)
 	if err != nil {
@@ -684,6 +857,7 @@ func (pk *PrivateKey) encrypt(key []byte, params *s2k.Params, s2kType S2KType, c
 	pk.s2k, err = pk.s2kParams.Function()
 	if err != nil {
 		return err
 	}

+	privateKeyBytes := priv.Bytes()
+	switch s2kType {
@@ -731,6 +905,37 @@ func (pk *PrivateKey) encrypt(key []byte, params *s2k.Params, s2kType S2KType, c
+	default:
+		return errors.InvalidArgumentError("invalid s2k type for encryption")
+	}
-	}
-
-	privateKeyBytes := priv.Bytes()
-	pk.sha1Checksum = true
-	block := pk.cipher.new(key)
-	pk.iv = make([]byte, pk.cipher.blockSize())
-	_, err = rand.Read(pk.iv)
-	if err != nil {
-		return err
-	}
-	cfb := cipher.NewCFBEncrypter(block, pk.iv)
-
-	if pk.sha1Checksum {
-		pk.s2kType = S2KSHA1
-		h := sha1.New()
-		h.Write(privateKeyBytes)
-		sum := h.Sum(nil)
-		privateKeyBytes = append(privateKeyBytes, sum...)
-	} else {
-		pk.s2kType = S2KCHECKSUM
-		var sum uint16
-		for _, b := range privateKeyBytes {
-			sum += uint16(b)
-		}
-		priv.Write([]byte{uint8(sum >> 8), uint8(sum)})
-	}
-
-	pk.encryptedData = make([]byte, len(privateKeyBytes))
-	cfb.XORKeyStream(pk.encryptedData, privateKeyBytes)

 	pk.Encrypted = true
 	pk.PrivateKey = nil
 	return err
@@ -749,6 +954,7 @@ func (pk *PrivateKey) EncryptWithConfig(passphrase []byte, config *Config) error
 		return err
 	}
 	s2k(key, passphrase)
+	s2kType := S2KSHA1
+	if config.AEAD() != nil {
+		s2kType = S2KAEAD
@@ -758,6 +964,10 @@ func (pk *PrivateKey) EncryptWithConfig(passphrase []byte, config *Config) error
+	}
 	// Encrypt the private key with the derived encryption key.
-	return pk.encrypt(key, params, config.Cipher())
+	return pk.encrypt(key, params, s2kType, config.Cipher(), config.Random())
 }

 // EncryptPrivateKeys encrypts all unencrypted keys with the given config and passphrase.
@@ -776,6 +986,7 @@ func EncryptPrivateKeys(keys []*PrivateKey, passphrase []byte, config *Config) e
 	s2k(encryptionKey, passphrase)
 	for _, key := range keys {
 		if key != nil && !key.Dummy() && !key.Encrypted {
-			err = key.encrypt(encryptionKey, params, config.Cipher())
+			s2kType := S2KSHA1
+			if config.AEAD() != nil {
+				s2kType = S2KAEAD
@@ -786,6 +997,9 @@ func EncryptPrivateKeys(keys []*PrivateKey, passphrase []byte, config *Config) e
+			} else {
+				err = key.encrypt(encryptionKey, params, s2kType, config.Cipher(), config.Random())
+			}
 			if err != nil {
 				return err
 			}
@@ -802,7 +1016,11 @@ func (pk *PrivateKey) Encrypt(passphrase []byte) error {
 			S2KMode:  s2k.IteratedSaltedS2K,
 			S2KCount: 65536,
 			Hash:     crypto.SHA256,
-		} ,
+		},
 		DefaultCipher: CipherAES256,
 	}
 	return pk.EncryptWithConfig(passphrase, config)
@@ -822,6 +1040,7 @@ func (pk *PrivateKey) serializePrivateKey(w io.Writer) (err error) {
 		err = serializeEdDSAPrivateKey(w, priv)
 	case *ecdh.PrivateKey:
 		err = serializeECDHPrivateKey(w, priv)
+	case *x25519.PrivateKey:
+		err = serializeX25519PrivateKey(w, priv)
+	case *x448.PrivateKey:
@@ -830,6 +1049,8 @@ func (pk *PrivateKey) serializePrivateKey(w io.Writer) (err error) {
+	case *ed25519.PrivateKey:
+		err = serializeEd25519PrivateKey(w, priv)
+	case *ed448.PrivateKey:
+		err = serializeEd448PrivateKey(w, priv)
 	default:
 		err = errors.InvalidArgumentError("unknown private key type")
 	}
@@ -850,6 +1071,7 @@ func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) {
 		return pk.parseECDHPrivateKey(data)
 	case PubKeyAlgoEdDSA:
 		return pk.parseEdDSAPrivateKey(data)
+	case PubKeyAlgoX25519:
+		return pk.parseX25519PrivateKey(data)
+	case PubKeyAlgoX448:
@@ -862,6 +1084,10 @@ func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) {
+	default:
+		err = errors.StructuralError("unknown private key type")
+		return
+	}
-	}
-	panic("impossible")
 }

 func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) {
@@ -982,6 +1208,7 @@ func (pk *PrivateKey) parseECDHPrivateKey(data []byte) (err error) {
 	return nil
 }

+func (pk *PrivateKey) parseX25519PrivateKey(data []byte) (err error) {
+	publicKey := pk.PublicKey.PublicKey.(*x25519.PublicKey)
+	privateKey := x25519.NewPrivateKey(*publicKey)
@@ -1062,6 +1289,8 @@ func (pk *PrivateKey) parseEd448PrivateKey(data []byte) (err error) {
+	return nil
+}
+
 func (pk *PrivateKey) parseEdDSAPrivateKey(data []byte) (err error) {
 	eddsaPub := pk.PublicKey.PublicKey.(*eddsa.PublicKey)
 	eddsaPriv := eddsa.NewPrivateKey(*eddsaPub)
@@ -1086,6 +1315,7 @@ func (pk *PrivateKey) parseEdDSAPrivateKey(data []byte) (err error) {
 	return nil
 }

+func (pk *PrivateKey) additionalData() ([]byte, error) {
+	additionalData := bytes.NewBuffer(nil)
+	// Write additional data prefix based on packet type
@@ -1121,6 +1351,8 @@ func (pk *PrivateKey) applyHKDF(inputKey []byte) []byte {
+	return encryptionKey
+}
+
 func validateDSAParameters(priv *dsa.PrivateKey) error {
 	p := priv.P // group prime
 	q := priv.Q // subgroup order
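For reference, the legacy private-key checksum handled in the parse and serialize hunks above is a 16-bit sum of all key-material octets, appended big-endian (RFC 4880, section 5.5.3); v6 keys drop it. A standalone sketch of that arithmetic, outside the patch:

	// mod64kChecksum mirrors the mod-65536 checksum logic shown in the diff:
	// uint16 addition wraps, so no explicit modulo is needed.
	func mod64kChecksum(data []byte) uint16 {
		var sum uint16
		for _, b := range data {
			sum += uint16(b)
		}
		return sum
	}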
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go
index f8da781bbe..8ef023ab3b 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go
@@ -5,6 +5,10 @@ package packet

 import (
-	"crypto"
 	"crypto/dsa"
 	"crypto/rsa"
 	"crypto/sha1"
@@ -20,24 +24,39 @@ import (

 	"github.com/ProtonMail/go-crypto/openpgp/ecdh"
 	"github.com/ProtonMail/go-crypto/openpgp/ecdsa"
+	"github.com/ProtonMail/go-crypto/openpgp/ed25519"
+	"github.com/ProtonMail/go-crypto/openpgp/ed448"
 	"github.com/ProtonMail/go-crypto/openpgp/eddsa"
 	"github.com/ProtonMail/go-crypto/openpgp/elgamal"
 	"github.com/ProtonMail/go-crypto/openpgp/errors"
 	"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
 	"github.com/ProtonMail/go-crypto/openpgp/internal/ecc"
 	"github.com/ProtonMail/go-crypto/openpgp/internal/encoding"
+	"github.com/ProtonMail/go-crypto/openpgp/x25519"
+	"github.com/ProtonMail/go-crypto/openpgp/x448"
 )

-type kdfHashFunction byte
-type kdfAlgorithm byte
-
 // PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2.
 type PublicKey struct {
 	Version      int
 	CreationTime time.Time
 	PubKeyAlgo   PublicKeyAlgorithm
-	PublicKey    interface{} // *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey or *eddsa.PublicKey
+	PublicKey    interface{} // *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey or *eddsa.PublicKey, *x25519.PublicKey, *x448.PublicKey, *ed25519.PublicKey, *ed448.PublicKey
 	Fingerprint  []byte
 	KeyId        uint64
 	IsSubkey     bool
@@ -61,6 +80,7 @@ func (pk *PublicKey) UpgradeToV5() {
 	pk.setFingerprintAndKeyId()
 }

+// UpgradeToV6 updates the version of the key to v6, and updates all necessary
+// fields.
+func (pk *PublicKey) UpgradeToV6() error {
@@ -69,11 +89,17 @@ func (pk *PublicKey) UpgradeToV6() error {
+	return pk.checkV6Compatibility()
+}
+
 // signingKey provides a convenient abstraction over signature verification
 // for v3 and v4 public keys.
 type signingKey interface {
 	SerializeForHash(io.Writer) error
-	SerializeSignaturePrefix(io.Writer)
+	SerializeSignaturePrefix(io.Writer) error
 	serializeWithoutHeaders(io.Writer) error
 }

@@ -182,6 +208,7 @@ func NewEdDSAPublicKey(creationTime time.Time, pub *eddsa.PublicKey) *PublicKey
 	return pk
 }

+func NewX25519PublicKey(creationTime time.Time, pub *x25519.PublicKey) *PublicKey {
+	pk := &PublicKey{
+		Version:      4,
@@ -230,6 +257,8 @@ func NewEd448PublicKey(creationTime time.Time, pub *ed448.PublicKey) *PublicKey
+	return pk
+}
+
 func (pk *PublicKey) parse(r io.Reader) (err error) {
 	// RFC 4880, section 5.5.2
 	var buf [6]byte
@@ -237,6 +266,7 @@ func (pk *PublicKey) parse(r io.Reader) (err error) {
 	if err != nil {
 		return
 	}
+	pk.Version = int(buf[0])
+
+	if pk.Version != 4 && pk.Version != 5 && pk.Version != 6 {
@@ -250,6 +280,14 @@ func (pk *PublicKey) parse(r io.Reader) (err error) {
+	if pk.Version >= 5 {
+		// Read the four-octet scalar octet count
+		// The count is not used in this implementation
-	if buf[0] != 4 && buf[0] != 5 {
-		return errors.UnsupportedError("public key version " + strconv.Itoa(int(buf[0])))
-	}
-
-	pk.Version = int(buf[0])
-	if pk.Version == 5 {
 		var n [4]byte
 		_, err = readFull(r, n[:])
 		if err != nil {
@@ -258,7 +296,10 @@ func (pk *PublicKey) parse(r io.Reader) (err error) {
 	}
 	pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0)
 	pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5])
+	// Ignore four-octet length
 	switch pk.PubKeyAlgo {
 	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
 		err = pk.parseRSA(r)
@@ -272,6 +313,7 @@ func (pk *PublicKey) parse(r io.Reader) (err error) {
 		err = pk.parseECDH(r)
 	case PubKeyAlgoEdDSA:
 		err = pk.parseEdDSA(r)
+	case PubKeyAlgoX25519:
+		err = pk.parseX25519(r)
+	case PubKeyAlgoX448:
@@ -280,6 +322,8 @@ func (pk *PublicKey) parse(r io.Reader) (err error) {
+	case PubKeyAlgoEd25519:
+		err = pk.parseEd25519(r)
+	case PubKeyAlgoEd448:
+		err = pk.parseEd448(r)
 	default:
 		err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo)))
 	}
@@ -293,27 +337,38 @@ func (pk *PublicKey) parse(r io.Reader) (err error) {

 func (pk *PublicKey) setFingerprintAndKeyId() {
 	// RFC 4880, section 12.2
-	if pk.Version == 5 {
+	if pk.Version >= 5 {
 		fingerprint := sha256.New()
-		pk.SerializeForHash(fingerprint)
+		if err := pk.SerializeForHash(fingerprint); err != nil {
+			// Should not happen for a hash.
+			panic(err)
+		}
 		pk.Fingerprint = make([]byte, 32)
 		copy(pk.Fingerprint, fingerprint.Sum(nil))
 		pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[:8])
 	} else {
 		fingerprint := sha1.New()
+		if err := pk.SerializeForHash(fingerprint); err != nil {
+			// Should not happen for a hash.
+			panic(err)
+		}
-		pk.SerializeForHash(fingerprint)
 		pk.Fingerprint = make([]byte, 20)
 		copy(pk.Fingerprint, fingerprint.Sum(nil))
 		pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20])
 	}
 }

+func (pk *PublicKey) checkV6Compatibility() error {
+	// Implementations MUST NOT accept or generate version 6 key material using the deprecated OIDs.
+	switch pk.PubKeyAlgo {
@@ -331,6 +386,8 @@ func (pk *PublicKey) checkV6Compatibility() error {
+	return nil
+}
+
 // parseRSA parses RSA public key material from the given Reader. See RFC 4880,
 // section 5.5.2.
 func (pk *PublicKey) parseRSA(r io.Reader) (err error) {
@@ -419,17 +476,27 @@ func (pk *PublicKey) parseECDSA(r io.Reader) (err error) {
 	if _, err = pk.oid.ReadFrom(r); err != nil {
 		return
 	}
-	pk.p = new(encoding.MPI)
-	if _, err = pk.p.ReadFrom(r); err != nil {
-		return
-	}
 	curveInfo := ecc.FindByOid(pk.oid)
 	if curveInfo == nil {
 		return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid))
 	}
+	pk.p = new(encoding.MPI)
+	if _, err = pk.p.ReadFrom(r); err != nil {
+		return
+	}
 	c, ok := curveInfo.Curve.(ecc.ECDSACurve)
 	if !ok {
 		return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid))
@@ -449,6 +516,7 @@ func (pk *PublicKey) parseECDH(r io.Reader) (err error) {
 	if _, err = pk.oid.ReadFrom(r); err != nil {
 		return
 	}
+
+	curveInfo := ecc.FindByOid(pk.oid)
+	if curveInfo == nil {
@@ -460,6 +528,8 @@ func (pk *PublicKey) parseECDH(r io.Reader) (err error) {
+		return errors.StructuralError("cannot read v6 key with deprecated OID: Curve25519Legacy")
+	}
+
 	pk.p = new(encoding.MPI)
 	if _, err = pk.p.ReadFrom(r); err != nil {
 		return
@@ -469,6 +539,15 @@ func (pk *PublicKey) parseECDH(r io.Reader) (err error) {
 		return
 	}

-	curveInfo := ecc.FindByOid(pk.oid)
-
-	if curveInfo == nil {
-		return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid))
-	}
-
 	c, ok := curveInfo.Curve.(ecc.ECDHCurve)
 	if !ok {
 		return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid))
@@ -497,16 +576,22 @@ func (pk *PublicKey) parseECDH(r io.Reader) (err error) {
 }

 func (pk *PublicKey) parseEdDSA(r io.Reader) (err error) {
+	if pk.Version == 6 {
+		// Implementations MUST NOT accept or generate version 6 key material using the deprecated OIDs.
return errors.StructuralError("cannot generate v6 key with deprecated algorithm: EdDSALegacy") } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) pk.oid = new(encoding.OID) if _, err = pk.oid.ReadFrom(r); err != nil { return } +<<<<<<< HEAD +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) curveInfo := ecc.FindByOid(pk.oid) if curveInfo == nil { return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid)) @@ -542,6 +627,7 @@ func (pk *PublicKey) parseEdDSA(r io.Reader) (err error) { return } +<<<<<<< HEAD func (pk *PublicKey) parseX25519(r io.Reader) (err error) { point := make([]byte, x25519.KeySize) _, err = io.ReadFull(r, point) @@ -600,12 +686,19 @@ func (pk *PublicKey) SerializeForHash(w io.Writer) error { if err := pk.SerializeSignaturePrefix(w); err != nil { return err } +======= +// SerializeForHash serializes the PublicKey to w with the special packet +// header format needed for hashing. +func (pk *PublicKey) SerializeForHash(w io.Writer) error { + pk.SerializeSignaturePrefix(w) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return pk.serializeWithoutHeaders(w) } // SerializeSignaturePrefix writes the prefix for this public key to the given Writer. // The prefix is used when calculating a signature over this public key. See // RFC 4880, section 5.2.4. +<<<<<<< HEAD func (pk *PublicKey) SerializeSignaturePrefix(w io.Writer) error { var pLength = pk.algorithmSpecificByteCount() // version, timestamp, algorithm @@ -618,11 +711,20 @@ func (pk *PublicKey) SerializeSignaturePrefix(w io.Writer) error { // of the key, and then the body of the key packet. When a v6 signature is made over a key, the hash data starts // with the salt, then octet 0x9B, followed by a four-octet length of the key, and then the body of the key packet. 0x95 + byte(pk.Version), +======= +func (pk *PublicKey) SerializeSignaturePrefix(w io.Writer) { + var pLength = pk.algorithmSpecificByteCount() + if pk.Version == 5 { + pLength += 10 // version, timestamp (4), algorithm, key octet count (4). 
-		w.Write([]byte{
-			0x9A,
 			byte(pLength >> 24), byte(pLength >> 16), byte(pLength >> 8), byte(pLength),
 		})
+		return err
+	}
+	if _, err := w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}); err != nil {
@@ -635,19 +737,36 @@ func (pk *PublicKey) Serialize(w io.Writer) (err error) {
+	length := uint32(versionSize + timestampSize + algorithmSize) // 6 byte header
+	length += pk.algorithmSpecificByteCount()
+	if pk.Version >= 5 {
-		return
-	}
-	pLength += 6
-	w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)})
-}
-
-func (pk *PublicKey) Serialize(w io.Writer) (err error) {
-	length := 6 // 6 byte header
-	length += pk.algorithmSpecificByteCount()
-	if pk.Version == 5 {
 		length += 4 // octet key count
 	}
 	packetType := packetTypePublicKey
 	if pk.IsSubkey {
 		packetType = packetTypePublicSubkey
 	}
-	err = serializeHeader(w, packetType, length)
+	err = serializeHeader(w, packetType, int(length))
 	if err != nil {
 		return
 	}
 	return pk.serializeWithoutHeaders(w)
 }

+func (pk *PublicKey) algorithmSpecificByteCount() uint32 {
+	length := uint32(0)
+	switch pk.PubKeyAlgo {
@@ -681,6 +800,33 @@ func (pk *PublicKey) algorithmSpecificByteCount() uint32 {
+		length += ed25519.PublicKeySize
+	case PubKeyAlgoEd448:
+		length += ed448.PublicKeySize
-func (pk *PublicKey) algorithmSpecificByteCount() int {
-	length := 0
-	switch pk.PubKeyAlgo {
-	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
-		length += int(pk.n.EncodedLength())
-		length += int(pk.e.EncodedLength())
-	case PubKeyAlgoDSA:
-		length += int(pk.p.EncodedLength())
-		length += int(pk.q.EncodedLength())
-		length += int(pk.g.EncodedLength())
-		length += int(pk.y.EncodedLength())
-	case PubKeyAlgoElGamal:
-		length += int(pk.p.EncodedLength())
-		length += int(pk.g.EncodedLength())
-		length += int(pk.y.EncodedLength())
-	case PubKeyAlgoECDSA:
-		length += int(pk.oid.EncodedLength())
-		length += int(pk.p.EncodedLength())
-	case PubKeyAlgoECDH:
-		length += int(pk.oid.EncodedLength())
-		length += int(pk.p.EncodedLength())
-		length += int(pk.kdf.EncodedLength())
-	case PubKeyAlgoEdDSA:
-		length += int(pk.oid.EncodedLength())
-		length += int(pk.p.EncodedLength())
 	default:
 		panic("unknown public key algorithm")
 	}
@@ -699,7 +845,11 @@ func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) {
 		return
 	}

-	if pk.Version == 5 {
+	if pk.Version >= 5 {
 		n := pk.algorithmSpecificByteCount()
 		if _, err = w.Write([]byte{
 			byte(n >> 24), byte(n >> 16), byte(n >> 8), byte(n),
@@ -757,6 +907,7 @@ func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) {
 	}
 	_, err = w.Write(pk.p.EncodedBytes())
 	return
+	case PubKeyAlgoX25519:
+		publicKey := pk.PublicKey.(*x25519.PublicKey)
+		_, err = w.Write(publicKey.Point)
@@ -773,6 +924,8 @@ func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) {
+		publicKey := pk.PublicKey.(*ed448.PublicKey)
+		_, err = w.Write(publicKey.Point)
+		return
 	}
 	return errors.InvalidArgumentError("bad public-key algorithm")
 }
@@ -782,6 +935,7 @@ func (pk *PublicKey) CanSign() bool {
 	return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal && pk.PubKeyAlgo != PubKeyAlgoECDH
 }

+// VerifyHashTag returns nil iff sig appears to be a plausible signature of the data
+// hashed into signed, based solely on its HashTag. signed is mutated by this call.
+func VerifyHashTag(signed hash.Hash, sig *Signature) (err error) {
@@ -796,6 +950,8 @@ func VerifyHashTag(signed hash.Hash, sig *Signature) (err error) {
+	return nil
+}
+
 // VerifySignature returns nil iff sig is a valid signature, made by this
 // public key, of the data hashed into signed. signed is mutated by this call.
 func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) {
@@ -807,8 +963,12 @@ func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err erro
 	}
 	signed.Write(sig.HashSuffix)
 	hashBytes := signed.Sum(nil)
-	if sig.Version == 5 && (hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1]) {
+	// see discussion https://github.com/ProtonMail/go-crypto/issues/107
+	if sig.Version >= 5 && (hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1]) {
 		return errors.SignatureError("hash tag doesn't match")
 	}

@@ -847,6 +1007,7 @@ func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err erro
 			return errors.SignatureError("EdDSA verification failure")
 		}
 		return nil
+	case PubKeyAlgoEd25519:
+		ed25519PublicKey := pk.PublicKey.(*ed25519.PublicKey)
+		if !ed25519.Verify(ed25519PublicKey, hashBytes, sig.EdSig) {
@@ -859,6 +1020,8 @@ func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err erro
+			return errors.SignatureError("ed448 verification failure")
+		}
+		return nil
 	default:
 		return errors.SignatureError("Unsupported public key algorithm used in signature")
 	}
@@ -866,8 +1029,16 @@ func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err erro

 // keySignatureHash returns a Hash of the message that needs to be signed for
 // pk to assert a subkey relationship to signed.
+func keySignatureHash(pk, signed signingKey, hashFunc hash.Hash) (h hash.Hash, err error) {
+	h = hashFunc
-func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) {
-	if !hashFunc.Available() {
-		return nil, errors.UnsupportedError("hash function")
-	}
-	h = hashFunc.New()

 	// RFC 4880, section 5.2.4
 	err = pk.SerializeForHash(h)
@@ -879,6 +1050,7 @@ func keySignatureHash(pk, signed signingKey, hashFunc hash.Hash) (h hash.Hash, e
 	return
 }

+// VerifyKeyHashTag returns nil iff sig appears to be a plausible signature over this
+// primary key and subkey, based solely on its HashTag.
+func (pk *PublicKey) VerifyKeyHashTag(signed *PublicKey, sig *Signature) error {
@@ -901,6 +1073,12 @@ func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error
+		return err
+	}
+	h, err := keySignatureHash(pk, signed, preparedHash)
-// VerifyKeySignature returns nil iff sig is a valid signature, made by this
-// public key, of signed.
-func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error {
-	h, err := keySignatureHash(pk, signed, sig.Hash)
 	if err != nil {
 		return err
 	}
@@ -914,6 +1092,7 @@ func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error
 		if sig.EmbeddedSignature == nil {
 			return errors.StructuralError("signing subkey is missing cross-signature")
 		}
+		preparedHashEmbedded, err := sig.EmbeddedSignature.PrepareVerify()
+		if err != nil {
+			return err
+		}
 		// Verify the cross-signature. This is calculated over the same
 		// data as the main signature, so we cannot just recursively
 		// call signed.VerifyKeySignature(...)
-		if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil {
+		if h, err = keySignatureHash(pk, signed, preparedHashEmbedded); err != nil {
 			return errors.StructuralError("error while hashing for cross-signature: " + err.Error())
 		}
 		if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil {
@@ -932,6 +1117,7 @@ func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error
 	return nil
 }

+func keyRevocationHash(pk signingKey, hashFunc hash.Hash) (err error) {
+	return pk.SerializeForHash(hashFunc)
+}
+
+// VerifyRevocationHashTag returns nil iff sig appears to be a plausible signature
+// over this public key, based solely on its HashTag.
+func (pk *PublicKey) VerifyRevocationHashTag(sig *Signature) (err error) {
@@ -947,11 +1133,24 @@ func (pk *PublicKey) VerifyRevocationHashTag(sig *Signature) (err error) {
+		return err
+	}
+	return VerifyHashTag(preparedHash, sig)
-func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) {
-	if !hashFunc.Available() {
-		return nil, errors.UnsupportedError("hash function")
-	}
-	h = hashFunc.New()
-
-	// RFC 4880, section 5.2.4
-	err = pk.SerializeForHash(h)
-
-	return
 }

 // VerifyRevocationSignature returns nil iff sig is a valid signature, made by this
 // public key.
 func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) {
+	preparedHash, err := sig.PrepareVerify()
+	if err != nil {
+		return err
+	}
@@ -960,16 +1159,27 @@ func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) {
+	return pk.VerifySignature(preparedHash, sig)
-	h, err := keyRevocationHash(pk, sig.Hash)
-	if err != nil {
-		return err
-	}
-	return pk.VerifySignature(h, sig)
 }

 // VerifySubkeyRevocationSignature returns nil iff sig is a valid subkey revocation signature,
 // made by this public key, of signed.
 func (pk *PublicKey) VerifySubkeyRevocationSignature(sig *Signature, signed *PublicKey) (err error) {
+	preparedHash, err := sig.PrepareVerify()
+	if err != nil {
+		return err
+	}
+	h, err := keySignatureHash(pk, signed, preparedHash)
-	h, err := keySignatureHash(pk, signed, sig.Hash)
 	if err != nil {
 		return err
 	}
@@ -978,6 +1188,7 @@ func (pk *PublicKey) VerifySubkeyRevocationSignature(sig *Signature, signed *Pub

 // userIdSignatureHash returns a Hash of the message that needs to be signed
 // to assert that pk is a valid key for id.
+func userIdSignatureHash(id string, pk *PublicKey, h hash.Hash) (err error) {
+
+	// RFC 4880, section 5.2.4
@@ -987,6 +1198,17 @@ func userIdSignatureHash(id string, pk *PublicKey, h hash.Hash) (err error) {
+	if err := pk.serializeWithoutHeaders(h); err != nil {
+		return err
+	}
-func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) {
-	if !hashFunc.Available() {
-		return nil, errors.UnsupportedError("hash function")
-	}
-	h = hashFunc.New()
-
-	// RFC 4880, section 5.2.4
-	pk.SerializeSignaturePrefix(h)
-	pk.serializeWithoutHeaders(h)

 	var buf [5]byte
 	buf[0] = 0xb4
@@ -997,6 +1219,7 @@ func userIdSignatureHash(id string, pk *PublicKey, h hash.Hash) (err error) {
 	h.Write(buf[:])
 	h.Write([]byte(id))

+	return nil
+}
+
+// VerifyUserIdHashTag returns nil iff sig appears to be a plausible signature over this
+// public key and UserId, based solely on its HashTag.
+func (pk *PublicKey) VerifyUserIdHashTag(id string, sig *Signature) (err error) {
@@ -1017,11 +1240,15 @@ func (pk *PublicKey) VerifyUserIdHashTag(id string, sig *Signature) (err error)
+		return err
+	}
+	return VerifyHashTag(preparedHash, sig)
-	return
 }

 // VerifyUserIdSignature returns nil iff sig is a valid signature, made by this
 // public key, that id is the identity of pub.
 func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) {
+	h, err := sig.PrepareVerify()
+	if err != nil {
+		return err
+	}
@@ -1042,6 +1269,12 @@ func (pk *PublicKey) VerifyDirectKeySignature(sig *Signature) (err error) {
+	if err := directKeySignatureHash(pk, h); err != nil {
+		return err
+	}
-	h, err := userIdSignatureHash(id, pub, sig.Hash)
-	if err != nil {
-		return err
-	}
 	return pk.VerifySignature(h, sig)
 }

@@ -1072,6 +1305,7 @@ func (pk *PublicKey) BitLength() (bitLength uint16, err error) {
 		bitLength = pk.p.BitLength()
 	case PubKeyAlgoEdDSA:
 		bitLength = pk.p.BitLength()
+	case PubKeyAlgoX25519:
+		bitLength = x25519.KeySize * 8
+	case PubKeyAlgoX448:
@@ -1080,12 +1314,15 @@ func (pk *PublicKey) BitLength() (bitLength uint16, err error) {
+		bitLength = ed25519.PublicKeySize * 8
+	case PubKeyAlgoEd448:
+		bitLength = ed448.PublicKeySize * 8
 	default:
 		err = errors.InvalidArgumentError("bad public-key algorithm")
 	}
 	return
 }

+// Curve returns the used elliptic curve of this public key.
+// Returns an error if no elliptic curve is used.
+func (pk *PublicKey) Curve() (curve Curve, err error) {
@@ -1110,11 +1347,21 @@ func (pk *PublicKey) Curve() (curve Curve, err error) {
+// expired or is created in the future.
+func (pk *PublicKey) KeyExpired(sig *Signature, currentTime time.Time) bool {
+	if pk.CreationTime.Unix() > currentTime.Unix() {
-// KeyExpired returns whether sig is a self-signature of a key that has
-// expired or is created in the future.
-func (pk *PublicKey) KeyExpired(sig *Signature, currentTime time.Time) bool {
-	if pk.CreationTime.After(currentTime) {
 		return true
 	}
 	if sig.KeyLifetimeSecs == nil || *sig.KeyLifetimeSecs == 0 {
 		return false
 	}
 	expiry := pk.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second)
-	return currentTime.After(expiry)
+	return currentTime.Unix() > expiry.Unix()
 }
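For reference, the framing that SerializeSignaturePrefix writes in the hunks above differs by key version: v4 keys hash the octet 0x99 plus a two-octet body length, while v5 and v6 keys hash 0x9A/0x9B plus a four-octet length. A hedged sketch of just that framing (the helper name signaturePrefix is illustrative, not part of the library):

	// signaturePrefix returns the hash prefix for a key packet body of the
	// given length, following the version split visible in the diff above.
	func signaturePrefix(version int, bodyLen uint32) []byte {
		if version >= 5 {
			return []byte{0x95 + byte(version), // 0x9A for v5, 0x9B for v6
				byte(bodyLen >> 24), byte(bodyLen >> 16), byte(bodyLen >> 8), byte(bodyLen)}
		}
		return []byte{0x99, byte(bodyLen >> 8), byte(bodyLen)}
	}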
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go
index dd84092392..3739a44f5b 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go
@@ -10,12 +10,15 @@ import (
 	"github.com/ProtonMail/go-crypto/openpgp/errors"
 )

+type PacketReader interface {
+	Next() (p Packet, err error)
+	Push(reader io.Reader) (err error)
+	Unread(p Packet)
+}
+
 // Reader reads packets from an io.Reader and allows packets to be 'unread' so
 // that they result from the next call to Next.
 type Reader struct {
@@ -32,6 +35,7 @@ type Reader struct {
 const maxReaders = 32

 // Next returns the most recently unread Packet, or reads another packet from
-// the top-most io.Reader. Unknown packet types are skipped.
+// the top-most io.Reader. Unknown/unsupported/Marker packet types are skipped.
 func (r *Reader) Next() (p Packet, err error) {
+	for {
@@ -94,19 +98,49 @@ func (r *Reader) NextWithUnsupported() (p Packet, err error) {
+}
+
+func (r *Reader) read() (p Packet, err error) {
 	if len(r.q) > 0 {
 		p = r.q[len(r.q)-1]
 		r.q = r.q[:len(r.q)-1]
 		return
 	}
 	for len(r.readers) > 0 {
 		p, err = Read(r.readers[len(r.readers)-1])
-		if err == nil {
-			return
-		}
 		if err == io.EOF {
 			r.readers = r.readers[:len(r.readers)-1]
 			continue
 		}
+		return p, err
+	}
-		// TODO: Add strict mode that rejects unknown packets, instead of ignoring them.
-		if _, ok := err.(errors.UnknownPacketTypeError); ok {
-			continue
-		}
-		if _, ok := err.(errors.UnsupportedError); ok {
-			switch p.(type) {
-			case *SymmetricallyEncrypted, *AEADEncrypted, *Compressed, *LiteralData:
-				return nil, err
-			}
-			continue
-		}
-		return nil, err
-	}
-
 	return nil, io.EOF
 }

@@ -134,6 +168,7 @@ func NewReader(r io.Reader) *Reader {
 		readers: []io.Reader{r},
 	}
 }
+
+// CheckReader is similar to Reader but additionally
+// uses the pushdown automata to verify the read packet sequence.
@@ -207,3 +242,5 @@ func NewCheckReader(r io.Reader) *CheckReader {
+		fullyRead: false,
+	}
+}
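The reworked Reader.Next above skips unknown and unsupported packets unless they carry payload data that cannot be safely ignored. A minimal caller-side sketch of the same skip policy, under the assumption of an io.Reader named in and a placeholder handle function (not a library API):

	for {
		p, err := packet.Read(in)
		if err == io.EOF {
			break
		}
		if _, ok := err.(errors.UnknownPacketTypeError); ok {
			continue // skip packets the parser does not recognize
		}
		if err != nil {
			return err
		}
		handle(p) // hypothetical consumer of the parsed packet
	}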
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go
index 3a4b366d87..c69b41fc0a 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go
@@ -8,17 +8,26 @@ import (
 	"bytes"
 	"crypto"
 	"crypto/dsa"
+	"encoding/asn1"
 	"encoding/binary"
 	"hash"
 	"io"
+	"math/big"
 	"strconv"
 	"time"

 	"github.com/ProtonMail/go-crypto/openpgp/ecdsa"
+	"github.com/ProtonMail/go-crypto/openpgp/ed25519"
+	"github.com/ProtonMail/go-crypto/openpgp/ed448"
 	"github.com/ProtonMail/go-crypto/openpgp/eddsa"
 	"github.com/ProtonMail/go-crypto/openpgp/errors"
 	"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
@@ -26,8 +35,12 @@ import (
 )

 const (
-	// See RFC 4880, section 5.2.3.21 for details.
+	// First octet of key flags.
+	// See RFC 9580, section 5.2.3.29 for details.
 	KeyFlagCertify = 1 << iota
 	KeyFlagSign
 	KeyFlagEncryptCommunications
@@ -38,6 +51,7 @@ const (
 	KeyFlagGroupKey
 )

+const (
+	// First octet of keyserver preference flags.
+	// See RFC 9580, section 5.2.3.25 for details.
@@ -54,14 +68,20 @@ const (
+const SaltNotationName = "salt@notations.openpgpjs.org"
+
-// Signature represents a signature. See RFC 4880, section 5.2.
+// Signature represents a signature. See RFC 9580, section 5.2.
 type Signature struct {
 	Version    int
 	SigType    SignatureType
 	PubKeyAlgo PublicKeyAlgorithm
 	Hash       crypto.Hash
+	// salt contains a random salt value for v6 signatures
+	// See RFC 9580 Section 5.2.4.
+	salt []byte

 	// HashSuffix is extra data that is hashed in after the signed data.
 	HashSuffix []byte
@@ -80,7 +100,10 @@ type Signature struct {
 	DSASigR, DSASigS     encoding.Field
 	ECDSASigR, ECDSASigS encoding.Field
 	EdDSASigR, EdDSASigS encoding.Field
+	EdSig                []byte

 	// rawSubpackets contains the unparsed subpackets, in order.
 	rawSubpackets []outputSubpacket
@@ -96,17 +119,25 @@ type Signature struct {
 	SignerUserId *string
 	IsPrimaryId  *bool
 	Notations    []*Notation
+	IntendedRecipients []*Recipient

 	// TrustLevel and TrustAmount can be set by the signer to assert that
 	// the key is not only valid but also trustworthy at the specified
 	// level.
-	// See RFC 4880, section 5.2.3.13 for details.
+	// See RFC 9580, section 5.2.3.21 for details.
 	TrustLevel  TrustLevel
 	TrustAmount TrustAmount

 	// TrustRegularExpression can be used in conjunction with trust Signature
 	// packets to limit the scope of the trust that is extended.
-	// See RFC 4880, section 5.2.3.14 for details.
+	// See RFC 9580, section 5.2.3.22 for details.
 	TrustRegularExpression *string
@@ -127,11 +158,27 @@ type Signature struct {

 	// PolicyURI can be set to the URI of a document that describes the
-	// policy under which the signature was issued. See RFC 4880, section
-	// 5.2.3.20 for details.
+	// policy under which the signature was issued. See RFC 9580, section
+	// 5.2.3.28 for details.
 	PolicyURI string

-	// FlagsValid is set if any flags were given. See RFC 4880, section
-	// 5.2.3.21 for details.
+	// FlagsValid is set if any flags were given. See RFC 9580, section
+	// 5.2.3.29 for details.
 	FlagsValid bool
 	FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage, FlagSplitKey, FlagAuthenticate, FlagGroupKey bool

 	// RevocationReason is set if this signature has been revoked.
-	// See RFC 4880, section 5.2.3.23 for details.
+	// See RFC 9580, section 5.2.3.31 for details.
 	RevocationReason     *ReasonForRevocation
 	RevocationReasonText string
@@ -148,6 +195,7 @@ type Signature struct {
 	outSubpackets []outputSubpacket
 }

+// VerifiableSignature internally keeps state if the
+// signature has been verified before.
+type VerifiableSignature struct {
@@ -173,10 +221,16 @@ func (sig *Signature) Salt() []byte {
+func (sig *Signature) parse(r io.Reader) (err error) {
+	// RFC 9580, section 5.2.3
+	var buf [7]byte
-func (sig *Signature) parse(r io.Reader) (err error) {
-	// RFC 4880, section 5.2.3
-	var buf [5]byte
 	_, err = readFull(r, buf[:1])
 	if err != nil {
 		return
 	}
+	sig.Version = int(buf[0])
+	if sig.Version != 4 && sig.Version != 5 && sig.Version != 6 {
+		err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0])))
+		return
@@ -192,13 +246,25 @@ func (sig *Signature) parse(r io.Reader) (err error) {
+	} else {
+		_, err = readFull(r, buf[:5])
+	}
-	if buf[0] != 4 && buf[0] != 5 {
-		err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0])))
-		return
-	}
-	sig.Version = int(buf[0])
-	_, err = readFull(r, buf[:5])
 	if err != nil {
 		return
 	}
 	sig.SigType = SignatureType(buf[0])
 	sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1])
 	switch sig.PubKeyAlgo {
-	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA:
+	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA, PubKeyAlgoEd25519, PubKeyAlgoEd448:
 	default:
 		err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo)))
 		return
@@ -216,6 +282,7 @@ func (sig *Signature) parse(r io.Reader) (err error) {
 		return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2])))
 	}

+	var hashedSubpacketsLength int
+	if sig.Version == 6 {
+		// For a v6 signature, a four-octet length is used.
@@ -227,6 +294,9 @@ func (sig *Signature) parse(r io.Reader) (err error) {
+	} else {
+		hashedSubpacketsLength = int(buf[3])<<8 | int(buf[4])
+	}
-	hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4])
 	hashedSubpackets := make([]byte, hashedSubpacketsLength)
 	_, err = readFull(r, hashedSubpackets)
 	if err != nil {
@@ -242,6 +312,7 @@ func (sig *Signature) parse(r io.Reader) (err error) {
 		return
 	}

+	if sig.Version == 6 {
+		_, err = readFull(r, buf[:4])
+	} else {
@@ -257,6 +328,13 @@ func (sig *Signature) parse(r io.Reader) (err error) {
+	} else {
+		unhashedSubpacketsLength = uint32(buf[0])<<8 | uint32(buf[1])
+	}
-	_, err = readFull(r, buf[:2])
-	if err != nil {
-		return
-	}
-	unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1])
 	unhashedSubpackets := make([]byte, unhashedSubpacketsLength)
 	_, err = readFull(r, unhashedSubpackets)
 	if err != nil {
@@ -272,6 +350,7 @@ func (sig *Signature) parse(r io.Reader) (err error) {
 		return
 	}

+	if sig.Version == 6 {
+		// Only for v6 signatures, a variable-length field containing the salt
+		_, err = readFull(r, buf[:1])
@@ -296,6 +375,8 @@ func (sig *Signature) parse(r io.Reader) (err error) {
+		sig.salt = salt
+	}
+
 	switch sig.PubKeyAlgo {
 	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
 		sig.RSASignature = new(encoding.MPI)
@@ -326,6 +407,7 @@ func (sig *Signature) parse(r io.Reader) (err error) {
 		if _, err = sig.EdDSASigS.ReadFrom(r); err != nil {
 			return
 		}
+	case PubKeyAlgoEd25519:
+		sig.EdSig, err = ed25519.ReadSignature(r)
+		if err != nil {
+			return
+		}
@@ -336,6 +418,8 @@ func (sig *Signature) parse(r io.Reader) (err error) {
+		if err != nil {
+			return
+		}
 	default:
 		panic("unreachable")
 	}
@@ -343,7 +427,11 @@ func (sig *Signature) parse(r io.Reader) (err error) {
 }

 // parseSignatureSubpackets parses subpackets of the main signature packet. See
-// RFC 4880, section 5.2.3.1.
+// RFC 9580, section 5.2.3.1.
 func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) {
 	for len(subpackets) > 0 {
 		subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed)
@@ -364,7 +452,10 @@ type signatureSubpacketType uint8

 const (
 	creationTimeSubpacket        signatureSubpacketType = 2
 	signatureExpirationSubpacket signatureSubpacketType = 3
+	exportableCertSubpacket      signatureSubpacketType = 4
 	trustSubpacket               signatureSubpacketType = 5
 	regularExpressionSubpacket   signatureSubpacketType = 6
 	keyExpirationSubpacket       signatureSubpacketType = 9
@@ -373,8 +464,11 @@ const (
 	notationDataSubpacket        signatureSubpacketType = 20
 	prefHashAlgosSubpacket       signatureSubpacketType = 21
 	prefCompressionSubpacket     signatureSubpacketType = 22
+	keyserverPrefsSubpacket      signatureSubpacketType = 23
+	prefKeyserverSubpacket       signatureSubpacketType = 24
 	primaryUserIdSubpacket       signatureSubpacketType = 25
 	policyUriSubpacket           signatureSubpacketType = 26
 	keyFlagsSubpacket            signatureSubpacketType = 27
@@ -383,13 +477,20 @@ const (
 	featuresSubpacket            signatureSubpacketType = 30
 	embeddedSignatureSubpacket   signatureSubpacketType = 32
 	issuerFingerprintSubpacket   signatureSubpacketType = 33
+	intendedRecipientSubpacket   signatureSubpacketType = 35
 	prefCipherSuitesSubpacket    signatureSubpacketType = 39
 )

 // parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1.
 func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) {
-	// RFC 4880, section 5.2.3.1
+	// RFC 9580, section 5.2.3.7
 	var (
 		length     uint32
 		packetType signatureSubpacketType
@@ -447,24 +548,35 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
 		t := binary.BigEndian.Uint32(subpacket)
 		sig.CreationTime = time.Unix(int64(t), 0)
 	case signatureExpirationSubpacket:
-		// Signature expiration time, section 5.2.3.10
+		// Signature expiration time, section 5.2.3.18
 		if len(subpacket) != 4 {
 			err = errors.StructuralError("expiration subpacket with bad length")
 			return
 		}
 		sig.SigLifetimeSecs = new(uint32)
 		*sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket)
+	case exportableCertSubpacket:
+		if subpacket[0] == 0 {
+			err = errors.UnsupportedError("signature with non-exportable certification")
+			return
+		}
 	case trustSubpacket:
 		if len(subpacket) != 2 {
 			err = errors.StructuralError("trust subpacket with bad length")
 			return
 		}
-		// Trust level and amount, section 5.2.3.13
+		// Trust level and amount, section 5.2.3.21
 		sig.TrustLevel = TrustLevel(subpacket[0])
 		sig.TrustAmount = TrustAmount(subpacket[1])
 	case regularExpressionSubpacket:
@@ -472,7 +584,11 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
 			err = errors.StructuralError("regexp subpacket with bad length")
 			return
 		}
-		// Trust regular expression, section 5.2.3.14
+		// Trust regular expression, section 5.2.3.22
 		// RFC specifies the string should be null-terminated; remove a null byte from the end
 		if subpacket[len(subpacket)-1] != 0x00 {
 			err = errors.StructuralError("expected regular expression to be null-terminated")
 			return
 		}
 		trustRegularExpression := string(subpacket[:len(subpacket)-1])
 		sig.TrustRegularExpression = &trustRegularExpression
 	case keyExpirationSubpacket:
-		// Key expiration time, section 5.2.3.6
+		// Key expiration time, section 5.2.3.13
 		if len(subpacket) != 4 {
 			err = errors.StructuralError("key expiration subpacket with bad length")
 			return
 		}
 		sig.KeyLifetimeSecs = new(uint32)
 		*sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket)
 	case prefSymmetricAlgosSubpacket:
-		// Preferred symmetric algorithms, section 5.2.3.7
+		// Preferred symmetric algorithms, section 5.2.3.14
 		sig.PreferredSymmetric = make([]byte, len(subpacket))
 		copy(sig.PreferredSymmetric, subpacket)
 	case issuerSubpacket:
-		// Issuer, section 5.2.3.5
-		if sig.Version > 4 {
-			err = errors.StructuralError("issuer subpacket found in v5 key")
+		// Issuer, section 5.2.3.12
+		if sig.Version > 4 && isHashed {
+			err = errors.StructuralError("issuer subpacket found in v6 key")
 			return
 		}
 		if len(subpacket) != 8 {
 			err = errors.StructuralError("issuer subpacket with bad length")
 			return
 		}
-		sig.IssuerKeyId = new(uint64)
-		*sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket)
-	case notationDataSubpacket:
-		// Notation data, section 5.2.3.16
+		if sig.Version <= 4 {
+			sig.IssuerKeyId = new(uint64)
+			*sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket)
+		}
+	case notationDataSubpacket:
+		// Notation data, section 5.2.3.24
 		if len(subpacket) < 8 {
 			err = errors.StructuralError("notation data subpacket with bad length")
 			return
@@ -529,6 +666,7 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
 		sig.Notations = append(sig.Notations, &notation)
 	case prefHashAlgosSubpacket:
+		// Preferred hash algorithms, section 5.2.3.16
+		sig.PreferredHash = make([]byte, len(subpacket))
+		copy(sig.PreferredHash, subpacket)
@@ -550,6 +688,17 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
+		sig.PreferredKeyserver = string(subpacket)
+	case primaryUserIdSubpacket:
+		// Primary User ID, section 5.2.3.27
-		// Preferred hash algorithms, section 5.2.3.8
-		sig.PreferredHash = make([]byte, len(subpacket))
-		copy(sig.PreferredHash, subpacket)
-	case prefCompressionSubpacket:
-		// Preferred compression algorithms, section 5.2.3.9
-		sig.PreferredCompression = make([]byte, len(subpacket))
-		copy(sig.PreferredCompression, subpacket)
-	case primaryUserIdSubpacket:
-		// Primary User ID, section 5.2.3.19
 		if len(subpacket) != 1 {
 			err = errors.StructuralError("primary user id subpacket with bad length")
 			return
@@ -559,11 +708,20 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
 			*sig.IsPrimaryId = true
 		}
 	case keyFlagsSubpacket:
+		// Key flags, section 5.2.3.29
+		sig.FlagsValid = true
+		if len(subpacket) == 0 {
+			return
+		}
-		// Key flags, section 5.2.3.21
-		if len(subpacket) == 0 {
-			err = errors.StructuralError("empty key flags subpacket")
-			return
-		}
-		sig.FlagsValid = true
 		if subpacket[0]&KeyFlagCertify != 0 {
 			sig.FlagCertify = true
 		}
@@ -589,16 +747,27 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
 		userId := string(subpacket)
 		sig.SignerUserId = &userId
 	case reasonForRevocationSubpacket:
-		// Reason For Revocation, section 5.2.3.23
+		// Reason For Revocation, section 5.2.3.31
 		if len(subpacket) == 0 {
 			err = errors.StructuralError("empty revocation reason subpacket")
 			return
 		}
 		sig.RevocationReason = new(ReasonForRevocation)
-		*sig.RevocationReason = ReasonForRevocation(subpacket[0])
+		*sig.RevocationReason = NewReasonForRevocation(subpacket[0])
 		sig.RevocationReasonText = string(subpacket[1:])
 	case featuresSubpacket:
-		// Features subpacket, section 5.2.3.24 specifies a very general
+		// Features subpacket, section 5.2.3.32 specifies a very general
 		// mechanism for OpenPGP implementations to signal support for new
 		// features.
 		if len(subpacket) > 0 {
@@ -612,13 +781,23 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
 		}
 	case embeddedSignatureSubpacket:
 		// Only usage is in signatures that cross-certify
-		// signing subkeys. section 5.2.3.26 describes the
+		// signing subkeys. section 5.2.3.34 describes the
 		// format, with its usage described in section 11.1
 		if sig.EmbeddedSignature != nil {
 			err = errors.StructuralError("Cannot have multiple embedded signatures")
 			return
 		}
 		sig.EmbeddedSignature = new(Signature)
-		// Embedded signatures are required to be v4 signatures see
-		// section 12.1. However, we only parse v4 signatures in this
-		// file anyway.
 		if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil {
 			return nil, err
 		}
@@ -626,7 +805,11 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
 			return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType)))
 		}
 	case policyUriSubpacket:
-		// Policy URI, section 5.2.3.20
+		// Policy URI, section 5.2.3.28
 		sig.PolicyURI = string(subpacket)
 	case issuerFingerprintSubpacket:
 		if len(subpacket) == 0 {
 			err = errors.StructuralError("empty issuer fingerprint subpacket")
 			return
 		}
 		v, l := subpacket[0], len(subpacket[1:])
-		if v == 5 && l != 32 || v != 5 && l != 20 {
+		if v >= 5 && l != 32 || v < 5 && l != 20 {
 			return nil, errors.StructuralError("bad fingerprint length")
 		}
 		sig.IssuerFingerprint = make([]byte, l)
 		copy(sig.IssuerFingerprint, subpacket[1:])
 		sig.IssuerKeyId = new(uint64)
-		if v == 5 {
+		if v >= 5 {
 			*sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket[1:9])
 		} else {
 			*sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket[13:21])
 		}
+	case intendedRecipientSubpacket:
+		// Intended Recipient Fingerprint, section 5.2.3.36
+		if len(subpacket) < 1 {
@@ -659,6 +851,11 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
+		sig.IntendedRecipients = append(sig.IntendedRecipients, &Recipient{int(version), fingerprint})
 	case prefCipherSuitesSubpacket:
-		// Preferred AEAD cipher suites
-		// See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#name-preferred-aead-ciphersuites
+		// Preferred AEAD cipher suites, section 5.2.3.15
 		if len(subpacket)%2 != 0 {
 			err = errors.StructuralError("invalid aead cipher suite length")
 			return
@@ -700,6 +897,7 @@ func (sig *Signature) CheckKeyIdOrFingerprint(pk *PublicKey) bool {
 	return sig.IssuerKeyId != nil && *sig.IssuerKeyId == pk.KeyId
 }

+func (sig *Signature) CheckKeyIdOrFingerprintExplicit(fingerprint []byte, keyId uint64) bool {
+	if sig.IssuerFingerprint != nil && len(sig.IssuerFingerprint) >= 20 && fingerprint != nil {
+		return bytes.Equal(sig.IssuerFingerprint, fingerprint)
@@ -710,6 +908,11 @@ func (sig *Signature) CheckKeyIdOrFingerprintExplicit(fingerprint []byte, keyId

 // serializeSubpacketLength marshals the given length into to.
 func serializeSubpacketLength(to []byte, length int) int {
-	// RFC 4880, Section 4.2.2.
+	// RFC 9580, Section 4.2.1.
 	if length < 192 {
 		to[0] = byte(length)
 		return 1
@@ -755,19 +958,31 @@ func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) {
 			to = to[n:]
 		}
 	}
-	return
 }
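For reference, serializeSubpacketLength above implements the standard OpenPGP subpacket length encoding (RFC 4880, section 4.2.2; carried over in RFC 9580, section 4.2.1): one octet below 192, two octets up to 8383, otherwise 0xFF plus a four-octet big-endian value. A standalone sketch of the full encoding, outside the patch:

	func encodeSubpacketLength(to []byte, length int) int {
		if length < 192 {
			to[0] = byte(length)
			return 1
		}
		if length < 8384 {
			length -= 192
			to[0] = byte(length>>8) + 192 // first octet 192..223 marks a two-octet length
			to[1] = byte(length)
			return 2
		}
		to[0] = 255
		binary.BigEndian.PutUint32(to[1:], uint32(length))
		return 5
	}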
func (sig *Signature) SigExpired(currentTime time.Time) bool { +<<<<<<< HEAD if sig.CreationTime.Unix() > currentTime.Unix() { +======= + if sig.CreationTime.After(currentTime) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return true } if sig.SigLifetimeSecs == nil || *sig.SigLifetimeSecs == 0 { return false } expiry := sig.CreationTime.Add(time.Duration(*sig.SigLifetimeSecs) * time.Second) +<<<<<<< HEAD return currentTime.Unix() > expiry.Unix() +======= + return currentTime.After(expiry) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // buildHashSuffix constructs the HashSuffix member of sig in preparation for signing. @@ -791,6 +1006,7 @@ func (sig *Signature) buildHashSuffix(hashedSubpackets []byte) (err error) { uint8(sig.SigType), uint8(sig.PubKeyAlgo), uint8(hashId), +<<<<<<< HEAD }) hashedSubpacketsLength := len(hashedSubpackets) if sig.Version == 6 { @@ -813,14 +1029,27 @@ func (sig *Signature) buildHashSuffix(hashedSubpackets []byte) (err error) { var l uint64 = uint64(lenPrefix + len(hashedSubpackets)) if sig.Version == 5 { // v5 case +======= + uint8(len(hashedSubpackets) >> 8), + uint8(len(hashedSubpackets)), + }) + hashedFields.Write(hashedSubpackets) + + var l uint64 = uint64(6 + len(hashedSubpackets)) + if sig.Version == 5 { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) hashedFields.Write([]byte{0x05, 0xff}) hashedFields.Write([]byte{ uint8(l >> 56), uint8(l >> 48), uint8(l >> 40), uint8(l >> 32), uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l), }) } else { +<<<<<<< HEAD // v4 and v6 case hashedFields.Write([]byte{byte(sig.Version), 0xff}) +======= + hashedFields.Write([]byte{0x04, 0xff}) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) hashedFields.Write([]byte{ uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l), }) @@ -848,6 +1077,7 @@ func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) { return } +<<<<<<< HEAD // PrepareSign must be called to create a hash object before Sign for v6 signatures. // The created hash object initially hashes a randomly generated salt // as required by v6 signatures. The generated salt is stored in sig. If the signature is not v6, @@ -909,6 +1139,8 @@ func (sig *Signature) PrepareVerify() (hash.Hash, error) { return hasher, nil } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Sign signs a message with a private key. The hash, h, must contain // the hash of the message to be signed and will be mutated by this function. // On success, the signature is stored in sig. Call Serialize to write it out. 
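The Sign/Serialize comment above describes the core signing flow: the caller hashes the message, Sign fills in the algorithm-specific signature fields, and Serialize writes the packet out. Below is a minimal sketch of that flow for a v4 key, using only API that appears in this diff (the HEAD side's v6 path additionally requires the salted PrepareSign step); the private key and message are assumed to come from the caller, and a nil config selects the library defaults:

package pgpexample

import (
	"bytes"
	"crypto"
	"crypto/sha256"
	"time"

	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

// signDetached produces a serialized binary signature packet over msg.
func signDetached(priv *packet.PrivateKey, msg []byte) ([]byte, error) {
	sig := &packet.Signature{
		SigType:      packet.SigTypeBinary,
		PubKeyAlgo:   priv.PubKeyAlgo,
		Hash:         crypto.SHA256,
		CreationTime: time.Now(),
		IssuerKeyId:  &priv.KeyId,
	}

	// The hash handed to Sign must already contain the message bytes;
	// Sign then appends the hash suffix and computes the raw signature.
	h := sha256.New()
	h.Write(msg)

	if err := sig.Sign(h, priv, nil); err != nil { // nil config: defaults
		return nil, err
	}

	var buf bytes.Buffer
	if err := sig.Serialize(&buf); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}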
@@ -919,6 +1151,7 @@ func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err e } sig.Version = priv.PublicKey.Version sig.IssuerFingerprint = priv.PublicKey.Fingerprint +<<<<<<< HEAD if sig.Version < 6 && config.RandomizeSignaturesViaNotation() { sig.removeNotationsWithName(SaltNotationName) salt, err := SignatureSaltForHash(sig.Hash, config.Random()) @@ -933,6 +1166,8 @@ func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err e } sig.Notations = append(sig.Notations, ¬ation) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) sig.outSubpackets, err = sig.buildSubpackets(priv.PublicKey) if err != nil { return err @@ -962,6 +1197,7 @@ func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err e sig.DSASigS = new(encoding.MPI).SetBig(s) } case PubKeyAlgoECDSA: +<<<<<<< HEAD var r, s *big.Int if sk, ok := priv.PrivateKey.(*ecdsa.PrivateKey); ok { r, s, err = ecdsa.Sign(config.Random(), sk, digest) @@ -972,6 +1208,10 @@ func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err e r, s, err = unwrapECDSASig(b) } } +======= + sk := priv.PrivateKey.(*ecdsa.PrivateKey) + r, s, err := ecdsa.Sign(config.Random(), sk, digest) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err == nil { sig.ECDSASigR = new(encoding.MPI).SetBig(r) @@ -984,6 +1224,7 @@ func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err e sig.EdDSASigR = encoding.NewMPI(r) sig.EdDSASigS = encoding.NewMPI(s) } +<<<<<<< HEAD case PubKeyAlgoEd25519: sk := priv.PrivateKey.(*ed25519.PrivateKey) signature, err := ed25519.Sign(sk, digest) @@ -996,6 +1237,8 @@ func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err e if err == nil { sig.EdSig = signature } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo))) } @@ -1003,6 +1246,7 @@ func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err e return } +<<<<<<< HEAD // unwrapECDSASig parses the two integer components of an ASN.1-encoded ECDSA signature. func unwrapECDSASig(b []byte) (r, s *big.Int, err error) { var ecsdaSig struct { @@ -1015,6 +1259,8 @@ func unwrapECDSASig(b []byte) (r, s *big.Int, err error) { return ecsdaSig.R, ecsdaSig.S, nil } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // SignUserId computes a signature from priv, asserting that pub is a valid // key for the identity id. On success, the signature is stored in sig. Call // Serialize to write it out. @@ -1023,6 +1269,7 @@ func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, co if priv.Dummy() { return errors.ErrDummyPrivateKey("dummy key found") } +<<<<<<< HEAD prepareHash, err := sig.PrepareSign(config) if err != nil { return err @@ -1049,6 +1296,13 @@ func (sig *Signature) SignDirectKeyBinding(pub *PublicKey, priv *PrivateKey, con return err } return sig.Sign(prepareHash, priv, config) +======= + h, err := userIdSignatureHash(id, pub, sig.Hash) + if err != nil { + return err + } + return sig.Sign(h, priv, config) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // CrossSignKey computes a signature from signingKey on pub hashed using hashKey. On success, @@ -1056,11 +1310,15 @@ func (sig *Signature) SignDirectKeyBinding(pub *PublicKey, priv *PrivateKey, con // If config is nil, sensible defaults will be used. 
func (sig *Signature) CrossSignKey(pub *PublicKey, hashKey *PublicKey, signingKey *PrivateKey, config *Config) error { +<<<<<<< HEAD prepareHash, err := sig.PrepareSign(config) if err != nil { return err } h, err := keySignatureHash(hashKey, pub, prepareHash) +======= + h, err := keySignatureHash(hashKey, pub, sig.Hash) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -1074,11 +1332,15 @@ func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) if priv.Dummy() { return errors.ErrDummyPrivateKey("dummy key found") } +<<<<<<< HEAD prepareHash, err := sig.PrepareSign(config) if err != nil { return err } h, err := keySignatureHash(&priv.PublicKey, pub, prepareHash) +======= + h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -1089,6 +1351,7 @@ func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) // stored in sig. Call Serialize to write it out. // If config is nil, sensible defaults will be used. func (sig *Signature) RevokeKey(pub *PublicKey, priv *PrivateKey, config *Config) error { +<<<<<<< HEAD prepareHash, err := sig.PrepareSign(config) if err != nil { return err @@ -1097,6 +1360,13 @@ func (sig *Signature) RevokeKey(pub *PublicKey, priv *PrivateKey, config *Config return err } return sig.Sign(prepareHash, priv, config) +======= + h, err := keyRevocationHash(pub, sig.Hash) + if err != nil { + return err + } + return sig.Sign(h, priv, config) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // RevokeSubkey computes a subkey revocation signature of pub using priv. @@ -1113,7 +1383,11 @@ func (sig *Signature) Serialize(w io.Writer) (err error) { if len(sig.outSubpackets) == 0 { sig.outSubpackets = sig.rawSubpackets } +<<<<<<< HEAD if sig.RSASignature == nil && sig.DSASigR == nil && sig.ECDSASigR == nil && sig.EdDSASigR == nil && sig.EdSig == nil { +======= + if sig.RSASignature == nil && sig.DSASigR == nil && sig.ECDSASigR == nil && sig.EdDSASigR == nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") } @@ -1130,14 +1404,18 @@ func (sig *Signature) Serialize(w io.Writer) (err error) { case PubKeyAlgoEdDSA: sigLength = int(sig.EdDSASigR.EncodedLength()) sigLength += int(sig.EdDSASigS.EncodedLength()) +<<<<<<< HEAD case PubKeyAlgoEd25519: sigLength = ed25519.SignatureSize case PubKeyAlgoEd448: sigLength = ed448.SignatureSize +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: panic("impossible") } +<<<<<<< HEAD hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true) unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) length := 4 + /* length of version|signature type|public-key algorithm|hash algorithm */ @@ -1148,6 +1426,14 @@ func (sig *Signature) Serialize(w io.Writer) (err error) { length += 4 + /* the two length fields are four-octet instead of two */ 1 + /* salt length */ len(sig.salt) /* length salt */ +======= + unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) + length := len(sig.HashSuffix) - 6 /* trailer not included */ + + 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen + + 2 /* hash tag */ + sigLength + if sig.Version == 5 { + length -= 4 // eight-octet instead of four-octet big endian +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } err = serializeHeader(w, packetTypeSignature, 
length) if err != nil { @@ -1161,6 +1447,7 @@ func (sig *Signature) Serialize(w io.Writer) (err error) { } func (sig *Signature) serializeBody(w io.Writer) (err error) { +<<<<<<< HEAD var fields []byte if sig.Version == 6 { // v6 signatures use 4 octets for length @@ -1176,12 +1463,17 @@ func (sig *Signature) serializeBody(w io.Writer) (err error) { fields = sig.HashSuffix[:6+hashedSubpacketsLen] } +======= + hashedSubpacketsLen := uint16(uint16(sig.HashSuffix[4])<<8) | uint16(sig.HashSuffix[5]) + fields := sig.HashSuffix[:6+hashedSubpacketsLen] +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) _, err = w.Write(fields) if err != nil { return } unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) +<<<<<<< HEAD var unhashedSubpackets []byte if sig.Version == 6 { unhashedSubpackets = make([]byte, 4+unhashedSubpacketsLen) @@ -1196,6 +1488,12 @@ func (sig *Signature) serializeBody(w io.Writer) (err error) { unhashedSubpackets[1] = byte(unhashedSubpacketsLen) serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) } +======= + unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen) + unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) + unhashedSubpackets[1] = byte(unhashedSubpacketsLen) + serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) _, err = w.Write(unhashedSubpackets) if err != nil { @@ -1206,6 +1504,7 @@ func (sig *Signature) serializeBody(w io.Writer) (err error) { return } +<<<<<<< HEAD if sig.Version == 6 { // write salt for v6 signatures _, err = w.Write([]byte{uint8(len(sig.salt))}) @@ -1218,6 +1517,8 @@ func (sig *Signature) serializeBody(w io.Writer) (err error) { } } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) switch sig.PubKeyAlgo { case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: _, err = w.Write(sig.RSASignature.EncodedBytes()) @@ -1236,10 +1537,13 @@ func (sig *Signature) serializeBody(w io.Writer) (err error) { return } _, err = w.Write(sig.EdDSASigS.EncodedBytes()) +<<<<<<< HEAD case PubKeyAlgoEd25519: err = ed25519.WriteSignature(w, sig.EdSig) case PubKeyAlgoEd448: err = ed448.WriteSignature(w, sig.EdSig) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: panic("impossible") } @@ -1257,14 +1561,32 @@ type outputSubpacket struct { func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubpacket, err error) { creationTime := make([]byte, 4) binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix())) +<<<<<<< HEAD // Signature Creation Time subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, true, creationTime}) // Signature Expiration Time +======= + subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime}) + + if sig.IssuerKeyId != nil && sig.Version == 4 { + keyId := make([]byte, 8) + binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId) + subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId}) + } + if sig.IssuerFingerprint != nil { + contents := append([]uint8{uint8(issuer.Version)}, sig.IssuerFingerprint...) 
+ subpackets = append(subpackets, outputSubpacket{true, issuerFingerprintSubpacket, sig.Version == 5, contents}) + } + if sig.SignerUserId != nil { + subpackets = append(subpackets, outputSubpacket{true, signerUserIdSubpacket, false, []byte(*sig.SignerUserId)}) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 { sigLifetime := make([]byte, 4) binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs) subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime}) } +<<<<<<< HEAD // Trust Signature if sig.TrustLevel != 0 { subpackets = append(subpackets, outputSubpacket{true, trustSubpacket, true, []byte{byte(sig.TrustLevel), byte(sig.TrustAmount)}}) @@ -1332,6 +1654,11 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp } // Key Flags // Key flags may only appear in self-signatures or certification signatures. +======= + + // Key flags may only appear in self-signatures or certification signatures. + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if sig.FlagsValid { var flags byte if sig.FlagCertify { @@ -1355,6 +1682,7 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp if sig.FlagGroupKey { flags |= KeyFlagGroupKey } +<<<<<<< HEAD subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, true, []byte{flags}}) } // Signer's User ID @@ -1368,6 +1696,24 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp append([]uint8{uint8(*sig.RevocationReason)}, []uint8(sig.RevocationReasonText)...)}) } // Features +======= + subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}}) + } + + for _, notation := range sig.Notations { + subpackets = append( + subpackets, + outputSubpacket{ + true, + notationDataSubpacket, + notation.IsCritical, + notation.getData(), + }) + } + + // The following subpackets may only appear in self-signatures. 
+ +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var features = byte(0x00) if sig.SEIPDv1 { features |= 0x01 @@ -1375,6 +1721,7 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp if sig.SEIPDv2 { features |= 0x08 } +<<<<<<< HEAD if features != 0x00 { subpackets = append(subpackets, outputSubpacket{true, featuresSubpacket, false, []byte{features}}) } @@ -1405,6 +1752,48 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp }) } // Preferred AEAD Ciphersuites +======= + + if features != 0x00 { + subpackets = append(subpackets, outputSubpacket{true, featuresSubpacket, false, []byte{features}}) + } + + if sig.TrustLevel != 0 { + subpackets = append(subpackets, outputSubpacket{true, trustSubpacket, true, []byte{byte(sig.TrustLevel), byte(sig.TrustAmount)}}) + } + + if sig.TrustRegularExpression != nil { + // RFC specifies the string should be null-terminated; add a null byte to the end + subpackets = append(subpackets, outputSubpacket{true, regularExpressionSubpacket, true, []byte(*sig.TrustRegularExpression + "\000")}) + } + + if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 { + keyLifetime := make([]byte, 4) + binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs) + subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime}) + } + + if sig.IsPrimaryId != nil && *sig.IsPrimaryId { + subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}}) + } + + if len(sig.PreferredSymmetric) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric}) + } + + if len(sig.PreferredHash) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash}) + } + + if len(sig.PreferredCompression) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression}) + } + + if len(sig.PolicyURI) > 0 { + subpackets = append(subpackets, outputSubpacket{true, policyUriSubpacket, false, []uint8(sig.PolicyURI)}) + } + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if len(sig.PreferredCipherSuites) > 0 { serialized := make([]byte, len(sig.PreferredCipherSuites)*2) for i, cipherSuite := range sig.PreferredCipherSuites { @@ -1413,6 +1802,26 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp } subpackets = append(subpackets, outputSubpacket{true, prefCipherSuitesSubpacket, false, serialized}) } +<<<<<<< HEAD +======= + + // Revocation reason appears only in revocation signatures and is serialized as per section 5.2.3.23. + if sig.RevocationReason != nil { + subpackets = append(subpackets, outputSubpacket{true, reasonForRevocationSubpacket, true, + append([]uint8{uint8(*sig.RevocationReason)}, []uint8(sig.RevocationReasonText)...)}) + } + + // EmbeddedSignature appears only in subkeys capable of signing and is serialized as per section 5.2.3.26. 
+ if sig.EmbeddedSignature != nil { + var buf bytes.Buffer + err = sig.EmbeddedSignature.serializeBody(&buf) + if err != nil { + return + } + subpackets = append(subpackets, outputSubpacket{true, embeddedSignatureSubpacket, true, buf.Bytes()}) + } + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return } @@ -1454,6 +1863,11 @@ func (sig *Signature) AddMetadataToHashSuffix() { binary.BigEndian.PutUint32(buf[:], lit.Time) suffix.Write(buf[:]) +<<<<<<< HEAD +======= + // Update the counter and restore trailing bytes + l = uint64(suffix.Len()) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) suffix.Write([]byte{0x05, 0xff}) suffix.Write([]byte{ uint8(l >> 56), uint8(l >> 48), uint8(l >> 40), uint8(l >> 32), @@ -1461,6 +1875,7 @@ func (sig *Signature) AddMetadataToHashSuffix() { }) sig.HashSuffix = suffix.Bytes() } +<<<<<<< HEAD // SaltLengthForHash selects the required salt length for the given hash algorithm, // as per Table 23 (Hash algorithm registry) of the crypto refresh. @@ -1507,3 +1922,5 @@ func (sig *Signature) removeNotationsWithName(name string) { } sig.Notations = updatedNotations } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go index 2812a1db88..e05c417dda 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go @@ -7,13 +7,19 @@ package packet import ( "bytes" "crypto/cipher" +<<<<<<< HEAD "crypto/sha256" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "io" "strconv" "github.com/ProtonMail/go-crypto/openpgp/errors" "github.com/ProtonMail/go-crypto/openpgp/s2k" +<<<<<<< HEAD "golang.org/x/crypto/hkdf" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // This is the largest session key that we'll support. 
Since at most 256-bit cipher @@ -41,6 +47,7 @@ func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { return err } ske.Version = int(buf[0]) +<<<<<<< HEAD if ske.Version != 4 && ske.Version != 5 && ske.Version != 6 { return errors.UnsupportedError("unknown SymmetricKeyEncrypted version") } @@ -56,6 +63,12 @@ func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { } } +======= + if ske.Version != 4 && ske.Version != 5 { + return errors.UnsupportedError("unknown SymmetricKeyEncrypted version") + } + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Cipher function if _, err := readFull(r, buf[:]); err != nil { return err @@ -65,7 +78,11 @@ func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[0]))) } +<<<<<<< HEAD if ske.Version >= 5 { +======= + if ske.Version == 5 { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // AEAD mode if _, err := readFull(r, buf[:]); err != nil { return errors.StructuralError("cannot read AEAD octet from packet") @@ -73,6 +90,7 @@ func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { ske.Mode = AEADMode(buf[0]) } +<<<<<<< HEAD if ske.Version > 5 { // Scalar octet count if _, err := readFull(r, buf[:]); err != nil { @@ -80,6 +98,8 @@ func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { } } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var err error if ske.s2k, err = s2k.Parse(r); err != nil { if _, ok := err.(errors.ErrDummyPrivateKey); ok { @@ -88,7 +108,11 @@ func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { return err } +<<<<<<< HEAD if ske.Version >= 5 { +======= + if ske.Version == 5 { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // AEAD IV iv := make([]byte, ske.Mode.IvLength()) _, err := readFull(r, iv) @@ -129,8 +153,13 @@ func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunc case 4: plaintextKey, cipherFunc, err := ske.decryptV4(key) return plaintextKey, cipherFunc, err +<<<<<<< HEAD case 5, 6: plaintextKey, err := ske.aeadDecrypt(ske.Version, key) +======= + case 5: + plaintextKey, err := ske.decryptV5(key) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return plaintextKey, CipherFunction(0), err } err := errors.UnsupportedError("unknown SymmetricKeyEncrypted version") @@ -156,9 +185,15 @@ func (ske *SymmetricKeyEncrypted) decryptV4(key []byte) ([]byte, CipherFunction, return plaintextKey, cipherFunc, nil } +<<<<<<< HEAD func (ske *SymmetricKeyEncrypted) aeadDecrypt(version int, key []byte) ([]byte, error) { adata := []byte{0xc3, byte(version), byte(ske.CipherFunc), byte(ske.Mode)} aead := getEncryptedKeyAeadInstance(ske.CipherFunc, ske.Mode, key, adata, version) +======= +func (ske *SymmetricKeyEncrypted) decryptV5(key []byte) ([]byte, error) { + adata := []byte{0xc3, byte(5), byte(ske.CipherFunc), byte(ske.Mode)} + aead := getEncryptedKeyAeadInstance(ske.CipherFunc, ske.Mode, key, adata) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) plaintextKey, err := aead.Open(nil, ske.iv, ske.encryptedKey, adata) if err != nil { @@ -195,6 +230,7 @@ func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Conf // the given passphrase. The returned session key must be passed to // SerializeSymmetricallyEncrypted. // If config is nil, sensible defaults will be used. +<<<<<<< HEAD // Deprecated: Use SerializeSymmetricKeyEncryptedAEADReuseKey instead. 
func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, passphrase []byte, config *Config) (err error) { return SerializeSymmetricKeyEncryptedAEADReuseKey(w, sessionKey, passphrase, config.AEAD() != nil, config) @@ -211,6 +247,12 @@ func SerializeSymmetricKeyEncryptedAEADReuseKey(w io.Writer, sessionKey []byte, var version int if aeadSupported { version = 6 +======= +func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, passphrase []byte, config *Config) (err error) { + var version int + if config.AEAD() != nil { + version = 5 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } else { version = 4 } @@ -235,15 +277,22 @@ func SerializeSymmetricKeyEncryptedAEADReuseKey(w io.Writer, sessionKey []byte, switch version { case 4: packetLength = 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize +<<<<<<< HEAD case 5, 6: +======= + case 5: +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ivLen := config.AEAD().Mode().IvLength() tagLen := config.AEAD().Mode().TagLength() packetLength = 3 + len(s2kBytes) + ivLen + keySize + tagLen } +<<<<<<< HEAD if version > 5 { packetLength += 2 // additional octet count fields } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength) if err != nil { return @@ -252,6 +301,7 @@ func SerializeSymmetricKeyEncryptedAEADReuseKey(w io.Writer, sessionKey []byte, // Symmetric Key Encrypted Version buf := []byte{byte(version)} +<<<<<<< HEAD if version > 5 { // Scalar octet count buf = append(buf, byte(3+len(s2kBytes)+config.AEAD().Mode().IvLength())) @@ -268,6 +318,15 @@ func SerializeSymmetricKeyEncryptedAEADReuseKey(w io.Writer, sessionKey []byte, // Scalar octet count buf = append(buf, byte(len(s2kBytes))) } +======= + // Cipher function + buf = append(buf, byte(cipherFunc)) + + if version == 5 { + // AEAD mode + buf = append(buf, byte(config.AEAD().Mode())) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) _, err = w.Write(buf) if err != nil { return @@ -288,10 +347,17 @@ func SerializeSymmetricKeyEncryptedAEADReuseKey(w io.Writer, sessionKey []byte, if err != nil { return } +<<<<<<< HEAD case 5, 6: mode := config.AEAD().Mode() adata := []byte{0xc3, byte(version), byte(cipherFunc), byte(mode)} aead := getEncryptedKeyAeadInstance(cipherFunc, mode, keyEncryptingKey, adata, version) +======= + case 5: + mode := config.AEAD().Mode() + adata := []byte{0xc3, byte(5), byte(cipherFunc), byte(mode)} + aead := getEncryptedKeyAeadInstance(cipherFunc, mode, keyEncryptingKey, adata) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Sample iv using random reader iv := make([]byte, config.AEAD().Mode().IvLength()) @@ -315,6 +381,7 @@ func SerializeSymmetricKeyEncryptedAEADReuseKey(w io.Writer, sessionKey []byte, return } +<<<<<<< HEAD func getEncryptedKeyAeadInstance(c CipherFunction, mode AEADMode, inputKey, associatedData []byte, version int) (aead cipher.AEAD) { var blockCipher cipher.Block if version > 5 { @@ -327,5 +394,9 @@ func getEncryptedKeyAeadInstance(c CipherFunction, mode AEADMode, inputKey, asso } else { blockCipher = c.new(inputKey) } +======= +func getEncryptedKeyAeadInstance(c CipherFunction, mode AEADMode, inputKey, associatedData []byte) (aead cipher.AEAD) { + blockCipher := c.new(inputKey) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return mode.new(blockCipher) } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go 
b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go index 0e898742cf..60e4017e3e 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go @@ -74,10 +74,13 @@ func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.Read // SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet // to w and returns a WriteCloser to which the to-be-encrypted packets can be // written. +<<<<<<< HEAD // If aeadSupported is set to true, SEIPDv2 is used with the indicated CipherSuite. // Otherwise, SEIPDv1 is used with the indicated CipherFunction. // Note: aeadSupported MUST match the value passed to SerializeEncryptedKeyAEAD // and/or SerializeSymmetricKeyEncryptedAEADReuseKey. +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // If config is nil, sensible defaults will be used. func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, aeadSupported bool, cipherSuite CipherSuite, key []byte, config *Config) (Contents io.WriteCloser, err error) { writeCloser := noOpCloser{w} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go index 3ddc4fe4a9..92b2dd7b5b 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go @@ -7,9 +7,13 @@ package packet import ( "crypto/cipher" "crypto/sha256" +<<<<<<< HEAD "fmt" "io" "strconv" +======= + "io" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/ProtonMail/go-crypto/openpgp/errors" "golang.org/x/crypto/hkdf" @@ -27,19 +31,31 @@ func (se *SymmetricallyEncrypted) parseAead(r io.Reader) error { se.Cipher = CipherFunction(headerData[0]) // cipherFunc must have block size 16 to use AEAD if se.Cipher.blockSize() != 16 { +<<<<<<< HEAD return errors.UnsupportedError("invalid aead cipher: " + strconv.Itoa(int(se.Cipher))) +======= + return errors.UnsupportedError("invalid aead cipher: " + string(se.Cipher)) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Mode se.Mode = AEADMode(headerData[1]) if se.Mode.TagLength() == 0 { +<<<<<<< HEAD return errors.UnsupportedError("unknown aead mode: " + strconv.Itoa(int(se.Mode))) +======= + return errors.UnsupportedError("unknown aead mode: " + string(se.Mode)) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Chunk size se.ChunkSizeByte = headerData[2] if se.ChunkSizeByte > 16 { +<<<<<<< HEAD return errors.UnsupportedError("invalid aead chunk size byte: " + strconv.Itoa(int(se.ChunkSizeByte))) +======= + return errors.UnsupportedError("invalid aead chunk size byte: " + string(se.ChunkSizeByte)) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Salt @@ -64,6 +80,7 @@ func (se *SymmetricallyEncrypted) associatedData() []byte { // decryptAead decrypts a V2 SEIPD packet (AEAD) as specified in // https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2 func (se *SymmetricallyEncrypted) decryptAead(inputKey []byte) (io.ReadCloser, error) { +<<<<<<< HEAD if se.Cipher.KeySize() != len(inputKey) { return nil, errors.StructuralError(fmt.Sprintf("invalid session key length for cipher: got %d bytes, but expected %d bytes", len(inputKey), se.Cipher.KeySize())) } @@ -74,6 +91,13 @@ func 
(se *SymmetricallyEncrypted) decryptAead(inputKey []byte) (io.ReadCloser, e tagLen := se.Mode.TagLength() chunkBytes := make([]byte, chunkSize+tagLen*2) peekedBytes := chunkBytes[chunkSize+tagLen:] +======= + aead, nonce := getSymmetricallyEncryptedAeadInstance(se.Cipher, se.Mode, inputKey, se.Salt[:], se.associatedData()) + + // Carry the first tagLen bytes + tagLen := se.Mode.TagLength() + peekedBytes := make([]byte, tagLen) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) n, err := io.ReadFull(se.Contents, peekedBytes) if n < tagLen || (err != nil && err != io.EOF) { return nil, errors.StructuralError("not enough data to decrypt:" + err.Error()) @@ -83,6 +107,7 @@ func (se *SymmetricallyEncrypted) decryptAead(inputKey []byte) (io.ReadCloser, e aeadCrypter: aeadCrypter{ aead: aead, chunkSize: decodeAEADChunkSize(se.ChunkSizeByte), +<<<<<<< HEAD nonce: nonce, associatedData: se.associatedData(), chunkIndex: nonce[len(nonce)-8:], @@ -90,6 +115,14 @@ func (se *SymmetricallyEncrypted) decryptAead(inputKey []byte) (io.ReadCloser, e }, reader: se.Contents, chunkBytes: chunkBytes, +======= + initialNonce: nonce, + associatedData: se.associatedData(), + chunkIndex: make([]byte, 8), + packetTag: packetTypeSymmetricallyEncryptedIntegrityProtected, + }, + reader: se.Contents, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) peekedBytes: peekedBytes, }, nil } @@ -123,7 +156,11 @@ func serializeSymmetricallyEncryptedAead(ciphertext io.WriteCloser, cipherSuite // Random salt salt := make([]byte, aeadSaltSize) +<<<<<<< HEAD if _, err := io.ReadFull(rand, salt); err != nil { +======= + if _, err := rand.Read(salt); err != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil, err } @@ -133,6 +170,7 @@ func serializeSymmetricallyEncryptedAead(ciphertext io.WriteCloser, cipherSuite aead, nonce := getSymmetricallyEncryptedAeadInstance(cipherSuite.Cipher, cipherSuite.Mode, inputKey, salt, prefix) +<<<<<<< HEAD chunkSize := decodeAEADChunkSize(chunkSizeByte) tagLen := aead.Overhead() chunkBytes := make([]byte, chunkSize+tagLen) @@ -147,6 +185,18 @@ func serializeSymmetricallyEncryptedAead(ciphertext io.WriteCloser, cipherSuite }, writer: ciphertext, chunkBytes: chunkBytes, +======= + return &aeadEncrypter{ + aeadCrypter: aeadCrypter{ + aead: aead, + chunkSize: decodeAEADChunkSize(chunkSizeByte), + associatedData: prefix, + chunkIndex: make([]byte, 8), + initialNonce: nonce, + packetTag: packetTypeSymmetricallyEncryptedIntegrityProtected, + }, + writer: ciphertext, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }, nil } @@ -156,10 +206,17 @@ func getSymmetricallyEncryptedAeadInstance(c CipherFunction, mode AEADMode, inpu encryptionKey := make([]byte, c.KeySize()) _, _ = readFull(hkdfReader, encryptionKey) +<<<<<<< HEAD nonce = make([]byte, mode.IvLength()) // Last 64 bits of nonce are the counter _, _ = readFull(hkdfReader, nonce[:len(nonce)-8]) +======= + // Last 64 bits of nonce are the counter + nonce = make([]byte, mode.IvLength()-8) + + _, _ = readFull(hkdfReader, nonce) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) blockCipher := c.new(encryptionKey) aead = mode.new(blockCipher) diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go index 8b18623684..c1c35c6b8f 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go +++ 
b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go @@ -148,7 +148,11 @@ const mdcPacketTagByte = byte(0x80) | 0x40 | 19 func (ser *seMDCReader) Close() error { if ser.error { +<<<<<<< HEAD return errors.ErrMDCHashMismatch +======= + return errors.ErrMDCMissing +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } for !ser.eof { @@ -159,7 +163,11 @@ func (ser *seMDCReader) Close() error { break } if err != nil { +<<<<<<< HEAD return errors.ErrMDCHashMismatch +======= + return errors.ErrMDCMissing +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -172,7 +180,11 @@ func (ser *seMDCReader) Close() error { // The hash already includes the MDC header, but we still check its value // to confirm encryption correctness if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size { +<<<<<<< HEAD return errors.ErrMDCHashMismatch +======= + return errors.ErrMDCMissing +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return nil } @@ -237,9 +249,15 @@ func serializeSymmetricallyEncryptedMdc(ciphertext io.WriteCloser, c CipherFunct block := c.new(key) blockSize := block.BlockSize() iv := make([]byte, blockSize) +<<<<<<< HEAD _, err = io.ReadFull(config.Random(), iv) if err != nil { return nil, err +======= + _, err = config.Random().Read(iv) + if err != nil { + return +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync) _, err = ciphertext.Write(prefix) diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go index 63814ed132..113ba00d12 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go @@ -9,6 +9,10 @@ import ( "image" "image/jpeg" "io" +<<<<<<< HEAD +======= + "io/ioutil" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const UserAttrImageSubpacket = 1 @@ -62,7 +66,11 @@ func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute { func (uat *UserAttribute) parse(r io.Reader) (err error) { // RFC 4880, section 5.13 +<<<<<<< HEAD b, err := io.ReadAll(r) +======= + b, err := ioutil.ReadAll(r) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go index 3c7451a3c3..676909a4ca 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go @@ -6,6 +6,10 @@ package packet import ( "io" +<<<<<<< HEAD +======= + "io/ioutil" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "strings" ) @@ -65,7 +69,11 @@ func NewUserId(name, comment, email string) *UserId { func (uid *UserId) parse(r io.Reader) (err error) { // RFC 4880, section 5.11 +<<<<<<< HEAD b, err := io.ReadAll(r) +======= + b, err := ioutil.ReadAll(r) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go index e6dd9b5fd3..b821bb519a 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go @@ -46,7 +46,10 @@ type MessageDetails struct { DecryptedWith Key // the private key used to decrypt the message, if any. 
IsSigned bool // true if the message is signed. SignedByKeyId uint64 // the key id of the signer, if any. +<<<<<<< HEAD SignedByFingerprint []byte // the key fingerprint of the signer, if any. +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) SignedBy *Key // the key of the signer, if available. LiteralData *packet.LiteralData // the metadata of the contents UnverifiedBody io.Reader // the contents of the message. @@ -118,7 +121,11 @@ ParsePackets: // This packet contains the decryption key encrypted to a public key. md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId) switch p.Algo { +<<<<<<< HEAD case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal, packet.PubKeyAlgoECDH, packet.PubKeyAlgoX25519, packet.PubKeyAlgoX448: +======= + case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal, packet.PubKeyAlgoECDH: +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) break default: continue @@ -233,7 +240,11 @@ FindKey: } mdFinal, sensitiveParsingErr := readSignedMessage(packets, md, keyring, config) if sensitiveParsingErr != nil { +<<<<<<< HEAD return nil, errors.HandleSensitiveParsingError(sensitiveParsingErr, md.decrypted != nil) +======= + return nil, errors.StructuralError("parsing error") +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return mdFinal, nil } @@ -271,17 +282,25 @@ FindLiteralData: prevLast = true } +<<<<<<< HEAD h, wrappedHash, err = hashForSignature(p.Hash, p.SigType, p.Salt) +======= + h, wrappedHash, err = hashForSignature(p.Hash, p.SigType) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { md.SignatureError = err } md.IsSigned = true +<<<<<<< HEAD if p.Version == 6 { md.SignedByFingerprint = p.KeyFingerprint } md.SignedByKeyId = p.KeyId +======= + md.SignedByKeyId = p.KeyId +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if keyring != nil { keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign) if len(keys) > 0 { @@ -297,7 +316,11 @@ FindLiteralData: if md.IsSigned && md.SignatureError == nil { md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md, config} } else if md.decrypted != nil { +<<<<<<< HEAD md.UnverifiedBody = &checkReader{md, false} +======= + md.UnverifiedBody = checkReader{md} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } else { md.UnverifiedBody = md.LiteralData.Body } @@ -305,6 +328,7 @@ FindLiteralData: return md, nil } +<<<<<<< HEAD func wrapHashForSignature(hashFunc hash.Hash, sigType packet.SignatureType) (hash.Hash, error) { switch sigType { case packet.SigTypeBinary: @@ -315,12 +339,18 @@ func wrapHashForSignature(hashFunc hash.Hash, sigType packet.SignatureType) (has return nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // hashForSignature returns a pair of hashes that can be used to verify a // signature. The signature may specify that the contents of the signed message // should be preprocessed (i.e. to normalize line endings). Thus this function // returns two hashes. The second should be used to hash the message itself and // performs any needed preprocessing. 
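The two-hash contract documented above matters only for text signatures: the second hash canonicalizes line endings to CRLF before updating the digest, which is what the NewCanonicalTextHash wrapper called a few lines below provides. A small illustration under that assumption; the input string is made up:

package pgpexample

import (
	"crypto/sha256"
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp"
)

func compareDigests() {
	// Binary signatures hash the message bytes exactly as given.
	raw := sha256.New()
	raw.Write([]byte("line one\nline two\n"))

	// Text signatures hash through the canonicalizing wrapper, which
	// rewrites "\n" to "\r\n" before passing bytes to the inner hash.
	inner := sha256.New()
	canon := openpgp.NewCanonicalTextHash(inner)
	canon.Write([]byte("line one\nline two\n"))

	fmt.Printf("binary: %x\n", raw.Sum(nil))
	fmt.Printf("text:   %x\n", inner.Sum(nil)) // differs: CRLF was hashed
}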
+<<<<<<< HEAD func hashForSignature(hashFunc crypto.Hash, sigType packet.SignatureType, sigSalt []byte) (hash.Hash, hash.Hash, error) { +======= +func hashForSignature(hashFunc crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if _, ok := algorithm.HashToHashIdWithSha1(hashFunc); !ok { return nil, nil, errors.UnsupportedError("unsupported hash function") } @@ -328,6 +358,7 @@ func hashForSignature(hashFunc crypto.Hash, sigType packet.SignatureType, sigSal return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashFunc))) } h := hashFunc.New() +<<<<<<< HEAD if sigSalt != nil { h.Write(sigSalt) } @@ -341,6 +372,16 @@ func hashForSignature(hashFunc crypto.Hash, sigType packet.SignatureType, sigSal case packet.SigTypeText: return h, wrappedHash, nil } +======= + + switch sigType { + case packet.SigTypeBinary: + return h, h, nil + case packet.SigTypeText: + return h, NewCanonicalTextHash(h), nil + } + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) } @@ -348,6 +389,7 @@ func hashForSignature(hashFunc crypto.Hash, sigType packet.SignatureType, sigSal // it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger // MDC checks. type checkReader struct { +<<<<<<< HEAD md *MessageDetails checked bool } @@ -359,16 +401,31 @@ func (cr *checkReader) Read(buf []byte) (int, error) { // Only check once return n, io.EOF } +======= + md *MessageDetails +} + +func (cr checkReader) Read(buf []byte) (int, error) { + n, sensitiveParsingError := cr.md.LiteralData.Body.Read(buf) + if sensitiveParsingError == io.EOF { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) mdcErr := cr.md.decrypted.Close() if mdcErr != nil { return n, mdcErr } +<<<<<<< HEAD cr.checked = true +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return n, io.EOF } if sensitiveParsingError != nil { +<<<<<<< HEAD return n, errors.HandleSensitiveParsingError(sensitiveParsingError, true) +======= + return n, errors.StructuralError("parsing error") +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return n, nil @@ -392,7 +449,10 @@ func (scr *signatureCheckReader) Read(buf []byte) (int, error) { scr.wrappedHash.Write(buf[:n]) } +<<<<<<< HEAD readsDecryptedData := scr.md.decrypted != nil +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if sensitiveParsingError == io.EOF { var p packet.Packet var readError error @@ -411,7 +471,11 @@ func (scr *signatureCheckReader) Read(buf []byte) (int, error) { key := scr.md.SignedBy signatureError := key.PublicKey.VerifySignature(scr.h, sig) if signatureError == nil { +<<<<<<< HEAD signatureError = checkMessageSignatureDetails(key, sig, scr.config) +======= + signatureError = checkSignatureDetails(key, sig, scr.config) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } scr.md.Signature = sig scr.md.SignatureError = signatureError @@ -435,15 +499,25 @@ func (scr *signatureCheckReader) Read(buf []byte) (int, error) { // unsigned hash of its own. In order to check this we need to // close that Reader. 
if scr.md.decrypted != nil { +<<<<<<< HEAD if sensitiveParsingError := scr.md.decrypted.Close(); sensitiveParsingError != nil { return n, errors.HandleSensitiveParsingError(sensitiveParsingError, true) +======= + mdcErr := scr.md.decrypted.Close() + if mdcErr != nil { + return n, mdcErr +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } return n, io.EOF } if sensitiveParsingError != nil { +<<<<<<< HEAD return n, errors.HandleSensitiveParsingError(sensitiveParsingError, readsDecryptedData) +======= + return n, errors.StructuralError("parsing error") +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return n, nil @@ -454,13 +528,22 @@ func (scr *signatureCheckReader) Read(buf []byte) (int, error) { // if any, and a possible signature verification error. // If the signer isn't known, ErrUnknownIssuer is returned. func VerifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { +<<<<<<< HEAD return verifyDetachedSignature(keyring, signed, signature, nil, false, config) +======= + var expectedHashes []crypto.Hash + return verifyDetachedSignature(keyring, signed, signature, expectedHashes, config) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // VerifyDetachedSignatureAndHash performs the same actions as // VerifyDetachedSignature and checks that the expected hash functions were used. func VerifyDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { +<<<<<<< HEAD return verifyDetachedSignature(keyring, signed, signature, expectedHashes, true, config) +======= + return verifyDetachedSignature(keyring, signed, signature, expectedHashes, config) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // CheckDetachedSignature takes a signed file and a detached signature and @@ -468,24 +551,41 @@ func VerifyDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader // signature verification error. If the signer isn't known, // ErrUnknownIssuer is returned. func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (signer *Entity, err error) { +<<<<<<< HEAD _, signer, err = verifyDetachedSignature(keyring, signed, signature, nil, false, config) return +======= + var expectedHashes []crypto.Hash + return CheckDetachedSignatureAndHash(keyring, signed, signature, expectedHashes, config) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // CheckDetachedSignatureAndHash performs the same actions as // CheckDetachedSignature and checks that the expected hash functions were used. 
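As a usage sketch for the detached-verification entry points above: parse a keyring, open the signed data and the detached signature, and hand both to CheckDetachedSignature. The paths and the EntityList keyring are illustrative, and a nil config again selects the defaults:

package pgpexample

import (
	"fmt"
	"os"

	"github.com/ProtonMail/go-crypto/openpgp"
	pgperrors "github.com/ProtonMail/go-crypto/openpgp/errors"
)

// verifyDetached checks the signature at sigPath over the file at signedPath.
func verifyDetached(keyring openpgp.EntityList, signedPath, sigPath string) error {
	signed, err := os.Open(signedPath)
	if err != nil {
		return err
	}
	defer signed.Close()

	sig, err := os.Open(sigPath)
	if err != nil {
		return err
	}
	defer sig.Close()

	signer, err := openpgp.CheckDetachedSignature(keyring, signed, sig, nil)
	if err == pgperrors.ErrUnknownIssuer {
		return fmt.Errorf("no key in the ring matches the signature issuer: %w", err)
	}
	if err != nil {
		return err
	}
	fmt.Printf("good signature from key %X\n", signer.PrimaryKey.KeyId)
	return nil
}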
func CheckDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (signer *Entity, err error) { +<<<<<<< HEAD _, signer, err = verifyDetachedSignature(keyring, signed, signature, expectedHashes, true, config) return } func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, checkHashes bool, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { +======= + _, signer, err = verifyDetachedSignature(keyring, signed, signature, expectedHashes, config) + return +} + +func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var issuerKeyId uint64 var hashFunc crypto.Hash var sigType packet.SignatureType var keys []Key var p packet.Packet +<<<<<<< HEAD +======= + expectedHashesLen := len(expectedHashes) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) packets := packet.NewReader(signature) for { p, err = packets.Next() @@ -507,6 +607,7 @@ func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expec issuerKeyId = *sig.IssuerKeyId hashFunc = sig.Hash sigType = sig.SigType +<<<<<<< HEAD if checkHashes { matchFound := false // check for hashes @@ -520,6 +621,18 @@ func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expec return nil, nil, errors.StructuralError("hash algorithm or salt mismatch with cleartext message headers") } } +======= + + for i, expectedHash := range expectedHashes { + if hashFunc == expectedHash { + break + } + if i+1 == expectedHashesLen { + return nil, nil, errors.StructuralError("hash algorithm mismatch with cleartext message headers") + } + } + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign) if len(keys) > 0 { break @@ -530,11 +643,15 @@ func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expec panic("unreachable") } +<<<<<<< HEAD h, err := sig.PrepareVerify() if err != nil { return nil, nil, err } wrappedHash, err := wrapHashForSignature(h, sigType) +======= + h, wrappedHash, err := hashForSignature(hashFunc, sigType) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, nil, err } @@ -546,7 +663,11 @@ func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expec for _, key := range keys { err = key.PublicKey.VerifySignature(h, sig) if err == nil { +<<<<<<< HEAD return sig, key.Entity, checkMessageSignatureDetails(&key, sig, config) +======= + return sig, key.Entity, checkSignatureDetails(&key, sig, config) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -564,7 +685,11 @@ func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader, return CheckDetachedSignature(keyring, signed, body, config) } +<<<<<<< HEAD // checkMessageSignatureDetails returns an error if: +======= +// checkSignatureDetails returns an error if: +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // - The signature (or one of the binding signatures mentioned below) // has a unknown critical notation data subpacket // - The primary key of the signing entity is revoked @@ -582,11 +707,23 @@ func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader, // NOTE: The order of these checks is important, as the caller may choose to // ignore 
ErrSignatureExpired or ErrKeyExpired errors, but should never // ignore any other errors. +<<<<<<< HEAD func checkMessageSignatureDetails(key *Key, signature *packet.Signature, config *packet.Config) error { now := config.Now() primarySelfSignature, primaryIdentity := key.Entity.PrimarySelfSignature() signedBySubKey := key.PublicKey != key.Entity.PrimaryKey sigsToCheck := []*packet.Signature{signature, primarySelfSignature} +======= +// +// TODO: Also return an error if: +// - The primary key is expired according to a direct-key signature +// - (For V5 keys only:) The direct-key signature (exists and) is expired +func checkSignatureDetails(key *Key, signature *packet.Signature, config *packet.Config) error { + now := config.Now() + primaryIdentity := key.Entity.PrimaryIdentity() + signedBySubKey := key.PublicKey != key.Entity.PrimaryKey + sigsToCheck := []*packet.Signature{signature, primaryIdentity.SelfSignature} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if signedBySubKey { sigsToCheck = append(sigsToCheck, key.SelfSignature, key.SelfSignature.EmbeddedSignature) } @@ -599,10 +736,17 @@ func checkMessageSignatureDetails(key *Key, signature *packet.Signature, config } if key.Entity.Revoked(now) || // primary key is revoked (signedBySubKey && key.Revoked(now)) || // subkey is revoked +<<<<<<< HEAD (primaryIdentity != nil && primaryIdentity.Revoked(now)) { // primary identity is revoked for v4 return errors.ErrKeyRevoked } if key.Entity.PrimaryKey.KeyExpired(primarySelfSignature, now) { // primary key is expired +======= + primaryIdentity.Revoked(now) { // primary identity is revoked + return errors.ErrKeyRevoked + } + if key.Entity.PrimaryKey.KeyExpired(primaryIdentity.SelfSignature, now) { // primary key is expired +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return errors.ErrKeyExpired } if signedBySubKey { diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go index 670d60226a..bc36863355 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go @@ -26,8 +26,11 @@ const testKeys1And2PrivateHex = "9501d8044d3c5c10010400b1d13382944bd5aba23a43129 const dsaElGamalTestKeysHex = 
"9501e1044dfcb16a110400aa3e5c1a1f43dd28c2ffae8abf5cfce555ee874134d8ba0a0f7b868ce2214beddc74e5e1e21ded354a95d18acdaf69e5e342371a71fbb9093162e0c5f3427de413a7f2c157d83f5cd2f9d791256dc4f6f0e13f13c3302af27f2384075ab3021dff7a050e14854bbde0a1094174855fc02f0bae8e00a340d94a1f22b32e48485700a0cec672ac21258fb95f61de2ce1af74b2c4fa3e6703ff698edc9be22c02ae4d916e4fa223f819d46582c0516235848a77b577ea49018dcd5e9e15cff9dbb4663a1ae6dd7580fa40946d40c05f72814b0f88481207e6c0832c3bded4853ebba0a7e3bd8e8c66df33d5a537cd4acf946d1080e7a3dcea679cb2b11a72a33a2b6a9dc85f466ad2ddf4c3db6283fa645343286971e3dd700703fc0c4e290d45767f370831a90187e74e9972aae5bff488eeff7d620af0362bfb95c1a6c3413ab5d15a2e4139e5d07a54d72583914661ed6a87cce810be28a0aa8879a2dd39e52fb6fe800f4f181ac7e328f740cde3d09a05cecf9483e4cca4253e60d4429ffd679d9996a520012aad119878c941e3cf151459873bdfc2a9563472fe0303027a728f9feb3b864260a1babe83925ce794710cfd642ee4ae0e5b9d74cee49e9c67b6cd0ea5dfbb582132195a121356a1513e1bca73e5b80c58c7ccb4164453412f456c47616d616c2054657374204b65792031886204131102002205024dfcb16a021b03060b090807030206150802090a0b0416020301021e01021780000a091033af447ccd759b09fadd00a0b8fd6f5a790bad7e9f2dbb7632046dc4493588db009c087c6a9ba9f7f49fab221587a74788c00db4889ab00200009d0157044dfcb16a1004008dec3f9291205255ccff8c532318133a6840739dd68b03ba942676f9038612071447bf07d00d559c5c0875724ea16a4c774f80d8338b55fca691a0522e530e604215b467bbc9ccfd483a1da99d7bc2648b4318fdbd27766fc8bfad3fddb37c62b8ae7ccfe9577e9b8d1e77c1d417ed2c2ef02d52f4da11600d85d3229607943700030503ff506c94c87c8cab778e963b76cf63770f0a79bf48fb49d3b4e52234620fc9f7657f9f8d56c96a2b7c7826ae6b57ebb2221a3fe154b03b6637cea7e6d98e3e45d87cf8dc432f723d3d71f89c5192ac8d7290684d2c25ce55846a80c9a7823f6acd9bb29fa6cd71f20bc90eccfca20451d0c976e460e672b000df49466408d527affe0303027a728f9feb3b864260abd761730327bca2aaa4ea0525c175e92bf240682a0e83b226f97ecb2e935b62c9a133858ce31b271fa8eb41f6a1b3cd72a63025ce1a75ee4180dcc284884904181102000905024dfcb16a021b0c000a091033af447ccd759b09dd0b009e3c3e7296092c81bee5a19929462caaf2fff3ae26009e218c437a2340e7ea628149af1ec98ec091a43992b00200009501e1044dfcb1be1104009f61faa61aa43df75d128cbe53de528c4aec49ce9360c992e70c77072ad5623de0a3a6212771b66b39a30dad6781799e92608316900518ec01184a85d872365b7d2ba4bacfb5882ea3c2473d3750dc6178cc1cf82147fb58caa28b28e9f12f6d1efcb0534abed644156c91cca4ab78834268495160b2400bc422beb37d237c2300a0cac94911b6d493bda1e1fbc6feeca7cb7421d34b03fe22cec6ccb39675bb7b94a335c2b7be888fd3906a1125f33301d8aa6ec6ee6878f46f73961c8d57a3e9544d8ef2a2cbfd4d52da665b1266928cfe4cb347a58c412815f3b2d2369dec04b41ac9a71cc9547426d5ab941cccf3b18575637ccfb42df1a802df3cfe0a999f9e7109331170e3a221991bf868543960f8c816c28097e503fe319db10fb98049f3a57d7c80c420da66d56f3644371631fad3f0ff4040a19a4fedc2d07727a1b27576f75a4d28c47d8246f27071e12d7a8de62aad216ddbae6aa02efd6b8a3e2818cda48526549791ab277e447b3a36c57cefe9b592f5eab73959743fcc8e83cbefec03a329b55018b53eec196765ae40ef9e20521a603c551efe0303020950d53a146bf9c66034d00c23130cce95576a2ff78016ca471276e8227fb30b1ffbd92e61804fb0c3eff9e30b1a826ee8f3e4730b4d86273ca977b4164453412f456c47616d616c2054657374204b65792032886204131102002205024dfcb1be021b03060b090807030206150802090a0b0416020301021e01021780000a0910a86bf526325b21b22bd9009e34511620415c974750a20df5cb56b182f3b48e6600a0a9466cb1a1305a84953445f77d461593f1d42bc1b00200009d0157044dfcb1be1004009565a951da1ee87119d600c077198f1c1bceb0f7aa54552489298e41ff788fa8f0d43a69871f0f6f77ebdfb14a4260cf9fbeb65d5844b4272a1904dd95136d06c3da745dc46327dd44a0f16f60135914368c8039a34033862261806bb2c5ce1152e2840254697872c85441ccb7321431d75a747a4b
fb1d2c66362b51ce76311700030503fc0ea76601c196768070b7365a200e6ddb09307f262d5f39eec467b5f5784e22abdf1aa49226f59ab37cb49969d8f5230ea65caf56015abda62604544ed526c5c522bf92bed178a078789f6c807b6d34885688024a5bed9e9f8c58d11d4b82487b44c5f470c5606806a0443b79cadb45e0f897a561a53f724e5349b9267c75ca17fe0303020950d53a146bf9c660bc5f4ce8f072465e2d2466434320c1e712272fafc20e342fe7608101580fa1a1a367e60486a7cd1246b7ef5586cf5e10b32762b710a30144f12dd17dd4884904181102000905024dfcb1be021b0c000a0910a86bf526325b21b2904c00a0b2b66b4b39ccffda1d10f3ea8d58f827e30a8b8e009f4255b2d8112a184e40cde43a34e8655ca7809370b0020000" +<<<<<<< HEAD const ed25519wX25519Key = "c54b0663877fe31b00000020f94da7bb48d60a61e567706a6587d0331999bb9d891a08242ead84543df895a3001972817b12be707e8d5f586ce61361201d344eb266a2c82fde6835762b65b0b7c2b1061f1b0a00000042058263877fe3030b090705150a0e080c021600029b03021e09222106cb186c4f0609a697e4d52dfa6c722b0c1f1e27c18a56708f6525ec27bad9acc905270902070200000000ad2820103e2d7d227ec0e6d7ce4471db36bfc97083253690271498a7ef0576c07faae14585b3b903b0127ec4fda2f023045a2ec76bcb4f9571a9651e14aee1137a1d668442c88f951e33c4ffd33fb9a17d511eed758fc6d9cc50cb5fd793b2039d5804c74b0663877fe319000000208693248367f9e5015db922f8f48095dda784987f2d5985b12fbad16caf5e4435004d600a4f794d44775c57a26e0feefed558e9afffd6ad0d582d57fb2ba2dcedb8c29b06181b0a0000002c050263877fe322a106cb186c4f0609a697e4d52dfa6c722b0c1f1e27c18a56708f6525ec27bad9acc9021b0c00000000defa20a6e9186d9d5935fc8fe56314cdb527486a5a5120f9b762a235a729f039010a56b89c658568341fbef3b894e9834ad9bc72afae2f4c9c47a43855e65f1cb0a3f77bbc5f61085c1f8249fe4e7ca59af5f0bcee9398e0fa8d76e522e1d8ab42bb0d" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) const signedMessageHex = "a3019bc0cbccc0c4b8d8b74ee2108fe16ec6d3ca490cbe362d3f8333d3f352531472538b8b13d353b97232f352158c20943157c71c16064626063656269052062e4e01987e9b6fccff4b7df3a34c534b23e679cbec3bc0f8f6e64dfb4b55fe3f8efa9ce110ddb5cd79faf1d753c51aecfa669f7e7aa043436596cccc3359cb7dd6bbe9ecaa69e5989d9e57209571edc0b2fa7f57b9b79a64ee6e99ce1371395fee92fec2796f7b15a77c386ff668ee27f6d38f0baa6c438b561657377bf6acff3c5947befd7bf4c196252f1d6e5c524d0300" const signedTextMessageHex = "a3019bc0cbccc8c4b8d8b74ee2108fe16ec6d36a250cbece0c178233d3f352531472538b8b13d35379b97232f352158ca0b4312f57c71c1646462606365626906a062e4e019811591798ff99bf8afee860b0d8a8c2a85c3387e3bcf0bb3b17987f2bbcfab2aa526d930cbfd3d98757184df3995c9f3e7790e36e3e9779f06089d4c64e9e47dd6202cb6e9bc73c5d11bb59fbaf89d22d8dc7cf199ddf17af96e77c5f65f9bbed56f427bd8db7af37f6c9984bf9385efaf5f184f986fb3e6adb0ecfe35bbf92d16a7aa2a344fb0bc52fb7624f0200" @@ -162,6 +165,7 @@ TcIYl5/Uyoi+FOvPLcNw4hOv2nwUzSSVAw== =IiS2 -----END PGP PRIVATE KEY BLOCK-----` +<<<<<<< HEAD // See OpenPGP crypto refresh Section A.3. 
const v6PrivKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- @@ -234,6 +238,20 @@ aU71tdtNBQ== =e7jT -----END PGP PRIVATE KEY BLOCK-----` +======= +// Generated with the above private key +const v5PrivKeyMsg = `-----BEGIN PGP MESSAGE----- +Version: OpenPGP.js v4.10.7 +Comment: https://openpgpjs.org + +xA0DAQoWGTR7yYckZAIByxF1B21zZy50eHRfbIGSdGVzdMJ3BQEWCgAGBQJf +bIGSACMiIQUZNHvJhyRkAl+Z3z7C4AAO2YhIkuH3s+pMlACRWVabVDQvAP9G +y29VPonFXqi2zKkpZrvyvZxg+n5e8Nt9wNbuxeCd3QD/TtO2s+JvjrE4Siwv +UQdl5MlBka1QSNbMq2Bz7XwNPg4= +=6lbM +-----END PGP MESSAGE-----` + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) const keyWithExpiredCrossSig = `-----BEGIN PGP PUBLIC KEY BLOCK----- xsDNBF2lnPIBDAC5cL9PQoQLTMuhjbYvb4Ncuuo0bfmgPRFywX53jPhoFf4Zg6mv @@ -334,6 +352,7 @@ AtNTq6ihLMD5v1d82ZC7tNatdlDMGWnIdvEMCv2GZcuIqDQ9rXWs49e7tq1NncLY hz3tYjKhoFTKEIq3y3Pp =h/aX -----END PGP PUBLIC KEY BLOCK-----` +<<<<<<< HEAD const keyv5Test = `-----BEGIN PGP PRIVATE KEY BLOCK----- Comment: Bob's OpenPGP Transferable Secret Key @@ -455,3 +474,5 @@ byVJHvLO/XErtC+GNIJeMg== =liRq -----END PGP MESSAGE----- ` +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go index 6871b84fc9..1e109b337c 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go @@ -87,10 +87,17 @@ func decodeCount(c uint8) int { // encodeMemory converts the Argon2 "memory" in the range parallelism*8 to // 2**31, inclusive, to an encoded memory. The return value is the // octet that is actually stored in the GPG file. encodeMemory panics +<<<<<<< HEAD // if is not in the above range // See OpenPGP crypto refresh Section 3.7.1.4. func encodeMemory(memory uint32, parallelism uint8) uint8 { if memory < (8*uint32(parallelism)) || memory > uint32(2147483648) { +======= +// if is not in the above range +// See OpenPGP crypto refresh Section 3.7.1.4. +func encodeMemory(memory uint32, parallelism uint8) uint8 { + if memory < (8 * uint32(parallelism)) || memory > uint32(2147483648) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) panic("Memory argument memory is outside the required range") } @@ -199,8 +206,13 @@ func Generate(rand io.Reader, c *Config) (*Params, error) { } params = &Params{ +<<<<<<< HEAD mode: SaltedS2K, hashId: hashId, +======= + mode: SaltedS2K, + hashId: hashId, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } else { // Enforce IteratedSaltedS2K method otherwise hashId, ok := algorithm.HashToHashId(c.hash()) @@ -211,7 +223,11 @@ func Generate(rand io.Reader, c *Config) (*Params, error) { c.S2KMode = IteratedSaltedS2K } params = &Params{ +<<<<<<< HEAD mode: IteratedSaltedS2K, +======= + mode: IteratedSaltedS2K, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) hashId: hashId, countByte: c.EncodedCount(), } @@ -283,9 +299,12 @@ func ParseIntoParams(r io.Reader) (params *Params, err error) { params.passes = buf[Argon2SaltSize] params.parallelism = buf[Argon2SaltSize+1] params.memoryExp = buf[Argon2SaltSize+2] +<<<<<<< HEAD if err := validateArgon2Params(params); err != nil { return nil, err } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return params, nil case GnuS2K: // This is a GNU extension. 
See @@ -303,22 +322,31 @@ func ParseIntoParams(r io.Reader) (params *Params, err error) { return nil, errors.UnsupportedError("S2K function") } +<<<<<<< HEAD func (params *Params) Mode() Mode { return params.mode } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (params *Params) Dummy() bool { return params != nil && params.mode == GnuS2K } func (params *Params) salt() []byte { switch params.mode { +<<<<<<< HEAD case SaltedS2K, IteratedSaltedS2K: return params.saltBytes[:8] case Argon2S2K: return params.saltBytes[:Argon2SaltSize] default: return nil +======= + case SaltedS2K, IteratedSaltedS2K: return params.saltBytes[:8] + case Argon2S2K: return params.saltBytes[:Argon2SaltSize] + default: return nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -415,6 +443,7 @@ func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Co f(key, passphrase) return nil } +<<<<<<< HEAD // validateArgon2Params checks that the argon2 parameters are valid according to RFC9580. func validateArgon2Params(params *Params) error { @@ -434,3 +463,5 @@ func validateArgon2Params(params *Params) error { return nil } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go index 616e0d12c6..9e0748796b 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go @@ -5,7 +5,11 @@ package s2k // the same parameters. type Cache map[Params][]byte +<<<<<<< HEAD // GetOrComputeDerivedKey tries to retrieve the key +======= +// GetOrComputeDerivedKey tries to retrieve the key +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // for the given s2k parameters from the cache. // If there is no hit, it derives the key with the s2k function from the passphrase, // updates the cache, and returns the key. diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go index b93db1ab85..e9ce286032 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go @@ -50,9 +50,15 @@ type Config struct { type Argon2Config struct { NumberOfPasses uint8 DegreeOfParallelism uint8 +<<<<<<< HEAD // Memory specifies the desired Argon2 memory usage in kibibytes. // For example memory=64*1024 sets the memory cost to ~64 MB. Memory uint32 +======= + // The memory parameter for Argon2 specifies desired memory usage in kibibytes. + // For example memory=64*1024 sets the memory cost to ~64 MB. 
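The encodeMemory helper changed above stores the Argon2 memory cost (in KiB) as a single octet: the smallest exponent e with 2^e KiB at or above the requested value. A hedged sketch of that mapping, not the vendored implementation verbatim:

package main

import (
	"fmt"
	"math/bits"
)

// encodeMemorySketch mirrors the documented contract: memory must lie in
// [parallelism*8, 2^31] KiB, and the stored octet is ceil(log2(memory)).
func encodeMemorySketch(memory uint32, parallelism uint8) uint8 {
	if memory < 8*uint32(parallelism) || memory > uint32(2147483648) {
		panic("memory argument is outside the required range")
	}
	// bits.Len32(memory-1) is the exponent of the next power of two >= memory.
	return uint8(bits.Len32(memory - 1))
}

func main() {
	fmt.Println(encodeMemorySketch(64*1024, 4)) // 16, i.e. 2^16 KiB = 64 MiB
}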
+ Memory uint32 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (c *Config) Mode() Mode { @@ -115,7 +121,11 @@ func (c *Argon2Config) EncodedMemory() uint8 { } memory := c.Memory +<<<<<<< HEAD lowerBound := uint32(c.Parallelism()) * 8 +======= + lowerBound := uint32(c.Parallelism())*8 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) upperBound := uint32(2147483648) switch { diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go index b0f6ef7b09..18a3d2c526 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go @@ -76,11 +76,15 @@ func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.S sig := createSignaturePacket(signingKey.PublicKey, sigType, config) +<<<<<<< HEAD h, err := sig.PrepareSign(config) if err != nil { return } wrappedHash, err := wrapHashForSignature(h, sig.SigType) +======= + h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return } @@ -279,6 +283,7 @@ func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entit return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)") } +<<<<<<< HEAD var salt []byte if signer != nil { var opsVersion = 3 @@ -287,12 +292,17 @@ func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entit } ops := &packet.OnePassSignature{ Version: opsVersion, +======= + if signer != nil { + ops := &packet.OnePassSignature{ +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) SigType: sigType, Hash: hash, PubKeyAlgo: signer.PubKeyAlgo, KeyId: signer.KeyId, IsLast: true, } +<<<<<<< HEAD if opsVersion == 6 { ops.KeyFingerprint = signer.Fingerprint salt, err = packet.SignatureSaltForHash(hash, config.Random()) @@ -301,6 +311,8 @@ func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entit } ops.Salt = salt } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err := ops.Serialize(payload); err != nil { return nil, err } @@ -328,19 +340,31 @@ func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entit } if signer != nil { +<<<<<<< HEAD h, wrappedHash, err := hashForSignature(hash, sigType, salt) +======= + h, wrappedHash, err := hashForSignature(hash, sigType) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } metadata := &packet.LiteralData{ +<<<<<<< HEAD Format: 'u', +======= + Format: 't', +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) FileName: hints.FileName, Time: epochSeconds, } if hints.IsBinary { metadata.Format = 'b' } +<<<<<<< HEAD return signatureWriter{payload, literalData, hash, wrappedHash, h, salt, signer, sigType, config, metadata}, nil +======= + return signatureWriter{payload, literalData, hash, wrappedHash, h, signer, sigType, config, metadata}, nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return literalData, nil } @@ -398,6 +422,7 @@ func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *En return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no valid encryption keys") } +<<<<<<< HEAD primarySelfSignature, _ := to[i].PrimarySelfSignature() if primarySelfSignature == nil { return nil, 
errors.InvalidArgumentError("entity without a self-signature") @@ -411,6 +436,17 @@ func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *En candidateHashes = intersectPreferences(candidateHashes, primarySelfSignature.PreferredHash) candidateCipherSuites = intersectCipherSuites(candidateCipherSuites, primarySelfSignature.PreferredCipherSuites) candidateCompression = intersectPreferences(candidateCompression, primarySelfSignature.PreferredCompression) +======= + sig := to[i].PrimaryIdentity().SelfSignature + if !sig.SEIPDv2 { + aeadSupported = false + } + + candidateCiphers = intersectPreferences(candidateCiphers, sig.PreferredSymmetric) + candidateHashes = intersectPreferences(candidateHashes, sig.PreferredHash) + candidateCipherSuites = intersectCipherSuites(candidateCipherSuites, sig.PreferredCipherSuites) + candidateCompression = intersectPreferences(candidateCompression, sig.PreferredCompression) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // In the event that the intersection of supported algorithms is empty we use the ones @@ -444,6 +480,7 @@ func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *En } } +<<<<<<< HEAD var symKey []byte if aeadSupported { symKey = make([]byte, aeadCipherSuite.Cipher.KeySize()) @@ -451,12 +488,19 @@ func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *En symKey = make([]byte, cipher.KeySize()) } +======= + symKey := make([]byte, cipher.KeySize()) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if _, err := io.ReadFull(config.Random(), symKey); err != nil { return nil, err } for _, key := range encryptKeys { +<<<<<<< HEAD if err := packet.SerializeEncryptedKeyAEAD(keyWriter, key.PublicKey, cipher, aeadSupported, symKey, config); err != nil { +======= + if err := packet.SerializeEncryptedKey(keyWriter, key.PublicKey, cipher, symKey, config); err != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil, err } } @@ -493,17 +537,25 @@ func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Con hashToHashId(crypto.SHA3_512), } defaultHashes := candidateHashes[0:1] +<<<<<<< HEAD primarySelfSignature, _ := signed.PrimarySelfSignature() if primarySelfSignature == nil { return nil, errors.StructuralError("signed entity has no self-signature") } preferredHashes := primarySelfSignature.PreferredHash +======= + preferredHashes := signed.PrimaryIdentity().SelfSignature.PreferredHash +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if len(preferredHashes) == 0 { preferredHashes = defaultHashes } candidateHashes = intersectPreferences(candidateHashes, preferredHashes) if len(candidateHashes) == 0 { +<<<<<<< HEAD return nil, errors.StructuralError("cannot sign because signing key shares no common algorithms with candidate hashes") +======= + return nil, errors.InvalidArgumentError("cannot sign because signing key shares no common algorithms with candidate hashes") +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return writeAndSign(noOpCloser{output}, candidateHashes, signed, hints, packet.SigTypeBinary, config) @@ -518,7 +570,10 @@ type signatureWriter struct { hashType crypto.Hash wrappedHash hash.Hash h hash.Hash +<<<<<<< HEAD salt []byte // v6 only +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) signer *packet.PrivateKey sigType packet.SignatureType config *packet.Config @@ -542,10 +597,13 @@ func (s signatureWriter) Close() error { sig.Hash = s.hashType sig.Metadata = s.metadata +<<<<<<< 
HEAD if err := sig.SetSalt(s.salt); err != nil { return err } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err := sig.Sign(s.h, s.signer, s.config); err != nil { return err } diff --git a/vendor/github.com/alecthomas/go-check-sumtype/README.md b/vendor/github.com/alecthomas/go-check-sumtype/README.md index 287aa68b7f..5a458db386 100644 --- a/vendor/github.com/alecthomas/go-check-sumtype/README.md +++ b/vendor/github.com/alecthomas/go-check-sumtype/README.md @@ -92,12 +92,15 @@ passing checks, set the `-default-signifies-exhasutive=false` flag. As a special case, if the type switch statement contains a `default` clause that always panics, then exhaustiveness checks are still performed. +<<<<<<< HEAD By default, `go-check-sumtype` will not include shared interfaces in the exhaustiviness check. This can be changed by setting the `-include-shared-interfaces=true` flag. When this flag is set, `go-check-sumtype` will not require that all concrete structs are listed in the switch statement, as long as the switch statement is exhaustive with respect to interfaces the structs implement. +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## Details and motivation Sum types are otherwise known as discriminated unions. That is, a sum type is diff --git a/vendor/github.com/alecthomas/go-check-sumtype/check.go b/vendor/github.com/alecthomas/go-check-sumtype/check.go index ff7fec728a..c286c653a8 100644 --- a/vendor/github.com/alecthomas/go-check-sumtype/check.go +++ b/vendor/github.com/alecthomas/go-check-sumtype/check.go @@ -29,7 +29,11 @@ func (e inexhaustiveError) Error() string { // Names returns a sorted list of names corresponding to the missing variant // cases. func (e inexhaustiveError) Names() []string { +<<<<<<< HEAD list := make([]string, 0, len(e.Missing)) +======= + var list []string +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, o := range e.Missing { list = append(list, o.Name()) } @@ -92,10 +96,13 @@ func missingVariantsInSwitch( ) (*sumTypeDef, []types.Object) { asserted := findTypeAssertExpr(swtch) ty := pkg.TypesInfo.TypeOf(asserted) +<<<<<<< HEAD if ty == nil { panic(fmt.Sprintf("no type found for asserted expression: %v", asserted)) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) def := findDef(defs, ty) if def == nil { // We couldn't find a corresponding sum type, so there's @@ -107,11 +114,19 @@ func missingVariantsInSwitch( // A catch-all case defeats all exhaustiveness checks. return def, nil } +<<<<<<< HEAD variantTypes := make([]types.Type, 0, len(variantExprs)) for _, expr := range variantExprs { variantTypes = append(variantTypes, pkg.TypesInfo.TypeOf(expr)) } return def, def.missing(variantTypes, config.IncludeSharedInterfaces) +======= + var variantTypes []types.Type + for _, expr := range variantExprs { + variantTypes = append(variantTypes, pkg.TypesInfo.TypeOf(expr)) + } + return def, def.missing(variantTypes) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // switchVariants returns all case expressions found in a type switch. This diff --git a/vendor/github.com/alecthomas/go-check-sumtype/config.go b/vendor/github.com/alecthomas/go-check-sumtype/config.go index 5c722b75c4..bb09a184c5 100644 --- a/vendor/github.com/alecthomas/go-check-sumtype/config.go +++ b/vendor/github.com/alecthomas/go-check-sumtype/config.go @@ -2,7 +2,10 @@ package gochecksumtype type Config struct { DefaultSignifiesExhaustive bool +<<<<<<< HEAD // IncludeSharedInterfaces in the exhaustiviness check. 
If true, we do not need to list all concrete structs, as long // as the switch statement is exhaustive with respect to interfaces the structs implement. IncludeSharedInterfaces bool +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/alecthomas/go-check-sumtype/def.go b/vendor/github.com/alecthomas/go-check-sumtype/def.go index 71bdf2f72d..01f02cffc5 100644 --- a/vendor/github.com/alecthomas/go-check-sumtype/def.go +++ b/vendor/github.com/alecthomas/go-check-sumtype/def.go @@ -71,7 +71,11 @@ type sumTypeDef struct { // sum type declarations. If no such sum type definition could be found for // any of the given declarations, then an error is returned. func findSumTypeDefs(decls []sumTypeDecl) ([]sumTypeDef, []error) { +<<<<<<< HEAD defs := make([]sumTypeDef, 0, len(decls)) +======= + var defs []sumTypeDef +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var errs []error for _, decl := range decls { def, err := newSumTypeDef(decl.Package.Types, decl) @@ -104,7 +108,11 @@ func newSumTypeDef(pkg *types.Package, decl sumTypeDecl) (*sumTypeDef, error) { return nil, notInterfaceError{decl} } hasUnexported := false +<<<<<<< HEAD for i := range iface.NumMethods() { +======= + for i := 0; i < iface.NumMethods(); i++ { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !iface.Method(i).Exported() { hasUnexported = true break @@ -145,7 +153,11 @@ func (def *sumTypeDef) String() string { // missing returns a list of variants in this sum type that are not in the // given list of types. +<<<<<<< HEAD func (def *sumTypeDef) missing(tys []types.Type, includeSharedInterfaces bool) []types.Object { +======= +func (def *sumTypeDef) missing(tys []types.Type) []types.Object { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // TODO(ag): This is O(n^2). Fix that. /shrug var missing []types.Object for _, v := range def.Variants { @@ -155,6 +167,7 @@ func (def *sumTypeDef) missing(tys []types.Type, includeSharedInterfaces bool) [ ty = indirect(ty) if types.Identical(varty, ty) { found = true +<<<<<<< HEAD break } if includeSharedInterfaces && implements(varty, ty) { @@ -166,18 +179,26 @@ func (def *sumTypeDef) missing(tys []types.Type, includeSharedInterfaces bool) [ // we do not include interfaces extending the sumtype, as the // all implementations of those interfaces are already covered // by the sumtype. +======= + } + } + if !found { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) missing = append(missing, v) } } return missing } +<<<<<<< HEAD func isInterface(ty types.Type) bool { underlying := indirect(ty).Underlying() _, ok := underlying.(*types.Interface) return ok } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // indirect dereferences through an arbitrary number of pointer types. 
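To make the go-check-sumtype changes in the hunks above concrete, here is a hedged, self-contained example of the kind of declaration and type switch the linter checks (names invented for illustration):

package shapes

// The linter is driven by a comment of this form on an interface with an
// unexported method:
//sumtype:decl
type Shape interface{ isShape() }

type Circle struct{ R float64 }
type Square struct{ S float64 }

func (Circle) isShape() {}
func (Square) isShape() {}

// With -include-shared-interfaces=true (the new option above), a case on an
// interface implemented by several variants can stand in for listing each
// concrete struct; otherwise every variant must appear for exhaustiveness.
func Area(s Shape) float64 {
	switch v := s.(type) {
	case Circle:
		return 3.14159 * v.R * v.R
	case Square:
		return v.S * v.S
	}
	return 0 // unreachable when the switch is exhaustive
}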
func indirect(ty types.Type) types.Type { if ty, ok := ty.(*types.Pointer); ok { @@ -185,6 +206,7 @@ func indirect(ty types.Type) types.Type { } return ty } +<<<<<<< HEAD func implements(varty, interfaceType types.Type) bool { underlying := interfaceType.Underlying() @@ -193,3 +215,5 @@ func implements(varty, interfaceType types.Type) bool { } return false } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go index a015cc5b20..ba4aa81b5e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go @@ -165,6 +165,7 @@ type Config struct { // Controls how a resolved AWS account ID is handled for endpoint routing. AccountIDEndpointMode AccountIDEndpointMode +<<<<<<< HEAD // RequestChecksumCalculation determines when request checksum calculation is performed. // @@ -192,6 +193,8 @@ type Config struct { // This variable is sourced from environment variable AWS_RESPONSE_CHECKSUM_VALIDATION or // the shared config profile attribute "response_checksum_validation". ResponseChecksumValidation ResponseChecksumValidation +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // NewConfig returns a new Config pointer that can be chained with builder diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go index 57d5c02ee5..b11e09ef33 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go @@ -3,4 +3,8 @@ package aws // goModuleVersion is the tagged release for this module +<<<<<<< HEAD const goModuleVersion = "1.34.0" +======= +const goModuleVersion = "1.32.4" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go index 01d758d5ff..bd8e4b3c92 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go @@ -34,9 +34,12 @@ const ( FeatureMetadata2 ) +<<<<<<< HEAD // Hardcoded value to specify which version of the user agent we're using const uaMetadata = "ua/2.1" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (k SDKAgentKeyType) string() string { switch k { case APIMetadata: @@ -76,6 +79,7 @@ type UserAgentFeature string // Enumerates UserAgentFeature. 
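The new ua/2.1 metadata key above and the feature codes enumerated in the const block that follows are joined into the User-Agent header by smithy-go's builder. A rough sketch of the assembly (the output shape is an assumption, not the SDK's exact string):

package main

import (
	"fmt"

	smithyhttp "github.com/aws/smithy-go/transport/http"
)

func main() {
	b := smithyhttp.NewUserAgentBuilder()
	b.AddKeyValue("aws-sdk-go-v2", "1.34.0") // product name, as in addProductName
	b.AddKey("ua/2.1")                       // metadata key added on the HEAD side
	b.AddKey("m/E,J")                        // assumed shape of the feature-metrics token
	fmt.Println(b.Build())
	// Assumed output: aws-sdk-go-v2/1.34.0 ua/2.1 m/E,J
}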
const ( +<<<<<<< HEAD UserAgentFeatureResourceModel UserAgentFeature = "A" // n/a (we don't generate separate resource types) UserAgentFeatureWaiter = "B" UserAgentFeaturePaginator = "C" @@ -98,6 +102,21 @@ const ( UserAgentFeatureRequestChecksumWhenRequired = "a" UserAgentFeatureResponseChecksumWhenSupported = "b" UserAgentFeatureResponseChecksumWhenRequired = "c" +======= + UserAgentFeatureResourceModel UserAgentFeature = "A" // n/a (we don't generate separate resource types) + UserAgentFeatureWaiter = "B" + UserAgentFeaturePaginator = "C" + UserAgentFeatureRetryModeLegacy = "D" // n/a (equivalent to standard) + UserAgentFeatureRetryModeStandard = "E" + UserAgentFeatureRetryModeAdaptive = "F" + UserAgentFeatureS3Transfer = "G" + UserAgentFeatureS3CryptoV1N = "H" // n/a (crypto client is external) + UserAgentFeatureS3CryptoV2 = "I" // n/a + UserAgentFeatureS3ExpressBucket = "J" + UserAgentFeatureS3AccessGrants = "K" // not yet implemented + UserAgentFeatureGZIPRequestCompression = "L" + UserAgentFeatureProtocolRPCV2CBOR = "M" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // RequestUserAgent is a build middleware that set the User-Agent for the request. @@ -119,7 +138,10 @@ type RequestUserAgent struct { func NewRequestUserAgent() *RequestUserAgent { userAgent, sdkAgent := smithyhttp.NewUserAgentBuilder(), smithyhttp.NewUserAgentBuilder() addProductName(userAgent) +<<<<<<< HEAD addUserAgentMetadata(userAgent) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) addProductName(sdkAgent) r := &RequestUserAgent{ @@ -147,10 +169,13 @@ func addProductName(builder *smithyhttp.UserAgentBuilder) { builder.AddKeyValue(aws.SDKName, aws.SDKVersion) } +<<<<<<< HEAD func addUserAgentMetadata(builder *smithyhttp.UserAgentBuilder) { builder.AddKey(uaMetadata) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // AddUserAgentKey retrieves a requestUserAgent from the provided stack, or initializes one. func AddUserAgentKey(key string) func(*middleware.Stack) error { return func(stack *middleware.Stack) error { @@ -275,10 +300,17 @@ func (u *RequestUserAgent) HandleBuild(ctx context.Context, in middleware.BuildI func (u *RequestUserAgent) addHTTPUserAgent(request *smithyhttp.Request) { const userAgent = "User-Agent" +<<<<<<< HEAD if len(u.features) > 0 { updateHTTPHeader(request, userAgent, buildFeatureMetrics(u.features)) } updateHTTPHeader(request, userAgent, u.userAgent.Build()) +======= + updateHTTPHeader(request, userAgent, u.userAgent.Build()) + if len(u.features) > 0 { + updateHTTPHeader(request, userAgent, buildFeatureMetrics(u.features)) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (u *RequestUserAgent) addHTTPSDKAgent(request *smithyhttp.Request) { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go index 6669a3ddfd..68b63709d4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go @@ -1,8 +1,13 @@ package query import ( +<<<<<<< HEAD "net/url" "strconv" +======= + "fmt" + "net/url" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // Array represents the encoding of Query lists and sets. A Query array is a @@ -21,8 +26,24 @@ type Array struct { // keys for each element in the list. For example, an entry might have the // key "ParentStructure.ListName.member.MemberName.1". 
// +<<<<<<< HEAD // When the array is not flat the prefix will contain the memberName otherwise the memberName is ignored prefix string +======= + // While this is currently represented as a string that gets added to, it + // could also be represented as a stack that only gets condensed into a + // string when a finalized key is created. This could potentially reduce + // allocations. + prefix string + // Whether the list is flat or not. A list that is not flat will produce the + // following entry to the url.Values for a given entry: + // ListName.MemberName.1=value + // A list that is flat will produce the following: + // ListName.1=value + flat bool + // The location name of the member. In most cases this should be "member". + memberName string +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Elements are stored in values, so we keep track of the list size here. size int32 // Empty lists are encoded as "=", if we add a value later we will @@ -34,6 +55,7 @@ func newArray(values url.Values, prefix string, flat bool, memberName string) *A emptyValue := newValue(values, prefix, flat) emptyValue.String("") +<<<<<<< HEAD if !flat { // This uses string concatenation in place of fmt.Sprintf as fmt.Sprintf has a much higher resource overhead prefix = prefix + keySeparator + memberName @@ -42,6 +64,13 @@ func newArray(values url.Values, prefix string, flat bool, memberName string) *A return &Array{ values: values, prefix: prefix, +======= + return &Array{ + values: values, + prefix: prefix, + flat: flat, + memberName: memberName, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) emptyValue: emptyValue, } } @@ -55,7 +84,16 @@ func (a *Array) Value() Value { // Query lists start a 1, so adjust the size first a.size++ +<<<<<<< HEAD // Lists can't have flat members // This uses string concatenation in place of fmt.Sprintf as fmt.Sprintf has a much higher resource overhead return newValue(a.values, a.prefix+keySeparator+strconv.FormatInt(int64(a.size), 10), false) +======= + prefix := a.prefix + if !a.flat { + prefix = fmt.Sprintf("%s.%s", prefix, a.memberName) + } + // Lists can't have flat members + return newValue(a.values, fmt.Sprintf("%s.%d", prefix, a.size), false) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go index 305a8ace30..f722e25e89 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go @@ -1,6 +1,13 @@ package query +<<<<<<< HEAD import "net/url" +======= +import ( + "fmt" + "net/url" +) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Object represents the encoding of Query structures and unions. 
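The Array hunks above change how Query list keys are built (plain string concatenation instead of fmt.Sprintf, which carries a much higher allocation overhead) while preserving the key shapes. A hedged sketch of the two shapes, flat and non-flat:

package main

import (
	"fmt"
	"net/url"
	"strconv"
)

// buildListKey mimics the key construction described above: a non-flat list
// inserts the member name ("member" in most cases); a flat list does not.
// Sketch only; the vendored encoder tracks this state in its Array type.
func buildListKey(prefix, memberName string, flat bool, index int32) string {
	if !flat {
		prefix = prefix + "." + memberName
	}
	return prefix + "." + strconv.FormatInt(int64(index), 10)
}

func main() {
	vals := url.Values{}
	vals.Set(buildListKey("ParentStructure.ListName", "member", false, 1), "v1")
	vals.Set(buildListKey("ParentStructure.FlatList", "member", true, 1), "v2")
	fmt.Println(vals.Encode())
	// ParentStructure.FlatList.1=v2&ParentStructure.ListName.member.1=v1
}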
A Query // object is a representation of a mapping of string keys to arbitrary @@ -53,16 +60,24 @@ func (o *Object) FlatKey(name string) Value { func (o *Object) key(name string, flatValue bool) Value { if o.prefix != "" { +<<<<<<< HEAD // This uses string concatenation in place of fmt.Sprintf as fmt.Sprintf has a much higher resource overhead return newValue(o.values, o.prefix+keySeparator+name, flatValue) +======= + return newValue(o.values, fmt.Sprintf("%s.%s", o.prefix, name), flatValue) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return newValue(o.values, name, flatValue) } func (o *Object) keyWithValues(name string, flatValue bool) Value { if o.prefix != "" { +<<<<<<< HEAD // This uses string concatenation in place of fmt.Sprintf as fmt.Sprintf has a much higher resource overhead return newAppendValue(o.values, o.prefix+keySeparator+name, flatValue) +======= + return newAppendValue(o.values, fmt.Sprintf("%s.%s", o.prefix, name), flatValue) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return newAppendValue(o.values, name, flatValue) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go index 8063c592dd..d7b463f04b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go @@ -7,8 +7,11 @@ import ( "github.com/aws/smithy-go/encoding/httpbinding" ) +<<<<<<< HEAD const keySeparator = "." +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Value represents a Query Value type. type Value struct { // The query values to add the value to. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go index 1b485f9988..29531ff294 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go @@ -116,6 +116,7 @@ func (r RetryableConnectionError) IsErrorRetryable(err error) aws.Ternary { case errors.As(err, &conErr) && conErr.ConnectionError(): retryable = true +<<<<<<< HEAD case strings.Contains(err.Error(), "use of closed network connection"): fallthrough case strings.Contains(err.Error(), "connection reset"): @@ -123,6 +124,9 @@ func (r RetryableConnectionError) IsErrorRetryable(err error) aws.Ternary { // are effectively the same. It appears to be the difference between // sync and async read of TCP RST in the stdlib's net.Conn read loop. 
// see #2737 +======= + case strings.Contains(err.Error(), "connection reset"): +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) retryable = true case errors.As(err, &urlErr): diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md index 3cb7d14ef4..f7bad1cf85 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md @@ -1,3 +1,4 @@ +<<<<<<< HEAD # v1.29.2 (2025-01-24) * **Bug Fix**: Fix env config naming and usage of deprecated ioutil @@ -47,6 +48,8 @@ * **Dependency Update**: Updated to the latest SDK module versions +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) # v1.28.3 (2024-11-07) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go index 09d9b63116..2e5b7a52de 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go @@ -83,12 +83,15 @@ var defaultAWSConfigResolvers = []awsConfigResolver{ // Sets the AccountIDEndpointMode if present in env var or shared config profile resolveAccountIDEndpointMode, +<<<<<<< HEAD // Sets the RequestChecksumCalculation if present in env var or shared config profile resolveRequestChecksumCalculation, // Sets the ResponseChecksumValidation if present in env var or shared config profile resolveResponseChecksumValidation, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // A Config represents a generic configuration value or set of values. This type @@ -218,7 +221,11 @@ func resolveConfigLoaders(options *LoadOptions) []loader { loaders[0] = loadEnvConfig // specification of a profile should cause a load failure if it doesn't exist +<<<<<<< HEAD if os.Getenv(awsProfileEnv) != "" || options.SharedConfigProfile != "" { +======= + if os.Getenv(awsProfileEnvVar) != "" || options.SharedConfigProfile != "" { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) loaders[1] = loadSharedConfig } else { loaders[1] = loadSharedConfigIgnoreNotExist diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go index 9db507e38e..0783f9eedf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go @@ -5,6 +5,10 @@ import ( "context" "fmt" "io" +<<<<<<< HEAD +======= + "io/ioutil" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "os" "strconv" "strings" @@ -20,6 +24,7 @@ const CredentialsSourceName = "EnvConfigCredentials" // Environment variables that will be read for configuration values. 
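Per the comment above, the constants that follow name the environment variables the loader reads; the HEAD side renames them (dropping the EnvVar suffix) and adds checksum settings. A hedged sketch of how two of the new variables surface in the resolved aws.Config:

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	// These variables are read by NewEnvConfig via the constants below.
	os.Setenv("AWS_REGION", "us-east-1")
	os.Setenv("AWS_REQUEST_CHECKSUM_CALCULATION", "when_required")
	os.Setenv("AWS_RESPONSE_CHECKSUM_VALIDATION", "when_supported")

	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.Region,
		cfg.RequestChecksumCalculation == aws.RequestChecksumCalculationWhenRequired,
		cfg.ResponseChecksumValidation == aws.ResponseChecksumValidationWhenSupported)
}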
const ( +<<<<<<< HEAD awsAccessKeyIDEnv = "AWS_ACCESS_KEY_ID" awsAccessKeyEnv = "AWS_ACCESS_KEY" @@ -77,18 +82,81 @@ const ( awsDisableRequestCompressionEnv = "AWS_DISABLE_REQUEST_COMPRESSION" awsRequestMinCompressionSizeBytesEnv = "AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES" +======= + awsAccessKeyIDEnvVar = "AWS_ACCESS_KEY_ID" + awsAccessKeyEnvVar = "AWS_ACCESS_KEY" + + awsSecretAccessKeyEnvVar = "AWS_SECRET_ACCESS_KEY" + awsSecretKeyEnvVar = "AWS_SECRET_KEY" + + awsSessionTokenEnvVar = "AWS_SESSION_TOKEN" + + awsContainerCredentialsEndpointEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" + awsContainerCredentialsRelativePathEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" + awsContainerPProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN" + + awsRegionEnvVar = "AWS_REGION" + awsDefaultRegionEnvVar = "AWS_DEFAULT_REGION" + + awsProfileEnvVar = "AWS_PROFILE" + awsDefaultProfileEnvVar = "AWS_DEFAULT_PROFILE" + + awsSharedCredentialsFileEnvVar = "AWS_SHARED_CREDENTIALS_FILE" + + awsConfigFileEnvVar = "AWS_CONFIG_FILE" + + awsCustomCABundleEnvVar = "AWS_CA_BUNDLE" + + awsWebIdentityTokenFilePathEnvVar = "AWS_WEB_IDENTITY_TOKEN_FILE" + + awsRoleARNEnvVar = "AWS_ROLE_ARN" + awsRoleSessionNameEnvVar = "AWS_ROLE_SESSION_NAME" + + awsEnableEndpointDiscoveryEnvVar = "AWS_ENABLE_ENDPOINT_DISCOVERY" + + awsS3UseARNRegionEnvVar = "AWS_S3_USE_ARN_REGION" + + awsEc2MetadataServiceEndpointModeEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE" + + awsEc2MetadataServiceEndpointEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT" + + awsEc2MetadataDisabled = "AWS_EC2_METADATA_DISABLED" + awsEc2MetadataV1DisabledEnvVar = "AWS_EC2_METADATA_V1_DISABLED" + + awsS3DisableMultiRegionAccessPointEnvVar = "AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS" + + awsUseDualStackEndpoint = "AWS_USE_DUALSTACK_ENDPOINT" + + awsUseFIPSEndpoint = "AWS_USE_FIPS_ENDPOINT" + + awsDefaultMode = "AWS_DEFAULTS_MODE" + + awsRetryMaxAttempts = "AWS_MAX_ATTEMPTS" + awsRetryMode = "AWS_RETRY_MODE" + awsSdkAppID = "AWS_SDK_UA_APP_ID" + + awsIgnoreConfiguredEndpoints = "AWS_IGNORE_CONFIGURED_ENDPOINT_URLS" + awsEndpointURL = "AWS_ENDPOINT_URL" + + awsDisableRequestCompression = "AWS_DISABLE_REQUEST_COMPRESSION" + awsRequestMinCompressionSizeBytes = "AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) awsS3DisableExpressSessionAuthEnv = "AWS_S3_DISABLE_EXPRESS_SESSION_AUTH" awsAccountIDEnv = "AWS_ACCOUNT_ID" awsAccountIDEndpointModeEnv = "AWS_ACCOUNT_ID_ENDPOINT_MODE" +<<<<<<< HEAD awsRequestChecksumCalculation = "AWS_REQUEST_CHECKSUM_CALCULATION" awsResponseChecksumValidation = "AWS_RESPONSE_CHECKSUM_VALIDATION" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) var ( credAccessEnvKeys = []string{ +<<<<<<< HEAD awsAccessKeyIDEnv, awsAccessKeyEnv, } @@ -103,6 +171,22 @@ var ( profileEnvKeys = []string{ awsProfileEnv, awsDefaultProfileEnv, +======= + awsAccessKeyIDEnvVar, + awsAccessKeyEnvVar, + } + credSecretEnvKeys = []string{ + awsSecretAccessKeyEnvVar, + awsSecretKeyEnvVar, + } + regionEnvKeys = []string{ + awsRegionEnvVar, + awsDefaultRegionEnvVar, + } + profileEnvKeys = []string{ + awsProfileEnvVar, + awsDefaultProfileEnvVar, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } ) @@ -298,12 +382,15 @@ type EnvConfig struct { // Indicates whether account ID will be required/ignored in endpoint2.0 routing AccountIDEndpointMode aws.AccountIDEndpointMode +<<<<<<< HEAD // Indicates whether request checksum should be calculated RequestChecksumCalculation 
aws.RequestChecksumCalculation // Indicates whether response checksum should be validated ResponseChecksumValidation aws.ResponseChecksumValidation +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // loadEnvConfig reads configuration values from the OS's environment variables. @@ -324,6 +411,7 @@ func NewEnvConfig() (EnvConfig, error) { setStringFromEnvVal(&creds.SecretAccessKey, credSecretEnvKeys) if creds.HasKeys() { creds.AccountID = os.Getenv(awsAccountIDEnv) +<<<<<<< HEAD creds.SessionToken = os.Getenv(awsSessionTokenEnv) cfg.Credentials = creds } @@ -331,10 +419,20 @@ func NewEnvConfig() (EnvConfig, error) { cfg.ContainerCredentialsEndpoint = os.Getenv(awsContainerCredentialsFullURIEnv) cfg.ContainerCredentialsRelativePath = os.Getenv(awsContainerCredentialsRelativeURIEnv) cfg.ContainerAuthorizationToken = os.Getenv(awsContainerAuthorizationTokenEnv) +======= + creds.SessionToken = os.Getenv(awsSessionTokenEnvVar) + cfg.Credentials = creds + } + + cfg.ContainerCredentialsEndpoint = os.Getenv(awsContainerCredentialsEndpointEnvVar) + cfg.ContainerCredentialsRelativePath = os.Getenv(awsContainerCredentialsRelativePathEnvVar) + cfg.ContainerAuthorizationToken = os.Getenv(awsContainerPProviderAuthorizationEnvVar) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) setStringFromEnvVal(&cfg.Region, regionEnvKeys) setStringFromEnvVal(&cfg.SharedConfigProfile, profileEnvKeys) +<<<<<<< HEAD cfg.SharedCredentialsFile = os.Getenv(awsSharedCredentialsFileEnv) cfg.SharedConfigFile = os.Getenv(awsConfigFileEnv) @@ -397,6 +495,70 @@ func NewEnvConfig() (EnvConfig, error) { setStringFromEnvVal(&cfg.BaseEndpoint, []string{awsEndpointURLEnv}) if err := setBoolPtrFromEnvVal(&cfg.IgnoreConfiguredEndpoints, []string{awsIgnoreConfiguredEndpointURLEnv}); err != nil { +======= + cfg.SharedCredentialsFile = os.Getenv(awsSharedCredentialsFileEnvVar) + cfg.SharedConfigFile = os.Getenv(awsConfigFileEnvVar) + + cfg.CustomCABundle = os.Getenv(awsCustomCABundleEnvVar) + + cfg.WebIdentityTokenFilePath = os.Getenv(awsWebIdentityTokenFilePathEnvVar) + + cfg.RoleARN = os.Getenv(awsRoleARNEnvVar) + cfg.RoleSessionName = os.Getenv(awsRoleSessionNameEnvVar) + + cfg.AppID = os.Getenv(awsSdkAppID) + + if err := setBoolPtrFromEnvVal(&cfg.DisableRequestCompression, []string{awsDisableRequestCompression}); err != nil { + return cfg, err + } + if err := setInt64PtrFromEnvVal(&cfg.RequestMinCompressSizeBytes, []string{awsRequestMinCompressionSizeBytes}, smithyrequestcompression.MaxRequestMinCompressSizeBytes); err != nil { + return cfg, err + } + + if err := setEndpointDiscoveryTypeFromEnvVal(&cfg.EnableEndpointDiscovery, []string{awsEnableEndpointDiscoveryEnvVar}); err != nil { + return cfg, err + } + + if err := setBoolPtrFromEnvVal(&cfg.S3UseARNRegion, []string{awsS3UseARNRegionEnvVar}); err != nil { + return cfg, err + } + + setEC2IMDSClientEnableState(&cfg.EC2IMDSClientEnableState, []string{awsEc2MetadataDisabled}) + if err := setEC2IMDSEndpointMode(&cfg.EC2IMDSEndpointMode, []string{awsEc2MetadataServiceEndpointModeEnvVar}); err != nil { + return cfg, err + } + cfg.EC2IMDSEndpoint = os.Getenv(awsEc2MetadataServiceEndpointEnvVar) + if err := setBoolPtrFromEnvVal(&cfg.EC2IMDSv1Disabled, []string{awsEc2MetadataV1DisabledEnvVar}); err != nil { + return cfg, err + } + + if err := setBoolPtrFromEnvVal(&cfg.S3DisableMultiRegionAccessPoints, []string{awsS3DisableMultiRegionAccessPointEnvVar}); err != nil { + return cfg, err + } + + if err := setUseDualStackEndpointFromEnvVal(&cfg.UseDualStackEndpoint, 
[]string{awsUseDualStackEndpoint}); err != nil { + return cfg, err + } + + if err := setUseFIPSEndpointFromEnvVal(&cfg.UseFIPSEndpoint, []string{awsUseFIPSEndpoint}); err != nil { + return cfg, err + } + + if err := setDefaultsModeFromEnvVal(&cfg.DefaultsMode, []string{awsDefaultMode}); err != nil { + return cfg, err + } + + if err := setIntFromEnvVal(&cfg.RetryMaxAttempts, []string{awsRetryMaxAttempts}); err != nil { + return cfg, err + } + if err := setRetryModeFromEnvVal(&cfg.RetryMode, []string{awsRetryMode}); err != nil { + return cfg, err + } + + setStringFromEnvVal(&cfg.BaseEndpoint, []string{awsEndpointURL}) + + if err := setBoolPtrFromEnvVal(&cfg.IgnoreConfiguredEndpoints, []string{awsIgnoreConfiguredEndpoints}); err != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return cfg, err } @@ -408,6 +570,7 @@ func NewEnvConfig() (EnvConfig, error) { return cfg, err } +<<<<<<< HEAD if err := setRequestChecksumCalculationFromEnvVal(&cfg.RequestChecksumCalculation, []string{awsRequestChecksumCalculation}); err != nil { return cfg, err } @@ -415,6 +578,8 @@ func NewEnvConfig() (EnvConfig, error) { return cfg, err } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return cfg, nil } @@ -447,6 +612,7 @@ func (c EnvConfig) getAccountIDEndpointMode(context.Context) (aws.AccountIDEndpo return c.AccountIDEndpointMode, len(c.AccountIDEndpointMode) > 0, nil } +<<<<<<< HEAD func (c EnvConfig) getRequestChecksumCalculation(context.Context) (aws.RequestChecksumCalculation, bool, error) { return c.RequestChecksumCalculation, c.RequestChecksumCalculation > 0, nil } @@ -455,6 +621,8 @@ func (c EnvConfig) getResponseChecksumValidation(context.Context) (aws.ResponseC return c.ResponseChecksumValidation, c.ResponseChecksumValidation > 0, nil } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // GetRetryMaxAttempts returns the value of AWS_MAX_ATTEMPTS if was specified, // and not 0. func (c EnvConfig) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) { @@ -551,6 +719,7 @@ func setAIDEndPointModeFromEnvVal(m *aws.AccountIDEndpointMode, keys []string) e return nil } +<<<<<<< HEAD func setRequestChecksumCalculationFromEnvVal(m *aws.RequestChecksumCalculation, keys []string) error { for _, k := range keys { value := os.Getenv(k) @@ -590,6 +759,8 @@ func setResponseChecksumValidationFromEnvVal(m *aws.ResponseChecksumValidation, return nil } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // GetRegion returns the AWS Region if set in the environment. Returns an empty // string if not set. func (c EnvConfig) getRegion(ctx context.Context) (string, bool, error) { @@ -646,7 +817,11 @@ func (c EnvConfig) getCustomCABundle(context.Context) (io.Reader, bool, error) { return nil, false, nil } +<<<<<<< HEAD b, err := os.ReadFile(c.CustomCABundle) +======= + b, err := ioutil.ReadFile(c.CustomCABundle) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, false, err } @@ -670,7 +845,11 @@ func (c EnvConfig) getBaseEndpoint(context.Context) (string, bool, error) { // GetServiceBaseEndpoint is used to retrieve a normalized SDK ID for use // with configured endpoints. 
func (c EnvConfig) GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) { +<<<<<<< HEAD if endpt := os.Getenv(fmt.Sprintf("%s_%s", awsEndpointURLEnv, normalizeEnv(sdkID))); endpt != "" { +======= + if endpt := os.Getenv(fmt.Sprintf("%s_%s", awsEndpointURL, normalizeEnv(sdkID))); endpt != "" { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return endpt, true, nil } return "", false, nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go index 1859fe9316..e14a79ef64 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go @@ -3,4 +3,8 @@ package config // goModuleVersion is the tagged release for this module +<<<<<<< HEAD const goModuleVersion = "1.29.2" +======= +const goModuleVersion = "1.28.3" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go index 0810ecf16a..3156c563da 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go @@ -216,6 +216,7 @@ type LoadOptions struct { // Whether S3 Express auth is disabled. S3DisableExpressAuth *bool +<<<<<<< HEAD // Whether account id should be built into endpoint resolution AccountIDEndpointMode aws.AccountIDEndpointMode @@ -225,6 +226,10 @@ type LoadOptions struct { // Specifies if response checksum should be validated ResponseChecksumValidation aws.ResponseChecksumValidation +======= + AccountIDEndpointMode aws.AccountIDEndpointMode + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Service endpoint override. This value is not necessarily final and is // passed to the service's EndpointResolverV2 for further delegation. 
BaseEndpoint string @@ -295,6 +300,7 @@ func (o LoadOptions) getAccountIDEndpointMode(ctx context.Context) (aws.AccountI return o.AccountIDEndpointMode, len(o.AccountIDEndpointMode) > 0, nil } +<<<<<<< HEAD func (o LoadOptions) getRequestChecksumCalculation(ctx context.Context) (aws.RequestChecksumCalculation, bool, error) { return o.RequestChecksumCalculation, o.RequestChecksumCalculation > 0, nil } @@ -303,6 +309,8 @@ func (o LoadOptions) getResponseChecksumValidation(ctx context.Context) (aws.Res return o.ResponseChecksumValidation, o.ResponseChecksumValidation > 0, nil } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (o LoadOptions) getBaseEndpoint(context.Context) (string, bool, error) { return o.BaseEndpoint, o.BaseEndpoint != "", nil } @@ -372,6 +380,7 @@ func WithAccountIDEndpointMode(m aws.AccountIDEndpointMode) LoadOptionsFunc { } } +<<<<<<< HEAD // WithRequestChecksumCalculation is a helper function to construct functional options // that sets RequestChecksumCalculation on config's LoadOptions func WithRequestChecksumCalculation(c aws.RequestChecksumCalculation) LoadOptionsFunc { @@ -392,6 +401,8 @@ func WithResponseChecksumValidation(v aws.ResponseChecksumValidation) LoadOption } } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // getDefaultRegion returns DefaultRegion from config's LoadOptions func (o LoadOptions) getDefaultRegion(ctx context.Context) (string, bool, error) { if len(o.DefaultRegion) == 0 { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go index a8ff40d846..bb739f84be 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go @@ -242,6 +242,7 @@ func getAccountIDEndpointMode(ctx context.Context, configs configs) (value aws.A return } +<<<<<<< HEAD // requestChecksumCalculationProvider provides access to the RequestChecksumCalculation type requestChecksumCalculationProvider interface { getRequestChecksumCalculation(context.Context) (aws.RequestChecksumCalculation, bool, error) @@ -276,6 +277,8 @@ func getResponseChecksumValidation(ctx context.Context, configs configs) (value return } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // ec2IMDSRegionProvider provides access to the ec2 imds region // configuration value type ec2IMDSRegionProvider interface { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go index a68bd0993f..c2df1b398e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go @@ -182,6 +182,7 @@ func resolveAccountIDEndpointMode(ctx context.Context, cfg *aws.Config, configs return nil } +<<<<<<< HEAD // resolveRequestChecksumCalculation extracts the RequestChecksumCalculation from the configs slice's // SharedConfig or EnvConfig func resolveRequestChecksumCalculation(ctx context.Context, cfg *aws.Config, configs configs) error { @@ -212,6 +213,8 @@ func resolveResponseChecksumValidation(ctx context.Context, cfg *aws.Config, con return nil } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // resolveDefaultRegion extracts the first instance of a default region and sets `aws.Config.Region` to the default // region if region had not been resolved from other sources. 
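The WithRequestChecksumCalculation and WithResponseChecksumValidation helpers above give the same settings programmatic form, and LoadOptions is consulted before the environment and shared-config sources; a minimal sketch:

package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	// Functional options take precedence over AWS_REQUEST_CHECKSUM_CALCULATION
	// and the shared-config keys resolved later in this patch.
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithRequestChecksumCalculation(aws.RequestChecksumCalculationWhenSupported),
		config.WithResponseChecksumValidation(aws.ResponseChecksumValidationWhenRequired),
	)
	if err != nil {
		panic(err)
	}
	_ = cfg
}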
func resolveDefaultRegion(ctx context.Context, cfg *aws.Config, configs configs) error { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go index 00b071fe6f..4ee4a4fec3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go @@ -118,11 +118,14 @@ const ( accountIDKey = "aws_account_id" accountIDEndpointMode = "account_id_endpoint_mode" +<<<<<<< HEAD requestChecksumCalculationKey = "request_checksum_calculation" responseChecksumValidationKey = "response_checksum_validation" checksumWhenSupported = "when_supported" checksumWhenRequired = "when_required" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // defaultSharedConfigProfile allows for swapping the default profile for testing @@ -351,12 +354,15 @@ type SharedConfig struct { S3DisableExpressAuth *bool AccountIDEndpointMode aws.AccountIDEndpointMode +<<<<<<< HEAD // RequestChecksumCalculation indicates if the request checksum should be calculated RequestChecksumCalculation aws.RequestChecksumCalculation // ResponseChecksumValidation indicates if the response checksum should be validated ResponseChecksumValidation aws.ResponseChecksumValidation +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (c SharedConfig) getDefaultsMode(ctx context.Context) (value aws.DefaultsMode, ok bool, err error) { @@ -1144,6 +1150,7 @@ func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) er return fmt.Errorf("failed to load %s from shared config, %w", accountIDEndpointMode, err) } +<<<<<<< HEAD if err := updateRequestChecksumCalculation(&c.RequestChecksumCalculation, section, requestChecksumCalculationKey); err != nil { return fmt.Errorf("failed to load %s from shared config, %w", requestChecksumCalculationKey, err) } @@ -1151,6 +1158,8 @@ func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) er return fmt.Errorf("failed to load %s from shared config, %w", responseChecksumValidationKey, err) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Shared Credentials creds := aws.Credentials{ AccessKeyID: section.String(accessKeyIDKey), @@ -1225,6 +1234,7 @@ func updateAIDEndpointMode(m *aws.AccountIDEndpointMode, sec ini.Section, key st return nil } +<<<<<<< HEAD func updateRequestChecksumCalculation(m *aws.RequestChecksumCalculation, sec ini.Section, key string) error { if !sec.Has(key) { return nil @@ -1261,6 +1271,8 @@ func updateResponseChecksumValidation(m *aws.ResponseChecksumValidation, sec ini return nil } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (c SharedConfig) getRequestMinCompressSizeBytes(ctx context.Context) (int64, bool, error) { if c.RequestMinCompressSizeBytes == nil { return 0, false, nil @@ -1279,6 +1291,7 @@ func (c SharedConfig) getAccountIDEndpointMode(ctx context.Context) (aws.Account return c.AccountIDEndpointMode, len(c.AccountIDEndpointMode) > 0, nil } +<<<<<<< HEAD func (c SharedConfig) getRequestChecksumCalculation(ctx context.Context) (aws.RequestChecksumCalculation, bool, error) { return c.RequestChecksumCalculation, c.RequestChecksumCalculation > 0, nil } @@ -1287,6 +1300,8 @@ func (c SharedConfig) getResponseChecksumValidation(ctx context.Context) (aws.Re return c.ResponseChecksumValidation, c.ResponseChecksumValidation > 0, nil } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func updateDefaultsMode(mode 
*aws.DefaultsMode, section ini.Section, key string) error {
 	if !section.Has(key) {
 		return nil
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
index 8a3ed35bed..517cacec7a 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
@@ -1,3 +1,4 @@
+# v1.17.55 (2025-01-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
@@ -45,6 +46,8 @@
+* **Dependency Update**: Updated to the latest SDK module versions
+
 # v1.17.44 (2024-11-07)
 
 * **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
index fd852ab0c3..bb44597712 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
@@ -3,4 +3,4 @@ package credentials
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.17.44"
+const goModuleVersion = "1.17.55"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
index 3154dfa30f..fdb7ff883c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
@@ -1,3 +1,4 @@
+# v1.16.25 (2025-01-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
@@ -24,6 +25,8 @@
+* **Dependency Update**: Update to smithy-go v1.22.1.
+* **Dependency Update**: Updated to the latest SDK module versions
+
 # v1.16.19 (2024-11-06)
 
 * **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
index cb091ba334..5e91ba28ab 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
@@ -3,4 +3,4 @@ package imds
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.16.19"
+const goModuleVersion = "1.16.25"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
index 9818ebc724..9489ff2a5e 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
@@ -1,3 +1,4 @@
+# v1.3.29 (2025-01-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
@@ -24,6 +25,8 @@
+* **Dependency Update**: Update to smithy-go v1.22.1.
+* **Dependency Update**: Updated to the latest SDK module versions
+
 # v1.3.23 (2024-11-06)
 
 * **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
index 3976533036..313517304c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
@@ -3,4 +3,4 @@ package configsources
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.3.23"
+const goModuleVersion = "1.3.29"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json
index 43f6449be3..71a945af89 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json
@@ -47,6 +47,9 @@
       "ap-southeast-5" : {
         "description" : "Asia Pacific (Malaysia)"
       },
+      "ap-southeast-7" : {
+        "description" : "Asia Pacific (Thailand)"
+      },
       "aws-global" : {
         "description" : "AWS Standard global region"
       },
@@ -92,6 +95,9 @@
       "me-south-1" : {
         "description" : "Middle East (Bahrain)"
       },
+      "mx-central-1" : {
+        "description" : "Mexico (Central)"
+      },
       "sa-east-1" : {
         "description" : "South America (Sao Paulo)"
       },
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
index d30a3176b5..92195c00bd 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
@@ -1,3 +1,4 @@
+# v2.6.29 (2025-01-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
@@ -25,6 +26,8 @@
+* **Dependency Update**: Update to smithy-go v1.22.1.
* **Dependency Update**: Updated to the latest SDK module versions +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) # v2.6.23 (2024-11-06) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go index 994cb44cf8..d94e914833 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go @@ -3,4 +3,8 @@ package endpoints // goModuleVersion is the tagged release for this module +<<<<<<< HEAD const goModuleVersion = "2.6.29" +======= +const goModuleVersion = "2.6.23" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md index 1d23b9be22..465a27c6ea 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md @@ -1,7 +1,10 @@ +<<<<<<< HEAD # v1.8.2 (2025-01-24) * **Bug Fix**: Refactor filepath.Walk to filepath.WalkDir +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) # v1.8.1 (2024-08-15) * **Dependency Update**: Bump minimum Go version to 1.21. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go index 355ed39e11..cb5dc76a60 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go @@ -3,4 +3,8 @@ package ini // goModuleVersion is the tagged release for this module +<<<<<<< HEAD const goModuleVersion = "1.8.2" +======= +const goModuleVersion = "1.8.1" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md index ef78753a92..432c4d0a13 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md @@ -1,3 +1,4 @@ +<<<<<<< HEAD # v1.12.2 (2025-01-24) * **Dependency Update**: Upgrade to smithy-go v1.22.2. @@ -6,6 +7,8 @@ * **Dependency Update**: Update to smithy-go v1.22.1. +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) # v1.12.0 (2024-10-04) * **Feature**: Add support for HTTP client metrics. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go index cbf79b401d..d7ed9406be 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go @@ -3,4 +3,8 @@ package acceptencoding // goModuleVersion is the tagged release for this module +<<<<<<< HEAD const goModuleVersion = "1.12.2" +======= +const goModuleVersion = "1.12.0" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md index 715587f519..077ab4ecde 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md @@ -1,3 +1,4 @@ +<<<<<<< HEAD # v1.12.10 (2025-01-24) * **Dependency Update**: Updated to the latest SDK module versions @@ -24,6 +25,8 @@ * **Dependency Update**: Update to smithy-go v1.22.1. * **Dependency Update**: Updated to the latest SDK module versions +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) # v1.12.4 (2024-11-06) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go index 8d8a4c9faa..0c99c6f414 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go @@ -3,4 +3,8 @@ package presignedurl // goModuleVersion is the tagged release for this module +<<<<<<< HEAD const goModuleVersion = "1.12.10" +======= +const goModuleVersion = "1.12.4" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md index bc175840da..4685f65fb0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md @@ -1,3 +1,4 @@ +<<<<<<< HEAD # v1.37.14 (2025-01-24) * **Dependency Update**: Updated to the latest SDK module versions @@ -36,6 +37,8 @@ * **Dependency Update**: Update to smithy-go v1.22.1. 
* **Dependency Update**: Updated to the latest SDK module versions +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) # v1.37.5 (2024-11-07) * **Bug Fix**: Adds case-insensitive handling of error message fields in service responses diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go index 320afdf250..ce77e2ac3e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go @@ -685,7 +685,11 @@ func addRetry(stack *middleware.Stack, o Options) error { m.LogAttempts = o.ClientLogMode.IsRetries() m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/kms") }) +<<<<<<< HEAD if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil { +======= + if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err } if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go index a57e7a29f8..88d2cb9d1c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go @@ -3,4 +3,8 @@ package kms // goModuleVersion is the tagged release for this module +<<<<<<< HEAD const goModuleVersion = "1.37.14" +======= +const goModuleVersion = "1.37.5" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints/endpoints.go index 706d1dd418..461f371f0d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints/endpoints.go @@ -365,6 +365,7 @@ var defaultPartitions = endpoints.Partitions{ Deprecated: aws.TrueTernary, }, endpoints.EndpointKey{ +<<<<<<< HEAD Region: "ap-southeast-7", }: endpoints.Endpoint{}, endpoints.EndpointKey{ @@ -383,6 +384,8 @@ var defaultPartitions = endpoints.Partitions{ Deprecated: aws.TrueTernary, }, endpoints.EndpointKey{ +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Region: "ca-central-1", }: endpoints.Endpoint{}, endpoints.EndpointKey{ @@ -617,6 +620,7 @@ var defaultPartitions = endpoints.Partitions{ Deprecated: aws.TrueTernary, }, endpoints.EndpointKey{ +<<<<<<< HEAD Region: "mx-central-1", }: endpoints.Endpoint{}, endpoints.EndpointKey{ @@ -635,6 +639,8 @@ var defaultPartitions = endpoints.Partitions{ Deprecated: aws.TrueTernary, }, endpoints.EndpointKey{ +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Region: "sa-east-1", }: endpoints.Endpoint{}, endpoints.EndpointKey{ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md index d8f0d5d1ef..bd100f7037 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md @@ -1,3 +1,4 @@ +<<<<<<< HEAD # v1.24.12 (2025-01-24) * **Dependency Update**: Updated to the latest SDK module versions @@ -28,6 +29,8 @@ * **Dependency Update**: Update to smithy-go v1.22.1. 
* **Dependency Update**: Updated to the latest SDK module versions +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) # v1.24.5 (2024-11-07) * **Bug Fix**: Adds case-insensitive handling of error message fields in service responses diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go index 0b244f142c..cf9812ea3c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go @@ -684,7 +684,11 @@ func addRetry(stack *middleware.Stack, o Options) error { m.LogAttempts = o.ClientLogMode.IsRetries() m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sso") }) +<<<<<<< HEAD if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil { +======= + if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err } if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go index 3bdb12089c..3855509558 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go @@ -3,4 +3,8 @@ package sso // goModuleVersion is the tagged release for this module +<<<<<<< HEAD const goModuleVersion = "1.24.12" +======= +const goModuleVersion = "1.24.5" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md index 2e65069572..6d591c068c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md @@ -1,3 +1,4 @@ +<<<<<<< HEAD # v1.28.11 (2025-01-24) * **Documentation**: Fixed typos in the descriptions. @@ -29,6 +30,8 @@ * **Dependency Update**: Update to smithy-go v1.22.1. 
* **Dependency Update**: Updated to the latest SDK module versions +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) # v1.28.4 (2024-11-06) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go index 9b7f4acc84..3c54315d42 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go @@ -684,7 +684,11 @@ func addRetry(stack *middleware.Stack, o Options) error { m.LogAttempts = o.ClientLogMode.IsRetries() m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/ssooidc") }) +<<<<<<< HEAD if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil { +======= + if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err } if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go index 2ab3524479..4fc50898fb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go @@ -12,7 +12,11 @@ import ( // Creates and returns access and refresh tokens for clients that are // authenticated using client secrets. The access token can be used to fetch +<<<<<<< HEAD // short-lived credentials for the assigned AWS accounts or to access application +======= +// short-term credentials for the assigned AWS accounts or to access application +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // APIs using bearer authentication. func (c *Client) CreateToken(ctx context.Context, params *CreateTokenInput, optFns ...func(*Options)) (*CreateTokenOutput, error) { if params == nil { @@ -43,21 +47,37 @@ type CreateTokenInput struct { // This member is required. ClientSecret *string +<<<<<<< HEAD // Supports the following OAuth grant types: Authorization Code, Device Code, and // Refresh Token. Specify one of the following values, depending on the grant type // that you want: // // * Authorization Code - authorization_code +======= + // Supports the following OAuth grant types: Device Code and Refresh Token. + // Specify either of the following values, depending on the grant type that you + // want: +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // * Device Code - urn:ietf:params:oauth:grant-type:device_code // // * Refresh Token - refresh_token // +<<<<<<< HEAD +======= + // For information about how to obtain the device code, see the StartDeviceAuthorization topic. + // +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // This member is required. GrantType *string // Used only when calling this API for the Authorization Code grant type. The +<<<<<<< HEAD // short-lived code is used to identify this authorization request. +======= + // short-term code is used to identify this authorization request. This grant type + // is currently unsupported for the CreateTokenAPI. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Code *string // Used only when calling this API for the Authorization Code grant type. 
This @@ -65,9 +85,15 @@ type CreateTokenInput struct { // challenge value the client passed at authorization time. CodeVerifier *string +<<<<<<< HEAD // Used only when calling this API for the Device Code grant type. This // short-lived code is used to identify this authorization request. This comes from // the result of the StartDeviceAuthorizationAPI. +======= + // Used only when calling this API for the Device Code grant type. This short-term + // code is used to identify this authorization request. This comes from the result + // of the StartDeviceAuthorizationAPI. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) DeviceCode *string // Used only when calling this API for the Authorization Code grant type. This @@ -76,7 +102,11 @@ type CreateTokenInput struct { RedirectUri *string // Used only when calling this API for the Refresh Token grant type. This token is +<<<<<<< HEAD // used to refresh short-lived tokens, such as the access token, that might expire. +======= + // used to refresh short-term tokens, such as the access token, that might expire. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // For more information about the features and limitations of the current IAM // Identity Center OIDC implementation, see Considerations for Using this Guide in diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go index e5253ce884..ff02cdab36 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go @@ -12,7 +12,11 @@ import ( // Creates and returns access and refresh tokens for clients and applications that // are authenticated using IAM entities. The access token can be used to fetch +<<<<<<< HEAD // short-lived credentials for the assigned Amazon Web Services accounts or to +======= +// short-term credentials for the assigned Amazon Web Services accounts or to +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // access application APIs using bearer authentication. func (c *Client) CreateTokenWithIAM(ctx context.Context, params *CreateTokenWithIAMInput, optFns ...func(*Options)) (*CreateTokenWithIAMOutput, error) { if params == nil { @@ -59,7 +63,11 @@ type CreateTokenWithIAMInput struct { Assertion *string // Used only when calling this API for the Authorization Code grant type. This +<<<<<<< HEAD // short-lived code is used to identify this authorization request. The code is +======= + // short-term code is used to identify this authorization request. The code is +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // obtained through a redirect from IAM Identity Center to a redirect URI persisted // in the Authorization Code GrantOptions for the application. Code *string @@ -75,7 +83,11 @@ type CreateTokenWithIAMInput struct { RedirectUri *string // Used only when calling this API for the Refresh Token grant type. This token is +<<<<<<< HEAD // used to refresh short-lived tokens, such as the access token, that might expire. +======= + // used to refresh short-term tokens, such as the access token, that might expire. 
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // For more information about the features and limitations of the current IAM // Identity Center OIDC implementation, see Considerations for Using this Guide in diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go index 2022270db2..604ccfce8a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go @@ -10,9 +10,15 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) +<<<<<<< HEAD // Registers a public client with IAM Identity Center. This allows clients to // perform authorization using the authorization code grant with Proof Key for Code // Exchange (PKCE) or the device code grant. +======= +// Registers a client with IAM Identity Center. This allows clients to initiate +// device authorization. The output should be persisted for reuse through many +// authentication requests. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (c *Client) RegisterClient(ctx context.Context, params *RegisterClientInput, optFns ...func(*Options)) (*RegisterClientOutput, error) { if params == nil { params = &RegisterClientInput{} @@ -48,6 +54,7 @@ type RegisterClientInput struct { EntitledApplicationArn *string // The list of OAuth 2.0 grant types that are defined by the client. This list is +<<<<<<< HEAD // used to restrict the token granting flows available to the client. Supports the // following OAuth 2.0 grant types: Authorization Code, Device Code, and Refresh // Token. @@ -57,6 +64,9 @@ type RegisterClientInput struct { // * Device Code - urn:ietf:params:oauth:grant-type:device_code // // * Refresh Token - refresh_token +======= + // used to restrict the token granting flows available to the client. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) GrantTypes []string // The IAM Identity Center Issuer URL associated with an instance of IAM Identity diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go index f3510b18c5..4d0f8b31ef 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go @@ -8,12 +8,18 @@ // Center. The service also enables the client to fetch the user’s access token // upon successful authentication and authorization with IAM Identity Center. // +<<<<<<< HEAD // # API namespaces // // IAM Identity Center uses the sso and identitystore API namespaces. IAM Identity // Center OpenID Connect uses the sso-oidc namespace. // // # Considerations for using this guide +======= +// IAM Identity Center uses the sso and identitystore API namespaces. 
+// +// # Considerations for Using This Guide +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // Before you begin using this guide, we recommend that you first review the // following important information about how the IAM Identity Center OIDC service diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go index 9f78e8f741..d85a80c1aa 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go @@ -3,4 +3,8 @@ package ssooidc // goModuleVersion is the tagged release for this module +<<<<<<< HEAD const goModuleVersion = "1.28.11" +======= +const goModuleVersion = "1.28.4" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md index 4a95edf92f..0acf9392ec 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md @@ -1,3 +1,4 @@ +<<<<<<< HEAD # v1.33.10 (2025-01-24) * **Dependency Update**: Updated to the latest SDK module versions @@ -44,6 +45,8 @@ * **Feature**: This release introduces the new API 'AssumeRoot', which returns short-term credentials that you can use to perform privileged tasks. +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) # v1.32.4 (2024-11-06) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go index 25787325f2..1d957a1193 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go @@ -688,7 +688,11 @@ func addRetry(stack *middleware.Stack, o Options) error { m.LogAttempts = o.ClientLogMode.IsRetries() m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sts") }) +<<<<<<< HEAD if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil { +======= + if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err } if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go index d056327746..f47b34cfe8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go @@ -16,7 +16,11 @@ import ( // Amazon Web Services resources. These temporary credentials consist of an access // key ID, a secret access key, and a security token. Typically, you use AssumeRole // within your account or for cross-account access. 
For a comparison of AssumeRole +<<<<<<< HEAD // with other API operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the +======= +// with other API operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Comparing the Amazon Web Services STS API operations] in the +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // IAM User Guide. // // # Permissions @@ -26,6 +30,7 @@ import ( // cannot call the Amazon Web Services STS GetFederationToken or GetSessionToken // API operations. // +<<<<<<< HEAD // (Optional) You can pass inline or managed session policies to this operation. // You can pass a single JSON policy document to use as an inline session policy. // You can also specify up to 10 managed policy Amazon Resource Names (ARNs) to use @@ -36,6 +41,18 @@ import ( // policies. You can use the role's temporary credentials in subsequent Amazon Web // Services API calls to access resources in the account that owns the role. You // cannot use session policies to grant more permissions than those allowed by the +======= +// (Optional) You can pass inline or managed [session policies] to this operation. You can pass a +// single JSON policy document to use as an inline session policy. You can also +// specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed +// session policies. The plaintext that you use for both inline and managed session +// policies can't exceed 2,048 characters. Passing policies to this operation +// returns new temporary credentials. The resulting session's permissions are the +// intersection of the role's identity-based policy and the session policies. You +// can use the role's temporary credentials in subsequent Amazon Web Services API +// calls to access resources in the account that owns the role. You cannot use +// session policies to grant more permissions than those allowed by the +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // identity-based policy of the role that is being assumed. For more information, // see [Session Policies]in the IAM User Guide. 
// @@ -104,9 +121,16 @@ import ( // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session // [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html // [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining +<<<<<<< HEAD // [IAM Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html // [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html // [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html +======= +// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison +// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [IAM Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html +// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html func (c *Client) AssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*Options)) (*AssumeRoleOutput, error) { if params == nil { @@ -140,18 +164,24 @@ type AssumeRoleInput struct { // the temporary security credentials will expose the role session name to the // external account in their CloudTrail logs. // +<<<<<<< HEAD // For security purposes, administrators can view this field in [CloudTrail logs] to help identify // who performed an action in Amazon Web Services. Your administrator might require // that you specify your user name as the session name when you assume the role. // For more information, see [sts:RoleSessionName]sts:RoleSessionName . // +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The regex used to validate this parameter is a string of characters consisting // of upper- and lower-case alphanumeric characters with no spaces. You can also // include underscores or any of the following characters: =,.@- // +<<<<<<< HEAD // [CloudTrail logs]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html#cloudtrail-integration_signin-tempcreds // [sts:RoleSessionName]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html#ck_rolesessionname // +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // This member is required. RoleSessionName *string @@ -170,7 +200,11 @@ type AssumeRoleInput struct { // 43200 seconds (12 hours), depending on the maximum session duration setting for // your role. However, if you assume a role using role chaining and provide a // DurationSeconds parameter value greater than one hour, the operation fails. To +<<<<<<< HEAD // learn how to view the maximum value for your role, see [Update the maximum session duration for a role]. +======= + // learn how to view the maximum value for your role, see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // By default, the value is set to 3600 seconds. 
// @@ -180,7 +214,11 @@ type AssumeRoleInput struct { // parameter that specifies the maximum length of the console session. For more // information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]in the IAM User Guide. // +<<<<<<< HEAD // [Update the maximum session duration for a role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_update-role-settings.html#id_roles_update-session-duration +======= + // [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html DurationSeconds *int32 @@ -225,10 +263,14 @@ type AssumeRoleInput struct { // by percentage how close the policies and tags for your request are to the upper // size limit. // +<<<<<<< HEAD // For more information about role session permissions, see [Session policies]. // // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session // [Session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +======= + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Policy *string // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to @@ -283,10 +325,17 @@ type AssumeRoleInput struct { SerialNumber *string // The source identity specified by the principal that is calling the AssumeRole +<<<<<<< HEAD // operation. The source identity value persists across [chained role]sessions. // // You can require users to specify a source identity when they assume a role. You // do this by using the [sts:SourceIdentity]sts:SourceIdentity condition key in a role trust policy. +======= + // operation. + // + // You can require users to specify a source identity when they assume a role. You + // do this by using the sts:SourceIdentity condition key in a role trust policy. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // You can use source identity information in CloudTrail logs to determine who took // actions with a role. You can use the aws:SourceIdentity condition key to // further control access to Amazon Web Services resources based on the value of @@ -295,6 +344,7 @@ type AssumeRoleInput struct { // // The regex used to validate this parameter is a string of characters consisting // of upper- and lower-case alphanumeric characters with no spaces. You can also +<<<<<<< HEAD // include underscores or any of the following characters: +=,.@-. You cannot use a // value that begins with the text aws: . This prefix is reserved for Amazon Web // Services internal use. @@ -302,6 +352,13 @@ type AssumeRoleInput struct { // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html#iam-term-role-chaining // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html // [sts:SourceIdentity]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-sourceidentity +======= + // include underscores or any of the following characters: =,.@-. 
You cannot use a + // value that begins with the text aws: . This prefix is reserved for Amazon Web + // Services internal use. + // + // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) SourceIdentity *string // A list of session tags that you want to pass. Each session tag consists of a @@ -354,8 +411,13 @@ type AssumeRoleInput struct { // a tag key as transitive, the corresponding key and value passes to subsequent // sessions in a role chain. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide. // +<<<<<<< HEAD // This parameter is optional. The transitive status of a session tag does not // impact its packed binary size. +======= + // This parameter is optional. When you set session tags as transitive, the + // session policy and session tags packed binary limit is not affected. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // If you choose not to specify a transitive tag key, then no tags are passed from // this session to any subsequent sessions. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go index d0e117ac92..42c71f5c24 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go @@ -16,7 +16,11 @@ import ( // mechanism for tying an enterprise identity store or directory to role-based // Amazon Web Services access without user-specific credentials or configuration. // For a comparison of AssumeRoleWithSAML with the other API operations that +<<<<<<< HEAD // produce temporary credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the IAM User Guide. +======= +// produce temporary credentials, see [Requesting Temporary Security Credentials]and [Comparing the Amazon Web Services STS API operations] in the IAM User Guide. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // The temporary security credentials returned by this operation consist of an // access key ID, a secret access key, and a security token. 
Applications can use @@ -130,10 +134,17 @@ import ( // [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session // [Creating a Role for SAML 2.0 Federation]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length +<<<<<<< HEAD // [Creating SAML Identity Providers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html // [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session // [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html // [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html +======= +// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison +// [Creating SAML Identity Providers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html +// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html // [Configuring a Relying Party and Claims]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html // [Role chaining]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining @@ -219,8 +230,11 @@ type AssumeRoleWithSAMLInput struct { // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage // return (\u000D) characters. // +<<<<<<< HEAD // For more information about role session permissions, see [Session policies]. // +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // An Amazon Web Services conversion compresses the passed inline session policy, // managed policy ARNs, and session tags into a packed binary format that has a // separate limit. Your request can fail for this limit even if your plaintext @@ -229,7 +243,10 @@ type AssumeRoleWithSAMLInput struct { // size limit. // // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +<<<<<<< HEAD // [Session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Policy *string // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to @@ -307,8 +324,12 @@ type AssumeRoleWithSAMLOutput struct { // allowed space. PackedPolicySize *int32 +<<<<<<< HEAD // The value in the SourceIdentity attribute in the SAML assertion. The source // identity value persists across [chained role]sessions. +======= + // The value in the SourceIdentity attribute in the SAML assertion. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // You can require users to set a source identity value when they assume a role. 
// You do this by using the sts:SourceIdentity condition key in a role trust @@ -325,7 +346,11 @@ type AssumeRoleWithSAMLOutput struct { // of upper- and lower-case alphanumeric characters with no spaces. You can also // include underscores or any of the following characters: =,.@- // +<<<<<<< HEAD // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html#id_roles_terms-and-concepts +======= + // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html SourceIdentity *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go index 0ae4bc173e..1099b87ce3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go @@ -31,7 +31,11 @@ import ( // Services credentials. Instead, the identity of the caller is validated by using // a token from the web identity provider. For a comparison of // AssumeRoleWithWebIdentity with the other API operations that produce temporary +<<<<<<< HEAD // credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the IAM User Guide. +======= +// credentials, see [Requesting Temporary Security Credentials]and [Comparing the Amazon Web Services STS API operations] in the IAM User Guide. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // The temporary security credentials returned by this API consist of an access // key ID, a secret access key, and a security token. Applications can use these @@ -45,7 +49,11 @@ import ( // DurationSeconds parameter to specify the duration of your session. You can // provide a value from 900 seconds (15 minutes) up to the maximum session duration // setting for the role. This setting can have a value from 1 hour to 12 hours. To +<<<<<<< HEAD // learn how to view the maximum value for your role, see [Update the maximum session duration for a role]in the IAM User Guide. +======= +// learn how to view the maximum value for your role, see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The maximum session duration limit applies when you use the AssumeRole* API // operations or the assume-role* CLI commands. However the limit does not apply // when you use those operations to create a console URL. For more information, see @@ -111,23 +119,50 @@ import ( // that you avoid using any personally identifiable information (PII) in this // field. For example, you could instead use a GUID or a pairwise identifier, as [suggested in the OIDC specification]. // +<<<<<<< HEAD // For more information about how to use OIDC federation and the +======= +// For more information about how to use web identity federation and the +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // AssumeRoleWithWebIdentity API, see the following resources: // // [Using Web Identity Federation API Operations for Mobile Apps] // - and [Federation Through a Web-based Identity Provider]. // +<<<<<<< HEAD +======= +// [Web Identity Federation Playground] +// - . 
Walk through the process of authenticating through Login with Amazon, +// Facebook, or Google, getting temporary security credentials, and then using +// those credentials to make a request to Amazon Web Services. +// +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // [Amazon Web Services SDK for iOS Developer Guide] // - and [Amazon Web Services SDK for Android Developer Guide]. These toolkits contain sample apps that show how to invoke the // identity providers. The toolkits then show how to use the information from these // providers to get and use temporary security credentials. // +<<<<<<< HEAD // [Amazon Web Services SDK for iOS Developer Guide]: http://aws.amazon.com/sdkforios/ // [Amazon Web Services SDK for Android Developer Guide]: http://aws.amazon.com/sdkforandroid/ // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length // [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session // [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html // [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html +======= +// [Web Identity Federation with Mobile Applications] +// - . This article discusses web identity federation and shows an example of +// how to use web identity federation to get access to content in Amazon S3. +// +// [Amazon Web Services SDK for iOS Developer Guide]: http://aws.amazon.com/sdkforios/ +// [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session +// [Web Identity Federation Playground]: https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/ +// [Amazon Web Services SDK for Android Developer Guide]: http://aws.amazon.com/sdkforandroid/ +// [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length +// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison +// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // [Subject]: http://openid.net/specs/openid-connect-core-1_0.html#Claims // [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html // [Amazon Cognito identity pools]: https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html @@ -137,7 +172,11 @@ import ( // [Amazon Cognito federated identities]: https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html // [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html // [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining +<<<<<<< HEAD // [Update the maximum session duration for a role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_update-role-settings.html#id_roles_update-session-duration +======= +// [Web Identity 
Federation with Mobile Applications]: http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // [Using Web Identity Federation API Operations for Mobile Apps]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html // [suggested in the OIDC specification]: http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes func (c *Client) AssumeRoleWithWebIdentity(ctx context.Context, params *AssumeRoleWithWebIdentityInput, optFns ...func(*Options)) (*AssumeRoleWithWebIdentityOutput, error) { @@ -159,6 +198,7 @@ type AssumeRoleWithWebIdentityInput struct { // The Amazon Resource Name (ARN) of the role that the caller is assuming. // +<<<<<<< HEAD // Additional considerations apply to Amazon Cognito identity pools that assume [cross-account IAM roles]. // The trust policies of these roles must accept the cognito-identity.amazonaws.com // service principal and must contain the cognito-identity.amazonaws.com:aud @@ -170,6 +210,8 @@ type AssumeRoleWithWebIdentityInput struct { // [cross-account IAM roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies-cross-account-resource-access.html // [Trust policies for IAM roles in Basic (Classic) authentication]: https://docs.aws.amazon.com/cognito/latest/developerguide/iam-roles.html#trust-policies // +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // This member is required. RoleArn *string @@ -179,28 +221,39 @@ type AssumeRoleWithWebIdentityInput struct { // associated with that user. This session name is included as part of the ARN and // assumed role ID in the AssumedRoleUser response element. // +<<<<<<< HEAD // For security purposes, administrators can view this field in [CloudTrail logs] to help identify // who performed an action in Amazon Web Services. Your administrator might require // that you specify your user name as the session name when you assume the role. // For more information, see [sts:RoleSessionName]sts:RoleSessionName . // +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The regex used to validate this parameter is a string of characters consisting // of upper- and lower-case alphanumeric characters with no spaces. You can also // include underscores or any of the following characters: =,.@- // +<<<<<<< HEAD // [CloudTrail logs]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html#cloudtrail-integration_signin-tempcreds // [sts:RoleSessionName]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html#ck_rolesessionname // +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // This member is required. RoleSessionName *string // The OAuth 2.0 access token or OpenID Connect ID token that is provided by the // identity provider. Your application must get this token by authenticating the // user who is using your application with a web identity provider before the +<<<<<<< HEAD // application makes an AssumeRoleWithWebIdentity call. Timestamps in the token // must be formatted as either an integer or a long integer. Tokens must be signed // using either RSA keys (RS256, RS384, or RS512) or ECDSA keys (ES256, ES384, or // ES512). +======= + // application makes an AssumeRoleWithWebIdentity call. Only tokens with RSA + // algorithms (RS256) are supported. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // This member is required. 
WebIdentityToken *string @@ -242,8 +295,11 @@ type AssumeRoleWithWebIdentityInput struct { // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage // return (\u000D) characters. // +<<<<<<< HEAD // For more information about role session permissions, see [Session policies]. // +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // An Amazon Web Services conversion compresses the passed inline session policy, // managed policy ARNs, and session tags into a packed binary format that has a // separate limit. Your request can fail for this limit even if your plaintext @@ -252,7 +308,10 @@ type AssumeRoleWithWebIdentityInput struct { // size limit. // // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +<<<<<<< HEAD // [Session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Policy *string // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to @@ -350,7 +409,11 @@ type AssumeRoleWithWebIdentityOutput struct { // of upper- and lower-case alphanumeric characters with no spaces. You can also // include underscores or any of the following characters: =,.@- // +<<<<<<< HEAD // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html#id_roles_terms-and-concepts +======= + // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html // [Using Tokens with User Pools]: https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html SourceIdentity *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go index e2ecc792ac..6380eaaae2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go @@ -20,7 +20,11 @@ import ( // credentials of an IAM user. As a result, this call is appropriate in contexts // where those credentials can be safeguarded, usually in a server-based // application. For a comparison of GetFederationToken with the other API +<<<<<<< HEAD // operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the IAM User Guide. +======= +// operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Comparing the Amazon Web Services STS API operations] in the IAM User Guide. 
// // Although it is possible to call GetFederationToken using the security // credentials of an Amazon Web Services account root user rather than an IAM user @@ -104,9 +108,15 @@ import ( // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session // [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html // [GetFederationToken—Federation Through a Custom Identity Broker]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken // [Safeguard your root user credentials and don't use them for everyday tasks]: https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials // [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html // [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html // [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html func (c *Client) GetFederationToken(ctx context.Context, params *GetFederationTokenInput, optFns ...func(*Options)) (*GetFederationTokenOutput, error) { if params == nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go index fdc451117b..f0096ab3d0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go @@ -22,7 +22,11 @@ import ( // the call returns, IAM users can then make programmatic calls to API operations // that require MFA authentication. An incorrect MFA code causes the API to return // an access denied error. For a comparison of GetSessionToken with the other API // operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the IAM User Guide. // // No permissions are required for users to perform this operation. The purpose of // the sts:GetSessionToken operation is to authenticate the user using MFA. You @@ -63,10 +67,17 @@ import ( // credentials, see [Temporary Credentials for Users in Untrusted Environments]in the IAM User Guide. // // [Permissions for GetSessionToken]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html // [Temporary Credentials for Users in Untrusted Environments]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken // [Safeguard your root user credentials and don't use them for everyday tasks]: https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials // [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html // [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html func (c *Client) GetSessionToken(ctx context.Context, params *GetSessionTokenInput, optFns ...func(*Options)) (*GetSessionTokenOutput, error) { if params == nil { params = &GetSessionTokenInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go index 59349890f6..8ccfba7ba3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go @@ -410,6 +410,7 @@ func awsAwsquery_deserializeOpErrorAssumeRoleWithWebIdentity(response *smithyhtt } } type awsAwsquery_deserializeOpAssumeRoot struct { } @@ -525,6 +526,8 @@ func awsAwsquery_deserializeOpErrorAssumeRoot(response *smithyhttp.Response, met } } type awsAwsquery_deserializeOpDecodeAuthorizationMessage struct { } @@ -2383,6 +2386,7 @@ func awsAwsquery_deserializeOpDocumentAssumeRoleWithWebIdentityOutput(v **Assume return nil } func awsAwsquery_deserializeOpDocumentAssumeRootOutput(v **AssumeRootOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -2438,6 +2442,8 @@ func awsAwsquery_deserializeOpDocumentAssumeRootOutput(v **AssumeRootOutput, dec return nil } func awsAwsquery_deserializeOpDocumentDecodeAuthorizationMessageOutput(v **DecodeAuthorizationMessageOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json index 70a88452ee..dcf8d7d773 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json @@ -13,7 +13,10 @@ "api_op_AssumeRole.go", "api_op_AssumeRoleWithSAML.go", "api_op_AssumeRoleWithWebIdentity.go", "api_op_AssumeRoot.go", "api_op_DecodeAuthorizationMessage.go", "api_op_GetAccessKeyInfo.go", "api_op_GetCallerIdentity.go", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go index 6db8f82fb5..6bf48e4be2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go @@ -3,4 +3,8 @@ package sts // goModuleVersion is the tagged release for this module const goModuleVersion = "1.33.10" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go index 4e07994d04..bb68d8fb95 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go @@ -176,9 +176,12 @@ var defaultPartitions = endpoints.Partitions{ Region: "ap-southeast-5", }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "ap-southeast-7", }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "aws-global", }: endpoints.Endpoint{ Hostname: "sts.amazonaws.com", @@ -226,9 +229,12 @@ var defaultPartitions = endpoints.Partitions{ Region: "me-south-1", }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "mx-central-1", }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "sa-east-1", }: endpoints.Endpoint{}, endpoints.EndpointKey{ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go index 96b222136b..94e32462a2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go @@ -226,6 +226,7 @@ func (m *awsAwsquery_serializeOpAssumeRoleWithWebIdentity) HandleSerialize(ctx c return next.HandleSerialize(ctx, in) } type awsAwsquery_serializeOpAssumeRoot struct { } @@ -296,6 +297,8 @@ func (m *awsAwsquery_serializeOpAssumeRoot) HandleSerialize(ctx context.Context, return next.HandleSerialize(ctx, in) } type awsAwsquery_serializeOpDecodeAuthorizationMessage struct { } @@ -891,6 +894,7 @@ func awsAwsquery_serializeOpDocumentAssumeRoleWithWebIdentityInput(v *AssumeRole return nil } func awsAwsquery_serializeOpDocumentAssumeRootInput(v *AssumeRootInput, value query.Value) error { object := value.Object() _ = object @@ -915,6 +919,8 @@ func awsAwsquery_serializeOpDocumentAssumeRootInput(v *AssumeRootInput, value qu return nil } func awsAwsquery_serializeOpDocumentDecodeAuthorizationMessageInput(v *DecodeAuthorizationMessageInput, value query.Value) error { object := value.Object() _ = object diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go index 041629bba2..76b5392d36 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go @@ -95,8 +95,13 @@ func (e *IDPRejectedClaimException) ErrorCode() string { func (e *IDPRejectedClaimException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // The error returned if the message passed to DecodeAuthorizationMessage was // invalid. This can happen if the token contains invalid characters, such as line // breaks, or if the message has expired. type InvalidAuthorizationMessageException struct { Message *string @@ -218,10 +223,17 @@ func (e *PackedPolicyTooLargeException) ErrorFault() smithy.ErrorFault { return // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. For more information, see [Activating and Deactivating STS in an Amazon Web Services Region]in the IAM // User Guide. // // [Activating and Deactivating STS in an Amazon Web Services Region]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html type RegionDisabledException struct { Message *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go index 1026e22118..723d773c9d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go @@ -70,6 +70,7 @@ func (m *validateOpAssumeRoleWithWebIdentity) HandleInitialize(ctx context.Conte return next.HandleInitialize(ctx, in) } type validateOpAssumeRoot struct { } @@ -90,6 +91,8 @@ func (m *validateOpAssumeRoot) HandleInitialize(ctx context.Context, in middlewa return next.HandleInitialize(ctx, in) } type validateOpDecodeAuthorizationMessage struct { } @@ -162,10 +165,13 @@ func addOpAssumeRoleWithWebIdentityValidationMiddleware(stack *middleware.Stack) return stack.Initialize.Add(&validateOpAssumeRoleWithWebIdentity{}, middleware.After) } func addOpAssumeRootValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpAssumeRoot{}, middleware.After) } func addOpDecodeAuthorizationMessageValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpDecodeAuthorizationMessage{}, middleware.After) } @@ -278,6 +284,7 @@ func validateOpAssumeRoleWithWebIdentityInput(v *AssumeRoleWithWebIdentityInput) } } func validateOpAssumeRootInput(v *AssumeRootInput) error { if v == nil { return nil @@ -296,6 +303,8 @@ func validateOpAssumeRootInput(v *AssumeRootInput) error { } } func validateOpDecodeAuthorizationMessageInput(v *DecodeAuthorizationMessageInput) error { if v == nil { return nil diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index 2945185b0b..4da18aa2c8 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -349,7 +349,11 @@ func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile s if cfg.hasSSOTokenProviderConfiguration() { skippedFiles = 0 for _, f := range files { section, ok := f.IniData.GetSection(ssoSectionPrefix + strings.TrimSpace(cfg.SSOSessionName)) if ok { var ssoSession ssoSession ssoSession.setFromIniSection(section) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 7ab65bae79..4761481d6b 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,8 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK const SDKVersion = "1.55.6" diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter.go index 0c54d99494..8a54b52ebd 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter.go @@ -3,7 +3,10 @@ package dynamodbattribute import ( "bytes" "encoding/json" "errors" "fmt" "reflect" "runtime" @@ -26,7 +29,11 @@ func ConvertToMap(in interface{}) (item map[string]*dynamodb.AttributeValue, err if e, ok := r.(runtime.Error); ok { err = e } else if s, ok := r.(string); ok { err = errors.New(s) } else { err = r.(error) } @@ -74,7 +81,11 @@ func ConvertFromMap(item map[string]*dynamodb.AttributeValue, v interface{}) (er if e, ok := r.(runtime.Error); ok { err = e } else if s, ok := r.(string); ok { err = errors.New(s) } else { err = r.(error) } @@ -128,7 +139,11 @@ func ConvertToList(in interface{}) (item []*dynamodb.AttributeValue, err error) if e, ok := r.(runtime.Error); ok { err = e } else if s, ok := r.(string); ok { err = errors.New(s) } else { err = r.(error) } @@ -177,7 +192,11 @@ func ConvertFromList(item []*dynamodb.AttributeValue, v interface{}) (err error) if e, ok := r.(runtime.Error); ok { err = e } else if s, ok := r.(string); ok { err = errors.New(s) } else { err = r.(error) } @@ -225,7 +244,11 @@ func ConvertTo(in interface{}) (item *dynamodb.AttributeValue, err error) { if e, ok := r.(runtime.Error); ok { err = e } else if s, ok := r.(string); ok { err = errors.New(s) } else { err = r.(error) } @@ -255,7 +278,11 @@ func ConvertFrom(item *dynamodb.AttributeValue, v interface{}) (err error) { if e, ok := r.(runtime.Error); ok { err = e } else if s, ok := r.(string); ok { err = errors.New(s) } else { err = r.(error) } } diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md index de39171cf0..da44f788cf 100644 --- a/vendor/github.com/aws/smithy-go/CHANGELOG.md +++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md @@ -1,3 +1,4 @@ # Release (2025-01-21) ## General Highlights @@ -17,6 +18,8 @@ * `github.com/aws/smithy-go`: v1.22.1 * **Bug Fix**: Fix failure to replace URI path segments when their names overlap. # Release (2024-10-03) ## General Highlights diff --git a/vendor/github.com/aws/smithy-go/CONTRIBUTING.md b/vendor/github.com/aws/smithy-go/CONTRIBUTING.md index 1f8d01ff6a..106fea678a 100644 --- a/vendor/github.com/aws/smithy-go/CONTRIBUTING.md +++ b/vendor/github.com/aws/smithy-go/CONTRIBUTING.md @@ -39,6 +39,7 @@ To send us a pull request, please: GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). ### Changelog Documents (You can SKIP this step if you are only changing the code generator, and not the runtime). @@ -70,6 +71,8 @@ These are used to generate the content `CHANGELOG.md` and Release Notes. The for * description: Description of this change. Most of the times is the same as the title of the PR * modules: which Go modules does this change impact. The root module is expressed as "." ## Finding contributions to work on Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start.
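The converter.go hunks above settle on errors.New(s) for panics recovered as strings: fmt.Errorf(s) treats a non-constant string as a format string, which go vet flags and which mangles messages containing % verbs. A minimal, self-contained sketch of the same recover-and-convert pattern (the helper name do is illustrative, not part of the SDK):

package main

import (
	"errors"
	"fmt"
	"runtime"
)

// do runs fn and converts any panic into a returned error, mirroring the
// recover blocks in the dynamodbattribute converters above. Plain strings go
// through errors.New so that % characters survive untouched.
func do(fn func()) (err error) {
	defer func() {
		if r := recover(); r != nil {
			switch v := r.(type) {
			case runtime.Error:
				err = v
			case string:
				err = errors.New(v) // not fmt.Errorf(v): v is not a format string
			case error:
				err = v
			default:
				err = fmt.Errorf("panic: %v", v)
			}
		}
	}()
	fn()
	return nil
}

func main() {
	fmt.Println(do(func() { panic("50% not allowed") })) // prints the panic text verbatim
}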
diff --git a/vendor/github.com/aws/smithy-go/Makefile b/vendor/github.com/aws/smithy-go/Makefile index a3c2cf173d..ef77695f40 100644 --- a/vendor/github.com/aws/smithy-go/Makefile +++ b/vendor/github.com/aws/smithy-go/Makefile @@ -98,6 +98,7 @@ module-version: ############## .PHONY: install-changelog external-changelog: mkdir -p .changelog cp changelog-template.json .changelog/00000000-0000-0000-0000-000000000000.json @@ -105,5 +106,7 @@ external-changelog: @echo "Make sure to rename the file with your new id, like .changelog/12345678-1234-1234-1234-123456789012.json" @echo "See CONTRIBUTING.md 'Changelog Documents' and an example at https://github.com/aws/smithy-go/pull/543/files" install-changelog: go install ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION} diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go index 9ae308540c..7b4e035f15 100644 --- a/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go +++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go @@ -22,6 +22,7 @@ func bufCap(b []byte, n int) []byte { // replacePathElement replaces a single element in the path []byte. // Escape is used to control whether the value will be escaped using Amazon path escape style. func replacePathElement(path, fieldBuf []byte, key, val string, escape bool) ([]byte, []byte, error) { // search for "{}". If not found, search for the greedy version "{+}". If none are found, return error fieldBuf = bufCap(fieldBuf, len(key)+2) // { } fieldBuf = append(fieldBuf, uriTokenStart) @@ -44,11 +45,38 @@ func replacePathElement(path, fieldBuf []byte, key, val string, escape bool) ([] encodeSep = false } end := start + len(fieldBuf) if escape { val = EscapePath(val, encodeSep) } fieldBuf = bufCap(fieldBuf, len(val)) fieldBuf = append(fieldBuf, val...)
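The resolved replacePathElement keeps smithy's two-pass search: first the literal {key} token, then the greedy {key+} form, which preserves / separators while still escaping each segment. A simplified sketch of that distinction using net/url escaping rather than smithy's Amazon-style EscapePath (the function name replaceElement is illustrative, not the library's API):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// replaceElement substitutes {key} with a fully escaped value, and the greedy
// {key+} with a value whose slashes are kept as path separators.
func replaceElement(path, key, val string) string {
	if strings.Contains(path, "{"+key+"+}") {
		// Greedy token: escape each segment but keep the / separators.
		segs := strings.Split(val, "/")
		for i, s := range segs {
			segs[i] = url.PathEscape(s)
		}
		return strings.ReplaceAll(path, "{"+key+"+}", strings.Join(segs, "/"))
	}
	return strings.ReplaceAll(path, "{"+key+"}", url.PathEscape(val))
}

func main() {
	fmt.Println(replaceElement("/bucket/{Key+}", "Key", "a/b c.txt")) // /bucket/a/b%20c.txt
	fmt.Println(replaceElement("/item/{ID}", "ID", "a/b"))            // /item/a%2Fb
}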
diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go index a51ceca4ce..a5cf9948b1 100644 --- a/vendor/github.com/aws/smithy-go/go_module_metadata.go +++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go @@ -3,4 +3,8 @@ package smithy // goModuleVersion is the tagged release for this module const goModuleVersion = "1.22.2" diff --git a/vendor/github.com/aws/smithy-go/transport/http/host.go b/vendor/github.com/aws/smithy-go/transport/http/host.go index db9801bea5..8e7794c6cd 100644 --- a/vendor/github.com/aws/smithy-go/transport/http/host.go +++ b/vendor/github.com/aws/smithy-go/transport/http/host.go @@ -69,7 +69,11 @@ func ValidPortNumber(port string) bool { return true } // ValidHostLabel returns whether the label is a valid RFC 3986 host label. func ValidHostLabel(label string) bool { if l := len(label); l == 0 || l > 63 { return false diff --git a/vendor/github.com/aws/smithy-go/transport/http/metrics.go b/vendor/github.com/aws/smithy-go/transport/http/metrics.go index d1beaa595d..f5757282cc 100644 --- a/vendor/github.com/aws/smithy-go/transport/http/metrics.go +++ b/vendor/github.com/aws/smithy-go/transport/http/metrics.go @@ -5,7 +5,10 @@ import ( "crypto/tls" "net/http" "net/http/httptrace" "sync/atomic" "time" "github.com/aws/smithy-go/metrics" @@ -43,10 +46,17 @@ type timedClientDo struct { } func (c *timedClientDo) Do(r *http.Request) (*http.Response, error) { c.hm.doStart.Store(now()) resp, err := c.ClientDo.Do(r) c.hm.DoRequestDuration.Record(r.Context(), c.hm.doStart.Elapsed()) return resp, err } @@ -59,10 +69,17 @@ type httpMetrics struct { DoRequestDuration metrics.Float64Histogram // client.http.do_request_duration TimeToFirstByte metrics.Float64Histogram // client.http.time_to_first_byte doStart safeTime dnsStart safeTime connectStart safeTime tlsStart safeTime } func newHTTPMetrics(meter metrics.Meter) (*httpMetrics, error) { @@ -116,6 +133,7 @@ func newHTTPMetrics(meter metrics.Meter) (*httpMetrics, error) { } func (m *httpMetrics) DNSStart(httptrace.DNSStartInfo) { m.dnsStart.Store(now()) } @@ -125,6 +143,17 @@ func (m *httpMetrics) ConnectStart(string, string) { func (m *httpMetrics) TLSHandshakeStart() { m.tlsStart.Store(now()) } func (m *httpMetrics) GotConn(ctx context.Context) func(httptrace.GotConnInfo) { @@ -141,25 +170,41 @@ func (m *httpMetrics) PutIdleConn(ctx context.Context) func(error) { func (m *httpMetrics) DNSDone(ctx context.Context) func(httptrace.DNSDoneInfo) { return func(httptrace.DNSDoneInfo) { m.DNSLookupDuration.Record(ctx, m.dnsStart.Elapsed()) } } func (m *httpMetrics) ConnectDone(ctx context.Context) func(string, string, error) { return func(string, string, error) { m.ConnectDuration.Record(ctx, m.connectStart.Elapsed()) } } func (m *httpMetrics) TLSHandshakeDone(ctx context.Context) func(tls.ConnectionState, error) { return func(tls.ConnectionState, error) { m.TLSHandshakeDuration.Record(ctx, m.tlsStart.Elapsed()) } } func (m *httpMetrics) GotFirstResponseByte(ctx context.Context) func() { return func() { m.TimeToFirstByte.Record(ctx, m.doStart.Elapsed()) } } @@ -178,6 +223,7 @@ func (m *httpMetrics) addConnIdle(ctx context.Context, incr int64) { }) } type safeTime struct { atomic.Value // time.Time } @@ -194,5 +240,10 @@ func (st *safeTime) Load() time.Time { func (st *safeTime) Elapsed() float64 { end := now() elapsed := end.Sub(st.Load()) return float64(elapsed) / 1e9 } diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go index 914338f2e7..ab03f84c5d 100644 --- a/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go +++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go @@ -2,10 +2,17 @@ package http import ( "context" "io" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/middleware" ) // AddErrorCloseResponseBodyMiddleware adds the middleware to automatically @@ -30,7 +37,11 @@ func (m *errorCloseResponseBodyMiddleware) HandleDeserialize( if err != nil { if resp, ok := out.RawResponse.(*Response); ok && resp != nil && resp.Body != nil { // Consume the full body to prevent TCP connection resets on some platforms _, _ = io.Copy(io.Discard, resp.Body) // Do not validate that the response closes successfully. resp.Body.Close() } @@ -64,7 +75,11 @@ func (m *closeResponseBody) HandleDeserialize( if resp, ok := out.RawResponse.(*Response); ok { // Consume the full body to prevent TCP connection resets on some platforms _, copyErr := io.Copy(io.Discard, resp.Body) if copyErr != nil { middleware.GetLogger(ctx).Logf(logging.Warn, "failed to discard remaining HTTP response body, this may affect connection reuse") } diff --git a/vendor/github.com/aws/smithy-go/transport/http/request.go b/vendor/github.com/aws/smithy-go/transport/http/request.go index 5cbf6f10ac..6f8fff497f 100644 --- a/vendor/github.com/aws/smithy-go/transport/http/request.go +++ b/vendor/github.com/aws/smithy-go/transport/http/request.go @@ -4,6 +4,10 @@ import ( "context" "fmt" "io" "net/http" "net/url" "strings" @@ -166,7 +170,11 @@ func (r *Request) Build(ctx context.Context) *http.Request { switch stream := r.stream.(type) { case *io.PipeReader: req.Body = io.NopCloser(stream) req.ContentLength = -1 default: // HTTP Client Request must only have a non-nil body if the @@ -174,7 +182,11 @@ func (r *Request) Build(ctx context.Context) *http.Request { // Client will interpret a non-nil body and ContentLength 0 as // "unknown". This is unwanted behavior. if req.ContentLength != 0 && r.stream != nil { req.Body = iointernal.NewSafeReadCloser(io.NopCloser(stream)) } } diff --git a/vendor/github.com/bombsimon/wsl/v4/.golangci.yml b/vendor/github.com/bombsimon/wsl/v4/.golangci.yml index bc79b83961..a5bb92c6b0 100644 --- a/vendor/github.com/bombsimon/wsl/v4/.golangci.yml +++ b/vendor/github.com/bombsimon/wsl/v4/.golangci.yml @@ -39,17 +39,27 @@ linters: enable-all: true disable: - cyclop - depguard - dupl - dupword - exhaustruct - exportloopref - forbidigo - funlen - gci - gocognit - gocyclo - godox - mnd - lll - maintidx @@ -61,6 +71,26 @@ linters: - rowserrcheck - testpackage - tparallel - varnamelen - wastedassign @@ -68,4 +98,8 @@ issues: exclude-use-default: true max-issues-per-linter: 0 max-same-issues: 0 # vim: set sw=2 ts=2 et: diff --git a/vendor/github.com/bombsimon/wsl/v4/analyzer.go b/vendor/github.com/bombsimon/wsl/v4/analyzer.go index e51df89c6c..3048bf0db5 100644 --- a/vendor/github.com/bombsimon/wsl/v4/analyzer.go +++ b/vendor/github.com/bombsimon/wsl/v4/analyzer.go @@ -3,7 +3,10 @@ package wsl import ( "flag" "go/ast" "go/token" "strings" "golang.org/x/tools/go/analysis" @@ -79,6 +82,7 @@ func (wa *wslAnalyzer) flags() flag.FlagSet { func (wa *wslAnalyzer) run(pass *analysis.Pass) (interface{}, error) { for _, file := range pass.Files { filename := getFilename(pass.Fset, file) if !strings.HasSuffix(filename, ".go") { continue @@ -90,6 +94,14 @@ func (wa *wslAnalyzer) run(pass *analysis.Pass) (interface{}, error) { // The file is skipped if the "unadjusted" file is a Go file, and it's a generated file (ex: "_test.go" file). // The other non-Go files are skipped by the first 'if' with the adjusted position. if !wa.config.IncludeGenerated && ast.IsGenerated(file) && strings.HasSuffix(fn, ".go") { continue } @@ -133,7 +145,11 @@ type multiStringValue struct { // Set implements the flag.Value interface and will overwrite the pointer to the // slice with a new pointer after splitting the flag by comma. func (m *multiStringValue) Set(value string) error { var s []string for _, v := range strings.Split(value, ",") { s = append(s, strings.TrimSpace(v)) @@ -152,6 +168,7 @@ func (m *multiStringValue) String() string { return strings.Join(*m.slicePtr, ", ") } func getFilename(fset *token.FileSet, file *ast.File) string { filename := fset.PositionFor(file.Pos(), true).Filename @@ -161,3 +178,5 @@ func getFilename(fset *token.FileSet, file *ast.File) string { return filename } diff --git a/vendor/github.com/bombsimon/wsl/v4/wsl.go b/vendor/github.com/bombsimon/wsl/v4/wsl.go index 44c7abe219..8ddd9f771d 100644 --- a/vendor/github.com/bombsimon/wsl/v4/wsl.go +++ b/vendor/github.com/bombsimon/wsl/v4/wsl.go @@ -353,7 +353,11 @@ func (p *processor) parseBlockStatements(statements []ast.Stmt) { return false } for j := range n { s1 := statements[i+j] s2 := statements[i+j+1] @@ -1113,8 +1117,13 @@ func (p *processor) findLeadingAndTrailingWhitespaces(ident *ast.Ident, stmt, ne return } blockStartLine = p.fileSet.Position(blockStartPos).Line blockEndLine = p.fileSet.Position(blockEndPos).Line // No whitespace possible if LBrace and RBrace is on the same line. if blockStartLine == blockEndLine { @@ -1362,6 +1371,7 @@ func isExampleFunc(ident *ast.Ident) bool { } func (p *processor) nodeStart(node ast.Node) int { return p.fileSet.Position(node.Pos()).Line } @@ -1370,6 +1380,16 @@ func (p *processor) nodeEnd(node ast.Node) int { if isEmptyLabeledStmt(node) { return p.fileSet.Position(node.Pos()).Line } return line @@ -1408,7 +1428,11 @@ func (p *processor) addErrorRange(reportAt, start, end token.Pos, reason string) } func (p *processor) addWarning(w string, pos token.Pos, t interface{}) { position := p.fileSet.Position(pos) p.warnings = append(p.warnings, fmt.Sprintf("%s:%d: %s (%T)", position.Filename, position.Line, w, t), diff --git a/vendor/github.com/buildkite/agent/v3/api/artifacts.go b/vendor/github.com/buildkite/agent/v3/api/artifacts.go index 351345de9d..3d61432f9f 100644 --- a/vendor/github.com/buildkite/agent/v3/api/artifacts.go +++ b/vendor/github.com/buildkite/agent/v3/api/artifacts.go @@ -51,6 +51,7 @@ type Artifact struct { } type ArtifactBatch struct { ID string `json:"id"` Artifacts []*Artifact `json:"artifacts"` UploadDestination string `json:"upload_destination"` @@ -92,6 +93,27 @@ type ArtifactBatchCreateResponse struct { // uploads. It overrides InstructionTemplate and should not contain // interpolations. Map: artifact ID -> instructions for that artifact. PerArtifactInstructions map[string]*ArtifactUploadInstructions `json:"per_artifact_instructions"` } // ArtifactSearchOptions specifies the optional parameters to the @@ -104,6 +126,7 @@ type ArtifactSearchOptions struct { IncludeDuplicates bool `url:"include_duplicates,omitempty"` } // ArtifactState represents the state of a single artifact, when calling UpdateArtifacts. type ArtifactState struct { ID string `json:"id"` @@ -127,6 +150,20 @@ type ArtifactBatchUpdateRequest struct { // CreateArtifacts takes a slice of artifacts, and creates them on Buildkite as a batch. func (c *Client) CreateArtifacts(ctx context.Context, jobID string, batch *ArtifactBatch) (*ArtifactBatchCreateResponse, *Response, error) { u := fmt.Sprintf("jobs/%s/artifacts", railsPathEscape(jobID)) req, err := c.newRequest(ctx, "POST", u, batch) if err != nil { @@ -142,11 +179,21 @@ func (c *Client) CreateArtifacts(ctx context.Context, jobID string, batch *Artif return createResponse, resp, err } // UpdateArtifacts updates Buildkite with one or more artifact states. func (c *Client) UpdateArtifacts(ctx context.Context, jobID string, artifactStates []ArtifactState) (*Response, error) { u := fmt.Sprintf("jobs/%s/artifacts", railsPathEscape(jobID)) payload := ArtifactBatchUpdateRequest{ Artifacts: artifactStates, } req, err := c.newRequest(ctx, "PUT", u, payload) @@ -154,12 +201,26 @@ func (c *Client) UpdateArtifacts(ctx context.Context, jobID string, artifactStat if err != nil { return nil, err } return c.doRequest(req, nil) } // SearchArtifacts searches Buildkite for a set of artifacts func (c *Client) SearchArtifacts(ctx context.Context, buildID string, opt *ArtifactSearchOptions) ([]*Artifact, *Response, error) { u := fmt.Sprintf("builds/%s/artifacts/search", railsPathEscape(buildID)) u, err := addOptions(u, opt) if err != nil { return nil, nil, err diff --git a/vendor/github.com/buildkite/agent/v3/api/auth.go b/vendor/github.com/buildkite/agent/v3/api/auth.go new file mode 100644 index 0000000000..1fb28da103 --- /dev/null +++ b/vendor/github.com/buildkite/agent/v3/api/auth.go @@ -0,0 +1,37 @@ +package api + +import ( + "fmt" + "net/http" +) + +type canceler interface { + CancelRequest(*http.Request) +} + +// authenticatedTransport manages injection of the API token +type authenticatedTransport struct { + // The Token used for authentication. This can either the be + // organizations registration token, or the agents access token. + Token string + + // Delegate is the underlying HTTP transport + Delegate http.RoundTripper +} + +// RoundTrip invoked each time a request is made +func (t authenticatedTransport) RoundTrip(req *http.Request) (*http.Response, error) { + if t.Token == "" { + return nil, fmt.Errorf("Invalid token, empty string supplied") + } + + req.Header.Set("Authorization", fmt.Sprintf("Token %s", t.Token)) + + return t.Delegate.RoundTrip(req) +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *authenticatedTransport) CancelRequest(req *http.Request) { + cancelableTransport := t.Delegate.(canceler) + cancelableTransport.CancelRequest(req) +} diff --git a/vendor/github.com/buildkite/agent/v3/api/client.go b/vendor/github.com/buildkite/agent/v3/api/client.go index 9803437b2d..fd0a336949 100644 --- a/vendor/github.com/buildkite/agent/v3/api/client.go +++ b/vendor/github.com/buildkite/agent/v3/api/client.go @@ -11,13 +11,20 @@ import ( "fmt" "io" "net/http" "net/url" "reflect" "strconv" "strings" "time" "github.com/buildkite/agent/v3/internal/agenthttp" "github.com/buildkite/agent/v3/logger" "github.com/google/go-querystring/query" ) @@ -45,9 +52,12 @@ type Config struct { // If true, requests and responses will be dumped and set to the logger DebugHTTP bool // If true timings for each request will be logged TraceHTTP bool // The http client used, leave nil for the default HTTPClient *http.Client @@ -77,22 +87,55 @@ func NewClient(l logger.Logger, conf Config) *Client { conf.UserAgent = defaultUserAgent } if conf.HTTPClient != nil { return &Client{ logger: l, client: conf.HTTPClient, conf: conf, } } return &Client{ logger: l, client: agenthttp.NewClient( agenthttp.WithAuthToken(conf.Token), agenthttp.WithAllowHTTP2(!conf.DisableHTTP2), agenthttp.WithTLSConfig(conf.TLSConfig), ), conf: conf, } } @@ -219,6 +262,7 @@ func newResponse(r *http.Response) *Response { // interface, the raw response body will be written to v, without attempting to // first decode it. func (c *Client) doRequest(req *http.Request, v any) (*Response, error) { resp, err := agenthttp.Do(c.logger, c.client, req, agenthttp.WithDebugHTTP(c.conf.DebugHTTP), @@ -227,12 +271,64 @@ func (c *Client) doRequest(req *http.Request, v any) (*Response, error) { if err != nil { return nil, err } defer resp.Body.Close() defer io.Copy(io.Discard, resp.Body) response := newResponse(resp) if err := checkResponse(resp); err != nil { // even though there was an error, we still return the response // in case the caller wants to inspect it further return response, err @@ -252,7 +348,11 @@ func (c *Client) doRequest(req *http.Request, v any) (*Response, error) { } } return response, nil } // ErrorResponse provides a message.
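The new auth.go above injects the agent's API token by wrapping the underlying transport in an http.RoundTripper. A standalone sketch of the same idea (tokenTransport is an illustrative name, not the agent's API; unlike the vendored code it clones the request before mutating it, which is what RoundTrippers are expected to do):

package main

import (
	"fmt"
	"net/http"
)

// tokenTransport adds an Authorization header to every outgoing request
// before delegating to the wrapped RoundTripper.
type tokenTransport struct {
	token string
	next  http.RoundTripper
}

func (t tokenTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	if t.token == "" {
		return nil, fmt.Errorf("invalid token, empty string supplied")
	}
	// Clone before mutating: RoundTrippers must not modify the caller's request.
	r := req.Clone(req.Context())
	r.Header.Set("Authorization", "Token "+t.token)
	return t.next.RoundTrip(r)
}

func main() {
	client := &http.Client{Transport: tokenTransport{token: "secret", next: http.DefaultTransport}}
	_ = client // client.Get(...) now sends "Authorization: Token secret"
}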
diff --git a/vendor/github.com/buildkite/agent/v3/api/oidc.go b/vendor/github.com/buildkite/agent/v3/api/oidc.go index b28378894e..8a54f3947c 100644 --- a/vendor/github.com/buildkite/agent/v3/api/oidc.go +++ b/vendor/github.com/buildkite/agent/v3/api/oidc.go @@ -10,15 +10,23 @@ type OIDCToken struct { } type OIDCTokenRequest struct { Job string Audience string Lifetime int Claims []string AWSSessionTags []string } func (c *Client) OIDCToken(ctx context.Context, methodReq *OIDCTokenRequest) (*OIDCToken, *Response, error) { m := &struct { Audience string `json:"audience,omitempty"` Lifetime int `json:"lifetime,omitempty"` Claims []string `json:"claims,omitempty"` @@ -28,6 +36,15 @@ func (c *Client) OIDCToken(ctx context.Context, methodReq *OIDCTokenRequest) (*O Lifetime: methodReq.Lifetime, Claims: methodReq.Claims, AWSSessionTags: methodReq.AWSSessionTags, } u := fmt.Sprintf("jobs/%s/oidc/tokens", railsPathEscape(methodReq.Job)) diff --git a/vendor/github.com/buildkite/agent/v3/api/steps.go b/vendor/github.com/buildkite/agent/v3/api/steps.go index dcf758478b..e0c0712a52 100644 --- a/vendor/github.com/buildkite/agent/v3/api/steps.go +++ b/vendor/github.com/buildkite/agent/v3/api/steps.go @@ -54,6 +54,7 @@ func (c *Client) StepUpdate(ctx context.Context, stepIdOrKey string, stepUpdate return c.doRequest(req, nil) } type StepCancel struct { Build string `json:"build_id"` @@ -82,3 +83,5 @@ func (c *Client) StepCancel(ctx context.Context, stepIdOrKey string, stepCancel return stepCancelResponse, resp, nil } diff --git a/vendor/github.com/buildkite/agent/v3/version/VERSION b/vendor/github.com/buildkite/agent/v3/version/VERSION index 7ca8b9836e..dde8778627 100644 --- a/vendor/github.com/buildkite/agent/v3/version/VERSION +++ b/vendor/github.com/buildkite/agent/v3/version/VERSION @@ -1 +1,5 @@ 3.91.0 diff --git a/vendor/github.com/buildkite/agent/v3/version/version.go b/vendor/github.com/buildkite/agent/v3/version/version.go index ac515e225a..0b34eea618 100644 --- a/vendor/github.com/buildkite/agent/v3/version/version.go +++ b/vendor/github.com/buildkite/agent/v3/version/version.go @@ -38,10 +38,13 @@ func BuildNumber() string { return buildNumber } func IsDevelopmentBuild() bool { return buildNumber == "x" } // commitInfo returns a string consisting of the commit hash and whether the the build was made in a // `dirty` working directory or not. A dirty working directory is one that has uncommitted changes // to files that git would track.
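OIDCToken above shapes its request body with an anonymous struct whose fields carry omitempty tags, so unset options never reach the wire. A minimal reproduction of that encoding behaviour (the field values here are made up for illustration):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Zero-valued fields tagged omitempty are dropped from the JSON output,
	// mirroring how the OIDC token request payload is built.
	m := &struct {
		Audience string   `json:"audience,omitempty"`
		Lifetime int      `json:"lifetime,omitempty"`
		Claims   []string `json:"claims,omitempty"`
	}{
		Audience: "sigstore",
	}
	b, _ := json.Marshal(m)
	fmt.Println(string(b)) // {"audience":"sigstore"} — Lifetime and Claims are omitted
}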
diff --git a/vendor/github.com/buildkite/go-pipeline/README.md b/vendor/github.com/buildkite/go-pipeline/README.md index 0d7a10ea24..17fb550fd7 100644 --- a/vendor/github.com/buildkite/go-pipeline/README.md +++ b/vendor/github.com/buildkite/go-pipeline/README.md @@ -3,7 +3,11 @@ [![Build status](https://badge.buildkite.com/1fad7fb9610283e4955ea4ec4c88faca52162b637fea61821e.svg)](https://buildkite.com/buildkite/go-pipeline) [![Go Reference](https://pkg.go.dev/badge/github.com/buildkite/go-pipeline.svg)](https://pkg.go.dev/github.com/buildkite/go-pipeline) `go-pipeline` is a Go library used for building and modifying Buildkite pipelines in golang. It's used internally by the [Buildkite Agent](https://github.com/buildkite/agent) to inspect and sign pipelines prior to uploading them, but is also useful for building tools that generate pipelines. ## Installation diff --git a/vendor/github.com/buildkite/go-pipeline/ordered/unmarshal.go b/vendor/github.com/buildkite/go-pipeline/ordered/unmarshal.go index d528f348d2..2746c08b55 100644 --- a/vendor/github.com/buildkite/go-pipeline/ordered/unmarshal.go +++ b/vendor/github.com/buildkite/go-pipeline/ordered/unmarshal.go @@ -16,7 +16,10 @@ import ( var ( ErrIntoNonPointer = errors.New("cannot unmarshal into non-pointer") ErrIntoNil = errors.New("cannot unmarshal into nil") ErrNotSettable = errors.New("target value not settable") ErrIncompatibleTypes = errors.New("incompatible types") ErrUnsupportedSrc = errors.New("cannot unmarshal from src") ErrMultipleInlineFields = errors.New(`multiple fields tagged with yaml:",inline"`) @@ -164,6 +167,7 @@ func Unmarshal(src, dst any) error { if sdst.Kind() != reflect.Slice { return fmt.Errorf("%w: cannot unmarshal []any into %T", ErrIncompatibleTypes, dst) } stype := sdst.Type() // stype = []E = the type of the slice etype := stype.Elem() // etype = E = Type of the slice's elements if sdst.IsNil() { @@ -174,6 +178,12 @@ func Unmarshal(src, dst any) error { var warns []error for i, a := range tsrc { x := reflect.New(etype) // x := new(E) (type *E) err := Unmarshal(a, x.Interface()) if w := warning.As(err); w != nil { warns = append(warns, w.Wrapf("while unmarshaling item at index %d of %d", i, len(tsrc))) @@ -243,11 +253,15 @@ func (m *Map[K, V]) decodeInto(target any) error { if !ok { return fmt.Errorf("%w: cannot unmarshal from %T, want K=string, V=any", ErrIncompatibleTypes, m) } // Note: m, and therefore tm, can be nil at this moment. // Work out the kind of target being used. // Dereference the target to find the inner value, if needed. targetValue := reflect.ValueOf(target) switch targetValue.Kind() { case reflect.Pointer: // Passed a pointer to something. @@ -268,20 +282,45 @@ func (m *Map[K, V]) decodeInto(target any) error { case reflect.Map: // Continue below. default: return fmt.Errorf("%w: cannot unmarshal %T into %T, want map or *struct{...}", ErrIncompatibleTypes, m, target) } switch targetValue.Kind() { case reflect.Map: // Process the map directly. mapType := targetValue.Type() // For simplicity, require the key type to be string. if keyType := mapType.Key(); keyType.Kind() != reflect.String { return fmt.Errorf("%w for map key: cannot unmarshal %T into %T", ErrIncompatibleTypes, m, target) } // If tm is nil, then set the target to nil. if tm == nil { if targetValue.IsNil() { @@ -300,6 +339,11 @@ func (m *Map[K, V]) decodeInto(target any) error { return ErrNotSettable } targetValue.Set(reflect.MakeMapWithSize(mapType, tm.Len())) } valueType := mapType.Elem() @@ -313,7 +357,11 @@ func (m *Map[K, V]) decodeInto(target any) error { return fmt.Errorf("unmarshaling value for key %q: %w", k, err) } targetValue.SetMapIndex(reflect.ValueOf(k), nv.Elem()) return nil }); err != nil { return err @@ -328,7 +376,11 @@ func (m *Map[K, V]) decodeInto(target any) error { // These are the (accessible by reflection) fields it has. // This includes non-exported fields. fields := reflect.VisibleFields(targetValue.Type()) var inlineField reflect.StructField outlineKeys := make(map[string]struct{}) @@ -390,7 +442,11 @@ func (m *Map[K, V]) decodeInto(target any) error { // Now load value into the field recursively. // Get a pointer to the field. This works because target is a pointer. ptrToField := targetValue.FieldByIndex(field.Index).Addr() err := Unmarshal(value, ptrToField.Interface()) if w := warning.As(err); w != nil { warns = append(warns, w.Wrapf("while unmarshaling the value for key %q into struct field %q", key, field.Name)) @@ -405,7 +461,11 @@ func (m *Map[K, V]) decodeInto(target any) error { // The rest is handling the ",inline" field. // We support any field that Unmarshal can unmarshal tm into. inlinePtr := targetValue.FieldByIndex(inlineField.Index).Addr() // Copy all values that weren't non-inline fields into a temporary map. // This is just to avoid mutating tm.
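decodeInto's map branch above relies on three reflect steps: allocate the map if the settable target is nil, build a value of the element type, and store it with SetMapIndex. A simplified, hypothetical sketch of just those steps (setKey is not part of go-pipeline):

package main

import (
	"fmt"
	"reflect"
)

// setKey stores val under key in *target, allocating the map if needed.
// target must be a pointer to a map with string keys.
func setKey(target any, key string, val any) error {
	mv := reflect.ValueOf(target).Elem() // the map value behind the pointer
	if mv.Kind() != reflect.Map || mv.Type().Key().Kind() != reflect.String {
		return fmt.Errorf("want *map[string]T, got %T", target)
	}
	if mv.IsNil() {
		// A nil map can't accept keys; allocate one of the right type.
		mv.Set(reflect.MakeMapWithSize(mv.Type(), 1))
	}
	nv := reflect.New(mv.Type().Elem()) // *T
	nv.Elem().Set(reflect.ValueOf(val)) // assumes val is assignable to T
	mv.SetMapIndex(reflect.ValueOf(key), nv.Elem())
	return nil
}

func main() {
	var m map[string]int
	fmt.Println(setKey(&m, "answer", 42), m) // <nil> map[answer:42]
}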
diff --git a/vendor/github.com/buildkite/go-pipeline/step_command_matrix.go b/vendor/github.com/buildkite/go-pipeline/step_command_matrix.go index ba75e74f18..e4e4f3e7fe 100644 --- a/vendor/github.com/buildkite/go-pipeline/step_command_matrix.go +++ b/vendor/github.com/buildkite/go-pipeline/step_command_matrix.go @@ -147,9 +147,13 @@ func (m *Matrix) validatePermutation(p MatrixPermutation) error { // Check that the dimensions in the permutation are unique and defined in // the matrix setup. for dim := range p { // An empty but non-nil setup dimension is valid (all values may be // given by adjustment tuples). if m.Setup[dim] == nil { return fmt.Errorf("%w: %q", errPermutationUnknownDimension, dim) } } @@ -182,9 +186,13 @@ func (m *Matrix) validatePermutation(p MatrixPermutation) error { return fmt.Errorf("%w: %d != %d", errAdjustmentLengthMismatch, len(adj.With), len(m.Setup)) } for dim := range adj.With { // An empty but non-nil setup dimension is valid (all values may be // given by adjustment tuples). if m.Setup[dim] == nil { return fmt.Errorf("%w: %q", errAdjustmentUnknownDimension, dim) } } diff --git a/vendor/github.com/buildkite/interpolate/interpolate.go b/vendor/github.com/buildkite/interpolate/interpolate.go index 307956491b..69de810f3c 100644 --- a/vendor/github.com/buildkite/interpolate/interpolate.go +++ b/vendor/github.com/buildkite/interpolate/interpolate.go @@ -1,8 +1,13 @@ package interpolate import ( "fmt" "strings" ) // Interpolate takes a set of environment and interpolates it into the provided string using shell script expansions @@ -28,6 +33,7 @@ func Identifiers(str string) ([]string, error) { // An expansion is something that takes in ENV and returns a string or an error type Expansion interface { // Expand expands the expansion using variables from env. Expand(env Env) (string, error) // Escaped expansions do something special and return identifiers // (starting with $) that *would* become referenced after a round of // unescaping. Identifiers() []string } @@ -90,6 +99,7 @@ func (e UnsetValueExpansion) Expand(env Env) (string, error) { // EscapedExpansion is an expansion that is delayed until later on (usually by a later process) type EscapedExpansion struct { // PotentialIdentifier is an identifier for the purpose of Identifiers, // but not for the purpose of Expand. PotentialIdentifier string @@ -101,6 +111,17 @@ func (e EscapedExpansion) Identifiers() []string { func (e EscapedExpansion) Expand(Env) (string, error) { return "$", nil } // SubstringExpansion returns a substring (or slice) of the env @@ -201,7 +222,11 @@ func (e Expression) Identifiers() []string { } func (e Expression) Expand(env Env) (string, error) { var buf strings.Builder for _, item := range e { if item.Expansion != nil { @@ -209,9 +234,15 @@ func (e Expression) Expand(env Env) (string, error) { if err != nil { return "", err } buf.WriteString(result) } else { buf.WriteString(item.Text) } } diff --git a/vendor/github.com/buildkite/interpolate/parser.go b/vendor/github.com/buildkite/interpolate/parser.go index e28511b23e..f5f6207f47 100644 --- a/vendor/github.com/buildkite/interpolate/parser.go +++ b/vendor/github.com/buildkite/interpolate/parser.go @@ -87,7 +87,11 @@ func (p *Parser) parseExpression(stop ...rune) (Expression, error) { return nil, err } expr = append(expr, ExpressionItem{Expansion: ee}) continue } @@ -100,12 +104,20 @@ func (p *Parser) parseExpression(stop ...rune) (Expression, error) { // If we run into a dollar sign and it's not the last char, it's an expansion if c == '$' && p.pos < (len(p.input)-1) { expressionItem, err := p.parseExpansion() if err != nil { return nil, err } expr = append(expr, expressionItem) continue } @@ -123,6 +135,7 @@ func (p *Parser) parseExpression(stop ...rune) (Expression, error) { return expr, nil } // parseEscapedExpansion attempts to extract a *potential* identifier or brace // expression from the text following the escaped dollarsign. func (p *Parser) parseEscapedExpansion() (EscapedExpansion, error) { @@ -191,6 +204,49 @@ func (p *Parser) parseExpansion() (ExpressionItem, error) { return ExpressionItem{Expansion: VariableExpansion{ Identifier: identifier, }}, nil } func (p *Parser) parseBraceExpansion() (Expansion, error) { @@ -205,9 +261,13 @@ func (p *Parser) parseBraceExpansion() (Expansion, error) { if c := p.peekRune(); c == '}' { _ = p.nextRune() return VariableExpansion{ Identifier: identifier, }, nil } var operator string @@ -328,8 +388,13 @@ func (p *Parser) scanIdentifier() (string, error) { if c := p.peekRune(); !unicode.IsLetter(c) { return "", fmt.Errorf("Expected identifier to start with a letter, got %c", c) } notIdentifierChar := func(r rune) bool { return !(unicode.IsLetter(r) || unicode.IsNumber(r) || r == '_') } return p.scanUntil(notIdentifierChar), nil } diff --git a/vendor/github.com/buildkite/roko/retrier.go b/vendor/github.com/buildkite/roko/retrier.go index 17213ab43d..7c0a699fab 100644 --- a/vendor/github.com/buildkite/roko/retrier.go +++ b/vendor/github.com/buildkite/roko/retrier.go @@ -10,13 +10,20 @@ import ( var defaultRandom = rand.New(rand.NewSource(time.Now().UnixNano())) const defaultJitterInterval = 1000 * time.Millisecond type Retrier struct { maxAttempts int attemptCount int jitter bool jitterRange jitterRange forever bool rand *rand.Rand @@ -25,11 +32,17 @@ type Retrier struct { intervalCalculator Strategy strategyType string nextInterval time.Duration } type jitterRange struct{ min, max time.Duration } type Strategy func(*Retrier) time.Duration const ( @@ -122,6 +135,7 @@ func WithStrategy(strategy Strategy, strategyType string) retrierOpt { func WithJitter() retrierOpt { return func(r *Retrier) { r.jitter = true r.jitterRange = jitterRange{min: 0, max: defaultJitterInterval} } } @@ -142,6 +156,8 @@ func WithJitterRange(min, max time.Duration) retrierOpt { min: min, max: max, } } } @@ -184,7 +200,11 @@ func NewRetrier(opts ...retrierOpt) *Retrier { oldJitter := r.jitter r.jitter = false // Temporarily turn off jitter while we check if the interval is 0 if r.forever && r.strategyType == constantStrategy && r.intervalCalculator(r) == 0 { panic("retriers using the constant strategy that run forever must have an interval") } r.jitter = oldJitter // and now set it back to what it was previously @@ -192,16 +212,24 @@ func NewRetrier(opts ...retrierOpt) *Retrier { return r } // Jitter returns a duration in the interval in the range [0, r.jitterRange.max - r.jitterRange.min). When no jitter range // is defined, the default range is [0, 1 second). The jitter is recalculated for each retry. // If jitter is disabled, this method will always return 0. func (r *Retrier) Jitter() time.Duration { if !r.jitter { return 0 } min, max := float64(r.jitterRange.min), float64(r.jitterRange.max) return time.Duration(min + (max-min)*rand.Float64()) } // MarkAttempt increments the attempt count for the retrier. This affects ShouldGiveUp, and also affects the retry interval @@ -217,7 +245,11 @@ func (r *Retrier) Break() { // SetNextInterval overrides the strategy for the interval before the next try func (r *Retrier) SetNextInterval(d time.Duration) { r.nextInterval = d } // ShouldGiveUp returns whether the retrier should stop trying do do the thing it's been asked to do @@ -235,9 +267,20 @@ func (r *Retrier) ShouldGiveUp() bool { return r.attemptCount >= r.maxAttempts } // NextInterval returns the length of time that the retrier will wait before the next retry func (r *Retrier) NextInterval() time.Duration { return r.nextInterval } func (r *Retrier) String() string { @@ -253,8 +296,14 @@ func (r *Retrier) String() string { return str } if r.nextInterval > 0 { str = str + fmt.Sprintf(" Retrying in %s", r.nextInterval) } else { str = str + " Retrying immediately" } @@ -276,16 +325,31 @@ func (r *Retrier) Do(callback func(*Retrier) error) error { // DoWithContext is a context-aware variant of Do. func (r *Retrier) DoWithContext(ctx context.Context, callback func(*Retrier) error) error { for { // Calculate the next interval before we do work - this way, the calls to r.NextInterval() in the callback will be // accurate and include the calculated jitter, if present r.nextInterval = r.intervalCalculator(r) // Perform the action the user has requested we retry err := callback(r) if err == nil { return nil } r.MarkAttempt() // If the last callback called r.Break(), or if we've hit our call limit, bail out and return the last error we got @@ -293,7 +357,11 @@ func (r *Retrier) DoWithContext(ctx context.Context, callback func(*Retrier) err return err } if err := r.sleepOrDone(ctx, r.nextInterval); err != nil { return err } } diff --git a/vendor/github.com/butuzov/ireturn/analyzer/analyzer.go b/vendor/github.com/butuzov/ireturn/analyzer/analyzer.go index ebf2a0dbea..ed272661dc 100644 --- a/vendor/github.com/butuzov/ireturn/analyzer/analyzer.go +++ b/vendor/github.com/butuzov/ireturn/analyzer/analyzer.go @@ -8,12 +8,21 @@ import ( "strings" "sync" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" "github.com/butuzov/ireturn/analyzer/internal/config" "github.com/butuzov/ireturn/analyzer/internal/types" ) const name string = "ireturn" // linter name @@ -23,11 +32,19 @@ type validator interface { } type analyzer struct { once sync.Once mu sync.RWMutex handler validator err error disabledNolint bool found
[]analysis.Diagnostic } @@ -63,7 +80,11 @@ func (a *analyzer) run(pass *analysis.Pass) (interface{}, error) { } // 003. Is it allowed to be checked? +<<<<<<< HEAD if !a.disabledNolint && hasDisallowDirective(f.Doc) { +======= + if !a.diabledNolint && hasDisallowDirective(f.Doc) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return } @@ -115,7 +136,11 @@ func (a *analyzer) readConfiguration(fs *flag.FlagSet) { // First: checking nonolint directive val := fs.Lookup("nonolint") if val != nil { +<<<<<<< HEAD a.disabledNolint = fs.Lookup("nonolint").Value.String() == "true" +======= + a.diabledNolint = fs.Lookup("nonolint").Value.String() == "true" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Second: validators implementation next @@ -128,7 +153,11 @@ func (a *analyzer) readConfiguration(fs *flag.FlagSet) { } func NewAnalyzer() *analysis.Analyzer { +<<<<<<< HEAD a := analyzer{} +======= + a := analyzer{} //nolint: exhaustivestruct +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return &analysis.Analyzer{ Name: name, @@ -196,7 +225,11 @@ func filterInterfaces(p *analysis.Pass, ft *ast.FuncType, di map[string]struct{} typeParams := val.String() prefix, suffix := "interface{", "}" +<<<<<<< HEAD if strings.HasPrefix(typeParams, prefix) { //nolint:gosimple +======= + if strings.HasPrefix(typeParams, prefix) { // nolint: gosimple +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) typeParams = typeParams[len(prefix):] } if strings.HasSuffix(typeParams, suffix) { diff --git a/vendor/github.com/butuzov/ireturn/analyzer/internal/config/allow.go b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/allow.go index da101c7862..ed8e3eb8c3 100644 --- a/vendor/github.com/butuzov/ireturn/analyzer/internal/config/allow.go +++ b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/allow.go @@ -2,7 +2,11 @@ package config import "github.com/butuzov/ireturn/analyzer/internal/types" +<<<<<<< HEAD // allowConfig specifies a list of interfaces (keywords, patterns and regular expressions) +======= +// allowConfig specifies a list of interfaces (keywords, patters and regular expressions) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // that are allowed by ireturn as valid to return, any non listed interface are rejected. 
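Stepping back to the roko retrier hunks above: the WIP side's comment in DoWithContext about skipping the first interval is easiest to check with numbers. The interval has to be read before MarkAttempt increments the counter, otherwise an exponential strategy starts at 2^1 instead of 2^0. A toy sketch of that off-by-one (not roko's actual strategy code):

```go
package main

import (
	"fmt"
	"time"
)

// Toy exponential strategy: interval = 2^attempts seconds.
func exponential(attempts int) time.Duration {
	return time.Duration(1<<attempts) * time.Second
}

func main() {
	attempts := 0
	for try := 0; try < 3; try++ {
		// Read the interval *before* counting the attempt, as the hunk's
		// comment prescribes: the waits come out 1s, 2s, 4s (2^0, 2^1, 2^2).
		good := exponential(attempts)

		attempts++ // MarkAttempt

		// Reading it *after* the increment would give 2s, 4s, 8s
		// (2^1, 2^2, 2^3) -- the "bad" series the comment warns about.
		bad := exponential(attempts)

		fmt.Printf("attempt %d: good=%v bad=%v\n", try, good, bad)
	}
}
```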
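As for the ireturn hunks: the allow/reject configs above gate which interface return types the linter tolerates (`-accept` and `-reject` being mutually exclusive, per ErrCollisionOfInterests). A minimal illustration of the pattern the linter reports, using hypothetical types rather than anything from ireturn's test data:

```go
package main

import "fmt"

// Stringer stands in for any interface that is not on the allow list.
type Stringer interface{ String() string }

type widget struct{ name string }

func (w widget) String() string { return w.name }

// ireturn reports this: the constructor hides the concrete type behind
// an interface (unless the interface matched an -accept entry).
func newWidgetIface() Stringer { return widget{name: "w"} }

// Returning the concrete type is what the linter steers you toward;
// callers can still use the value as a Stringer.
func newWidget() widget { return widget{name: "w"} }

func main() {
	fmt.Println(newWidgetIface().String(), newWidget().String())
}
```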
type allowConfig struct { *defaultConfig diff --git a/vendor/github.com/butuzov/ireturn/analyzer/internal/config/new.go b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/new.go index d6914af862..0d0d9f4503 100644 --- a/vendor/github.com/butuzov/ireturn/analyzer/internal/config/new.go +++ b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/new.go @@ -10,6 +10,10 @@ import ( var ErrCollisionOfInterests = errors.New("can't have both `-accept` and `-reject` specified at same time") +<<<<<<< HEAD +======= +// nolint: exhaustivestruct +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func DefaultValidatorConfig() *allowConfig { return allowAll([]string{ types.NameEmpty, // "empty": empty interfaces (interface{}) diff --git a/vendor/github.com/butuzov/ireturn/analyzer/internal/config/reject.go b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/reject.go index b2cde910ce..7cb9f3bfae 100644 --- a/vendor/github.com/butuzov/ireturn/analyzer/internal/config/reject.go +++ b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/reject.go @@ -2,7 +2,11 @@ package config import "github.com/butuzov/ireturn/analyzer/internal/types" +<<<<<<< HEAD // rejectConfig specifies a list of interfaces (keywords, patterns and regular expressions) +======= +// rejectConfig specifies a list of interfaces (keywords, patters and regular expressions) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // that are rejected by ireturn as valid to return, any non listed interface are allowed. type rejectConfig struct { *defaultConfig diff --git a/vendor/github.com/butuzov/ireturn/analyzer/internal/types/iface.go b/vendor/github.com/butuzov/ireturn/analyzer/internal/types/iface.go index 0f4286515f..52463de983 100644 --- a/vendor/github.com/butuzov/ireturn/analyzer/internal/types/iface.go +++ b/vendor/github.com/butuzov/ireturn/analyzer/internal/types/iface.go @@ -47,7 +47,11 @@ func (i IFace) HashString() string { } func (i IFace) ExportDiagnostic() analysis.Diagnostic { +<<<<<<< HEAD return analysis.Diagnostic{ +======= + return analysis.Diagnostic{ //nolint: exhaustivestruct +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Pos: i.Pos, Message: i.String(), } diff --git a/vendor/github.com/butuzov/mirror/MIRROR_FUNCS.md b/vendor/github.com/butuzov/mirror/MIRROR_FUNCS.md index da30c8e00f..d2288f08b4 100644 --- a/vendor/github.com/butuzov/mirror/MIRROR_FUNCS.md +++ b/vendor/github.com/butuzov/mirror/MIRROR_FUNCS.md @@ -1,3 +1,4 @@ +<<<<<<< HEAD | Function | Mirror | @@ -53,3 +54,206 @@ | `func utf8.FullRune([]byte) bool` | `func utf8.FullRuneInString(string) bool` | | `func utf8.RuneCount([]byte) int` | `func utf8.RuneCountInString(string) int` | | `func utf8.Valid([]byte) bool` | `func utf8.ValidString(string) bool` | +======= + +func (*bufio.Writer) Write([]byte) (int, error) +func (*bufio.Writer) WriteString(string) (int, error) + + +func (*bufio.Writer) WriteRune(rune) (int, error) +func (*bufio.Writer) WriteString(string) (int, error) + + +func (*bytes.Buffer) Write([]byte) (int, error) +func (*bytes.Buffer) WriteString(string) (int, error) + + +func (*bytes.Buffer) WriteRune(rune) (int, error) +func (*bytes.Buffer) WriteString(string) (int, error) + + +func bytes.Compare([]byte, []byte) int +func strings.Compare(string, string) int + + +func bytes.Contains([]byte, []byte) bool +func strings.Contains(string, string) bool + + +func bytes.ContainsAny([]byte, string) bool +func strings.ContainsAny(string, string) bool + + +func bytes.ContainsRune([]byte, byte) bool 
+func strings.ContainsRune(string, byte) bool + + +func bytes.Count([]byte, []byte) int +func strings.Count(string, string) int + + +func bytes.EqualFold([]byte, []byte) bool +func strings.EqualFold(string, string) bool + + +func bytes.HasPrefix([]byte, []byte) bool +func strings.HasPrefix(string, string) bool + + +func bytes.HasSuffix([]byte, []byte) bool +func strings.HasSuffix(string, string) bool + + +func bytes.Index([]byte, []byte) int +func strings.Index(string, string) int + + +func bytes.IndexAny([]byte, string) int +func strings.IndexAny(string, string) int + + +func bytes.IndexByte([]byte, byte) int +func strings.IndexByte(string, byte) int + + +func bytes.IndexFunc([]byte, func(rune) bool) int +func strings.IndexFunc(string, func(rune) bool) int + + +func bytes.IndexRune([]byte, rune) int +func strings.IndexRune(string, rune) int + + +func bytes.LastIndex([]byte, []byte) int +func strings.LastIndex(string, string) int + + +func bytes.LastIndexAny([]byte, string) int +func strings.LastIndexAny(string, string) int + + +func bytes.LastIndexByte([]byte, byte) int +func strings.LastIndexByte(string, byte) int + + +func bytes.LastIndexFunc([]byte, func(rune) bool) int +func strings.LastIndexFunc(string, func(rune) bool) int + + +func bytes.NewBuffer([]byte) *bytes.Buffer +func bytes.NewBufferString(string) *bytes.Buffer + + +func (*httptest.ResponseRecorder) Write([]byte) (int, error) +func (*httptest.ResponseRecorder) WriteString(string) (int, error) + + +func (*maphash.Hash) Write([]byte) (int, error) +func (*maphash.Hash) WriteString(string) (int, error) + + +func (*os.File) Write([]byte) (int, error) +func (*os.File) WriteString(string) (int, error) + + +func regexp.Match(string, []byte) (bool, error) +func regexp.MatchString(string, string) (bool, error) + + +func (*regexp.Regexp) FindAllIndex([]byte, int) [][]int +func (*regexp.Regexp) FindAllStringIndex(string, int) [][]int + + +func (*regexp.Regexp) FindAllSubmatchIndex([]byte, int) [][]int +func (*regexp.Regexp) FindAllStringSubmatchIndex(string, int) [][]int + + +func (*regexp.Regexp) FindIndex([]byte) []int +func (*regexp.Regexp) FindStringIndex(string) []int + + +func (*regexp.Regexp) FindSubmatchIndex([]byte) []int +func (*regexp.Regexp) FindStringSubmatchIndex(string) []int + + +func (*regexp.Regexp) Match([]byte) bool +func (*regexp.Regexp) MatchString(string) bool + + +func (*strings.Builder) Write([]byte) (int, error) +func (*strings.Builder) WriteString(string) (int, error) + + +func (*strings.Builder) WriteRune(rune) (int, error) +func (*strings.Builder) WriteString(string) (int, error) + + +func strings.Compare(string) int +func bytes.Compare([]byte) int + + +func strings.Contains(string) bool +func bytes.Contains([]byte) bool + + +func strings.ContainsAny(string) bool +func bytes.ContainsAny([]byte) bool + + +func strings.ContainsRune(string) bool +func bytes.ContainsRune([]byte) bool + + +func strings.EqualFold(string) bool +func bytes.EqualFold([]byte) bool + + +func strings.HasPrefix(string) bool +func bytes.HasPrefix([]byte) bool + + +func strings.HasSuffix(string) bool +func bytes.HasSuffix([]byte) bool + + +func strings.Index(string) int +func bytes.Index([]byte) int + + +func strings.IndexFunc(string, func(r rune) bool) int +func bytes.IndexFunc([]byte, func(r rune) bool) int + + +func strings.LastIndex(string) int +func bytes.LastIndex([]byte) int + + +func strings.LastIndexAny(string) int +func bytes.LastIndexAny([]byte) int + + +func strings.LastIndexFunc(string, func(r rune) bool) int +func 
bytes.LastIndexFunc([]byte, func(r rune) bool) int + + +func utf8.DecodeLastRune([]byte) (rune, int) +func utf8.DecodeLastRuneInString(string) (rune, int) + + +func utf8.DecodeRune([]byte) (rune, int) +func utf8.DecodeRuneInString(string) (rune, int) + + +func utf8.FullRune([]byte) bool +func utf8.FullRuneInString(string) bool + + +func utf8.RuneCount([]byte) int +func utf8.RuneCountInString(string) int + + +func utf8.Valid([]byte) bool +func utf8.ValidString(string) bool + + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/butuzov/mirror/Makefile b/vendor/github.com/butuzov/mirror/Makefile index dab6f160ae..a27bc8a5bf 100644 --- a/vendor/github.com/butuzov/mirror/Makefile +++ b/vendor/github.com/butuzov/mirror/Makefile @@ -10,8 +10,12 @@ endef # Generate Artifacts ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ generate: ## Generate Assets +<<<<<<< HEAD $(MAKE) generate-tests $(MAKE) generate-mirror-table +======= + $(MAKE) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) generate-tests: ## Generates Assets at testdata go run ./cmd/internal/tests/ "$(PWD)/testdata" @@ -53,7 +57,11 @@ tests-summary: bin/tparse lints: ## Run golangci-lint lints: bin/golangci-lint lints: +<<<<<<< HEAD golangci-lint run --no-config ./... --exclude-dirs "^(cmd|testdata)" +======= + golangci-lint run --no-config ./... --skip-dirs "^(cmd|testdata)" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) cover: ## Run Coverage @@ -72,8 +80,13 @@ bin/tparse: INSTALL_URL=github.com/mfridman/tparse@v0.13.2 bin/tparse: $(call install_go_bin, tparse, $(INSTALL_URL)) +<<<<<<< HEAD bin/golangci-lint: ## Installs golangci-lint@v1.62.0 (if not exists) bin/golangci-lint: INSTALL_URL=github.com/golangci/golangci-lint@v1.62.0 +======= +bin/golangci-lint: ## Installs golangci-lint@v1.55.2 (if not exists) +bin/golangci-lint: INSTALL_URL=github.com/golangci/golangci-lint@v1.55.2 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) bin/golangci-lint: $(call install_go_bin, golangci-lint, $(INSTALL_URL)) @@ -100,7 +113,11 @@ help: dep-gawk @ echo "" +<<<<<<< HEAD # Helper Methods ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +======= +# Helper Mehtods ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) dep-gawk: @ if [ -z "$(shell command -v gawk)" ]; then \ if [ -x /usr/local/bin/brew ]; then $(MAKE) _brew_gawk_install; exit 0; fi; \ @@ -112,21 +129,37 @@ dep-gawk: fi _brew_gawk_install: +<<<<<<< HEAD @ echo "Installing gawk using brew... " +======= + @ echo "Instaling gawk using brew... " +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) @ brew install gawk --quiet @ echo "done" _ubuntu_gawk_install: +<<<<<<< HEAD @ echo "Installing gawk using apt-get... " +======= + @ echo "Instaling gawk using apt-get... " +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) @ apt-get -q install gawk -y @ echo "done" _alpine_gawk_install: +<<<<<<< HEAD @ echo "Installing gawk using yum... " +======= + @ echo "Instaling gawk using yum... " +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) @ apk add --update --no-cache gawk @ echo "done" _centos_gawk_install: +<<<<<<< HEAD @ echo "Installing gawk using yum... " +======= + @ echo "Instaling gawk using yum... 
" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) @ yum install -q -y gawk; @ echo "done" diff --git a/vendor/github.com/butuzov/mirror/analyzer.go b/vendor/github.com/butuzov/mirror/analyzer.go index b15019ce1f..42c59b7618 100644 --- a/vendor/github.com/butuzov/mirror/analyzer.go +++ b/vendor/github.com/butuzov/mirror/analyzer.go @@ -44,9 +44,15 @@ func Run(pass *analysis.Pass, withTests bool) []*checker.Violation { BytesFunctions, BytesBufferMethods, RegexpFunctions, RegexpRegexpMethods, StringFunctions, StringsBuilderMethods, +<<<<<<< HEAD MaphashMethods, MaphashFunctions, BufioMethods, HTTPTestMethods, OsFileMethods, UTF8Functions, +======= + BufioMethods, HTTPTestMethods, + OsFileMethods, MaphashMethods, + UTF8Functions, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) check.Type = checker.WrapType(pass.TypesInfo) diff --git a/vendor/github.com/butuzov/mirror/checkers_maphash.go b/vendor/github.com/butuzov/mirror/checkers_maphash.go index 345a64123e..03bf490515 100644 --- a/vendor/github.com/butuzov/mirror/checkers_maphash.go +++ b/vendor/github.com/butuzov/mirror/checkers_maphash.go @@ -2,6 +2,7 @@ package mirror import "github.com/butuzov/mirror/internal/checker" +<<<<<<< HEAD var ( MaphashFunctions = []checker.Violation{ { // maphash.Bytes @@ -65,3 +66,37 @@ var ( }, } ) +======= +var MaphashMethods = []checker.Violation{ + { // (*hash/maphash).Write + Targets: checker.Bytes, + Type: checker.Method, + Package: "hash/maphash", + Struct: "Hash", + Caller: "Write", + Args: []int{0}, + AltCaller: "WriteString", + + Generate: &checker.Generate{ + PreCondition: `h := maphash.Hash{}`, + Pattern: `Write($0)`, + Returns: []string{"int", "error"}, + }, + }, + { // (*hash/maphash).WriteString + Targets: checker.Strings, + Type: checker.Method, + Package: "hash/maphash", + Struct: "Hash", + Caller: "WriteString", + Args: []int{0}, + AltCaller: "Write", + + Generate: &checker.Generate{ + PreCondition: `h := maphash.Hash{}`, + Pattern: `WriteString($0)`, + Returns: []string{"int", "error"}, + }, + }, +} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/butuzov/mirror/internal/checker/checker.go b/vendor/github.com/butuzov/mirror/internal/checker/checker.go index fb9ba41729..cddacf12ea 100644 --- a/vendor/github.com/butuzov/mirror/internal/checker/checker.go +++ b/vendor/github.com/butuzov/mirror/internal/checker/checker.go @@ -9,12 +9,20 @@ import ( "strings" ) +<<<<<<< HEAD // Checker will perform standard check on package and its methods. +======= +// Checker will perform standart check on package and its methods. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type Checker struct { Violations []Violation // List of available violations Packages map[string][]int // Storing indexes of Violations per pkg/kg.Struct Type func(ast.Expr) string // Type Checker closure. +<<<<<<< HEAD Print func(ast.Node) []byte // String representation of the expression. +======= + Print func(ast.Node) []byte // String representation of the expresion. 
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func New(violations ...[]Violation) Checker { @@ -76,7 +84,11 @@ func (c *Checker) Handle(v *Violation, ce *ast.CallExpr) (map[int]ast.Expr, bool continue } +<<<<<<< HEAD // is it conversion call +======= + // is it convertsion call +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !c.callConverts(call) { continue } diff --git a/vendor/github.com/butuzov/mirror/internal/checker/violation.go b/vendor/github.com/butuzov/mirror/internal/checker/violation.go index c2c1492086..1827c30472 100644 --- a/vendor/github.com/butuzov/mirror/internal/checker/violation.go +++ b/vendor/github.com/butuzov/mirror/internal/checker/violation.go @@ -28,7 +28,11 @@ const ( UntypedRune string = "untyped rune" ) +<<<<<<< HEAD // Violation describes what message we going to give to a particular code violation +======= +// Violation describs what message we going to give to a particular code violation +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type Violation struct { Type ViolationType // Args []int // Indexes of the arguments needs to be checked @@ -143,7 +147,11 @@ func (v *Violation) Diagnostic(fSet *token.FileSet) analysis.Diagnostic { v.AltPackage = v.Package } +<<<<<<< HEAD // Hooray! we don't need to change package and redo imports. +======= + // Hooray! we dont need to change package and redo imports. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if v.Type == Function && v.AltPackage == v.Package && noNl { diagnostic.SuggestedFixes = []analysis.SuggestedFix{{ Message: "Fix Issue With", @@ -166,7 +174,11 @@ type GolangIssue struct { Original string } +<<<<<<< HEAD // Issue intended to be used only within `golangci-lint`, but you can use it +======= +// Issue intended to be used only within `golangci-lint`, bu you can use use it +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // alongside Diagnostic if you wish. func (v *Violation) Issue(fSet *token.FileSet) GolangIssue { issue := GolangIssue{ diff --git a/vendor/github.com/butuzov/mirror/readme.md b/vendor/github.com/butuzov/mirror/readme.md index f5cfa47a68..c604d5e6cb 100644 --- a/vendor/github.com/butuzov/mirror/readme.md +++ b/vendor/github.com/butuzov/mirror/readme.md @@ -2,6 +2,7 @@ `mirror` suggests use of alternative functions/methods in order to gain performance boosts by avoiding unnecessary `[]byte/string` conversion calls. See [MIRROR_FUNCS.md](MIRROR_FUNCS.md) list of mirror functions you can use in go's stdlib. +<<<<<<< HEAD --- [![United 24](https://raw.githubusercontent.com/vshymanskyy/StandWithUkraine/main/banner-personal-page.svg)](https://u24.gov.ua/) @@ -9,6 +10,8 @@ --- +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## Linter Use Cases ### `github.com/argoproj/argo-cd` @@ -93,13 +96,21 @@ util/cert/cert.go:82:10: avoid allocations with (*regexp.Regexp).MatchString (mi - flag `--tests` (e.g. `--tests=false`) - flag `--skip-files` (e.g. 
`--skip-files="_test.go"`) +<<<<<<< HEAD - yaml configuration `run.skip-files`: +======= + - yaml confguration `run.skip-files`: +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ```yaml run: skip-files: - '(.+)_test\.go' ``` +<<<<<<< HEAD - yaml configuration `issues.exclude-rules`: +======= + - yaml confguration `issues.exclude-rules`: +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ```yaml issues: exclude-rules: @@ -113,7 +124,11 @@ util/cert/cert.go:82:10: avoid allocations with (*regexp.Regexp).MatchString (mi ```shell # Update Assets (testdata/(strings|bytes|os|utf8|maphash|regexp|bufio).go) +<<<<<<< HEAD (task|make) generate +======= +(task|make) generated +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) # Run Tests (task|make) tests # Lint Code diff --git a/vendor/github.com/ckaznocha/intrange/.golangci.yml b/vendor/github.com/ckaznocha/intrange/.golangci.yml index b240f85ce9..f78118874e 100644 --- a/vendor/github.com/ckaznocha/intrange/.golangci.yml +++ b/vendor/github.com/ckaznocha/intrange/.golangci.yml @@ -1,9 +1,13 @@ linters-settings: gci: +<<<<<<< HEAD sections: - standard - default - localmodule +======= + local-prefixes: github.com/ckaznocha/intrange +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) gocritic: enabled-tags: - diagnostic @@ -13,7 +17,14 @@ linters-settings: - style goimports: local-prefixes: github.com/ckaznocha/intrange +<<<<<<< HEAD + govet: +======= + golint: + min-confidence: 0 govet: + check-shadowing: true +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) enable: - asmdecl - assign @@ -24,7 +35,10 @@ linters-settings: - cgocall - composite - copylock +<<<<<<< HEAD - copyloopvar +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) - deepequalerrors - errorsas - fieldalignment @@ -58,16 +72,28 @@ linters: - dupl - errcheck - errorlint +<<<<<<< HEAD +======= + - exportloopref +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) - gci - gochecknoinits - goconst - gocritic - godot - godox +<<<<<<< HEAD - err113 - gofmt - gofumpt - goimports +======= + - goerr113 + - gofmt + - gofumpt + - goimports + - gomnd +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) - goprintffuncname - gosec - gosimple @@ -93,6 +119,11 @@ linters: - wastedassign - whitespace - wsl +<<<<<<< HEAD issues: exclude-dirs: +======= +run: + skip-dirs: +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) - testdata/ diff --git a/vendor/github.com/ckaznocha/intrange/intrange.go b/vendor/github.com/ckaznocha/intrange/intrange.go index 229c847d5a..56c3569273 100644 --- a/vendor/github.com/ckaznocha/intrange/intrange.go +++ b/vendor/github.com/ckaznocha/intrange/intrange.go @@ -79,8 +79,11 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) { return } +<<<<<<< HEAD initAssign := init.Tok == token.ASSIGN +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if len(init.Lhs) != 1 || len(init.Rhs) != 1 { return } @@ -99,6 +102,7 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) { return } +<<<<<<< HEAD var ( operand ast.Expr hasEquivalentOperator bool @@ -106,6 +110,18 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) { switch cond.Op { case token.LSS, token.LEQ: // ;i < n; || ;i <= n; +======= + var nExpr ast.Expr + + switch cond.Op { + case token.LSS: // ;i < n; + if isBenchmark(cond.Y) { + return + } + + nExpr = findNExpr(cond.Y) + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) x, ok := cond.X.(*ast.Ident) if !ok { return @@ -114,10 +130,20 @@ func 
checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) { if x.Name != initIdent.Name { return } +<<<<<<< HEAD hasEquivalentOperator = cond.Op == token.LEQ operand = cond.Y case token.GTR, token.GEQ: // ;n > i; || ;n >= i; +======= + case token.GTR: // ;n > i; + if isBenchmark(cond.X) { + return + } + + nExpr = findNExpr(cond.X) + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) y, ok := cond.Y.(*ast.Ident) if !ok { return @@ -126,9 +152,12 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) { if y.Name != initIdent.Name { return } +<<<<<<< HEAD hasEquivalentOperator = cond.Op == token.GEQ operand = cond.X +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: return } @@ -227,7 +256,11 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) { bc := &bodyChecker{ initIdent: initIdent, +<<<<<<< HEAD nExpr: findNExpr(operand), +======= + nExpr: nExpr, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } ast.Inspect(forStmt.Body, bc.check) @@ -236,6 +269,7 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) { return } +<<<<<<< HEAD if initAssign { pass.Report(analysis.Diagnostic{ Pos: forStmt.Pos(), @@ -280,6 +314,11 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) { }, }, }, +======= + pass.Report(analysis.Diagnostic{ + Pos: forStmt.Pos(), + Message: msg, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }) } @@ -403,6 +442,7 @@ func findNExpr(expr ast.Expr) ast.Expr { } } +<<<<<<< HEAD func recursiveOperandToString( expr ast.Expr, incrementInt bool, @@ -442,6 +482,28 @@ func recursiveOperandToString( default: return "" } +======= +func isBenchmark(expr ast.Expr) bool { + selectorExpr, ok := expr.(*ast.SelectorExpr) + if !ok { + return false + } + + if selectorExpr.Sel.Name != "N" { + return false + } + + ident, ok := selectorExpr.X.(*ast.Ident) + if !ok { + return false + } + + if ident.Name == "b" { + return true + } + + return false +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func identEqual(a, b ast.Expr) bool { @@ -487,7 +549,10 @@ type bodyChecker struct { initIdent *ast.Ident nExpr ast.Expr modified bool +<<<<<<< HEAD accessed bool +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (b *bodyChecker) check(n ast.Node) bool { @@ -506,15 +571,19 @@ func (b *bodyChecker) check(n ast.Node) bool { return false } +<<<<<<< HEAD case *ast.Ident: if identEqual(stmt, b.initIdent) { b.accessed = true } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return true } +<<<<<<< HEAD func isNumberLit(exp ast.Expr) bool { switch lit := exp.(type) { case *ast.BasicLit: @@ -555,6 +624,8 @@ func isNumberLit(exp ast.Expr) bool { } } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func compareNumberLit(exp ast.Expr, val int) bool { switch lit := exp.(type) { case *ast.BasicLit: @@ -601,6 +672,7 @@ func compareNumberLit(exp ast.Expr, val int) bool { return false } } +<<<<<<< HEAD func operandToString( pass *analysis.Pass, @@ -625,3 +697,5 @@ func operandToString( return t.String() + "(" + s + ")" } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go index 6aba0ef1f6..972c725ca4 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go @@ -436,8 +436,14 @@ func importTar(in io.ReaderAt) (*tarFile, 
error) { if err != nil { if err == io.EOF { break +<<<<<<< HEAD } return nil, fmt.Errorf("failed to parse tar file, %w", err) +======= + } else { + return nil, fmt.Errorf("failed to parse tar file, %w", err) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } switch cleanEntryName(h.Name) { case PrefetchLandmark, NoPrefetchLandmark: diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go index ba650b4d1d..6476d5f02a 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go @@ -26,13 +26,20 @@ import ( "archive/tar" "bytes" "compress/gzip" +<<<<<<< HEAD "crypto/rand" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "crypto/sha256" "encoding/json" "errors" "fmt" "io" +<<<<<<< HEAD "math/big" +======= + "math/rand" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "os" "path/filepath" "reflect" @@ -46,6 +53,13 @@ import ( digest "github.com/opencontainers/go-digest" ) +<<<<<<< HEAD +======= +func init() { + rand.Seed(time.Now().UnixNano()) +} + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // TestingController is Compression with some helper methods necessary for testing. type TestingController interface { Compression @@ -917,11 +931,17 @@ func checkVerifyInvalidTOCEntryFail(filename string) check { } if sampleEntry == nil { t.Fatalf("TOC must contain at least one regfile or chunk entry other than the rewrite target") +<<<<<<< HEAD return } if targetEntry == nil { t.Fatalf("rewrite target not found") return +======= + } + if targetEntry == nil { + t.Fatalf("rewrite target not found") +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } targetEntry.Offset = sampleEntry.Offset }, @@ -2290,11 +2310,15 @@ var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWX func randomContents(n int) string { b := make([]rune, n) for i := range b { +<<<<<<< HEAD bi, err := rand.Int(rand.Reader, big.NewInt(int64(len(runes)))) if err != nil { panic(err) } b[i] = runes[int(bi.Int64())] +======= + b[i] = runes[rand.Intn(len(runes))] +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return string(b) } diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go b/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go index f6a7ea8a58..dae389f98d 100644 --- a/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go +++ b/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go @@ -154,6 +154,7 @@ var supportedAlgorithms = map[string]bool{ EdDSA: true, } +<<<<<<< HEAD // ProviderConfig allows direct creation of a [Provider] from metadata // configuration. This is intended for interop with providers that don't support // discovery, or host the JSON discovery document at an off-spec path. @@ -178,10 +179,15 @@ var supportedAlgorithms = map[string]bool{ // For providers that implement discovery, use [NewProvider] instead. // // See: https://openid.net/specs/openid-connect-discovery-1_0.html +======= +// ProviderConfig allows creating providers when discovery isn't supported. It's +// generally easier to use NewProvider directly. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type ProviderConfig struct { // IssuerURL is the identity of the provider, and the string it uses to sign // ID tokens with. For example "https://accounts.google.com". This value MUST // match ID tokens exactly. 
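Back in the estargz testutil hunk, HEAD swaps math/rand (seeded in an init function) for crypto/rand with math/big, which needs no seeding at all. A condensed, runnable sketch of the HEAD approach, with a shortened rune set:

```go
package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
)

var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyz")

// randomContents mirrors the HEAD side of the hunk: each index is drawn
// uniformly from [0, len(runes)) via crypto/rand, so there is no Seed call.
func randomContents(n int) string {
	b := make([]rune, n)
	for i := range b {
		bi, err := rand.Int(rand.Reader, big.NewInt(int64(len(runes))))
		if err != nil {
			panic(err)
		}
		b[i] = runes[int(bi.Int64())]
	}
	return string(b)
}

func main() {
	fmt.Println(randomContents(16))
}
```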
+<<<<<<< HEAD IssuerURL string `json:"issuer"` // AuthURL is the endpoint used by the provider to support the OAuth 2.0 // authorization endpoint. @@ -192,27 +198,54 @@ type ProviderConfig struct { // DeviceAuthURL is the endpoint used by the provider to support the OAuth 2.0 // device authorization endpoint. DeviceAuthURL string `json:"device_authorization_endpoint"` +======= + IssuerURL string + // AuthURL is the endpoint used by the provider to support the OAuth 2.0 + // authorization endpoint. + AuthURL string + // TokenURL is the endpoint used by the provider to support the OAuth 2.0 + // token endpoint. + TokenURL string + // DeviceAuthURL is the endpoint used by the provider to support the OAuth 2.0 + // device authorization endpoint. + DeviceAuthURL string +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // UserInfoURL is the endpoint used by the provider to support the OpenID // Connect UserInfo flow. // // https://openid.net/specs/openid-connect-core-1_0.html#UserInfo +<<<<<<< HEAD UserInfoURL string `json:"userinfo_endpoint"` // JWKSURL is the endpoint used by the provider to advertise public keys to // verify issued ID tokens. This endpoint is polled as new keys are made // available. JWKSURL string `json:"jwks_uri"` +======= + UserInfoURL string + // JWKSURL is the endpoint used by the provider to advertise public keys to + // verify issued ID tokens. This endpoint is polled as new keys are made + // available. + JWKSURL string +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Algorithms, if provided, indicate a list of JWT algorithms allowed to sign // ID tokens. If not provided, this defaults to the algorithms advertised by // the JWK endpoint, then the set of algorithms supported by this package. +<<<<<<< HEAD Algorithms []string `json:"id_token_signing_alg_values_supported"` +======= + Algorithms []string +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // NewProvider initializes a provider from a set of endpoints, rather than // through discovery. +<<<<<<< HEAD // // The provided context is only used for [http.Client] configuration through // [ClientContext], not cancelation. +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (p *ProviderConfig) NewProvider(ctx context.Context) *Provider { return &Provider{ issuer: p.IssuerURL, @@ -227,6 +260,7 @@ func (p *ProviderConfig) NewProvider(ctx context.Context) *Provider { } // NewProvider uses the OpenID Connect discovery mechanism to construct a Provider. +<<<<<<< HEAD // The issuer is the URL identifier for the service. For example: "https://accounts.google.com" // or "https://login.salesforce.com". // @@ -235,6 +269,11 @@ func (p *ProviderConfig) NewProvider(ctx context.Context) *Provider { // should use [ProviderConfig] instead. // // See: https://openid.net/specs/openid-connect-discovery-1_0.html +======= +// +// The issuer is the URL identifier for the service. For example: "https://accounts.google.com" +// or "https://login.salesforce.com". 
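Both doc-comment variants in this hunk describe the same escape hatch: building a Provider for an issuer that does not support discovery. A sketch using the fields shown above; the endpoint URLs and client ID are placeholders:

```go
package main

import (
	"context"
	"fmt"

	"github.com/coreos/go-oidc/v3/oidc"
)

func main() {
	// Manual configuration for an issuer that doesn't serve
	// /.well-known/openid-configuration (all URLs are placeholders).
	cfg := oidc.ProviderConfig{
		IssuerURL: "https://issuer.example.com",
		AuthURL:   "https://issuer.example.com/authorize",
		TokenURL:  "https://issuer.example.com/token",
		JWKSURL:   "https://issuer.example.com/keys",
	}
	provider := cfg.NewProvider(context.Background())

	// The resulting *Provider behaves like one from oidc.NewProvider.
	verifier := provider.Verifier(&oidc.Config{ClientID: "my-client"})
	fmt.Println(verifier != nil)
}
```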
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func NewProvider(ctx context.Context, issuer string) (*Provider, error) { wellKnown := strings.TrimSuffix(issuer, "/") + "/.well-known/openid-configuration" req, err := http.NewRequest("GET", wellKnown, nil) diff --git a/vendor/github.com/curioswitch/go-reassign/.golangci.yml b/vendor/github.com/curioswitch/go-reassign/.golangci.yml index fdf0bb2f22..0354537103 100644 --- a/vendor/github.com/curioswitch/go-reassign/.golangci.yml +++ b/vendor/github.com/curioswitch/go-reassign/.golangci.yml @@ -5,12 +5,23 @@ linters: - bodyclose - decorder - durationcheck +<<<<<<< HEAD - err113 - errchkjson - errname - errorlint - exhaustive - gocritic +======= + - errchkjson + - errname + - errorlint + - execinquery + - exhaustive + - exportloopref + - gocritic + - goerr113 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) - gofmt - goimports - goprintffuncname @@ -18,6 +29,10 @@ linters: - importas - misspell - nolintlint +<<<<<<< HEAD +======= + - nosnakecase +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) - prealloc - predeclared - promlinter diff --git a/vendor/github.com/curioswitch/go-reassign/README.md b/vendor/github.com/curioswitch/go-reassign/README.md index 190756f928..6e01aea4fd 100644 --- a/vendor/github.com/curioswitch/go-reassign/README.md +++ b/vendor/github.com/curioswitch/go-reassign/README.md @@ -47,8 +47,15 @@ Package variable reassignment is generally confusing, though, and we recommend a The `pattern` flag can be set to a regular expression to define what variables cannot be reassigned, and `.*` is recommended if it works with your code. +<<<<<<< HEAD ## Development [mage](https://magefile.org/) is used for development. Run `go run mage.go -l` to see available targets. For example, to run checks before sending a PR, run `go run mage.go check`. +======= +## Limitations + +If a variable shadows the name of an import, an assignment of a field in the variable will trigger the linter. Shadowing +can be confusing, so it's recommended to rename the variable. 
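The go-reassign analyzer hunk that follows implements the check the README describes; the case it exists for fits in one line. A minimal reproduction of what it reports:

```go
package main

import "io"

func main() {
	// What go-reassign reports: assigning to another package's exported
	// variable, which silently changes behaviour for every importer.
	io.EOF = nil // "reassigning variable EOF in other package io"
}
```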
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/curioswitch/go-reassign/internal/analyzer/analyzer.go b/vendor/github.com/curioswitch/go-reassign/internal/analyzer/analyzer.go index c2a29c5299..21b22579a6 100644 --- a/vendor/github.com/curioswitch/go-reassign/internal/analyzer/analyzer.go +++ b/vendor/github.com/curioswitch/go-reassign/internal/analyzer/analyzer.go @@ -48,11 +48,19 @@ func run(pass *analysis.Pass) (interface{}, error) { func reportImported(pass *analysis.Pass, expr ast.Expr, checkRE *regexp.Regexp, prefix string) { switch x := expr.(type) { case *ast.SelectorExpr: +<<<<<<< HEAD +======= + if !checkRE.MatchString(x.Sel.Name) { + return + } + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) selectIdent, ok := x.X.(*ast.Ident) if !ok { return } +<<<<<<< HEAD var pkgPath string if selectObj, ok := pass.TypesInfo.Uses[selectIdent]; ok { pkg, ok := selectObj.(*types.PkgName) @@ -77,6 +85,16 @@ func reportImported(pass *analysis.Pass, expr ast.Expr, checkRE *regexp.Regexp, if matches { pass.Reportf(expr.Pos(), "%s variable %s in other package %s", prefix, x.Sel.Name, selectIdent.Name) } +======= + if selectObj, ok := pass.TypesInfo.Uses[selectIdent]; ok { + if pkg, ok := selectObj.(*types.PkgName); !ok || pkg.Imported() == pass.Pkg { + return + } + } + + pass.Reportf(expr.Pos(), "%s variable %s in other package %s", prefix, x.Sel.Name, selectIdent.Name) + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) case *ast.Ident: use, ok := pass.TypesInfo.Uses[x].(*types.Var) if !ok { diff --git a/vendor/github.com/docker/cli/cli/config/credentials/file_store.go b/vendor/github.com/docker/cli/cli/config/credentials/file_store.go index 9540628150..578051b86d 100644 --- a/vendor/github.com/docker/cli/cli/config/credentials/file_store.go +++ b/vendor/github.com/docker/cli/cli/config/credentials/file_store.go @@ -25,6 +25,7 @@ func NewFileStore(file store) Store { return &fileStore{file: file} } +<<<<<<< HEAD // Erase removes the given credentials from the file store.This function is // idempotent and does not update the file if credentials did not change. func (c *fileStore) Erase(serverAddress string) error { @@ -32,6 +33,10 @@ func (c *fileStore) Erase(serverAddress string) error { // nothing to do; no credentials found for the given serverAddress return nil } +======= +// Erase removes the given credentials from the file store. +func (c *fileStore) Erase(serverAddress string) error { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) delete(c.file.GetAuthConfigs(), serverAddress) return c.file.Save() } @@ -57,6 +62,7 @@ func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) { return c.file.GetAuthConfigs(), nil } +<<<<<<< HEAD // Store saves the given credentials in the file store. This function is // idempotent and does not update the file if credentials did not change. func (c *fileStore) Store(authConfig types.AuthConfig) error { @@ -65,6 +71,11 @@ func (c *fileStore) Store(authConfig types.AuthConfig) error { // Credentials didn't change, so skip updating the configuration file. return nil } +======= +// Store saves the given credentials in the file store. 
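The HEAD sides of these file_store.go hunks make Erase and Store idempotent: both now skip the file write when nothing actually changed. A sketch of that guard pattern with simplified types (not the docker/cli ones):

```go
package main

import "fmt"

type authConfig struct{ Username, Password string }

type fileStore struct {
	auths map[string]authConfig
	saves int // counts the (expensive) writes to disk
}

// store mimics the HEAD guard: only persist when the entry changed.
func (f *fileStore) store(server string, ac authConfig) {
	if old, ok := f.auths[server]; ok && old == ac {
		return // credentials didn't change; skip rewriting the file
	}
	f.auths[server] = ac
	f.saves++
}

func main() {
	fs := &fileStore{auths: map[string]authConfig{}}
	fs.store("registry.example.com", authConfig{"u", "p"})
	fs.store("registry.example.com", authConfig{"u", "p"}) // no-op
	fmt.Println(fs.saves) // 1
}
```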
+func (c *fileStore) Store(authConfig types.AuthConfig) error { + authConfigs := c.file.GetAuthConfigs() +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) authConfigs[authConfig.ServerAddress] = authConfig return c.file.Save() } diff --git a/vendor/github.com/docker/docker-credential-helpers/client/client.go b/vendor/github.com/docker/docker-credential-helpers/client/client.go index 7ca5ab7222..7d0c70584a 100644 --- a/vendor/github.com/docker/docker-credential-helpers/client/client.go +++ b/vendor/github.com/docker/docker-credential-helpers/client/client.go @@ -16,9 +16,17 @@ func isValidCredsMessage(msg string) error { if credentials.IsCredentialsMissingServerURLMessage(msg) { return credentials.NewErrCredentialsMissingServerURL() } +<<<<<<< HEAD if credentials.IsCredentialsMissingUsernameMessage(msg) { return credentials.NewErrCredentialsMissingUsername() } +======= + + if credentials.IsCredentialsMissingUsernameMessage(msg) { + return credentials.NewErrCredentialsMissingUsername() + } + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } @@ -34,10 +42,20 @@ func Store(program ProgramFunc, creds *credentials.Credentials) error { out, err := cmd.Output() if err != nil { +<<<<<<< HEAD if isValidErr := isValidCredsMessage(string(out)); isValidErr != nil { err = isValidErr } return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, strings.TrimSpace(string(out))) +======= + t := strings.TrimSpace(string(out)) + + if isValidErr := isValidCredsMessage(t); isValidErr != nil { + err = isValidErr + } + + return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, t) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return nil @@ -50,6 +68,7 @@ func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error out, err := cmd.Output() if err != nil { +<<<<<<< HEAD if credentials.IsErrCredentialsNotFoundMessage(string(out)) { return nil, credentials.NewErrCredentialsNotFound() } @@ -59,6 +78,19 @@ func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error } return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, strings.TrimSpace(string(out))) +======= + t := strings.TrimSpace(string(out)) + + if credentials.IsErrCredentialsNotFoundMessage(t) { + return nil, credentials.NewErrCredentialsNotFound() + } + + if isValidErr := isValidCredsMessage(t); isValidErr != nil { + err = isValidErr + } + + return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, t) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } resp := &credentials.Credentials{ diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go index 2283d5a44c..f710703480 100644 --- a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go +++ b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go @@ -1,9 +1,13 @@ package credentials +<<<<<<< HEAD import ( "errors" "strings" ) +======= +import "errors" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) const ( // ErrCredentialsNotFound standardizes the not found error, so every helper returns @@ -50,7 +54,11 @@ func IsErrCredentialsNotFound(err error) bool { // This function helps to check messages returned by an // external program via its standard output. 
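The error.go hunks that follow all add a strings.TrimSpace before comparing helper output against the sentinel messages; a helper binary that prints a trailing newline would otherwise never match. A self-contained sketch (the sentinel value is assumed from the same file):

```go
package main

import (
	"fmt"
	"strings"
)

// Assumed sentinel from credentials/error.go.
const errCredentialsNotFoundMessage = "credentials not found in native keychain"

// Old comparison: exact equality.
func oldMatch(out string) bool { return out == errCredentialsNotFoundMessage }

// HEAD comparison: tolerate surrounding whitespace, such as the trailing
// newline most CLI helpers print.
func newMatch(out string) bool {
	return strings.TrimSpace(out) == errCredentialsNotFoundMessage
}

func main() {
	out := errCredentialsNotFoundMessage + "\n" // typical helper output
	fmt.Println(oldMatch(out), newMatch(out))   // false true
}
```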
func IsErrCredentialsNotFoundMessage(err string) bool { +<<<<<<< HEAD return strings.TrimSpace(err) == errCredentialsNotFoundMessage +======= + return err == errCredentialsNotFoundMessage +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // errCredentialsMissingServerURL represents an error raised @@ -107,7 +115,11 @@ func IsCredentialsMissingServerURL(err error) bool { // IsCredentialsMissingServerURLMessage checks for an // errCredentialsMissingServerURL in the error message. func IsCredentialsMissingServerURLMessage(err string) bool { +<<<<<<< HEAD return strings.TrimSpace(err) == errCredentialsMissingServerURLMessage +======= + return err == errCredentialsMissingServerURLMessage +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // IsCredentialsMissingUsername returns true if the error @@ -120,5 +132,9 @@ func IsCredentialsMissingUsername(err error) bool { // IsCredentialsMissingUsernameMessage checks for an // errCredentialsMissingUsername in the error message. func IsCredentialsMissingUsernameMessage(err string) bool { +<<<<<<< HEAD return strings.TrimSpace(err) == errCredentialsMissingUsernameMessage +======= + return err == errCredentialsMissingUsernameMessage +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/edwarnicke/gitoid/.gitignore b/vendor/github.com/edwarnicke/gitoid/.gitignore new file mode 100644 index 0000000000..f3a1246a32 --- /dev/null +++ b/vendor/github.com/edwarnicke/gitoid/.gitignore @@ -0,0 +1,24 @@ +# If you prefer the allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work + +# Goland files +.idea/ diff --git a/vendor/github.com/edwarnicke/gitoid/LICENSE b/vendor/github.com/edwarnicke/gitoid/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/edwarnicke/gitoid/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/edwarnicke/gitoid/README.md b/vendor/github.com/edwarnicke/gitoid/README.md
new file mode 100644
index 0000000000..a4e8dae360
--- /dev/null
+++ b/vendor/github.com/edwarnicke/gitoid/README.md
@@ -0,0 +1,111 @@
+gitoid provides a simple library to compute gitoids (git object ids).
+
+## Creating GitOIDs
+
+### Default Usage
+By default it produces gitoids for git object type blob using sha1:
+
+```go
+var reader io.Reader
+gitoidHash, err := gitoid.New(reader)
+fmt.Println(gitoidHash)
+// Output: 261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64
+fmt.Println(gitoidHash.URI())
+// Output: gitoid:blob:sha1:261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64
+```
+
+### GitOid from string or []byte
+
+It's simple to compute the gitoid from a string or []byte by using bytes.NewBuffer:
+
+```go
+input := []byte("example")
+gitoidHash, _ := gitoid.New(bytes.NewBuffer(input))
+fmt.Println(gitoidHash)
+// Output: 96236f8158b12701d5e75c14fb876c4a0f31b963
+fmt.Println(gitoidHash.URI())
+// Output: gitoid:blob:sha1:96236f8158b12701d5e75c14fb876c4a0f31b963
+```
+
+### GitOID from URIs
+
+GitOIDs can be represented as a [gitoid uri](https://www.iana.org/assignments/uri-schemes/prov/gitoid).
+
+```go
+gitoidHash, _ := gitoid.FromURI("gitoid:blob:sha1:96236f8158b12701d5e75c14fb876c4a0f31b963")
+fmt.Println(gitoidHash)
+// Output: 96236f8158b12701d5e75c14fb876c4a0f31b963
+fmt.Println(gitoidHash.URI())
+// Output: gitoid:blob:sha1:96236f8158b12701d5e75c14fb876c4a0f31b963
+```
+
+## Variations on GitOIDs
+
+### SHA256 gitoids
+
+Git defaults to computing gitoids with sha1, but sha256 gitoids are also supported. Select sha256 using
+an Option:
+
+```go
+var reader io.Reader
+gitoidHash, err := gitoid.New(reader, gitoid.WithSha256())
+fmt.Println(gitoidHash)
+// Output: ed43975fbdc3084195eb94723b5f6df44eeeed1cdda7db0c7121edf5d84569ab
+fmt.Println(gitoidHash.URI())
+// Output: gitoid:blob:sha256:ed43975fbdc3084195eb94723b5f6df44eeeed1cdda7db0c7121edf5d84569ab
+```
+
+### Other git object types
+
+git has four object types: blob, tree, commit, tag. By default gitoid uses object type blob.
+You may optionally specify another object type using an Option:
+
+```go
+var reader io.Reader
+gitoidHash, err := gitoid.New(reader, gitoid.WithGitObjectType(gitoid.COMMIT))
+```
+
+### Assert ContentLength
+
+git object ids consist of a hash over a header followed by the file contents. The header contains the length of the file
+contents. By default, gitoid simply copies the reader into a buffer to establish the contentLength needed to compute the header.
+
+If you wish to assert the contentLength yourself, you may do so with an Option:
+
+```go
+var reader io.Reader
+var contentLength int64
+gitoidHash, _ := gitoid.New(reader, gitoid.WithContentLength(contentLength))
+fmt.Println(gitoidHash)
+// Output: 261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64
+```
+
+gitoid will read the first contentLength bytes from the provided reader. If the reader is unable to provide
+contentLength bytes, gitoid.New returns an error wrapping io.ErrUnexpectedEOF.
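+
+A minimal sketch of catching that error (the declared length of 10 here deliberately exceeds the
+7 bytes the reader can supply):
+
+```go
+reader := strings.NewReader("example") // only 7 bytes available
+_, err := gitoid.New(reader, gitoid.WithContentLength(10))
+if errors.Is(err, io.ErrUnexpectedEOF) {
+	// the reader ran out of data before the asserted contentLength was read
+}
+```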
+
+## Using GitOIDs
+
+### Match contents to a GitOID
+
+```go
+var reader io.Reader
+var gitoidHash *gitoid.GitOID
+if gitoidHash.Match(reader) {
+	fmt.Println("matched")
+}
+```
+
+### Find files that match GitOID
+
+```go
+var path1 fs.FS = os.DirFS("./relative/path")
+var path2 fs.FS = os.DirFS("/absolute/path")
+var gitoidHash *gitoid.GitOID
+
+// Find a file in path1 and path2 that matches gitoidHash
+file := gitoidHash.Find(path1, path2)
+
+// Find all files in path1 and path2 that match gitoidHash
+files := gitoidHash.FindAll(path1, path2)
+```
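+
+### Putting it together
+
+A small end-to-end sketch (the file path here is illustrative): compute the gitoid of a file on
+disk and print its URI. Since gitoid.New buffers the reader when no contentLength is asserted,
+passing the file's size via WithContentLength avoids the extra copy:
+
+```go
+f, err := os.Open("./some/file")
+if err != nil {
+	// handle the error
+}
+defer f.Close()
+
+fi, err := f.Stat()
+if err != nil {
+	// handle the error
+}
+
+gitoidHash, err := gitoid.New(f, gitoid.WithContentLength(fi.Size()))
+if err != nil {
+	// handle the error
+}
+fmt.Println(gitoidHash.URI())
+```
+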
diff --git a/vendor/github.com/edwarnicke/gitoid/gitoid.go b/vendor/github.com/edwarnicke/gitoid/gitoid.go
new file mode 100644
index 0000000000..12d5168b46
--- /dev/null
+++ b/vendor/github.com/edwarnicke/gitoid/gitoid.go
@@ -0,0 +1,215 @@
+// Copyright (c) 2022 Cisco and/or its affiliates.
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at:
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gitoid
+
+import (
+	"bytes"
+	"crypto/sha1" // #nosec G505
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"io/fs"
+	"strings"
+)
+
+// GitObjectType - the type of a git object; current values are "blob", "commit", "tag", "tree".
+type GitObjectType string
+
+const (
+	BLOB   GitObjectType = "blob"
+	COMMIT GitObjectType = "commit"
+	TAG    GitObjectType = "tag"
+	TREE   GitObjectType = "tree"
+)
+
+// ErrMayNotBeNil - returned when a required argument is nil.
+var ErrMayNotBeNil = errors.New("may not be nil")
+
+// ErrInvalidGitOIDURI - returned by FromURI for a malformed gitoid uri.
+var ErrInvalidGitOIDURI = errors.New("invalid uri in gitoid.FromURI")
+
+// GitOID - a git object id: the git object type, the hash name, and the raw hash value.
+type GitOID struct {
+	gitObjectType GitObjectType
+	hashName      string
+	hashValue     []byte
+}
+
+// New - create a new GitOID.
+// By default the git object type is "blob" and the hash is sha1.
+func New(reader io.Reader, opts ...Option) (*GitOID, error) {
+	if reader == nil {
+		return nil, fmt.Errorf("reader in gitoid.New: %w", ErrMayNotBeNil)
+	}
+
+	o := &option{
+		gitObjectType: BLOB,
+		/* #nosec G401 */
+		h:             sha1.New(),
+		hashName:      "sha1",
+		contentLength: 0,
+	}
+
+	for _, opt := range opts {
+		opt(o)
+	}
+
+	// If there is no declared o.contentLength, copy the entire reader into a buffer so we can
+	// compute the contentLength.
+	if o.contentLength == 0 {
+		buf := bytes.NewBuffer(nil)
+
+		contentLength, err := io.Copy(buf, reader)
+		if err != nil {
+			return nil, fmt.Errorf("error copying reader to buffer in gitoid.New: %w", err)
+		}
+
+		reader = buf
+		o.contentLength = contentLength
+	}
+
+	// Write the git object header.
+	o.h.Write(Header(o.gitObjectType, o.contentLength))
+
+	// Copy the reader to the hash.
+	n, err := io.Copy(o.h, io.LimitReader(reader, o.contentLength))
+	if err != nil {
+		return nil, fmt.Errorf("error copying reader to hash.Hash.Writer in gitoid.New: %w", err)
+	}
+
+	if n < o.contentLength {
+		return nil, fmt.Errorf("actual contentLength (%d) is less than expected contentLength (%d) in gitoid.New: %w", n, o.contentLength, io.ErrUnexpectedEOF)
+	}
+
+	return &GitOID{
+		gitObjectType: o.gitObjectType,
+		hashName:      o.hashName,
+		hashValue:     o.h.Sum(nil),
+	}, nil
+}
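+
+// A gitoid is the hash of a short header followed by the content itself:
+//
+//	<git object type> <contentLength in decimal>\0<content>
+//
+// so, for example, the default blob gitoid of the 7-byte input "example"
+// is equivalent to (illustrative):
+//
+//	sha1.Sum([]byte("blob 7\x00example"))
+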
+// Header - returns the git object header derived from the gitObjectType and contentLength.
+func Header(gitObjectType GitObjectType, contentLength int64) []byte {
+	return []byte(fmt.Sprintf("%s %d\000", gitObjectType, contentLength))
+}
+
+// String - returns the gitoid in lowercase hex.
+func (g *GitOID) String() string {
+	return fmt.Sprintf("%x", g.hashValue)
+}
+
+// URI - returns the gitoid as a URI (https://www.iana.org/assignments/uri-schemes/prov/gitoid).
+func (g *GitOID) URI() string {
+	return fmt.Sprintf("gitoid:%s:%s:%s", g.gitObjectType, g.hashName, g)
+}
+
+// Bytes - returns the raw hash value, or nil for a nil *GitOID.
+func (g *GitOID) Bytes() []byte {
+	if g == nil {
+		return nil
+	}
+
+	return g.hashValue
+}
+
+// Equal - returns true if g == x.
+func (g *GitOID) Equal(x *GitOID) bool {
+	if g == x {
+		return true
+	}
+
+	if g == nil || x == nil || g.hashName != x.hashName {
+		return false
+	}
+
+	if len(g.Bytes()) != len(x.Bytes()) {
+		return false
+	}
+
+	for i, v := range g.Bytes() {
+		if x.Bytes()[i] != v {
+			return false
+		}
+	}
+	return true
+}
+
+// FromURI - returns a *GitOID from a gitoid uri string - see https://www.iana.org/assignments/uri-schemes/prov/gitoid.
+func FromURI(uri string) (*GitOID, error) {
+	parts := strings.Split(uri, ":")
+	if len(parts) != 4 || parts[0] != "gitoid" {
+		return nil, fmt.Errorf("%w: %q in gitoid.FromURI", ErrInvalidGitOIDURI, uri)
+	}
+	hashValue, err := hex.DecodeString(parts[3])
+	if err != nil {
+		return nil, fmt.Errorf("error decoding hash value (%s) in gitoid.FromURI: %w", parts[3], err)
+	}
+	return &GitOID{
+		gitObjectType: GitObjectType(parts[1]),
+		hashName:      parts[2],
+		hashValue:     hashValue,
+	}, nil
+}
+
+// Match - returns true if the contents of reader generate a GitOID equal to g.
+func (g *GitOID) Match(reader io.Reader) bool {
+	g2, err := New(reader, WithGitObjectType(g.gitObjectType))
+	if err != nil {
+		return false
+	}
+	return g.Equal(g2)
+}
+
+// Find - returns the first fs.File in paths that matches the *GitOID g.
+func (g *GitOID) Find(paths ...fs.FS) fs.File {
+	foundFiles := g.findN(1, paths...)
+	if len(foundFiles) != 1 {
+		return nil
+	}
+	return foundFiles[0]
+}
+
+// FindAll - returns all fs.Files in paths that match the *GitOID g.
+func (g *GitOID) FindAll(paths ...fs.FS) []fs.File {
+	return g.findN(0, paths...)
+}
+
+func (g *GitOID) findN(n int, paths ...fs.FS) []fs.File {
+	var foundFiles []fs.File
+	for _, fsys := range paths {
+		_ = fs.WalkDir(fsys, ".", func(path string, d fs.DirEntry, err error) error {
+			if d == nil || d.IsDir() || err != nil {
+				//lint:ignore nilerr - returning a non-nil error would stop the walk
+				return nil
+			}
+			file, err := fsys.Open(path)
+			if err != nil {
+				//lint:ignore nilerr - returning a non-nil error would stop the walk
+				return nil
+			}
+			// Close the handle used for matching when this callback returns.
+			defer func() { _ = file.Close() }()
+			if !g.Match(file) {
+				return nil
+			}
+			// Reopen the file so the caller receives a handle positioned at the start.
+			foundFile, err := fsys.Open(path)
+			if err == nil {
+				foundFiles = append(foundFiles, foundFile)
+			}
+			if n > 0 && len(foundFiles) == n {
+				return io.EOF
+			}
+			return nil
+		})
+	}
+	return foundFiles
+}
diff --git a/vendor/github.com/edwarnicke/gitoid/options.go b/vendor/github.com/edwarnicke/gitoid/options.go
new file mode 100644
index 0000000000..f104e198b5
--- /dev/null
+++ b/vendor/github.com/edwarnicke/gitoid/options.go
@@ -0,0 +1,56 @@
+// Copyright (c) 2022 Cisco and/or its affiliates.
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at:
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gitoid
+
+import (
+	"crypto/sha256"
+	"hash"
+)
+
+type option struct {
+	gitObjectType GitObjectType
+	h             hash.Hash
+	hashName      string
+	contentLength int64
+}
+
+// Option - option for GitOID creation.
+type Option func(o *option)
+
+// WithSha256 - use sha256 for computing gitoids instead of the default sha1.
+func WithSha256() Option {
+	return func(o *option) {
+		o.hashName = "sha256"
+		o.h = sha256.New()
+	}
+}
+
+// WithGitObjectType - set the GitObjectType to a value different from the default gitoid.BLOB type.
+func WithGitObjectType(gitObjectType GitObjectType) Option {
+	return func(o *option) {
+		o.gitObjectType = gitObjectType
+	}
+}
+
+// WithContentLength - asserts the contentLength to be read from the provided reader.
+// Only the first contentLength bytes of data will be read from the reader;
+// if contentLength bytes are unavailable from the reader, an error will be returned.
+func WithContentLength(contentLength int64) Option { + return func(o *option) { + o.contentLength = contentLength + } +} diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.go index b82b06506a..8ae9c536d0 100644 --- a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.go +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.go @@ -672,7 +672,11 @@ func (x *ClusterCollection) GetEntries() *v3.CollectionEntry { } // Configuration for a single upstream cluster. +<<<<<<< HEAD // [#next-free-field: 59] +======= +// [#next-free-field: 58] +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type Cluster struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -859,6 +863,7 @@ type Cluster struct { // and :ref:`LOGICAL_DNS` // this setting is ignored. DnsRefreshRate *durationpb.Duration `protobuf:"bytes,16,opt,name=dns_refresh_rate,json=dnsRefreshRate,proto3" json:"dns_refresh_rate,omitempty"` +<<<<<<< HEAD // DNS jitter can be optionally specified if the cluster type is either // :ref:`STRICT_DNS`, // or :ref:`LOGICAL_DNS`. @@ -869,6 +874,8 @@ type Cluster struct { // and :ref:`LOGICAL_DNS` // this setting is ignored. DnsJitter *durationpb.Duration `protobuf:"bytes,58,opt,name=dns_jitter,json=dnsJitter,proto3" json:"dns_jitter,omitempty"` +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // If the DNS failure refresh rate is specified and the cluster type is either // :ref:`STRICT_DNS`, // or :ref:`LOGICAL_DNS`, @@ -1038,6 +1045,7 @@ type Cluster struct { // maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation // from the LRS stream here.] LrsServer *v32.ConfigSource `protobuf:"bytes,42,opt,name=lrs_server,json=lrsServer,proto3" json:"lrs_server,omitempty"` +<<<<<<< HEAD // A list of metric names from :ref:`ORCA load reports ` to propagate to LRS. // // If not specified, then ORCA load reports will not be propagated to LRS. @@ -1045,6 +1053,14 @@ type Cluster struct { // For map fields in the ORCA proto, the string will be of the form “.“. // For example, the string “named_metrics.foo“ will mean to look for the key “foo“ in the ORCA // :ref:`named_metrics ` field. +======= + // [#not-implemented-hide:] + // A list of metric names from ORCA load reports to propagate to LRS. + // + // For map fields in the ORCA proto, the string will be of the form “.“. + // For example, the string “named_metrics.foo“ will mean to look for the key “foo“ in the ORCA + // “named_metrics“ field. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // The special map key “*“ means to report all entries in the map (e.g., “named_metrics.*“ means to // report all entries in the ORCA named_metrics field). 
Note that this should be used only with trusted @@ -1272,6 +1288,7 @@ func (x *Cluster) GetDnsRefreshRate() *durationpb.Duration { return nil } +<<<<<<< HEAD func (x *Cluster) GetDnsJitter() *durationpb.Duration { if x != nil { return x.DnsJitter @@ -1279,6 +1296,8 @@ func (x *Cluster) GetDnsJitter() *durationpb.Duration { return nil } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (x *Cluster) GetDnsFailureRefreshRate() *Cluster_RefreshRate { if x != nil { return x.DnsFailureRefreshRate @@ -3389,7 +3408,11 @@ var file_envoy_config_cluster_v3_cluster_proto_rawDesc = []byte{ 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, +<<<<<<< HEAD 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x22, 0x90, 0x54, 0x0a, 0x07, 0x43, 0x6c, 0x75, 0x73, 0x74, +======= + 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x22, 0xd6, 0x53, 0x0a, 0x07, 0x43, 0x6c, 0x75, 0x73, 0x74, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) 0x65, 0x72, 0x12, 0x6f, 0x0a, 0x18, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x18, 0x2b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, @@ -3503,6 +3526,7 @@ var file_envoy_config_cluster_v3_cluster_proto_rawDesc = []byte{ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0xaa, 0x01, 0x06, 0x2a, 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x0e, 0x64, 0x6e, 0x73, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, +<<<<<<< HEAD 0x12, 0x38, 0x0a, 0x0a, 0x64, 0x6e, 0x73, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x3a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, @@ -3839,10 +3863,328 @@ var file_envoy_config_cluster_v3_cluster_proto_rawDesc = []byte{ 0x52, 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x69, 0x61, 0x73, 0x12, 0x5c, 0x0a, 0x11, 0x73, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, +======= + 0x12, 0x65, 0x0a, 0x18, 0x64, 0x6e, 0x73, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, + 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x2c, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, + 0x52, 0x15, 0x64, 0x6e, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x66, 0x72, + 0x65, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x65, + 0x63, 0x74, 0x5f, 0x64, 0x6e, 0x73, 0x5f, 0x74, 0x74, 0x6c, 0x18, 0x27, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x65, 0x63, 0x74, 0x44, 0x6e, 0x73, 0x54, 0x74, 0x6c, 0x12, + 0x66, 0x0a, 0x11, 0x64, 0x6e, 0x73, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x66, 0x61, + 0x6d, 0x69, 0x6c, 0x79, 0x18, 0x11, 0x20, 0x01, 
0x28, 0x0e, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x44, 0x6e, 0x73, + 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x42, 0x08, 0xfa, 0x42, + 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0f, 0x64, 0x6e, 0x73, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, + 0x70, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x4f, 0x0a, 0x0d, 0x64, 0x6e, 0x73, 0x5f, 0x72, + 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x73, 0x18, 0x12, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x42, 0x0b, 0x92, + 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x0c, 0x64, 0x6e, 0x73, 0x52, + 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x73, 0x12, 0x41, 0x0a, 0x17, 0x75, 0x73, 0x65, 0x5f, + 0x74, 0x63, 0x70, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x64, 0x6e, 0x73, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x73, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, + 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x13, 0x75, 0x73, 0x65, 0x54, 0x63, 0x70, 0x46, 0x6f, + 0x72, 0x44, 0x6e, 0x73, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x6a, 0x0a, 0x15, 0x64, + 0x6e, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x35, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, + 0x18, 0x01, 0x52, 0x13, 0x64, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x65, 0x0a, 0x19, 0x74, 0x79, 0x70, 0x65, 0x64, + 0x5f, 0x64, 0x6e, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x37, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x16, 0x74, 0x79, 0x70, 0x65, 0x64, 0x44, 0x6e, 0x73, + 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4c, + 0x0a, 0x15, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x77, 0x61, 0x72, 0x6d, 0x5f, + 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x18, 0x36, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x11, 0x77, 0x61, 0x69, 0x74, 0x46, + 0x6f, 0x72, 0x57, 0x61, 0x72, 0x6d, 0x4f, 0x6e, 0x49, 0x6e, 0x69, 0x74, 0x12, 0x56, 0x0a, 0x11, + 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 
0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x10, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x10, 0x63, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x5f, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, + 0x02, 0x2a, 0x00, 0x52, 0x0f, 0x63, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 0x6c, 0x12, 0x52, 0x0a, 0x14, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x5f, 0x62, 0x69, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x15, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x12, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x42, 0x69, + 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x59, 0x0a, 0x10, 0x6c, 0x62, 0x5f, 0x73, + 0x75, 0x62, 0x73, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x16, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x0e, 0x6c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x62, 0x0a, 0x13, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x61, 0x73, 0x68, + 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x2e, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, 0x62, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x48, 0x01, 0x52, 0x10, 0x72, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, + 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5b, 0x0a, 0x10, 0x6d, 0x61, 0x67, 0x6c, 0x65, + 0x76, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x34, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x67, 0x6c, 0x65, 0x76, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x48, 0x01, 0x52, 0x0e, 0x6d, 0x61, 0x67, 0x6c, 0x65, 0x76, 0x4c, 0x62, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6b, 0x0a, 0x16, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, + 0x5f, 0x64, 0x73, 0x74, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x22, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x44, + 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x01, 0x52, 0x13, 0x6f, 0x72, + 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x44, 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 
0x67, 0x12, 0x6e, 0x0a, 0x17, 0x6c, 0x65, 0x61, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x25, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x01, 0x52, 0x14, 0x6c, 0x65, 0x61, + 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x68, 0x0a, 0x15, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, + 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x38, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x2e, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x4c, 0x62, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x01, 0x52, 0x12, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, + 0x62, 0x69, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x59, 0x0a, 0x10, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x1b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x50, 0x0a, 0x10, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, + 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x0f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, + 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x3a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x12, 0x75, 0x0a, 0x12, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0b, 0x92, 0xc7, 0x86, + 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x11, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x72, 0x0a, 0x1b, 0x75, + 0x70, 0x73, 0x74, 0x72, 
0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x19, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x55, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x6f, 0x6e, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x1f, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x23, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x4f, 0x6e, 0x48, 0x6f, 0x73, 0x74, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x46, + 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x40, 0x0a, 0x1d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, + 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x6f, 0x6e, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, + 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x61, 0x6c, 0x18, 0x20, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x69, + 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x4f, 0x6e, 0x48, 0x6f, 0x73, + 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x61, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x73, 0x18, 0x28, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x73, 0x12, 0x60, 0x0a, 0x15, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x29, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x61, + 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x52, 0x13, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0a, 0x6c, 0x72, 0x73, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x6c, + 0x72, 0x73, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x1b, 0x6c, 0x72, 0x73, 0x5f, + 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, + 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x39, 0x20, 0x03, 0x28, 0x09, 0x52, 0x18, 0x6c, + 0x72, 0x73, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x3f, 0x0a, 0x15, 0x74, 0x72, 0x61, 0x63, 0x6b, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x75, 0x64, 0x67, 0x65, 0x74, 0x73, + 0x18, 0x2f, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 
0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, + 0x30, 0x18, 0x01, 0x52, 0x13, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x42, 0x75, 0x64, 0x67, 0x65, 0x74, 0x73, 0x12, 0x53, 0x0a, 0x0f, 0x75, 0x70, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x30, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x75, + 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5a, 0x0a, + 0x13, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x73, 0x18, 0x31, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x11, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x5e, 0x0a, 0x11, 0x70, 0x72, 0x65, + 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x32, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x10, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x58, 0x0a, 0x29, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x70, 0x65, 0x72, + 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x33, 0x20, 0x01, 0x28, 0x08, 0x52, 0x25, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x50, 0x65, 0x72, 0x44, + 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x1a, 0xe6, 0x01, 0x0a, 0x14, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, + 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x1b, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, + 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, + 0x74, 0x52, 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x50, 0x0a, 0x10, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, + 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x0f, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x3a, 0x30, 
0x9a, 0xc5, 0x88, 0x1e, + 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, + 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0x98, 0x01, 0x0a, + 0x11, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x37, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x74, 0x79, 0x70, + 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, + 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x1a, 0xa6, 0x01, 0x0a, 0x10, 0x45, 0x64, 0x73, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, 0x0a, + 0x65, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x65, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, + 0x45, 0x64, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x1a, 0xa4, 0x0a, 0x0a, 0x0e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x79, 0x0a, 0x0f, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x46, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, + 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x62, + 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0e, + 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x3e, + 0x0a, 0x0e, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x65, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, + 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x12, 0x6b, + 
0x0a, 0x10, 0x73, 0x75, 0x62, 0x73, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, + 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, + 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x0f, 0x73, 0x75, 0x62, 0x73, + 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x61, + 0x77, 0x61, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x41, 0x77, 0x61, 0x72, 0x65, 0x12, + 0x32, 0x0a, 0x15, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, + 0x73, 0x63, 0x61, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x61, 0x6e, 0x69, 0x63, 0x5f, 0x6d, 0x6f, 0x64, + 0x65, 0x5f, 0x61, 0x6e, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x70, 0x61, 0x6e, + 0x69, 0x63, 0x4d, 0x6f, 0x64, 0x65, 0x41, 0x6e, 0x79, 0x12, 0x1e, 0x0a, 0x0b, 0x6c, 0x69, 0x73, + 0x74, 0x5f, 0x61, 0x73, 0x5f, 0x61, 0x6e, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, + 0x6c, 0x69, 0x73, 0x74, 0x41, 0x73, 0x41, 0x6e, 0x79, 0x12, 0x92, 0x01, 0x0a, 0x18, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x4e, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, + 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x62, + 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x46, 0x61, + 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x08, 0xfa, 0x42, + 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x16, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0xda, + 0x03, 0x0a, 0x10, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x12, 0x33, 0x0a, 0x16, 0x73, 0x69, 0x6e, 0x67, 0x6c, + 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x65, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x48, + 0x6f, 0x73, 0x74, 0x50, 0x65, 0x72, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x12, 0x92, 0x01, 0x0a, + 0x0f, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x5f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x2e, 0x43, 0x6c, 0x75, 
0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, + 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, + 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, + 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, + 0x01, 0x52, 0x0e, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x12, 0x30, 0x0a, 0x14, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x6b, 0x65, + 0x79, 0x73, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x12, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x4b, 0x65, 0x79, 0x73, 0x53, 0x75, 0x62, + 0x73, 0x65, 0x74, 0x22, 0x79, 0x0a, 0x1e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x53, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x4f, 0x54, 0x5f, 0x44, 0x45, 0x46, + 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x4f, 0x5f, 0x46, 0x41, 0x4c, + 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x41, 0x4e, 0x59, 0x5f, 0x45, + 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x45, 0x46, + 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x53, 0x55, 0x42, 0x53, 0x45, 0x54, 0x10, 0x03, 0x12, 0x0f, 0x0a, + 0x0b, 0x4b, 0x45, 0x59, 0x53, 0x5f, 0x53, 0x55, 0x42, 0x53, 0x45, 0x54, 0x10, 0x04, 0x3a, 0x3b, + 0x9a, 0xc5, 0x88, 0x1e, 0x36, 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x62, 0x53, 0x75, + 0x62, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, + 0x73, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x22, 0x4f, 0x0a, 0x16, 0x4c, + 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x4f, 0x5f, 0x46, 0x41, 0x4c, 0x4c, + 0x42, 0x41, 0x43, 0x4b, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x41, 0x4e, 0x59, 0x5f, 0x45, 0x4e, + 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x45, 0x46, 0x41, + 0x55, 0x4c, 0x54, 0x5f, 0x53, 0x55, 0x42, 0x53, 0x45, 0x54, 0x10, 0x02, 0x22, 0x4d, 0x0a, 0x1e, + 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x18, + 0x0a, 0x14, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x4e, 0x4f, 0x5f, 0x46, 0x41, + 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x41, 0x4c, 0x4c, + 0x42, 0x41, 0x43, 0x4b, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x01, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, + 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xe3, 0x01, 0x0a, 0x0f, 0x53, 0x6c, 0x6f, 0x77, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x45, 0x0a, 0x11, 0x73, + 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x0f, 0x73, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x57, 0x69, 0x6e, 0x64, + 0x6f, 0x77, 0x12, 0x43, 0x0a, 0x0a, 0x61, 0x67, 0x67, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x0a, 0x61, 0x67, 0x67, + 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, 0x12, 0x6d, 0x69, 0x6e, 0x5f, 0x77, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x10, 0x6d, 0x69, 0x6e, + 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x1a, 0x72, 0x0a, + 0x12, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x5c, 0x0a, 0x11, 0x73, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x53, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x73, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, +<<<<<<< HEAD 0x67, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, @@ -4148,6 +4490,331 @@ var file_envoy_config_cluster_v3_cluster_proto_rawDesc = []byte{ 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +======= + 0x67, 0x1a, 0xc5, 0x02, 0x0a, 0x14, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x48, 0x0a, 0x0c, 0x63, 0x68, + 0x6f, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, + 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x02, 0x52, 0x0b, 0x63, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x53, 0x0a, 0x13, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, + 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x11, 
0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x69, 0x61, 0x73, 0x12, 0x5c, 0x0a, 0x11, 0x73, 0x6c, 0x6f, + 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x53, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x73, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x91, 0x03, 0x0a, 0x10, 0x52, 0x69, + 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x54, + 0x0a, 0x11, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, + 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x32, 0x05, 0x18, 0x80, + 0x80, 0x80, 0x04, 0x52, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x69, 0x6e, 0x67, + 0x53, 0x69, 0x7a, 0x65, 0x12, 0x6d, 0x0a, 0x0d, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x52, 0x69, + 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, + 0x61, 0x73, 0x68, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, + 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x68, 0x61, 0x73, 0x68, 0x46, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x54, 0x0a, 0x11, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x72, + 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0a, 0xfa, 0x42, + 0x07, 0x32, 0x05, 0x18, 0x80, 0x80, 0x80, 0x04, 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, + 0x6d, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x2e, 0x0a, 0x0c, 0x48, 0x61, 0x73, + 0x68, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a, 0x07, 0x58, 0x58, 0x5f, + 0x48, 0x41, 0x53, 0x48, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4d, 0x55, 0x52, 0x4d, 0x55, 0x52, + 0x5f, 0x48, 0x41, 0x53, 0x48, 0x5f, 0x32, 0x10, 0x01, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, + 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, + 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x1a, 0x59, 0x0a, + 0x0e, 0x4d, 0x61, 0x67, 0x6c, 0x65, 0x76, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x12, + 0x47, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x32, 0x05, 0x18, 0xcb, 0x96, 0xb1, 0x02, 0x52, 0x09, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x1a, 0xbf, 0x02, 0x0a, 0x13, 0x4f, 0x72, 0x69, + 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x44, 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x26, 0x0a, 0x0f, 0x75, 0x73, 0x65, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x75, 0x73, 0x65, 0x48, 0x74, + 0x74, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x10, 0x68, 0x74, 0x74, 0x70, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0e, 0x68, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x5d, 0x0a, 0x16, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x70, + 0x6f, 0x72, 0x74, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x42, 0x09, 0xfa, 0x42, 0x06, 0x2a, 0x04, 0x18, 0xff, 0xff, 0x03, 0x52, 0x14, 0x75, 0x70, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x6f, 0x72, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, + 0x65, 0x12, 0x46, 0x0a, 0x0c, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x33, + 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x52, 0x0b, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, + 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x44, + 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xd5, 0x0b, 0x0a, 0x0e, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4e, 0x0a, + 0x17, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x70, 0x61, 0x6e, 0x69, 0x63, 0x5f, 0x74, + 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, + 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x15, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x50, + 0x61, 0x6e, 0x69, 0x63, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x74, 0x0a, + 0x14, 0x7a, 0x6f, 0x6e, 0x65, 0x5f, 0x61, 0x77, 0x61, 0x72, 0x65, 0x5f, 0x6c, 0x62, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x5a, 0x6f, 0x6e, + 
0x65, 0x41, 0x77, 0x61, 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, + 0x52, 0x11, 0x7a, 0x6f, 0x6e, 0x65, 0x41, 0x77, 0x61, 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x89, 0x01, 0x0a, 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, + 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, + 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x49, 0x0a, 0x13, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x5f, + 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, + 0x65, 0x72, 0x67, 0x65, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x43, 0x0a, 0x1f, 0x69, 0x67, + 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x5f, 0x75, + 0x6e, 0x74, 0x69, 0x6c, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x68, 0x63, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x1a, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x4e, 0x65, 0x77, 0x48, 0x6f, + 0x73, 0x74, 0x73, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x46, 0x69, 0x72, 0x73, 0x74, 0x48, 0x63, 0x12, + 0x4d, 0x0a, 0x24, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x6f, 0x6e, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x74, + 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1f, 0x63, + 0x6c, 0x6f, 0x73, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4f, + 0x6e, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x65, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x8a, + 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x61, + 0x73, 0x68, 0x69, 0x6e, 0x67, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x74, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x19, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, + 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x57, 0x0a, 0x14, 0x6f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 
0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x53, 0x65, 0x74, + 0x52, 0x12, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x1a, 0x8d, 0x02, 0x0a, 0x11, 0x5a, 0x6f, 0x6e, 0x65, 0x41, 0x77, 0x61, + 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3f, 0x0a, 0x0f, 0x72, 0x6f, + 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x46, 0x0a, 0x10, 0x6d, + 0x69, 0x6e, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, + 0x69, 0x7a, 0x65, 0x12, 0x31, 0x0a, 0x15, 0x66, 0x61, 0x69, 0x6c, 0x5f, 0x74, 0x72, 0x61, 0x66, + 0x66, 0x69, 0x63, 0x5f, 0x6f, 0x6e, 0x5f, 0x70, 0x61, 0x6e, 0x69, 0x63, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x12, 0x66, 0x61, 0x69, 0x6c, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x4f, + 0x6e, 0x50, 0x61, 0x6e, 0x69, 0x63, 0x3a, 0x3c, 0x9a, 0xc5, 0x88, 0x1e, 0x37, 0x0a, 0x35, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x5a, 0x6f, 0x6e, 0x65, 0x41, 0x77, 0x61, 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x5f, 0x0a, 0x18, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, + 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x3a, 0x43, 0x9a, 0xc5, 0x88, 0x1e, 0x3e, 0x0a, 0x3c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x63, + 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xf1, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x37, 0x0a, 0x18, 0x75, 0x73, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, + 0x61, 0x6d, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x75, 0x73, 0x65, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, + 0x6d, 0x65, 0x46, 0x6f, 0x72, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x55, 0x0a, 0x13, + 0x68, 0x61, 0x73, 0x68, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, 0x61, 0x63, + 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, + 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x64, + 0x52, 0x11, 0x68, 0x61, 0x73, 0x68, 0x42, 0x61, 
0x6c, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x61, 0x63, + 0x74, 0x6f, 0x72, 0x3a, 0x44, 0x9a, 0xc5, 0x88, 0x1e, 0x3f, 0x0a, 0x3d, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, + 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, + 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x1b, 0x0a, 0x19, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x1a, 0xd2, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x61, + 0x74, 0x65, 0x12, 0x4e, 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0xaa, 0x01, 0x08, 0x08, 0x01, 0x2a, 0x04, + 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, + 0x61, 0x6c, 0x12, 0x4a, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, + 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0xaa, 0x01, 0x06, 0x2a, 0x04, 0x10, 0xc0, 0x84, + 0x3d, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x3a, 0x27, + 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x66, 0x72, + 0x65, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, 0x1a, 0x83, 0x02, 0x0a, 0x10, 0x50, 0x72, 0x65, 0x63, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x78, 0x0a, 0x1d, + 0x70, 0x65, 0x72, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x70, 0x72, 0x65, + 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x17, 0xfa, 0x42, 0x14, 0x12, 0x12, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + 0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x52, 0x1a, 0x70, 0x65, 0x72, 0x55, + 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x75, 0x0a, 0x1b, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x6f, + 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x17, 0xfa, 
0x42, 0x14, 0x12, 0x12, + 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xf0, 0x3f, 0x52, 0x19, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x74, 0x69, 0x76, 0x65, 0x50, 0x72, + 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x1a, 0x66, 0x0a, + 0x22, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x57, 0x0a, 0x0d, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, + 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x41, 0x54, 0x49, 0x43, + 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x53, 0x54, 0x52, 0x49, 0x43, 0x54, 0x5f, 0x44, 0x4e, 0x53, + 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4c, 0x4f, 0x47, 0x49, 0x43, 0x41, 0x4c, 0x5f, 0x44, 0x4e, + 0x53, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x45, 0x44, 0x53, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, + 0x4f, 0x52, 0x49, 0x47, 0x49, 0x4e, 0x41, 0x4c, 0x5f, 0x44, 0x53, 0x54, 0x10, 0x04, 0x22, 0xa4, + 0x01, 0x0a, 0x08, 0x4c, 0x62, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x0f, 0x0a, 0x0b, 0x52, + 0x4f, 0x55, 0x4e, 0x44, 0x5f, 0x52, 0x4f, 0x42, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, + 0x4c, 0x45, 0x41, 0x53, 0x54, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0x01, 0x12, + 0x0d, 0x0a, 0x09, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x10, 0x02, 0x12, 0x0a, + 0x0a, 0x06, 0x52, 0x41, 0x4e, 0x44, 0x4f, 0x4d, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x41, + 0x47, 0x4c, 0x45, 0x56, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4c, 0x55, 0x53, 0x54, 0x45, + 0x52, 0x5f, 0x50, 0x52, 0x4f, 0x56, 0x49, 0x44, 0x45, 0x44, 0x10, 0x06, 0x12, 0x20, 0x0a, 0x1c, + 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x42, 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x49, 0x4e, 0x47, 0x5f, 0x50, + 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x10, 0x07, 0x22, 0x04, + 0x08, 0x04, 0x10, 0x04, 0x2a, 0x0f, 0x4f, 0x52, 0x49, 0x47, 0x49, 0x4e, 0x41, 0x4c, 0x5f, 0x44, + 0x53, 0x54, 0x5f, 0x4c, 0x42, 0x22, 0x50, 0x0a, 0x0f, 0x44, 0x6e, 0x73, 0x4c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x55, 0x54, 0x4f, + 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x56, 0x34, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, + 0x0b, 0x0a, 0x07, 0x56, 0x36, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, + 0x56, 0x34, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x45, 0x52, 0x52, 0x45, 0x44, 0x10, 0x03, 0x12, 0x07, + 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x04, 0x22, 0x54, 0x0a, 0x18, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x53, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, + 0x47, 0x55, 0x52, 0x45, 0x44, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0x00, + 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x53, 0x45, 0x5f, 0x44, 0x4f, 0x57, 0x4e, 0x53, 0x54, 0x52, 0x45, + 0x41, 0x4d, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0x01, 0x3a, 0x1b, 0x9a, + 
0xc5, 0x88, 0x1e, 0x16, 0x0a, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x42, 0x18, 0x0a, 0x16, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x42, 0x0b, 0x0a, 0x09, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08, 0x0f, 0x10, 0x10, 0x4a, 0x04, 0x08, + 0x07, 0x10, 0x08, 0x4a, 0x04, 0x08, 0x0b, 0x10, 0x0c, 0x4a, 0x04, 0x08, 0x23, 0x10, 0x24, 0x52, + 0x05, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x52, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x52, 0x1a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, + 0xda, 0x02, 0x0a, 0x13, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4f, 0x0a, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, + 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x08, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x1a, 0xc8, 0x01, 0x0a, 0x06, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x12, 0x60, 0x0a, 0x16, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x14, 0x74, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x01, 0x10, + 0x02, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x52, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xbb, 0x05, 0x0a, + 0x19, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x47, 0x0a, 0x0d, 0x74, 0x63, + 0x70, 0x5f, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x63, 0x70, 0x4b, 0x65, 0x65, 0x70, + 0x61, 0x6c, 0x69, 0x76, 
0x65, 0x52, 0x0c, 0x74, 0x63, 0x70, 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, + 0x69, 0x76, 0x65, 0x12, 0x64, 0x0a, 0x30, 0x73, 0x65, 0x74, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, + 0x6f, 0x6e, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x2a, 0x73, + 0x65, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x4f, 0x6e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x7a, 0x0a, 0x15, 0x68, 0x61, 0x70, + 0x70, 0x79, 0x5f, 0x65, 0x79, 0x65, 0x62, 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x61, 0x70, + 0x70, 0x79, 0x45, 0x79, 0x65, 0x62, 0x61, 0x6c, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x13, 0x68, 0x61, 0x70, 0x70, 0x79, 0x45, 0x79, 0x65, 0x62, 0x61, 0x6c, 0x6c, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x89, 0x02, 0x0a, 0x13, 0x48, 0x61, 0x70, 0x70, 0x79, 0x45, + 0x79, 0x65, 0x62, 0x61, 0x6c, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x8d, 0x01, + 0x0a, 0x1c, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, + 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x4c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x55, + 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x69, 0x72, 0x73, 0x74, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x19, 0x66, 0x69, 0x72, 0x73, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x62, 0x0a, + 0x1a, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x66, + 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, + 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x17, 0x66, 0x69, 0x72, 0x73, 0x74, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x22, 0x38, 0x0a, 0x19, 0x46, 0x69, 0x72, 0x73, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0b, + 0x0a, 0x07, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x56, + 0x34, 0x10, 0x01, 0x12, 0x06, 0x0a, 0x02, 0x56, 0x36, 0x10, 0x02, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, + 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 
0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa0, 0x01, 0x0a, 0x11, 0x54, + 0x72, 0x61, 0x63, 0x6b, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, + 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x75, 0x64, 0x67, + 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x42, 0x75, 0x64, 0x67, 0x65, 0x74, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x73, 0x69, + 0x7a, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x12, + 0x2c, 0x0a, 0x12, 0x70, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x65, 0x72, + 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x42, 0x89, 0x01, + 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x25, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, + 0x0c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x48, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var ( @@ -4248,6 +4915,7 @@ var file_envoy_config_cluster_v3_cluster_proto_depIdxs = []int32{ 43, // 15: envoy.config.cluster.v3.Cluster.http2_protocol_options:type_name -> envoy.config.core.v3.Http2ProtocolOptions 27, // 16: envoy.config.cluster.v3.Cluster.typed_extension_protocol_options:type_name -> envoy.config.cluster.v3.Cluster.TypedExtensionProtocolOptionsEntry 35, // 17: envoy.config.cluster.v3.Cluster.dns_refresh_rate:type_name -> google.protobuf.Duration +<<<<<<< HEAD 35, // 18: envoy.config.cluster.v3.Cluster.dns_jitter:type_name -> google.protobuf.Duration 25, // 19: envoy.config.cluster.v3.Cluster.dns_failure_refresh_rate:type_name -> envoy.config.cluster.v3.Cluster.RefreshRate 2, // 20: envoy.config.cluster.v3.Cluster.dns_lookup_family:type_name -> envoy.config.cluster.v3.Cluster.DnsLookupFamily @@ -4322,6 +4990,81 @@ var file_envoy_config_cluster_v3_cluster_proto_depIdxs = []int32{ 87, // [87:87] is the sub-list for extension type_name 87, // [87:87] is the sub-list for extension extendee 0, // [0:87] is the sub-list for field type_name +======= + 25, // 18: envoy.config.cluster.v3.Cluster.dns_failure_refresh_rate:type_name -> envoy.config.cluster.v3.Cluster.RefreshRate + 2, // 19: envoy.config.cluster.v3.Cluster.dns_lookup_family:type_name -> envoy.config.cluster.v3.Cluster.DnsLookupFamily + 44, // 20: 
envoy.config.cluster.v3.Cluster.dns_resolvers:type_name -> envoy.config.core.v3.Address + 45, // 21: envoy.config.cluster.v3.Cluster.dns_resolution_config:type_name -> envoy.config.core.v3.DnsResolutionConfig + 46, // 22: envoy.config.cluster.v3.Cluster.typed_dns_resolver_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 47, // 23: envoy.config.cluster.v3.Cluster.wait_for_warm_on_init:type_name -> google.protobuf.BoolValue + 48, // 24: envoy.config.cluster.v3.Cluster.outlier_detection:type_name -> envoy.config.cluster.v3.OutlierDetection + 35, // 25: envoy.config.cluster.v3.Cluster.cleanup_interval:type_name -> google.protobuf.Duration + 49, // 26: envoy.config.cluster.v3.Cluster.upstream_bind_config:type_name -> envoy.config.core.v3.BindConfig + 17, // 27: envoy.config.cluster.v3.Cluster.lb_subset_config:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig + 21, // 28: envoy.config.cluster.v3.Cluster.ring_hash_lb_config:type_name -> envoy.config.cluster.v3.Cluster.RingHashLbConfig + 22, // 29: envoy.config.cluster.v3.Cluster.maglev_lb_config:type_name -> envoy.config.cluster.v3.Cluster.MaglevLbConfig + 23, // 30: envoy.config.cluster.v3.Cluster.original_dst_lb_config:type_name -> envoy.config.cluster.v3.Cluster.OriginalDstLbConfig + 20, // 31: envoy.config.cluster.v3.Cluster.least_request_lb_config:type_name -> envoy.config.cluster.v3.Cluster.LeastRequestLbConfig + 19, // 32: envoy.config.cluster.v3.Cluster.round_robin_lb_config:type_name -> envoy.config.cluster.v3.Cluster.RoundRobinLbConfig + 24, // 33: envoy.config.cluster.v3.Cluster.common_lb_config:type_name -> envoy.config.cluster.v3.Cluster.CommonLbConfig + 50, // 34: envoy.config.cluster.v3.Cluster.transport_socket:type_name -> envoy.config.core.v3.TransportSocket + 51, // 35: envoy.config.cluster.v3.Cluster.metadata:type_name -> envoy.config.core.v3.Metadata + 3, // 36: envoy.config.cluster.v3.Cluster.protocol_selection:type_name -> envoy.config.cluster.v3.Cluster.ClusterProtocolSelection + 12, // 37: envoy.config.cluster.v3.Cluster.upstream_connection_options:type_name -> envoy.config.cluster.v3.UpstreamConnectionOptions + 52, // 38: envoy.config.cluster.v3.Cluster.filters:type_name -> envoy.config.cluster.v3.Filter + 11, // 39: envoy.config.cluster.v3.Cluster.load_balancing_policy:type_name -> envoy.config.cluster.v3.LoadBalancingPolicy + 53, // 40: envoy.config.cluster.v3.Cluster.lrs_server:type_name -> envoy.config.core.v3.ConfigSource + 46, // 41: envoy.config.cluster.v3.Cluster.upstream_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 13, // 42: envoy.config.cluster.v3.Cluster.track_cluster_stats:type_name -> envoy.config.cluster.v3.TrackClusterStats + 26, // 43: envoy.config.cluster.v3.Cluster.preconnect_policy:type_name -> envoy.config.cluster.v3.Cluster.PreconnectPolicy + 32, // 44: envoy.config.cluster.v3.LoadBalancingPolicy.policies:type_name -> envoy.config.cluster.v3.LoadBalancingPolicy.Policy + 54, // 45: envoy.config.cluster.v3.UpstreamConnectionOptions.tcp_keepalive:type_name -> envoy.config.core.v3.TcpKeepalive + 33, // 46: envoy.config.cluster.v3.UpstreamConnectionOptions.happy_eyeballs_config:type_name -> envoy.config.cluster.v3.UpstreamConnectionOptions.HappyEyeballsConfig + 55, // 47: envoy.config.cluster.v3.Cluster.TransportSocketMatch.match:type_name -> google.protobuf.Struct + 50, // 48: envoy.config.cluster.v3.Cluster.TransportSocketMatch.transport_socket:type_name -> envoy.config.core.v3.TransportSocket + 56, // 49: 
envoy.config.cluster.v3.Cluster.CustomClusterType.typed_config:type_name -> google.protobuf.Any + 53, // 50: envoy.config.cluster.v3.Cluster.EdsClusterConfig.eds_config:type_name -> envoy.config.core.v3.ConfigSource + 4, // 51: envoy.config.cluster.v3.Cluster.LbSubsetConfig.fallback_policy:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetFallbackPolicy + 55, // 52: envoy.config.cluster.v3.Cluster.LbSubsetConfig.default_subset:type_name -> google.protobuf.Struct + 28, // 53: envoy.config.cluster.v3.Cluster.LbSubsetConfig.subset_selectors:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector + 5, // 54: envoy.config.cluster.v3.Cluster.LbSubsetConfig.metadata_fallback_policy:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetMetadataFallbackPolicy + 35, // 55: envoy.config.cluster.v3.Cluster.SlowStartConfig.slow_start_window:type_name -> google.protobuf.Duration + 57, // 56: envoy.config.cluster.v3.Cluster.SlowStartConfig.aggression:type_name -> envoy.config.core.v3.RuntimeDouble + 58, // 57: envoy.config.cluster.v3.Cluster.SlowStartConfig.min_weight_percent:type_name -> envoy.type.v3.Percent + 18, // 58: envoy.config.cluster.v3.Cluster.RoundRobinLbConfig.slow_start_config:type_name -> envoy.config.cluster.v3.Cluster.SlowStartConfig + 36, // 59: envoy.config.cluster.v3.Cluster.LeastRequestLbConfig.choice_count:type_name -> google.protobuf.UInt32Value + 57, // 60: envoy.config.cluster.v3.Cluster.LeastRequestLbConfig.active_request_bias:type_name -> envoy.config.core.v3.RuntimeDouble + 18, // 61: envoy.config.cluster.v3.Cluster.LeastRequestLbConfig.slow_start_config:type_name -> envoy.config.cluster.v3.Cluster.SlowStartConfig + 59, // 62: envoy.config.cluster.v3.Cluster.RingHashLbConfig.minimum_ring_size:type_name -> google.protobuf.UInt64Value + 7, // 63: envoy.config.cluster.v3.Cluster.RingHashLbConfig.hash_function:type_name -> envoy.config.cluster.v3.Cluster.RingHashLbConfig.HashFunction + 59, // 64: envoy.config.cluster.v3.Cluster.RingHashLbConfig.maximum_ring_size:type_name -> google.protobuf.UInt64Value + 59, // 65: envoy.config.cluster.v3.Cluster.MaglevLbConfig.table_size:type_name -> google.protobuf.UInt64Value + 36, // 66: envoy.config.cluster.v3.Cluster.OriginalDstLbConfig.upstream_port_override:type_name -> google.protobuf.UInt32Value + 60, // 67: envoy.config.cluster.v3.Cluster.OriginalDstLbConfig.metadata_key:type_name -> envoy.type.metadata.v3.MetadataKey + 58, // 68: envoy.config.cluster.v3.Cluster.CommonLbConfig.healthy_panic_threshold:type_name -> envoy.type.v3.Percent + 29, // 69: envoy.config.cluster.v3.Cluster.CommonLbConfig.zone_aware_lb_config:type_name -> envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig + 30, // 70: envoy.config.cluster.v3.Cluster.CommonLbConfig.locality_weighted_lb_config:type_name -> envoy.config.cluster.v3.Cluster.CommonLbConfig.LocalityWeightedLbConfig + 35, // 71: envoy.config.cluster.v3.Cluster.CommonLbConfig.update_merge_window:type_name -> google.protobuf.Duration + 31, // 72: envoy.config.cluster.v3.Cluster.CommonLbConfig.consistent_hashing_lb_config:type_name -> envoy.config.cluster.v3.Cluster.CommonLbConfig.ConsistentHashingLbConfig + 61, // 73: envoy.config.cluster.v3.Cluster.CommonLbConfig.override_host_status:type_name -> envoy.config.core.v3.HealthStatusSet + 35, // 74: envoy.config.cluster.v3.Cluster.RefreshRate.base_interval:type_name -> google.protobuf.Duration + 35, // 75: envoy.config.cluster.v3.Cluster.RefreshRate.max_interval:type_name -> 
google.protobuf.Duration + 62, // 76: envoy.config.cluster.v3.Cluster.PreconnectPolicy.per_upstream_preconnect_ratio:type_name -> google.protobuf.DoubleValue + 62, // 77: envoy.config.cluster.v3.Cluster.PreconnectPolicy.predictive_preconnect_ratio:type_name -> google.protobuf.DoubleValue + 56, // 78: envoy.config.cluster.v3.Cluster.TypedExtensionProtocolOptionsEntry.value:type_name -> google.protobuf.Any + 6, // 79: envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.fallback_policy:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.LbSubsetSelectorFallbackPolicy + 58, // 80: envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig.routing_enabled:type_name -> envoy.type.v3.Percent + 59, // 81: envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig.min_cluster_size:type_name -> google.protobuf.UInt64Value + 36, // 82: envoy.config.cluster.v3.Cluster.CommonLbConfig.ConsistentHashingLbConfig.hash_balance_factor:type_name -> google.protobuf.UInt32Value + 46, // 83: envoy.config.cluster.v3.LoadBalancingPolicy.Policy.typed_extension_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 8, // 84: envoy.config.cluster.v3.UpstreamConnectionOptions.HappyEyeballsConfig.first_address_family_version:type_name -> envoy.config.cluster.v3.UpstreamConnectionOptions.FirstAddressFamilyVersion + 36, // 85: envoy.config.cluster.v3.UpstreamConnectionOptions.HappyEyeballsConfig.first_address_family_count:type_name -> google.protobuf.UInt32Value + 86, // [86:86] is the sub-list for method output_type + 86, // [86:86] is the sub-list for method input_type + 86, // [86:86] is the sub-list for extension type_name + 86, // [86:86] is the sub-list for extension extendee + 0, // [0:86] is the sub-list for field type_name +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func init() { file_envoy_config_cluster_v3_cluster_proto_init() } diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.validate.go index e651f5bd99..1cd718c9d3 100644 --- a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.validate.go +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.validate.go @@ -648,6 +648,7 @@ func (m *Cluster) validate(all bool) error { } if all { +<<<<<<< HEAD switch v := interface{}(m.GetDnsJitter()).(type) { case interface{ ValidateAll() error }: if err := v.ValidateAll(); err != nil { @@ -677,6 +678,8 @@ func (m *Cluster) validate(all bool) error { } if all { +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) switch v := interface{}(m.GetDnsFailureRefreshRate()).(type) { case interface{ ValidateAll() error }: if err := v.ValidateAll(); err != nil { diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster_vtproto.pb.go index c877196656..0a6399ee2a 100644 --- a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster_vtproto.pb.go +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster_vtproto.pb.go @@ -1295,6 +1295,7 @@ func (m *Cluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } +<<<<<<< HEAD if m.DnsJitter != nil { size, err := 
(*durationpb.Duration)(m.DnsJitter).MarshalToSizedBufferVTStrict(dAtA[:i]) if err != nil { @@ -1307,6 +1308,8 @@ func (m *Cluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) { i-- dAtA[i] = 0xd2 } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if len(m.LrsReportEndpointMetrics) > 0 { for iNdEx := len(m.LrsReportEndpointMetrics) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.LrsReportEndpointMetrics[iNdEx]) @@ -3254,10 +3257,13 @@ func (m *Cluster) SizeVT() (n int) { n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) } } +<<<<<<< HEAD if m.DnsJitter != nil { l = (*durationpb.Duration)(m.DnsJitter).SizeVT() n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) n += len(m.unknownFields) return n } diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.go index 87af0321f9..20ee6a6ed4 100644 --- a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.go +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.go @@ -197,7 +197,11 @@ func (x *QuicKeepAliveSettings) GetInitialInterval() *durationpb.Duration { } // QUIC protocol options which apply to both downstream and upstream connections. +<<<<<<< HEAD // [#next-free-field: 10] +======= +// [#next-free-field: 9] +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type QuicProtocolOptions struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -243,9 +247,12 @@ type QuicProtocolOptions struct { // For internal corporate network, a long timeout is often fine. // But for client facing network, 30s is usually a good choice. IdleNetworkTimeout *durationpb.Duration `protobuf:"bytes,8,opt,name=idle_network_timeout,json=idleNetworkTimeout,proto3" json:"idle_network_timeout,omitempty"` +<<<<<<< HEAD // Maximum packet length for QUIC connections. It refers to the largest size of a QUIC packet that can be transmitted over the connection. // If not specified, one of the `default values in QUICHE `_ is used. MaxPacketLength *wrapperspb.UInt64Value `protobuf:"bytes,9,opt,name=max_packet_length,json=maxPacketLength,proto3" json:"max_packet_length,omitempty"` +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *QuicProtocolOptions) Reset() { @@ -336,6 +343,7 @@ func (x *QuicProtocolOptions) GetIdleNetworkTimeout() *durationpb.Duration { return nil } +<<<<<<< HEAD func (x *QuicProtocolOptions) GetMaxPacketLength() *wrapperspb.UInt64Value { if x != nil { return x.MaxPacketLength @@ -343,6 +351,8 @@ func (x *QuicProtocolOptions) GetMaxPacketLength() *wrapperspb.UInt64Value { return nil } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type UpstreamHttpProtocolOptions struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -535,7 +545,11 @@ func (x *AlternateProtocolsCacheOptions) GetCanonicalSuffixes() []string { return nil } +<<<<<<< HEAD // [#next-free-field: 8] +======= +// [#next-free-field: 7] +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type HttpProtocolOptions struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -565,6 +579,7 @@ type HttpProtocolOptions struct { // if there are no active streams. See :ref:`drain_timeout // `. 
MaxConnectionDuration *durationpb.Duration `protobuf:"bytes,3,opt,name=max_connection_duration,json=maxConnectionDuration,proto3" json:"max_connection_duration,omitempty"` +<<<<<<< HEAD // The maximum number of headers (request headers if configured on HttpConnectionManager, // response headers when configured on a cluster). // If unconfigured, the default maximum number of headers allowed is 100. @@ -588,6 +603,12 @@ type HttpProtocolOptions struct { // HTTP/2 (when using nghttp2) limits a single header to around 100kb. // HTTP/3 limits a single header to around 1024kb. MaxResponseHeadersKb *wrapperspb.UInt32Value `protobuf:"bytes,7,opt,name=max_response_headers_kb,json=maxResponseHeadersKb,proto3" json:"max_response_headers_kb,omitempty"` +======= + // The maximum number of headers. If unconfigured, the default + // maximum number of request headers allowed is 100. Requests that exceed this limit will receive + // a 431 response for HTTP/1.x and cause a stream reset for HTTP/2. + MaxHeadersCount *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=max_headers_count,json=maxHeadersCount,proto3" json:"max_headers_count,omitempty"` +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be // reset independent of any other timeouts. If not specified, this value is not set. MaxStreamDuration *durationpb.Duration `protobuf:"bytes,4,opt,name=max_stream_duration,json=maxStreamDuration,proto3" json:"max_stream_duration,omitempty"` @@ -657,6 +678,7 @@ func (x *HttpProtocolOptions) GetMaxHeadersCount() *wrapperspb.UInt32Value { return nil } +<<<<<<< HEAD func (x *HttpProtocolOptions) GetMaxResponseHeadersKb() *wrapperspb.UInt32Value { if x != nil { return x.MaxResponseHeadersKb @@ -664,6 +686,8 @@ func (x *HttpProtocolOptions) GetMaxResponseHeadersKb() *wrapperspb.UInt32Value return nil } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (x *HttpProtocolOptions) GetMaxStreamDuration() *durationpb.Duration { if x != nil { return x.MaxStreamDuration @@ -1767,7 +1791,11 @@ var file_envoy_config_core_v3_protocol_proto_rawDesc = []byte{ 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0xaa, 0x01, 0x06, 0x22, 0x00, 0x32, 0x02, 0x08, 0x01, 0x52, 0x0f, 0x69, 0x6e, 0x69, +<<<<<<< HEAD 0x74, 0x69, 0x61, 0x6c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0xbb, 0x06, 0x0a, +======= + 0x74, 0x69, 0x61, 0x6c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0xf1, 0x05, 0x0a, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) 0x13, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5b, 0x0a, 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x01, @@ -1815,6 +1843,7 @@ var file_envoy_config_core_v3_protocol_proto_rawDesc = []byte{ 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0f, 0xfa, 0x42, 0x0c, 0xaa, 0x01, 0x09, 0x22, 0x03, 0x08, 0xd8, 0x04, 0x32, 0x02, 0x08, 0x01, 0x52, 0x12, 0x69, 0x64, 0x6c, 0x65, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, +<<<<<<< HEAD 0x12, 0x48, 0x0a, 0x11, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 
0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, @@ -2178,6 +2207,360 @@ var file_envoy_config_core_v3_protocol_proto_rawDesc = []byte{ 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +======= + 0x22, 0xe4, 0x01, 0x0a, 0x1b, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x48, 0x74, 0x74, + 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x19, 0x0a, 0x08, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x73, 0x6e, 0x69, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x07, 0x61, 0x75, 0x74, 0x6f, 0x53, 0x6e, 0x69, 0x12, 0x2e, 0x0a, 0x13, 0x61, + 0x75, 0x74, 0x6f, 0x5f, 0x73, 0x61, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x61, 0x75, 0x74, 0x6f, 0x53, 0x61, + 0x6e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, 0x18, 0x6f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x73, 0x6e, 0x69, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, + 0x42, 0x08, 0x72, 0x06, 0xd0, 0x01, 0x01, 0xc0, 0x01, 0x01, 0x52, 0x15, 0x6f, 0x76, 0x65, 0x72, + 0x72, 0x69, 0x64, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x53, 0x6e, 0x69, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x55, 0x70, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x86, 0x04, 0x0a, 0x1e, 0x41, 0x6c, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x43, 0x61, + 0x63, 0x68, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x5f, 0x65, + 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, + 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, + 0x02, 0x20, 0x00, 0x52, 0x0a, 0x6d, 0x61, 0x78, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, + 0x5f, 0x0a, 0x16, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x6b, 0x65, 0x79, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x84, 0x01, 0x0a, 0x14, 0x70, 0x72, 0x65, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, + 0x64, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x51, 0x2e, 
0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x13, 0x70, 0x72, 0x65, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, + 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x12, 0x63, 0x61, 0x6e, 0x6f, 0x6e, + 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x65, 0x73, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x11, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x75, + 0x66, 0x66, 0x69, 0x78, 0x65, 0x73, 0x1a, 0x68, 0x0a, 0x1c, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x43, 0x61, 0x63, 0x68, + 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x27, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xd0, + 0x01, 0x01, 0xc0, 0x01, 0x01, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x1f, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x0b, 0xfa, + 0x42, 0x08, 0x2a, 0x06, 0x10, 0xff, 0xff, 0x03, 0x20, 0x00, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, + 0x22, 0xaf, 0x05, 0x0a, 0x13, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x0c, 0x69, 0x64, 0x6c, 0x65, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x69, 0x64, 0x6c, 0x65, 0x54, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x51, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x51, 0x0a, 0x11, 0x6d, 0x61, 0x78, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x0f, 0x6d, 0x61, 0x78, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x49, 0x0a, 0x13, + 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x12, 0x8d, 0x01, 0x0a, 0x1f, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x75, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, + 0x6f, 0x72, 0x65, 0x73, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x46, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x57, 0x69, 0x74, 0x68, 0x55, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, + 0x72, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1c, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x73, 0x57, 0x69, 0x74, 0x68, 0x55, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, + 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, + 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4e, 0x0a, 0x1c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x57, + 0x69, 0x74, 0x68, 0x55, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x00, 0x12, + 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, + 0x54, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x52, 0x4f, 0x50, 0x5f, 0x48, 0x45, 0x41, 0x44, + 0x45, 0x52, 0x10, 0x02, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, + 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x22, 0x93, 0x09, 0x0a, 0x14, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x48, 0x0a, 0x12, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x62, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x65, 0x5f, 0x75, 0x72, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x62, 0x73, 0x6f, 0x6c, 0x75, + 0x74, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x5f, + 0x68, 0x74, 0x74, 0x70, 0x5f, 0x31, 0x30, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, + 0x63, 0x63, 0x65, 0x70, 0x74, 0x48, 0x74, 0x74, 0x70, 0x31, 0x30, 0x12, 0x36, 0x0a, 0x18, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x5f, + 0x68, 0x74, 0x74, 0x70, 0x5f, 0x31, 0x30, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x46, 0x6f, 0x72, 0x48, 0x74, 0x74, + 0x70, 0x31, 0x30, 0x12, 0x66, 0x0a, 0x11, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6b, 0x65, + 0x79, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x04, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x0f, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x72, 0x61, 0x69, + 0x6c, 0x65, 0x72, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x68, + 0x75, 0x6e, 0x6b, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x65, 0x64, + 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x7a, 0x0a, 0x2d, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, + 0x64, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, + 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x27, 0x6f, 0x76, 0x65, 0x72, 0x72, + 0x69, 0x64, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, + 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x73, 0x65, 0x6e, 0x64, 0x5f, 0x66, 0x75, 0x6c, 0x6c, 0x79, + 0x5f, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x73, 0x65, 0x6e, 0x64, 0x46, 0x75, 0x6c, 0x6c, 0x79, 0x51, + 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x64, 0x55, 0x72, 0x6c, 0x12, 0x4e, 0x0a, 0x10, 0x75, + 0x73, 0x65, 0x5f, 0x62, 0x61, 0x6c, 0x73, 0x61, 0x5f, 0x70, 0x61, 0x72, 0x73, 0x65, 0x72, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x0e, 0x75, 0x73, 0x65, + 0x42, 0x61, 0x6c, 0x73, 0x61, 0x50, 0x61, 0x72, 0x73, 0x65, 0x72, 0x12, 0x3a, 0x0a, 0x14, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, + 0x02, 0x08, 0x01, 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x1a, 0x9f, 0x03, 0x0a, 0x0f, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x78, 0x0a, 0x11, 0x70, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x73, 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x64, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, + 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 
0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, + 0x61, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, 0x57, 0x6f, 0x72, + 0x64, 0x73, 0x48, 0x00, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, + 0x57, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x5b, 0x0a, 0x12, 0x73, 0x74, 0x61, 0x74, 0x65, 0x66, 0x75, + 0x6c, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x74, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, + 0x11, 0x73, 0x74, 0x61, 0x74, 0x65, 0x66, 0x75, 0x6c, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x74, + 0x65, 0x72, 0x1a, 0x60, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, + 0x57, 0x6f, 0x72, 0x64, 0x73, 0x3a, 0x4d, 0x9a, 0xc5, 0x88, 0x1e, 0x48, 0x0a, 0x46, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, 0x57, + 0x6f, 0x72, 0x64, 0x73, 0x3a, 0x3d, 0x9a, 0xc5, 0x88, 0x1e, 0x38, 0x0a, 0x36, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, + 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x42, 0x14, 0x0a, 0x0d, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x66, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, + 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xc1, 0x02, 0x0a, 0x11, 0x4b, 0x65, 0x65, + 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, + 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa, 0x42, 0x09, + 0xaa, 0x01, 0x06, 0x32, 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x12, 0x43, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x0e, 0xfa, 0x42, 0x0b, 0xaa, 0x01, 0x08, 0x08, 0x01, 0x32, 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52, + 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3f, 0x0a, 0x0f, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 
0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x12, 0x61, 0x0a, 0x18, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0xaa, 0x01, 0x06, 0x32, 0x04, + 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x16, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x49, 0x64, 0x6c, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0xd0, 0x0e, 0x0a, + 0x14, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x46, 0x0a, 0x10, 0x68, 0x70, 0x61, 0x63, 0x6b, 0x5f, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x68, + 0x70, 0x61, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x61, 0x0a, + 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0d, 0xfa, 0x42, 0x0a, + 0x2a, 0x08, 0x18, 0xff, 0xff, 0xff, 0xff, 0x07, 0x28, 0x01, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x43, + 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, + 0x12, 0x6a, 0x0a, 0x1a, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x42, 0x0f, 0xfa, 0x42, 0x0c, 0x2a, 0x0a, 0x18, 0xff, 0xff, 0xff, 0xff, 0x07, 0x28, + 0xff, 0xff, 0x03, 0x52, 0x17, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x72, 0x0a, 0x1e, + 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x42, 0x0f, 0xfa, 0x42, 0x0c, 0x2a, 0x0a, 0x18, 0xff, 0xff, 0xff, 0xff, 0x07, 0x28, + 0xff, 0xff, 0x03, 0x52, 0x1b, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x53, 0x69, 0x7a, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x12, 
0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x55, 0x0a, 0x13, + 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x66, 0x72, 0x61, + 0x6d, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, + 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, + 0x52, 0x11, 0x6d, 0x61, 0x78, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x46, 0x72, 0x61, + 0x6d, 0x65, 0x73, 0x12, 0x64, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x75, 0x74, 0x62, 0x6f, + 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x5f, 0x66, 0x72, 0x61, 0x6d, + 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, + 0x18, 0x6d, 0x61, 0x78, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x84, 0x01, 0x0a, 0x31, 0x6d, 0x61, + 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, + 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x77, 0x69, 0x74, + 0x68, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x2b, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, + 0x69, 0x76, 0x65, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, + 0x57, 0x69, 0x74, 0x68, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x12, 0x6f, 0x0a, 0x26, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, + 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x5f, + 0x70, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x21, + 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, + 0x74, 0x79, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x12, 0x91, 0x01, 0x0a, 0x34, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, + 0x64, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, + 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, + 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, + 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x2c, 0x6d, 
0x61, 0x78, 0x49, 0x6e, 0x62, 0x6f, + 0x75, 0x6e, 0x64, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, + 0x72, 0x61, 0x6d, 0x65, 0x73, 0x50, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x46, 0x72, 0x61, 0x6d, + 0x65, 0x53, 0x65, 0x6e, 0x74, 0x12, 0x5e, 0x0a, 0x26, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, + 0x18, 0x01, 0x52, 0x21, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, + 0x6e, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x7a, 0x0a, 0x2d, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, + 0x65, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6f, + 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, + 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x27, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, + 0x64, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, 0x49, + 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x7a, 0x0a, 0x1a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x73, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, + 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, + 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x52, 0x18, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x5a, 0x0a, + 0x14, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x65, 0x70, + 0x61, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x52, 0x13, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x12, 0x50, 0x0a, 0x11, 0x75, 0x73, 0x65, + 0x5f, 0x6f, 0x67, 0x68, 0x74, 0x74, 0x70, 0x32, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x63, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x0f, 0x75, 0x73, 0x65, 0x4f, + 0x67, 0x68, 0x74, 0x74, 0x70, 0x32, 0x43, 0x6f, 0x64, 0x65, 0x63, 0x1a, 0xe2, 0x01, 0x0a, 0x11, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 
0x74, 0x65, + 0x72, 0x12, 0x4e, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x2a, 0x06, 0x18, + 0xff, 0xff, 0x03, 0x28, 0x00, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x12, 0x3c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, + 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x3f, 0x9a, 0xc5, 0x88, 0x1e, 0x3a, 0x0a, 0x38, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x32, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, + 0xa5, 0x01, 0x0a, 0x13, 0x47, 0x72, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x60, 0x0a, 0x16, 0x68, 0x74, 0x74, 0x70, 0x32, + 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, + 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x14, 0x68, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, + 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xd8, 0x02, 0x0a, 0x14, 0x48, 0x74, 0x74, 0x70, + 0x33, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x5d, 0x0a, 0x15, 0x71, 0x75, 0x69, 0x63, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x71, 0x75, 0x69, 0x63, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x7a, 0x0a, 0x2d, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, + 0x6c, 0x69, 
0x64, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x27, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x53, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3e, 0x0a, 0x16, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x5f, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x42, 0x08, 0xd2, 0xc6, 0xa4, + 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x64, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x22, 0x9b, 0x01, 0x0a, 0x1a, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x44, 0x0a, 0x13, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x6f, + 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x12, + 0xfa, 0x42, 0x0f, 0x72, 0x0d, 0x52, 0x04, 0x68, 0x74, 0x74, 0x70, 0x52, 0x05, 0x68, 0x74, 0x74, + 0x70, 0x73, 0x48, 0x00, 0x52, 0x11, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x54, 0x6f, 0x4f, 0x76, + 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x42, 0x10, + 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x42, 0x81, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, + 0x0d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, + 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var ( @@ -2214,10 +2597,16 @@ var file_envoy_config_core_v3_protocol_proto_goTypes = []interface{}{ (*Http2ProtocolOptions_SettingsParameter)(nil), // 16: envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter (*durationpb.Duration)(nil), // 17: google.protobuf.Duration (*wrapperspb.UInt32Value)(nil), // 18: google.protobuf.UInt32Value +<<<<<<< HEAD (*wrapperspb.UInt64Value)(nil), // 19: google.protobuf.UInt64Value (*TypedExtensionConfig)(nil), // 20: 
envoy.config.core.v3.TypedExtensionConfig (*wrapperspb.BoolValue)(nil), // 21: google.protobuf.BoolValue (*v3.Percent)(nil), // 22: envoy.type.v3.Percent +======= + (*TypedExtensionConfig)(nil), // 19: envoy.config.core.v3.TypedExtensionConfig + (*wrapperspb.BoolValue)(nil), // 20: google.protobuf.BoolValue + (*v3.Percent)(nil), // 21: envoy.type.v3.Percent +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var file_envoy_config_core_v3_protocol_proto_depIdxs = []int32{ 17, // 0: envoy.config.core.v3.QuicKeepAliveSettings.max_interval:type_name -> google.protobuf.Duration @@ -2228,6 +2617,7 @@ var file_envoy_config_core_v3_protocol_proto_depIdxs = []int32{ 18, // 5: envoy.config.core.v3.QuicProtocolOptions.num_timeouts_to_trigger_port_migration:type_name -> google.protobuf.UInt32Value 2, // 6: envoy.config.core.v3.QuicProtocolOptions.connection_keepalive:type_name -> envoy.config.core.v3.QuicKeepAliveSettings 17, // 7: envoy.config.core.v3.QuicProtocolOptions.idle_network_timeout:type_name -> google.protobuf.Duration +<<<<<<< HEAD 19, // 8: envoy.config.core.v3.QuicProtocolOptions.max_packet_length:type_name -> google.protobuf.UInt64Value 18, // 9: envoy.config.core.v3.AlternateProtocolsCacheOptions.max_entries:type_name -> google.protobuf.UInt32Value 20, // 10: envoy.config.core.v3.AlternateProtocolsCacheOptions.key_value_store_config:type_name -> envoy.config.core.v3.TypedExtensionConfig @@ -2272,6 +2662,50 @@ var file_envoy_config_core_v3_protocol_proto_depIdxs = []int32{ 47, // [47:47] is the sub-list for extension type_name 47, // [47:47] is the sub-list for extension extendee 0, // [0:47] is the sub-list for field type_name +======= + 18, // 8: envoy.config.core.v3.AlternateProtocolsCacheOptions.max_entries:type_name -> google.protobuf.UInt32Value + 19, // 9: envoy.config.core.v3.AlternateProtocolsCacheOptions.key_value_store_config:type_name -> envoy.config.core.v3.TypedExtensionConfig + 13, // 10: envoy.config.core.v3.AlternateProtocolsCacheOptions.prepopulated_entries:type_name -> envoy.config.core.v3.AlternateProtocolsCacheOptions.AlternateProtocolsCacheEntry + 17, // 11: envoy.config.core.v3.HttpProtocolOptions.idle_timeout:type_name -> google.protobuf.Duration + 17, // 12: envoy.config.core.v3.HttpProtocolOptions.max_connection_duration:type_name -> google.protobuf.Duration + 18, // 13: envoy.config.core.v3.HttpProtocolOptions.max_headers_count:type_name -> google.protobuf.UInt32Value + 17, // 14: envoy.config.core.v3.HttpProtocolOptions.max_stream_duration:type_name -> google.protobuf.Duration + 0, // 15: envoy.config.core.v3.HttpProtocolOptions.headers_with_underscores_action:type_name -> envoy.config.core.v3.HttpProtocolOptions.HeadersWithUnderscoresAction + 18, // 16: envoy.config.core.v3.HttpProtocolOptions.max_requests_per_connection:type_name -> google.protobuf.UInt32Value + 20, // 17: envoy.config.core.v3.Http1ProtocolOptions.allow_absolute_url:type_name -> google.protobuf.BoolValue + 14, // 18: envoy.config.core.v3.Http1ProtocolOptions.header_key_format:type_name -> envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat + 20, // 19: envoy.config.core.v3.Http1ProtocolOptions.override_stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue + 20, // 20: envoy.config.core.v3.Http1ProtocolOptions.use_balsa_parser:type_name -> google.protobuf.BoolValue + 17, // 21: envoy.config.core.v3.KeepaliveSettings.interval:type_name -> google.protobuf.Duration + 17, // 22: envoy.config.core.v3.KeepaliveSettings.timeout:type_name -> 
google.protobuf.Duration + 21, // 23: envoy.config.core.v3.KeepaliveSettings.interval_jitter:type_name -> envoy.type.v3.Percent + 17, // 24: envoy.config.core.v3.KeepaliveSettings.connection_idle_interval:type_name -> google.protobuf.Duration + 18, // 25: envoy.config.core.v3.Http2ProtocolOptions.hpack_table_size:type_name -> google.protobuf.UInt32Value + 18, // 26: envoy.config.core.v3.Http2ProtocolOptions.max_concurrent_streams:type_name -> google.protobuf.UInt32Value + 18, // 27: envoy.config.core.v3.Http2ProtocolOptions.initial_stream_window_size:type_name -> google.protobuf.UInt32Value + 18, // 28: envoy.config.core.v3.Http2ProtocolOptions.initial_connection_window_size:type_name -> google.protobuf.UInt32Value + 18, // 29: envoy.config.core.v3.Http2ProtocolOptions.max_outbound_frames:type_name -> google.protobuf.UInt32Value + 18, // 30: envoy.config.core.v3.Http2ProtocolOptions.max_outbound_control_frames:type_name -> google.protobuf.UInt32Value + 18, // 31: envoy.config.core.v3.Http2ProtocolOptions.max_consecutive_inbound_frames_with_empty_payload:type_name -> google.protobuf.UInt32Value + 18, // 32: envoy.config.core.v3.Http2ProtocolOptions.max_inbound_priority_frames_per_stream:type_name -> google.protobuf.UInt32Value + 18, // 33: envoy.config.core.v3.Http2ProtocolOptions.max_inbound_window_update_frames_per_data_frame_sent:type_name -> google.protobuf.UInt32Value + 20, // 34: envoy.config.core.v3.Http2ProtocolOptions.override_stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue + 16, // 35: envoy.config.core.v3.Http2ProtocolOptions.custom_settings_parameters:type_name -> envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter + 8, // 36: envoy.config.core.v3.Http2ProtocolOptions.connection_keepalive:type_name -> envoy.config.core.v3.KeepaliveSettings + 20, // 37: envoy.config.core.v3.Http2ProtocolOptions.use_oghttp2_codec:type_name -> google.protobuf.BoolValue + 9, // 38: envoy.config.core.v3.GrpcProtocolOptions.http2_protocol_options:type_name -> envoy.config.core.v3.Http2ProtocolOptions + 3, // 39: envoy.config.core.v3.Http3ProtocolOptions.quic_protocol_options:type_name -> envoy.config.core.v3.QuicProtocolOptions + 20, // 40: envoy.config.core.v3.Http3ProtocolOptions.override_stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue + 15, // 41: envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.proper_case_words:type_name -> envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords + 19, // 42: envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.stateful_formatter:type_name -> envoy.config.core.v3.TypedExtensionConfig + 18, // 43: envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter.identifier:type_name -> google.protobuf.UInt32Value + 18, // 44: envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter.value:type_name -> google.protobuf.UInt32Value + 45, // [45:45] is the sub-list for method output_type + 45, // [45:45] is the sub-list for method input_type + 45, // [45:45] is the sub-list for extension type_name + 45, // [45:45] is the sub-list for extension extendee + 0, // [0:45] is the sub-list for field type_name +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func init() { file_envoy_config_core_v3_protocol_proto_init() } diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.validate.go index 1b7d8342dd..bb1f07eae9 100644 --- 
a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.validate.go +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.validate.go @@ -448,6 +448,7 @@ func (m *QuicProtocolOptions) validate(all bool) error { } } +<<<<<<< HEAD if all { switch v := interface{}(m.GetMaxPacketLength()).(type) { case interface{ ValidateAll() error }: @@ -477,6 +478,8 @@ func (m *QuicProtocolOptions) validate(all bool) error { } } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if len(errors) > 0 { return QuicProtocolOptionsMultiError(errors) } @@ -968,6 +971,7 @@ func (m *HttpProtocolOptions) validate(all bool) error { } +<<<<<<< HEAD if wrapper := m.GetMaxResponseHeadersKb(); wrapper != nil { if val := wrapper.GetValue(); val <= 0 || val > 8192 { @@ -983,6 +987,8 @@ func (m *HttpProtocolOptions) validate(all bool) error { } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if all { switch v := interface{}(m.GetMaxStreamDuration()).(type) { case interface{ ValidateAll() error }: diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol_vtproto.pb.go index 85f4d3e4eb..129807ef95 100644 --- a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol_vtproto.pb.go +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol_vtproto.pb.go @@ -137,6 +137,7 @@ func (m *QuicProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, er i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } +<<<<<<< HEAD if m.MaxPacketLength != nil { size, err := (*wrapperspb.UInt64Value)(m.MaxPacketLength).MarshalToSizedBufferVTStrict(dAtA[:i]) if err != nil { @@ -147,6 +148,8 @@ func (m *QuicProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, er i-- dAtA[i] = 0x4a } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if m.IdleNetworkTimeout != nil { size, err := (*durationpb.Duration)(m.IdleNetworkTimeout).MarshalToSizedBufferVTStrict(dAtA[:i]) if err != nil { @@ -440,6 +443,7 @@ func (m *HttpProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, er i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } +<<<<<<< HEAD if m.MaxResponseHeadersKb != nil { size, err := (*wrapperspb.UInt32Value)(m.MaxResponseHeadersKb).MarshalToSizedBufferVTStrict(dAtA[:i]) if err != nil { @@ -450,6 +454,8 @@ func (m *HttpProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, er i-- dAtA[i] = 0x3a } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if m.MaxRequestsPerConnection != nil { size, err := (*wrapperspb.UInt32Value)(m.MaxRequestsPerConnection).MarshalToSizedBufferVTStrict(dAtA[:i]) if err != nil { @@ -1345,10 +1351,13 @@ func (m *QuicProtocolOptions) SizeVT() (n int) { l = (*durationpb.Duration)(m.IdleNetworkTimeout).SizeVT() n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } +<<<<<<< HEAD if m.MaxPacketLength != nil { l = (*wrapperspb.UInt64Value)(m.MaxPacketLength).SizeVT() n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) n += len(m.unknownFields) return n } @@ -1453,10 +1462,13 @@ func (m *HttpProtocolOptions) SizeVT() (n int) { l = (*wrapperspb.UInt32Value)(m.MaxRequestsPerConnection).SizeVT() n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } +<<<<<<< HEAD if m.MaxResponseHeadersKb != nil { l = 
(*wrapperspb.UInt32Value)(m.MaxResponseHeadersKb).SizeVT() n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) n += len(m.unknownFields) return n } diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.go index ea97b5987e..1760301f09 100644 --- a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.go +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.go @@ -27,7 +27,11 @@ const ( ) // Configuration specific to the UDP QUIC listener. +<<<<<<< HEAD // [#next-free-field: 14] +======= +// [#next-free-field: 12] +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type QuicProtocolOptions struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -78,6 +82,7 @@ type QuicProtocolOptions struct { // If not specified, no debug visitor will be attached to connections. // [#extension-category: envoy.quic.connection_debug_visitor] ConnectionDebugVisitorConfig *v3.TypedExtensionConfig `protobuf:"bytes,11,opt,name=connection_debug_visitor_config,json=connectionDebugVisitorConfig,proto3" json:"connection_debug_visitor_config,omitempty"` +<<<<<<< HEAD // Configure a type of UDP cmsg to pass to listener filters via QuicReceivedPacket. // Both level and type must be specified for cmsg to be saved. // Cmsg may be truncated or omitted if expected size is not set. @@ -87,6 +92,8 @@ type QuicProtocolOptions struct { // QUIC layer by replying with an empty version negotiation packet to the // client. RejectNewConnections bool `protobuf:"varint,13,opt,name=reject_new_connections,json=rejectNewConnections,proto3" json:"reject_new_connections,omitempty"` +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *QuicProtocolOptions) Reset() { @@ -198,6 +205,7 @@ func (x *QuicProtocolOptions) GetConnectionDebugVisitorConfig() *v3.TypedExtensi return nil } +<<<<<<< HEAD func (x *QuicProtocolOptions) GetSaveCmsgConfig() []*v3.SocketCmsgHeaders { if x != nil { return x.SaveCmsgConfig @@ -212,6 +220,8 @@ func (x *QuicProtocolOptions) GetRejectNewConnections() bool { return false } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var File_envoy_config_listener_v3_quic_config_proto protoreflect.FileDescriptor var file_envoy_config_listener_v3_quic_config_proto_rawDesc = []byte{ @@ -226,9 +236,12 @@ var file_envoy_config_listener_v3_quic_config_proto_rawDesc = []byte{ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2e, 0x70, 0x72, 0x6f, +<<<<<<< HEAD 0x74, 0x6f, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x63, 0x6d, 0x73, 0x67, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
@@ -240,8 +253,13 @@ var file_envoy_config_listener_v3_quic_config_proto_rawDesc = []byte{ 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, +<<<<<<< HEAD 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x89, 0x0a, 0x0a, 0x13, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, +======= + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf6, + 0x08, 0x0a, 0x13, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x15, 0x71, 0x75, 0x69, 0x63, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, @@ -309,6 +327,7 @@ var file_envoy_config_listener_v3_quic_config_proto_rawDesc = []byte{ 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1c, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x56, 0x69, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x43, +<<<<<<< HEAD 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5b, 0x0a, 0x10, 0x73, 0x61, 0x76, 0x65, 0x5f, 0x63, 0x6d, 0x73, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, @@ -332,6 +351,22 @@ var file_envoy_config_listener_v3_quic_config_proto_rawDesc = []byte{ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +======= + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x65, 0x72, 0x2e, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x8f, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, + 0x10, 0x02, 0x0a, 0x26, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, + 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x51, 0x75, 0x69, 0x63, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4a, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var ( @@ -355,7 +390,10 @@ var 
file_envoy_config_listener_v3_quic_config_proto_goTypes = []interface{}{ (*wrapperspb.UInt32Value)(nil), // 4: google.protobuf.UInt32Value (*v3.TypedExtensionConfig)(nil), // 5: envoy.config.core.v3.TypedExtensionConfig (*wrapperspb.BoolValue)(nil), // 6: google.protobuf.BoolValue +<<<<<<< HEAD (*v3.SocketCmsgHeaders)(nil), // 7: envoy.config.core.v3.SocketCmsgHeaders +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var file_envoy_config_listener_v3_quic_config_proto_depIdxs = []int32{ 1, // 0: envoy.config.listener.v3.QuicProtocolOptions.quic_protocol_options:type_name -> envoy.config.core.v3.QuicProtocolOptions @@ -369,12 +407,20 @@ var file_envoy_config_listener_v3_quic_config_proto_depIdxs = []int32{ 5, // 8: envoy.config.listener.v3.QuicProtocolOptions.server_preferred_address_config:type_name -> envoy.config.core.v3.TypedExtensionConfig 6, // 9: envoy.config.listener.v3.QuicProtocolOptions.send_disable_active_migration:type_name -> google.protobuf.BoolValue 5, // 10: envoy.config.listener.v3.QuicProtocolOptions.connection_debug_visitor_config:type_name -> envoy.config.core.v3.TypedExtensionConfig +<<<<<<< HEAD 7, // 11: envoy.config.listener.v3.QuicProtocolOptions.save_cmsg_config:type_name -> envoy.config.core.v3.SocketCmsgHeaders 12, // [12:12] is the sub-list for method output_type 12, // [12:12] is the sub-list for method input_type 12, // [12:12] is the sub-list for extension type_name 12, // [12:12] is the sub-list for extension extendee 0, // [0:12] is the sub-list for field type_name +======= + 11, // [11:11] is the sub-list for method output_type + 11, // [11:11] is the sub-list for method input_type + 11, // [11:11] is the sub-list for extension type_name + 11, // [11:11] is the sub-list for extension extendee + 0, // [0:11] is the sub-list for field type_name +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func init() { file_envoy_config_listener_v3_quic_config_proto_init() } diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.validate.go index efd3db9c46..cc25c3418a 100644 --- a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.validate.go +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.validate.go @@ -363,6 +363,7 @@ func (m *QuicProtocolOptions) validate(all bool) error { } } +<<<<<<< HEAD if len(m.GetSaveCmsgConfig()) > 1 { err := QuicProtocolOptionsValidationError{ field: "SaveCmsgConfig", @@ -410,6 +411,8 @@ func (m *QuicProtocolOptions) validate(all bool) error { // no validation rules for RejectNewConnections +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if len(errors) > 0 { return QuicProtocolOptionsMultiError(errors) } diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config_vtproto.pb.go index 7dba379882..bf1d4e0c99 100644 --- a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config_vtproto.pb.go +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config_vtproto.pb.go @@ -51,6 +51,7 @@ func (m *QuicProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, er i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } +<<<<<<< HEAD if m.RejectNewConnections { i-- if m.RejectNewConnections 
{ @@ -85,6 +86,8 @@ func (m *QuicProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, er dAtA[i] = 0x62 } } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if m.ConnectionDebugVisitorConfig != nil { if vtmsg, ok := interface{}(m.ConnectionDebugVisitorConfig).(interface { MarshalToSizedBufferVTStrict([]byte) (int, error) @@ -374,6 +377,7 @@ func (m *QuicProtocolOptions) SizeVT() (n int) { } n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } +<<<<<<< HEAD if len(m.SaveCmsgConfig) > 0 { for _, e := range m.SaveCmsgConfig { if size, ok := interface{}(e).(interface { @@ -389,6 +393,8 @@ func (m *QuicProtocolOptions) SizeVT() (n int) { if m.RejectNewConnections { n += 2 } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) n += len(m.unknownFields) return n } diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.go index 300c39a128..90e524f450 100644 --- a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.go +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.go @@ -8,6 +8,10 @@ package routev3 import ( _ "github.com/cncf/xds/go/udpa/annotations" +<<<<<<< HEAD +======= + _ "github.com/cncf/xds/go/xds/annotations/v3" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) v3 "github.com/cncf/xds/go/xds/type/matcher/v3" _ "github.com/envoyproxy/go-control-plane/envoy/annotations" v31 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" @@ -379,6 +383,10 @@ type VirtualHost struct { // The first route that matches will be used. // Only one of this and “matcher“ can be specified. Routes []*Route `protobuf:"bytes,3,rep,name=routes,proto3" json:"routes,omitempty"` +<<<<<<< HEAD +======= + // [#next-major-version: This should be included in a oneof with routes wrapped in a message.] +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The match tree to use when resolving route actions for incoming requests. Only one of this and “routes“ // can be specified. 
Matcher *v3.Matcher `protobuf:"bytes,21,opt,name=matcher,proto3" json:"matcher,omitempty"` @@ -6612,6 +6620,7 @@ var file_envoy_config_route_v3_route_components_proto_rawDesc = []byte{ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, +<<<<<<< HEAD 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, @@ -8030,6 +8039,1426 @@ var file_envoy_config_route_v3_route_components_proto_rawDesc = []byte{ 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +======= + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, + 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x95, 0x0f, 0x0a, 0x0b, 0x56, + 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, + 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x0f, 0x92, 0x01, 0x0c, + 0x08, 0x01, 0x22, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x07, 0x64, 0x6f, + 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x34, 0x0a, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x52, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x07, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 
0x32, 0x1c, 0x2e, 0x78, + 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, + 0x06, 0x02, 0x08, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x60, 0x0a, + 0x0b, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x35, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x69, 0x72, 0x74, 0x75, + 0x61, 0x6c, 0x48, 0x6f, 0x73, 0x74, 0x2e, 0x54, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, + 0x02, 0x10, 0x01, 0x52, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x54, 0x6c, 0x73, 0x12, + 0x50, 0x0a, 0x10, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x52, 0x0f, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x73, 0x12, 0x41, 0x0a, 0x0b, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x73, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x0a, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x73, 0x12, 0x67, 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x07, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, + 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4d, 0x0a, + 0x19, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x09, + 0x42, 0x12, 0xfa, 0x42, 0x0f, 0x92, 0x01, 0x0c, 0x22, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, + 0x00, 0xc0, 0x01, 0x01, 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x69, 0x0a, 0x17, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, + 0x07, 0x52, 0x14, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, + 
0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4f, 0x0a, 0x1a, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x0f, + 0x92, 0x01, 0x0c, 0x22, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, + 0x17, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x42, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, + 0x6f, 0x72, 0x73, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, + 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, 0x73, 0x0a, 0x17, + 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x48, 0x6f, 0x73, + 0x74, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x74, 0x79, 0x70, + 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x41, 0x0a, 0x1d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x5f, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x48, 0x0a, 0x21, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, + 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x6e, + 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x1d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, + 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, + 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4f, 0x0a, 0x19, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x16, + 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x54, 0x79, 0x70, 0x65, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x45, 0x0a, 0x0c, 0x68, 0x65, 0x64, 0x67, 0x65, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 
0x63, 0x79, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x52, 0x0b, 0x68, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x44, 0x0a, + 0x1f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x69, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1b, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x49, + 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x52, 0x65, 0x74, 0x72, 0x79, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x12, 0x60, 0x0a, 0x1e, 0x70, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, + 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x70, 0x65, 0x72, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x75, 0x66, 0x66, 0x65, 0x72, 0x4c, 0x69, 0x6d, 0x69, 0x74, + 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x6e, 0x0a, 0x17, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x5f, 0x6d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, + 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x15, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x1a, 0x5d, 0x0a, 0x19, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0x3a, 0x0a, 0x12, 0x54, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, + 0x12, 0x11, 0x0a, 0x0d, 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x4f, 0x4e, 0x4c, + 0x59, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x02, 0x3a, 0x25, 0x9a, 0xc5, + 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 
0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x48, + 0x6f, 0x73, 0x74, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x52, + 0x11, 0x70, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x22, 0x64, 0x0a, 0x0c, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x41, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x41, 0x0a, 0x09, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x52, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x22, 0xaf, 0x0b, 0x0a, 0x05, + 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x05, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x08, 0xfa, 0x42, 0x05, + 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x3a, 0x0a, 0x05, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, + 0x00, 0x52, 0x05, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x72, 0x65, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x56, 0x0a, + 0x0f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0e, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0d, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x41, 
0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x48, 0x00, 0x52, 0x0c, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x60, 0x0a, 0x15, 0x6e, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, + 0x69, 0x6e, 0x67, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x77, + 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x13, + 0x6e, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, + 0x3e, 0x0a, 0x09, 0x64, 0x65, 0x63, 0x6f, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x63, 0x6f, 0x72, + 0x61, 0x74, 0x6f, 0x72, 0x52, 0x09, 0x64, 0x65, 0x63, 0x6f, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, + 0x6d, 0x0a, 0x17, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x54, + 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x74, 0x79, 0x70, 0x65, 0x64, 0x50, + 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x67, + 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, + 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4d, 0x0a, 0x19, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x0f, 0x92, + 0x01, 0x0c, 0x22, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x16, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, + 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x69, 0x0a, 0x17, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, + 0x64, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 
0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x14, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, + 0x64, 0x12, 0x4f, 0x0a, 0x1a, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, + 0x0b, 0x20, 0x03, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x0f, 0x92, 0x01, 0x0c, 0x22, 0x0a, 0x72, + 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x17, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, + 0x76, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x61, 0x63, + 0x69, 0x6e, 0x67, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x12, 0x60, 0x0a, 0x1e, + 0x70, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x75, 0x66, 0x66, + 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x1a, 0x70, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x75, + 0x66, 0x66, 0x65, 0x72, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1f, + 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x13, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x1a, + 0x5d, 0x0a, 0x19, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x1f, + 0x9a, 0xc5, 0x88, 0x1e, 0x1a, 0x0a, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x42, + 0x0d, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, 0x04, + 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x52, 0x11, 0x70, 0x65, 0x72, 0x5f, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xf3, 0x0a, + 0x0a, 0x0f, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x12, 0x5a, 0x0a, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x65, 0x64, 
0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, + 0x02, 0x08, 0x01, 0x52, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x4c, 0x0a, + 0x0c, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x0b, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x72, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, + 0x4b, 0x65, 0x79, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2e, 0x0a, 0x0b, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, + 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x92, 0x08, 0x0a, 0x0d, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x2d, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x19, 0xf2, 0x98, 0xfe, 0x8f, 0x05, + 0x13, 0x12, 0x11, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x0e, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x24, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0xf2, + 0x98, 0xfe, 0x8f, 0x05, 0x13, 0x12, 0x11, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x34, 0x0a, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x45, 0x0a, + 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0d, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x12, 0x67, 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, + 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 
0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4b, 0x0a, + 0x19, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, + 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x92, 0x01, 0x0a, 0x22, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, + 0x01, 0x01, 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x69, 0x0a, 0x17, 0x72, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, + 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, + 0x14, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4d, 0x0a, 0x1a, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, + 0x6f, 0x76, 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x92, 0x01, + 0x0a, 0x22, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x17, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x85, 0x01, 0x0a, 0x17, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x70, + 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x57, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x2e, 0x54, 0x79, 0x70, + 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x74, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3f, 0x0a, 0x14, + 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x74, + 0x65, 0x72, 0x61, 0x6c, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, + 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x48, 0x00, 0x52, 0x12, 0x68, 0x6f, 0x73, 0x74, 0x52, + 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x1a, 0x5d, 0x0a, + 0x19, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, + 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x37, 0x9a, 0xc5, + 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 
0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x57, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x42, 0x18, 0x0a, 0x16, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, + 0x04, 0x08, 0x07, 0x10, 0x08, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x52, 0x11, 0x70, 0x65, 0x72, + 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x29, + 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, + 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x42, 0x18, 0x0a, 0x16, 0x72, 0x61, 0x6e, + 0x64, 0x6f, 0x6d, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x22, 0x8d, 0x01, 0x0a, 0x16, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x52, + 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x22, 0xc5, 0x0a, 0x0a, 0x0a, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x12, 0x18, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x14, 0x0a, 0x04, + 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x12, 0x4e, 0x0a, 0x0a, 0x73, 0x61, 0x66, 0x65, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, + 0x8a, 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x09, 0x73, 0x61, 0x66, 0x65, 0x52, 0x65, 0x67, + 0x65, 0x78, 0x12, 0x5b, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, + 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, + 0x4b, 0x0a, 0x15, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x73, 0x65, 0x70, 0x61, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x42, 0x15, + 
0xfa, 0x42, 0x12, 0x72, 0x10, 0x32, 0x0e, 0x5e, 0x5b, 0x5e, 0x3f, 0x23, 0x5d, 0x2b, 0x5b, 0x5e, + 0x3f, 0x23, 0x2f, 0x5d, 0x24, 0x48, 0x00, 0x52, 0x13, 0x70, 0x61, 0x74, 0x68, 0x53, 0x65, 0x70, + 0x61, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x58, 0x0a, 0x11, + 0x70, 0x61, 0x74, 0x68, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, + 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0f, 0x70, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0e, 0x63, 0x61, 0x73, 0x65, 0x5f, 0x73, + 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0d, 0x63, 0x61, 0x73, 0x65, + 0x53, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x59, 0x0a, 0x10, 0x72, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x66, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, + 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, + 0x65, 0x6e, 0x74, 0x52, 0x0f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3e, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0f, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x4b, 0x0a, + 0x04, 0x67, 0x72, 0x70, 0x63, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x47, + 0x72, 0x70, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x04, 0x67, 0x72, 0x70, 0x63, 0x12, 0x59, 0x0a, 0x0b, 0x74, 0x6c, + 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x2e, 0x54, 
0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0a, 0x74, 0x6c, 0x73, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x51, 0x0a, 0x10, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x53, 0x0a, 0x15, 0x47, 0x72, 0x70, 0x63, + 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 0x1e, 0x35, 0x0a, 0x33, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, + 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xc9, 0x01, + 0x0a, 0x16, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x73, + 0x65, 0x6e, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, + 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, + 0x65, 0x64, 0x12, 0x38, 0x0a, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x64, 0x3a, 0x3b, 0x9a, 0xc5, + 0x88, 0x1e, 0x36, 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x2e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x10, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x3a, 0x24, 0x9a, 0xc5, 0x88, + 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x42, 0x15, 0x0a, 0x0e, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, + 0x08, 0x03, 0x10, 0x04, 0x52, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x22, 0xa8, 0x06, 0x0a, 0x0a, + 0x43, 0x6f, 0x72, 0x73, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x5f, 0x0a, 0x19, 0x61, 0x6c, + 0x6c, 0x6f, 0x77, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 
0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x52, 0x16, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x23, 0x0a, 0x0d, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, + 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x5f, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, + 0x78, 0x70, 0x6f, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x17, 0x0a, 0x07, + 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, + 0x61, 0x78, 0x41, 0x67, 0x65, 0x12, 0x47, 0x0a, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, + 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x61, 0x6c, + 0x6c, 0x6f, 0x77, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x57, + 0x0a, 0x0e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, + 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0d, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x55, 0x0a, 0x0e, 0x73, 0x68, 0x61, 0x64, 0x6f, + 0x77, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, + 0x0d, 0x73, 0x68, 0x61, 0x64, 0x6f, 0x77, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x5b, + 0x0a, 0x1c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, + 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x61, 0x0a, 0x1f, 0x66, + 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x73, 0x18, 0x0d, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 
0x61, 0x6c, 0x75, 0x65, + 0x52, 0x1c, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x65, 0x66, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x73, 0x3a, 0x24, + 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x43, 0x6f, 0x72, 0x73, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x42, 0x13, 0x0a, 0x11, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x5f, + 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, + 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x52, 0x07, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0xa6, 0x2d, 0x0a, 0x0b, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, + 0x48, 0x00, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x36, 0x0a, 0x0e, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, + 0x01, 0x01, 0x48, 0x00, 0x52, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x11, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x10, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, + 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x3a, 0x0a, 0x18, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x16, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x76, 0x0a, 0x1f, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, + 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x5f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x27, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x48, 0x00, + 0x52, 0x1c, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x8e, + 0x01, 0x0a, 0x1f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x66, + 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, + 0x64, 0x65, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 
0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, + 0x10, 0x01, 0x52, 0x1b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x74, 0x46, 0x6f, + 0x75, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, + 0x45, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0d, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x32, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, + 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x0d, 0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x53, 0x0a, 0x0d, 0x72, 0x65, + 0x67, 0x65, 0x78, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x20, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, + 0x65, 0x52, 0x0c, 0x72, 0x65, 0x67, 0x65, 0x78, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, + 0x5a, 0x0a, 0x13, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x29, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x70, 0x61, 0x74, 0x68, 0x52, 0x65, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x3f, 0x0a, 0x14, 0x68, + 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x74, 0x65, + 0x72, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, + 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x48, 0x01, 0x52, 0x12, 0x68, 0x6f, 0x73, 0x74, 0x52, 0x65, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x12, 0x48, 0x0a, 0x11, + 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x48, 0x01, 0x52, 0x0f, 0x61, 0x75, 0x74, 0x6f, 0x48, 0x6f, 0x73, 0x74, 0x52, + 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x3d, 0x0a, 0x13, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, + 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x1d, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, + 0x48, 0x01, 0x52, 0x11, 
0x68, 0x6f, 0x73, 0x74, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x67, 0x0a, 0x17, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, + 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, 0x62, 0x73, + 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x48, 0x01, 0x52, 0x14, 0x68, 0x6f, 0x73, 0x74, 0x52, 0x65, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x67, 0x65, 0x78, 0x12, 0x35, + 0x0a, 0x17, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f, 0x78, 0x5f, 0x66, 0x6f, 0x72, 0x77, 0x61, + 0x72, 0x64, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x26, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x14, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x58, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, + 0x64, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x33, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3c, 0x0a, 0x0c, 0x69, 0x64, + 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x69, 0x64, 0x6c, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x56, 0x0a, 0x11, 0x65, 0x61, 0x72, 0x6c, + 0x79, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x28, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x0f, 0x65, 0x61, 0x72, 0x6c, 0x79, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x12, 0x45, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72, + 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4f, 0x0a, 0x19, 0x72, 0x65, 0x74, 0x72, 0x79, + 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x21, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, + 0x52, 0x16, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x54, 0x79, 0x70, + 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6e, 0x0a, 0x17, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x18, 0x1e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 
0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x52, 0x15, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, + 0x72, 0x69, 0x74, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, + 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x08, 0x70, 0x72, 0x69, + 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x41, 0x0a, 0x0b, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x0a, 0x72, 0x61, + 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x5c, 0x0a, 0x16, 0x69, 0x6e, 0x63, 0x6c, + 0x75, 0x64, 0x65, 0x5f, 0x76, 0x68, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, + 0x01, 0x52, 0x13, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x68, 0x52, 0x61, 0x74, 0x65, + 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x4e, 0x0a, 0x0b, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0a, 0x68, 0x61, 0x73, 0x68, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x42, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18, 0x11, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x72, + 0x73, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, + 0x2e, 0x30, 0x18, 0x01, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, 0x50, 0x0a, 0x10, 0x6d, 0x61, + 0x78, 0x5f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x17, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x0e, 0x6d, 0x61, + 0x78, 0x47, 0x72, 0x70, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x56, 0x0a, 0x13, + 0x67, 0x72, 0x70, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x6f, 0x66, 0x66, + 0x73, 0x65, 0x74, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 
0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, + 0x01, 0x52, 0x11, 0x67, 0x72, 0x70, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x4f, 0x66, + 0x66, 0x73, 0x65, 0x74, 0x12, 0x59, 0x0a, 0x0f, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x19, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x0e, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, + 0x67, 0x0a, 0x18, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x22, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x52, 0x16, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x80, 0x01, 0x0a, 0x18, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x39, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, + 0x30, 0x18, 0x01, 0x52, 0x16, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5f, 0x0a, 0x16, 0x6d, + 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, + 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, + 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x12, 0x45, 0x0a, 0x0c, + 0x68, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x1b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x64, 0x67, 0x65, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x68, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x12, 0x64, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 
0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xcb, 0x03, 0x0a, 0x13, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x12, 0x33, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x19, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x13, 0x12, 0x11, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x07, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x4b, 0x0a, 0x0e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, + 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0xf2, 0x98, 0xfe, 0x8f, 0x05, + 0x13, 0x12, 0x11, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x52, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x12, 0x59, 0x0a, 0x10, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x66, + 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0f, 0x72, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, + 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x64, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x64, 0x12, + 0x48, 0x0a, 0x21, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x64, 0x6f, + 0x77, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x5f, 0x61, 0x70, + 0x70, 0x65, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1d, 0x64, 0x69, 0x73, 0x61, + 0x62, 0x6c, 0x65, 0x53, 0x68, 0x61, 0x64, 0x6f, 0x77, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x75, 0x66, + 0x66, 0x69, 0x78, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x3a, 0x39, 0x9a, 0xc5, 0x88, 0x1e, 0x34, + 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x0b, 0x72, 0x75, 0x6e, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x1a, 0xd6, 0x0b, 0x0a, 0x0a, 0x48, 0x61, 0x73, 0x68, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4e, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x6f, 0x75, 0x74, 0x65, 
0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x06, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x48, 0x00, 0x52, 0x06, + 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x12, 0x79, 0x0a, 0x15, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x48, 0x00, 0x52, 0x14, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x12, 0x67, 0x0a, 0x0f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, + 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0e, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x5e, 0x0a, 0x0c, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x74, 0x65, + 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x74, 0x65, + 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x1a, 0xc6, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x12, 0x2e, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, + 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x53, 0x0a, 0x0d, 0x72, 0x65, 0x67, 0x65, 0x78, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, + 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 
0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, + 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x67, 0x65, 0x78, 0x52, + 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, + 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, + 0x5f, 0x0a, 0x0f, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x12, 0x25, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x11, 0xfa, 0x42, 0x0e, 0x72, 0x0c, 0x10, 0x01, 0x28, 0x80, 0x80, 0x01, 0xc8, 0x01, 0x00, + 0xc0, 0x01, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0f, 0xfa, 0x42, 0x0c, 0x72, 0x0a, 0x28, + 0x80, 0x80, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x1a, 0xfe, 0x01, 0x0a, 0x06, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x12, 0x1b, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, + 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x5d, 0x0a, 0x0a, 0x61, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6f, + 0x6b, 0x69, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x52, 0x0a, 0x61, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, + 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6f, 0x6b, 0x69, + 0x65, 0x1a, 0x7a, 0x0a, 0x14, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x49, 0x70, 0x3a, 0x45, 0x9a, 0xc5, 0x88, 0x1e, 0x40, 0x0a, 0x3e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, + 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x1a, 0x6e, 0x0a, + 0x0e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 
0x74, 0x65, 0x72, 0x12, + 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x3f, 0x9a, 0xc5, + 0x88, 0x1e, 0x3a, 0x0a, 0x38, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x1a, 0x66, 0x0a, + 0x0b, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x19, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, + 0x10, 0x01, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x3a, 0x3c, 0x9a, 0xc5, 0x88, 0x1e, 0x37, 0x0a, 0x35, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, + 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, + 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x17, 0x0a, 0x10, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, + 0x1a, 0xa3, 0x03, 0x0a, 0x0d, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x30, 0x0a, 0x0c, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, + 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x0b, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x65, 0x0a, 0x0e, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x1a, 0x8d, 0x01, 0x0a, 0x0d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x5d, 0x0a, 0x15, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, + 
0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x6f, 0x73, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x6f, 0x73, + 0x74, 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, + 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x88, 0x02, 0x0a, 0x11, 0x4d, 0x61, 0x78, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x13, + 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x50, 0x0a, 0x17, 0x67, 0x72, 0x70, 0x63, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6d, + 0x61, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x67, 0x72, 0x70, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x78, 0x12, 0x56, 0x0a, 0x1a, 0x67, 0x72, 0x70, + 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x17, 0x67, 0x72, 0x70, 0x63, 0x54, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4f, 0x66, 0x66, 0x73, 0x65, + 0x74, 0x22, 0x60, 0x0a, 0x1b, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x74, 0x46, + 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, + 0x12, 0x17, 0x0a, 0x13, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x41, 0x56, + 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, + 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x49, 0x4e, 0x54, 0x45, + 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, + 0x52, 0x10, 0x02, 0x22, 0x5e, 0x0a, 0x16, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, + 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, + 0x1e, 0x50, 0x41, 0x53, 0x53, 0x5f, 0x54, 0x48, 0x52, 0x4f, 0x55, 0x47, 0x48, 0x5f, 0x49, 0x4e, + 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, + 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x48, 0x41, 0x4e, 0x44, 0x4c, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x45, + 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, 0x01, 0x1a, + 0x02, 0x18, 0x01, 0x3a, 
0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x18, 0x0a, 0x11, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, + 0x03, 0xf8, 0x42, 0x01, 0x42, 0x18, 0x0a, 0x16, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x77, + 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, + 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x4a, 0x04, 0x08, 0x13, 0x10, 0x14, + 0x4a, 0x04, 0x08, 0x10, 0x10, 0x11, 0x4a, 0x04, 0x08, 0x16, 0x10, 0x17, 0x4a, 0x04, 0x08, 0x15, + 0x10, 0x16, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x52, 0x15, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x5f, 0x6d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, + 0xbf, 0x10, 0x0a, 0x0b, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, + 0x19, 0x0a, 0x08, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x72, 0x65, 0x74, 0x72, 0x79, 0x4f, 0x6e, 0x12, 0x52, 0x0a, 0x0b, 0x6e, 0x75, + 0x6d, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x13, 0xf2, + 0x98, 0xfe, 0x8f, 0x05, 0x0d, 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x52, 0x0a, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x41, + 0x0a, 0x0f, 0x70, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x54, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x12, 0x4a, 0x0a, 0x14, 0x70, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x79, 0x5f, 0x69, 0x64, 0x6c, + 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x70, 0x65, 0x72, 0x54, + 0x72, 0x79, 0x49, 0x64, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x57, 0x0a, + 0x0e, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, + 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, + 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x72, + 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x67, 0x0a, 0x14, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, + 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, + 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 
0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x48, 0x6f, + 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x12, 0x72, 0x65, 0x74, + 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, + 0x64, 0x0a, 0x18, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x16, 0x72, + 0x65, 0x74, 0x72, 0x79, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x65, 0x64, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x21, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6d, 0x61, + 0x78, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x1d, 0x68, 0x6f, 0x73, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x74, 0x72, 0x79, 0x4d, 0x61, 0x78, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x12, + 0x34, 0x0a, 0x16, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0d, 0x52, + 0x14, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x43, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x62, + 0x61, 0x63, 0x6b, 0x5f, 0x6f, 0x66, 0x66, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x52, 0x0c, + 0x72, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x78, 0x0a, 0x1b, + 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x74, + 0x72, 0x79, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x6f, 0x66, 0x66, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x65, + 0x64, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x52, 0x17, 0x72, + 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x64, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, + 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x51, 0x0a, 0x11, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, + 0x62, 0x6c, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x10, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, + 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x60, 0x0a, 0x19, 0x72, 0x65, 0x74, + 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 
0x73, 0x74, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x52, 0x17, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x1a, 0xb9, 0x01, 0x0a, 0x0d, + 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, + 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, + 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x74, + 0x72, 0x79, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, + 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xc3, 0x01, 0x0a, 0x12, 0x52, 0x65, 0x74, 0x72, + 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1b, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, + 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, + 0x74, 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, + 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xd6, 0x01, + 0x0a, 0x0c, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x4a, + 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x42, 0x0a, 0xfa, 0x42, 0x07, 0xaa, 0x01, 0x04, 0x08, 0x01, 0x2a, 0x00, 0x52, 0x0c, 0x62, 0x61, + 0x73, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x46, 0x0a, 0x0c, 0x6d, 0x61, + 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 
0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, + 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, + 0x61, 0x6c, 0x3a, 0x32, 0x9a, 0xc5, 0x88, 0x1e, 0x2d, 0x0a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x65, + 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, + 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x1a, 0x88, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x73, 0x65, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, + 0xc0, 0x01, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x56, 0x0a, 0x06, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, + 0x73, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x1a, 0xc0, 0x01, 0x0a, 0x17, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x65, + 0x64, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x5d, 0x0a, + 0x0d, 0x72, 0x65, 0x73, 0x65, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, + 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x0c, + 0x72, 0x65, 0x73, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x46, 0x0a, 0x0c, + 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 0x6c, 0x22, 0x34, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x65, 0x74, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x45, 0x43, + 0x4f, 0x4e, 0x44, 0x53, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x55, 0x4e, 0x49, 0x58, 0x5f, 0x54, + 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x10, 0x01, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, + 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x22, 0x9c, 0x02, 0x0a, 0x0b, 0x48, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x12, 0x50, 0x0a, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, + 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, + 0x28, 0x01, 0x52, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x73, 0x12, 0x5c, 0x0a, 0x19, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x63, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x17, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x63, + 0x65, 0x12, 0x36, 0x0a, 0x18, 0x68, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x6f, 0x6e, 0x5f, 0x70, 0x65, + 0x72, 0x5f, 0x74, 0x72, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x14, 0x68, 0x65, 0x64, 0x67, 0x65, 0x4f, 0x6e, 0x50, 0x65, 0x72, 0x54, + 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, + 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x48, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x22, 0xe1, 0x05, 0x0a, 0x0e, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0e, 0x68, 0x74, 0x74, 0x70, 0x73, 0x5f, 0x72, 0x65, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0d, 0x68, + 0x74, 0x74, 0x70, 0x73, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x29, 0x0a, 0x0f, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x52, + 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x30, 0x0a, 0x0d, 0x68, 0x6f, 0x73, 0x74, 0x5f, + 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, + 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x0c, 0x68, 0x6f, 0x73, + 0x74, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x6f, 0x72, + 0x74, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x0c, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x32, + 0x0a, 0x0d, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, + 0x01, 0x02, 0x48, 0x01, 0x52, 0x0c, 0x70, 0x61, 0x74, 0x68, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x12, 0x34, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x72, 0x65, 0x77, + 0x72, 0x69, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, + 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x48, 0x01, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x55, 0x0a, 0x0d, 0x72, 0x65, 0x67, 0x65, + 0x78, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 
0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x48, + 0x01, 0x52, 0x0c, 0x72, 0x65, 0x67, 0x65, 0x78, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, + 0x69, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, + 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, + 0x64, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x72, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, + 0x72, 0x69, 0x70, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0a, 0x73, 0x74, 0x72, 0x69, 0x70, 0x51, 0x75, 0x65, 0x72, 0x79, 0x22, 0x77, 0x0a, 0x14, 0x52, + 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, + 0x6f, 0x64, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x4d, 0x4f, 0x56, 0x45, 0x44, 0x5f, 0x50, 0x45, 0x52, + 0x4d, 0x41, 0x4e, 0x45, 0x4e, 0x54, 0x4c, 0x59, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x46, 0x4f, + 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x45, 0x45, 0x5f, 0x4f, 0x54, 0x48, + 0x45, 0x52, 0x10, 0x02, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x45, 0x4d, 0x50, 0x4f, 0x52, 0x41, 0x52, + 0x59, 0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, + 0x50, 0x45, 0x52, 0x4d, 0x41, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, + 0x43, 0x54, 0x10, 0x04, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, + 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x1a, + 0x0a, 0x18, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x42, 0x18, 0x0a, 0x16, 0x70, 0x61, + 0x74, 0x68, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x22, 0xa1, 0x01, 0x0a, 0x14, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x0b, 0xfa, + 0x42, 0x08, 0x2a, 0x06, 0x10, 0xd8, 0x04, 0x28, 0xc8, 0x01, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x34, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, + 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x2e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x15, 0x0a, 0x13, 0x4e, 0x6f, 0x6e, 0x46, + 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x63, 0x74, 
0x69, 0x6f, 0x6e, 0x22, + 0x91, 0x01, 0x0a, 0x09, 0x44, 0x65, 0x63, 0x6f, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x25, 0x0a, + 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x70, 0x61, 0x67, 0x61, 0x74, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x70, 0x61, 0x67, 0x61, 0x74, 0x65, 0x3a, 0x23, + 0x9a, 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x44, 0x65, 0x63, 0x6f, 0x72, 0x61, + 0x74, 0x6f, 0x72, 0x22, 0xd2, 0x02, 0x0a, 0x07, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x12, + 0x49, 0x0a, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, + 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x49, 0x0a, 0x0f, 0x72, 0x61, + 0x6e, 0x64, 0x6f, 0x6d, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, + 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x53, 0x61, 0x6d, + 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x4b, 0x0a, 0x10, 0x6f, 0x76, 0x65, 0x72, 0x61, 0x6c, 0x6c, + 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, + 0x74, 0x52, 0x0f, 0x6f, 0x76, 0x65, 0x72, 0x61, 0x6c, 0x6c, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x69, + 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x61, 0x67, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, + 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x54, 0x61, 0x67, 0x73, 0x3a, 0x21, 0x9a, 0xc5, 0x88, 0x1e, 0x1c, 0x0a, 0x1a, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x22, 0xb4, 0x01, 0x0a, 0x0e, 0x56, 0x69, 0x72, + 0x74, 0x75, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x07, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x1b, 0x0a, 0x04, 0x6e, + 
0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, + 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, + 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x2e, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x52, 0x07, + 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x22, + 0xc9, 0x1c, 0x0a, 0x09, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x3b, 0x0a, + 0x05, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, + 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, + 0x02, 0x18, 0x0a, 0x52, 0x05, 0x73, 0x74, 0x61, 0x67, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x69, + 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x4b, 0x0a, 0x07, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, + 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3f, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, + 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, + 0x64, 0x65, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x1a, 0xb5, 0x18, 0x0a, 0x06, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5e, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x12, 0x6d, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, + 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x12, 0x61, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 
0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x48, 0x00, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x5e, 0x0a, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, + 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x55, 0x0a, 0x0b, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, + 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x48, + 0x00, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x68, 0x0a, + 0x12, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x10, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x77, 0x0a, 0x10, 0x64, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x37, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, + 0x04, 0x03, 0x33, 0x2e, 0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, 0x01, 0x18, 0x01, 0x48, 0x00, 0x52, + 0x0f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x4e, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, + 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x44, 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x4a, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 
0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, + 0x00, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x71, 0x0a, 0x15, + 0x6d, 0x61, 0x73, 0x6b, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x61, 0x73, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x13, 0x6d, 0x61, 0x73, 0x6b, + 0x65, 0x64, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, + 0x81, 0x01, 0x0a, 0x1b, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, + 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x18, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x1a, 0x49, 0x0a, 0x0d, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x3a, 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, + 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x1a, 0x53, + 0x0a, 0x12, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x3a, 0x3d, 0x9a, 0xc5, 0x88, 0x1e, 0x38, 0x0a, 0x36, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, + 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x1a, 0xd1, 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, + 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, + 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0d, 0x64, 0x65, 0x73, 
0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x0e, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x69, + 0x66, 0x5f, 0x61, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, + 0x73, 0x6b, 0x69, 0x70, 0x49, 0x66, 0x41, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x3a, 0x39, 0x9a, 0xc5, + 0x88, 0x1e, 0x34, 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x1a, 0x49, 0x0a, 0x0d, 0x52, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x3a, 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, 0x0a, + 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, + 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x1a, 0xbe, 0x01, 0x0a, 0x13, 0x4d, 0x61, 0x73, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x52, 0x0a, 0x12, 0x76, 0x34, + 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x5f, 0x6c, 0x65, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x20, 0x52, 0x0f, 0x76, + 0x34, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x4d, 0x61, 0x73, 0x6b, 0x4c, 0x65, 0x6e, 0x12, 0x53, + 0x0a, 0x12, 0x76, 0x36, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, 0x73, 0x6b, + 0x5f, 0x6c, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, + 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x2a, 0x03, 0x18, + 0x80, 0x01, 0x52, 0x0f, 0x76, 0x36, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x4d, 0x61, 0x73, 0x6b, + 0x4c, 0x65, 0x6e, 0x1a, 0x9e, 0x01, 0x0a, 0x0a, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x4b, + 0x65, 0x79, 0x12, 0x32, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, + 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4b, 0x65, 0x79, 0x3a, 0x35, 0x9a, + 0xc5, 0x88, 0x1e, 0x30, 0x0a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, + 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, + 0x63, 0x4b, 0x65, 0x79, 0x1a, 0xb3, 0x02, 0x0a, 0x10, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, + 
0x09, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4b, 0x65, 0x79, + 0x12, 0x32, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, + 0x02, 0x10, 0x01, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3d, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, + 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x12, 0x48, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, + 0x01, 0x02, 0x08, 0x01, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x3a, 0x3b, 0x9a, + 0xc5, 0x88, 0x1e, 0x36, 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, + 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0xb8, 0x01, 0x0a, 0x0f, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x12, 0x2e, + 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, + 0x0d, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x50, + 0x0a, 0x0c, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, + 0x02, 0x10, 0x01, 0x52, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, + 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0xda, 0x02, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, + 0x74, 0x61, 0x12, 0x2e, 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, + 0x02, 0x10, 0x01, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4b, + 0x65, 0x79, 0x12, 0x50, 0x0a, 0x0c, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6b, + 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, + 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0x8a, 0x01, 
0x02, 0x10, 0x01, 0x52, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x4b, 0x65, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x59, 0x0a, 0x06, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x37, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, + 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x69, 0x66, 0x5f, + 0x61, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x73, 0x6b, + 0x69, 0x70, 0x49, 0x66, 0x41, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x22, 0x26, 0x0a, 0x06, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x59, 0x4e, 0x41, 0x4d, 0x49, 0x43, 0x10, + 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x4f, 0x55, 0x54, 0x45, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, + 0x10, 0x01, 0x1a, 0x97, 0x02, 0x0a, 0x18, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, + 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x32, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3d, 0x0a, 0x0c, 0x65, 0x78, + 0x70, 0x65, 0x63, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x65, 0x78, + 0x70, 0x65, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x61, 0x0a, 0x10, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x0f, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x3a, 0x2a, 0x9a, 0xc5, + 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x17, 0x0a, 0x10, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, + 0x01, 0x1a, 0xf2, 0x01, 0x0a, 0x08, 0x4f, 0x76, 
0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x66, + 0x0a, 0x10, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x72, + 0x69, 0x64, 0x65, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x0f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x63, 0x0a, 0x0f, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x50, 0x0a, 0x0c, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x4b, 0x65, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0b, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x42, 0x19, 0x0a, 0x12, 0x6f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x3a, 0x23, 0x9a, 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0xe6, 0x05, 0x0a, 0x0d, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x21, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, + 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x2e, 0x0a, 0x0b, 0x65, 0x78, 0x61, 0x63, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, + 0x18, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x65, 0x78, 0x61, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x12, 0x5c, 0x0a, 0x10, 0x73, 0x61, 0x66, 0x65, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, + 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0e, + 0x73, 0x61, 0x66, 0x65, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x3c, + 0x0a, 0x0b, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, + 0x52, 0x0a, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x25, 0x0a, 0x0d, + 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x12, 0x37, 0x0a, 0x0c, 0x70, 0x72, 0x65, 0x66, 0x69, 
0x78, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x04, 0x72, 0x02, + 0x10, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x48, 0x00, 0x52, + 0x0b, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x37, 0x0a, 0x0c, + 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, + 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x3b, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x73, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x42, 0x12, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, + 0x01, 0x48, 0x00, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x12, 0x49, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, + 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, + 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x21, 0x0a, + 0x0c, 0x69, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x12, 0x40, 0x0a, 0x1d, 0x74, 0x72, 0x65, 0x61, 0x74, 0x5f, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, + 0x67, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x73, 0x5f, 0x65, 0x6d, 0x70, 0x74, + 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x74, 0x72, 0x65, 0x61, 0x74, 0x4d, 0x69, + 0x73, 0x73, 0x69, 0x6e, 0x67, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x73, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x18, 0x0a, 0x16, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x70, 0x65, 0x63, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, + 0x04, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x52, 0x0b, 0x72, 0x65, 0x67, 0x65, 0x78, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x22, 0xa1, 0x02, 0x0a, 0x15, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x1e, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0a, 0xfa, 0x42, + 0x07, 0x72, 0x05, 0x10, 0x01, 0x28, 0x80, 0x08, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x53, + 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, + 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, + 
0x74, 0x63, 0x68, 0x12, 0x25, 0x0a, 0x0d, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x72, + 0x65, 0x73, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, + 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x21, 0x0a, 0x1f, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, + 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x22, 0x86, 0x03, 0x0a, 0x16, 0x49, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x12, 0x52, 0x0a, 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, + 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x12, 0x40, 0x0a, 0x17, 0x72, 0x65, 0x64, 0x69, 0x72, + 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0d, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, + 0x10, 0x05, 0x52, 0x15, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x4a, 0x0a, 0x0a, 0x70, 0x72, 0x65, + 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x64, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, + 0x72, 0x6f, 0x73, 0x73, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x5f, 0x72, 0x65, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x61, 0x6c, 0x6c, 0x6f, + 0x77, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x52, 0x65, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x18, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x70, 0x79, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x0f, 0x92, 0x01, 0x0c, 0x18, 0x01, + 0x22, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x15, 0x72, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x43, 0x6f, 0x70, + 0x79, 0x22, 0x79, 0x0a, 0x0c, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x2c, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x1f, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x42, 0x8b, 0x01, 0xba, + 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, + 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x14, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, + 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, + 0x76, 0x33, 0x3b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var ( diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.go index 02027795c3..fee3bad823 100644 --- a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.go +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.go @@ -48,8 +48,14 @@ const ( // (Envoy does not require HTTP/2 to take place over TLS or to use ALPN. // Prior knowledge is allowed). HttpConnectionManager_HTTP2 HttpConnectionManager_CodecType = 2 +<<<<<<< HEAD // The connection manager will assume that the client is speaking HTTP/3. // This needs to be consistent with listener and transport socket config. +======= + // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with + // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient + // to distinguish HTTP1 and HTTP2 traffic. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) HttpConnectionManager_HTTP3 HttpConnectionManager_CodecType = 3 ) @@ -393,6 +399,10 @@ type HttpConnectionManager struct { // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. Http2ProtocolOptions *v3.Http2ProtocolOptions `protobuf:"bytes,9,opt,name=http2_protocol_options,json=http2ProtocolOptions,proto3" json:"http2_protocol_options,omitempty"` // Additional HTTP/3 settings that are passed directly to the HTTP/3 codec. 
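	// These settings apply to the HTTP/3 codec selected via codec_type above.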
+<<<<<<< HEAD +======= + // [#not-implemented-hide:] +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Http3ProtocolOptions *v3.Http3ProtocolOptions `protobuf:"bytes,44,opt,name=http3_protocol_options,json=http3ProtocolOptions,proto3" json:"http3_protocol_options,omitempty"` // An optional override that the connection manager will write to the server // header in responses. If not set, the default is “envoy“. @@ -407,6 +417,7 @@ type HttpConnectionManager struct { SchemeHeaderTransformation *v3.SchemeHeaderTransformation `protobuf:"bytes,48,opt,name=scheme_header_transformation,json=schemeHeaderTransformation,proto3" json:"scheme_header_transformation,omitempty"` // The maximum request headers size for incoming connections. // If unconfigured, the default max request headers allowed is 60 KiB. +<<<<<<< HEAD // The default value can be overridden by setting runtime key “envoy.reloadable_features.max_request_headers_size_kb“. // Requests that exceed this limit will receive a 431 response. // @@ -414,6 +425,9 @@ type HttpConnectionManager struct { // // HTTP/2 (when using nghttp2) limits a single header to around 100kb. // HTTP/3 limits a single header to around 1024kb. +======= + // Requests that exceed this limit will receive a 431 response. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) MaxRequestHeadersKb *wrapperspb.UInt32Value `protobuf:"bytes,29,opt,name=max_request_headers_kb,json=maxRequestHeadersKb,proto3" json:"max_request_headers_kb,omitempty"` // The stream idle timeout for connections managed by the connection manager. // If not specified, this defaults to 5 minutes. The default value was selected @@ -585,6 +599,7 @@ type HttpConnectionManager struct { // purposes. If unspecified, only RFC1918 IP addresses will be considered internal. // See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more // information about internal/external addresses. +<<<<<<< HEAD // // .. warning:: // @@ -614,6 +629,8 @@ type HttpConnectionManager struct { // cidr_ranges: // address_prefix: ::1 // prefix_len: 128 +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) InternalAddressConfig *HttpConnectionManager_InternalAddressConfig `protobuf:"bytes,25,opt,name=internal_address_config,json=internalAddressConfig,proto3" json:"internal_address_config,omitempty"` // If set, Envoy will not append the remote address to the // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. This may be used in diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.go index d21286cf88..8ef3ccbf93 100644 --- a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.go +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.go @@ -37,12 +37,21 @@ const ( // regardless of result. Only failed queries count toward eps. A config // parameter error_utilization_penalty controls the penalty to adjust endpoint // weights using eps and qps. The weight of a given endpoint is computed as: +<<<<<<< HEAD // “qps / (utilization + eps/qps * error_utilization_penalty)“. 
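// For example, with qps = 100, utilization = 0.5, eps = 2, and
// error_utilization_penalty = 1.0, the weight is
// 100 / (0.5 + (2/100)*1.0) = 100 / 0.52 ≈ 192.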
// // See the :ref:`load balancing architecture // overview` for more information. // // [#next-free-field: 8] +======= +// +// qps / (utilization + eps/qps * error_utilization_penalty) +// +// See the :ref:`load balancing architecture overview` for more information. +// +// [#next-free-field: 7] +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type ClientSideWeightedRoundRobin struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -75,11 +84,14 @@ type ClientSideWeightedRoundRobin struct { // calculated as eps/qps. Configuration is rejected if this value is negative. // Default is 1.0. ErrorUtilizationPenalty *wrapperspb.FloatValue `protobuf:"bytes,6,opt,name=error_utilization_penalty,json=errorUtilizationPenalty,proto3" json:"error_utilization_penalty,omitempty"` +<<<<<<< HEAD // By default, endpoint weight is computed based on the :ref:`application_utilization ` field reported by the endpoint. // If that field is not set, then utilization will instead be computed by taking the max of the values of the metrics specified here. // For map fields in the ORCA proto, the string will be of the form “.“. For example, the string “named_metrics.foo“ will mean to look for the key “foo“ in the ORCA :ref:`named_metrics ` field. // If none of the specified metrics are present in the load report, then :ref:`cpu_utilization ` is used instead. MetricNamesForComputingUtilization []string `protobuf:"bytes,7,rep,name=metric_names_for_computing_utilization,json=metricNamesForComputingUtilization,proto3" json:"metric_names_for_computing_utilization,omitempty"` +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ClientSideWeightedRoundRobin) Reset() { @@ -156,6 +168,7 @@ func (x *ClientSideWeightedRoundRobin) GetErrorUtilizationPenalty() *wrapperspb. 
return nil } +<<<<<<< HEAD func (x *ClientSideWeightedRoundRobin) GetMetricNamesForComputingUtilization() []string { if x != nil { return x.MetricNamesForComputingUtilization @@ -163,6 +176,8 @@ func (x *ClientSideWeightedRoundRobin) GetMetricNamesForComputingUtilization() [ return nil } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var File_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto protoreflect.FileDescriptor var file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDesc = []byte{ @@ -185,7 +200,11 @@ var file_envoy_extensions_load_balancing_policies_client_side_weighted_round_rob 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, +<<<<<<< HEAD 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xdb, 0x04, 0x0a, 0x1c, +======= + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x04, 0x0a, 0x1c, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x69, 0x64, 0x65, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x12, 0x4f, 0x0a, 0x16, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6f, 0x6f, 0x62, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, @@ -218,6 +237,7 @@ var file_envoy_extensions_load_balancing_policies_client_side_weighted_round_rob 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x0a, 0x05, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x52, 0x17, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, +<<<<<<< HEAD 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x12, 0x52, 0x0a, 0x26, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, @@ -243,6 +263,28 @@ var file_envoy_extensions_load_balancing_policies_client_side_weighted_round_rob 0x6e, 0x74, 0x5f, 0x73, 0x69, 0x64, 0x65, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +======= + 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x42, 0xa2, 0x02, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, + 0x0a, 0x5a, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, + 0x73, 0x69, 0x64, 0x65, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x6f, + 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x21, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x53, 0x69, 0x64, 0x65, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, + 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x96, 0x01, 0x67, 0x69, 
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, + 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, + 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x69, 0x64, 0x65, 0x5f, + 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, + 0x6f, 0x62, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, + 0x69, 0x64, 0x65, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, + 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var ( diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin_vtproto.pb.go index f7e73831a5..cb42a3cf51 100644 --- a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin_vtproto.pb.go +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin_vtproto.pb.go @@ -50,6 +50,7 @@ func (m *ClientSideWeightedRoundRobin) MarshalToSizedBufferVTStrict(dAtA []byte) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } +<<<<<<< HEAD if len(m.MetricNamesForComputingUtilization) > 0 { for iNdEx := len(m.MetricNamesForComputingUtilization) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.MetricNamesForComputingUtilization[iNdEx]) @@ -59,6 +60,8 @@ func (m *ClientSideWeightedRoundRobin) MarshalToSizedBufferVTStrict(dAtA []byte) dAtA[i] = 0x3a } } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if m.ErrorUtilizationPenalty != nil { size, err := (*wrapperspb.FloatValue)(m.ErrorUtilizationPenalty).MarshalToSizedBufferVTStrict(dAtA[:i]) if err != nil { @@ -152,12 +155,15 @@ func (m *ClientSideWeightedRoundRobin) SizeVT() (n int) { l = (*wrapperspb.FloatValue)(m.ErrorUtilizationPenalty).SizeVT() n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } +<<<<<<< HEAD if len(m.MetricNamesForComputingUtilization) > 0 { for _, s := range m.MetricNamesForComputingUtilization { l = len(s) n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) n += len(m.unknownFields) return n } diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.go index f9aa84130a..3021c194e2 100644 --- a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.go +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.go @@ -705,7 +705,11 @@ type CertificateProviderPluginInstance struct { sizeCache protoimpl.SizeCache unknownFields 
protoimpl.UnknownFields +<<<<<<< HEAD // Provider instance name. +======= + // Provider instance name. If not present, defaults to "default". +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // Instance names should generally be defined not in terms of the underlying provider // implementation (e.g., "file_watcher") but rather in terms of the function of the @@ -1345,6 +1349,7 @@ var file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDesc = []byte 0xb7, 0x8b, 0xa4, 0x02, 0x01, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x54, 0x6c, 0x73, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, +<<<<<<< HEAD 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x7c, 0x0a, 0x21, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, @@ -1492,6 +1497,155 @@ var file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDesc = []byte 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x6c, 0x73, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +======= + 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x73, 0x0a, 0x21, 0x43, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, + 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, + 0xc6, 0x02, 0x0a, 0x15, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, + 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x6f, 0x0a, 0x08, 0x73, 0x61, 0x6e, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x48, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, + 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, + 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, + 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x53, 0x61, + 0x6e, 0x54, 0x79, 0x70, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x82, 0x01, 0x04, 0x10, 0x01, 0x20, + 0x00, 0x52, 0x07, 0x73, 0x61, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x48, 0x0a, 0x07, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, + 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 
0x12, 0x10, 0x0a, 0x03, 0x6f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6f, 0x69, 0x64, 0x22, 0x60, 0x0a, 0x07, 0x53, 0x61, 0x6e, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x41, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x45, + 0x4d, 0x41, 0x49, 0x4c, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x4e, 0x53, 0x10, 0x02, 0x12, + 0x07, 0x0a, 0x03, 0x55, 0x52, 0x49, 0x10, 0x03, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x50, 0x5f, 0x41, + 0x44, 0x44, 0x52, 0x45, 0x53, 0x53, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x4f, 0x54, 0x48, 0x45, + 0x52, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x10, 0x05, 0x22, 0xa9, 0x0d, 0x0a, 0x1c, 0x43, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x57, 0x0a, 0x0a, 0x74, 0x72, 0x75, + 0x73, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, + 0x16, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x10, 0x12, 0x0e, 0x63, 0x61, 0x5f, 0x63, 0x65, 0x72, 0x74, + 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, + 0x43, 0x61, 0x12, 0xad, 0x01, 0x0a, 0x20, 0x63, 0x61, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x69, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, + 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x50, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x16, 0xf2, 0x98, 0xfe, + 0x8f, 0x05, 0x10, 0x12, 0x0e, 0x63, 0x61, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x52, 0x1d, 0x63, 0x61, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x12, 0x83, 0x01, 0x0a, 0x11, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x72, 0x6f, + 0x6f, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x57, + 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x52, 0x6f, + 0x6f, 0x74, 0x43, 0x65, 0x72, 0x74, 0x73, 0x52, 0x0f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x52, + 0x6f, 0x6f, 0x74, 0x43, 0x65, 0x72, 0x74, 0x73, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x64, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 
0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x10, 0x77, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x46, 0x0a, + 0x17, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x5f, 0x73, 0x70, 0x6b, 0x69, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x42, 0x0e, + 0xfa, 0x42, 0x0b, 0x92, 0x01, 0x08, 0x22, 0x06, 0x72, 0x04, 0x10, 0x2c, 0x28, 0x2c, 0x52, 0x15, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x53, 0x70, 0x6b, 0x69, 0x12, 0x46, 0x0a, 0x17, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, + 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x68, 0x61, 0x73, 0x68, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x92, 0x01, 0x08, 0x22, 0x06, + 0x72, 0x04, 0x10, 0x40, 0x28, 0x5f, 0x52, 0x15, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x48, 0x61, 0x73, 0x68, 0x12, 0x82, 0x01, + 0x0a, 0x1d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x73, 0x75, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, + 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, + 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, + 0x33, 0x2e, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x19, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x54, 0x79, + 0x70, 0x65, 0x64, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, + 0x65, 0x73, 0x12, 0x68, 0x0a, 0x17, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x75, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x09, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, + 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x14, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x75, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x6b, 0x0a, 0x24, + 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x63, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, + 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x21, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x53, + 0x69, 0x67, 0x6e, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x32, 0x0a, 0x03, 0x63, 0x72, 0x6c, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 
0x33, 0x2e, 0x44, 0x61, + 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x03, 0x63, 0x72, 0x6c, 0x12, 0x3a, 0x0a, + 0x19, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x63, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x17, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x64, 0x43, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0xa2, 0x01, 0x0a, 0x18, 0x74, 0x72, + 0x75, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x5e, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, + 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, + 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x16, 0x74, 0x72, 0x75, 0x73, 0x74, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x62, + 0x0a, 0x17, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x63, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x38, 0x0a, 0x19, 0x6f, 0x6e, 0x6c, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, + 0x79, 0x5f, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x63, 0x72, 0x6c, 0x18, + 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x6f, 0x6e, 0x6c, 0x79, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x79, 0x4c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x43, 0x72, 0x6c, 0x12, 0x4f, 0x0a, 0x10, + 0x6d, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x64, 0x65, 0x70, 0x74, 0x68, + 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x0e, 0x6d, + 0x61, 0x78, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x44, 0x65, 0x70, 0x74, 0x68, 0x1a, 0x11, 0x0a, + 0x0f, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x52, 0x6f, 0x6f, 0x74, 0x43, 0x65, 0x72, 0x74, 0x73, + 0x22, 0x46, 0x0a, 0x16, 0x54, 0x72, 0x75, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x12, 0x56, 0x45, + 0x52, 0x49, 0x46, 0x59, 0x5f, 0x54, 0x52, 0x55, 0x53, 0x54, 0x5f, 0x43, 0x48, 0x41, 0x49, 0x4e, + 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x41, 0x43, 0x43, 0x45, 0x50, 0x54, 0x5f, 0x55, 0x4e, 0x54, + 0x52, 0x55, 0x53, 0x54, 0x45, 0x44, 0x10, 0x01, 0x3a, 0x35, 0x9a, 0xc5, 0x88, 0x1e, 0x30, 0x0a, + 
0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, + 0x74, 0x68, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x4a, + 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x52, 0x17, 0x76, 0x65, 0x72, + 0x69, 0x66, 0x79, 0x5f, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x74, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x42, 0xa8, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, + 0x37, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, + 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x56, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, + 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, + 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, + 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x6c, 0x73, 0x76, 0x33, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var ( diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.validate.go index 6e32f5af51..23a3be78da 100644 --- a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.validate.go +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.validate.go @@ -826,6 +826,7 @@ func (m *CertificateProviderPluginInstance) validate(all bool) error { var errors []error +<<<<<<< HEAD if utf8.RuneCountInString(m.GetInstanceName()) < 1 { err := CertificateProviderPluginInstanceValidationError{ field: "InstanceName", @@ -836,6 +837,9 @@ func (m *CertificateProviderPluginInstance) validate(all bool) error { } errors = append(errors, err) } +======= + // no validation rules for InstanceName +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // no validation rules for CertificateName diff --git a/vendor/github.com/go-jose/go-jose/v3/cryptosigner/cryptosigner.go b/vendor/github.com/go-jose/go-jose/v3/cryptosigner/cryptosigner.go new file mode 100644 index 0000000000..ddad5c96d5 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/cryptosigner/cryptosigner.go @@ -0,0 +1,147 @@ +/*- + * Copyright 2018 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package cryptosigner implements an OpaqueSigner that wraps a "crypto".Signer +// +// https://godoc.org/crypto#Signer +package cryptosigner + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "encoding/asn1" + "io" + "math/big" + + "github.com/go-jose/go-jose/v3" +) + +// Opaque creates an OpaqueSigner from a "crypto".Signer +func Opaque(s crypto.Signer) jose.OpaqueSigner { + pk := &jose.JSONWebKey{ + Key: s.Public(), + } + return &cryptoSigner{signer: s, rand: rand.Reader, pk: pk} +} + +type cryptoSigner struct { + pk *jose.JSONWebKey + signer crypto.Signer + rand io.Reader +} + +func (s *cryptoSigner) Public() *jose.JSONWebKey { + return s.pk +} + +func (s *cryptoSigner) Algs() []jose.SignatureAlgorithm { + switch key := s.signer.Public().(type) { + case ed25519.PublicKey: + return []jose.SignatureAlgorithm{jose.EdDSA} + case *ecdsa.PublicKey: + switch key.Curve { + case elliptic.P256(): + return []jose.SignatureAlgorithm{jose.ES256} + case elliptic.P384(): + return []jose.SignatureAlgorithm{jose.ES384} + case elliptic.P521(): + return []jose.SignatureAlgorithm{jose.ES512} + default: + return nil + } + case *rsa.PublicKey: + return []jose.SignatureAlgorithm{jose.RS256, jose.RS384, jose.RS512, jose.PS256, jose.PS384, jose.PS512} + default: + return nil + } +} + +func (s *cryptoSigner) SignPayload(payload []byte, alg jose.SignatureAlgorithm) ([]byte, error) { + var hash crypto.Hash + switch alg { + case jose.EdDSA: + case jose.RS256, jose.PS256, jose.ES256: + hash = crypto.SHA256 + case jose.RS384, jose.PS384, jose.ES384: + hash = crypto.SHA384 + case jose.RS512, jose.PS512, jose.ES512: + hash = crypto.SHA512 + default: + return nil, jose.ErrUnsupportedAlgorithm + } + + var hashed []byte + if hash != crypto.Hash(0) { + hasher := hash.New() + if _, err := hasher.Write(payload); err != nil { + return nil, err + } + hashed = hasher.Sum(nil) + } + + var ( + out []byte + err error + ) + switch alg { + case jose.EdDSA: + out, err = s.signer.Sign(s.rand, payload, crypto.Hash(0)) + case jose.ES256, jose.ES384, jose.ES512: + var byteLen int + switch alg { + case jose.ES256: + byteLen = 32 + case jose.ES384: + byteLen = 48 + case jose.ES512: + byteLen = 66 + } + var b []byte + b, err = s.signer.Sign(s.rand, hashed, hash) + if err != nil { + return nil, err + } + + sig := struct { + R, S *big.Int + }{} + if _, err = asn1.Unmarshal(b, &sig); err != nil { + return nil, err + } + + rBytes := sig.R.Bytes() + out = make([]byte, byteLen) + copy(out[byteLen-len(rBytes):], rBytes) + + sBytes := sig.S.Bytes() + sBytesPadded := make([]byte, byteLen) + copy(sBytesPadded[byteLen-len(sBytes):], sBytes) + + out = append(out, sBytesPadded...) 
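+		// The two fixed-width values are concatenated as R || S, the raw signature
+		// format JWS uses for ECDSA (RFC 7518, section 3.4), rather than the ASN.1
+		// DER encoding that crypto.Signer returned above.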
+ case jose.RS256, jose.RS384, jose.RS512: + out, err = s.signer.Sign(s.rand, hashed, hash) + case jose.PS256, jose.PS384, jose.PS512: + out, err = s.signer.Sign(s.rand, hashed, &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: hash, + }) + } + return out, err +} diff --git a/vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go b/vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go index 365a1d0477..1cf8bb5703 100644 --- a/vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go +++ b/vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go @@ -14,10 +14,14 @@ import ( ) var ( +<<<<<<< HEAD reg = regexp.MustCompile(`<([/!]?)([^>]+?)(/?)>`) reXMLComments = regexp.MustCompile(`(?s)()`) reSpaces = regexp.MustCompile(`(?s)>\s+<`) reNewlines = regexp.MustCompile(`\r*\n`) +======= + reg = regexp.MustCompile(`<([/!]?)([^>]+?)(/?)>`) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // NL is the newline string used in XML output. NL = "\n" ) @@ -36,19 +40,33 @@ func FormatXML(xmls, prefix, indent string, nestedTagsInComments ...bool) string if len(nestedTagsInComments) > 0 { nestedTagsInComment = nestedTagsInComments[0] } +<<<<<<< HEAD src := reSpaces.ReplaceAllString(xmls, "><") if nestedTagsInComment { src = reXMLComments.ReplaceAllStringFunc(src, func(m string) string { parts := reXMLComments.FindStringSubmatch(m) p2 := reNewlines.ReplaceAllString(parts[2], " ") +======= + reXmlComments := regexp.MustCompile(`(?s)()`) + src := regexp.MustCompile(`(?s)>\s+<`).ReplaceAllString(xmls, "><") + if nestedTagsInComment { + src = reXmlComments.ReplaceAllStringFunc(src, func(m string) string { + parts := reXmlComments.FindStringSubmatch(m) + p2 := regexp.MustCompile(`\r*\n`).ReplaceAllString(parts[2], " ") +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return parts[1] + html.EscapeString(p2) + parts[3] }) } rf := replaceTag(prefix, indent) r := prefix + reg.ReplaceAllStringFunc(src, rf) if nestedTagsInComment { +<<<<<<< HEAD r = reXMLComments.ReplaceAllStringFunc(r, func(m string) string { parts := reXMLComments.FindStringSubmatch(m) +======= + r = reXmlComments.ReplaceAllStringFunc(r, func(m string) string { + parts := reXmlComments.FindStringSubmatch(m) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return parts[1] + html.UnescapeString(parts[2]) + parts[3] }) } diff --git a/vendor/github.com/golangci/gofmt/gofmt/golangci.go b/vendor/github.com/golangci/gofmt/gofmt/golangci.go index 459e872199..054028f5e6 100644 --- a/vendor/github.com/golangci/gofmt/gofmt/golangci.go +++ b/vendor/github.com/golangci/gofmt/gofmt/golangci.go @@ -14,11 +14,14 @@ import ( "github.com/golangci/gofmt/gofmt/internal/diff" ) +<<<<<<< HEAD type Options struct { NeedSimplify bool RewriteRules []RewriteRule } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var parserModeMu sync.RWMutex type RewriteRule struct { @@ -78,6 +81,7 @@ func RunRewrite(filename string, needSimplify bool, rewriteRules []RewriteRule) return diff.Diff(oldName, src, newName, res), nil } +<<<<<<< HEAD func Source(filename string, src []byte, opts Options) ([]byte, error) { fset := token.NewFileSet() @@ -104,6 +108,8 @@ func Source(filename string, src []byte, opts Options) ([]byte, error) { return format(fset, file, sourceAdj, indentAdj, src, printer.Config{Mode: printerMode, Tabwidth: tabWidth}) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func rewriteFileContent(fset *token.FileSet, file *ast.File, rewriteRules []RewriteRule) (*ast.File, error) { for _, rewriteRule := range rewriteRules { pattern, err := 
parseExpression(rewriteRule.Pattern, "pattern") diff --git a/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/main.go b/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/main.go index bf235bf17f..7f5f9cca05 100644 --- a/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/main.go +++ b/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/main.go @@ -1,7 +1,10 @@ package main import ( +<<<<<<< HEAD "cmp" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "fmt" "os" "runtime/debug" @@ -64,9 +67,23 @@ func createBuildInfo() commands.BuildInfo { } } +<<<<<<< HEAD revision = cmp.Or(revision, "unknown") modified = cmp.Or(modified, "?") info.Date = cmp.Or(info.Date, "(unknown)") +======= + if revision == "" { + revision = "unknown" + } + + if modified == "" { + modified = "?" + } + + if info.Date == "" { + info.Date = "(unknown)" + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) info.Commit = fmt.Sprintf("(%s, modified: %s, mod sum: %q)", revision, modified, buildInfo.Main.Sum) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/config_verify.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/config_verify.go index 5a26c75aed..e4efa6b7c1 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/config_verify.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/config_verify.go @@ -1,20 +1,31 @@ package commands import ( +<<<<<<< HEAD "context" "encoding/json" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "errors" "fmt" "net/http" "os" "path/filepath" +<<<<<<< HEAD "strconv" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "strings" "time" hcversion "github.com/hashicorp/go-version" "github.com/pelletier/go-toml/v2" +<<<<<<< HEAD "github.com/santhosh-tekuri/jsonschema/v6" +======= + "github.com/santhosh-tekuri/jsonschema/v5" + "github.com/santhosh-tekuri/jsonschema/v5/httploader" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/spf13/cobra" "github.com/spf13/pflag" "gopkg.in/yaml.v3" @@ -45,7 +56,13 @@ func (c *configCommand) executeVerify(cmd *cobra.Command, _ []string) error { return fmt.Errorf("[%s] validate: %w", usedConfigFile, err) } +<<<<<<< HEAD printValidationDetail(cmd, v.DetailedOutput()) +======= + detail := v.DetailedOutput() + + printValidationDetail(cmd, &detail) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return errors.New("the configuration contains invalid elements") } @@ -100,12 +117,19 @@ func createSchemaURL(flags *pflag.FlagSet, buildInfo BuildInfo) (string, error) } func validateConfiguration(schemaPath, targetFile string) error { +<<<<<<< HEAD compiler := jsonschema.NewCompiler() compiler.UseLoader(jsonschema.SchemeURLLoader{ "file": jsonschema.FileLoader{}, "https": newJSONSchemaHTTPLoader(), }) compiler.DefaultDraft(jsonschema.Draft7) +======= + httploader.Client = &http.Client{Timeout: 2 * time.Second} + + compiler := jsonschema.NewCompiler() + compiler.Draft = jsonschema.Draft7 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) schema, err := compiler.Compile(schemaPath) if err != nil { @@ -135,6 +159,7 @@ func validateConfiguration(schemaPath, targetFile string) error { return schema.Validate(m) } +<<<<<<< HEAD func printValidationDetail(cmd *cobra.Command, detail *jsonschema.OutputUnit) { if detail.Error != nil { data, _ := json.Marshal(detail.Error) @@ -142,6 +167,12 @@ func printValidationDetail(cmd *cobra.Command, detail *jsonschema.OutputUnit) { cmd.PrintErrf("jsonschema: %q does not 
validate with %q: %s\n", strings.ReplaceAll(strings.TrimPrefix(detail.InstanceLocation, "/"), "/", "."), detail.KeywordLocation, details) +======= +func printValidationDetail(cmd *cobra.Command, detail *jsonschema.Detailed) { + if detail.Error != "" { + cmd.PrintErrf("jsonschema: %q does not validate with %q: %s\n", + strings.ReplaceAll(strings.TrimPrefix(detail.InstanceLocation, "/"), "/", "."), detail.KeywordLocation, detail.Error) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } for _, d := range detail.Errors { @@ -182,6 +213,7 @@ func decodeTomlFile(filename string) (any, error) { return m, nil } +<<<<<<< HEAD type jsonschemaHTTPLoader struct { *http.Client @@ -212,3 +244,5 @@ func (l jsonschemaHTTPLoader) Load(url string) (any, error) { return jsonschema.UnmarshalJSON(resp.Body) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/flagsets.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/flagsets.go index 9f17018b2f..44bd56e81b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/flagsets.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/flagsets.go @@ -28,11 +28,19 @@ func setupLintersFlagSet(v *viper.Viper, fs *pflag.FlagSet) { color.GreenString("Enable only fast linters from enabled linters set (first run won't be fast)")) internal.AddHackedStringSliceP(fs, "presets", "p", +<<<<<<< HEAD formatList("Enable presets of linters:", lintersdb.AllPresets(), "Run 'golangci-lint help linters' to see them.", "This option implies option --disable-all", ), ) +======= + color.GreenString(fmt.Sprintf("Enable presets (%s) of linters.\n"+ + "Run 'golangci-lint help linters' to see them.\n"+ + "This option implies option --disable-all", + strings.Join(lintersdb.AllPresets(), "|"), + ))) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) fs.StringSlice("enable-only", nil, color.GreenString("Override linters configuration section to only run the specific linter(s)")) // Flags only. @@ -49,13 +57,22 @@ func setupRunFlagSet(v *viper.Viper, fs *pflag.FlagSet) { internal.AddFlagAndBind(v, fs, fs.String, "go", "run.go", "", color.GreenString("Targeted Go version")) internal.AddHackedStringSlice(fs, "build-tags", color.GreenString("Build tags")) +<<<<<<< HEAD internal.AddFlagAndBind(v, fs, fs.Duration, "timeout", "run.timeout", defaultTimeout, color.GreenString("Timeout for total work. If <= 0, the timeout is disabled")) +======= + internal.AddFlagAndBind(v, fs, fs.Duration, "timeout", "run.timeout", defaultTimeout, color.GreenString("Timeout for total work")) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) internal.AddFlagAndBind(v, fs, fs.Bool, "tests", "run.tests", true, color.GreenString("Analyze tests (*_test.go)")) internal.AddDeprecatedHackedStringSlice(fs, "skip-files", color.GreenString("Regexps of files to skip")) internal.AddDeprecatedHackedStringSlice(fs, "skip-dirs", color.GreenString("Regexps of directories to skip")) +<<<<<<< HEAD +======= + internal.AddDeprecatedFlagAndBind(v, fs, fs.Bool, "skip-dirs-use-default", "run.skip-dirs-use-default", true, + getDefaultDirectoryExcludeHelp()) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) const allowParallelDesc = "Allow multiple parallel golangci-lint instances running.\n" + "If false (default) - golangci-lint acquires file lock on start." 
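The main.go hunk earlier in this patch replaces three empty-string checks with cmp.Or, which entered the standard library in Go 1.22 and returns the first of its arguments that is not the zero value for the type. A minimal standalone sketch of the equivalence (illustrative only, not part of the patch):

package main

import (
	"cmp"
	"fmt"
)

func main() {
	revision := "" // the zero value for string, so cmp.Or falls through
	fmt.Println(cmp.Or(revision, "unknown")) // prints "unknown"

	revision = "70e0318b1"
	fmt.Println(cmp.Or(revision, "unknown")) // prints "70e0318b1": the first non-zero value wins
}

Both sides of that conflict are behaviorally identical for strings; the cmp.Or form simply requires Go 1.22 or newer.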
@@ -68,11 +85,20 @@ func setupRunFlagSet(v *viper.Viper, fs *pflag.FlagSet) { func setupOutputFlagSet(v *viper.Viper, fs *pflag.FlagSet) { internal.AddFlagAndBind(v, fs, fs.String, "out-format", "output.formats", config.OutFormatColoredLineNumber, +<<<<<<< HEAD formatList("Formats of output:", config.AllOutputFormats)) +======= + color.GreenString(fmt.Sprintf("Formats of output: %s", strings.Join(config.AllOutputFormats, "|")))) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) internal.AddFlagAndBind(v, fs, fs.Bool, "print-issued-lines", "output.print-issued-lines", true, color.GreenString("Print lines of code with issue")) internal.AddFlagAndBind(v, fs, fs.Bool, "print-linter-name", "output.print-linter-name", true, color.GreenString("Print linter name in issue line")) +<<<<<<< HEAD +======= + internal.AddFlagAndBind(v, fs, fs.Bool, "uniq-by-line", "output.uniq-by-line", true, + color.GreenString("Make issues output unique by line")) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) internal.AddFlagAndBind(v, fs, fs.Bool, "sort-results", "output.sort-results", false, color.GreenString("Sort linter results")) internal.AddFlagAndBind(v, fs, fs.StringSlice, "sort-order", "output.sort-order", nil, @@ -94,13 +120,20 @@ func setupIssuesFlagSet(v *viper.Viper, fs *pflag.FlagSet) { color.GreenString("Maximum issues count per one linter. Set to 0 to disable")) internal.AddFlagAndBind(v, fs, fs.Int, "max-same-issues", "issues.max-same-issues", 3, color.GreenString("Maximum count of issues with the same text. Set to 0 to disable")) +<<<<<<< HEAD internal.AddFlagAndBind(v, fs, fs.Bool, "uniq-by-line", "issues.uniq-by-line", true, color.GreenString("Make issues output unique by line")) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) internal.AddHackedStringSlice(fs, "exclude-files", color.GreenString("Regexps of files to exclude")) internal.AddHackedStringSlice(fs, "exclude-dirs", color.GreenString("Regexps of directories to exclude")) internal.AddFlagAndBind(v, fs, fs.Bool, "exclude-dirs-use-default", "issues.exclude-dirs-use-default", true, +<<<<<<< HEAD formatList("Use or not use default excluded directories:", processors.StdExcludeDirRegexps)) +======= + getDefaultDirectoryExcludeHelp()) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) internal.AddFlagAndBind(v, fs, fs.String, "exclude-generated", "issues.exclude-generated", processors.AutogeneratedModeLax, color.GreenString("Mode of the generated files analysis")) @@ -122,6 +155,7 @@ func setupIssuesFlagSet(v *viper.Viper, fs *pflag.FlagSet) { color.GreenString("Fix found issues (if it's supported by the linter)")) } +<<<<<<< HEAD func formatList(head string, items []string, foot ...string) string { parts := []string{color.GreenString(head)} for _, p := range items { @@ -139,6 +173,8 @@ func formatList(head string, items []string, foot ...string) string { return strings.Join(parts, "\n") } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func getDefaultIssueExcludeHelp() string { parts := []string{color.GreenString("Use or not use default excludes:")} @@ -151,3 +187,15 @@ func getDefaultIssueExcludeHelp() string { return strings.Join(parts, "\n") } +<<<<<<< HEAD +======= + +func getDefaultDirectoryExcludeHelp() string { + parts := []string{color.GreenString("Use or not use default excluded directories:")} + for _, dir := range processors.StdExcludeDirRegexps { + parts = append(parts, fmt.Sprintf(" - %s", color.YellowString(dir))) + } + parts = append(parts, "") + return 
strings.Join(parts, "\n") +} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/help.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/help.go index 02a586f4c1..4a3fb56737 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/help.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/help.go @@ -1,13 +1,19 @@ package commands import ( +<<<<<<< HEAD "encoding/json" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "fmt" "slices" "sort" "strings" +<<<<<<< HEAD "unicode" "unicode/utf8" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/fatih/color" "github.com/spf13/cobra" @@ -18,6 +24,7 @@ import ( "github.com/golangci/golangci-lint/pkg/logutils" ) +<<<<<<< HEAD type linterHelp struct { Name string `json:"name"` Desc string `json:"description"` @@ -39,6 +46,11 @@ type helpCommand struct { opts helpOptions +======= +type helpCommand struct { + cmd *cobra.Command + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) dbManager *lintersdb.Manager log logutils.Log @@ -56,6 +68,7 @@ func newHelpCommand(logger logutils.Log) *helpCommand { }, } +<<<<<<< HEAD lintersCmd := &cobra.Command{ Use: "linters", Short: "Help about linters", @@ -71,6 +84,18 @@ func newHelpCommand(logger logutils.Log) *helpCommand { fs.SortFlags = false // sort them as they are defined here fs.BoolVar(&c.opts.JSON, "json", false, color.GreenString("Display as JSON")) +======= + helpCmd.AddCommand( + &cobra.Command{ + Use: "linters", + Short: "Help about linters", + Args: cobra.NoArgs, + ValidArgsFunction: cobra.NoFileCompletions, + Run: c.execute, + PreRunE: c.preRunE, + }, + ) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.cmd = helpCmd @@ -90,6 +115,7 @@ func (c *helpCommand) preRunE(_ *cobra.Command, _ []string) error { return nil } +<<<<<<< HEAD func (c *helpCommand) execute(_ *cobra.Command, _ []string) error { if c.opts.JSON { return c.printJSON() @@ -125,6 +151,9 @@ func (c *helpCommand) printJSON() error { } func (c *helpCommand) print() { +======= +func (c *helpCommand) execute(_ *cobra.Command, _ []string) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var enabledLCs, disabledLCs []*linter.Config for _, lc := range c.dbManager.GetAllSupportedLinterConfigs() { if lc.Internal { @@ -184,13 +213,23 @@ func printLinters(lcs []*linter.Config) { }) for _, lc := range lcs { +<<<<<<< HEAD desc := formatDescription(lc.Linter.Desc()) +======= + // If the linter description spans multiple lines, truncate everything following the first newline + linterDescription := lc.Linter.Desc() + firstNewline := strings.IndexRune(linterDescription, '\n') + if firstNewline > 0 { + linterDescription = linterDescription[:firstNewline] + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) deprecatedMark := "" if lc.IsDeprecated() { deprecatedMark = " [" + color.RedString("deprecated") + "]" } +<<<<<<< HEAD var capabilities []string if !lc.IsSlowLinter() { capabilities = append(capabilities, color.BlueString("fast")) @@ -227,3 +266,9 @@ func formatDescription(desc string) string { return string(rawDesc) } +======= + _, _ = fmt.Fprintf(logutils.StdOut, "%s%s: %s [fast: %t, auto-fix: %t]\n", + color.YellowString(lc.Name()), deprecatedMark, linterDescription, !lc.IsSlowLinter(), lc.CanAutoFix) + } +} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go 
b/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go index d9aa7578cd..72bbb4a7ba 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go @@ -238,21 +238,31 @@ func (c *runCommand) execute(_ *cobra.Command, args []string) { needTrackResources := logutils.IsVerbose() || c.opts.PrintResourcesUsage trackResourcesEndCh := make(chan struct{}) +<<<<<<< HEAD // Note: this defer must be before ctx.cancel defer defer func() { // wait until resource tracking finished to print properly if needTrackResources { +======= + defer func() { // XXX: this defer must be before ctx.cancel defer + if needTrackResources { // wait until resource tracking finished to print properly +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) <-trackResourcesEndCh } }() +<<<<<<< HEAD ctx := context.Background() if c.cfg.Run.Timeout > 0 { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(ctx, c.cfg.Run.Timeout) defer cancel() } +======= + ctx, cancel := context.WithTimeout(context.Background(), c.cfg.Run.Timeout) + defer cancel() +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if needTrackResources { go watchResources(ctx, trackResourcesEndCh, c.log, c.debugf) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/config.go b/vendor/github.com/golangci/golangci-lint/pkg/config/config.go index 579eddf594..571e8e304a 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/config/config.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/config.go @@ -1,6 +1,7 @@ package config import ( +<<<<<<< HEAD "cmp" "fmt" "os" @@ -11,6 +12,13 @@ import ( hcversion "github.com/hashicorp/go-version" "github.com/ldez/grignotin/gomod" "golang.org/x/mod/modfile" +======= + "os" + "strings" + + hcversion "github.com/hashicorp/go-version" + "github.com/ldez/gomoddirectives" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // Config encapsulates the config data specified in the golangci-lint YAML config file. @@ -85,7 +93,16 @@ func detectGoVersion() string { return goVersion } +<<<<<<< HEAD return cmp.Or(os.Getenv("GOVERSION"), "1.17") +======= + v := os.Getenv("GOVERSION") + if v != "" { + return v + } + + return "1.17" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // detectGoVersionFromGoMod tries to get Go version from go.mod. @@ -93,6 +110,7 @@ func detectGoVersion() string { // else it returns `go` version if present, // else it returns empty. 
func detectGoVersionFromGoMod() string { +<<<<<<< HEAD modPath, err := gomod.GetGoModPath() if err != nil { modPath = detectGoModFallback() @@ -103,6 +121,10 @@ func detectGoVersionFromGoMod() string { file, err := parseGoMod(modPath) if err != nil { +======= + file, _ := gomoddirectives.GetModuleFile() + if file == nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return "" } @@ -118,6 +140,7 @@ func detectGoVersionFromGoMod() string { return "" } +<<<<<<< HEAD func parseGoMod(goMod string) (*modfile.File, error) { raw, err := os.ReadFile(filepath.Clean(goMod)) @@ -156,3 +179,5 @@ func detectGoModFallback() string { return goMod.GoMod } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/issues.go b/vendor/github.com/golangci/golangci-lint/pkg/config/issues.go index 081b87624d..5cfba4ce6e 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/config/issues.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/issues.go @@ -117,9 +117,14 @@ type Issues struct { UseDefaultExcludeDirs bool `mapstructure:"exclude-dirs-use-default"` +<<<<<<< HEAD MaxIssuesPerLinter int `mapstructure:"max-issues-per-linter"` MaxSameIssues int `mapstructure:"max-same-issues"` UniqByLine bool `mapstructure:"uniq-by-line"` +======= + MaxIssuesPerLinter int `mapstructure:"max-issues-per-linter"` + MaxSameIssues int `mapstructure:"max-same-issues"` +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) DiffFromRevision string `mapstructure:"new-from-rev"` DiffPatchFilePath string `mapstructure:"new-from-patch"` @@ -128,7 +133,11 @@ type Issues struct { NeedFix bool `mapstructure:"fix"` +<<<<<<< HEAD ExcludeGeneratedStrict *bool `mapstructure:"exclude-generated-strict"` // Deprecated: use ExcludeGenerated instead. +======= + ExcludeGeneratedStrict bool `mapstructure:"exclude-generated-strict"` // Deprecated: use ExcludeGenerated instead. 
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (i *Issues) Validate() error {
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go b/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go
index 8e6c184ca4..a91b293de8 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go
@@ -105,7 +105,10 @@ var defaultLintersSettings = LintersSettings{
 		Kitlog: true,
 		Klog:   true,
 		Logr:   true,
+<<<<<<< HEAD
 		Slog:   true,
+=======
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		Zap:    true,
 		RequireStringKey: false,
 		NoPrintfLike:     false,
@@ -169,6 +172,10 @@ var defaultLintersSettings = LintersSettings{
 	Unused: UnusedSettings{
 		FieldWritesAreUses:     true,
 		PostStatementsAreReads: false,
+<<<<<<< HEAD
+=======
+		ExportedIsUsed:         true,
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ExportedFieldsAreUsed:  true,
 		ParametersAreUsed:      true,
 		LocalVariablesAreUsed:  true,
@@ -178,6 +185,7 @@ var defaultLintersSettings = LintersSettings{
 		HTTPMethod:     true,
 		HTTPStatusCode: true,
 	},
+<<<<<<< HEAD
 	UseTesting: UseTestingSettings{
 		ContextBackground: true,
 		ContextTodo:       true,
@@ -187,6 +195,8 @@ var defaultLintersSettings = LintersSettings{
 		OSTempDir:    false,
 		OSCreateTemp: true,
 	},
+=======
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	Varnamelen: VarnamelenSettings{
 		MaxDistance:   5,
 		MinNameLength: 3,
@@ -270,7 +280,10 @@ type LintersSettings struct {
 	Promlinter  PromlinterSettings
 	ProtoGetter ProtoGetterSettings
 	Reassign    ReassignSettings
+<<<<<<< HEAD
 	Recvcheck   RecvcheckSettings
+=======
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	Revive      ReviveSettings
 	RowsErrCheck RowsErrCheckSettings
 	SlogLint    SlogLintSettings
@@ -287,7 +300,10 @@ type LintersSettings struct {
 	Unparam       UnparamSettings
 	Unused        UnusedSettings
 	UseStdlibVars UseStdlibVarsSettings
+<<<<<<< HEAD
 	UseTesting    UseTestingSettings
+=======
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	Varnamelen    VarnamelenSettings
 	Whitespace    WhitespaceSettings
 	Wrapcheck     WrapcheckSettings
@@ -329,10 +345,15 @@ type BiDiChkSettings struct {
 }
 
 type CopyLoopVarSettings struct {
+<<<<<<< HEAD
 	CheckAlias bool `mapstructure:"check-alias"`
 
 	// Deprecated: use CheckAlias
 	IgnoreAlias *bool `mapstructure:"ignore-alias"`
+=======
+	IgnoreAlias bool `mapstructure:"ignore-alias"` // Deprecated: use CheckAlias
+	CheckAlias  bool `mapstructure:"check-alias"`
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 type Cyclop struct {
@@ -479,12 +500,19 @@ type FunlenSettings struct {
 }
 
 type GciSettings struct {
+<<<<<<< HEAD
 	Sections         []string `mapstructure:"sections"`
 	NoInlineComments bool     `mapstructure:"no-inline-comments"`
 	NoPrefixComments bool     `mapstructure:"no-prefix-comments"`
 	SkipGenerated    bool     `mapstructure:"skip-generated"`
 	CustomOrder      bool     `mapstructure:"custom-order"`
 	NoLexOrder       bool     `mapstructure:"no-lex-order"`
+=======
+	Sections      []string `mapstructure:"sections"`
+	SkipGenerated bool     `mapstructure:"skip-generated"`
+	CustomOrder   bool     `mapstructure:"custom-order"`
+	NoLexOrder    bool     `mapstructure:"no-lex-order"`
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	// Deprecated: use Sections instead.
 	LocalPrefixes string `mapstructure:"local-prefixes"`
@@ -507,7 +535,10 @@ type GinkgoLinterSettings struct {
 
 type GoChecksumTypeSettings struct {
 	DefaultSignifiesExhaustive bool `mapstructure:"default-signifies-exhaustive"`
+<<<<<<< HEAD
 	IncludeSharedInterfaces    bool `mapstructure:"include-shared-interfaces"`
+=======
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 type GocognitSettings struct {
@@ -550,7 +581,11 @@ type GodotSettings struct {
 	Period  bool `mapstructure:"period"`
 
 	// Deprecated: use Scope instead
+<<<<<<< HEAD
 	CheckAll *bool `mapstructure:"check-all"`
+=======
+	CheckAll bool `mapstructure:"check-all"`
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 type GodoxSettings struct {
@@ -590,11 +625,14 @@ type GoModDirectivesSettings struct {
 	ReplaceLocal              bool `mapstructure:"replace-local"`
 	ExcludeForbidden          bool `mapstructure:"exclude-forbidden"`
 	RetractAllowNoExplanation bool `mapstructure:"retract-allow-no-explanation"`
+<<<<<<< HEAD
 	ToolchainForbidden        bool   `mapstructure:"toolchain-forbidden"`
 	ToolchainPattern          string `mapstructure:"toolchain-pattern"`
 	ToolForbidden             bool   `mapstructure:"tool-forbidden"`
 	GoDebugForbidden          bool   `mapstructure:"go-debug-forbidden"`
 	GoVersionPattern          string `mapstructure:"go-version-pattern"`
+=======
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 type GoModGuardSettings struct {
@@ -643,7 +681,11 @@ type GovetSettings struct {
 	Settings map[string]map[string]any
 
 	// Deprecated: the linter should be enabled inside Enable.
+<<<<<<< HEAD
 	CheckShadowing *bool `mapstructure:"check-shadowing"`
+=======
+	CheckShadowing bool `mapstructure:"check-shadowing"`
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (cfg *GovetSettings) Validate() error {
@@ -708,7 +750,10 @@ type LoggerCheckSettings struct {
 	Kitlog bool `mapstructure:"kitlog"`
 	Klog   bool `mapstructure:"klog"`
 	Logr   bool `mapstructure:"logr"`
+<<<<<<< HEAD
 	Slog   bool `mapstructure:"slog"`
+=======
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	Zap    bool `mapstructure:"zap"`
 	RequireStringKey bool `mapstructure:"require-string-key"`
 	NoPrintfLike     bool `mapstructure:"no-printf-like"`
@@ -820,11 +865,14 @@ type ReassignSettings struct {
 	Patterns []string `mapstructure:"patterns"`
 }
 
+<<<<<<< HEAD
 type RecvcheckSettings struct {
 	DisableBuiltin bool     `mapstructure:"disable-builtin"`
 	Exclusions     []string `mapstructure:"exclusions"`
 }
 
+=======
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 type ReviveSettings struct {
 	Go           string `mapstructure:"-"`
 	MaxOpenFiles int    `mapstructure:"max-open-files"`
@@ -864,7 +912,11 @@ type SlogLintSettings struct {
 	ArgsOnSepLines bool `mapstructure:"args-on-sep-lines"`
 
 	// Deprecated: use Context instead.
+<<<<<<< HEAD
 	ContextOnly *bool `mapstructure:"context-only"`
+=======
+	ContextOnly bool `mapstructure:"context-only"`
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 type SpancheckSettings struct {
@@ -895,6 +947,7 @@ type TagAlignSettings struct {
 }
 
 type TagliatelleSettings struct {
+<<<<<<< HEAD
 	Case TagliatelleCase
 }
 
@@ -920,6 +973,12 @@ type TagliatelleExtendedRule struct {
 	Case                string
 	ExtraInitialisms    bool
 	InitialismOverrides map[string]bool
+=======
+	Case struct {
+		Rules        map[string]string
+		UseFieldName bool `mapstructure:"use-field-name"`
+	}
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 type TestifylintSettings struct {
@@ -984,6 +1043,7 @@ type UseStdlibVarsSettings struct {
 	TimeLayout     bool `mapstructure:"time-layout"`
 	CryptoHash     bool `mapstructure:"crypto-hash"`
 	DefaultRPCPath bool `mapstructure:"default-rpc-path"`
+<<<<<<< HEAD
 	SQLIsolationLevel  bool `mapstructure:"sql-isolation-level"`
 	TLSSignatureScheme bool `mapstructure:"tls-signature-scheme"`
 	ConstantKind       bool `mapstructure:"constant-kind"`
@@ -1002,6 +1062,13 @@ type UseTestingSettings struct {
 	OSSetenv     bool `mapstructure:"os-setenv"`
 	OSTempDir    bool `mapstructure:"os-temp-dir"`
 	OSCreateTemp bool `mapstructure:"os-create-temp"`
+=======
+	OSDevNull          bool `mapstructure:"os-dev-null"` // Deprecated
+	SQLIsolationLevel  bool `mapstructure:"sql-isolation-level"`
+	TLSSignatureScheme bool `mapstructure:"tls-signature-scheme"`
+	ConstantKind       bool `mapstructure:"constant-kind"`
+	SyslogPriority     bool `mapstructure:"syslog-priority"` // Deprecated
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 type UnconvertSettings struct {
@@ -1017,13 +1084,20 @@ type UnparamSettings struct {
 type UnusedSettings struct {
 	FieldWritesAreUses     bool `mapstructure:"field-writes-are-uses"`
 	PostStatementsAreReads bool `mapstructure:"post-statements-are-reads"`
+<<<<<<< HEAD
+=======
+	ExportedIsUsed         bool `mapstructure:"exported-is-used"` // Deprecated
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	ExportedFieldsAreUsed  bool `mapstructure:"exported-fields-are-used"`
 	ParametersAreUsed      bool `mapstructure:"parameters-are-used"`
 	LocalVariablesAreUsed  bool `mapstructure:"local-variables-are-used"`
 	GeneratedIsUsed        bool `mapstructure:"generated-is-used"`
+<<<<<<< HEAD
 
 	// Deprecated
 	ExportedIsUsed *bool `mapstructure:"exported-is-used"`
+=======
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 type VarnamelenSettings struct {
@@ -1045,7 +1119,10 @@ type WhitespaceSettings struct {
 }
 
 type WrapcheckSettings struct {
+<<<<<<< HEAD
 	ExtraIgnoreSigs []string `mapstructure:"extra-ignore-sigs"`
+=======
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	// TODO(ldez): v2 the options must be renamed to use hyphen.
 	IgnoreSigs       []string `mapstructure:"ignoreSigs"`
 	IgnoreSigRegexps []string `mapstructure:"ignoreSigRegexps"`
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/loader.go b/vendor/github.com/golangci/golangci-lint/pkg/config/loader.go
index 56f57d9d5d..4697a177f5 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/config/loader.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/config/loader.go
@@ -1,7 +1,10 @@
 package config
 
 import (
+<<<<<<< HEAD
 	"cmp"
+=======
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"errors"
 	"fmt"
 	"os"
@@ -293,7 +296,13 @@ func (l *Loader) handleGoVersion() {
 	l.cfg.LintersSettings.ParallelTest.Go = l.cfg.Run.Go
 
+<<<<<<< HEAD
 	l.cfg.LintersSettings.Gofumpt.LangVersion = cmp.Or(l.cfg.LintersSettings.Gofumpt.LangVersion, l.cfg.Run.Go)
+=======
+	if l.cfg.LintersSettings.Gofumpt.LangVersion == "" {
+		l.cfg.LintersSettings.Gofumpt.LangVersion = l.cfg.Run.Go
+	}
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	trimmedGoVersion := goutil.TrimGoVersion(l.cfg.Run.Go)
 
@@ -321,6 +330,7 @@ func (l *Loader) handleDeprecation() error {
 		l.cfg.Issues.ExcludeDirs = l.cfg.Run.SkipDirs
 	}
 
+<<<<<<< HEAD
 	// Deprecated since v1.57.0
 	if l.cfg.Run.UseDefaultSkipDirs != nil {
 		l.log.Warnf("The configuration option `run.skip-dirs-use-default` is deprecated, please use `issues.exclude-dirs-use-default`.")
@@ -338,6 +348,21 @@ func (l *Loader) handleDeprecation() error {
 		l.log.Warnf("The configuration option `output.uniq-by-line` is deprecated, please use `issues.uniq-by-line`")
 		l.cfg.Issues.UniqByLine = *l.cfg.Output.UniqByLine
 	}
+=======
+	// The 2 options are true by default.
+	// Deprecated since v1.57.0
+	if !l.cfg.Run.UseDefaultSkipDirs {
+		l.log.Warnf("The configuration option `run.skip-dirs-use-default` is deprecated, please use `issues.exclude-dirs-use-default`.")
+	}
+	l.cfg.Issues.UseDefaultExcludeDirs = l.cfg.Run.UseDefaultSkipDirs && l.cfg.Issues.UseDefaultExcludeDirs
+
+	// The 2 options are false by default.
+	// Deprecated since v1.57.0
+	if l.cfg.Run.ShowStats {
+		l.log.Warnf("The configuration option `run.show-stats` is deprecated, please use `output.show-stats`")
+	}
+	l.cfg.Output.ShowStats = l.cfg.Run.ShowStats || l.cfg.Output.ShowStats
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	// Deprecated since v1.57.0
 	if l.cfg.Output.Format != "" {
@@ -360,11 +385,17 @@ func (l *Loader) handleDeprecation() error {
 	}
 
 	// Deprecated since v1.59.0
+<<<<<<< HEAD
 	if l.cfg.Issues.ExcludeGeneratedStrict != nil {
 		l.log.Warnf("The configuration option `issues.exclude-generated-strict` is deprecated, please use `issues.exclude-generated`")
 		if !*l.cfg.Issues.ExcludeGeneratedStrict {
 			l.cfg.Issues.ExcludeGenerated = "strict" // Don't use the constants to avoid cyclic dependencies.
 		}
+=======
+	if l.cfg.Issues.ExcludeGeneratedStrict {
+		l.log.Warnf("The configuration option `issues.exclude-generated-strict` is deprecated, please use `issues.exclude-generated`")
+		l.cfg.Issues.ExcludeGenerated = "strict" // Don't use the constants to avoid cyclic dependencies.
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 
 	l.handleLinterOptionDeprecations()
@@ -372,15 +403,27 @@ func (l *Loader) handleDeprecation() error {
 	return nil
 }
 
+<<<<<<< HEAD
 func (l *Loader) handleLinterOptionDeprecations() {
 	// Deprecated since v1.57.0,
 	// but it was unofficially deprecated since v1.19 (2019) (https://github.com/golangci/golangci-lint/pull/697).
 	if l.cfg.LintersSettings.Govet.CheckShadowing != nil {
+=======
+//nolint:gocyclo // the complexity cannot be reduced.
+func (l *Loader) handleLinterOptionDeprecations() {
+	// Deprecated since v1.57.0,
+	// but it was unofficially deprecated since v1.19 (2019) (https://github.com/golangci/golangci-lint/pull/697).
+	if l.cfg.LintersSettings.Govet.CheckShadowing {
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		l.log.Warnf("The configuration option `linters.govet.check-shadowing` is deprecated. " +
 			"Please enable `shadow` instead, if you are not using `enable-all`.")
 	}
 
+<<<<<<< HEAD
 	if l.cfg.LintersSettings.CopyLoopVar.IgnoreAlias != nil {
+=======
+	if l.cfg.LintersSettings.CopyLoopVar.IgnoreAlias {
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		l.log.Warnf("The configuration option `linters.copyloopvar.ignore-alias` is deprecated and ignored," +
 			"please use `linters.copyloopvar.check-alias`.")
 	}
@@ -402,7 +445,11 @@ func (l *Loader) handleLinterOptionDeprecations() {
 	}
 
 	// Deprecated since v1.33.0.
+<<<<<<< HEAD
 	if l.cfg.LintersSettings.Godot.CheckAll != nil {
+=======
+	if l.cfg.LintersSettings.Godot.CheckAll {
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		l.log.Warnf("The configuration option `linters.godot.check-all` is deprecated, please use `linters.godot.scope: all`.")
 	}
 
@@ -427,11 +474,16 @@ func (l *Loader) handleLinterOptionDeprecations() {
 	}
 
 	// Deprecated since v1.60.0
+<<<<<<< HEAD
 	if l.cfg.LintersSettings.Unused.ExportedIsUsed != nil {
+=======
+	if !l.cfg.LintersSettings.Unused.ExportedIsUsed {
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		l.log.Warnf("The configuration option `linters.unused.exported-is-used` is deprecated.")
 	}
 
 	// Deprecated since v1.58.0
+<<<<<<< HEAD
 	if l.cfg.LintersSettings.SlogLint.ContextOnly != nil {
 		l.log.Warnf("The configuration option `linters.sloglint.context-only` is deprecated, please use `linters.sloglint.context`.")
 		l.cfg.LintersSettings.SlogLint.Context = cmp.Or(l.cfg.LintersSettings.SlogLint.Context, "all")
@@ -439,11 +491,26 @@ func (l *Loader) handleLinterOptionDeprecations() {
 
 	// Deprecated since v1.51.0
 	if l.cfg.LintersSettings.UseStdlibVars.OSDevNull != nil {
+=======
+	if l.cfg.LintersSettings.SlogLint.ContextOnly {
+		l.log.Warnf("The configuration option `linters.sloglint.context-only` is deprecated, please use `linters.sloglint.context`.")
+		if l.cfg.LintersSettings.SlogLint.Context == "" {
+			l.cfg.LintersSettings.SlogLint.Context = "all"
+		}
+	}
+
+	// Deprecated since v1.51.0
+	if l.cfg.LintersSettings.UseStdlibVars.OSDevNull {
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		l.log.Warnf("The configuration option `linters.usestdlibvars.os-dev-null` is deprecated.")
 	}
 
 	// Deprecated since v1.51.0
+<<<<<<< HEAD
 	if l.cfg.LintersSettings.UseStdlibVars.SyslogPriority != nil {
+=======
+	if l.cfg.LintersSettings.UseStdlibVars.SyslogPriority {
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		l.log.Warnf("The configuration option `linters.usestdlibvars.syslog-priority` is deprecated.")
 	}
 }
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/output.go b/vendor/github.com/golangci/golangci-lint/pkg/config/output.go
index aaa5183ec4..8cfc95c59a 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/config/output.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/config/output.go
@@ -43,6 +43,10 @@ type Output struct {
 	Formats         OutputFormats `mapstructure:"formats"`
 	PrintIssuedLine bool          `mapstructure:"print-issued-lines"`
 	PrintLinterName bool          `mapstructure:"print-linter-name"`
+<<<<<<< HEAD
+=======
+	UniqByLine      bool          `mapstructure:"uniq-by-line"`
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	SortResults     bool          `mapstructure:"sort-results"`
 	SortOrder       []string      `mapstructure:"sort-order"`
 	PathPrefix      string        `mapstructure:"path-prefix"`
@@ -50,9 +54,12 @@ type Output struct {
 
 	// Deprecated: use Formats instead.
 	Format string `mapstructure:"format"`
+<<<<<<< HEAD
 
 	// Deprecated: use [Issues.UniqByLine] instead.
 	UniqByLine *bool `mapstructure:"uniq-by-line"`
+=======
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (o *Output) Validate() error {
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/run.go b/vendor/github.com/golangci/golangci-lint/pkg/config/run.go
index 784e8c2fad..c0b648e0e3 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/config/run.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/config/run.go
@@ -29,10 +29,17 @@ type Run struct {
 	// Deprecated: use Issues.ExcludeDirs instead.
 	SkipDirs []string `mapstructure:"skip-dirs"`
 	// Deprecated: use Issues.UseDefaultExcludeDirs instead.
+<<<<<<< HEAD
 	UseDefaultSkipDirs *bool `mapstructure:"skip-dirs-use-default"`
 
 	// Deprecated: use Output.ShowStats instead.
 	ShowStats *bool `mapstructure:"show-stats"`
+=======
+	UseDefaultSkipDirs bool `mapstructure:"skip-dirs-use-default"`
+
+	// Deprecated: use Output.ShowStats instead.
+	ShowStats bool `mapstructure:"show-stats"`
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (r *Run) Validate() error {
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils.go b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils.go
index 91d12e5cd3..3a7415d8c4 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils.go
@@ -61,7 +61,11 @@ func EvalSymlinks(path string) (string, error) {
 	}
 
 	var er evalSymlinkRes
+<<<<<<< HEAD
 	er.path, er.err = evalSymlinks(path)
+=======
+	er.path, er.err = filepath.EvalSymlinks(path)
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	evalSymlinkCache.Store(path, er)
 
 	return er.path, er.err
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/issue.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/issue.go
index 854e7d15f0..7db8c589f9 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/issue.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/issue.go
@@ -26,7 +26,11 @@ type EncodingIssue struct {
 	Severity  string
 	Pos       token.Position
 	LineRange *result.Range
+<<<<<<< HEAD
 	SuggestedFixes []analysis.SuggestedFix
+=======
+	Replacement *result.Replacement
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	ExpectNoLint         bool
 	ExpectedNoLintLinter string
 }
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner.go
index 3a8652486c..75e33dab38 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner.go
@@ -42,7 +42,10 @@ type Diagnostic struct {
 	Analyzer *analysis.Analyzer
 	Position token.Position
 	Pkg      *packages.Package
+<<<<<<< HEAD
 	File     *token.File
+=======
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 type runner struct {
@@ -122,9 +125,15 @@ func (r *runner) makeAction(a *analysis.Analyzer, pkg *packages.Package,
 	}
 
 	act = actAlloc.alloc()
+<<<<<<< HEAD
 	act.Analyzer = a
 	act.Package = pkg
 	act.runner = r
+=======
+	act.a = a
+	act.pkg = pkg
+	act.r = r
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	act.isInitialPkg = initialPkgs[pkg]
 	act.needAnalyzeSource = initialPkgs[pkg]
 	act.analysisDoneCh = make(chan struct{})
@@ -133,11 +142,19 @@ func (r *runner) makeAction(a *analysis.Analyzer, pkg *packages.Package,
 	if len(a.FactTypes) > 0 {
 		depsCount += len(pkg.Imports)
 	}
+<<<<<<< HEAD
 	act.Deps = make([]*action, 0, depsCount)
 
 	// Add a dependency on each required analyzers.
 	for _, req := range a.Requires {
 		act.Deps = append(act.Deps, r.makeAction(req, pkg, initialPkgs, actions, actAlloc))
+=======
+	act.deps = make([]*action, 0, depsCount)
+
+	// Add a dependency on each required analyzers.
+	for _, req := range a.Requires {
+		act.deps = append(act.deps, r.makeAction(req, pkg, initialPkgs, actions, actAlloc))
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 
 	r.buildActionFactDeps(act, a, pkg, initialPkgs, actions, actAlloc)
@@ -163,7 +180,11 @@ func (r *runner) buildActionFactDeps(act *action, a *analysis.Analyzer, pkg *pac
 	sort.Strings(paths) // for determinism
 	for _, path := range paths {
 		dep := r.makeAction(a, pkg.Imports[path], initialPkgs, actions, actAlloc)
+<<<<<<< HEAD
 		act.Deps = append(act.Deps, dep)
+=======
+		act.deps = append(act.deps, dep)
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 
 	// Need to register fact types for pkgcache proper gob encoding.
@@ -204,7 +225,11 @@ func (r *runner) prepareAnalysis(pkgs []*packages.Package,
 	for _, a := range analyzers {
 		for _, pkg := range pkgs {
 			root := r.makeAction(a, pkg, initialPkgs, actions, actAlloc)
+<<<<<<< HEAD
 			root.IsRoot = true
+=======
+			root.isroot = true
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			roots = append(roots, root)
 		}
 	}
@@ -221,7 +246,11 @@ func (r *runner) analyze(pkgs []*packages.Package, analyzers []*analysis.Analyze
 	actionPerPkg := map[*packages.Package][]*action{}
 	for _, act := range actions {
+<<<<<<< HEAD
 		actionPerPkg[act.Package] = append(actionPerPkg[act.Package], act)
+=======
+		actionPerPkg[act.pkg] = append(actionPerPkg[act.pkg], act)
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 
 	// Fill Imports field.
@@ -251,7 +280,11 @@ func (r *runner) analyze(pkgs []*packages.Package, analyzers []*analysis.Analyze
 		}
 	}
 	for _, act := range actions {
+<<<<<<< HEAD
 		dfs(act.Package)
+=======
+		dfs(act.pkg)
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 
 	// Limit memory and IO usage.
@@ -283,7 +316,11 @@ func extractDiagnostics(roots []*action) (retDiags []Diagnostic, retErrors []err
 	for _, act := range actions {
 		if !extracted[act] {
 			extracted[act] = true
+<<<<<<< HEAD
 			visitAll(act.Deps)
+=======
+			visitAll(act.deps)
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			extract(act)
 		}
 	}
@@ -300,6 +337,7 @@ func extractDiagnostics(roots []*action) (retDiags []Diagnostic, retErrors []err
 	seen := make(map[key]bool)
 
 	extract = func(act *action) {
+<<<<<<< HEAD
 		if act.Err != nil {
 			if pe, ok := act.Err.(*errorutil.PanicError); ok {
 				panic(pe)
 			}
@@ -317,17 +355,41 @@ func extractDiagnostics(roots []*action) (retDiags []Diagnostic, retErrors []err
 				file := act.Package.Fset.File(diag.Pos)
 				k := key{Position: position, Analyzer: act.Analyzer, message: diag.Message}
+=======
+		if act.err != nil {
+			if pe, ok := act.err.(*errorutil.PanicError); ok {
+				panic(pe)
+			}
+			retErrors = append(retErrors, fmt.Errorf("%s: %w", act.a.Name, act.err))
+			return
+		}
+
+		if act.isroot {
+			for _, diag := range act.diagnostics {
+				// We don't display a.Name/f.Category
+				// as most users don't care.
+
+				posn := act.pkg.Fset.Position(diag.Pos)
+				k := key{posn, act.a, diag.Message}
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 				if seen[k] {
 					continue // duplicate
 				}
 				seen[k] = true
 
 				retDiag := Diagnostic{
+<<<<<<< HEAD
 					File:       file,
 					Diagnostic: diag,
 					Analyzer:   act.Analyzer,
 					Position:   position,
 					Pkg:        act.Package,
+=======
+					Diagnostic: diag,
+					Analyzer:   act.a,
+					Position:   posn,
+					Pkg:        act.pkg,
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 				}
 				retDiags = append(retDiags, retDiag)
 			}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action.go
index 2e1c414228..ed488e2e40 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action.go
@@ -29,8 +29,13 @@ func (actAlloc *actionAllocator) alloc() *action {
 }
 
 func (act *action) waitUntilDependingAnalyzersWorked() {
+<<<<<<< HEAD
 	for _, dep := range act.Deps {
 		if dep.Package == act.Package {
+=======
+	for _, dep := range act.deps {
+		if dep.pkg == act.pkg {
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			<-dep.analysisDoneCh
 		}
 	}
@@ -39,6 +44,7 @@ func (act *action) waitUntilDependingAnalyzersWorked() {
 func (act *action) analyzeSafe() {
 	defer func() {
 		if p := recover(); p != nil {
+<<<<<<< HEAD
 			if !act.IsRoot {
 				// This line allows to display "hidden" panic with analyzers like buildssa.
 				// Some linters are dependent of sub-analyzers but when a sub-analyzer fails the linter is not aware of that,
@@ -52,13 +58,33 @@ func (act *action) analyzeSafe() {
 	}()
 
 	act.runner.sw.TrackStage(act.Analyzer.Name, act.analyze)
+=======
+			if !act.isroot {
+				// This line allows to display "hidden" panic with analyzers like buildssa.
+				// Some linters are dependent of sub-analyzers but when a sub-analyzer fails the linter is not aware of that,
+				// this results to another panic (ex: "interface conversion: interface {} is nil, not *buildssa.SSA").
+				act.r.log.Errorf("%s: panic during analysis: %v, %s", act.a.Name, p, string(debug.Stack()))
+			}
+
+			act.err = errorutil.NewPanicError(fmt.Sprintf("%s: package %q (isInitialPkg: %t, needAnalyzeSource: %t): %s",
+				act.a.Name, act.pkg.Name, act.isInitialPkg, act.needAnalyzeSource, p), debug.Stack())
+		}
+	}()
+
+	act.r.sw.TrackStage(act.a.Name, act.analyze)
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (act *action) markDepsForAnalyzingSource() {
 	// Horizontal deps (analyzer.Requires) must be loaded from source and analyzed before analyzing
 	// this action.
+<<<<<<< HEAD
 	for _, dep := range act.Deps {
 		if dep.Package == act.Package {
+=======
+	for _, dep := range act.deps {
+		if dep.pkg == act.pkg {
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			// Analyze source only for horizontal dependencies, e.g. from "buildssa".
 			dep.needAnalyzeSource = true // can't be set in parallel
 		}
 	}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action_cache.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action_cache.go
index e06ea2979c..7333cfc207 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action_cache.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action_cache.go
@@ -26,7 +26,11 @@ func (act *action) loadCachedFacts() bool {
 		return true // load cached facts only for non-initial packages
 	}
 
+<<<<<<< HEAD
 	if len(act.Analyzer.FactTypes) == 0 {
+=======
+	if len(act.a.FactTypes) == 0 {
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		return true // no need to load facts
 	}
 
@@ -38,7 +42,11 @@ func (act *action) loadCachedFacts() bool {
 }
 
 func (act *action) persistFactsToCache() error {
+<<<<<<< HEAD
 	analyzer := act.Analyzer
+=======
+	analyzer := act.a
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if len(analyzer.FactTypes) == 0 {
 		return nil
 	}
@@ -46,7 +54,11 @@ func (act *action) persistFactsToCache() error {
 	// Merge new facts into the package and persist them.
 	var facts []Fact
 	for key, fact := range act.packageFacts {
+<<<<<<< HEAD
 		if key.pkg != act.Package.Types {
+=======
+		if key.pkg != act.pkg.Types {
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			// The fact is from inherited facts from another package
 			continue
 		}
@@ -57,7 +69,11 @@ func (act *action) persistFactsToCache() error {
 	}
 	for key, fact := range act.objectFacts {
 		obj := key.obj
+<<<<<<< HEAD
 		if obj.Pkg() != act.Package.Types {
+=======
+		if obj.Pkg() != act.pkg.Types {
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			// The fact is from inherited facts from another package
 			continue
 		}
@@ -74,14 +90,21 @@ func (act *action) persistFactsToCache() error {
 		})
 	}
 
+<<<<<<< HEAD
 	factsCacheDebugf("Caching %d facts for package %q and analyzer %s", len(facts), act.Package.Name, act.Analyzer.Name)
 
 	return act.runner.pkgCache.Put(act.Package, cache.HashModeNeedAllDeps, factCacheKey(analyzer), facts)
+=======
+	factsCacheDebugf("Caching %d facts for package %q and analyzer %s", len(facts), act.pkg.Name, act.a.Name)
+
+	return act.r.pkgCache.Put(act.pkg, cache.HashModeNeedAllDeps, factCacheKey(analyzer), facts)
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (act *action) loadPersistedFacts() bool {
 	var facts []Fact
+<<<<<<< HEAD
 	err := act.runner.pkgCache.Get(act.Package, cache.HashModeNeedAllDeps, factCacheKey(act.Analyzer), &facts)
 	if err != nil {
 		if !errors.Is(err, cache.ErrMissing) && !errors.Is(err, io.EOF) {
@@ -101,6 +124,27 @@ func (act *action) loadPersistedFacts() bool {
 			continue
 		}
 		obj, err := objectpath.Object(act.Package.Types, objectpath.Path(f.Path))
+=======
+	err := act.r.pkgCache.Get(act.pkg, cache.HashModeNeedAllDeps, factCacheKey(act.a), &facts)
+	if err != nil {
+		if !errors.Is(err, cache.ErrMissing) && !errors.Is(err, io.EOF) {
+			act.r.log.Warnf("Failed to get persisted facts: %s", err)
+		}
+
+		factsCacheDebugf("No cached facts for package %q and analyzer %s", act.pkg.Name, act.a.Name)
+		return false
+	}
+
+	factsCacheDebugf("Loaded %d cached facts for package %q and analyzer %s", len(facts), act.pkg.Name, act.a.Name)
+
+	for _, f := range facts {
+		if f.Path == "" { // this is a package fact
+			key := packageFactKey{act.pkg.Types, act.factType(f.Fact)}
+			act.packageFacts[key] = f.Fact
+			continue
+		}
+		obj, err := objectpath.Object(act.pkg.Types, objectpath.Path(f.Path))
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		if err != nil {
 			// Be lenient about these errors. For example, when
 			// analyzing io/ioutil from source, we may get a fact
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_base.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_base.go
new file mode 100644
index 0000000000..d868f8f5da
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_base.go
@@ -0,0 +1,370 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// Partial copy of https://github.com/golang/tools/blob/dba5486c2a1d03519930812112b23ed2c45c04fc/go/analysis/internal/checker/checker.go
+
+package goanalysis
+
+import (
+	"bytes"
+	"encoding/gob"
+	"errors"
+	"fmt"
+	"go/types"
+	"reflect"
+	"time"
+
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/packages"
+
+	"github.com/golangci/golangci-lint/pkg/goanalysis/pkgerrors"
+)
+
+// NOTE(ldez) altered: custom fields; remove 'once' and 'duration'.
+// An action represents one unit of analysis work: the application of
+// one analysis to one package. Actions form a DAG, both within a
+// package (as different analyzers are applied, either in sequence or
+// parallel), and across packages (as dependencies are analyzed).
+type action struct {
+	a            *analysis.Analyzer
+	pkg          *packages.Package
+	pass         *analysis.Pass
+	isroot       bool
+	deps         []*action
+	objectFacts  map[objectFactKey]analysis.Fact
+	packageFacts map[packageFactKey]analysis.Fact
+	result       any
+	diagnostics  []analysis.Diagnostic
+	err          error
+
+	// NOTE(ldez) custom fields.
+	r                   *runner
+	analysisDoneCh      chan struct{}
+	loadCachedFactsDone bool
+	loadCachedFactsOk   bool
+	isInitialPkg        bool
+	needAnalyzeSource   bool
+}
+
+// NOTE(ldez) no alteration.
+type objectFactKey struct {
+	obj types.Object
+	typ reflect.Type
+}
+
+// NOTE(ldez) no alteration.
+type packageFactKey struct {
+	pkg *types.Package
+	typ reflect.Type
+}
+
+// NOTE(ldez) no alteration.
+func (act *action) String() string {
+	return fmt.Sprintf("%s@%s", act.a, act.pkg)
+}
+
+// NOTE(ldez) altered version of `func (act *action) execOnce()`.
+func (act *action) analyze() {
+	defer close(act.analysisDoneCh) // unblock actions depending on this action
+
+	if !act.needAnalyzeSource {
+		return
+	}
+
+	defer func(now time.Time) {
+		analyzeDebugf("go/analysis: %s: %s: analyzed package %q in %s", act.r.prefix, act.a.Name, act.pkg.Name, time.Since(now))
+	}(time.Now())
+
+	// Report an error if any dependency failures.
+	var depErrors error
+	for _, dep := range act.deps {
+		if dep.err != nil {
+			depErrors = errors.Join(depErrors, errors.Unwrap(dep.err))
+		}
+	}
+
+	if depErrors != nil {
+		act.err = fmt.Errorf("failed prerequisites: %w", depErrors)
+		return
+	}
+
+	// Plumb the output values of the dependencies
+	// into the inputs of this action.  Also facts.
+	inputs := make(map[*analysis.Analyzer]any)
+	act.objectFacts = make(map[objectFactKey]analysis.Fact)
+	act.packageFacts = make(map[packageFactKey]analysis.Fact)
+	startedAt := time.Now()
+
+	for _, dep := range act.deps {
+		if dep.pkg == act.pkg {
+			// Same package, different analysis (horizontal edge):
+			// in-memory outputs of prerequisite analyzers
+			// become inputs to this analysis pass.
+			inputs[dep.a] = dep.result
+		} else if dep.a == act.a { // (always true)
+			// Same analysis, different package (vertical edge):
+			// serialized facts produced by prerequisite analysis
+			// become available to this analysis pass.
+			inheritFacts(act, dep)
+		}
+	}
+
+	factsDebugf("%s: Inherited facts in %s", act, time.Since(startedAt))
+
+	module := &analysis.Module{} // possibly empty (non nil) in go/analysis drivers.
+	if mod := act.pkg.Module; mod != nil {
+		module.Path = mod.Path
+		module.Version = mod.Version
+		module.GoVersion = mod.GoVersion
+	}
+
+	// Run the analysis.
+	pass := &analysis.Pass{
+		Analyzer:     act.a,
+		Fset:         act.pkg.Fset,
+		Files:        act.pkg.Syntax,
+		OtherFiles:   act.pkg.OtherFiles,
+		IgnoredFiles: act.pkg.IgnoredFiles,
+		Pkg:          act.pkg.Types,
+		TypesInfo:    act.pkg.TypesInfo,
+		TypesSizes:   act.pkg.TypesSizes,
+		TypeErrors:   act.pkg.TypeErrors,
+		Module:       module,
+
+		ResultOf:          inputs,
+		Report:            func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) },
+		ImportObjectFact:  act.importObjectFact,
+		ExportObjectFact:  act.exportObjectFact,
+		ImportPackageFact: act.importPackageFact,
+		ExportPackageFact: act.exportPackageFact,
+		AllObjectFacts:    act.allObjectFacts,
+		AllPackageFacts:   act.allPackageFacts,
+	}
+
+	act.pass = pass
+	act.r.passToPkgGuard.Lock()
+	act.r.passToPkg[pass] = act.pkg
+	act.r.passToPkgGuard.Unlock()
+
+	if act.pkg.IllTyped {
+		// It looks like there should be !pass.Analyzer.RunDespiteErrors
+		// but govet's cgocall crashes on it. Govet itself contains !pass.Analyzer.RunDespiteErrors condition here,
+		// but it exits before it if packages.Load have failed.
+		act.err = fmt.Errorf("analysis skipped: %w", &pkgerrors.IllTypedError{Pkg: act.pkg})
+	} else {
+		startedAt = time.Now()
+
+		act.result, act.err = pass.Analyzer.Run(pass)
+
+		analyzedIn := time.Since(startedAt)
+		if analyzedIn > time.Millisecond*10 {
+			debugf("%s: run analyzer in %s", act, analyzedIn)
+		}
+	}
+
+	// disallow calls after Run
+	pass.ExportObjectFact = nil
+	pass.ExportPackageFact = nil
+
+	err := act.persistFactsToCache()
+	if err != nil {
+		act.r.log.Warnf("Failed to persist facts to cache: %s", err)
+	}
+}
+
+// NOTE(ldez) altered: logger; serialize.
+// inheritFacts populates act.facts with
+// those it obtains from its dependency, dep.
+func inheritFacts(act, dep *action) {
+	const serialize = false
+
+	for key, fact := range dep.objectFacts {
+		// Filter out facts related to objects
+		// that are irrelevant downstream
+		// (equivalently: not in the compiler export data).
+		if !exportedFrom(key.obj, dep.pkg.Types) {
+			factsInheritDebugf("%v: discarding %T fact from %s for %s: %s", act, fact, dep, key.obj, fact)
+			continue
+		}
+
+		// Optionally serialize/deserialize fact
+		// to verify that it works across address spaces.
+		if serialize {
+			encodedFact, err := codeFact(fact)
+			if err != nil {
+				act.r.log.Panicf("internal error: encoding of %T fact failed in %v: %v", fact, act, err)
+			}
+			fact = encodedFact
+		}
+
+		factsInheritDebugf("%v: inherited %T fact for %s: %s", act, fact, key.obj, fact)
+
+		act.objectFacts[key] = fact
+	}
+
+	for key, fact := range dep.packageFacts {
+		// TODO: filter out facts that belong to
+		// packages not mentioned in the export data
+		// to prevent side channels.
+
+		// Optionally serialize/deserialize fact
+		// to verify that it works across address spaces
+		// and is deterministic.
+		if serialize {
+			encodedFact, err := codeFact(fact)
+			if err != nil {
+				act.r.log.Panicf("internal error: encoding of %T fact failed in %v", fact, act)
+			}
+			fact = encodedFact
+		}
+
+		factsInheritDebugf("%v: inherited %T fact for %s: %s", act, fact, key.pkg.Path(), fact)
+
+		act.packageFacts[key] = fact
+	}
+}
+
+// NOTE(ldez) no alteration.
+// codeFact encodes then decodes a fact,
+// just to exercise that logic.
+func codeFact(fact analysis.Fact) (analysis.Fact, error) {
+	// We encode facts one at a time.
+	// A real modular driver would emit all facts
+	// into one encoder to improve gob efficiency.
+	var buf bytes.Buffer
+	if err := gob.NewEncoder(&buf).Encode(fact); err != nil {
+		return nil, err
+	}
+
+	// Encode it twice and assert that we get the same bits.
+	// This helps detect nondeterministic Gob encoding (e.g. of maps).
+	var buf2 bytes.Buffer
+	if err := gob.NewEncoder(&buf2).Encode(fact); err != nil {
+		return nil, err
+	}
+	if !bytes.Equal(buf.Bytes(), buf2.Bytes()) {
+		return nil, fmt.Errorf("encoding of %T fact is nondeterministic", fact)
+	}
+
+	newFact := reflect.New(reflect.TypeOf(fact).Elem()).Interface().(analysis.Fact)
+	if err := gob.NewDecoder(&buf).Decode(newFact); err != nil {
+		return nil, err
+	}
+	return newFact, nil
+}
+
+// NOTE(ldez) no alteration.
+// exportedFrom reports whether obj may be visible to a package that imports pkg.
+// This includes not just the exported members of pkg, but also unexported
+// constants, types, fields, and methods, perhaps belonging to other packages,
+// that find there way into the API.
+// This is an over-approximation of the more accurate approach used by
+// gc export data, which walks the type graph, but it's much simpler.
+//
+// TODO(adonovan): do more accurate filtering by walking the type graph.
+func exportedFrom(obj types.Object, pkg *types.Package) bool {
+	switch obj := obj.(type) {
+	case *types.Func:
+		return obj.Exported() && obj.Pkg() == pkg ||
+			obj.Type().(*types.Signature).Recv() != nil
+	case *types.Var:
+		if obj.IsField() {
+			return true
+		}
+		// we can't filter more aggressively than this because we need
+		// to consider function parameters exported, but have no way
+		// of telling apart function parameters from local variables.
+		return obj.Pkg() == pkg
+	case *types.TypeName, *types.Const:
+		return true
+	}
+	return false // Nil, Builtin, Label, or PkgName
+}
+
+// NOTE(ldez) altered: logger; `act.factType`
+// importObjectFact implements Pass.ImportObjectFact.
+// Given a non-nil pointer ptr of type *T, where *T satisfies Fact,
+// importObjectFact copies the fact value to *ptr.
+func (act *action) importObjectFact(obj types.Object, ptr analysis.Fact) bool {
+	if obj == nil {
+		panic("nil object")
+	}
+	key := objectFactKey{obj, act.factType(ptr)}
+	if v, ok := act.objectFacts[key]; ok {
+		reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
+		return true
+	}
+	return false
+}
+
+// NOTE(ldez) altered: removes code related to `act.pass.ExportPackageFact`; logger; `act.factType`.
+// exportObjectFact implements Pass.ExportObjectFact.
+func (act *action) exportObjectFact(obj types.Object, fact analysis.Fact) {
+	if obj.Pkg() != act.pkg.Types {
+		act.r.log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging another package",
+			act.a, act.pkg, obj, fact)
+	}
+
+	key := objectFactKey{obj, act.factType(fact)}
+	act.objectFacts[key] = fact // clobber any existing entry
+	if isFactsExportDebug {
+		objstr := types.ObjectString(obj, (*types.Package).Name)
+
+		factsExportDebugf("%s: object %s has fact %s\n",
+			act.pkg.Fset.Position(obj.Pos()), objstr, fact)
+	}
+}
+
+// NOTE(ldez) no alteration.
+func (act *action) allObjectFacts() []analysis.ObjectFact {
+	facts := make([]analysis.ObjectFact, 0, len(act.objectFacts))
+	for k := range act.objectFacts {
+		facts = append(facts, analysis.ObjectFact{Object: k.obj, Fact: act.objectFacts[k]})
+	}
+	return facts
+}
+
+// NOTE(ldez) altered: `act.factType`
+// importPackageFact implements Pass.ImportPackageFact.
+// Given a non-nil pointer ptr of type *T, where *T satisfies Fact,
+// fact copies the fact value to *ptr.
+func (act *action) importPackageFact(pkg *types.Package, ptr analysis.Fact) bool {
+	if pkg == nil {
+		panic("nil package")
+	}
+	key := packageFactKey{pkg, act.factType(ptr)}
+	if v, ok := act.packageFacts[key]; ok {
+		reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
+		return true
+	}
+	return false
+}
+
+// NOTE(ldez) altered: removes code related to `act.pass.ExportPackageFact`; logger; `act.factType`.
+// exportPackageFact implements Pass.ExportPackageFact.
+func (act *action) exportPackageFact(fact analysis.Fact) {
+	key := packageFactKey{act.pass.Pkg, act.factType(fact)}
+	act.packageFacts[key] = fact // clobber any existing entry
+
+	factsDebugf("%s: package %s has fact %s\n",
+		act.pkg.Fset.Position(act.pass.Files[0].Pos()), act.pass.Pkg.Path(), fact)
+}
+
+// NOTE(ldez) altered: add receiver to handle logs.
+func (act *action) factType(fact analysis.Fact) reflect.Type {
+	t := reflect.TypeOf(fact)
+	if t.Kind() != reflect.Ptr {
+		act.r.log.Fatalf("invalid Fact type: got %T, want pointer", fact)
+	}
+	return t
+}
+
+// NOTE(ldez) no alteration.
+func (act *action) allPackageFacts() []analysis.PackageFact {
+	facts := make([]analysis.PackageFact, 0, len(act.packageFacts))
+	for k := range act.packageFacts {
+		facts = append(facts, analysis.PackageFact{Package: k.pkg, Fact: act.packageFacts[k]})
+	}
+	return facts
+}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_loadingpackage.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_loadingpackage.go
index fca4b8c3ad..c84aaeb9bb 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_loadingpackage.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_loadingpackage.go
@@ -67,7 +67,11 @@ func (lp *loadingPackage) analyze(loadMode LoadMode, loadSem chan struct{}) {
 		// Unblock depending on actions and propagate error.
 		for _, act := range lp.actions {
 			close(act.analysisDoneCh)
+<<<<<<< HEAD
 			act.Err = werr
+=======
+			act.err = werr
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		}
 		return
 	}
@@ -125,6 +129,7 @@ func (lp *loadingPackage) loadFromSource(loadMode LoadMode) error {
 	pkg.IllTyped = true
 
 	pkg.TypesInfo = &types.Info{
+<<<<<<< HEAD
 		Types:        make(map[ast.Expr]types.TypeAndValue),
 		Instances:    make(map[*ast.Ident]types.Instance),
 		Defs:         make(map[*ast.Ident]types.Object),
@@ -133,6 +138,15 @@ func (lp *loadingPackage) loadFromSource(loadMode LoadMode) error {
 		Selections:   make(map[*ast.SelectorExpr]*types.Selection),
 		Scopes:       make(map[ast.Node]*types.Scope),
 		FileVersions: make(map[*ast.File]string),
+=======
+		Types:      make(map[ast.Expr]types.TypeAndValue),
+		Instances:  make(map[*ast.Ident]types.Instance),
+		Defs:       make(map[*ast.Ident]types.Object),
+		Uses:       make(map[*ast.Ident]types.Object),
+		Implicits:  make(map[ast.Node]types.Object),
+		Scopes:     make(map[ast.Node]*types.Scope),
+		Selections: make(map[*ast.SelectorExpr]*types.Selection),
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 
 	importer := func(path string) (*types.Package, error) {
@@ -364,12 +378,21 @@ func (lp *loadingPackage) decUse(canClearTypes bool) {
 		pass.ImportPackageFact = nil
 		pass.ExportPackageFact = nil
 		act.pass = nil
+<<<<<<< HEAD
 		act.Deps = nil
 		if act.Result != nil {
 			if isMemoryDebug {
 				debugf("%s: decUse: nilling act result of size %d bytes", act, sizeOfValueTreeBytes(act.Result))
 			}
 			act.Result = nil
+=======
+		act.deps = nil
+		if act.result != nil {
+			if isMemoryDebug {
+				debugf("%s: decUse: nilling act result of size %d bytes", act, sizeOfValueTreeBytes(act.result))
+			}
+			act.result = nil
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		}
 	}
 
@@ -400,7 +423,11 @@ func (lp *loadingPackage) decUse(canClearTypes bool) {
 
 	for _, act := range lp.actions {
 		if !lp.isInitial {
+<<<<<<< HEAD
 			act.Package = nil
+=======
+			act.pkg = nil
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		}
 		act.packageFacts = nil
 		act.objectFacts = nil
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners.go
index 3a9a35dec1..3b94b0da5a 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners.go
@@ -2,8 +2,11 @@ package goanalysis
 
 import (
 	"fmt"
+<<<<<<< HEAD
 	"go/token"
 	"strings"
+=======
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	"golang.org/x/tools/go/analysis"
 	"golang.org/x/tools/go/packages"
@@ -83,7 +86,10 @@ func runAnalyzers(cfg runAnalyzersConfig, lintCtx *linter.Context) ([]result.Iss
 
 func buildIssues(diags []Diagnostic, linterNameBuilder func(diag *Diagnostic) string) []result.Issue {
 	var issues []result.Issue
+<<<<<<< HEAD
+=======
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	for i := range diags {
 		diag := &diags[i]
 		linterName := linterNameBuilder(diag)
@@ -95,6 +101,7 @@ func buildIssues(diags []Diagnostic, linterNameBuilder func(diag *Diagnostic) st
 			text = fmt.Sprintf("%s: %s", diag.Analyzer.Name, diag.Message)
 		}
 
+<<<<<<< HEAD
 		var suggestedFixes []analysis.SuggestedFix
 
 		for _, sf := range diag.SuggestedFixes {
@@ -132,6 +139,13 @@ func buildIssues(diags []Diagnostic, linterNameBuilder func(diag *Diagnostic) st
 			Pos:            diag.Position,
 			Pkg:            diag.Pkg,
 			SuggestedFixes: suggestedFixes,
+=======
+		issues = append(issues, result.Issue{
+			FromLinter: linterName,
+			Text:       text,
+			Pos:        diag.Position,
+			Pkg:        diag.Pkg,
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		})
 
 		if len(diag.Related) > 0 {
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners_cache.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners_cache.go
index 4366155b02..8e86b70eec 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners_cache.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners_cache.go
@@ -48,7 +48,11 @@ func saveIssuesToCache(allPkgs []*packages.Package, pkgsFromCache map[*packages.
 				Severity:  i.Severity,
 				Pos:       i.Pos,
 				LineRange: i.LineRange,
+<<<<<<< HEAD
 				SuggestedFixes: i.SuggestedFixes,
+=======
+				Replacement: i.Replacement,
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 				ExpectNoLint:         i.ExpectNoLint,
 				ExpectedNoLintLinter: i.ExpectedNoLintLinter,
 			})
@@ -123,7 +127,11 @@ func loadIssuesFromCache(pkgs []*packages.Package, lintCtx *linter.Context,
 				Severity:  issue.Severity,
 				Pos:       issue.Pos,
 				LineRange: issue.LineRange,
+<<<<<<< HEAD
 				SuggestedFixes: issue.SuggestedFixes,
+=======
+				Replacement: issue.Replacement,
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 				Pkg:                  pkg,
 				ExpectNoLint:         issue.ExpectNoLint,
 				ExpectedNoLintLinter: issue.ExpectedNoLintLinter,
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/asasalint/asasalint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/asasalint/asasalint.go
index ccc58fee40..7c0846405b 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/asasalint/asasalint.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/asasalint/asasalint.go
@@ -9,12 +9,21 @@ import (
 	"github.com/golangci/golangci-lint/pkg/golinters/internal"
 )
 
+<<<<<<< HEAD
 func New(settings *config.AsasalintSettings) *goanalysis.Linter {
 	cfg := asasalint.LinterSetting{}
 	if settings != nil {
 		cfg.Exclude = settings.Exclude
 		cfg.NoBuiltinExclusions = !settings.UseBuiltinExclusions
 		cfg.IgnoreTest = settings.IgnoreTest
+=======
+func New(setting *config.AsasalintSettings) *goanalysis.Linter {
+	cfg := asasalint.LinterSetting{}
+	if setting != nil {
+		cfg.Exclude = setting.Exclude
+		cfg.NoBuiltinExclusions = !setting.UseBuiltinExclusions
+		cfg.IgnoreTest = setting.IgnoreTest
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 
 	a, err := asasalint.NewAnalyzer(cfg)
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/bidichk/bidichk.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/bidichk/bidichk.go
index c6315965c4..eda1298957 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/bidichk/bidichk.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/bidichk/bidichk.go
@@ -10,6 +10,7 @@ import (
 	"github.com/golangci/golangci-lint/pkg/goanalysis"
 )
 
+<<<<<<< HEAD
 func New(settings *config.BiDiChkSettings) *goanalysis.Linter {
 	a := bidichk.NewAnalyzer()
 
@@ -46,6 +47,44 @@ func New(settings *config.BiDiChkSettings) *goanalysis.Linter {
 		}
 
 		cfg[a.Name] = map[string]any{
+=======
+func New(cfg *config.BiDiChkSettings) *goanalysis.Linter {
+	a := bidichk.NewAnalyzer()
+
+	cfgMap := map[string]map[string]any{}
+	if cfg != nil {
+		var opts []string
+
+		if cfg.LeftToRightEmbedding {
+			opts = append(opts, "LEFT-TO-RIGHT-EMBEDDING")
+		}
+		if cfg.RightToLeftEmbedding {
+			opts = append(opts, "RIGHT-TO-LEFT-EMBEDDING")
+		}
+		if cfg.PopDirectionalFormatting {
+			opts = append(opts, "POP-DIRECTIONAL-FORMATTING")
+		}
+		if cfg.LeftToRightOverride {
+			opts = append(opts, "LEFT-TO-RIGHT-OVERRIDE")
+		}
+		if cfg.RightToLeftOverride {
"RIGHT-TO-LEFT-OVERRIDE") + } + if cfg.LeftToRightIsolate { + opts = append(opts, "LEFT-TO-RIGHT-ISOLATE") + } + if cfg.RightToLeftIsolate { + opts = append(opts, "RIGHT-TO-LEFT-ISOLATE") + } + if cfg.FirstStrongIsolate { + opts = append(opts, "FIRST-STRONG-ISOLATE") + } + if cfg.PopDirectionalIsolate { + opts = append(opts, "POP-DIRECTIONAL-ISOLATE") + } + + cfgMap[a.Name] = map[string]any{ +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "disallowed-runes": strings.Join(opts, ","), } } @@ -54,6 +93,10 @@ func New(settings *config.BiDiChkSettings) *goanalysis.Linter { a.Name, "Checks for dangerous unicode character sequences", []*analysis.Analyzer{a}, +<<<<<<< HEAD cfg, +======= + cfgMap, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/bodyclose/bodyclose.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/bodyclose/bodyclose.go index c520e88db3..6f7f7d6aaa 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/bodyclose/bodyclose.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/bodyclose/bodyclose.go @@ -12,7 +12,11 @@ func New() *goanalysis.Linter { return goanalysis.NewLinter( a.Name, +<<<<<<< HEAD a.Doc, +======= + "checks whether HTTP response body is closed successfully", +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) []*analysis.Analyzer{a}, nil, ).WithLoadMode(goanalysis.LoadModeTypesInfo) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop/cyclop.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop/cyclop.go index 772b5601ca..ca9e2b4be7 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop/cyclop.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop/cyclop.go @@ -33,5 +33,9 @@ func New(settings *config.Cyclop) *goanalysis.Linter { a.Doc, []*analysis.Analyzer{a}, cfg, +<<<<<<< HEAD ).WithLoadMode(goanalysis.LoadModeSyntax) +======= + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled/dogsled.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled/dogsled.go index afa8152fac..f46f68affe 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled/dogsled.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled/dogsled.go @@ -1,6 +1,7 @@ package dogsled import ( +<<<<<<< HEAD "go/ast" "golang.org/x/tools/go/analysis" @@ -9,18 +10,52 @@ import ( "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" +======= + "fmt" + "go/ast" + "go/token" + "sync" + + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const linterName = "dogsled" func New(settings *config.DogsledSettings) *goanalysis.Linter { +<<<<<<< HEAD +======= + var mu sync.Mutex + var resIssues []goanalysis.Issue + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { +<<<<<<< HEAD return run(pass, settings.MaxBlankIdentifiers) }, Requires: []*analysis.Analyzer{inspect.Analyzer}, +======= + 
+			issues := runDogsled(pass, settings)
+
+			if len(issues) == 0 {
+				return nil, nil
+			}
+
+			mu.Lock()
+			resIssues = append(resIssues, issues...)
+			mu.Unlock()
+
+			return nil, nil
+		},
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 
 	return goanalysis.NewLinter(
@@ -28,6 +63,7 @@ func New(settings *config.DogsledSettings) *goanalysis.Linter {
 		"Checks assignments with too many blank identifiers (e.g. x, _, _, _, := f())",
 		[]*analysis.Analyzer{analyzer},
 		nil,
+<<<<<<< HEAD
 	).WithLoadMode(goanalysis.LoadModeSyntax)
 }
 
@@ -75,4 +111,70 @@ func run(pass *analysis.Pass, maxBlanks int) (any, error) {
 	})
 
 	return nil, nil
+=======
+	).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
+		return resIssues
+	}).WithLoadMode(goanalysis.LoadModeSyntax)
+}
+
+func runDogsled(pass *analysis.Pass, settings *config.DogsledSettings) []goanalysis.Issue {
+	var reports []goanalysis.Issue
+	for _, f := range pass.Files {
+		v := &returnsVisitor{
+			maxBlanks: settings.MaxBlankIdentifiers,
+			f:         pass.Fset,
+		}
+
+		ast.Walk(v, f)
+
+		for i := range v.issues {
+			reports = append(reports, goanalysis.NewIssue(&v.issues[i], pass))
+		}
+	}
+
+	return reports
+}
+
+type returnsVisitor struct {
+	f         *token.FileSet
+	maxBlanks int
+	issues    []result.Issue
+}
+
+func (v *returnsVisitor) Visit(node ast.Node) ast.Visitor {
+	funcDecl, ok := node.(*ast.FuncDecl)
+	if !ok {
+		return v
+	}
+	if funcDecl.Body == nil {
+		return v
+	}
+
+	for _, expr := range funcDecl.Body.List {
+		assgnStmt, ok := expr.(*ast.AssignStmt)
+		if !ok {
+			continue
+		}
+
+		numBlank := 0
+		for _, left := range assgnStmt.Lhs {
+			ident, ok := left.(*ast.Ident)
+			if !ok {
+				continue
+			}
+			if ident.Name == "_" {
+				numBlank++
+			}
+		}
+
+		if numBlank > v.maxBlanks {
+			v.issues = append(v.issues, result.Issue{
+				FromLinter: linterName,
+				Text:       fmt.Sprintf("declaration has %v blank identifiers", numBlank),
+				Pos:        v.f.Position(assgnStmt.Pos()),
+			})
+		}
+	}
+	return v
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl/dupl.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl/dupl.go
index d2bb3d8d84..c7fee28e05 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl/dupl.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl/dupl.go
@@ -54,7 +54,13 @@ func New(settings *config.DuplSettings) *goanalysis.Linter {
 }
 
 func runDupl(pass *analysis.Pass, settings *config.DuplSettings) ([]goanalysis.Issue, error) {
+<<<<<<< HEAD
 	issues, err := duplAPI.Run(internal.GetGoFileNames(pass), settings.Threshold)
+=======
+	fileNames := internal.GetFileNames(pass)
+
+	issues, err := duplAPI.Run(fileNames, settings.Threshold)
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupword/dupword.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupword/dupword.go
index a2bcc34d40..e2820587a8 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupword/dupword.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupword/dupword.go
@@ -10,6 +10,7 @@ import (
 	"github.com/golangci/golangci-lint/pkg/goanalysis"
 )
 
+<<<<<<< HEAD
 func New(settings *config.DupWordSettings) *goanalysis.Linter {
 	a := dupword.NewAnalyzer()
 
@@ -18,6 +19,16 @@ func New(settings *config.DupWordSettings) *goanalysis.Linter {
 		cfg[a.Name] = map[string]any{
 			"keyword": strings.Join(settings.Keywords, ","),
 			"ignore":  strings.Join(settings.Ignore, ","),
+=======
+func New(setting *config.DupWordSettings) *goanalysis.Linter {
+	a := dupword.NewAnalyzer()
+
+	cfgMap := map[string]map[string]any{}
+	if setting != nil {
+		cfgMap[a.Name] = map[string]any{
+			"keyword": strings.Join(setting.Keywords, ","),
+			"ignore":  strings.Join(setting.Ignore, ","),
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		}
 	}
 
@@ -25,6 +36,10 @@ func New(settings *config.DupWordSettings) *goanalysis.Linter {
 		a.Name,
 		"checks for duplicate words in the source code",
 		[]*analysis.Analyzer{a},
+<<<<<<< HEAD
 		cfg,
+=======
+		cfgMap,
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	).WithLoadMode(goanalysis.LoadModeSyntax)
 }
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck/errcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck/errcheck.go
index 67a1b2ca8d..6ff6cb9cea 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck/errcheck.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck/errcheck.go
@@ -2,7 +2,10 @@ package errcheck
 
 import (
 	"bufio"
+<<<<<<< HEAD
 	"cmp"
+=======
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"fmt"
 	"os"
 	"os/user"
@@ -91,7 +94,14 @@ func runErrCheck(lintCtx *linter.Context, pass *analysis.Pass, checker *errcheck
 
 		text := "Error return value is not checked"
 		if err.FuncName != "" {
+<<<<<<< HEAD
 			code := cmp.Or(err.SelectorName, err.FuncName)
+=======
+			code := err.SelectorName
+			if err.SelectorName == "" {
+				code = err.FuncName
+			}
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 			text = fmt.Sprintf("Error return value of %s is not checked", internal.FormatCode(code, lintCtx.Cfg))
 		}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errchkjson/errchkjson.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errchkjson/errchkjson.go
index 506113d6d5..37d04f2d9d 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errchkjson/errchkjson.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errchkjson/errchkjson.go
@@ -8,6 +8,7 @@ import (
 	"github.com/golangci/golangci-lint/pkg/goanalysis"
 )
 
+<<<<<<< HEAD
 func New(settings *config.ErrChkJSONSettings) *goanalysis.Linter {
 	a := errchkjson.NewAnalyzer()
 
@@ -19,6 +20,19 @@ func New(settings *config.ErrChkJSONSettings) *goanalysis.Linter {
 		cfg[a.Name] = map[string]any{
 			"omit-safe":          !settings.CheckErrorFreeEncoding,
 			"report-no-exported": settings.ReportNoExported,
+=======
+func New(cfg *config.ErrChkJSONSettings) *goanalysis.Linter {
+	a := errchkjson.NewAnalyzer()
+
+	cfgMap := map[string]map[string]any{}
+	cfgMap[a.Name] = map[string]any{
+		"omit-safe": true,
+	}
+	if cfg != nil {
+		cfgMap[a.Name] = map[string]any{
+			"omit-safe":          !cfg.CheckErrorFreeEncoding,
+			"report-no-exported": cfg.ReportNoExported,
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		}
 	}
 
@@ -26,6 +40,10 @@ func New(settings *config.ErrChkJSONSettings) *goanalysis.Linter {
 		a.Name,
 		a.Doc,
 		[]*analysis.Analyzer{a},
+<<<<<<< HEAD
 		cfg,
+=======
+		cfgMap,
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	).WithLoadMode(goanalysis.LoadModeTypesInfo)
 }
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errorlint/errorlint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errorlint/errorlint.go
index 14851adc28..40fa7d44ea 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errorlint/errorlint.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errorlint/errorlint.go
@@
@@ -8,16 +8,28 @@ import (
 	"github.com/golangci/golangci-lint/pkg/goanalysis"
 )
 
-func New(settings *config.ErrorLintSettings) *goanalysis.Linter {
-	var opts []errorlint.Option
-
-	if settings != nil {
-		ae := toAllowPairs(settings.AllowedErrors)
+func New(cfg *config.ErrorLintSettings) *goanalysis.Linter {
+	var opts []errorlint.Option
+
+	if cfg != nil {
+		ae := toAllowPairs(cfg.AllowedErrors)
 		if len(ae) > 0 {
 			opts = append(opts, errorlint.WithAllowedErrors(ae))
 		}
 
-		aew := toAllowPairs(settings.AllowedErrorsWildcard)
+		aew := toAllowPairs(cfg.AllowedErrorsWildcard)
 		if len(aew) > 0 {
 			opts = append(opts, errorlint.WithAllowedWildcard(aew))
 		}
@@ -25,6 +37,7 @@ func New(settings *config.ErrorLintSettings) *goanalysis.Linter {
 
 	a := errorlint.NewAnalyzer(opts...)
 
-	cfg := map[string]map[string]any{}
-
-	if settings != nil {
@@ -33,6 +46,16 @@ func New(settings *config.ErrorLintSettings) *goanalysis.Linter {
-			"errorf-multi": settings.ErrorfMulti,
-			"asserts":      settings.Asserts,
-			"comparison":   settings.Comparison,
+	cfgMap := map[string]map[string]any{}
+
+	if cfg != nil {
+		cfgMap[a.Name] = map[string]any{
+			"errorf":       cfg.Errorf,
+			"errorf-multi": cfg.ErrorfMulti,
+			"asserts":      cfg.Asserts,
+			"comparison":   cfg.Comparison,
 		}
 	}
 
@@ -41,7 +64,11 @@ func New(settings *config.ErrorLintSettings) *goanalysis.Linter {
 		"errorlint is a linter for that can be used to find code "+
 			"that will cause problems with the error wrapping scheme introduced in Go 1.13.",
 		[]*analysis.Analyzer{a},
-		cfg,
+		cfgMap,
 	).WithLoadMode(goanalysis.LoadModeTypesInfo)
 }
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/forbidigo/forbidigo.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/forbidigo/forbidigo.go
index 3b410359d0..7f7e68a783 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/forbidigo/forbidigo.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/forbidigo/forbidigo.go
@@ -2,27 +2,57 @@ package forbidigo
 
 import (
 	"fmt"
+	"sync"
 
 	"github.com/ashanbrown/forbidigo/forbidigo"
 	"golang.org/x/tools/go/analysis"
 
 	"github.com/golangci/golangci-lint/pkg/config"
 	"github.com/golangci/golangci-lint/pkg/goanalysis"
-	"github.com/golangci/golangci-lint/pkg/logutils"
+	"github.com/golangci/golangci-lint/pkg/lint/linter"
+	"github.com/golangci/golangci-lint/pkg/logutils"
+	"github.com/golangci/golangci-lint/pkg/result"
 )
 
 const linterName = "forbidigo"
 
 func New(settings *config.ForbidigoSettings) *goanalysis.Linter {
+	var mu sync.Mutex
+	var resIssues []goanalysis.Issue
+
 	analyzer := &analysis.Analyzer{
 		Name: linterName,
 		Doc:  goanalysis.TheOnlyanalyzerDoc,
 		Run: func(pass *analysis.Pass) (any, error) {
-			err := runForbidigo(pass, settings)
+			issues, err := runForbidigo(pass, settings)
 			if err != nil {
 				return nil, err
 			}
 
+			if len(issues) == 0 {
+				return nil, nil
+			}
+
+			mu.Lock()
+			resIssues = append(resIssues, issues...)
+			mu.Unlock()
 
 			return nil, nil
 		},
 	}
 
@@ -35,10 +65,19 @@ func New(settings *config.ForbidigoSettings) *goanalysis.Linter {
 		"Forbids identifiers",
 		[]*analysis.Analyzer{analyzer},
 		nil,
-	).WithLoadMode(goanalysis.LoadModeTypesInfo)
-}
-
-func runForbidigo(pass *analysis.Pass, settings *config.ForbidigoSettings) error {
+	).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
+		return resIssues
+	}).WithLoadMode(goanalysis.LoadModeTypesInfo)
+}
+
+func runForbidigo(pass *analysis.Pass, settings *config.ForbidigoSettings) ([]goanalysis.Issue, error) {
 	options := []forbidigo.Option{
 		forbidigo.OptionExcludeGodocExamples(settings.ExcludeGodocExamples),
 		// disable "//permit" directives so only "//nolint" directives matters within golangci-lint
@@ -51,22 +90,35 @@ func runForbidigo(pass *analysis.Pass, settings *config.ForbidigoSettings) error
 	for _, pattern := range settings.Forbid {
 		buffer, err := pattern.MarshalString()
 		if err != nil {
-			return err
-		}
+			return nil, err
+		}
 
 		patterns = append(patterns, string(buffer))
 	}
 
 	forbid, err := forbidigo.NewLinter(patterns, options...)
 	if err != nil {
-		return fmt.Errorf("failed to create linter %q: %w", linterName, err)
-	}
+		return nil, fmt.Errorf("failed to create linter %q: %w", linterName, err)
+	}
+
+	var issues []goanalysis.Issue
 
 	for _, file := range pass.Files {
 		runConfig := forbidigo.RunConfig{
 			Fset:     pass.Fset,
 			DebugLog: logutils.Debug(logutils.DebugKeyForbidigo),
 		}
-		if settings.AnalyzeTypes {
-			runConfig.TypesInfo = pass.TypesInfo
@@ -86,4 +138,24 @@ func runForbidigo(pass *analysis.Pass, settings *config.ForbidigoSettings) error
-	}
-
-	return nil
+		if settings != nil && settings.AnalyzeTypes {
+			runConfig.TypesInfo = pass.TypesInfo
+		}
+		hints, err := forbid.RunWithConfig(runConfig, file)
+		if err != nil {
+			return nil, fmt.Errorf("forbidigo linter failed on file %q: %w", file.Name.String(), err)
+		}
+
+		for _, hint := range hints {
+			issues = append(issues, goanalysis.NewIssue(&result.Issue{
+				Pos:        hint.Position(),
+				Text:       hint.Details(),
+				FromLinter: linterName,
+			}, pass))
+		}
+	}
+
+	return issues, nil
 }
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen/funlen.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen/funlen.go
index bdadcece46..e0ed551587 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen/funlen.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen/funlen.go
@@ -1,11 +1,19 @@
 package funlen
 
 import (
+	"go/token"
+	"strings"
+	"sync"
+
 	"github.com/ultraware/funlen"
 	"golang.org/x/tools/go/analysis"
 
 	"github.com/golangci/golangci-lint/pkg/config"
 	"github.com/golangci/golangci-lint/pkg/goanalysis"
-)
-
-type Config struct {
@@ -30,4 +38,68 @@ func New(settings *config.FunlenSettings) *goanalysis.Linter {
-	[]*analysis.Analyzer{a},
-	nil,
-	).WithLoadMode(goanalysis.LoadModeSyntax)
+	"github.com/golangci/golangci-lint/pkg/lint/linter"
+	"github.com/golangci/golangci-lint/pkg/result"
+)
+
+const linterName = "funlen"
+
+func New(settings *config.FunlenSettings) *goanalysis.Linter {
+	var mu sync.Mutex
+	var resIssues []goanalysis.Issue
+
+	analyzer := &analysis.Analyzer{
+		Name: linterName,
+		Doc:  goanalysis.TheOnlyanalyzerDoc,
+		Run: func(pass *analysis.Pass) (any, error) {
+			issues := runFunlen(pass, settings)
+
+			if len(issues) == 0 {
+				return nil, nil
+			}
+
+			mu.Lock()
+			resIssues = append(resIssues, issues...)
+			mu.Unlock()
+
+			return nil, nil
+		},
+	}
+
+	return goanalysis.NewLinter(
+		linterName,
+		"Tool for detection of long functions",
+		[]*analysis.Analyzer{analyzer},
+		nil,
+	).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
+		return resIssues
+	}).WithLoadMode(goanalysis.LoadModeSyntax)
+}
+
+func runFunlen(pass *analysis.Pass, settings *config.FunlenSettings) []goanalysis.Issue {
+	var lintIssues []funlen.Message
+	for _, file := range pass.Files {
+		fileIssues := funlen.Run(file, pass.Fset, settings.Lines, settings.Statements, settings.IgnoreComments)
+		lintIssues = append(lintIssues, fileIssues...)
+	}
+
+	if len(lintIssues) == 0 {
+		return nil
+	}
+
+	issues := make([]goanalysis.Issue, len(lintIssues))
+	for k, i := range lintIssues {
+		issues[k] = goanalysis.NewIssue(&result.Issue{
+			Pos: token.Position{
+				Filename: i.Pos.Filename,
+				Line:     i.Pos.Line,
+			},
+			Text:       strings.TrimRight(i.Message, "\n"),
+			FromLinter: linterName,
+		}, pass)
+	}
+
+	return issues
 }
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci/gci.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci/gci.go
index 841ee81b0d..e63bc6a0c8 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci/gci.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci/gci.go
@@ -1,6 +1,7 @@
 package gci
 
 import (
-	"bytes"
-	"fmt"
-	"io"
@@ -10,6 +11,22 @@ import (
-	"github.com/daixiang0/gci/pkg/gci"
-	"github.com/daixiang0/gci/pkg/log"
-	"github.com/shazow/go-diff/difflib"
+	"fmt"
+	"sort"
+	"strings"
+	"sync"
+
+	gcicfg "github.com/daixiang0/gci/pkg/config"
+	"github.com/daixiang0/gci/pkg/gci"
+	"github.com/daixiang0/gci/pkg/io"
+	"github.com/daixiang0/gci/pkg/log"
+	"github.com/daixiang0/gci/pkg/section"
+	"github.com/golangci/modinfo"
+	"github.com/hexops/gotextdiff"
+	"github.com/hexops/gotextdiff/myers"
+	"github.com/hexops/gotextdiff/span"
 	"golang.org/x/tools/go/analysis"
 
 	"github.com/golangci/golangci-lint/pkg/config"
@@ -20,6 +37,7 @@ import (
 
 const linterName = "gci"
 
-type differ interface {
-	Diff(out io.Writer, a io.ReadSeeker, b io.ReadSeeker) error
-}
-
@@ -44,10 +62,60 @@ func New(settings *config.GciSettings) *goanalysis.Linter {
-	).WithContextSetter(func(lintCtx *linter.Context) {
-		a.Run = func(pass *analysis.Pass) (any, error) {
-			err := run(lintCtx, pass, settings, diff)
+func New(settings *config.GciSettings) *goanalysis.Linter {
+	var mu sync.Mutex
+	var resIssues []goanalysis.Issue
+
+	analyzer := &analysis.Analyzer{
+		Name: linterName,
+		Doc:  goanalysis.TheOnlyanalyzerDoc,
+		Run:  goanalysis.DummyRun,
+		Requires: []*analysis.Analyzer{
+			modinfo.Analyzer,
+		},
+	}
+
+	var cfg *gcicfg.Config
+	if settings != nil {
+		rawCfg := gcicfg.YamlConfig{
+			Cfg: gcicfg.BoolConfig{
+				SkipGenerated: settings.SkipGenerated,
+				CustomOrder:   settings.CustomOrder,
+				NoLexOrder:    settings.NoLexOrder,
+			},
+			SectionStrings: settings.Sections,
+		}
+
+		if settings.LocalPrefixes != "" {
+			prefix := []string{"standard", "default", fmt.Sprintf("prefix(%s)", settings.LocalPrefixes)}
+			rawCfg.SectionStrings = prefix
+		}
+
+		var err error
+		cfg, err = YamlConfig{origin: rawCfg}.Parse()
+		if err != nil {
+			internal.LinterLogger.Fatalf("gci: configuration parsing: %v", err)
+		}
+	}
+
+	var lock sync.Mutex
+
+	return goanalysis.NewLinter(
+		linterName,
+		"Gci controls Go package import order and makes it always deterministic.",
+		[]*analysis.Analyzer{analyzer},
+		nil,
+	).WithContextSetter(func(lintCtx *linter.Context) {
+		analyzer.Run = func(pass *analysis.Pass) (any, error) {
+			var err error
+			cfg.Sections, err = hackSectionList(pass, cfg)
 			if err != nil {
 				return nil, err
 			}
 
-			return nil, nil
-		}
-	}).WithLoadMode(goanalysis.LoadModeSyntax)
@@ -113,4 +181,177 @@ func run(lintCtx *linter.Context, pass *analysis.Pass, settings *config.GciSetti
-	}
-
-	return nil
+			issues, err := runGci(pass, lintCtx, cfg, &lock)
+			if err != nil {
+				return nil, err
+			}
+
+			if len(issues) == 0 {
+				return nil, nil
+			}
+
+			mu.Lock()
+			resIssues = append(resIssues, issues...)
+			mu.Unlock()
+
+			return nil, nil
+		}
+	}).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
+		return resIssues
+	}).WithLoadMode(goanalysis.LoadModeSyntax)
+}
+
+func runGci(pass *analysis.Pass, lintCtx *linter.Context, cfg *gcicfg.Config, lock *sync.Mutex) ([]goanalysis.Issue, error) {
+	fileNames := internal.GetFileNames(pass)
+
+	var diffs []string
+	err := diffFormattedFilesToArray(fileNames, *cfg, &diffs, lock)
+	if err != nil {
+		return nil, err
+	}
+
+	var issues []goanalysis.Issue
+
+	for _, diff := range diffs {
+		if diff == "" {
+			continue
+		}
+
+		is, err := internal.ExtractIssuesFromPatch(diff, lintCtx, linterName, getIssuedTextGci)
+		if err != nil {
+			return nil, fmt.Errorf("can't extract issues from gci diff output %s: %w", diff, err)
+		}
+
+		for i := range is {
+			issues = append(issues, goanalysis.NewIssue(&is[i], pass))
+		}
+	}
+
+	return issues, nil
+}
+
+func getIssuedTextGci(settings *config.LintersSettings) string {
+	text := "File is not `gci`-ed"
+
+	hasOptions := settings.Gci.SkipGenerated || len(settings.Gci.Sections) > 0
+	if !hasOptions {
+		return text
+	}
+
+	text += " with"
+
+	if settings.Gci.SkipGenerated {
+		text += " --skip-generated"
+	}
+
+	if len(settings.Gci.Sections) > 0 {
+		for _, sect := range settings.Gci.Sections {
+			text += " -s " + sect
+		}
+	}
+
+	if settings.Gci.CustomOrder {
+		text += " --custom-order"
+	}
+
+	return text
+}
+
+func hackSectionList(pass *analysis.Pass, cfg *gcicfg.Config) (section.SectionList, error) {
+	var sections section.SectionList
+
+	for _, sect := range cfg.Sections {
+		// local module hack
+		if v, ok := sect.(*section.LocalModule); ok {
+			info, err := modinfo.FindModuleFromPass(pass)
+			if err != nil {
+				return nil, err
+			}
+
+			if info.Path == "" {
+				continue
+			}
+
+			v.Path = info.Path
+		}
+
+		sections = append(sections, sect)
+	}
+
+	return sections, nil
+}
+
+// diffFormattedFilesToArray is a copy of gci.DiffFormattedFilesToArray without io.StdInGenerator.
+// gci.DiffFormattedFilesToArray uses gci.processStdInAndGoFilesInPaths that uses io.StdInGenerator but stdin is not active on CI.
+// https://github.com/daixiang0/gci/blob/6f5cb16718ba07f0342a58de9b830ec5a6d58790/pkg/gci/gci.go#L63-L75
+// https://github.com/daixiang0/gci/blob/6f5cb16718ba07f0342a58de9b830ec5a6d58790/pkg/gci/gci.go#L80
+func diffFormattedFilesToArray(paths []string, cfg gcicfg.Config, diffs *[]string, lock *sync.Mutex) error {
+	log.InitLogger()
+	defer func() { _ = log.L().Sync() }()
+
+	return gci.ProcessFiles(io.GoFilesInPathsGenerator(paths, true), cfg, func(filePath string, unmodifiedFile, formattedFile []byte) error {
+		fileURI := span.URIFromPath(filePath)
+		edits := myers.ComputeEdits(fileURI, string(unmodifiedFile), string(formattedFile))
+		unifiedEdits := gotextdiff.ToUnified(filePath, filePath, string(unmodifiedFile), edits)
+		lock.Lock()
+		*diffs = append(*diffs, fmt.Sprint(unifiedEdits))
+		lock.Unlock()
+		return nil
+	})
+}
+
+// Code below this comment is borrowed and modified from gci.
+// https://github.com/daixiang0/gci/blob/v0.13.5/pkg/config/config.go
+
+var defaultOrder = map[string]int{
+	section.StandardType:    0,
+	section.DefaultType:     1,
+	section.CustomType:      2,
+	section.BlankType:       3,
+	section.DotType:         4,
+	section.AliasType:       5,
+	section.LocalModuleType: 6,
+}
+
+type YamlConfig struct {
+	origin gcicfg.YamlConfig
+}
+
+//nolint:gocritic // code borrowed from gci and modified to fix LocalModule section behavior.
+func (g YamlConfig) Parse() (*gcicfg.Config, error) {
+	var err error
+
+	sections, err := section.Parse(g.origin.SectionStrings)
+	if err != nil {
+		return nil, err
+	}
+
+	if sections == nil {
+		sections = section.DefaultSections()
+	}
+
+	// if default order sorted sections
+	if !g.origin.Cfg.CustomOrder {
+		sort.Slice(sections, func(i, j int) bool {
+			sectionI, sectionJ := sections[i].Type(), sections[j].Type()
+
+			if g.origin.Cfg.NoLexOrder || strings.Compare(sectionI, sectionJ) != 0 {
+				return defaultOrder[sectionI] < defaultOrder[sectionJ]
+			}
+
+			return strings.Compare(sections[i].String(), sections[j].String()) < 0
+		})
+	}
+
+	sectionSeparators, err := section.Parse(g.origin.SectionSeparatorStrings)
+	if err != nil {
+		return nil, err
+	}
+	if sectionSeparators == nil {
+		sectionSeparators = section.DefaultSectionSeparators()
+	}
+
+	return &gcicfg.Config{BoolConfig: g.origin.Cfg, Sections: sections, SectionSeparators: sectionSeparators}, nil
 }
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/ginkgolinter/ginkgolinter.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/ginkgolinter/ginkgolinter.go
index 6826b77b6b..be6121c46b 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/ginkgolinter/ginkgolinter.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/ginkgolinter/ginkgolinter.go
@@ -14,6 +14,7 @@ func New(settings *config.GinkgoLinterSettings) *goanalysis.Linter {
 
 	if settings != nil {
 		cfg = &types.Config{
-			SuppressLen:            settings.SuppressLenAssertion,
-			SuppressNil:            settings.SuppressNilAssertion,
-			SuppressErr:            settings.SuppressErrAssertion,
@@ -26,6 +27,20 @@ func New(settings *config.GinkgoLinterSettings) *goanalysis.Linter {
-			ValidateAsyncIntervals: settings.ValidateAsyncIntervals,
-			ForbidSpecPollution:    settings.ForbidSpecPollution,
-			ForceSucceedForFuncs:   settings.ForceSucceedForFuncs,
+			SuppressLen:            types.Boolean(settings.SuppressLenAssertion),
+			SuppressNil:            types.Boolean(settings.SuppressNilAssertion),
+			SuppressErr:            types.Boolean(settings.SuppressErrAssertion),
+			SuppressCompare:        types.Boolean(settings.SuppressCompareAssertion),
+			SuppressAsync:          types.Boolean(settings.SuppressAsyncAssertion),
+			ForbidFocus:            types.Boolean(settings.ForbidFocusContainer),
+			SuppressTypeCompare:    types.Boolean(settings.SuppressTypeCompareWarning),
+			AllowHaveLen0:          types.Boolean(settings.AllowHaveLenZero),
+			ForceExpectTo:          types.Boolean(settings.ForceExpectTo),
+			ValidateAsyncIntervals: types.Boolean(settings.ValidateAsyncIntervals),
+			ForbidSpecPollution:    types.Boolean(settings.ForbidSpecPollution),
+			ForceSucceedForFuncs:   types.Boolean(settings.ForceSucceedForFuncs),
 		}
 	}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits/gochecknoinits.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits/gochecknoinits.go
index 510a06c91d..9555bfdc29 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits/gochecknoinits.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits/gochecknoinits.go
@@ -1,6 +1,7 @@
 package gochecknoinits
 
 import (
-	"go/ast"
-
-	"golang.org/x/tools/go/analysis"
-
@@ -9,16 +10,56 @@ import (
-	"github.com/golangci/golangci-lint/pkg/goanalysis"
-	"github.com/golangci/golangci-lint/pkg/golinters/internal"
+	"fmt"
+	"go/ast"
+	"go/token"
+	"sync"
+
+	"golang.org/x/tools/go/analysis"
+
+	"github.com/golangci/golangci-lint/pkg/goanalysis"
+	"github.com/golangci/golangci-lint/pkg/golinters/internal"
+	"github.com/golangci/golangci-lint/pkg/lint/linter"
+	"github.com/golangci/golangci-lint/pkg/result"
 )
 
 const linterName = "gochecknoinits"
 
 func New() *goanalysis.Linter {
-	analyzer := &analysis.Analyzer{
-		Name:     linterName,
-		Doc:      goanalysis.TheOnlyanalyzerDoc,
-		Run:      run,
-		Requires: []*analysis.Analyzer{inspect.Analyzer},
+	var mu sync.Mutex
+	var resIssues []goanalysis.Issue
+
+	analyzer := &analysis.Analyzer{
+		Name: linterName,
+		Doc:  goanalysis.TheOnlyanalyzerDoc,
+		Run: func(pass *analysis.Pass) (any, error) {
+			var res []goanalysis.Issue
+			for _, file := range pass.Files {
+				fileIssues := checkFileForInits(file, pass.Fset)
+				for i := range fileIssues {
+					res = append(res, goanalysis.NewIssue(&fileIssues[i], pass))
+				}
+			}
+			if len(res) == 0 {
+				return nil, nil
+			}
+
+			mu.Lock()
+			resIssues = append(resIssues, res...)
+			mu.Unlock()
+
+			return nil, nil
+		},
 	}
 
 	return goanalysis.NewLinter(
@@ -26,6 +67,7 @@ func New() *goanalysis.Linter {
 		"Checks that no init functions are present in Go code",
 		[]*analysis.Analyzer{analyzer},
 		nil,
-	).WithLoadMode(goanalysis.LoadModeSyntax)
-}
-
@@ -43,13 +85,38 @@ func run(pass *analysis.Pass) (any, error) {
-		funcDecl, ok := decl.(*ast.FuncDecl)
-		if !ok {
-			return
+	).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
+		return resIssues
+	}).WithLoadMode(goanalysis.LoadModeSyntax)
+}
+
+func checkFileForInits(f *ast.File, fset *token.FileSet) []result.Issue {
+	var res []result.Issue
+	for _, decl := range f.Decls {
+		funcDecl, ok := decl.(*ast.FuncDecl)
+		if !ok {
+			continue
 		}
 
 		fnName := funcDecl.Name.Name
 		if fnName == "init" && funcDecl.Recv.NumFields() == 0 {
-			pass.Reportf(funcDecl.Pos(), "don't use %s function", internal.FormatCode(fnName, nil))
-		}
-	})
-
-	return nil, nil
+			res = append(res, result.Issue{
+				Pos:        fset.Position(funcDecl.Pos()),
+				Text:       fmt.Sprintf("don't use %s function", internal.FormatCode(fnName, nil)),
+				FromLinter: linterName,
+			})
+		}
+	}
+
+	return res
 }
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecksumtype/gochecksumtype.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecksumtype/gochecksumtype.go
index cbc5873126..2f88bb7e9d 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecksumtype/gochecksumtype.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecksumtype/gochecksumtype.go
@@ -61,6 +61,7 @@ func runGoCheckSumType(pass *analysis.Pass, settings *config.GoChecksumTypeSetti
 		TypesInfo: pass.TypesInfo,
 	}
 
-	cfg := gochecksumtype.Config{
-		DefaultSignifiesExhaustive: settings.DefaultSignifiesExhaustive,
-		IncludeSharedInterfaces:    settings.IncludeSharedInterfaces,
@@ -68,6 +69,11 @@ func runGoCheckSumType(pass *analysis.Pass, settings *config.GoChecksumTypeSetti
-	var unknownError error
-	errors := gochecksumtype.Run([]*packages.Package{pkg}, cfg)
+	var unknownError error
+	errors := gochecksumtype.Run([]*packages.Package{pkg},
+		gochecksumtype.Config{DefaultSignifiesExhaustive: settings.DefaultSignifiesExhaustive})
 	for _, err := range errors {
 		err, ok := err.(gochecksumtype.Error)
 		if !ok {
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic/gocritic.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic/gocritic.go
index 087ddc1df0..ad996a3b7d 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic/gocritic.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic/gocritic.go
@@ -5,6 +5,10 @@ import (
 	"fmt"
 	"go/ast"
 	"go/types"
+	"path/filepath"
 	"reflect"
 	"runtime"
 	"slices"
@@ -22,6 +26,10 @@ import (
 	"github.com/golangci/golangci-lint/pkg/goanalysis"
 	"github.com/golangci/golangci-lint/pkg/lint/linter"
 	"github.com/golangci/golangci-lint/pkg/logutils"
+	"github.com/golangci/golangci-lint/pkg/result"
 )
 
 const linterName = "gocritic"
@@ -32,6 +40,12 @@ var (
 )
 
 func New(settings *config.GoCriticSettings) *goanalysis.Linter {
+	var mu sync.Mutex
+	var resIssues []goanalysis.Issue
+
 	wrapper := &goCriticWrapper{
 		sizes: types.SizesFor("gc", runtime.GOARCH),
 	}
@@ -40,11 +54,26 @@ func New(settings *config.GoCriticSettings) *goanalysis.Linter {
 		Name: linterName,
 		Doc:  goanalysis.TheOnlyanalyzerDoc,
 		Run: func(pass *analysis.Pass) (any, error) {
-			err := wrapper.run(pass)
+			issues, err := wrapper.run(pass)
 			if err != nil {
 				return nil, err
 			}
 
+			if len(issues) == 0 {
+				return nil, nil
+			}
+
+			mu.Lock()
+			resIssues = append(resIssues, issues...)
+			mu.Unlock()
+
 			return nil, nil
 		},
 	}
@@ -62,6 +91,12 @@ Dynamic rules are written declaratively with AST patterns, filters, report messa
 			wrapper.init(context.Log, settings)
 		}).
+		WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
+			return resIssues
+		}).
 		WithLoadMode(goanalysis.LoadModeTypesInfo)
 }
 
@@ -95,9 +130,15 @@ func (w *goCriticWrapper) init(logger logutils.Log, settings *config.GoCriticSet
 	w.settingsWrapper = settingsWrapper
 }
 
-func (w *goCriticWrapper) run(pass *analysis.Pass) error {
-	if w.settingsWrapper == nil {
-		return errors.New("the settings wrapper is nil")
+func (w *goCriticWrapper) run(pass *analysis.Pass) ([]goanalysis.Issue, error) {
+	if w.settingsWrapper == nil {
+		return nil, errors.New("the settings wrapper is nil")
 	}
 
 	linterCtx := gocriticlinter.NewContext(pass.Fset, w.sizes)
@@ -106,14 +147,29 @@ func (w *goCriticWrapper) run(pass *analysis.Pass) error {
 
 	enabledCheckers, err := w.buildEnabledCheckers(linterCtx)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
 	linterCtx.SetPackageInfo(pass.TypesInfo, pass.Pkg)
 
-	runOnPackage(pass, enabledCheckers, pass.Files)
-
-	return nil
+	pkgIssues := runOnPackage(linterCtx, enabledCheckers, pass.Files)
+
+	issues := make([]goanalysis.Issue, 0, len(pkgIssues))
+	for i := range pkgIssues {
+		issues = append(issues, goanalysis.NewIssue(&pkgIssues[i], pass))
+	}
+
+	return issues, nil
 }
 
 func (w *goCriticWrapper) buildEnabledCheckers(linterCtx *gocriticlinter.Context) ([]*gocriticlinter.Checker, error) {
@@ -193,6 +249,7 @@ func (w *goCriticWrapper) normalizeCheckerParamsValue(p any) any {
 	}
 }
 
-func runOnPackage(pass *analysis.Pass, checks []*gocriticlinter.Checker, files []*ast.File) {
-	for _, f := range files {
-		runOnFile(pass, f, checks)
@@ -200,10 +257,28 @@ func runOnPackage(pass *analysis.Pass, checks []*gocriticlinter.Checker, files [
-}
-
-func runOnFile(pass *analysis.Pass, f *ast.File, checks []*gocriticlinter.Checker) {
+func runOnPackage(linterCtx *gocriticlinter.Context, checks []*gocriticlinter.Checker, files []*ast.File) []result.Issue {
+	var res []result.Issue
+	for _, f := range files {
+		filename := filepath.Base(linterCtx.FileSet.Position(f.Pos()).Filename)
+		linterCtx.SetFileInfo(filename, f)
+
+		issues := runOnFile(linterCtx, f, checks)
+		res = append(res, issues...)
+	}
+	return res
+}
+
+func runOnFile(linterCtx *gocriticlinter.Context, f *ast.File, checks []*gocriticlinter.Checker) []result.Issue {
+	var res []result.Issue
+
 	for _, c := range checks {
 		// All checkers are expected to use *lint.Context
 		// as read-only structure, so no copying is required.
 		for _, warn := range c.Check(f) {
-			diag := analysis.Diagnostic{
-				Pos:      warn.Pos,
-				Category: c.Info.Name,
@@ -223,6 +298,30 @@ func runOnFile(pass *analysis.Pass, f *ast.File, checks []*gocriticlinter.Checke
-			pass.Report(diag)
-		}
-	}
+			pos := linterCtx.FileSet.Position(warn.Pos)
+			issue := result.Issue{
+				Pos:        pos,
+				Text:       fmt.Sprintf("%s: %s", c.Info.Name, warn.Text),
+				FromLinter: linterName,
+			}
+
+			if warn.HasQuickFix() {
+				issue.Replacement = &result.Replacement{
+					Inline: &result.InlineFix{
+						StartCol:  pos.Column - 1,
+						Length:    int(warn.Suggestion.To - warn.Suggestion.From),
+						NewString: string(warn.Suggestion.Replacement),
+					},
+				}
+			}
+
+			res = append(res, issue)
+		}
+	}
+
+	return res
 }
 
 type goCriticChecks[T any] map[string]T
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot/godot.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot/godot.go
index 3194b3d3ac..25bb92b1f3 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot/godot.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot/godot.go
@@ -1,18 +1,33 @@
 package godot
 
 import (
-	"cmp"
+	"sync"
 
 	"github.com/tetafro/godot"
 	"golang.org/x/tools/go/analysis"
 
 	"github.com/golangci/golangci-lint/pkg/config"
 	"github.com/golangci/golangci-lint/pkg/goanalysis"
+	"github.com/golangci/golangci-lint/pkg/lint/linter"
+	"github.com/golangci/golangci-lint/pkg/result"
 )
 
 const linterName = "godot"
 
 func New(settings *config.GodotSettings) *goanalysis.Linter {
+	var mu sync.Mutex
+	var resIssues []goanalysis.Issue
+
 	var dotSettings godot.Settings
 
 	if settings != nil {
@@ -24,22 +39,47 @@ func New(settings *config.GodotSettings) *goanalysis.Linter {
 		}
 
 		// Convert deprecated setting
-		if settings.CheckAll != nil && *settings.CheckAll {
+		if settings.CheckAll {
 			dotSettings.Scope = godot.AllScope
 		}
 	}
 
-	dotSettings.Scope = cmp.Or(dotSettings.Scope, godot.DeclScope)
+	if dotSettings.Scope == "" {
+		dotSettings.Scope = godot.DeclScope
+	}
 
 	analyzer := &analysis.Analyzer{
 		Name: linterName,
 		Doc:  goanalysis.TheOnlyanalyzerDoc,
 		Run: func(pass *analysis.Pass) (any, error) {
-			err := runGodot(pass, dotSettings)
+			issues, err := runGodot(pass, dotSettings)
 			if err != nil {
 				return nil, err
 			}
 
+			if len(issues) == 0 {
+				return nil, nil
+			}
+
+			mu.Lock()
+			resIssues = append(resIssues, issues...)
+			mu.Unlock()
+
 			return nil, nil
 		},
 	}
 
@@ -49,6 +89,7 @@ func New(settings *config.GodotSettings) *goanalysis.Linter {
 		"Check if comments end in a period",
 		[]*analysis.Analyzer{analyzer},
 		nil,
-	).WithLoadMode(goanalysis.LoadModeSyntax)
-}
-
@@ -85,4 +126,40 @@ func runGodot(pass *analysis.Pass, settings godot.Settings) error {
-	}
-
-	return nil
+	).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
+		return resIssues
+	}).WithLoadMode(goanalysis.LoadModeSyntax)
+}
+
+func runGodot(pass *analysis.Pass, settings godot.Settings) ([]goanalysis.Issue, error) {
+	var lintIssues []godot.Issue
+	for _, file := range pass.Files {
+		iss, err := godot.Run(file, pass.Fset, settings)
+		if err != nil {
+			return nil, err
+		}
+		lintIssues = append(lintIssues, iss...)
+	}
+
+	if len(lintIssues) == 0 {
+		return nil, nil
+	}
+
+	issues := make([]goanalysis.Issue, len(lintIssues))
+	for k, i := range lintIssues {
+		issue := result.Issue{
+			Pos:        i.Pos,
+			Text:       i.Message,
+			FromLinter: linterName,
+			Replacement: &result.Replacement{
+				NewLines: []string{i.Replacement},
+			},
+		}
+
+		issues[k] = goanalysis.NewIssue(&issue, pass)
+	}
+
+	return issues, nil
 }
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox/godox.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox/godox.go
index 181e0a73ab..10f5dc845c 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox/godox.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox/godox.go
@@ -3,22 +3,49 @@ package godox
 import (
 	"go/token"
 	"strings"
+	"sync"
 
 	"github.com/matoous/godox"
 	"golang.org/x/tools/go/analysis"
 
 	"github.com/golangci/golangci-lint/pkg/config"
 	"github.com/golangci/golangci-lint/pkg/goanalysis"
+	"github.com/golangci/golangci-lint/pkg/lint/linter"
+	"github.com/golangci/golangci-lint/pkg/result"
 )
 
 const linterName = "godox"
 
 func New(settings *config.GodoxSettings) *goanalysis.Linter {
+	var mu sync.Mutex
+	var resIssues []goanalysis.Issue
+
 	analyzer := &analysis.Analyzer{
 		Name: linterName,
 		Doc:  goanalysis.TheOnlyanalyzerDoc,
 		Run: func(pass *analysis.Pass) (any, error) {
-			runGodox(pass, settings)
+			issues := runGodox(pass, settings)
+
+			if len(issues) == 0 {
+				return nil, nil
+			}
+
+			mu.Lock()
+			resIssues = append(resIssues, issues...)
+			mu.Unlock()
 
 			return nil, nil
 		},
@@ -29,6 +56,7 @@ func New(settings *config.GodoxSettings) *goanalysis.Linter {
 		"Tool for detection of FIXME, TODO and other comment keywords",
 		[]*analysis.Analyzer{analyzer},
 		nil,
-	).WithLoadMode(goanalysis.LoadModeSyntax)
-}
-
@@ -55,4 +83,35 @@ func runGodox(pass *analysis.Pass, settings *config.GodoxSettings) {
-		})
-	}
-}
+	).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
+		return resIssues
+	}).WithLoadMode(goanalysis.LoadModeSyntax)
+}
+
+func runGodox(pass *analysis.Pass, settings *config.GodoxSettings) []goanalysis.Issue {
+	var messages []godox.Message
+	for _, file := range pass.Files {
+		messages = append(messages, godox.Run(file, pass.Fset, settings.Keywords...)...)
+	}
+
+	if len(messages) == 0 {
+		return nil
+	}
+
+	issues := make([]goanalysis.Issue, len(messages))
+
+	for k, i := range messages {
+		issues[k] = goanalysis.NewIssue(&result.Issue{
+			Pos: token.Position{
+				Filename: i.Pos.Filename,
+				Line:     i.Pos.Line,
+			},
+			Text:       strings.TrimRight(i.Message, "\n"),
+			FromLinter: linterName,
+		}, pass)
+	}
+
+	return issues
 }
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt/gofmt.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt/gofmt.go
index b6531d5314..1d993f5cc6 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt/gofmt.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt/gofmt.go
@@ -2,6 +2,10 @@ package gofmt
 
 import (
 	"fmt"
+	"sync"
 
 	gofmtAPI "github.com/golangci/gofmt/gofmt"
 	"golang.org/x/tools/go/analysis"
@@ -15,6 +19,12 @@ import (
 const linterName = "gofmt"
 
 func New(settings *config.GoFmtSettings) *goanalysis.Linter {
+	var mu sync.Mutex
+	var resIssues []goanalysis.Issue
+
 	analyzer := &analysis.Analyzer{
 		Name: linterName,
 		Doc:  goanalysis.TheOnlyanalyzerDoc,
@@ -23,27 +33,58 @@ func New(settings *config.GoFmtSettings) *goanalysis.Linter {
 
 	return goanalysis.NewLinter(
 		linterName,
-		"Checks if the code is formatted according to 'gofmt' command.",
+		"Gofmt checks whether code was gofmt-ed. By default "+
+			"this tool runs with -s option to check for code simplification",
 		[]*analysis.Analyzer{analyzer},
 		nil,
 	).WithContextSetter(func(lintCtx *linter.Context) {
 		analyzer.Run = func(pass *analysis.Pass) (any, error) {
-			err := runGofmt(lintCtx, pass, settings)
+			issues, err := runGofmt(lintCtx, pass, settings)
 			if err != nil {
 				return nil, err
 			}
 
+			if len(issues) == 0 {
+				return nil, nil
+			}
+
+			mu.Lock()
+			resIssues = append(resIssues, issues...)
+			mu.Unlock()
+
+			return nil, nil
+		}
+	}).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
+		return resIssues
+	}).WithLoadMode(goanalysis.LoadModeSyntax)
+}
+
+func runGofmt(lintCtx *linter.Context, pass *analysis.Pass, settings *config.GoFmtSettings) ([]goanalysis.Issue, error) {
+	fileNames := internal.GetFileNames(pass)
+
 	var rewriteRules []gofmtAPI.RewriteRule
 	for _, rule := range settings.RewriteRules {
 		rewriteRules = append(rewriteRules, gofmtAPI.RewriteRule(rule))
 	}
 
-	for _, file := range pass.Files {
-		position, isGoFile := goanalysis.GetGoFilePosition(pass, file)
-		if !isGoFile {
-			continue
-		}
-
-		diff, err := gofmtAPI.RunRewrite(position.Filename, settings.Simplify, rewriteRules)
-		if err != nil { // TODO: skip
-			return err
+	var issues []goanalysis.Issue
+
+	for _, f := range fileNames {
+		diff, err := gofmtAPI.RunRewrite(f, settings.Simplify, rewriteRules)
+		if err != nil { // TODO: skip
+			return nil, err
 		}
 		if diff == nil {
 			continue
 		}
 
-		err = internal.ExtractDiagnosticFromPatch(pass, file, string(diff), lintCtx)
-		if err != nil {
-			return fmt.Errorf("can't extract issues from gofmt diff output %q: %w", string(diff), err)
@@ -65,4 +115,29 @@ func runGofmt(lintCtx *linter.Context, pass *analysis.Pass, settings *config.GoF
-	}
-
-	return nil
+		is, err := internal.ExtractIssuesFromPatch(string(diff), lintCtx, linterName, getIssuedTextGoFmt)
+		if err != nil {
+			return nil, fmt.Errorf("can't extract issues from gofmt diff output %q: %w", string(diff), err)
+		}
+
+		for i := range is {
+			issues = append(issues, goanalysis.NewIssue(&is[i], pass))
+		}
+	}
+
+	return issues, nil
+}
+
+func getIssuedTextGoFmt(settings *config.LintersSettings) string {
+	text := "File is not `gofmt`-ed"
+	if settings.Gofmt.Simplify {
+		text += " with `-s`"
+	}
+	for _, rule := range settings.Gofmt.RewriteRules {
+		text += fmt.Sprintf(" `-r '%s -> %s'`", rule.Pattern, rule.Replacement)
+	}
+
+	return text
 }
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt/gofumpt.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt/gofumpt.go
index 7a11a9074d..a148d00bff 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt/gofumpt.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt/gofumpt.go
@@ -6,6 +6,10 @@ import (
 	"io"
 	"os"
 	"strings"
+	"sync"
 
 	"github.com/shazow/go-diff/difflib"
 	"golang.org/x/tools/go/analysis"
@@ -24,6 +28,12 @@ type differ interface {
 }
 
 func New(settings *config.GofumptSettings) *goanalysis.Linter {
+	var mu sync.Mutex
+	var resIssues []goanalysis.Issue
+
 	diff := difflib.New()
 
 	var options format.Options
@@ -44,16 +54,25 @@ func New(settings *config.GofumptSettings) *goanalysis.Linter {
 
 	return goanalysis.NewLinter(
 		linterName,
-		"Checks if code and import statements are formatted, with additional rules.",
+		"Gofumpt checks whether code was gofumpt-ed.",
 		[]*analysis.Analyzer{analyzer},
 		nil,
 	).WithContextSetter(func(lintCtx *linter.Context) {
 		analyzer.Run = func(pass *analysis.Pass) (any, error) {
-			err := runGofumpt(lintCtx, pass, diff, options)
+			issues, err := runGofumpt(lintCtx, pass, diff, options)
 			if err != nil {
 				return nil, err
 			}
 
-			return nil, nil
-		}
-	}).WithLoadMode(goanalysis.LoadModeSyntax)
@@ -69,10 +88,37 @@ func runGofumpt(lintCtx *linter.Context, pass *analysis.Pass, diff differ, optio
-	input, err := os.ReadFile(position.Filename)
-	if err != nil {
-		return fmt.Errorf("unable to open file %s: %w", position.Filename, err)
+			if len(issues) == 0 {
+				return nil, nil
+			}
+
+			mu.Lock()
+			resIssues = append(resIssues, issues...)
+			mu.Unlock()
+
+			return nil, nil
+		}
+	}).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
+		return resIssues
+	}).WithLoadMode(goanalysis.LoadModeSyntax)
+}
+
+func runGofumpt(lintCtx *linter.Context, pass *analysis.Pass, diff differ, options format.Options) ([]goanalysis.Issue, error) {
+	fileNames := internal.GetFileNames(pass)
+
+	var issues []goanalysis.Issue
+
+	for _, f := range fileNames {
+		input, err := os.ReadFile(f)
+		if err != nil {
+			return nil, fmt.Errorf("unable to open file %s: %w", f, err)
 		}
 
 		output, err := format.Source(input, options)
 		if err != nil {
-			return fmt.Errorf("error while running gofumpt: %w", err)
-		}
-
@@ -89,11 +135,36 @@ func runGofumpt(lintCtx *linter.Context, pass *analysis.Pass, diff differ, optio
-		err = internal.ExtractDiagnosticFromPatch(pass, file, diff, lintCtx)
-		if err != nil {
-			return fmt.Errorf("can't extract issues from gofumpt diff output %q: %w", diff, err)
+			return nil, fmt.Errorf("error while running gofumpt: %w", err)
+		}
+
+		if !bytes.Equal(input, output) {
+			out := bytes.NewBufferString(fmt.Sprintf("--- %[1]s\n+++ %[1]s\n", f))
+
+			err := diff.Diff(out, bytes.NewReader(input), bytes.NewReader(output))
+			if err != nil {
+				return nil, fmt.Errorf("error while running gofumpt: %w", err)
+			}
+
+			diff := out.String()
+			is, err := internal.ExtractIssuesFromPatch(diff, lintCtx, linterName, getIssuedTextGoFumpt)
+			if err != nil {
+				return nil, fmt.Errorf("can't extract issues from gofumpt diff output %q: %w", diff, err)
+			}
+
+			for i := range is {
+				issues = append(issues, goanalysis.NewIssue(&is[i], pass))
+			}
 		}
 	}
 
-	return nil
+	return issues, nil
 }
 
 func getLangVersion(settings *config.GofumptSettings) string {
@@ -104,3 +175,16 @@ func getLangVersion(settings *config.GofumptSettings) string {
 
 	return "go" + strings.TrimPrefix(settings.LangVersion, "go")
 }
+
+func getIssuedTextGoFumpt(settings *config.LintersSettings) string {
+	text := "File is not `gofumpt`-ed"
+
+	if settings.Gofumpt.ExtraRules {
+		text += " with `-extra`"
+	}
+
+	return text
+}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader/goheader.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader/goheader.go
index 5043381143..330207a9e2 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader/goheader.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader/goheader.go
@@ -2,18 +2,33 @@ package goheader
 
 import (
 	"go/token"
-	"strings"
+	"sync"
 
 	goheader "github.com/denis-tingaikin/go-header"
"golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" +<<<<<<< HEAD +======= + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const linterName = "goheader" func New(settings *config.GoHeaderSettings) *goanalysis.Linter { +<<<<<<< HEAD +======= + var mu sync.Mutex + var resIssues []goanalysis.Issue + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) conf := &goheader.Configuration{} if settings != nil { conf = &goheader.Configuration{ @@ -27,11 +42,26 @@ func New(settings *config.GoHeaderSettings) *goanalysis.Linter { Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { +<<<<<<< HEAD err := runGoHeader(pass, conf) +======= + issues, err := runGoHeader(pass, conf) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } +<<<<<<< HEAD +======= + if len(issues) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, issues...) + mu.Unlock() + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil, nil }, } @@ -41,6 +71,7 @@ func New(settings *config.GoHeaderSettings) *goanalysis.Linter { "Checks if file header matches to pattern", []*analysis.Analyzer{analyzer}, nil, +<<<<<<< HEAD ).WithLoadMode(goanalysis.LoadModeSyntax) } @@ -48,20 +79,40 @@ func runGoHeader(pass *analysis.Pass, conf *goheader.Configuration) error { if conf.TemplatePath == "" && conf.Template == "" { // User did not pass template, so then do not run go-header linter return nil +======= + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} + +func runGoHeader(pass *analysis.Pass, conf *goheader.Configuration) ([]goanalysis.Issue, error) { + if conf.TemplatePath == "" && conf.Template == "" { + // User did not pass template, so then do not run go-header linter + return nil, nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } template, err := conf.GetTemplate() if err != nil { +<<<<<<< HEAD return err +======= + return nil, err +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } values, err := conf.GetValues() if err != nil { +<<<<<<< HEAD return err +======= + return nil, err +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } a := goheader.New(goheader.WithTemplate(template), goheader.WithValues(values)) +<<<<<<< HEAD for _, file := range pass.Files { position, isGoFile := goanalysis.GetGoFilePosition(pass, file) if !isGoFile { @@ -128,4 +179,41 @@ func runGoHeader(pass *analysis.Pass, conf *goheader.Configuration) error { } return nil +======= + var issues []goanalysis.Issue + for _, file := range pass.Files { + path := pass.Fset.Position(file.Pos()).Filename + + i := a.Analyze(&goheader.Target{File: file, Path: path}) + + if i == nil { + continue + } + + issue := result.Issue{ + Pos: token.Position{ + Line: i.Location().Line + 1, + Column: i.Location().Position, + Filename: path, + }, + Text: i.Message(), + FromLinter: linterName, + } + + if fix := i.Fix(); fix != nil { + issue.LineRange = &result.Range{ + From: issue.Line(), + To: issue.Line() + len(fix.Actual) - 1, + } + issue.Replacement = &result.Replacement{ + NeedOnlyDelete: len(fix.Expected) == 0, + NewLines: fix.Expected, + } + } + + issues = append(issues, goanalysis.NewIssue(&issue, pass)) + } + + return issues, nil +>>>>>>> 70e0318b1 ([WIP] add 
 }
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports/goimports.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports/goimports.go
index 6ddc9a75b1..433130430d 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports/goimports.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports/goimports.go
@@ -2,6 +2,10 @@ package goimports
 
 import (
 	"fmt"
+	"sync"
 
 	goimportsAPI "github.com/golangci/gofmt/goimports"
 	"golang.org/x/tools/go/analysis"
@@ -16,6 +20,12 @@ import (
 const linterName = "goimports"
 
 func New(settings *config.GoImportsSettings) *goanalysis.Linter {
+	var mu sync.Mutex
+	var resIssues []goanalysis.Issue
+
 	analyzer := &analysis.Analyzer{
 		Name: linterName,
 		Doc:  goanalysis.TheOnlyanalyzerDoc,
@@ -24,18 +34,28 @@ func New(settings *config.GoImportsSettings) *goanalysis.Linter {
 
 	return goanalysis.NewLinter(
 		linterName,
-		"Checks if the code and import statements are formatted according to the 'goimports' command.",
+		"Check import statements are formatted according to the 'goimport' command. "+
+			"Reformat imports in autofix mode.",
 		[]*analysis.Analyzer{analyzer},
 		nil,
 	).WithContextSetter(func(lintCtx *linter.Context) {
 		imports.LocalPrefix = settings.LocalPrefixes
 
 		analyzer.Run = func(pass *analysis.Pass) (any, error) {
-			err := runGoImports(lintCtx, pass)
+			issues, err := runGoImports(lintCtx, pass)
 			if err != nil {
 				return nil, err
 			}
 
-			return nil, nil
-		}
-	}).WithLoadMode(goanalysis.LoadModeSyntax)
@@ -51,11 +71,38 @@ func runGoImports(lintCtx *linter.Context, pass *analysis.Pass) error {
-	diff, err := goimportsAPI.Run(position.Filename)
-	if err != nil { // TODO: skip
-		return err
+			if len(issues) == 0 {
+				return nil, nil
+			}
+
+			mu.Lock()
+			resIssues = append(resIssues, issues...)
+			mu.Unlock()
+
+			return nil, nil
+		}
+	}).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
+		return resIssues
+	}).WithLoadMode(goanalysis.LoadModeSyntax)
+}
+
+func runGoImports(lintCtx *linter.Context, pass *analysis.Pass) ([]goanalysis.Issue, error) {
+	fileNames := internal.GetFileNames(pass)
+
+	var issues []goanalysis.Issue
+
+	for _, f := range fileNames {
+		diff, err := goimportsAPI.Run(f)
+		if err != nil { // TODO: skip
+			return nil, err
 		}
 		if diff == nil {
 			continue
 		}
 
-		err = internal.ExtractDiagnosticFromPatch(pass, file, string(diff), lintCtx)
-		if err != nil {
-			return fmt.Errorf("can't extract issues from goimports diff output %q: %w", string(diff), err)
@@ -63,4 +110,27 @@ func runGoImports(lintCtx *linter.Context, pass *analysis.Pass) error {
-	}
-
-	return nil
+		is, err := internal.ExtractIssuesFromPatch(string(diff), lintCtx, linterName, getIssuedTextGoImports)
+		if err != nil {
+			return nil, fmt.Errorf("can't extract issues from gofmt diff output %q: %w", string(diff), err)
+		}
+
+		for i := range is {
+			issues = append(issues, goanalysis.NewIssue(&is[i], pass))
+		}
+	}
+
+	return issues, nil
+}
+
+func getIssuedTextGoImports(settings *config.LintersSettings) string {
+	text := "File is not `goimports`-ed"
+
+	if settings.Goimports.LocalPrefixes != "" {
+		text += " with -local " + settings.Goimports.LocalPrefixes
+	}
+
+	return text
 }
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomoddirectives/gomoddirectives.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomoddirectives/gomoddirectives.go
index f8f47ba2b4..9d09239750 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomoddirectives/gomoddirectives.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomoddirectives/gomoddirectives.go
@@ -1,7 +1,10 @@
 package gomoddirectives
 
 import (
-	"regexp"
 	"sync"
 
 	"github.com/ldez/gomoddirectives"
@@ -9,7 +12,10 @@ import (
 
 	"github.com/golangci/golangci-lint/pkg/config"
 	"github.com/golangci/golangci-lint/pkg/goanalysis"
-	"github.com/golangci/golangci-lint/pkg/golinters/internal"
 	"github.com/golangci/golangci-lint/pkg/lint/linter"
 	"github.com/golangci/golangci-lint/pkg/result"
 )
@@ -26,6 +32,7 @@ func New(settings *config.GoModDirectivesSettings) *goanalysis.Linter {
 		opts.ReplaceAllowList = settings.ReplaceAllowList
 		opts.RetractAllowNoExplanation = settings.RetractAllowNoExplanation
 		opts.ExcludeForbidden = settings.ExcludeForbidden
-		opts.ToolchainForbidden = settings.ToolchainForbidden
-		opts.ToolForbidden = settings.ToolForbidden
-		opts.GoDebugForbidden = settings.GoDebugForbidden
@@ -47,6 +54,8 @@ func New(settings *config.GoModDirectivesSettings) *goanalysis.Linter {
-				opts.GoVersionPattern = exp
-			}
-		}
 	}
 
 	analyzer := &analysis.Analyzer{
@@ -63,7 +72,11 @@ func New(settings *config.GoModDirectivesSettings) *goanalysis.Linter {
 	).WithContextSetter(func(lintCtx *linter.Context) {
 		analyzer.Run = func(pass *analysis.Pass) (any, error) {
 			once.Do(func() {
-				results, err := gomoddirectives.AnalyzePass(pass, opts)
+				results, err := gomoddirectives.Analyze(opts)
 				if err != nil {
 					lintCtx.Log.Warnf("running %s failed: %s: "+
 						"if you are not using go modules it is suggested to disable this linter", linterName, err)
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard/gomodguard.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard/gomodguard.go
index 8bddebc162..1194a74b85 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard/gomodguard.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard/gomodguard.go
@@ -73,7 +73,11 @@ func New(settings *config.GoModGuardSettings) *goanalysis.Linter {
 	}
 
 	analyzer.Run = func(pass *analysis.Pass) (any, error) {
-		gomodguardIssues := processor.ProcessFiles(internal.GetGoFileNames(pass))
+		gomodguardIssues := processor.ProcessFiles(internal.GetFileNames(pass))
 
 		mu.Lock()
 		defer mu.Unlock()
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec/gosec.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec/gosec.go
index 6b46beaccf..cc8fd26ed6 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec/gosec.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec/gosec.go
@@ -184,6 +184,7 @@ func convertGosecGlobals(globalOptionFromConfig any, conf gosec.Config) {
 	}
 
 	for k, v := range globalOptionMap {
-		option := gosec.GlobalOption(k)
-
-		// Set nosec global option only if the value is true
@@ -193,6 +194,9 @@ func convertGosecGlobals(globalOptionFromConfig any, conf gosec.Config) {
-		}
-
-		conf.SetGlobal(option, fmt.Sprintf("%v", v))
+		conf.SetGlobal(gosec.GlobalOption(k), fmt.Sprintf("%v", v))
 	}
 }
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosmopolitan/gosmopolitan.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosmopolitan/gosmopolitan.go
index bf9b19f129..77f9ae57be 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosmopolitan/gosmopolitan.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosmopolitan/gosmopolitan.go
@@ -10,6 +10,7 @@ import (
 	"github.com/golangci/golangci-lint/pkg/goanalysis"
 )
 
-func New(settings *config.GosmopolitanSettings) *goanalysis.Linter {
-	a := gosmopolitan.NewAnalyzer()
-
@@ -20,6 +21,18 @@ func New(settings *config.GosmopolitanSettings) *goanalysis.Linter {
-			"escapehatches":   strings.Join(settings.EscapeHatches, ","),
-			"lookattests":     !settings.IgnoreTests,
-			"watchforscripts": strings.Join(settings.WatchForScripts, ","),
+func New(s *config.GosmopolitanSettings) *goanalysis.Linter {
+	a := gosmopolitan.NewAnalyzer()
+
+	cfgMap := map[string]map[string]any{}
+	if s != nil {
+		cfgMap[a.Name] = map[string]any{
+			"allowtimelocal":  s.AllowTimeLocal,
+			"escapehatches":   strings.Join(s.EscapeHatches, ","),
+			"lookattests":     !s.IgnoreTests,
+			"watchforscripts": strings.Join(s.WatchForScripts, ","),
 		}
 	}
 
@@ -27,6 +40,10 @@ func New(settings *config.GosmopolitanSettings) *goanalysis.Linter {
 		a.Name,
 		a.Doc,
 		[]*analysis.Analyzer{a},
-		cfg,
+		cfgMap,
 	).WithLoadMode(goanalysis.LoadModeTypesInfo)
 }
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/govet/govet.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/govet/govet.go
index 73bf63af73..e6155e5049 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/govet/govet.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/govet/govet.go
@@ -40,7 +40,10 @@ import (
 	"golang.org/x/tools/go/analysis/passes/slog"
 	"golang.org/x/tools/go/analysis/passes/sortslice"
 	"golang.org/x/tools/go/analysis/passes/stdmethods"
-	"golang.org/x/tools/go/analysis/passes/stdversion"
 	"golang.org/x/tools/go/analysis/passes/stringintconv"
 	"golang.org/x/tools/go/analysis/passes/structtag"
 	"golang.org/x/tools/go/analysis/passes/testinggoroutine"
@@ -51,7 +54,10 @@ import (
 	"golang.org/x/tools/go/analysis/passes/unsafeptr"
 	"golang.org/x/tools/go/analysis/passes/unusedresult"
 	"golang.org/x/tools/go/analysis/passes/unusedwrite"
-	"golang.org/x/tools/go/analysis/passes/waitgroup"
 
 	"github.com/golangci/golangci-lint/pkg/config"
 	"github.com/golangci/golangci-lint/pkg/goanalysis"
@@ -91,7 +97,10 @@ var (
 		slog.Analyzer,
 		sortslice.Analyzer,
 		stdmethods.Analyzer,
-		stdversion.Analyzer,
 		stringintconv.Analyzer,
 		structtag.Analyzer,
 		testinggoroutine.Analyzer,
@@ -102,10 +111,16 @@ var (
 		unsafeptr.Analyzer,
 		unusedresult.Analyzer,
 		unusedwrite.Analyzer,
-		waitgroup.Analyzer,
 	}
 
-	// https://github.com/golang/go/blob/go1.23.0/src/cmd/vet/main.go#L55-L87
+	// https://github.com/golang/go/blob/b56645a87b28840a180d64077877cb46570b4176/src/cmd/vet/main.go#L49-L81
 	defaultAnalyzers = []*analysis.Analyzer{
 		appends.Analyzer,
 		asmdecl.Analyzer,
@@ -130,7 +145,10 @@ var (
 		sigchanyzer.Analyzer,
 		slog.Analyzer,
 		stdmethods.Analyzer,
-		stdversion.Analyzer,
 		stringintconv.Analyzer,
 		structtag.Analyzer,
 		testinggoroutine.Analyzer,
@@ -190,7 +208,11 @@ func isAnalyzerEnabled(name string, cfg *config.GovetSettings, defaultAnalyzers
 	}
 
 	// Keeping for backward compatibility.
+<<<<<<< HEAD if cfg.CheckShadowing != nil && *cfg.CheckShadowing && name == shadow.Analyzer.Name { +======= + if cfg.CheckShadowing && name == shadow.Analyzer.Name { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return true } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/grouper/grouper.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/grouper/grouper.go index e0a3f794a7..8e1e66ed47 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/grouper/grouper.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/grouper/grouper.go @@ -11,9 +11,15 @@ import ( func New(settings *config.GrouperSettings) *goanalysis.Linter { a := grouper.New() +<<<<<<< HEAD cfg := map[string]map[string]any{} if settings != nil { cfg[a.Name] = map[string]any{ +======= + linterCfg := map[string]map[string]any{} + if settings != nil { + linterCfg[a.Name] = map[string]any{ +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "const-require-single-const": settings.ConstRequireSingleConst, "const-require-grouping": settings.ConstRequireGrouping, "import-require-single-import": settings.ImportRequireSingleImport, @@ -29,6 +35,10 @@ func New(settings *config.GrouperSettings) *goanalysis.Linter { a.Name, "Analyze expression groups.", []*analysis.Analyzer{a}, +<<<<<<< HEAD cfg, +======= + linterCfg, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/importas/importas.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/importas/importas.go index b7c6c35aea..833fb2d337 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/importas/importas.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/importas/importas.go @@ -51,11 +51,16 @@ func New(settings *config.ImportAsSettings) *goanalysis.Linter { uniqPackages[a.Pkg] = a } +<<<<<<< HEAD // Skips the duplication check when: // - the alias is empty. // - the alias is a regular expression replacement pattern (ie. contains `$`). v, ok := uniqAliases[a.Alias] if ok && a.Alias != "" && !strings.Contains(a.Alias, "$") { +======= + // skip the duplication check when the alias is a regular expression replacement pattern (ie. contains `$`). 
+ if v, ok := uniqAliases[a.Alias]; ok && !strings.Contains(a.Alias, "$") { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) lintCtx.Log.Errorf("invalid configuration, multiple packages with the same alias: alias=%s packages=[%s,%s]", a.Alias, a.Pkg, v.Pkg) } else { uniqAliases[a.Alias] = a diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/diff.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/diff.go index 8e5e1f0e73..e72f7d4948 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/diff.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/diff.go @@ -3,6 +3,7 @@ package internal import ( "bytes" "fmt" +<<<<<<< HEAD "go/ast" "go/token" "slices" @@ -19,6 +20,22 @@ import ( type Change struct { From, To int NewLines []string +======= + "go/token" + "strings" + + diffpkg "github.com/sourcegraph/go-diff/diff" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/result" +) + +type Change struct { + LineRange result.Range + Replacement result.Replacement +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type diffLineType string @@ -29,6 +46,11 @@ const ( diffLineDeleted diffLineType = "deleted" ) +<<<<<<< HEAD +======= +type fmtTextFormatter func(settings *config.LintersSettings) string + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type diffLine struct { originalNumber int // 1-based original line number typ diffLineType @@ -44,6 +66,7 @@ type hunkChangesParser struct { log logutils.Log +<<<<<<< HEAD changes []Change } @@ -86,6 +109,60 @@ func (p *hunkChangesParser) parse(h *diffpkg.Hunk) []Change { } func (p *hunkChangesParser) handleOriginalLine(lines []diffLine, line diffLine, i *int) { +======= + lines []diffLine + + ret []Change +} + +func (p *hunkChangesParser) parseDiffLines(h *diffpkg.Hunk) { + lines := bytes.Split(h.Body, []byte{'\n'}) + currentOriginalLineNumber := int(h.OrigStartLine) + var ret []diffLine + + for i, line := range lines { + dl := diffLine{ + originalNumber: currentOriginalLineNumber, + } + + lineStr := string(line) + + if strings.HasPrefix(lineStr, "-") { + dl.typ = diffLineDeleted + dl.data = strings.TrimPrefix(lineStr, "-") + currentOriginalLineNumber++ + } else if strings.HasPrefix(lineStr, "+") { + dl.typ = diffLineAdded + dl.data = strings.TrimPrefix(lineStr, "+") + } else { + if i == len(lines)-1 && lineStr == "" { + // handle last \n: don't add an empty original line + break + } + + dl.typ = diffLineOriginal + dl.data = strings.TrimPrefix(lineStr, " ") + currentOriginalLineNumber++ + } + + ret = append(ret, dl) + } + + // if > 0, then the original file had a 'No newline at end of file' mark + if h.OrigNoNewlineAt > 0 { + dl := diffLine{ + originalNumber: currentOriginalLineNumber + 1, + typ: diffLineAdded, + data: "", + } + ret = append(ret, dl) + } + + p.lines = ret +} + +func (p *hunkChangesParser) handleOriginalLine(line diffLine, i *int) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if len(p.replacementLinesToPrepend) == 0 { p.lastOriginalLine = &line *i++ @@ -99,6 +176,7 @@ func (p *hunkChangesParser) handleOriginalLine(lines []diffLine, line diffLine, *i++ var followingAddedLines []string +<<<<<<< HEAD for ; *i < len(lines) && lines[*i].typ == diffLineAdded; *i++ { followingAddedLines = append(followingAddedLines, lines[*i].data) } @@ -110,12 +188,28 @@ func (p *hunkChangesParser) 
handleOriginalLine(lines []diffLine, line diffLine, } p.changes = append(p.changes, change) +======= + for ; *i < len(p.lines) && p.lines[*i].typ == diffLineAdded; *i++ { + followingAddedLines = append(followingAddedLines, p.lines[*i].data) + } + + p.ret = append(p.ret, Change{ + LineRange: result.Range{ + From: line.originalNumber, + To: line.originalNumber, + }, + Replacement: result.Replacement{ + NewLines: append(p.replacementLinesToPrepend, append([]string{line.data}, followingAddedLines...)...), + }, + }) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.replacementLinesToPrepend = nil p.lastOriginalLine = &line } func (p *hunkChangesParser) handleDeletedLines(deletedLines []diffLine, addedLines []string) { change := Change{ +<<<<<<< HEAD From: deletedLines[0].originalNumber, To: deletedLines[len(deletedLines)-1].originalNumber, } @@ -132,6 +226,34 @@ func (p *hunkChangesParser) handleDeletedLines(deletedLines []diffLine, addedLin } p.changes = append(p.changes, change) +======= + LineRange: result.Range{ + From: deletedLines[0].originalNumber, + To: deletedLines[len(deletedLines)-1].originalNumber, + }, + } + + if len(addedLines) != 0 { + change.Replacement.NewLines = append([]string{}, p.replacementLinesToPrepend...) + change.Replacement.NewLines = append(change.Replacement.NewLines, addedLines...) + if len(p.replacementLinesToPrepend) != 0 { + p.replacementLinesToPrepend = nil + } + + p.ret = append(p.ret, change) + return + } + + // delete-only change with possible prepending + if len(p.replacementLinesToPrepend) != 0 { + change.Replacement.NewLines = p.replacementLinesToPrepend + p.replacementLinesToPrepend = nil + } else { + change.Replacement.NeedOnlyDelete = true + } + + p.ret = append(p.ret, change) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (p *hunkChangesParser) handleAddedOnlyLines(addedLines []string) { @@ -144,11 +266,15 @@ func (p *hunkChangesParser) handleAddedOnlyLines(addedLines []string) { // 2. ... 
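// Both sides of the diff.go conflict above carry the same hunk-change data in
// two shapes: HEAD keeps a flat Change{From, To, NewLines}, while the WIP side
// nests a result.Range plus a result.Replacement. A minimal sketch of the
// WIP-side construction, with from, to and newLines as placeholder inputs:
func toWIPChange(from, to int, newLines []string) Change {
	return Change{
		LineRange:   result.Range{From: from, To: to},
		Replacement: result.Replacement{NewLines: newLines},
	}
}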
p.replacementLinesToPrepend = addedLines +<<<<<<< HEAD +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return } // add-only change merged into the last original line with possible prepending +<<<<<<< HEAD change := Change{ From: p.lastOriginalLine.originalNumber, To: p.lastOriginalLine.originalNumber, @@ -230,6 +356,68 @@ func ExtractDiagnosticFromPatch( adjLine := pass.Fset.PositionFor(file.Pos(), false).Line - pass.Fset.PositionFor(file.Pos(), true).Line +======= + p.ret = append(p.ret, Change{ + LineRange: result.Range{ + From: p.lastOriginalLine.originalNumber, + To: p.lastOriginalLine.originalNumber, + }, + Replacement: result.Replacement{ + NewLines: append(p.replacementLinesToPrepend, append([]string{p.lastOriginalLine.data}, addedLines...)...), + }, + }) + p.replacementLinesToPrepend = nil +} + +func (p *hunkChangesParser) parse(h *diffpkg.Hunk) []Change { + p.parseDiffLines(h) + + for i := 0; i < len(p.lines); { + line := p.lines[i] + if line.typ == diffLineOriginal { + p.handleOriginalLine(line, &i) + continue + } + + var deletedLines []diffLine + for ; i < len(p.lines) && p.lines[i].typ == diffLineDeleted; i++ { + deletedLines = append(deletedLines, p.lines[i]) + } + + var addedLines []string + for ; i < len(p.lines) && p.lines[i].typ == diffLineAdded; i++ { + addedLines = append(addedLines, p.lines[i].data) + } + + if len(deletedLines) != 0 { + p.handleDeletedLines(deletedLines, addedLines) + continue + } + + // no deletions, only additions + p.handleAddedOnlyLines(addedLines) + } + + if len(p.replacementLinesToPrepend) != 0 { + p.log.Infof("The diff contains only additions: no original or deleted lines: %#v", p.lines) + return nil + } + + return p.ret +} + +func ExtractIssuesFromPatch(patch string, lintCtx *linter.Context, linterName string, formatter fmtTextFormatter) ([]result.Issue, error) { + diffs, err := diffpkg.ParseMultiFileDiff([]byte(patch)) + if err != nil { + return nil, fmt.Errorf("can't parse patch: %w", err) + } + + if len(diffs) == 0 { + return nil, fmt.Errorf("got no diffs from patch parser: %v", patch) + } + + var issues []result.Issue +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, d := range diffs { if len(d.Hunks) == 0 { lintCtx.Log.Warnf("Got no hunks in diff %+v", d) @@ -242,11 +430,29 @@ func ExtractDiagnosticFromPatch( changes := p.parse(hunk) for _, change := range changes { +<<<<<<< HEAD pass.Report(toDiagnostic(ft, change, adjLine)) +======= + i := result.Issue{ + FromLinter: linterName, + Pos: token.Position{ + Filename: d.NewName, + Line: change.LineRange.From, + }, + Text: formatter(lintCtx.Settings()), + Replacement: &change.Replacement, + } + if change.LineRange.From != change.LineRange.To { + i.LineRange = &change.LineRange + } + + issues = append(issues, i) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } } +<<<<<<< HEAD return nil } @@ -272,4 +478,7 @@ func toDiagnostic(ft *token.File, change Change, adjLine int) analysis.Diagnosti }}, }}, } +======= + return issues, nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/util.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/util.go index 7525f2f2c5..943cf57181 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/util.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/util.go @@ -2,12 +2,19 @@ package internal import ( "fmt" +<<<<<<< HEAD +======= + "path/filepath" +>>>>>>> 70e0318b1 ([WIP] add 
archivista storage backend) "strings" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" +<<<<<<< HEAD "github.com/golangci/golangci-lint/pkg/goanalysis" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) func FormatCode(code string, _ *config.Config) string { @@ -18,6 +25,7 @@ func FormatCode(code string, _ *config.Config) string { return fmt.Sprintf("`%s`", code) } +<<<<<<< HEAD func GetGoFileNames(pass *analysis.Pass) []string { var filenames []string @@ -31,4 +39,18 @@ func GetGoFileNames(pass *analysis.Pass) []string { } return filenames +======= +func GetFileNames(pass *analysis.Pass) []string { + var fileNames []string + for _, f := range pass.Files { + fileName := pass.Fset.PositionFor(f.Pos(), true).Filename + ext := filepath.Ext(fileName) + if ext != "" && ext != ".go" { + // position has been adjusted to a non-go file, revert to original file + fileName = pass.Fset.PositionFor(f.Pos(), false).Filename + } + fileNames = append(fileNames, fileName) + } + return fileNames +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll/lll.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll/lll.go index bad3b0c4e2..6fa0d520fc 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll/lll.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll/lll.go @@ -4,15 +4,28 @@ import ( "bufio" "errors" "fmt" +<<<<<<< HEAD "go/ast" "os" "strings" +======= + "go/token" + "os" + "strings" + "sync" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "unicode/utf8" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" +<<<<<<< HEAD +======= + "github.com/golangci/golangci-lint/pkg/golinters/internal" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const linterName = "lll" @@ -20,15 +33,36 @@ const linterName = "lll" const goCommentDirectivePrefix = "//go:" func New(settings *config.LllSettings) *goanalysis.Linter { +<<<<<<< HEAD +======= + var mu sync.Mutex + var resIssues []goanalysis.Issue + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { +<<<<<<< HEAD err := runLll(pass, settings) +======= + issues, err := runLll(pass, settings) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } +<<<<<<< HEAD +======= + if len(issues) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, issues...) 
+ mu.Unlock() + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil, nil }, } @@ -38,6 +72,7 @@ func New(settings *config.LllSettings) *goanalysis.Linter { "Reports long lines", []*analysis.Analyzer{analyzer}, nil, +<<<<<<< HEAD ).WithLoadMode(goanalysis.LoadModeSyntax) } @@ -71,6 +106,42 @@ func getLLLIssuesForFile(pass *analysis.Pass, file *ast.File, maxLineLen int, ta ft := pass.Fset.File(file.Pos()) +======= + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} + +func runLll(pass *analysis.Pass, settings *config.LllSettings) ([]goanalysis.Issue, error) { + fileNames := internal.GetFileNames(pass) + + spaces := strings.Repeat(" ", settings.TabWidth) + + var issues []goanalysis.Issue + for _, f := range fileNames { + lintIssues, err := getLLLIssuesForFile(f, settings.LineLength, spaces) + if err != nil { + return nil, err + } + + for i := range lintIssues { + issues = append(issues, goanalysis.NewIssue(&lintIssues[i], pass)) + } + } + + return issues, nil +} + +func getLLLIssuesForFile(filename string, maxLineLen int, tabSpaces string) ([]result.Issue, error) { + var res []result.Issue + + f, err := os.Open(filename) + if err != nil { + return nil, fmt.Errorf("can't open file %s: %w", filename, err) + } + defer f.Close() + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) lineNumber := 0 multiImportEnabled := false @@ -100,15 +171,26 @@ func getLLLIssuesForFile(pass *analysis.Pass, file *ast.File, maxLineLen int, ta lineLen := utf8.RuneCountInString(line) if lineLen > maxLineLen { +<<<<<<< HEAD pass.Report(analysis.Diagnostic{ Pos: ft.LineStart(goanalysis.AdjustPos(lineNumber, nonAdjPosition.Line, position.Line)), Message: fmt.Sprintf("The line is %d characters long, which exceeds the maximum of %d characters.", lineLen, maxLineLen), +======= + res = append(res, result.Issue{ + Pos: token.Position{ + Filename: filename, + Line: lineNumber, + }, + Text: fmt.Sprintf("the line is %d characters long, which exceeds the maximum of %d characters.", lineLen, maxLineLen), + FromLinter: linterName, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }) } } if err := scanner.Err(); err != nil { +<<<<<<< HEAD // scanner.Scan() might fail if the line is longer than bufio.MaxScanTokenSize // In the case where the specified maxLineLen is smaller than bufio.MaxScanTokenSize // we can return this line as a long line instead of returning an error. @@ -130,4 +212,32 @@ func getLLLIssuesForFile(pass *analysis.Pass, file *ast.File, maxLineLen int, ta } return nil +======= + if errors.Is(err, bufio.ErrTooLong) && maxLineLen < bufio.MaxScanTokenSize { + // scanner.Scan() might fail if the line is longer than bufio.MaxScanTokenSize + // In the case where the specified maxLineLen is smaller than bufio.MaxScanTokenSize + // we can return this line as a long line instead of returning an error. + // The reason for this change is that this case might happen with autogenerated files + // The go-bindata tool for instance might generate a file with a very long line. + // In this case, as it's an auto generated file, the warning returned by lll will + // be ignored. + // But if we return a linter error here, and this error happens for an autogenerated + // file the error will be discarded (fine), but all the subsequent errors for lll will + // be discarded for other files, and we'll miss legit error. 
+ res = append(res, result.Issue{ + Pos: token.Position{ + Filename: filename, + Line: lineNumber, + Column: 1, + }, + Text: fmt.Sprintf("line is more than %d characters", bufio.MaxScanTokenSize), + FromLinter: linterName, + }) + } else { + return nil, fmt.Errorf("can't scan file %s: %w", filename, err) + } + } + + return res, nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/loggercheck/loggercheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/loggercheck/loggercheck.go index 84c8d73635..96c4af7ee7 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/loggercheck/loggercheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/loggercheck/loggercheck.go @@ -22,9 +22,12 @@ func New(settings *config.LoggerCheckSettings) *goanalysis.Linter { if !settings.Logr { disable = append(disable, "logr") } +<<<<<<< HEAD if !settings.Slog { disable = append(disable, "slog") } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !settings.Zap { disable = append(disable, "zap") } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/maintidx/maintidx.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/maintidx/maintidx.go index 799c51c874..4dba674fb9 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/maintidx/maintidx.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/maintidx/maintidx.go @@ -8,6 +8,7 @@ import ( "github.com/golangci/golangci-lint/pkg/goanalysis" ) +<<<<<<< HEAD func New(settings *config.MaintIdxSettings) *goanalysis.Linter { analyzer := maintidx.Analyzer @@ -18,6 +19,18 @@ func New(settings *config.MaintIdxSettings) *goanalysis.Linter { if settings != nil { cfg[analyzer.Name] = map[string]any{ "under": settings.Under, +======= +func New(cfg *config.MaintIdxSettings) *goanalysis.Linter { + analyzer := maintidx.Analyzer + + cfgMap := map[string]map[string]any{ + analyzer.Name: {"under": 20}, + } + + if cfg != nil { + cfgMap[analyzer.Name] = map[string]any{ + "under": cfg.Under, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -25,6 +38,10 @@ func New(settings *config.MaintIdxSettings) *goanalysis.Linter { analyzer.Name, analyzer.Doc, []*analysis.Analyzer{analyzer}, +<<<<<<< HEAD cfg, +======= + cfgMap, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/makezero/makezero.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/makezero/makezero.go index b5ab4515e5..3cfd3fac89 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/makezero/makezero.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/makezero/makezero.go @@ -2,26 +2,56 @@ package makezero import ( "fmt" +<<<<<<< HEAD +======= + "sync" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/ashanbrown/makezero/makezero" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" +<<<<<<< HEAD +======= + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const linterName = "makezero" func New(settings *config.MakezeroSettings) *goanalysis.Linter { +<<<<<<< HEAD +======= + var mu sync.Mutex + var resIssues []goanalysis.Issue + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) 
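// The WIP side of the surrounding hunks repeats one adapter shape across lll,
// makezero, misspell, nestif, prealloc, protogetter and tagalign: gather
// issues under a mutex inside analyzer.Run, then hand them back through
// WithIssuesReporter. A condensed sketch, with run standing in for the
// per-linter runner (imports follow the hunks: sync,
// golang.org/x/tools/go/analysis, and the goanalysis/linter packages):
func newCollectingLinter(name, doc string, run func(*analysis.Pass) ([]goanalysis.Issue, error)) *goanalysis.Linter {
	var mu sync.Mutex
	var resIssues []goanalysis.Issue

	analyzer := &analysis.Analyzer{
		Name: name,
		Doc:  goanalysis.TheOnlyanalyzerDoc,
		Run: func(pass *analysis.Pass) (any, error) {
			issues, err := run(pass)
			if err != nil {
				return nil, err
			}
			if len(issues) == 0 {
				return nil, nil
			}
			mu.Lock()
			resIssues = append(resIssues, issues...)
			mu.Unlock()
			return nil, nil
		},
	}

	return goanalysis.NewLinter(name, doc, []*analysis.Analyzer{analyzer}, nil).
		WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
			return resIssues
		}).WithLoadMode(goanalysis.LoadModeSyntax)
}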
analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { +<<<<<<< HEAD err := runMakeZero(pass, settings) +======= + issues, err := runMakeZero(pass, settings) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } +<<<<<<< HEAD +======= + if len(issues) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, issues...) + mu.Unlock() + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil, nil }, } @@ -31,6 +61,7 @@ func New(settings *config.MakezeroSettings) *goanalysis.Linter { "Finds slice declarations with non-zero initial length", []*analysis.Analyzer{analyzer}, nil, +<<<<<<< HEAD ).WithLoadMode(goanalysis.LoadModeTypesInfo) } @@ -52,4 +83,32 @@ func runMakeZero(pass *analysis.Pass, settings *config.MakezeroSettings) error { } return nil +======= + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeTypesInfo) +} + +func runMakeZero(pass *analysis.Pass, settings *config.MakezeroSettings) ([]goanalysis.Issue, error) { + zero := makezero.NewLinter(settings.Always) + + var issues []goanalysis.Issue + + for _, file := range pass.Files { + hints, err := zero.Run(pass.Fset, pass.TypesInfo, file) + if err != nil { + return nil, fmt.Errorf("makezero linter failed on file %q: %w", file.Name.String(), err) + } + + for _, hint := range hints { + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + Pos: hint.Position(), + Text: hint.Details(), + FromLinter: linterName, + }, pass)) + } + } + + return issues, nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/mirror/mirror.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/mirror/mirror.go index e15dfa3a5a..ad57aa6a49 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/mirror/mirror.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/mirror/mirror.go @@ -1,10 +1,16 @@ package mirror import ( +<<<<<<< HEAD +======= + "sync" + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/butuzov/mirror" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/goanalysis" +<<<<<<< HEAD ) func New() *goanalysis.Linter { @@ -27,4 +33,66 @@ func New() *goanalysis.Linter { []*analysis.Analyzer{a}, linterCfg, ).WithLoadMode(goanalysis.LoadModeTypesInfo) +======= + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +func New() *goanalysis.Linter { + var ( + mu sync.Mutex + issues []goanalysis.Issue + ) + + a := mirror.NewAnalyzer() + a.Run = func(pass *analysis.Pass) (any, error) { + // mirror only lints test files if the `--with-tests` flag is passed, + // so we pass the `with-tests` flag as true to the analyzer before running it. + // This can be turned off by using the regular golangci-lint flags such as `--tests` or `--skip-files` + // or can be disabled per linter via exclude rules. 
+ // (see https://github.com/golangci/golangci-lint/issues/2527#issuecomment-1023707262) + violations := mirror.Run(pass, true) + + if len(violations) == 0 { + return nil, nil + } + + for index := range violations { + i := violations[index].Issue(pass.Fset) + + issue := result.Issue{ + FromLinter: a.Name, + Text: i.Message, + Pos: i.Start, + } + + if i.InlineFix != "" { + issue.Replacement = &result.Replacement{ + Inline: &result.InlineFix{ + StartCol: i.Start.Column - 1, + Length: len(i.Original), + NewString: i.InlineFix, + }, + } + } + + mu.Lock() + issues = append(issues, goanalysis.NewIssue(&issue, pass)) + mu.Unlock() + } + + return nil, nil + } + + analyzer := goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return issues + }).WithLoadMode(goanalysis.LoadModeTypesInfo) + + return analyzer +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell/misspell.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell/misspell.go index 3ace5fddb9..119baea2f6 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell/misspell.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell/misspell.go @@ -2,9 +2,15 @@ package misspell import ( "fmt" +<<<<<<< HEAD "go/ast" "go/token" "strings" +======= + "go/token" + "strings" + "sync" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "unicode" "github.com/golangci/misspell" @@ -12,12 +18,24 @@ import ( "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" +<<<<<<< HEAD + "github.com/golangci/golangci-lint/pkg/lint/linter" +======= + "github.com/golangci/golangci-lint/pkg/golinters/internal" "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const linterName = "misspell" func New(settings *config.MisspellSettings) *goanalysis.Linter { +<<<<<<< HEAD +======= + var mu sync.Mutex + var resIssues []goanalysis.Issue + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, @@ -37,11 +55,16 @@ func New(settings *config.MisspellSettings) *goanalysis.Linter { return nil, ruleErr } +<<<<<<< HEAD err := runMisspell(lintCtx, pass, replacer, settings.Mode) +======= + issues, err := runMisspell(lintCtx, pass, replacer, settings.Mode) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } +<<<<<<< HEAD return nil, nil } }).WithLoadMode(goanalysis.LoadModeSyntax) @@ -56,6 +79,39 @@ func runMisspell(lintCtx *linter.Context, pass *analysis.Pass, replacer *misspel } return nil +======= + if len(issues) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, issues...) 
+ mu.Unlock() + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} + +func runMisspell(lintCtx *linter.Context, pass *analysis.Pass, replacer *misspell.Replacer, mode string) ([]goanalysis.Issue, error) { + fileNames := internal.GetFileNames(pass) + + var issues []goanalysis.Issue + for _, filename := range fileNames { + lintIssues, err := runMisspellOnFile(lintCtx, filename, replacer, mode) + if err != nil { + return nil, err + } + + for i := range lintIssues { + issues = append(issues, goanalysis.NewIssue(&lintIssues[i], pass)) + } + } + + return issues, nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func createMisspellReplacer(settings *config.MisspellSettings) (*misspell.Replacer, error) { @@ -90,6 +146,7 @@ func createMisspellReplacer(settings *config.MisspellSettings) (*misspell.Replac return replacer, nil } +<<<<<<< HEAD func runMisspellOnFile(lintCtx *linter.Context, pass *analysis.Pass, file *ast.File, replacer *misspell.Replacer, mode string) error { position, isGoFile := goanalysis.GetGoFilePosition(pass, file) if !isGoFile { @@ -99,6 +156,12 @@ func runMisspellOnFile(lintCtx *linter.Context, pass *analysis.Pass, file *ast.F fileContent, err := lintCtx.FileCache.GetFileBytes(position.Filename) if err != nil { return fmt.Errorf("can't get file %s contents: %w", position.Filename, err) +======= +func runMisspellOnFile(lintCtx *linter.Context, filename string, replacer *misspell.Replacer, mode string) ([]result.Issue, error) { + fileContent, err := lintCtx.FileCache.GetFileBytes(filename) + if err != nil { + return nil, fmt.Errorf("can't get file %s contents: %w", filename, err) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // `r.ReplaceGo` doesn't find issues inside strings: it searches only inside comments. 
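// Sketch of how the WIP-side misspell hunk below turns one replacer diff into
// an issue with an inline fix: the reported token.Position column is 1-based
// while the InlineFix start column stays 0-based. d stands for a single entry
// of the diffs returned by replace(string(fileContent)), assuming the
// misspell.Diff fields used in the hunk (Line, Column, Original, Corrected).
func misspellIssue(filename string, d misspell.Diff) result.Issue {
	return result.Issue{
		Pos:        token.Position{Filename: filename, Line: d.Line, Column: d.Column + 1},
		Text:       fmt.Sprintf("`%s` is a misspelling of `%s`", d.Original, d.Corrected),
		FromLinter: linterName,
		Replacement: &result.Replacement{
			Inline: &result.InlineFix{
				StartCol:  d.Column,        // 0-based start of the misspelled word
				Length:    len(d.Original), // span of the original spelling
				NewString: d.Corrected,
			},
		},
	}
}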
@@ -112,6 +175,7 @@ func runMisspellOnFile(lintCtx *linter.Context, pass *analysis.Pass, file *ast.F replace = replacer.Replace } +<<<<<<< HEAD f := pass.Fset.File(file.Pos()) _, diffs := replace(string(fileContent)) @@ -137,6 +201,38 @@ func runMisspellOnFile(lintCtx *linter.Context, pass *analysis.Pass, file *ast.F } return nil +======= + _, diffs := replace(string(fileContent)) + + var res []result.Issue + + for _, diff := range diffs { + text := fmt.Sprintf("`%s` is a misspelling of `%s`", diff.Original, diff.Corrected) + + pos := token.Position{ + Filename: filename, + Line: diff.Line, + Column: diff.Column + 1, + } + + replacement := &result.Replacement{ + Inline: &result.InlineFix{ + StartCol: diff.Column, + Length: len(diff.Original), + NewString: diff.Corrected, + }, + } + + res = append(res, result.Issue{ + Pos: pos, + Text: text, + FromLinter: linterName, + Replacement: replacement, + }) + } + + return res, nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func appendExtraWords(replacer *misspell.Replacer, extraWords []config.MisspellExtraWords) error { diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/musttag/musttag.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/musttag/musttag.go index a4e9ceff28..9873d7e850 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/musttag/musttag.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/musttag/musttag.go @@ -8,11 +8,19 @@ import ( "github.com/golangci/golangci-lint/pkg/goanalysis" ) +<<<<<<< HEAD func New(settings *config.MustTagSettings) *goanalysis.Linter { var funcs []musttag.Func if settings != nil { for _, fn := range settings.Functions { +======= +func New(setting *config.MustTagSettings) *goanalysis.Linter { + var funcs []musttag.Func + + if setting != nil { + for _, fn := range setting.Functions { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) funcs = append(funcs, musttag.Func{ Name: fn.Name, Tag: fn.Tag, diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nestif/nestif.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nestif/nestif.go index b72538fd16..14147fc2d3 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nestif/nestif.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nestif/nestif.go @@ -1,21 +1,51 @@ package nestif import ( +<<<<<<< HEAD +======= + "sort" + "sync" + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/nakabonne/nestif" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" +<<<<<<< HEAD +======= + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const linterName = "nestif" func New(settings *config.NestifSettings) *goanalysis.Linter { +<<<<<<< HEAD analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { runNestIf(pass, settings) +======= + var mu sync.Mutex + var resIssues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: goanalysis.TheOnlyAnalyzerName, + Doc: goanalysis.TheOnlyanalyzerDoc, + Run: func(pass *analysis.Pass) (any, error) { + issues := runNestIf(pass, settings) + + if len(issues) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, issues...) 
+ mu.Unlock() +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil, nil }, @@ -26,14 +56,24 @@ func New(settings *config.NestifSettings) *goanalysis.Linter { "Reports deeply nested if statements", []*analysis.Analyzer{analyzer}, nil, +<<<<<<< HEAD ).WithLoadMode(goanalysis.LoadModeSyntax) } func runNestIf(pass *analysis.Pass, settings *config.NestifSettings) { +======= + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} + +func runNestIf(pass *analysis.Pass, settings *config.NestifSettings) []goanalysis.Issue { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) checker := &nestif.Checker{ MinComplexity: settings.MinComplexity, } +<<<<<<< HEAD for _, file := range pass.Files { position, isGoFile := goanalysis.GetGoFilePosition(pass, file) if !isGoFile { @@ -56,4 +96,29 @@ func runNestIf(pass *analysis.Pass, settings *config.NestifSettings) { }) } } +======= + var lintIssues []nestif.Issue + for _, f := range pass.Files { + lintIssues = append(lintIssues, checker.Check(f, pass.Fset)...) + } + + if len(lintIssues) == 0 { + return nil + } + + sort.SliceStable(lintIssues, func(i, j int) bool { + return lintIssues[i].Complexity > lintIssues[j].Complexity + }) + + issues := make([]goanalysis.Issue, 0, len(lintIssues)) + for _, i := range lintIssues { + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + Pos: i.Pos, + Text: i.Message, + FromLinter: linterName, + }, pass)) + } + + return issues +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnil/nilnil.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnil/nilnil.go index ed25dec71f..ea1022e12a 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnil/nilnil.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnil/nilnil.go @@ -11,6 +11,7 @@ import ( func New(settings *config.NilNilSettings) *goanalysis.Linter { a := analyzer.New() +<<<<<<< HEAD cfg := make(map[string]map[string]any) if settings != nil { cfg[a.Name] = map[string]any{ @@ -18,6 +19,15 @@ func New(settings *config.NilNilSettings) *goanalysis.Linter { } if len(settings.CheckedTypes) != 0 { cfg[a.Name]["checked-types"] = settings.CheckedTypes +======= + cfgMap := make(map[string]map[string]any) + if settings != nil { + cfgMap[a.Name] = map[string]any{ + "detect-opposite": settings.DetectOpposite, + } + if len(settings.CheckedTypes) != 0 { + cfgMap[a.Name]["checked-types"] = settings.CheckedTypes +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -25,7 +35,11 @@ func New(settings *config.NilNilSettings) *goanalysis.Linter { a.Name, a.Doc, []*analysis.Analyzer{a}, +<<<<<<< HEAD cfg, +======= + cfgMap, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ). 
WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/nolintlint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/nolintlint.go index 21cd20124f..55b99953e6 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/nolintlint.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/nolintlint.go @@ -2,6 +2,7 @@ package internal import ( +<<<<<<< HEAD "go/token" "regexp" "strings" @@ -13,6 +14,125 @@ import ( ) const LinterName = "nolintlint" +======= + "fmt" + "go/ast" + "go/token" + "regexp" + "strings" + "unicode" + + "github.com/golangci/golangci-lint/pkg/result" +) + +type BaseIssue struct { + fullDirective string + directiveWithOptionalLeadingSpace string + position token.Position + replacement *result.Replacement +} + +//nolint:gocritic // TODO(ldez) must be change in the future. +func (b BaseIssue) Position() token.Position { + return b.position +} + +//nolint:gocritic // TODO(ldez) must be change in the future. +func (b BaseIssue) Replacement() *result.Replacement { + return b.replacement +} + +type ExtraLeadingSpace struct { + BaseIssue +} + +//nolint:gocritic // TODO(ldez) must be change in the future. +func (i ExtraLeadingSpace) Details() string { + return fmt.Sprintf("directive `%s` should not have more than one leading space", i.fullDirective) +} + +func (i ExtraLeadingSpace) String() string { return toString(i) } + +type NotMachine struct { + BaseIssue +} + +//nolint:gocritic // TODO(ldez) must be change in the future. +func (i NotMachine) Details() string { + expected := i.fullDirective[:2] + strings.TrimLeftFunc(i.fullDirective[2:], unicode.IsSpace) + return fmt.Sprintf("directive `%s` should be written without leading space as `%s`", + i.fullDirective, expected) +} + +func (i NotMachine) String() string { return toString(i) } + +type NotSpecific struct { + BaseIssue +} + +//nolint:gocritic // TODO(ldez) must be change in the future. +func (i NotSpecific) Details() string { + return fmt.Sprintf("directive `%s` should mention specific linter such as `%s:my-linter`", + i.fullDirective, i.directiveWithOptionalLeadingSpace) +} + +func (i NotSpecific) String() string { return toString(i) } + +type ParseError struct { + BaseIssue +} + +//nolint:gocritic // TODO(ldez) must be change in the future. +func (i ParseError) Details() string { + return fmt.Sprintf("directive `%s` should match `%s[:] [// ]`", + i.fullDirective, + i.directiveWithOptionalLeadingSpace) +} + +func (i ParseError) String() string { return toString(i) } + +type NoExplanation struct { + BaseIssue + fullDirectiveWithoutExplanation string +} + +//nolint:gocritic // TODO(ldez) must be change in the future. +func (i NoExplanation) Details() string { + return fmt.Sprintf("directive `%s` should provide explanation such as `%s // this is why`", + i.fullDirective, i.fullDirectiveWithoutExplanation) +} + +func (i NoExplanation) String() string { return toString(i) } + +type UnusedCandidate struct { + BaseIssue + ExpectedLinter string +} + +//nolint:gocritic // TODO(ldez) must be change in the future. 
+func (i UnusedCandidate) Details() string { + details := fmt.Sprintf("directive `%s` is unused", i.fullDirective) + if i.ExpectedLinter != "" { + details += fmt.Sprintf(" for linter %q", i.ExpectedLinter) + } + return details +} + +func (i UnusedCandidate) String() string { return toString(i) } + +func toString(issue Issue) string { + return fmt.Sprintf("%s at %s", issue.Details(), issue.Position()) +} + +type Issue interface { + Details() string + Position() token.Position + String() string + Replacement() *result.Replacement +} + +type Needs uint +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) const ( NeedsMachineOnly Needs = 1 << iota @@ -22,10 +142,13 @@ const ( NeedsAll = NeedsMachineOnly | NeedsSpecific | NeedsExplanation ) +<<<<<<< HEAD type Needs uint const commentMark = "//" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var commentPattern = regexp.MustCompile(`^//\s*(nolint)(:\s*[\w-]+\s*(?:,\s*[\w-]+\s*)*)?\b`) // matches a complete nolint directive @@ -55,10 +178,22 @@ var ( ) //nolint:funlen,gocyclo // the function is going to be refactored in the future +<<<<<<< HEAD func (l Linter) Run(pass *analysis.Pass) ([]goanalysis.Issue, error) { var issues []goanalysis.Issue for _, file := range pass.Files { +======= +func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) { + var issues []Issue + + for _, node := range nodes { + file, ok := node.(*ast.File) + if !ok { + continue + } + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, c := range file.Comments { for _, comment := range c.List { if !commentPattern.MatchString(comment.Text) { @@ -73,11 +208,16 @@ func (l Linter) Run(pass *analysis.Pass) ([]goanalysis.Issue, error) { leadingSpace = leadingSpaceMatches[1] } +<<<<<<< HEAD directiveWithOptionalLeadingSpace := commentMark +======= + directiveWithOptionalLeadingSpace := "//" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if leadingSpace != "" { directiveWithOptionalLeadingSpace += " " } +<<<<<<< HEAD split := strings.Split(strings.SplitN(comment.Text, ":", 2)[0], commentMark) directiveWithOptionalLeadingSpace += strings.TrimSpace(split[1]) @@ -112,11 +252,44 @@ func (l Linter) Run(pass *analysis.Pass) ([]goanalysis.Issue, error) { } issues = append(issues, goanalysis.NewIssue(issue, pass)) +======= + split := strings.Split(strings.SplitN(comment.Text, ":", 2)[0], "//") + directiveWithOptionalLeadingSpace += strings.TrimSpace(split[1]) + + pos := fset.Position(comment.Pos()) + end := fset.Position(comment.End()) + + base := BaseIssue{ + fullDirective: comment.Text, + directiveWithOptionalLeadingSpace: directiveWithOptionalLeadingSpace, + position: pos, + } + + // check for, report and eliminate leading spaces, so we can check for other issues + if leadingSpace != "" { + removeWhitespace := &result.Replacement{ + Inline: &result.InlineFix{ + StartCol: pos.Column + 1, + Length: len(leadingSpace), + NewString: "", + }, + } + if (l.needs & NeedsMachineOnly) != 0 { + issue := NotMachine{BaseIssue: base} + issue.BaseIssue.replacement = removeWhitespace + issues = append(issues, issue) + } else if len(leadingSpace) > 1 { + issue := ExtraLeadingSpace{BaseIssue: base} + issue.BaseIssue.replacement = removeWhitespace + issue.BaseIssue.replacement.Inline.NewString = " " // assume a single space was intended + issues = append(issues, issue) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } fullMatches := fullDirectivePattern.FindStringSubmatch(comment.Text) if len(fullMatches) == 0 { +<<<<<<< HEAD 
issue := &result.Issue{ FromLinter: LinterName, Text: formatParseError(comment.Text, directiveWithOptionalLeadingSpace), @@ -125,6 +298,9 @@ func (l Linter) Run(pass *analysis.Pass) ([]goanalysis.Issue, error) { issues = append(issues, goanalysis.NewIssue(issue, pass)) +======= + issues = append(issues, ParseError{BaseIssue: base}) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) continue } @@ -134,7 +310,11 @@ func (l Linter) Run(pass *analysis.Pass) ([]goanalysis.Issue, error) { if lintersText != "" && !strings.HasPrefix(lintersText, "all") { lls := strings.Split(lintersText, ",") linters = make([]string, 0, len(lls)) +<<<<<<< HEAD rangeStart := (pos.Column - 1) + len(commentMark) + len(leadingSpace) + len("nolint:") +======= + rangeStart := (pos.Column - 1) + len("//") + len(leadingSpace) + len("nolint:") +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for i, ll := range lls { rangeEnd := rangeStart + len(ll) if i < len(lls)-1 { @@ -150,6 +330,7 @@ func (l Linter) Run(pass *analysis.Pass) ([]goanalysis.Issue, error) { if (l.needs & NeedsSpecific) != 0 { if len(linters) == 0 { +<<<<<<< HEAD issue := &result.Issue{ FromLinter: LinterName, Text: formatNotSpecific(comment.Text, directiveWithOptionalLeadingSpace), @@ -157,11 +338,15 @@ func (l Linter) Run(pass *analysis.Pass) ([]goanalysis.Issue, error) { } issues = append(issues, goanalysis.NewIssue(issue, pass)) +======= + issues = append(issues, NotSpecific{BaseIssue: base}) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } // when detecting unused directives, we send all the directives through and filter them out in the nolint processor if (l.needs & NeedsUnused) != 0 { +<<<<<<< HEAD removeNolintCompletely := []analysis.SuggestedFix{{ TextEdits: []analysis.TextEdit{{ Pos: token.Pos(pos.Offset), @@ -198,11 +383,46 @@ func (l Linter) Run(pass *analysis.Pass) ([]goanalysis.Issue, error) { } issues = append(issues, goanalysis.NewIssue(issue, pass)) +======= + removeNolintCompletely := &result.Replacement{} + + startCol := pos.Column - 1 + + if startCol == 0 { + // if the directive starts from a new line, remove the line + removeNolintCompletely.NeedOnlyDelete = true + } else { + removeNolintCompletely.Inline = &result.InlineFix{ + StartCol: startCol, + Length: end.Column - pos.Column, + NewString: "", + } + } + + if len(linters) == 0 { + issue := UnusedCandidate{BaseIssue: base} + issue.replacement = removeNolintCompletely + issues = append(issues, issue) + } else { + for _, linter := range linters { + issue := UnusedCandidate{BaseIssue: base, ExpectedLinter: linter} + // only offer replacement if there is a single linter + // because of issues around commas and the possibility of all + // linters being removed + if len(linters) == 1 { + issue.replacement = removeNolintCompletely + } + issues = append(issues, issue) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } } +<<<<<<< HEAD if (l.needs&NeedsExplanation) != 0 && (explanation == "" || strings.TrimSpace(explanation) == commentMark) { +======= + if (l.needs&NeedsExplanation) != 0 && (explanation == "" || strings.TrimSpace(explanation) == "//") { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) needsExplanation := len(linters) == 0 // if no linters are mentioned, we must have explanation // otherwise, check if we are excluding all the mentioned linters for _, ll := range linters { @@ -214,6 +434,7 @@ func (l Linter) Run(pass *analysis.Pass) ([]goanalysis.Issue, error) { if needsExplanation { fullDirectiveWithoutExplanation := 
trailingBlankExplanation.ReplaceAllString(comment.Text, "") +<<<<<<< HEAD issue := &result.Issue{ FromLinter: LinterName, @@ -222,6 +443,12 @@ func (l Linter) Run(pass *analysis.Pass) ([]goanalysis.Issue, error) { } issues = append(issues, goanalysis.NewIssue(issue, pass)) +======= + issues = append(issues, NoExplanation{ + BaseIssue: base, + fullDirectiveWithoutExplanation: fullDirectiveWithoutExplanation, + }) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/nolintlint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/nolintlint.go index e1c878628d..d92c25d491 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/nolintlint.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/nolintlint.go @@ -2,10 +2,15 @@ package nolintlint import ( "fmt" +<<<<<<< HEAD +======= + "go/ast" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "sync" "golang.org/x/tools/go/analysis" +<<<<<<< HEAD "github.com/golangci/golangci-lint/pkg/golinters/internal" "github.com/golangci/golangci-lint/pkg/config" @@ -15,11 +20,22 @@ import ( ) const LinterName = nolintlint.LinterName +======= + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/goanalysis" + "github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +const LinterName = "nolintlint" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func New(settings *config.NoLintLintSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue +<<<<<<< HEAD var needs nolintlint.Needs if settings.RequireExplanation { needs |= nolintlint.NeedsExplanation @@ -43,6 +59,15 @@ func New(settings *config.NoLintLintSettings) *goanalysis.Linter { issues, err := lnt.Run(pass) if err != nil { return nil, fmt.Errorf("linter failed to run: %w", err) +======= + analyzer := &analysis.Analyzer{ + Name: LinterName, + Doc: goanalysis.TheOnlyanalyzerDoc, + Run: func(pass *analysis.Pass) (any, error) { + issues, err := runNoLintLint(pass, settings) + if err != nil { + return nil, err +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if len(issues) == 0 { @@ -58,7 +83,11 @@ func New(settings *config.NoLintLintSettings) *goanalysis.Linter { } return goanalysis.NewLinter( +<<<<<<< HEAD nolintlint.LinterName, +======= + LinterName, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "Reports ill-formed or insufficient nolint directives", []*analysis.Analyzer{analyzer}, nil, @@ -66,3 +95,58 @@ func New(settings *config.NoLintLintSettings) *goanalysis.Linter { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } +<<<<<<< HEAD +======= + +func runNoLintLint(pass *analysis.Pass, settings *config.NoLintLintSettings) ([]goanalysis.Issue, error) { + var needs internal.Needs + if settings.RequireExplanation { + needs |= internal.NeedsExplanation + } + if settings.RequireSpecific { + needs |= internal.NeedsSpecific + } + if !settings.AllowUnused { + needs |= internal.NeedsUnused + } + + lnt, err := internal.NewLinter(needs, settings.AllowNoExplanation) + if err != nil { + return nil, err + } + + nodes := make([]ast.Node, 0, len(pass.Files)) + for _, n := range pass.Files { + nodes = append(nodes, n) + } + + lintIssues, err := lnt.Run(pass.Fset, nodes...) 
+ if err != nil { + return nil, fmt.Errorf("linter failed to run: %w", err) + } + + var issues []goanalysis.Issue + + for _, i := range lintIssues { + expectNoLint := false + var expectedNolintLinter string + if ii, ok := i.(internal.UnusedCandidate); ok { + expectedNolintLinter = ii.ExpectedLinter + expectNoLint = true + } + + issue := &result.Issue{ + FromLinter: LinterName, + Text: i.Details(), + Pos: i.Position(), + ExpectNoLint: expectNoLint, + ExpectedNoLintLinter: expectedNolintLinter, + Replacement: i.Replacement(), + } + + issues = append(issues, goanalysis.NewIssue(issue, pass)) + } + + return issues, nil +} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc/prealloc.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc/prealloc.go index 17e86c98ee..4f1d16cbd6 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc/prealloc.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc/prealloc.go @@ -2,6 +2,10 @@ package prealloc import ( "fmt" +<<<<<<< HEAD +======= + "sync" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/alexkohler/prealloc/pkg" "golang.org/x/tools/go/analysis" @@ -9,16 +13,39 @@ import ( "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" "github.com/golangci/golangci-lint/pkg/golinters/internal" +<<<<<<< HEAD +======= + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const linterName = "prealloc" func New(settings *config.PreallocSettings) *goanalysis.Linter { +<<<<<<< HEAD +======= + var mu sync.Mutex + var resIssues []goanalysis.Issue + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { +<<<<<<< HEAD runPreAlloc(pass, settings) +======= + issues := runPreAlloc(pass, settings) + + if len(issues) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, issues...) 
+ mu.Unlock() +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil, nil }, @@ -29,6 +56,7 @@ func New(settings *config.PreallocSettings) *goanalysis.Linter { "Finds slice declarations that could potentially be pre-allocated", []*analysis.Analyzer{analyzer}, nil, +<<<<<<< HEAD ).WithLoadMode(goanalysis.LoadModeSyntax) } @@ -41,4 +69,25 @@ func runPreAlloc(pass *analysis.Pass, settings *config.PreallocSettings) { Message: fmt.Sprintf("Consider pre-allocating %s", internal.FormatCode(hint.DeclaredSliceName, nil)), }) } +======= + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} + +func runPreAlloc(pass *analysis.Pass, settings *config.PreallocSettings) []goanalysis.Issue { + var issues []goanalysis.Issue + + hints := pkg.Check(pass.Files, settings.Simple, settings.RangeLoops, settings.ForLoops) + + for _, hint := range hints { + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + Pos: pass.Fset.Position(hint.Pos), + Text: fmt.Sprintf("Consider pre-allocating %s", internal.FormatCode(hint.DeclaredSliceName, nil)), + FromLinter: linterName, + }, pass)) + } + + return issues +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/protogetter/protogetter.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/protogetter/protogetter.go index 6c65f86bcf..72532e46bc 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/protogetter/protogetter.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/protogetter/protogetter.go @@ -1,14 +1,30 @@ package protogetter import ( +<<<<<<< HEAD +======= + "sync" + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/ghostiam/protogetter" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" +<<<<<<< HEAD +) + +func New(settings *config.ProtoGetterSettings) *goanalysis.Linter { +======= + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" ) func New(settings *config.ProtoGetterSettings) *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var cfg protogetter.Config if settings != nil { cfg = protogetter.Config{ @@ -18,15 +34,61 @@ func New(settings *config.ProtoGetterSettings) *goanalysis.Linter { ReplaceFirstArgInAppend: settings.ReplaceFirstArgInAppend, } } +<<<<<<< HEAD cfg.Mode = protogetter.StandaloneMode a := protogetter.NewAnalyzer(&cfg) +======= + cfg.Mode = protogetter.GolangciLintMode + + a := protogetter.NewAnalyzer(&cfg) + a.Run = func(pass *analysis.Pass) (any, error) { + pgIssues, err := protogetter.Run(pass, &cfg) + if err != nil { + return nil, err + } + + issues := make([]goanalysis.Issue, len(pgIssues)) + for i, issue := range pgIssues { + report := &result.Issue{ + FromLinter: a.Name, + Pos: issue.Pos, + Text: issue.Message, + Replacement: &result.Replacement{ + Inline: &result.InlineFix{ + StartCol: issue.InlineFix.StartCol, + Length: issue.InlineFix.Length, + NewString: issue.InlineFix.NewString, + }, + }, + } + + issues[i] = goanalysis.NewIssue(report, pass) + } + + if len(issues) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, issues...) 
+ mu.Unlock() + + return nil, nil + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return goanalysis.NewLinter( a.Name, a.Doc, []*analysis.Analyzer{a}, nil, +<<<<<<< HEAD ).WithLoadMode(goanalysis.LoadModeTypesInfo) +======= + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeTypesInfo) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/recvcheck/recvcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/recvcheck/recvcheck.go index 3af4885b40..5ad82008a6 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/recvcheck/recvcheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/recvcheck/recvcheck.go @@ -4,6 +4,7 @@ import ( "github.com/raeperd/recvcheck" "golang.org/x/tools/go/analysis" +<<<<<<< HEAD "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" ) @@ -17,6 +18,13 @@ func New(settings *config.RecvcheckSettings) *goanalysis.Linter { } a := recvcheck.NewAnalyzer(cfg) +======= + "github.com/golangci/golangci-lint/pkg/goanalysis" +) + +func New() *goanalysis.Linter { + a := recvcheck.Analyzer +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return goanalysis.NewLinter( a.Name, diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive/revive.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive/revive.go index ec621ccfba..49ef8b63ca 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive/revive.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive/revive.go @@ -2,7 +2,10 @@ package revive import ( "bytes" +<<<<<<< HEAD "cmp" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "encoding/json" "fmt" "go/token" @@ -115,7 +118,11 @@ func newWrapper(settings *config.ReviveSettings) (*wrapper, error) { } func (w *wrapper) run(lintCtx *linter.Context, pass *analysis.Pass) ([]goanalysis.Issue, error) { +<<<<<<< HEAD packages := [][]string{internal.GetGoFileNames(pass)} +======= + packages := [][]string{internal.GetFileNames(pass)} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) failures, err := w.revive.Lint(packages, w.lintingRules, *w.conf) if err != nil { @@ -165,7 +172,11 @@ func toIssue(pass *analysis.Pass, object *jsonObject) goanalysis.Issue { lineRangeTo = object.Position.Start.Line } +<<<<<<< HEAD issue := &result.Issue{ +======= + return goanalysis.NewIssue(&result.Issue{ +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Severity: string(object.Severity), Text: fmt.Sprintf("%s: %s", object.RuleName, object.Failure.Failure), Pos: token.Position{ @@ -179,6 +190,7 @@ func toIssue(pass *analysis.Pass, object *jsonObject) goanalysis.Issue { To: lineRangeTo, }, FromLinter: linterName, +<<<<<<< HEAD } if object.ReplacementLine != "" { @@ -197,6 +209,9 @@ func toIssue(pass *analysis.Pass, object *jsonObject) goanalysis.Issue { } return goanalysis.NewIssue(issue, pass) +======= + }, pass) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // This function mimics the GetConfig function of revive. @@ -397,8 +412,17 @@ const defaultConfidence = 0.8 func normalizeConfig(cfg *lint.Config) { // NOTE(ldez): this custom section for golangci-lint should be kept. 
// --- +<<<<<<< HEAD cfg.Confidence = cmp.Or(cfg.Confidence, defaultConfidence) cfg.Severity = cmp.Or(cfg.Severity, lint.SeverityWarning) +======= + if cfg.Confidence == 0 { + cfg.Confidence = defaultConfidence + } + if cfg.Severity == "" { + cfg.Severity = lint.SeverityWarning + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // --- if len(cfg.Rules) == 0 { diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagalign/tagalign.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagalign/tagalign.go index 7c8a0c8b02..b46118d96c 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagalign/tagalign.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagalign/tagalign.go @@ -1,15 +1,32 @@ package tagalign import ( +<<<<<<< HEAD +======= + "sync" + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/4meepo/tagalign" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" +<<<<<<< HEAD ) func New(settings *config.TagAlignSettings) *goanalysis.Linter { var options []tagalign.Option +======= + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +func New(settings *config.TagAlignSettings) *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + options := []tagalign.Option{tagalign.WithMode(tagalign.GolangciLintMode)} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if settings != nil { options = append(options, tagalign.WithAlign(settings.Align)) @@ -25,11 +42,51 @@ func New(settings *config.TagAlignSettings) *goanalysis.Linter { } analyzer := tagalign.NewAnalyzer(options...) +<<<<<<< HEAD +======= + analyzer.Run = func(pass *analysis.Pass) (any, error) { + taIssues := tagalign.Run(pass, options...) + + issues := make([]goanalysis.Issue, len(taIssues)) + for i, issue := range taIssues { + report := &result.Issue{ + FromLinter: analyzer.Name, + Pos: issue.Pos, + Text: issue.Message, + Replacement: &result.Replacement{ + Inline: &result.InlineFix{ + StartCol: issue.InlineFix.StartCol, + Length: issue.InlineFix.Length, + NewString: issue.InlineFix.NewString, + }, + }, + } + + issues[i] = goanalysis.NewIssue(report, pass) + } + + if len(issues) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, issues...) 
+ mu.Unlock() + + return nil, nil + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return goanalysis.NewLinter( analyzer.Name, analyzer.Doc, []*analysis.Analyzer{analyzer}, nil, +<<<<<<< HEAD ).WithLoadMode(goanalysis.LoadModeSyntax) +======= + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagliatelle/tagliatelle.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagliatelle/tagliatelle.go index 08215c3a53..0e76e3858d 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagliatelle/tagliatelle.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagliatelle/tagliatelle.go @@ -10,12 +10,19 @@ import ( func New(settings *config.TagliatelleSettings) *goanalysis.Linter { cfg := tagliatelle.Config{ +<<<<<<< HEAD Base: tagliatelle.Base{ Rules: map[string]string{ "json": "camel", "yaml": "camel", "header": "header", }, +======= + Rules: map[string]string{ + "json": "camel", + "yaml": "camel", + "header": "header", +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }, } @@ -23,6 +30,7 @@ func New(settings *config.TagliatelleSettings) *goanalysis.Linter { for k, v := range settings.Case.Rules { cfg.Rules[k] = v } +<<<<<<< HEAD cfg.ExtendedRules = toExtendedRules(settings.Case.ExtendedRules) cfg.UseFieldName = settings.Case.UseFieldName @@ -40,6 +48,9 @@ func New(settings *config.TagliatelleSettings) *goanalysis.Linter { Package: override.Package, }) } +======= + cfg.UseFieldName = settings.Case.UseFieldName +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } a := tagliatelle.New(cfg) @@ -49,6 +60,7 @@ func New(settings *config.TagliatelleSettings) *goanalysis.Linter { a.Doc, []*analysis.Analyzer{a}, nil, +<<<<<<< HEAD ).WithLoadMode(goanalysis.LoadModeTypesInfo) } @@ -64,4 +76,7 @@ func toExtendedRules(src map[string]config.TagliatelleExtendedRule) map[string]t } return result +======= + ).WithLoadMode(goanalysis.LoadModeSyntax) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/testpackage/testpackage.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/testpackage/testpackage.go index f617da5536..0f98fcca34 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/testpackage/testpackage.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/testpackage/testpackage.go @@ -10,6 +10,7 @@ import ( "github.com/golangci/golangci-lint/pkg/goanalysis" ) +<<<<<<< HEAD func New(settings *config.TestpackageSettings) *goanalysis.Linter { a := testpackage.NewAnalyzer() @@ -19,10 +20,25 @@ func New(settings *config.TestpackageSettings) *goanalysis.Linter { a.Name: { testpackage.SkipRegexpFlagName: settings.SkipRegexp, testpackage.AllowPackagesFlagName: strings.Join(settings.AllowPackages, ","), +======= +func New(cfg *config.TestpackageSettings) *goanalysis.Linter { + a := testpackage.NewAnalyzer() + + var settings map[string]map[string]any + if cfg != nil { + settings = map[string]map[string]any{ + a.Name: { + testpackage.SkipRegexpFlagName: cfg.SkipRegexp, + testpackage.AllowPackagesFlagName: strings.Join(cfg.AllowPackages, ","), +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }, } } +<<<<<<< HEAD return goanalysis.NewLinter(a.Name, a.Doc, []*analysis.Analyzer{a}, cfg). 
+======= + return goanalysis.NewLinter(a.Name, a.Doc, []*analysis.Analyzer{a}, settings). +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/thelper/thelper.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/thelper/thelper.go index 102610a69a..57874d9300 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/thelper/thelper.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/thelper/thelper.go @@ -12,7 +12,11 @@ import ( "github.com/golangci/golangci-lint/pkg/golinters/internal" ) +<<<<<<< HEAD func New(settings *config.ThelperSettings) *goanalysis.Linter { +======= +func New(cfg *config.ThelperSettings) *goanalysis.Linter { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) a := analyzer.NewAnalyzer() opts := map[string]struct{}{ @@ -33,11 +37,19 @@ func New(settings *config.ThelperSettings) *goanalysis.Linter { "tb_first": {}, } +<<<<<<< HEAD if settings != nil { applyTHelperOptions(settings.Test, "t_", opts) applyTHelperOptions(settings.Fuzz, "f_", opts) applyTHelperOptions(settings.Benchmark, "b_", opts) applyTHelperOptions(settings.TB, "tb_", opts) +======= + if cfg != nil { + applyTHelperOptions(cfg.Test, "t_", opts) + applyTHelperOptions(cfg.Fuzz, "f_", opts) + applyTHelperOptions(cfg.Benchmark, "b_", opts) + applyTHelperOptions(cfg.TB, "tb_", opts) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if len(opts) == 0 { @@ -46,7 +58,11 @@ func New(settings *config.ThelperSettings) *goanalysis.Linter { args := maps.Keys(opts) +<<<<<<< HEAD cfg := map[string]map[string]any{ +======= + cfgMap := map[string]map[string]any{ +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) a.Name: { "checks": strings.Join(args, ","), }, @@ -56,7 +72,11 @@ func New(settings *config.ThelperSettings) *goanalysis.Linter { a.Name, a.Doc, []*analysis.Analyzer{a}, +<<<<<<< HEAD cfg, +======= + cfgMap, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam/unparam.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam/unparam.go index 04c9a223e5..c2fcae383b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam/unparam.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam/unparam.go @@ -1,6 +1,11 @@ package unparam import ( +<<<<<<< HEAD +======= + "sync" + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/buildssa" "golang.org/x/tools/go/packages" @@ -9,21 +14,46 @@ import ( "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" +<<<<<<< HEAD +======= + "github.com/golangci/golangci-lint/pkg/result" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const linterName = "unparam" func New(settings *config.UnparamSettings) *goanalysis.Linter { +<<<<<<< HEAD +======= + var mu sync.Mutex + var resIssues []goanalysis.Issue + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Requires: []*analysis.Analyzer{buildssa.Analyzer}, Run: func(pass *analysis.Pass) (any, error) { +<<<<<<< HEAD err := runUnparam(pass, settings) +======= + issues, err := runUnparam(pass, settings) +>>>>>>> 70e0318b1 ([WIP] add 
archivista storage backend) if err != nil { return nil, err } +<<<<<<< HEAD +======= + if len(issues) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, issues...) + mu.Unlock() + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil, nil }, } @@ -37,10 +67,19 @@ func New(settings *config.UnparamSettings) *goanalysis.Linter { if settings.Algo != "cha" { lintCtx.Log.Warnf("`linters-settings.unparam.algo` isn't supported by the newest `unparam`") } +<<<<<<< HEAD }).WithLoadMode(goanalysis.LoadModeTypesInfo) } func runUnparam(pass *analysis.Pass, settings *config.UnparamSettings) error { +======= + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeTypesInfo) +} + +func runUnparam(pass *analysis.Pass, settings *config.UnparamSettings) ([]goanalysis.Issue, error) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ssa := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) ssaPkg := ssa.Pkg @@ -58,6 +97,7 @@ func runUnparam(pass *analysis.Pass, settings *config.UnparamSettings) error { unparamIssues, err := c.Check() if err != nil { +<<<<<<< HEAD return err } @@ -69,4 +109,19 @@ func runUnparam(pass *analysis.Pass, settings *config.UnparamSettings) error { } return nil +======= + return nil, err + } + + var issues []goanalysis.Issue + for _, i := range unparamIssues { + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + Pos: pass.Fset.Position(i.Pos()), + Text: i.Message(), + FromLinter: linterName, + }, pass)) + } + + return issues, nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/usestdlibvars/usestdlibvars.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/usestdlibvars/usestdlibvars.go index 00f7d9742a..84a676b4bb 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/usestdlibvars/usestdlibvars.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/usestdlibvars/usestdlibvars.go @@ -8,6 +8,7 @@ import ( "github.com/golangci/golangci-lint/pkg/goanalysis" ) +<<<<<<< HEAD func New(settings *config.UseStdlibVarsSettings) *goanalysis.Linter { a := analyzer.New() @@ -26,6 +27,26 @@ func New(settings *config.UseStdlibVarsSettings) *goanalysis.Linter { analyzer.TimeMonthFlag: settings.TimeMonth, analyzer.TimeWeekdayFlag: settings.TimeWeekday, analyzer.TLSSignatureSchemeFlag: settings.TLSSignatureScheme, +======= +func New(cfg *config.UseStdlibVarsSettings) *goanalysis.Linter { + a := analyzer.New() + + cfgMap := make(map[string]map[string]any) + if cfg != nil { + cfgMap[a.Name] = map[string]any{ + analyzer.ConstantKindFlag: cfg.ConstantKind, + analyzer.CryptoHashFlag: cfg.CryptoHash, + analyzer.HTTPMethodFlag: cfg.HTTPMethod, + analyzer.HTTPStatusCodeFlag: cfg.HTTPStatusCode, + analyzer.OSDevNullFlag: cfg.OSDevNull, + analyzer.RPCDefaultPathFlag: cfg.DefaultRPCPath, + analyzer.SQLIsolationLevelFlag: cfg.SQLIsolationLevel, + analyzer.SyslogPriorityFlag: cfg.SyslogPriority, + analyzer.TimeLayoutFlag: cfg.TimeLayout, + analyzer.TimeMonthFlag: cfg.TimeMonth, + analyzer.TimeWeekdayFlag: cfg.TimeWeekday, + analyzer.TLSSignatureSchemeFlag: cfg.TLSSignatureScheme, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -33,6 +54,10 @@ func New(settings *config.UseStdlibVarsSettings) *goanalysis.Linter { a.Name, a.Doc, []*analysis.Analyzer{a}, +<<<<<<< HEAD cfg, +======= + cfgMap, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) 
).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace/whitespace.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace/whitespace.go index d45969efce..38a82c1f3d 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace/whitespace.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace/whitespace.go @@ -1,17 +1,40 @@ package whitespace import ( +<<<<<<< HEAD +======= + "fmt" + "sync" + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/ultraware/whitespace" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" +<<<<<<< HEAD ) func New(settings *config.WhitespaceSettings) *goanalysis.Linter { var wsSettings whitespace.Settings if settings != nil { wsSettings = whitespace.Settings{ +======= + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +const linterName = "whitespace" + +func New(settings *config.WhitespaceSettings) *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + var wsSettings whitespace.Settings + if settings != nil { + wsSettings = whitespace.Settings{ + Mode: whitespace.RunningModeGolangCI, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) MultiIf: settings.MultiIf, MultiFunc: settings.MultiFunc, } @@ -24,5 +47,72 @@ func New(settings *config.WhitespaceSettings) *goanalysis.Linter { a.Doc, []*analysis.Analyzer{a}, nil, +<<<<<<< HEAD ).WithLoadMode(goanalysis.LoadModeSyntax) +======= + ).WithContextSetter(func(_ *linter.Context) { + a.Run = func(pass *analysis.Pass) (any, error) { + issues, err := runWhitespace(pass, wsSettings) + if err != nil { + return nil, err + } + + if len(issues) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, issues...) 
+ mu.Unlock() + + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} + +func runWhitespace(pass *analysis.Pass, wsSettings whitespace.Settings) ([]goanalysis.Issue, error) { + lintIssues := whitespace.Run(pass, &wsSettings) + + issues := make([]goanalysis.Issue, len(lintIssues)) + for i, issue := range lintIssues { + report := &result.Issue{ + FromLinter: linterName, + Pos: pass.Fset.PositionFor(issue.Diagnostic, false), + Text: issue.Message, + } + + switch issue.MessageType { + case whitespace.MessageTypeRemove: + if len(issue.LineNumbers) == 0 { + continue + } + + report.LineRange = &result.Range{ + From: issue.LineNumbers[0], + To: issue.LineNumbers[len(issue.LineNumbers)-1], + } + + report.Replacement = &result.Replacement{NeedOnlyDelete: true} + + case whitespace.MessageTypeAdd: + report.Pos = pass.Fset.PositionFor(issue.FixStart, false) + report.Replacement = &result.Replacement{ + Inline: &result.InlineFix{ + StartCol: 0, + Length: 1, + NewString: "\n\t", + }, + } + + default: + return nil, fmt.Errorf("unknown message type: %v", issue.MessageType) + } + + issues[i] = goanalysis.NewIssue(report, pass) + } + + return issues, nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/wrapcheck/wrapcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/wrapcheck/wrapcheck.go index b2f5ec7420..609d8e88c5 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/wrapcheck/wrapcheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/wrapcheck/wrapcheck.go @@ -11,8 +11,11 @@ import ( func New(settings *config.WrapcheckSettings) *goanalysis.Linter { cfg := wrapcheck.NewDefaultConfig() if settings != nil { +<<<<<<< HEAD cfg.ExtraIgnoreSigs = settings.ExtraIgnoreSigs +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if len(settings.IgnoreSigs) != 0 { cfg.IgnoreSigs = settings.IgnoreSigs } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go index 1e438a0f03..3de17437e2 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go @@ -164,6 +164,7 @@ func (lc *Config) WithNoopFallback(cfg *config.Config, cond func(cfg *config.Con } func IsGoLowerThanGo122() func(cfg *config.Config) error { +<<<<<<< HEAD return isGoLowerThanGo("1.22") } @@ -178,6 +179,14 @@ func isGoLowerThanGo(v string) func(cfg *config.Config) error { } return fmt.Errorf("this linter is disabled because the Go version (%s) of your project is lower than Go %s", cfg.Run.Go, v) +======= + return func(cfg *config.Config) error { + if cfg == nil || config.IsGoGreaterThanOrEqual(cfg.Run.Go, "1.22") { + return nil + } + + return fmt.Errorf("this linter is disabled because the Go version (%s) of your project is lower than Go 1.22", cfg.Run.Go) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_linter.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_linter.go index 4338aa88cc..a05303c3e9 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_linter.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_linter.go @@ -26,7 +26,10 @@ import ( 
"github.com/golangci/golangci-lint/pkg/golinters/exhaustive" "github.com/golangci/golangci-lint/pkg/golinters/exhaustruct" "github.com/golangci/golangci-lint/pkg/golinters/exportloopref" +<<<<<<< HEAD "github.com/golangci/golangci-lint/pkg/golinters/exptostd" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/golangci/golangci-lint/pkg/golinters/fatcontext" "github.com/golangci/golangci-lint/pkg/golinters/forbidigo" "github.com/golangci/golangci-lint/pkg/golinters/forcetypeassert" @@ -73,7 +76,10 @@ import ( "github.com/golangci/golangci-lint/pkg/golinters/nakedret" "github.com/golangci/golangci-lint/pkg/golinters/nestif" "github.com/golangci/golangci-lint/pkg/golinters/nilerr" +<<<<<<< HEAD "github.com/golangci/golangci-lint/pkg/golinters/nilnesserr" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/golangci/golangci-lint/pkg/golinters/nilnil" "github.com/golangci/golangci-lint/pkg/golinters/nlreturn" "github.com/golangci/golangci-lint/pkg/golinters/noctx" @@ -107,7 +113,10 @@ import ( "github.com/golangci/golangci-lint/pkg/golinters/unparam" "github.com/golangci/golangci-lint/pkg/golinters/unused" "github.com/golangci/golangci-lint/pkg/golinters/usestdlibvars" +<<<<<<< HEAD "github.com/golangci/golangci-lint/pkg/golinters/usetesting" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/golangci/golangci-lint/pkg/golinters/varnamelen" "github.com/golangci/golangci-lint/pkg/golinters/wastedassign" "github.com/golangci/golangci-lint/pkg/golinters/whitespace" @@ -163,8 +172,12 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.58.0"). WithPresets(linter.PresetStyle). WithLoadForGoAnalysis(). +<<<<<<< HEAD WithAutoFix(). WithURL("https://github.com/lasiar/canonicalheader"), +======= + WithURL("https://github.com/lasiar/canonicalHeader"), +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) linter.NewConfig(containedctx.New()). WithSince("v1.44.0"). @@ -186,16 +199,28 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(cyclop.New(&cfg.LintersSettings.Cyclop)). WithSince("v1.37.0"). +<<<<<<< HEAD +======= + WithLoadForGoAnalysis(). +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithPresets(linter.PresetComplexity). WithURL("https://github.com/bkielbasa/cyclop"), linter.NewConfig(decorder.New(&cfg.LintersSettings.Decorder)). WithSince("v1.44.0"). +<<<<<<< HEAD WithPresets(linter.PresetStyle). +======= + WithPresets(linter.PresetFormatting, linter.PresetStyle). +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://gitlab.com/bosi/decorder"), linter.NewConfig(linter.NewNoopDeprecated("deadcode", cfg, linter.DeprecationError)). WithSince("v1.0.0"). +<<<<<<< HEAD +======= + WithLoadForGoAnalysis(). +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithPresets(linter.PresetUnused). WithURL("https://github.com/remyoudompheng/go-misc/tree/master/deadcode"). DeprecatedError("The owner seems to have abandoned the linter.", "v1.49.0", "unused"), @@ -218,7 +243,10 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(dupword.New(&cfg.LintersSettings.DupWord)). WithSince("v1.50.0"). WithPresets(linter.PresetComment). +<<<<<<< HEAD WithAutoFix(). +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/Abirdcfly/dupword"), linter.NewConfig(durationcheck.New()). 
@@ -250,12 +278,19 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.32.0"). WithPresets(linter.PresetBugs, linter.PresetError). WithLoadForGoAnalysis(). +<<<<<<< HEAD WithAutoFix(). +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/polyfloyd/go-errorlint"), linter.NewConfig(linter.NewNoopDeprecated("execinquery", cfg, linter.DeprecationError)). WithSince("v1.46.0"). WithPresets(linter.PresetSQL). +<<<<<<< HEAD +======= + WithLoadForGoAnalysis(). +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/1uf3/execinquery"). DeprecatedError("The repository of the linter has been archived by the owner.", "v1.58.0", ""), @@ -268,6 +303,10 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(linter.NewNoopDeprecated("exhaustivestruct", cfg, linter.DeprecationError)). WithSince("v1.32.0"). WithPresets(linter.PresetStyle, linter.PresetTest). +<<<<<<< HEAD +======= + WithLoadForGoAnalysis(). +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/mbilski/exhaustivestruct"). DeprecatedError("The repository of the linter has been deprecated by the owner.", "v1.46.0", "exhaustruct"), @@ -284,6 +323,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithURL("https://github.com/kyoh86/exportloopref"). DeprecatedWarning("Since Go1.22 (loopvar) this linter is no longer relevant.", "v1.60.2", "copyloopvar"), +<<<<<<< HEAD linter.NewConfig(exptostd.New()). WithSince("v1.63.0"). WithPresets(linter.PresetStyle). @@ -291,6 +331,8 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithAutoFix(). WithURL("https://github.com/ldez/exptostd"), +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) linter.NewConfig(forbidigo.New(&cfg.LintersSettings.Forbidigo)). WithSince("v1.34.0"). WithPresets(linter.PresetStyle). @@ -310,7 +352,10 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.58.0"). WithPresets(linter.PresetPerformance). WithLoadForGoAnalysis(). +<<<<<<< HEAD WithAutoFix(). +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/Crocmagnon/fatcontext"), linter.NewConfig(funlen.New(&cfg.LintersSettings.Funlen)). @@ -328,7 +373,10 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.51.0"). WithLoadForGoAnalysis(). WithPresets(linter.PresetStyle). +<<<<<<< HEAD WithAutoFix(). +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/nunnatsa/ginkgolinter"), linter.NewConfig(gocheckcompilerdirectives.New()). @@ -390,7 +438,10 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithPresets(linter.PresetStyle, linter.PresetError). WithLoadForGoAnalysis(). WithAlternativeNames("goerr113"). +<<<<<<< HEAD WithAutoFix(). +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/Djarvur/go-err113"), linter.NewConfig(gofmt.New(&cfg.LintersSettings.Gofmt)). @@ -419,6 +470,10 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(linter.NewNoopDeprecated("golint", cfg, linter.DeprecationError)). WithSince("v1.0.0"). +<<<<<<< HEAD +======= + WithLoadForGoAnalysis(). +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithPresets(linter.PresetStyle). WithURL("https://github.com/golang/lint"). 
DeprecatedError("The repository of the linter has been archived by the owner.", "v1.41.0", "revive"), @@ -462,7 +517,10 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithLoadForGoAnalysis(). WithPresets(linter.PresetStyle). WithAlternativeNames(megacheckName). +<<<<<<< HEAD WithAutoFix(). +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/dominikh/go-tools/tree/master/simple"), linter.NewConfig(gosmopolitan.New(&cfg.LintersSettings.Gosmopolitan)). @@ -476,7 +534,10 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.0.0"). WithLoadForGoAnalysis(). WithPresets(linter.PresetBugs, linter.PresetMetaLinter). +<<<<<<< HEAD WithAutoFix(). +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithAlternativeNames("vet", "vetshadow"). WithURL("https://pkg.go.dev/cmd/vet"), @@ -495,14 +556,20 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.62.0"). WithLoadForGoAnalysis(). WithPresets(linter.PresetStyle). +<<<<<<< HEAD WithAutoFix(). +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/uudashr/iface"), linter.NewConfig(importas.New(&cfg.LintersSettings.ImportAs)). WithSince("v1.38.0"). WithPresets(linter.PresetStyle). WithLoadForGoAnalysis(). +<<<<<<< HEAD WithAutoFix(). +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/julz/importas"), linter.NewConfig(inamedparam.New(&cfg.LintersSettings.Inamedparam)). @@ -523,6 +590,10 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(linter.NewNoopDeprecated("interfacer", cfg, linter.DeprecationError)). WithSince("v1.0.0"). +<<<<<<< HEAD +======= + WithLoadForGoAnalysis(). +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithPresets(linter.PresetStyle). WithURL("https://github.com/mvdan/interfacer"). DeprecatedError("The repository of the linter has been archived by the owner.", "v1.38.0", ""), @@ -531,7 +602,10 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.57.0"). WithLoadForGoAnalysis(). WithPresets(linter.PresetStyle). +<<<<<<< HEAD WithAutoFix(). +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/ckaznocha/intrange"). WithNoopFallback(cfg, linter.IsGoLowerThanGo122()), @@ -565,6 +639,10 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(linter.NewNoopDeprecated("maligned", cfg, linter.DeprecationError)). WithSince("v1.0.0"). +<<<<<<< HEAD +======= + WithLoadForGoAnalysis(). +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithPresets(linter.PresetPerformance). WithURL("https://github.com/mdempsky/maligned"). DeprecatedError("The repository of the linter has been archived by the owner.", "v1.38.0", "govet 'fieldalignment'"), @@ -591,7 +669,10 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(nakedret.New(&cfg.LintersSettings.Nakedret)). WithSince("v1.19.0"). WithPresets(linter.PresetStyle). +<<<<<<< HEAD WithAutoFix(). +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/alexkohler/nakedret"), linter.NewConfig(nestif.New(&cfg.LintersSettings.Nestif)). @@ -605,12 +686,15 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithPresets(linter.PresetBugs). 
WithURL("https://github.com/gostaticanalysis/nilerr"), +<<<<<<< HEAD linter.NewConfig(nilnesserr.New()). WithSince("v1.63.0"). WithLoadForGoAnalysis(). WithPresets(linter.PresetBugs). WithURL("https://github.com/alingse/nilnesserr"), +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) linter.NewConfig(nilnil.New(&cfg.LintersSettings.NilNil)). WithSince("v1.43.0"). WithPresets(linter.PresetStyle). @@ -620,7 +704,10 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(nlreturn.New(&cfg.LintersSettings.Nlreturn)). WithSince("v1.30.0"). WithPresets(linter.PresetStyle). +<<<<<<< HEAD WithAutoFix(). +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/ssgreg/nlreturn"), linter.NewConfig(noctx.New()). @@ -656,7 +743,10 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.55.0"). WithLoadForGoAnalysis(). WithPresets(linter.PresetPerformance). +<<<<<<< HEAD WithAutoFix(). +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/catenacyber/perfsprint"), linter.NewConfig(prealloc.New(&cfg.LintersSettings.Prealloc)). @@ -687,7 +777,11 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithLoadForGoAnalysis(). WithURL("https://github.com/curioswitch/go-reassign"), +<<<<<<< HEAD linter.NewConfig(recvcheck.New(&cfg.LintersSettings.Recvcheck)). +======= + linter.NewConfig(recvcheck.New()). +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithSince("v1.62.0"). WithPresets(linter.PresetBugs). WithLoadForGoAnalysis(). @@ -697,7 +791,10 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.37.0"). WithPresets(linter.PresetStyle, linter.PresetMetaLinter). ConsiderSlow(). +<<<<<<< HEAD WithAutoFix(). +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/mgechev/revive"), linter.NewConfig(rowserrcheck.New(&cfg.LintersSettings.RowsErrCheck)). @@ -709,7 +806,11 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(sloglint.New(&cfg.LintersSettings.SlogLint)). WithSince("v1.55.0"). WithLoadForGoAnalysis(). +<<<<<<< HEAD WithPresets(linter.PresetStyle). +======= + WithPresets(linter.PresetStyle, linter.PresetFormatting). +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/go-simpler/sloglint"), linter.NewConfig(linter.NewNoopDeprecated("scopelint", cfg, linter.DeprecationError)). @@ -736,11 +837,18 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithLoadForGoAnalysis(). WithPresets(linter.PresetBugs, linter.PresetMetaLinter). WithAlternativeNames(megacheckName). +<<<<<<< HEAD WithAutoFix(). +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://staticcheck.dev/"), linter.NewConfig(linter.NewNoopDeprecated("structcheck", cfg, linter.DeprecationError)). WithSince("v1.0.0"). +<<<<<<< HEAD +======= + WithLoadForGoAnalysis(). +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithPresets(linter.PresetUnused). WithURL("https://github.com/opennota/check"). DeprecatedError("The owner seems to have abandoned the linter.", "v1.49.0", "unused"), @@ -749,19 +857,29 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.20.0"). WithLoadForGoAnalysis(). WithPresets(linter.PresetStyle). +<<<<<<< HEAD WithAutoFix(). 
+======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/dominikh/go-tools/tree/master/stylecheck"), linter.NewConfig(tagalign.New(&cfg.LintersSettings.TagAlign)). WithSince("v1.53.0"). +<<<<<<< HEAD WithPresets(linter.PresetStyle). +======= + WithPresets(linter.PresetStyle, linter.PresetFormatting). +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithAutoFix(). WithURL("https://github.com/4meepo/tagalign"), linter.NewConfig(tagliatelle.New(&cfg.LintersSettings.Tagliatelle)). WithSince("v1.40.0"). WithPresets(linter.PresetStyle). +<<<<<<< HEAD WithLoadForGoAnalysis(). +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/ldez/tagliatelle"), linter.NewConfig(tenv.New(&cfg.LintersSettings.Tenv)). @@ -779,7 +897,10 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.55.0"). WithPresets(linter.PresetTest, linter.PresetBugs). WithLoadForGoAnalysis(). +<<<<<<< HEAD WithAutoFix(). +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/Antonboom/testifylint"), linter.NewConfig(testpackage.New(&cfg.LintersSettings.Testpackage)). @@ -829,6 +950,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(usestdlibvars.New(&cfg.LintersSettings.UseStdlibVars)). WithSince("v1.48.0"). WithPresets(linter.PresetStyle). +<<<<<<< HEAD WithAutoFix(). WithURL("https://github.com/sashamelentyev/usestdlibvars"), @@ -841,6 +963,13 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(linter.NewNoopDeprecated("varcheck", cfg, linter.DeprecationError)). WithSince("v1.0.0"). +======= + WithURL("https://github.com/sashamelentyev/usestdlibvars"), + + linter.NewConfig(linter.NewNoopDeprecated("varcheck", cfg, linter.DeprecationError)). + WithSince("v1.0.0"). + WithLoadForGoAnalysis(). +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithPresets(linter.PresetUnused). WithURL("https://github.com/opennota/check"). DeprecatedError("The owner seems to have abandoned the linter.", "v1.49.0", "unused"), @@ -872,7 +1001,10 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(wsl.New(&cfg.LintersSettings.WSL)). WithSince("v1.20.0"). WithPresets(linter.PresetStyle). +<<<<<<< HEAD WithAutoFix(). +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/bombsimon/wsl"), linter.NewConfig(zerologlint.New()). 
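One recurring shape is worth calling out before the runner.go diff: the stale side of the protogetter, tagalign, unparam, and whitespace hunks earlier in this patch all reintroduce the same pre-refactor adapter, in which each analyzer collects issues into a mutex-guarded slice and hands them back through WithIssuesReporter rather than emitting diagnostics through the pass. A minimal sketch of that shape, using only API calls visible in those hunks; the package name somelinter and the helper runSomeLinter are hypothetical stand-ins:

package somelinter

import (
	"sync"

	"golang.org/x/tools/go/analysis"

	"github.com/golangci/golangci-lint/pkg/goanalysis"
	"github.com/golangci/golangci-lint/pkg/lint/linter"
	"github.com/golangci/golangci-lint/pkg/result"
)

const linterName = "somelinter"

func New() *goanalysis.Linter {
	var mu sync.Mutex
	var resIssues []goanalysis.Issue

	a := &analysis.Analyzer{
		Name: linterName,
		Doc:  goanalysis.TheOnlyanalyzerDoc,
		Run: func(pass *analysis.Pass) (any, error) {
			issues := runSomeLinter(pass)
			if len(issues) == 0 {
				return nil, nil
			}

			// Analyzers run concurrently across packages, so the shared
			// slice is guarded by a mutex, exactly as in the stale hunks.
			mu.Lock()
			resIssues = append(resIssues, issues...)
			mu.Unlock()

			return nil, nil
		},
	}

	return goanalysis.NewLinter(a.Name, a.Doc, []*analysis.Analyzer{a}, nil).
		WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
			// Reported once, after all packages have been analyzed.
			return resIssues
		}).
		WithLoadMode(goanalysis.LoadModeSyntax)
}

// runSomeLinter is a hypothetical collector; it reports a single finding at
// the start of the first file, purely for illustration.
func runSomeLinter(pass *analysis.Pass) []goanalysis.Issue {
	if len(pass.Files) == 0 {
		return nil
	}

	return []goanalysis.Issue{
		goanalysis.NewIssue(&result.Issue{
			Pos:        pass.Fset.Position(pass.Files[0].Pos()),
			Text:       "example finding",
			FromLinter: linterName,
		}, pass),
	}
}

On the HEAD side of those same hunks the mutex, slice, and reporter disappear: analyzers report diagnostics directly and golangci-lint converts them centrally, which is also why the stale hunks have to re-add the result.Replacement and result.InlineFix types in the pkg/result/issue.go diff later in this patch.
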
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go index 157fde715f..da0021949a 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go @@ -10,7 +10,10 @@ import ( "github.com/golangci/golangci-lint/internal/errorutil" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/fsutils" +<<<<<<< HEAD "github.com/golangci/golangci-lint/pkg/goformatters" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/golangci/golangci-lint/pkg/goutil" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/lint/lintersdb" @@ -61,11 +64,14 @@ func NewRunner(log logutils.Log, cfg *config.Config, args []string, goenv *gouti return nil, fmt.Errorf("failed to get enabled linters: %w", err) } +<<<<<<< HEAD metaFormatter, err := goformatters.NewMetaFormatter(log, cfg, enabledLinters) if err != nil { return nil, fmt.Errorf("failed to create meta-formatter: %w", err) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return &Runner{ Processors: []processors.Processor{ processors.NewCgo(goenv), @@ -100,7 +106,11 @@ func NewRunner(log logutils.Log, cfg *config.Config, args []string, goenv *gouti processors.NewSeverity(log.Child(logutils.DebugKeySeverityRules), files, &cfg.Severity), // The fixer still needs to see paths for the issues that are relative to the current directory. +<<<<<<< HEAD processors.NewFixer(cfg, log, fileCache, metaFormatter), +======= + processors.NewFixer(cfg, log, fileCache), +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Now we can modify the issues for output. processors.NewPathPrefixer(cfg.Output.PathPrefix), diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go index 7c5932368f..50c87d234e 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go @@ -12,7 +12,11 @@ const defaultCodeClimateSeverity = "critical" // CodeClimateIssue is a subset of the Code Climate spec. // https://github.com/codeclimate/platform/blob/master/spec/analyzers/SPEC.md#data-types // It is just enough to support GitLab CI Code Quality. 
+<<<<<<< HEAD // https://docs.gitlab.com/ee/ci/testing/code_quality.html#code-quality-report-format +======= +// https://docs.gitlab.com/ee/ci/testing/code_quality.html#implement-a-custom-tool +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type CodeClimateIssue struct { Description string `json:"description"` CheckName string `json:"check_name"` diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/teamcity.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/teamcity.go index 83c4959114..289c8c3859 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/teamcity.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/teamcity.go @@ -4,6 +4,10 @@ import ( "fmt" "io" "strings" +<<<<<<< HEAD +======= + "unicode/utf8" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/golangci/golangci-lint/pkg/result" ) @@ -111,9 +115,19 @@ func (i InspectionInstance) Print(w io.Writer, replacer *strings.Replacer) (int, } func cutVal(s string, limit int) string { +<<<<<<< HEAD runes := []rune(s) if len(runes) > limit { return string(runes[:limit]) } return s +======= + var size, count int + for i := 0; i < limit && count < len(s); i++ { + _, size = utf8.DecodeRuneInString(s[count:]) + count += size + } + + return s[:count] +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go b/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go index e338963fa3..713fe36e5b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go @@ -5,7 +5,10 @@ import ( "fmt" "go/token" +<<<<<<< HEAD "golang.org/x/tools/go/analysis" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "golang.org/x/tools/go/packages" ) @@ -13,6 +16,21 @@ type Range struct { From, To int } +<<<<<<< HEAD +======= +type Replacement struct { + NeedOnlyDelete bool // need to delete all lines of the issue without replacement with new lines + NewLines []string // if NeedDelete is false it's the replacement lines + Inline *InlineFix +} + +type InlineFix struct { + StartCol int // zero-based + Length int // length of chunk to be replaced + NewString string +} + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type Issue struct { FromLinter string Text string @@ -22,6 +40,7 @@ type Issue struct { // Source lines of a code with the issue to show SourceLines []string +<<<<<<< HEAD // Pkg is needed for proper caching of linting results Pkg *packages.Package `json:"-"` @@ -35,6 +54,21 @@ type Issue struct { // If we know how to fix the issue we can provide replacement lines SuggestedFixes []analysis.SuggestedFix `json:",omitempty"` +======= + // If we know how to fix the issue we can provide replacement lines + Replacement *Replacement + + // Pkg is needed for proper caching of linting results + Pkg *packages.Package `json:"-"` + + LineRange *Range `json:",omitempty"` + + Pos token.Position + + // HunkPos is used only when golangci-lint is run over a diff + HunkPos int `json:",omitempty"` + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // If we are expecting a nolint (because this is from nolintlint), record the expected linter ExpectNoLint bool ExpectedNoLintLinter string diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go index b82c7f2071..bdf81fcf6c 100644 --- 
a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go @@ -1,3 +1,4 @@ +<<<<<<< HEAD // Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -22,6 +23,21 @@ import ( "github.com/golangci/golangci-lint/pkg/goformatters/gofmt" "github.com/golangci/golangci-lint/pkg/goformatters/gofumpt" "github.com/golangci/golangci-lint/pkg/goformatters/goimports" +======= +package processors + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "sort" + "strings" + + "github.com/golangci/golangci-lint/internal/go/robustio" + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/fsutils" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/golangci/golangci-lint/pkg/logutils" "github.com/golangci/golangci-lint/pkg/result" "github.com/golangci/golangci-lint/pkg/timeutils" @@ -29,23 +45,35 @@ import ( var _ Processor = (*Fixer)(nil) +<<<<<<< HEAD const filePerm = 0644 +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type Fixer struct { cfg *config.Config log logutils.Log fileCache *fsutils.FileCache sw *timeutils.Stopwatch +<<<<<<< HEAD formatter *goformatters.MetaFormatter } func NewFixer(cfg *config.Config, log logutils.Log, fileCache *fsutils.FileCache, formatter *goformatters.MetaFormatter) *Fixer { +======= +} + +func NewFixer(cfg *config.Config, log logutils.Log, fileCache *fsutils.FileCache) *Fixer { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return &Fixer{ cfg: cfg, log: log, fileCache: fileCache, sw: timeutils.NewStopwatch("fixer", log), +<<<<<<< HEAD formatter: formatter, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -58,6 +86,7 @@ func (p Fixer) Process(issues []result.Issue) ([]result.Issue, error) { return issues, nil } +<<<<<<< HEAD p.log.Infof("Applying suggested fixes") notFixableIssues, err := timeutils.TrackStage(p.sw, "all", func() ([]result.Issue, error) { @@ -65,10 +94,35 @@ func (p Fixer) Process(issues []result.Issue) ([]result.Issue, error) { }) if err != nil { p.log.Warnf("Failed to fix issues: %v", err) +======= + outIssues := make([]result.Issue, 0, len(issues)) + issuesToFixPerFile := map[string][]result.Issue{} + for i := range issues { + issue := &issues[i] + if issue.Replacement == nil { + outIssues = append(outIssues, *issue) + continue + } + + issuesToFixPerFile[issue.FilePath()] = append(issuesToFixPerFile[issue.FilePath()], *issue) + } + + for file, issuesToFix := range issuesToFixPerFile { + err := p.sw.TrackStageErr("all", func() error { + return p.fixIssuesInFile(file, issuesToFix) + }) + if err != nil { + p.log.Errorf("Failed to fix issues in file %s: %s", file, err) + + // show issues only if can't fix them + outIssues = append(outIssues, issuesToFix...) 
+ } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } p.printStat() +<<<<<<< HEAD return notFixableIssues, nil } @@ -223,10 +277,14 @@ func (p Fixer) process(issues []result.Issue) ([]result.Issue, error) { } return notFixableIssues, editError +======= + return outIssues, nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (Fixer) Finish() {} +<<<<<<< HEAD func (p Fixer) printStat() { p.sw.PrintStages() } @@ -301,3 +359,189 @@ func diff3Conflict(path, xlabel, ylabel string, xedits, yedits []diff.Edit) erro return fmt.Errorf("conflicting edits from %s and %s on %s\nfirst edits:\n%s\nsecond edits:\n%s", xlabel, ylabel, path, xdiff, ydiff) } +======= +func (p Fixer) fixIssuesInFile(filePath string, issues []result.Issue) error { + // TODO: don't read the whole file into memory: read line by line; + // can't just use bufio.scanner: it has a line length limit + origFileData, err := p.fileCache.GetFileBytes(filePath) + if err != nil { + return fmt.Errorf("failed to get file bytes for %s: %w", filePath, err) + } + + origFileLines := bytes.Split(origFileData, []byte("\n")) + + tmpFileName := filepath.Join(filepath.Dir(filePath), fmt.Sprintf(".%s.golangci_fix", filepath.Base(filePath))) + + tmpOutFile, err := os.Create(tmpFileName) + if err != nil { + return fmt.Errorf("failed to make file %s: %w", tmpFileName, err) + } + + // merge multiple issues per line into one issue + issuesPerLine := map[int][]result.Issue{} + for i := range issues { + issue := &issues[i] + issuesPerLine[issue.Line()] = append(issuesPerLine[issue.Line()], *issue) + } + + issues = issues[:0] // reuse the same memory + for line, lineIssues := range issuesPerLine { + if mergedIssue := p.mergeLineIssues(line, lineIssues, origFileLines); mergedIssue != nil { + issues = append(issues, *mergedIssue) + } + } + + issues = p.findNotIntersectingIssues(issues) + + if err = p.writeFixedFile(origFileLines, issues, tmpOutFile); err != nil { + tmpOutFile.Close() + _ = robustio.RemoveAll(tmpOutFile.Name()) + return err + } + + tmpOutFile.Close() + + if err = robustio.Rename(tmpOutFile.Name(), filePath); err != nil { + _ = robustio.RemoveAll(tmpOutFile.Name()) + return fmt.Errorf("failed to rename %s -> %s: %w", tmpOutFile.Name(), filePath, err) + } + + return nil +} + +func (p Fixer) mergeLineIssues(lineNum int, lineIssues []result.Issue, origFileLines [][]byte) *result.Issue { + origLine := origFileLines[lineNum-1] // lineNum is 1-based + + if len(lineIssues) == 1 && lineIssues[0].Replacement.Inline == nil { + return &lineIssues[0] + } + + // check issues first + for ind := range lineIssues { + li := &lineIssues[ind] + + if li.LineRange != nil { + p.log.Infof("Line %d has multiple issues but at least one of them is ranged: %#v", lineNum, lineIssues) + return &lineIssues[0] + } + + inline := li.Replacement.Inline + + if inline == nil || len(li.Replacement.NewLines) != 0 || li.Replacement.NeedOnlyDelete { + p.log.Infof("Line %d has multiple issues but at least one of them isn't inline: %#v", lineNum, lineIssues) + return li + } + + if inline.StartCol < 0 || inline.Length <= 0 || inline.StartCol+inline.Length > len(origLine) { + p.log.Warnf("Line %d (%q) has invalid inline fix: %#v, %#v", lineNum, origLine, li, inline) + return nil + } + } + + return p.applyInlineFixes(lineIssues, origLine, lineNum) +} + +func (p Fixer) applyInlineFixes(lineIssues []result.Issue, origLine []byte, lineNum int) *result.Issue { + sort.Slice(lineIssues, func(i, j int) bool { + return lineIssues[i].Replacement.Inline.StartCol < 
lineIssues[j].Replacement.Inline.StartCol + }) + + var newLineBuf bytes.Buffer + newLineBuf.Grow(len(origLine)) + + //nolint:misspell // misspelling is intentional + // example: origLine="it's becouse of them", StartCol=5, Length=7, NewString="because" + + curOrigLinePos := 0 + for i := range lineIssues { + fix := lineIssues[i].Replacement.Inline + if fix.StartCol < curOrigLinePos { + p.log.Warnf("Line %d has multiple intersecting issues: %#v", lineNum, lineIssues) + return nil + } + + if curOrigLinePos != fix.StartCol { + newLineBuf.Write(origLine[curOrigLinePos:fix.StartCol]) + } + newLineBuf.WriteString(fix.NewString) + curOrigLinePos = fix.StartCol + fix.Length + } + if curOrigLinePos != len(origLine) { + newLineBuf.Write(origLine[curOrigLinePos:]) + } + + mergedIssue := lineIssues[0] // use text from the first issue (it's not really used) + mergedIssue.Replacement = &result.Replacement{ + NewLines: []string{newLineBuf.String()}, + } + return &mergedIssue +} + +func (p Fixer) findNotIntersectingIssues(issues []result.Issue) []result.Issue { + sort.SliceStable(issues, func(i, j int) bool { + a, b := issues[i], issues[j] + return a.Line() < b.Line() + }) + + var ret []result.Issue + var currentEnd int + for i := range issues { + issue := &issues[i] + rng := issue.GetLineRange() + if rng.From <= currentEnd { + p.log.Infof("Skip issue %#v: intersects with end %d", issue, currentEnd) + continue // skip intersecting issue + } + p.log.Infof("Fix issue %#v with range %v", issue, issue.GetLineRange()) + ret = append(ret, *issue) + currentEnd = rng.To + } + + return ret +} + +func (p Fixer) writeFixedFile(origFileLines [][]byte, issues []result.Issue, tmpOutFile *os.File) error { + // issues aren't intersecting + + nextIssueIndex := 0 + for i := 0; i < len(origFileLines); i++ { + var outLine string + var nextIssue *result.Issue + if nextIssueIndex != len(issues) { + nextIssue = &issues[nextIssueIndex] + } + + origFileLineNumber := i + 1 + if nextIssue == nil || origFileLineNumber != nextIssue.GetLineRange().From { + outLine = string(origFileLines[i]) + } else { + nextIssueIndex++ + rng := nextIssue.GetLineRange() + if rng.From > rng.To { + // Maybe better decision is to skip such issues, re-evaluate if regressed. 
+ p.log.Warnf("[fixer]: issue line range is probably invalid, fix can be incorrect (from=%d, to=%d, linter=%s)", + rng.From, rng.To, nextIssue.FromLinter, + ) + } + i += rng.To - rng.From + if nextIssue.Replacement.NeedOnlyDelete { + continue + } + outLine = strings.Join(nextIssue.Replacement.NewLines, "\n") + } + + if i < len(origFileLines)-1 { + outLine += "\n" + } + if _, err := tmpOutFile.WriteString(outLine); err != nil { + return fmt.Errorf("failed to write output line: %w", err) + } + } + + return nil +} + +func (p Fixer) printStat() { + p.sw.PrintStages() +} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_from_linter.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_from_linter.go index 690cdf3f8a..0fab2b09f6 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_from_linter.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_from_linter.go @@ -34,7 +34,11 @@ func (p *MaxFromLinter) Process(issues []result.Issue) ([]result.Issue, error) { } return filterIssuesUnsafe(issues, func(issue *result.Issue) bool { +<<<<<<< HEAD if issue.SuggestedFixes != nil && p.cfg.Issues.NeedFix { +======= + if issue.Replacement != nil && p.cfg.Issues.NeedFix { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // we need to fix all issues at once => we need to return all of them return true } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_same_issues.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_same_issues.go index 0d1c286282..d961ccb0f5 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_same_issues.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_same_issues.go @@ -36,7 +36,11 @@ func (p *MaxSameIssues) Process(issues []result.Issue) ([]result.Issue, error) { } return filterIssuesUnsafe(issues, func(issue *result.Issue) bool { +<<<<<<< HEAD if issue.SuggestedFixes != nil && p.cfg.Issues.NeedFix { +======= + if issue.Replacement != nil && p.cfg.Issues.NeedFix { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // we need to fix all issues at once => we need to return all of them return true } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity.go index fcd65326f9..68432daf72 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity.go @@ -1,7 +1,10 @@ package processors import ( +<<<<<<< HEAD "cmp" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "regexp" "github.com/golangci/golangci-lint/pkg/config" @@ -68,7 +71,14 @@ func (p *Severity) transform(issue *result.Issue) *result.Issue { return issue } +<<<<<<< HEAD issue.Severity = cmp.Or(rule.severity, p.defaultSeverity) +======= + issue.Severity = rule.severity + if issue.Severity == "" { + issue.Severity = p.defaultSeverity + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return issue } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/uniq_by_line.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/uniq_by_line.go index ec134f25f6..7cf9daa576 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/uniq_by_line.go +++ 
b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/uniq_by_line.go @@ -26,7 +26,11 @@ func (*UniqByLine) Name() string { } func (p *UniqByLine) Process(issues []result.Issue) ([]result.Issue, error) { +<<<<<<< HEAD if !p.cfg.Issues.UniqByLine { +======= + if !p.cfg.Output.UniqByLine { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return issues, nil } @@ -36,7 +40,11 @@ func (p *UniqByLine) Process(issues []result.Issue) ([]result.Issue, error) { func (*UniqByLine) Finish() {} func (p *UniqByLine) shouldPassIssue(issue *result.Issue) bool { +<<<<<<< HEAD if issue.SuggestedFixes != nil && p.cfg.Issues.NeedFix { +======= + if issue.Replacement != nil && p.cfg.Issues.NeedFix { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // if issue will be auto-fixed we shouldn't collapse issues: // e.g. one line can contain 2 misspellings, they will be in 2 issues and misspell should fix both of them. return true diff --git a/vendor/github.com/golangci/modinfo/.gitignore b/vendor/github.com/golangci/modinfo/.gitignore new file mode 100644 index 0000000000..9f11b755a1 --- /dev/null +++ b/vendor/github.com/golangci/modinfo/.gitignore @@ -0,0 +1 @@ +.idea/ diff --git a/vendor/github.com/golangci/modinfo/.golangci.yml b/vendor/github.com/golangci/modinfo/.golangci.yml new file mode 100644 index 0000000000..9698182f2a --- /dev/null +++ b/vendor/github.com/golangci/modinfo/.golangci.yml @@ -0,0 +1,157 @@ +run: + timeout: 7m + +linters-settings: + govet: + enable: + - shadow + gocyclo: + min-complexity: 12 + goconst: + min-len: 3 + min-occurrences: 3 + funlen: + lines: -1 + statements: 50 + misspell: + locale: US + depguard: + rules: + main: + deny: + - pkg: "github.com/instana/testify" + desc: not allowed + - pkg: "github.com/pkg/errors" + desc: Should be replaced by standard lib errors package + tagalign: + align: false + order: + - xml + - json + - yaml + - yml + - toml + - mapstructure + - url + godox: + keywords: + - FIXME + gocritic: + enabled-tags: + - diagnostic + - style + - performance + disabled-checks: + - paramTypeCombine # already handle by gofumpt.extra-rules + - whyNoLint # already handle by nonolint + - unnamedResult + - hugeParam + - sloppyReassign + - rangeValCopy + - octalLiteral + - ptrToRefParam + - appendAssign + - ruleguard + - httpNoBody + - exposedSyncMutex + revive: + rules: + - name: struct-tag + - name: blank-imports + - name: context-as-argument + - name: context-keys-type + - name: dot-imports + - name: error-return + - name: error-strings + - name: error-naming + - name: exported + disabled: true + - name: if-return + - name: increment-decrement + - name: var-naming + - name: var-declaration + - name: package-comments + disabled: true + - name: range + - name: receiver-naming + - name: time-naming + - name: unexported-return + - name: indent-error-flow + - name: errorf + - name: empty-block + - name: superfluous-else + - name: unused-parameter + disabled: true + - name: unreachable-code + - name: redefines-builtin-id + + tagliatelle: + case: + rules: + json: pascal + yaml: camel + xml: camel + header: header + mapstructure: camel + env: upperSnake + envconfig: upperSnake + +linters: + enable-all: true + disable: + - deadcode # deprecated + - exhaustivestruct # deprecated + - golint # deprecated + - ifshort # deprecated + - interfacer # deprecated + - maligned # deprecated + - nosnakecase # deprecated + - scopelint # deprecated + - structcheck # deprecated + - varcheck # deprecated + - cyclop # duplicate of gocyclo + - sqlclosecheck # not 
relevant (SQL) + - rowserrcheck # not relevant (SQL) + - execinquery # not relevant (SQL) + - lll + - gosec + - dupl # not relevant + - prealloc # too many false-positive + - bodyclose # too many false-positive + - gomnd + - testpackage # not relevant + - tparallel # not relevant + - paralleltest # not relevant + - nestif # too many false-positive + - wrapcheck + - goerr113 # not relevant + - nlreturn # not relevant + - wsl # not relevant + - exhaustive # not relevant + - exhaustruct # not relevant + - makezero # not relevant + - forbidigo + - varnamelen # not relevant + - nilnil # not relevant + - ireturn # not relevant + - contextcheck # too many false-positive + - tenv # we already have a test "framework" to handle env vars + - noctx + - errchkjson + - nonamedreturns + - gosmopolitan # not relevant + - gochecknoglobals + +issues: + exclude-use-default: false + max-issues-per-linter: 0 + max-same-issues: 0 + exclude: + - 'Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*printf?|os\.(Un)?Setenv). is not checked' + - 'ST1000: at least one file in a package should have a package comment' + exclude-rules: + - path: (.+)_test.go + linters: + - funlen + - goconst + - maintidx diff --git a/vendor/github.com/golangci/modinfo/LICENSE b/vendor/github.com/golangci/modinfo/LICENSE new file mode 100644 index 0000000000..f288702d2f --- /dev/null +++ b/vendor/github.com/golangci/modinfo/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. 
+ + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. 
+ + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/vendor/github.com/golangci/modinfo/Makefile b/vendor/github.com/golangci/modinfo/Makefile
new file mode 100644
index 0000000000..df91018f11
--- /dev/null
+++ b/vendor/github.com/golangci/modinfo/Makefile
@@ -0,0 +1,12 @@
+.PHONY: clean check test
+
+default: clean check test
+
+clean:
+	rm -rf dist/ cover.out
+
+test: clean
+	go test -v -cover ./...
+ +check: + golangci-lint run diff --git a/vendor/github.com/golangci/modinfo/module.go b/vendor/github.com/golangci/modinfo/module.go new file mode 100644 index 0000000000..ff0b21b9b8 --- /dev/null +++ b/vendor/github.com/golangci/modinfo/module.go @@ -0,0 +1,157 @@ +package modinfo + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "path/filepath" + "reflect" + "sort" + "strings" + "sync" + + "golang.org/x/mod/modfile" + "golang.org/x/tools/go/analysis" +) + +type ModInfo struct { + Path string `json:"Path"` + Dir string `json:"Dir"` + GoMod string `json:"GoMod"` + GoVersion string `json:"GoVersion"` + Main bool `json:"Main"` +} + +var ( + once sync.Once + information []ModInfo + errInfo error +) + +var Analyzer = &analysis.Analyzer{ + Name: "modinfo", + Doc: "Module information", + URL: "https://github.com/golangci/modinfo", + Run: runOnce, + ResultType: reflect.TypeOf([]ModInfo(nil)), +} + +func runOnce(pass *analysis.Pass) (any, error) { + _, ok := os.LookupEnv("MODINFO_DEBUG_DISABLE_ONCE") + if ok { + return GetModuleInfo(pass) + } + + once.Do(func() { + information, errInfo = GetModuleInfo(pass) + }) + + return information, errInfo +} + +// GetModuleInfo gets modules information. +// Always returns 1 element except for workspace (returns all the modules of the workspace). +// Based on `go list -m -json` behavior. +func GetModuleInfo(pass *analysis.Pass) ([]ModInfo, error) { + // https://github.com/golang/go/issues/44753#issuecomment-790089020 + cmd := exec.Command("go", "list", "-m", "-json") + for _, file := range pass.Files { + name := pass.Fset.File(file.Pos()).Name() + if filepath.Ext(name) != ".go" { + continue + } + + cmd.Dir = filepath.Dir(name) + break + } + + out, err := cmd.Output() + if err != nil { + return nil, fmt.Errorf("command go list: %w: %s", err, string(out)) + } + + var infos []ModInfo + + for dec := json.NewDecoder(bytes.NewBuffer(out)); dec.More(); { + var v ModInfo + if err := dec.Decode(&v); err != nil { + return nil, fmt.Errorf("unmarshaling error: %w: %s", err, string(out)) + } + + if v.GoMod == "" { + return nil, errors.New("working directory is not part of a module") + } + + if !v.Main || v.Dir == "" { + continue + } + + infos = append(infos, v) + } + + if len(infos) == 0 { + return nil, errors.New("go.mod file not found") + } + + sort.Slice(infos, func(i, j int) bool { + return len(infos[i].Path) > len(infos[j].Path) + }) + + return infos, nil +} + +// FindModuleFromPass finds the module related to the files of the pass. +func FindModuleFromPass(pass *analysis.Pass) (ModInfo, error) { + infos, ok := pass.ResultOf[Analyzer].([]ModInfo) + if !ok { + return ModInfo{}, errors.New("no modinfo analyzer result") + } + + var name string + for _, file := range pass.Files { + f := pass.Fset.File(file.Pos()).Name() + if filepath.Ext(f) != ".go" { + continue + } + + name = f + break + } + + // no Go file found in analysis pass + if name == "" { + name, _ = os.Getwd() + } + + for _, info := range infos { + if !strings.HasPrefix(name, info.Dir) { + continue + } + return info, nil + } + + return ModInfo{}, errors.New("module information not found") +} + +// ReadModuleFileFromPass read the `go.mod` file from the pass result. +func ReadModuleFileFromPass(pass *analysis.Pass) (*modfile.File, error) { + info, err := FindModuleFromPass(pass) + if err != nil { + return nil, err + } + + return ReadModuleFile(info) +} + +// ReadModuleFile read the `go.mod` file. 
+func ReadModuleFile(info ModInfo) (*modfile.File, error) {
+	raw, err := os.ReadFile(info.GoMod)
+	if err != nil {
+		return nil, fmt.Errorf("reading go.mod file: %w", err)
+	}
+
+	return modfile.Parse("go.mod", raw, nil)
+}
diff --git a/vendor/github.com/golangci/modinfo/readme.md b/vendor/github.com/golangci/modinfo/readme.md
new file mode 100644
index 0000000000..2175de8eb4
--- /dev/null
+++ b/vendor/github.com/golangci/modinfo/readme.md
@@ -0,0 +1,73 @@
+# modinfo
+
+This module contains:
+- an analyzer that returns module information.
+- methods to find and read `go.mod` file
+
+## Examples
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "github.com/golangci/modinfo"
+    "golang.org/x/tools/go/analysis"
+    "golang.org/x/tools/go/analysis/passes/inspect"
+)
+
+var Analyzer = &analysis.Analyzer{
+    Name: "example",
+    Doc:  "Example",
+    Run: func(pass *analysis.Pass) (interface{}, error) {
+        file, err := modinfo.ReadModuleFileFromPass(pass)
+        if err != nil {
+            return nil, err
+        }
+
+        fmt.Println("go.mod", file)
+
+        // TODO
+
+        return nil, nil
+    },
+    Requires: []*analysis.Analyzer{
+        inspect.Analyzer,
+        modinfo.Analyzer,
+    },
+}
+```
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "github.com/golangci/modinfo"
+    "golang.org/x/tools/go/analysis"
+    "golang.org/x/tools/go/analysis/passes/inspect"
+)
+
+var Analyzer = &analysis.Analyzer{
+    Name: "example",
+    Doc:  "Example",
+    Run: func(pass *analysis.Pass) (interface{}, error) {
+        info, err := modinfo.FindModuleFromPass(pass)
+        if err != nil {
+            return nil, err
+        }
+
+        fmt.Println("Module", info.Dir)
+
+        // TODO
+
+        return nil, nil
+    },
+    Requires: []*analysis.Analyzer{
+        inspect.Analyzer,
+        modinfo.Analyzer,
+    },
+}
+```
diff --git a/vendor/github.com/google/cel-go/cel/BUILD.bazel b/vendor/github.com/google/cel-go/cel/BUILD.bazel
index 81549fb4c5..67ba29766f 100644
--- a/vendor/github.com/google/cel-go/cel/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/cel/BUILD.bazel
@@ -39,7 +39,7 @@ go_library(
        "//common/types/traits:go_default_library",
        "//interpreter:go_default_library",
        "//parser:go_default_library",
        "@dev_cel_expr//:expr",
        "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
        "@org_golang_google_protobuf//reflect/protodesc:go_default_library",
@@ -82,6 +82,6 @@ go_test(
        "//test:go_default_library",
        "//test/proto2pb:go_default_library",
        "//test/proto3pb:go_default_library",
        "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
        "@org_golang_google_protobuf//encoding/prototext:go_default_library",
diff --git a/vendor/github.com/google/cel-go/cel/decls.go b/vendor/github.com/google/cel-go/cel/decls.go
index 4188060210..6b3dacb174 100644
--- a/vendor/github.com/google/cel-go/cel/decls.go
+++ b/vendor/github.com/google/cel-go/cel/decls.go
@@ -23,7 +23,7 @@ import (
 	"github.com/google/cel-go/common/types"
 	"github.com/google/cel-go/common/types/ref"
 
 	celpb "cel.dev/expr"
 	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
 )
 
@@ -313,6 +313,6 @@ func ExprTypeToType(t *exprpb.Type) (*Type, error) {
 
 // ExprDeclToDeclaration converts a protobuf CEL declaration to a CEL-native declaration, either a Variable or Function.
 func ExprDeclToDeclaration(d *exprpb.Decl) (EnvOption, error) {
 	return AlphaProtoAsDeclaration(d)
 }
 
@@ -329,18 +329,18 @@ func AlphaProtoAsDeclaration(d *exprpb.Decl) (EnvOption, error) {
 func ProtoAsDeclaration(d *celpb.Decl) (EnvOption, error) {
 	switch d.GetDeclKind().(type) {
 	case *celpb.Decl_Function:
 		overloads := d.GetFunction().GetOverloads()
 		opts := make([]FunctionOpt, len(overloads))
 		for i, o := range overloads {
 			args := make([]*Type, len(o.GetParams()))
 			for j, p := range o.GetParams() {
 				a, err := types.ProtoAsType(p)
 				if err != nil {
 					return nil, err
 				}
 				args[j] = a
 			}
 			res, err := types.ProtoAsType(o.GetResultType())
 			if err != nil {
 				return nil, err
 			}
@@ -351,15 +351,15 @@ func ProtoAsDeclaration(d *celpb.Decl) (EnvOption, error) {
 			}
 		}
 		return Function(d.GetName(), opts...), nil
 	case *celpb.Decl_Ident:
 		t, err := types.ProtoAsType(d.GetIdent().GetType())
 		if err != nil {
 			return nil, err
 		}
 		if d.GetIdent().GetValue() == nil {
 			return Variable(d.GetName(), t), nil
 		}
 		val, err := ast.ProtoConstantAsVal(d.GetIdent().GetValue())
 		if err != nil {
 			return nil, err
 		}
diff --git a/vendor/github.com/google/cel-go/cel/env.go b/vendor/github.com/google/cel-go/cel/env.go
index 3bfe428992..57b0e8204c 100644
--- a/vendor/github.com/google/cel-go/cel/env.go
+++ b/vendor/github.com/google/cel-go/cel/env.go
@@ -44,9 +44,9 @@ type Ast struct {
 
 // NativeRep converts the AST to a Go-native representation.
 func (ast *Ast) NativeRep() *celast.AST {
 	if ast == nil {
 		return nil
 	}
 	return ast.impl
 }
 
@@ -58,13 +58,13 @@
 	if ast == nil {
 		return nil
 	}
 	pbExpr, _ := celast.ExprToProto(ast.NativeRep().Expr())
 	return pbExpr
 }
 
 // IsChecked returns whether the Ast value has been successfully type-checked.
 func (ast *Ast) IsChecked() bool {
 	return ast.NativeRep().IsChecked()
 }
 
 // SourceInfo returns character offset and newline position information about expression elements.
@@ -72,7 +72,7 @@ func (ast *Ast) SourceInfo() *exprpb.SourceInfo {
 	if ast == nil {
 		return nil
 	}
 	pbInfo, _ := celast.SourceInfoToProto(ast.NativeRep().SourceInfo())
 	return pbInfo
 }
 
@@ -95,7 +95,7 @@ func (ast *Ast) OutputType() *Type {
 	if ast == nil {
 		return types.ErrorType
 	}
 	return ast.NativeRep().GetType(ast.NativeRep().Expr().ID())
 }
 
 // Source returns a view of the input used to create the Ast. This source may be complete or
@@ -217,6 +217,6 @@ func (e *Env) Check(ast *Ast) (*Ast, *Issues) {
 	chk, err := e.initChecker()
 	if err != nil {
 		errs := common.NewErrors(ast.Source())
 		errs.ReportErrorString(common.NoLocation, err.Error())
 		return nil, NewIssuesWithSourceInfo(errs, ast.NativeRep().SourceInfo())
 	}
@@ -224,6 +224,6 @@ func (e *Env) Check(ast *Ast) (*Ast, *Issues) {
 	checked, errs := checker.Check(ast.NativeRep(), ast.Source(), chk)
 	if len(errs.GetErrors()) > 0 {
 		return nil, NewIssuesWithSourceInfo(errs, ast.NativeRep().SourceInfo())
 	}
 	// Manually create the Ast to ensure that the Ast source information (which may be more
 	// detailed than the information provided by Check), is returned to the caller.
@@ -244,7 +244,7 @@ func (e *Env) Check(ast *Ast) (*Ast, *Issues) {
 		}
 	}
 	// Apply additional validators on the type-checked result.
 	iss := NewIssuesWithSourceInfo(errs, ast.NativeRep().SourceInfo())
 	for _, v := range e.validators {
 		v.Validate(e, vConfig, checked, iss)
 	}
@@ -309,13 +309,13 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
 	copy(chkOptsCopy, e.chkOpts)
 
 	// Copy the declarations if needed.
 	if chk != nil {
 		// If the type-checker has already been instantiated, then the e.declarations have been
 		// validated within the chk instance.
 		chkOptsCopy = append(chkOptsCopy, checker.ValidatedDeclarations(chk))
 	}
 	varsCopy := make([]*decls.VariableDecl, len(e.variables))
 	copy(varsCopy, e.variables)
 
 	// Copy macros and program options
 	macsCopy := make([]parser.Macro, len(e.macros))
@@ -412,6 +412,6 @@ func (e *Env) Libraries() []string {
 	return libraries
 }
 
 // HasFunction returns whether a specific function has been configured in the environment
 func (e *Env) HasFunction(functionName string) bool {
 	_, ok := e.functions[functionName]
@@ -423,6 +423,6 @@ func (e *Env) Functions() map[string]*decls.FunctionDecl {
 	return e.functions
 }
 
 // HasValidator returns whether a specific ASTValidator has been configured in the environment.
 func (e *Env) HasValidator(name string) bool {
 	for _, v := range e.validators {
@@ -459,12 +459,12 @@ func (e *Env) ParseSource(src Source) (*Ast, *Issues) {
 
 // Program generates an evaluable instance of the Ast within the environment (Env).
 func (e *Env) Program(ast *Ast, opts ...ProgramOption) (Program, error) {
 	return e.PlanProgram(ast.NativeRep(), opts...)
 }
 
 // PlanProgram generates an evaluable instance of the AST in the go-native representation within
 // the environment (Env).
 func (e *Env) PlanProgram(a *celast.AST, opts ...ProgramOption) (Program, error) {
 	optSet := e.progOpts
 	if len(opts) != 0 {
 		mergedOpts := []ProgramOption{}
@@ -472,7 +472,7 @@ func (e *Env) PlanProgram(a *celast.AST, opts ...ProgramOption) (Program, error)
 		mergedOpts = append(mergedOpts, opts...)
 		optSet = mergedOpts
 	}
 	return newProgram(e, a, optSet)
 }
 
 // CELTypeAdapter returns the `types.Adapter` configured for the environment.
@@ -556,8 +556,8 @@ func (e *Env) PartialVars(vars any) (interpreter.PartialActivation, error) {
 // TODO: Consider adding an option to generate a Program.Residual to avoid round-tripping to an
 // Ast format and then Program again.
 func (e *Env) ResidualAst(a *Ast, details *EvalDetails) (*Ast, error) {
 	ast := a.NativeRep()
 	pruned := interpreter.PruneAst(ast.Expr(), ast.SourceInfo().MacroCalls(), details.State())
 	newAST := &Ast{source: a.Source(), impl: pruned}
 	expr, err := AstToString(newAST)
 	if err != nil {
@@ -583,7 +583,7 @@ func (e *Env) EstimateCost(ast *Ast, estimator checker.CostEstimator, opts ...ch
 	extendedOpts := make([]checker.CostOption, 0, len(e.costOptions))
 	extendedOpts = append(extendedOpts, opts...)
 	extendedOpts = append(extendedOpts, e.costOptions...)
 	return checker.Cost(ast.NativeRep(), estimator, extendedOpts...)
 }
 
 // configure applies a series of EnvOptions to the current environment.
@@ -615,9 +615,9 @@ func (e *Env) configure(opts []EnvOption) (*Env, error) {
 	if e.HasFeature(featureVariadicLogicalASTs) {
 		prsrOpts = append(prsrOpts, parser.EnableVariadicOperatorASTs(true))
 	}
 	if e.HasFeature(featureIdentEscapeSyntax) {
 		prsrOpts = append(prsrOpts, parser.EnableIdentEscapeSyntax(true))
 	}
 	e.prsr, err = parser.NewParser(prsrOpts...)
 	if err != nil {
 		return nil, err
@@ -770,10 +770,10 @@ func (i *Issues) Append(other *Issues) *Issues {
 	if i == nil {
 		return other
 	}
 	if other == nil || i == other {
 		return i
 	}
 	return NewIssuesWithSourceInfo(i.errs.Append(other.errs.GetErrors()), i.info)
 }
 
 // String converts the issues to a suitable display string.
@@ -807,7 +807,7 @@ type interopCELTypeProvider struct {
 
 // FindStructType returns a types.Type instance for the given fully-qualified typeName if one exists.
 //
 // This method proxies to the underlying ref.TypeProvider's FindType method and converts protobuf type
 // into a native type representation. If the conversion fails, the type is listed as not found.
 func (p *interopCELTypeProvider) FindStructType(typeName string) (*types.Type, bool) {
 	if et, found := p.FindType(typeName); found {
@@ -830,7 +830,7 @@ func (p *interopCELTypeProvider) FindStructFieldNames(typeName string) ([]string
 // FindStructFieldType returns a types.FieldType instance for the given fully-qualified typeName and field
 // name, if one exists.
 //
 // This method proxies to the underlying ref.TypeProvider's FindFieldType method and converts protobuf type
 // into a native type representation. If the conversion fails, the type is listed as not found.
 func (p *interopCELTypeProvider) FindStructFieldType(structType, fieldName string) (*types.FieldType, bool) {
 	if ft, found := p.FindFieldType(structType, fieldName); found {
diff --git a/vendor/github.com/google/cel-go/cel/inlining.go b/vendor/github.com/google/cel-go/cel/inlining.go
index a4530e19e7..8d9fde251c 100644
--- a/vendor/github.com/google/cel-go/cel/inlining.go
+++ b/vendor/github.com/google/cel-go/cel/inlining.go
@@ -60,7 +60,7 @@ func NewInlineVariable(name string, definition *Ast) *InlineVariable {
 // If the variable occurs more than once, the provided alias will be used to replace the expressions
 // where the variable name occurs.
 func NewInlineVariableWithAlias(name, alias string, definition *Ast) *InlineVariable {
 	return &InlineVariable{name: name, alias: alias, def: definition.NativeRep()}
 }
 
 // NewInliningOptimizer creates and optimizer which replaces variables with expression definitions.
diff --git a/vendor/github.com/google/cel-go/cel/io.go b/vendor/github.com/google/cel-go/cel/io.go
index a327c9672d..079ad0c1a1 100644
--- a/vendor/github.com/google/cel-go/cel/io.go
+++ b/vendor/github.com/google/cel-go/cel/io.go
@@ -28,7 +28,7 @@ import (
 	"github.com/google/cel-go/common/types/traits"
 	"github.com/google/cel-go/parser"
 
 	celpb "cel.dev/expr"
 	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
 	anypb "google.golang.org/protobuf/types/known/anypb"
 )
@@ -62,7 +62,7 @@ func AstToCheckedExpr(a *Ast) (*exprpb.CheckedExpr, error) {
 	if !a.IsChecked() {
 		return nil, fmt.Errorf("cannot convert unchecked ast")
 	}
 	return ast.ToProto(a.NativeRep())
 }
 
 // ParsedExprToAst converts a parsed expression proto message to an Ast.
@@ -99,6 +99,6 @@ func AstToParsedExpr(a *Ast) (*exprpb.ParsedExpr, error) {
 // Note, the conversion may not be an exact replica of the original expression, but will produce
 // a string that is semantically equivalent and whose textual representation is stable.
 func AstToString(a *Ast) (string, error) {
 	return parser.Unparse(a.NativeRep().Expr(), a.NativeRep().SourceInfo())
 }
 
@@ -142,11 +142,11 @@ func ValueAsProto(res ref.Val) (*celpb.Value, error) {
 		elts := make([]*celpb.Value, 0, int64(sz))
 		for i := types.Int(0); i < sz; i++ {
 			v, err := ValueAsProto(l.Get(i))
 			if err != nil {
 				return nil, err
 			}
 			elts = append(elts, v)
 		}
 		return &celpb.Value{
 			Kind: &celpb.Value_ListValue{
 				ListValue: &celpb.ListValue{Values: elts}}}, nil
@@ -182,13 +182,13 @@ func ValueAsProto(res ref.Val) (*celpb.Value, error) {
 	case types.UintType:
 		return &celpb.Value{
 			Kind: &celpb.Value_Uint64Value{Uint64Value: res.Value().(uint64)}}, nil
 	default:
 		any, err := res.ConvertToNative(anyPbType)
 		if err != nil {
 			return nil, err
 		}
 		return &celpb.Value{
 			Kind: &celpb.Value_ObjectValue{ObjectValue: any.(*anypb.Any)}}, nil
 	}
 }
 
@@ -209,6 +209,6 @@ var (
 	anyPbType = reflect.TypeOf(&anypb.Any{})
 )
 // ValueToRefValue converts between google.api.expr.v1alpha1.Value and ref.Val.
 func ValueToRefValue(adapter types.Adapter, v *exprpb.Value) (ref.Val, error) {
 	return AlphaProtoAsValue(adapter, v)
 }
@@ -241,12 +241,12 @@ func ProtoAsValue(adapter types.Adapter, v *celpb.Value) (ref.Val, error) {
 	case *celpb.Value_BytesValue:
 		return types.Bytes(v.GetBytesValue()), nil
 	case *celpb.Value_ObjectValue:
 		any := v.GetObjectValue()
 		msg, err := anypb.UnmarshalNew(any, proto.UnmarshalOptions{DiscardUnknown: true})
 		if err != nil {
 			return nil, err
 		}
 		return adapter.NativeToValue(msg), nil
 	case *celpb.Value_MapValue:
 		m := v.GetMapValue()
 		entries := make(map[ref.Val]ref.Val)
@@ -256,24 +256,24 @@ func ProtoAsValue(adapter types.Adapter, v *celpb.Value) (ref.Val, error) {
 			return nil, err
 		}
 		pb, err := ProtoAsValue(adapter, entry.Value)
 		if err != nil {
 			return nil, err
 		}
 		entries[key] = pb
 	}
 	return adapter.NativeToValue(entries), nil
 	case *celpb.Value_ListValue:
 		l := v.GetListValue()
 		elts := make([]ref.Val, len(l.Values))
 		for i, e := range l.Values {
 			rv, err := ProtoAsValue(adapter, e)
 			if err != nil {
 				return nil, err
 			}
 			elts[i] = rv
 		}
 		return adapter.NativeToValue(elts), nil
+<<<<<<< HEAD
 	case *celpb.Value_TypeValue:
+=======
+	case *exprpb.Value_TypeValue:
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		typeName := v.GetTypeValue()
 		tv, ok := typeNameToTypeValue[typeName]
 		if ok {
@@ -283,6 +406,7 @@ func ProtoAsValue(adapter types.Adapter, v *celpb.Value) (ref.Val, error) {
 	}
 	return nil, errors.New("unknown value")
 }
+<<<<<<< HEAD
 
 func convertProto(src, dst proto.Message) error {
 	pb, err := proto.Marshal(src)
@@ -292,3 +416,5 @@ func convertProto(src, dst proto.Message) error {
 	err = proto.Unmarshal(pb, dst)
 	return err
 }
+=======
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/github.com/google/cel-go/cel/library.go b/vendor/github.com/google/cel-go/cel/library.go
index c0aef50190..c0348d9024 100644
--- a/vendor/github.com/google/cel-go/cel/library.go
+++ b/vendor/github.com/google/cel-go/cel/library.go
@@ -15,7 +15,10 @@
 package cel
 
 import (
+<<<<<<< HEAD
 	"fmt"
+=======
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"math"
 	"strconv"
 	"strings"
@@ -36,11 +39,17 @@ const (
 	optMapMacro                = "optMap"
 	optFlatMapMacro            = "optFlatMap"
 	hasValueFunc               = "hasValue"
+<<<<<<< HEAD
 	unwrapOptFunc              = "unwrapOpt"
 	optionalNoneFunc           = "optional.none"
 	optionalOfFunc             = "optional.of"
 	optionalOfNonZeroValueFunc = "optional.ofNonZeroValue"
 	optionalUnwrapFunc         = "optional.unwrap"
+=======
+	optionalNoneFunc           = "optional.none"
+	optionalOfFunc             = "optional.of"
+	optionalOfNonZeroValueFunc = "optional.ofNonZeroValue"
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	valueFunc                  = "value"
 	unusedIterVar              = "#unused"
 )
@@ -263,6 +272,7 @@ func (stdLibrary) ProgramOptions() []ProgramOption {
 // be expressed with `optMap`.
 //
 //	msg.?elements.optFlatMap(e, e[?0]) // return the first element if present.
+<<<<<<< HEAD
 //
 // # First
 //
@@ -294,6 +304,8 @@ func (stdLibrary) ProgramOptions() []ProgramOption {
 //	optional.unwrap([optional.of(42), optional.none()]) == [42]
 //	[optional.of(42), optional.none()].unwrapOpt() == [42]
+=======
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func OptionalTypes(opts ...OptionalTypesOption) EnvOption {
 	lib := &optionalLib{version: math.MaxUint32}
 	for _, opt := range opts {
@@ -337,7 +349,10 @@ func (lib *optionalLib) CompileOptions() []EnvOption {
 	optionalTypeV := OptionalType(paramTypeV)
 	listTypeV := ListType(paramTypeV)
 	mapTypeKV := MapType(paramTypeK, paramTypeV)
+<<<<<<< HEAD
 	listOptionalTypeV := ListType(optionalTypeV)
+=======
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	opts := []EnvOption{
 		// Enable the optional syntax in the parser.
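The hunks above touch cel-go's optional-types library (`optMap`, `optFlatMap`, `optional.unwrap`). For orientation, here is a minimal sketch of how a consumer enables and uses that library; the expression, variable name, and `orValue` fallback below are illustrative and not part of this patch:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	// OptionalTypes is the EnvOption defined in the hunk above.
	env, err := cel.NewEnv(
		cel.Variable("name", cel.StringType),
		cel.OptionalTypes(),
	)
	if err != nil {
		panic(err)
	}
	// optMap only evaluates its body when the optional has a value;
	// orValue supplies the fallback for optional.none().
	ast, iss := env.Compile(`optional.of(name).optMap(n, n + '!').orValue('anonymous')`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]any{"name": "chains"})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // chains!
}
```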
@@ -410,6 +425,7 @@ func (lib *optionalLib) CompileOptions() []EnvOption {
 	if lib.version >= 1 {
 		opts = append(opts, Macros(ReceiverMacro(optFlatMapMacro, 2, optFlatMap)))
 	}
+<<<<<<< HEAD
 	if lib.version >= 2 {
 		opts = append(opts, Function("last",
@@ -450,6 +466,8 @@ func (lib *optionalLib) CompileOptions() []EnvOption {
 				UnaryBinding(optUnwrap))))
 	}
+=======
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return opts
 }
 
@@ -478,7 +496,11 @@ func optMap(meh MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *
 		meh.NewList(),
 		unusedIterVar,
 		varName,
+<<<<<<< HEAD
 		meh.NewMemberCall(valueFunc, meh.Copy(target)),
+=======
+		meh.NewMemberCall(valueFunc, target),
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		meh.NewLiteral(types.False),
 		meh.NewIdent(varName),
 		mapExpr,
@@ -505,7 +527,11 @@ func optFlatMap(meh MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Exp
 		meh.NewList(),
 		unusedIterVar,
 		varName,
+<<<<<<< HEAD
 		meh.NewMemberCall(valueFunc, meh.Copy(target)),
+=======
+		meh.NewMemberCall(valueFunc, target),
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		meh.NewLiteral(types.False),
 		meh.NewIdent(varName),
 		mapExpr,
@@ -514,6 +540,7 @@ func optFlatMap(meh MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Exp
 	), nil
 }
 
+<<<<<<< HEAD
 func optUnwrap(value ref.Val) ref.Val {
 	list := value.(traits.Lister)
 	var unwrappedList []ref.Val
@@ -531,6 +558,8 @@ func optUnwrap(value ref.Val) ref.Val {
 	return types.DefaultTypeAdapter.NativeToValue(unwrappedList)
 }
 
+=======
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func enableOptionalSyntax() EnvOption {
 	return func(e *Env) (*Env, error) {
 		e.prsrOpts = append(e.prsrOpts, parser.EnableOptionalSyntax(true))
@@ -538,12 +567,15 @@ func enableOptionalSyntax() EnvOption {
 	}
 }
 
+<<<<<<< HEAD
 // EnableErrorOnBadPresenceTest enables error generation when a presence test or optional field
 // selection is performed on a primitive type.
 func EnableErrorOnBadPresenceTest(value bool) EnvOption {
 	return features(featureEnableErrorOnBadPresenceTest, value)
 }
 
+=======
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func decorateOptionalOr(i interpreter.Interpretable) (interpreter.Interpretable, error) {
 	call, ok := i.(interpreter.InterpretableCall)
 	if !ok {
@@ -769,7 +801,11 @@ var (
 func timestampGetFullYear(ts, tz ref.Val) ref.Val {
 	t, err := inTimeZone(ts, tz)
 	if err != nil {
+<<<<<<< HEAD
 		return types.NewErrFromString(err.Error())
+=======
+		return types.NewErr(err.Error())
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 	return types.Int(t.Year())
 }
@@ -777,7 +813,11 @@ func timestampGetFullYear(ts, tz ref.Val) ref.Val {
 func timestampGetMonth(ts, tz ref.Val) ref.Val {
 	t, err := inTimeZone(ts, tz)
 	if err != nil {
+<<<<<<< HEAD
 		return types.NewErrFromString(err.Error())
+=======
+		return types.NewErr(err.Error())
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 	// CEL spec indicates that the month should be 0-based, but the Time value
 	// for Month() is 1-based.
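These timezone-aware helpers back the CEL standard-library timestamp accessors. A small illustrative usage, with an expression and expected value of my own choosing and assuming stock cel-go:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv()
	if err != nil {
		panic(err)
	}
	// inTimeZone shifts the instant before the field is read, and getMonth
	// is 0-based per the CEL spec: 2024-03-01T00:30Z is still Feb 29, 19:30
	// in America/New_York, so the result is February, i.e. 1.
	ast, iss := env.Compile(
		`timestamp('2024-03-01T00:30:00Z').getMonth('America/New_York')`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(cel.NoVars())
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // 1
}
```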
@@ -787,7 +827,11 @@ func timestampGetMonth(ts, tz ref.Val) ref.Val {
 func timestampGetDayOfYear(ts, tz ref.Val) ref.Val {
 	t, err := inTimeZone(ts, tz)
 	if err != nil {
+<<<<<<< HEAD
 		return types.NewErrFromString(err.Error())
+=======
+		return types.NewErr(err.Error())
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 	return types.Int(t.YearDay() - 1)
 }
@@ -795,7 +839,11 @@ func timestampGetDayOfYear(ts, tz ref.Val) ref.Val {
 func timestampGetDayOfMonthZeroBased(ts, tz ref.Val) ref.Val {
 	t, err := inTimeZone(ts, tz)
 	if err != nil {
+<<<<<<< HEAD
 		return types.NewErrFromString(err.Error())
+=======
+		return types.NewErr(err.Error())
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 	return types.Int(t.Day() - 1)
 }
@@ -803,7 +851,11 @@ func timestampGetDayOfMonthZeroBased(ts, tz ref.Val) ref.Val {
 func timestampGetDayOfMonthOneBased(ts, tz ref.Val) ref.Val {
 	t, err := inTimeZone(ts, tz)
 	if err != nil {
+<<<<<<< HEAD
 		return types.NewErrFromString(err.Error())
+=======
+		return types.NewErr(err.Error())
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 	return types.Int(t.Day())
 }
@@ -811,7 +863,11 @@ func timestampGetDayOfMonthOneBased(ts, tz ref.Val) ref.Val {
 func timestampGetDayOfWeek(ts, tz ref.Val) ref.Val {
 	t, err := inTimeZone(ts, tz)
 	if err != nil {
+<<<<<<< HEAD
 		return types.NewErrFromString(err.Error())
+=======
+		return types.NewErr(err.Error())
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 	return types.Int(t.Weekday())
 }
@@ -819,7 +875,11 @@ func timestampGetDayOfWeek(ts, tz ref.Val) ref.Val {
 func timestampGetHours(ts, tz ref.Val) ref.Val {
 	t, err := inTimeZone(ts, tz)
 	if err != nil {
+<<<<<<< HEAD
 		return types.NewErrFromString(err.Error())
+=======
+		return types.NewErr(err.Error())
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 	return types.Int(t.Hour())
 }
@@ -827,7 +887,11 @@ func timestampGetHours(ts, tz ref.Val) ref.Val {
 func timestampGetMinutes(ts, tz ref.Val) ref.Val {
 	t, err := inTimeZone(ts, tz)
 	if err != nil {
+<<<<<<< HEAD
 		return types.NewErrFromString(err.Error())
+=======
+		return types.NewErr(err.Error())
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 	return types.Int(t.Minute())
 }
@@ -835,7 +899,11 @@ func timestampGetMinutes(ts, tz ref.Val) ref.Val {
 func timestampGetSeconds(ts, tz ref.Val) ref.Val {
 	t, err := inTimeZone(ts, tz)
 	if err != nil {
+<<<<<<< HEAD
 		return types.NewErrFromString(err.Error())
+=======
+		return types.NewErr(err.Error())
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 	return types.Int(t.Second())
 }
@@ -843,7 +911,11 @@ func timestampGetSeconds(ts, tz ref.Val) ref.Val {
 func timestampGetMilliseconds(ts, tz ref.Val) ref.Val {
 	t, err := inTimeZone(ts, tz)
 	if err != nil {
+<<<<<<< HEAD
 		return types.NewErrFromString(err.Error())
+=======
+		return types.NewErr(err.Error())
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 	return types.Int(t.Nanosecond() / 1000000)
 }
diff --git a/vendor/github.com/google/cel-go/cel/optimizer.go b/vendor/github.com/google/cel-go/cel/optimizer.go
index 9a2a97a647..3cf5022328 100644
--- a/vendor/github.com/google/cel-go/cel/optimizer.go
+++ b/vendor/github.com/google/cel-go/cel/optimizer.go
@@ -15,8 +15,11 @@
 package cel
 
 import (
+<<<<<<< HEAD
 	"sort"
+=======
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"github.com/google/cel-go/common"
 	"github.com/google/cel-go/common/ast"
 	"github.com/google/cel-go/common/types"
@@ -48,8 +51,13 @@ func NewStaticOptimizer(optimizers ...ASTOptimizer) *StaticOptimizer {
 // If issues are encountered, the Issues.Err() return value will be non-nil.
 func (opt *StaticOptimizer) Optimize(env *Env, a *Ast) (*Ast, *Issues) {
 	// Make a copy of the AST to be optimized.
+<<<<<<< HEAD
 	optimized := ast.Copy(a.NativeRep())
 	ids := newIDGenerator(ast.MaxID(a.NativeRep()))
+=======
+	optimized := ast.Copy(a.impl)
+	ids := newIDGenerator(ast.MaxID(a.impl))
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	// Create the optimizer context, could be pooled in the future.
 	issues := NewIssues(common.NewErrors(a.Source()))
@@ -86,7 +94,11 @@ func (opt *StaticOptimizer) Optimize(env *Env, a *Ast) (*Ast, *Issues) {
 		if iss.Err() != nil {
 			return nil, iss
 		}
+<<<<<<< HEAD
 		optimized = checked.NativeRep()
+=======
+		optimized = checked.impl
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 
 	// Return the optimized result.
@@ -100,10 +112,15 @@ func (opt *StaticOptimizer) Optimize(env *Env, a *Ast) (*Ast, *Issues) {
 // that the ids within the expression correspond to the ids within macros.
 func normalizeIDs(idGen ast.IDGenerator, optimized ast.Expr, info *ast.SourceInfo) {
 	optimized.RenumberIDs(idGen)
+<<<<<<< HEAD
+=======
+
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if len(info.MacroCalls()) == 0 {
 		return
 	}
+<<<<<<< HEAD
 	// Sort the macro ids to make sure that the renumbering of macro-specific variables
 	// is stable across normalization calls.
 	sortedMacroIDs := []int64{}
@@ -115,6 +132,11 @@ func normalizeIDs(idGen ast.IDGenerator, optimized ast.Expr, info *ast.SourceInf
 	// First, update the macro call ids themselves.
 	callIDMap := map[int64]int64{}
 	for _, id := range sortedMacroIDs {
+=======
+	// First, update the macro call ids themselves.
+	callIDMap := map[int64]int64{}
+	for id := range info.MacroCalls() {
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		callIDMap[id] = idGen(id)
 	}
 	// Then update the macro call definitions which refer to these ids, but
@@ -125,8 +147,12 @@ func normalizeIDs(idGen ast.IDGenerator, optimized ast.Expr, info *ast.SourceInf
 		call ast.Expr
 	}
 	macroUpdates := []macroUpdate{}
+<<<<<<< HEAD
 	for _, oldID := range sortedMacroIDs {
 		newID := callIDMap[oldID]
+=======
+	for oldID, newID := range callIDMap {
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		call, found := info.GetMacroCall(oldID)
 		if !found {
 			continue
@@ -144,7 +170,10 @@ func cleanupMacroRefs(expr ast.Expr, info *ast.SourceInfo) {
 	if len(info.MacroCalls()) == 0 {
 		return
 	}
+<<<<<<< HEAD
+=======
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	// Sanitize the macro call references once the optimized expression has been computed
 	// and the ids normalized between the expression and the macros.
 	exprRefMap := make(map[int64]struct{})
@@ -211,6 +240,7 @@ type OptimizerContext struct {
 	*Issues
 }
 
+<<<<<<< HEAD
 // ExtendEnv auguments the context's environment with the additional options.
 func (opt *OptimizerContext) ExtendEnv(opts ...EnvOption) error {
 	e, err := opt.Env.Extend(opts...)
@@ -221,6 +251,8 @@ func (opt *OptimizerContext) ExtendEnv(opts ...EnvOption) error {
 	return nil
 }
 
+=======
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // ASTOptimizer applies an optimization over an AST and returns the optimized result.
 type ASTOptimizer interface {
 	// Optimize optimizes a type-checked AST within an Environment and accumulates any issues.
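Since `Optimize` returns a fresh `*Ast` plus `*Issues`, a caller wires it up roughly as sketched below. The constant-folding optimizer from cel-go's `ext` package is used as one stock `ASTOptimizer`; its availability in the vendored version is an assumption, and the expression is illustrative:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

func main() {
	env, err := cel.NewEnv()
	if err != nil {
		panic(err)
	}
	checked, iss := env.Compile(`1 + 2 + 3 == 6`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	// NewStaticOptimizer and Optimize are the APIs shown in the hunk above.
	folder, err := ext.NewConstantFoldingOptimizer()
	if err != nil {
		panic(err)
	}
	optimized, iss := cel.NewStaticOptimizer(folder).Optimize(env, checked)
	if iss != nil && iss.Err() != nil {
		panic(iss.Err())
	}
	// The folded AST unparses to a constant.
	str, err := cel.AstToString(optimized)
	if err != nil {
		panic(err)
	}
	fmt.Println(str) // true
}
```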
@@ -274,11 +306,14 @@ func (opt *optimizerExprFactory) SetMacroCall(id int64, expr ast.Expr) { opt.sourceInfo.SetMacroCall(id, expr) } +<<<<<<< HEAD // MacroCalls returns the map of macro calls currently in the context. func (opt *optimizerExprFactory) MacroCalls() map[int64]ast.Expr { return opt.sourceInfo.MacroCalls() } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // NewBindMacro creates an AST expression representing the expanded bind() macro, and a macro expression // representing the unexpanded call signature to be inserted into the source info macro call metadata. func (opt *optimizerExprFactory) NewBindMacro(macroID int64, varName string, varInit, remaining ast.Expr) (astExpr, macroExpr ast.Expr) { diff --git a/vendor/github.com/google/cel-go/cel/options.go b/vendor/github.com/google/cel-go/cel/options.go index 85f777e959..f947a0d05c 100644 --- a/vendor/github.com/google/cel-go/cel/options.go +++ b/vendor/github.com/google/cel-go/cel/options.go @@ -61,6 +61,7 @@ const ( // compressing the logic graph to a single call when multiple like-operator // expressions occur: e.g. a && b && c && d -> call(_&&_, [a, b, c, d]) featureVariadicLogicalASTs +<<<<<<< HEAD // Enable error generation when a presence test or optional field selection is // performed on a primitive type. @@ -68,6 +69,8 @@ const ( // Enable escape syntax for field identifiers (`). featureIdentEscapeSyntax +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // EnvOption is a functional interface for configuring the environment. @@ -250,6 +253,7 @@ func Abbrevs(qualifiedNames ...string) EnvOption { } } +<<<<<<< HEAD // customTypeRegistry is an internal-only interface containing the minimum methods required to support // custom types. It is a subset of methods from ref.TypeRegistry. type customTypeRegistry interface { @@ -257,6 +261,8 @@ type customTypeRegistry interface { RegisterType(...ref.Type) error } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Types adds one or more type declarations to the environment, allowing for construction of // type-literals whose definitions are included in the common expression built-in set. // @@ -269,7 +275,16 @@ type customTypeRegistry interface { // Note: This option must be specified after the CustomTypeProvider option when used together. func Types(addTypes ...any) EnvOption { return func(e *Env) (*Env, error) { +<<<<<<< HEAD reg, isReg := e.provider.(customTypeRegistry) +======= + var reg ref.TypeRegistry + var isReg bool + reg, isReg = e.provider.(*types.Registry) + if !isReg { + reg, isReg = e.provider.(ref.TypeRegistry) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !isReg { return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider) } @@ -306,7 +321,11 @@ func Types(addTypes ...any) EnvOption { // extension or by re-using the same EnvOption with another NewEnv() call. 
func TypeDescs(descs ...any) EnvOption { return func(e *Env) (*Env, error) { +<<<<<<< HEAD reg, isReg := e.provider.(customTypeRegistry) +======= + reg, isReg := e.provider.(ref.TypeRegistry) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !isReg { return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider) } @@ -354,7 +373,11 @@ func TypeDescs(descs ...any) EnvOption { } } +<<<<<<< HEAD func registerFileSet(reg customTypeRegistry, fileSet *descpb.FileDescriptorSet) error { +======= +func registerFileSet(reg ref.TypeRegistry, fileSet *descpb.FileDescriptorSet) error { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) files, err := protodesc.NewFiles(fileSet) if err != nil { return fmt.Errorf("protodesc.NewFiles(%v) failed: %v", fileSet, err) @@ -362,7 +385,11 @@ func registerFileSet(reg customTypeRegistry, fileSet *descpb.FileDescriptorSet) return registerFiles(reg, files) } +<<<<<<< HEAD func registerFiles(reg customTypeRegistry, files *protoregistry.Files) error { +======= +func registerFiles(reg ref.TypeRegistry, files *protoregistry.Files) error { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var err error files.RangeFiles(func(fd protoreflect.FileDescriptor) bool { err = reg.RegisterDescriptor(fd) @@ -621,12 +648,15 @@ func EnableMacroCallTracking() EnvOption { return features(featureEnableMacroCallTracking, true) } +<<<<<<< HEAD // EnableIdentifierEscapeSyntax enables identifier escaping (`) syntax for // fields. func EnableIdentifierEscapeSyntax() EnvOption { return features(featureIdentEscapeSyntax, true) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // CrossTypeNumericComparisons makes it possible to compare across numeric types, e.g. double < int func CrossTypeNumericComparisons(enabled bool) EnvOption { return features(featureCrossTypeNumericComparisons, enabled) @@ -664,6 +694,7 @@ func ParserExpressionSizeLimit(limit int) EnvOption { } } +<<<<<<< HEAD // EnableHiddenAccumulatorName sets the parser to use the identifier '@result' for accumulators // which is not normally accessible from CEL source. func EnableHiddenAccumulatorName(enabled bool) EnvOption { @@ -673,6 +704,8 @@ func EnableHiddenAccumulatorName(enabled bool) EnvOption { } } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func maybeInteropProvider(provider any) (types.Provider, error) { switch p := provider.(type) { case types.Provider: diff --git a/vendor/github.com/google/cel-go/cel/program.go b/vendor/github.com/google/cel-go/cel/program.go index 49bd537838..ef9321acd0 100644 --- a/vendor/github.com/google/cel-go/cel/program.go +++ b/vendor/github.com/google/cel-go/cel/program.go @@ -19,7 +19,10 @@ import ( "fmt" "sync" +<<<<<<< HEAD "github.com/google/cel-go/common/ast" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/interpreter" @@ -100,9 +103,12 @@ type EvalDetails struct { // State of the evaluation, non-nil if the OptTrackState or OptExhaustiveEval is specified // within EvalOptions. func (ed *EvalDetails) State() interpreter.EvalState { +<<<<<<< HEAD if ed == nil { return interpreter.NewEvalState() } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ed.state } @@ -155,7 +161,11 @@ func (p *prog) clone() *prog { // ProgramOption values. // // If the program cannot be configured the prog will be nil, with a non-nil error response. 
+<<<<<<< HEAD func newProgram(e *Env, a *ast.AST, opts []ProgramOption) (Program, error) { +======= +func newProgram(e *Env, a *Ast, opts []ProgramOption) (Program, error) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Build the dispatcher, interpreter, and default program value. disp := interpreter.NewDispatcher() @@ -191,6 +201,7 @@ func newProgram(e *Env, a *ast.AST, opts []ProgramOption) (Program, error) { // Set the attribute factory after the options have been set. var attrFactory interpreter.AttributeFactory +<<<<<<< HEAD attrFactorOpts := []interpreter.AttrFactoryOption{ interpreter.EnableErrorOnBadPresenceTest(p.HasFeature(featureEnableErrorOnBadPresenceTest)), } @@ -198,6 +209,12 @@ func newProgram(e *Env, a *ast.AST, opts []ProgramOption) (Program, error) { attrFactory = interpreter.NewPartialAttributeFactory(e.Container, e.adapter, e.provider, attrFactorOpts...) } else { attrFactory = interpreter.NewAttributeFactory(e.Container, e.adapter, e.provider, attrFactorOpts...) +======= + if p.evalOpts&OptPartialEval == OptPartialEval { + attrFactory = interpreter.NewPartialAttributeFactory(e.Container, e.adapter, e.provider) + } else { + attrFactory = interpreter.NewAttributeFactory(e.Container, e.adapter, e.provider) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } interp := interpreter.NewInterpreter(disp, e.Container, e.provider, e.adapter, attrFactory) p.interpreter = interp @@ -259,9 +276,15 @@ func newProgram(e *Env, a *ast.AST, opts []ProgramOption) (Program, error) { return p.initInterpretable(a, decorators) } +<<<<<<< HEAD func (p *prog) initInterpretable(a *ast.AST, decs []interpreter.InterpretableDecorator) (*prog, error) { // When the AST has been exprAST it contains metadata that can be used to speed up program execution. interpretable, err := p.interpreter.NewInterpretable(a, decs...) +======= +func (p *prog) initInterpretable(a *Ast, decs []interpreter.InterpretableDecorator) (*prog, error) { + // When the AST has been exprAST it contains metadata that can be used to speed up program execution. + interpretable, err := p.interpreter.NewInterpretable(a.impl, decs...) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } diff --git a/vendor/github.com/google/cel-go/checker/BUILD.bazel b/vendor/github.com/google/cel-go/checker/BUILD.bazel index 678b412a95..370a8a5d99 100644 --- a/vendor/github.com/google/cel-go/checker/BUILD.bazel +++ b/vendor/github.com/google/cel-go/checker/BUILD.bazel @@ -16,6 +16,10 @@ go_library( "options.go", "printer.go", "scopes.go", +<<<<<<< HEAD +======= + "standard.go", +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "types.go", ], importpath = "github.com/google/cel-go/checker", diff --git a/vendor/github.com/google/cel-go/checker/checker.go b/vendor/github.com/google/cel-go/checker/checker.go index 6824af7a54..a95bcf9950 100644 --- a/vendor/github.com/google/cel-go/checker/checker.go +++ b/vendor/github.com/google/cel-go/checker/checker.go @@ -496,6 +496,7 @@ func (c *checker) checkComprehension(e ast.Expr) { comp := e.AsComprehension() c.check(comp.IterRange()) c.check(comp.AccuInit()) +<<<<<<< HEAD rangeType := substitute(c.mappings, c.getType(comp.IterRange()), false) // Create a scope for the comprehension since it has a local accumulation variable. @@ -522,6 +523,18 @@ func (c *checker) checkComprehension(e ast.Expr) { // var2Type represents the map entry value for two-variable comprehensions. 
var2Type = rangeType.Parameters()[1] } +======= + accuType := c.getType(comp.AccuInit()) + rangeType := substitute(c.mappings, c.getType(comp.IterRange()), false) + var varType *types.Type + + switch rangeType.Kind() { + case types.ListKind: + varType = rangeType.Parameters()[0] + case types.MapKind: + // Ranges over the keys. + varType = rangeType.Parameters()[0] +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) case types.DynKind, types.ErrorKind, types.TypeParamKind: // Set the range type to DYN to prevent assignment to a potentially incorrect type // at a later point in type-checking. The isAssignable call will update the type @@ -529,6 +542,7 @@ func (c *checker) checkComprehension(e ast.Expr) { c.isAssignable(types.DynType, rangeType) // Set the range iteration variable to type DYN as well. varType = types.DynType +<<<<<<< HEAD if comp.HasIterVar2() { var2Type = types.DynType } @@ -546,6 +560,20 @@ func (c *checker) checkComprehension(e ast.Expr) { if comp.HasIterVar2() { c.env.AddIdents(decls.NewVariable(comp.IterVar2(), var2Type)) } +======= + default: + c.errors.notAComprehensionRange(comp.IterRange().ID(), c.location(comp.IterRange()), rangeType) + varType = types.ErrorType + } + + // Create a scope for the comprehension since it has a local accumulation variable. + // This scope will contain the accumulation variable used to compute the result. + c.env = c.env.enterScope() + c.env.AddIdents(decls.NewVariable(comp.AccuVar(), accuType)) + // Create a block scope for the loop. + c.env = c.env.enterScope() + c.env.AddIdents(decls.NewVariable(comp.IterVar(), varType)) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Check the variable references in the condition and step. c.check(comp.LoopCondition()) c.assertType(comp.LoopCondition(), types.BoolType) diff --git a/vendor/github.com/google/cel-go/checker/cost.go b/vendor/github.com/google/cel-go/checker/cost.go index b9cd8a2ed3..9378cd653b 100644 --- a/vendor/github.com/google/cel-go/checker/cost.go +++ b/vendor/github.com/google/cel-go/checker/cost.go @@ -28,6 +28,7 @@ import ( // CostEstimator estimates the sizes of variable length input data and the costs of functions. type CostEstimator interface { +<<<<<<< HEAD // EstimateSize returns a SizeEstimate for the given AstNode, or nil if the estimator has no // estimate to provide. // @@ -42,6 +43,17 @@ type CostEstimator interface { // EstimateCallCost returns the estimated cost of an invocation, or nil if the estimator has no // estimate to provide. +======= + // EstimateSize returns a SizeEstimate for the given AstNode, or nil if + // the estimator has no estimate to provide. The size is equivalent to the result of the CEL `size()` function: + // length of strings and bytes, number of map entries or number of list items. + // EstimateSize is only called for AstNodes where + // CEL does not know the size; EstimateSize is not called for values defined inline in CEL where the size + // is already obvious to CEL. + EstimateSize(element AstNode) *SizeEstimate + // EstimateCallCost returns the estimated cost of an invocation, or nil if + // the estimator has no estimate to provide. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) EstimateCallCost(function, overloadID string, target *AstNode, args []AstNode) *CallEstimate } @@ -49,7 +61,10 @@ type CostEstimator interface { // The ResultSize should only be provided if the call results in a map, list, string or bytes. 
type CallEstimate struct { CostEstimate +<<<<<<< HEAD +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ResultSize *SizeEstimate } @@ -59,6 +74,7 @@ type AstNode interface { // represent type directly reachable from the provided type declarations. // The first path element is a variable. All subsequent path elements are one of: field name, '@items', '@keys', '@values'. Path() []string +<<<<<<< HEAD // Type returns the deduced type of the AstNode. Type() *types.Type @@ -66,6 +82,12 @@ type AstNode interface { // Expr returns the expression of the AstNode. Expr() ast.Expr +======= + // Type returns the deduced type of the AstNode. + Type() *types.Type + // Expr returns the expression of the AstNode. + Expr() ast.Expr +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // ComputedSize returns a size estimate of the AstNode derived from information available in the CEL expression. // For constants and inline list and map declarations, the exact size is returned. For concatenated list, strings // and bytes, the size is derived from the size estimates of the operands. nil is returned if there is no @@ -93,7 +115,40 @@ func (e astNode) Expr() ast.Expr { } func (e astNode) ComputedSize() *SizeEstimate { +<<<<<<< HEAD return e.derivedSize +======= + if e.derivedSize != nil { + return e.derivedSize + } + var v uint64 + switch e.expr.Kind() { + case ast.LiteralKind: + switch ck := e.expr.AsLiteral().(type) { + case types.String: + // converting to runes here is an O(n) operation, but + // this is consistent with how size is computed at runtime, + // and how the language definition defines string size + v = uint64(len([]rune(ck))) + case types.Bytes: + v = uint64(len(ck)) + case types.Bool, types.Double, types.Duration, + types.Int, types.Timestamp, types.Uint, + types.Null: + v = uint64(1) + default: + return nil + } + case ast.ListKind: + v = uint64(e.expr.AsList().Size()) + case ast.MapKind: + v = uint64(e.expr.AsMap().Size()) + default: + return nil + } + + return &SizeEstimate{Min: v, Max: v} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // SizeEstimate represents an estimated size of a variable length string, bytes, map or list. @@ -101,6 +156,7 @@ type SizeEstimate struct { Min, Max uint64 } +<<<<<<< HEAD // UnknownSizeEstimate returns a size between 0 and max uint func UnknownSizeEstimate() SizeEstimate { return unknownSizeEstimate @@ -111,6 +167,8 @@ func FixedSizeEstimate(size uint64) SizeEstimate { return SizeEstimate{Min: size, Max: size} } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Add adds to another SizeEstimate and returns the sum. // If add would result in an uint64 overflow, the result is math.MaxUint64. func (se SizeEstimate) Add(sizeEstimate SizeEstimate) SizeEstimate { @@ -165,6 +223,7 @@ type CostEstimate struct { Min, Max uint64 } +<<<<<<< HEAD // UnknownCostEstimate returns a cost with an unknown impact. func UnknownCostEstimate() CostEstimate { return unknownCostEstimate @@ -175,12 +234,19 @@ func FixedCostEstimate(cost uint64) CostEstimate { return CostEstimate{Min: cost, Max: cost} } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Add adds the costs and returns the sum. // If add would result in an uint64 overflow for the min or max, the value is set to math.MaxUint64. 
func (ce CostEstimate) Add(cost CostEstimate) CostEstimate { return CostEstimate{ +<<<<<<< HEAD Min: addUint64NoOverflow(ce.Min, cost.Min), Max: addUint64NoOverflow(ce.Max, cost.Max), +======= + addUint64NoOverflow(ce.Min, cost.Min), + addUint64NoOverflow(ce.Max, cost.Max), +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -188,8 +254,13 @@ func (ce CostEstimate) Add(cost CostEstimate) CostEstimate { // If multiply would result in an uint64 overflow, the result is math.MaxUint64. func (ce CostEstimate) Multiply(cost CostEstimate) CostEstimate { return CostEstimate{ +<<<<<<< HEAD Min: multiplyUint64NoOverflow(ce.Min, cost.Min), Max: multiplyUint64NoOverflow(ce.Max, cost.Max), +======= + multiplyUint64NoOverflow(ce.Min, cost.Min), + multiplyUint64NoOverflow(ce.Max, cost.Max), +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -197,8 +268,13 @@ func (ce CostEstimate) Multiply(cost CostEstimate) CostEstimate { // nearest integer of the result, rounded up. func (ce CostEstimate) MultiplyByCostFactor(costPerUnit float64) CostEstimate { return CostEstimate{ +<<<<<<< HEAD Min: multiplyByCostFactor(ce.Min, costPerUnit), Max: multiplyByCostFactor(ce.Max, costPerUnit), +======= + multiplyByCostFactor(ce.Min, costPerUnit), + multiplyByCostFactor(ce.Max, costPerUnit), +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -245,6 +321,52 @@ func multiplyByCostFactor(x uint64, y float64) uint64 { return uint64(ceil) } +<<<<<<< HEAD +======= +var ( + selectAndIdentCost = CostEstimate{Min: common.SelectAndIdentCost, Max: common.SelectAndIdentCost} + constCost = CostEstimate{Min: common.ConstCost, Max: common.ConstCost} + + createListBaseCost = CostEstimate{Min: common.ListCreateBaseCost, Max: common.ListCreateBaseCost} + createMapBaseCost = CostEstimate{Min: common.MapCreateBaseCost, Max: common.MapCreateBaseCost} + createMessageBaseCost = CostEstimate{Min: common.StructCreateBaseCost, Max: common.StructCreateBaseCost} +) + +type coster struct { + // exprPath maps from Expr Id to field path. + exprPath map[int64][]string + // iterRanges tracks the iterRange of each iterVar. + iterRanges iterRangeScopes + // computedSizes tracks the computed sizes of call results. + computedSizes map[int64]SizeEstimate + checkedAST *ast.AST + estimator CostEstimator + overloadEstimators map[string]FunctionEstimator + // presenceTestCost will either be a zero or one based on whether has() macros count against cost computations. + presenceTestCost CostEstimate +} + +// Use a stack of iterVar -> iterRange Expr Ids to handle shadowed variable names. +type iterRangeScopes map[string][]int64 + +func (vs iterRangeScopes) push(varName string, expr ast.Expr) { + vs[varName] = append(vs[varName], expr.ID()) +} + +func (vs iterRangeScopes) pop(varName string) { + varStack := vs[varName] + vs[varName] = varStack[:len(varStack)-1] +} + +func (vs iterRangeScopes) peek(varName string) (int64, bool) { + varStack := vs[varName] + if len(varStack) > 0 { + return varStack[len(varStack)-1], true + } + return 0, false +} + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // CostOption configures flags which affect cost computations. 
type CostOption func(*coster) error @@ -257,7 +379,11 @@ func PresenceTestHasCost(hasCost bool) CostOption { c.presenceTestCost = selectAndIdentCost return nil } +<<<<<<< HEAD c.presenceTestCost = FixedCostEstimate(0) +======= + c.presenceTestCost = CostEstimate{Min: 0, Max: 0} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } } @@ -282,11 +408,18 @@ func Cost(checked *ast.AST, estimator CostEstimator, opts ...CostOption) (CostEs checkedAST: checked, estimator: estimator, overloadEstimators: map[string]FunctionEstimator{}, +<<<<<<< HEAD exprPaths: map[int64][]string{}, localVars: make(scopes), computedSizes: map[int64]SizeEstimate{}, computedEntrySizes: map[int64]entrySizeEstimate{}, presenceTestCost: FixedCostEstimate(1), +======= + exprPath: map[int64][]string{}, + iterRanges: map[string][]int64{}, + computedSizes: map[int64]SizeEstimate{}, + presenceTestCost: CostEstimate{Min: 1, Max: 1}, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } for _, opt := range opts { err := opt(c) @@ -297,6 +430,7 @@ func Cost(checked *ast.AST, estimator CostEstimator, opts ...CostOption) (CostEs return c.cost(checked.Expr()), nil } +<<<<<<< HEAD type coster struct { // exprPaths maps from Expr Id to field path. exprPaths map[int64][]string @@ -456,6 +590,8 @@ func (c *coster) popLocalVar(varName string) { c.localVars.pop(varName) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (c *coster) cost(e ast.Expr) CostEstimate { if e == nil { return CostEstimate{} @@ -477,11 +613,15 @@ func (c *coster) cost(e ast.Expr) CostEstimate { case ast.StructKind: cost = c.costCreateStruct(e) case ast.ComprehensionKind: +<<<<<<< HEAD if c.isBind(e) { cost = c.costBind(e) } else { cost = c.costComprehension(e) } +======= + cost = c.costComprehension(e) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: return CostEstimate{} } @@ -491,11 +631,25 @@ func (c *coster) cost(e ast.Expr) CostEstimate { func (c *coster) costIdent(e ast.Expr) CostEstimate { identName := e.AsIdent() // build and track the field path +<<<<<<< HEAD if v, ok := c.peekLocalVar(identName); ok { c.addPath(e, v.path) } else { c.addPath(e, []string{identName}) } +======= + if iterRange, ok := c.iterRanges.peek(identName); ok { + switch c.checkedAST.GetType(iterRange).Kind() { + case types.ListKind: + c.addPath(e, append(c.exprPath[iterRange], "@items")) + case types.MapKind: + c.addPath(e, append(c.exprPath[iterRange], "@keys")) + } + } else { + c.addPath(e, []string{identName}) + } + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return selectAndIdentCost } @@ -520,10 +674,15 @@ func (c *coster) costSelect(e ast.Expr) CostEstimate { // build and track the field path c.addPath(e, append(c.getPath(sel.Operand()), sel.FieldName())) +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return sum } func (c *coster) costCall(e ast.Expr) CostEstimate { +<<<<<<< HEAD // Dyn is just a way to disable type-checking, so return the cost of 1 with the cost of the argument if dynEstimate := c.maybeUnwrapDynCall(e); dynEstimate != nil { return *dynEstimate @@ -532,6 +691,11 @@ func (c *coster) costCall(e ast.Expr) CostEstimate { // Continue estimating the cost of all other calls. 
call := e.AsCall() args := call.Args() +======= + call := e.AsCall() + args := call.Args() + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var sum CostEstimate argTypes := make([]AstNode, len(args)) @@ -554,7 +718,11 @@ func (c *coster) costCall(e ast.Expr) CostEstimate { fnCost := CostEstimate{Min: uint64(math.MaxUint64), Max: 0} var resultSize *SizeEstimate for _, overload := range overloadIDs { +<<<<<<< HEAD overloadCost := c.functionCost(e, call.FunctionName(), overload, &targetType, argTypes, argCosts) +======= + overloadCost := c.functionCost(call.FunctionName(), overload, &targetType, argTypes, argCosts) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) fnCost = fnCost.Union(overloadCost.CostEstimate) if overloadCost.ResultSize != nil { if resultSize == nil { @@ -568,12 +736,16 @@ func (c *coster) costCall(e ast.Expr) CostEstimate { switch overload { case overloads.IndexList: if len(args) > 0 { +<<<<<<< HEAD // note: assigning resultSize here could be redundant with the path-based lookup later resultSize = c.computeEntrySize(args[0]).valSize() +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.addPath(e, append(c.getPath(args[0]), "@items")) } case overloads.IndexMap: if len(args) > 0 { +<<<<<<< HEAD resultSize = c.computeEntrySize(args[0]).valSize() c.addPath(e, append(c.getPath(args[0]), "@values")) } @@ -611,22 +783,44 @@ func (c *coster) costCreateList(e ast.Expr) CostEstimate { itemSize = itemSize.Union(is) } c.setEntrySize(e, &entrySizeEstimate{containerKind: types.ListKind, key: FixedSizeEstimate(1), val: itemSize}) +======= + c.addPath(e, append(c.getPath(args[0]), "@values")) + } + } + } + if resultSize != nil { + c.computedSizes[e.ID()] = *resultSize + } + return sum.Add(fnCost) +} + +func (c *coster) costCreateList(e ast.Expr) CostEstimate { + create := e.AsList() + var sum CostEstimate + for _, e := range create.Elements() { + sum = sum.Add(c.cost(e)) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return sum.Add(createListBaseCost) } func (c *coster) costCreateMap(e ast.Expr) CostEstimate { mapVal := e.AsMap() var sum CostEstimate +<<<<<<< HEAD keySize := SizeEstimate{Min: math.MaxUint64, Max: 0} valSize := SizeEstimate{Min: math.MaxUint64, Max: 0} if mapVal.Size() == 0 { valSize.Min = 0 keySize.Min = 0 } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, ent := range mapVal.Entries() { entry := ent.AsMapEntry() sum = sum.Add(c.cost(entry.Key())) sum = sum.Add(c.cost(entry.Value())) +<<<<<<< HEAD // Compute the key size range ks := c.sizeOrUnknown(entry.Key()) keySize = keySize.Union(ks) @@ -635,6 +829,9 @@ func (c *coster) costCreateMap(e ast.Expr) CostEstimate { valSize = valSize.Union(vs) } c.setEntrySize(e, &entrySizeEstimate{containerKind: types.MapKind, key: keySize, val: valSize}) +======= + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return sum.Add(createMapBaseCost) } @@ -653,6 +850,7 @@ func (c *coster) costComprehension(e ast.Expr) CostEstimate { var sum CostEstimate sum = sum.Add(c.cost(comp.IterRange())) sum = sum.Add(c.cost(comp.AccuInit())) +<<<<<<< HEAD c.pushLocalVar(comp.AccuVar(), comp.AccuInit()) // Track the iterRange of each IterVar and AccuVar for field path construction @@ -723,6 +921,45 @@ func (c *coster) costBind(e ast.Expr) CostEstimate { } func (c *coster) functionCost(e ast.Expr, function, overloadID string, target *AstNode, args []AstNode, argCosts []CostEstimate) CallEstimate { +======= + + // Track the iterRange of each IterVar for field path 
construction + c.iterRanges.push(comp.IterVar(), comp.IterRange()) + loopCost := c.cost(comp.LoopCondition()) + stepCost := c.cost(comp.LoopStep()) + c.iterRanges.pop(comp.IterVar()) + sum = sum.Add(c.cost(comp.Result())) + rangeCnt := c.sizeEstimate(c.newAstNode(comp.IterRange())) + + c.computedSizes[e.ID()] = rangeCnt + + rangeCost := rangeCnt.MultiplyByCost(stepCost.Add(loopCost)) + sum = sum.Add(rangeCost) + + return sum +} + +func (c *coster) sizeEstimate(t AstNode) SizeEstimate { + if l := t.ComputedSize(); l != nil { + return *l + } + if l := c.estimator.EstimateSize(t); l != nil { + return *l + } + // return an estimate of 1 for return types of set + // lengths, since strings/bytes/more complex objects could be of + // variable length + if isScalar(t.Type()) { + // TODO: since the logic for size estimation is split between + // ComputedSize and isScalar, changing one will likely require changing + // the other, so they should be merged in the future if possible + return SizeEstimate{Min: 1, Max: 1} + } + return SizeEstimate{Min: 0, Max: math.MaxUint64} +} + +func (c *coster) functionCost(function, overloadID string, target *AstNode, args []AstNode, argCosts []CostEstimate) CallEstimate { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) argCostSum := func() CostEstimate { var sum CostEstimate for _, a := range argCosts { @@ -747,6 +984,7 @@ func (c *coster) functionCost(e ast.Expr, function, overloadID string, target *A case overloads.ExtFormatString: if target != nil { // ResultSize not calculated because we can't bound the max size. +<<<<<<< HEAD return CallEstimate{ CostEstimate: c.sizeOrUnknown(*target).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())} } @@ -777,12 +1015,41 @@ func (c *coster) functionCost(e ast.Expr, function, overloadID string, target *A case overloads.StartsWithString, overloads.EndsWithString: if len(args) == 1 { return CallEstimate{CostEstimate: c.sizeOrUnknown(args[0]).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())} +======= + return CallEstimate{CostEstimate: c.sizeEstimate(*target).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())} + } + case overloads.StringToBytes: + if len(args) == 1 { + sz := c.sizeEstimate(args[0]) + // ResultSize max is when each char converts to 4 bytes. + return CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &SizeEstimate{Min: sz.Min, Max: sz.Max * 4}} + } + case overloads.BytesToString: + if len(args) == 1 { + sz := c.sizeEstimate(args[0]) + // ResultSize min is when 4 bytes convert to 1 char. + return CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &SizeEstimate{Min: sz.Min / 4, Max: sz.Max}} + } + case overloads.ExtQuoteString: + if len(args) == 1 { + sz := c.sizeEstimate(args[0]) + // ResultSize max is when each char is escaped. 2 quote chars always added. 
+ return CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &SizeEstimate{Min: sz.Min + 2, Max: sz.Max*2 + 2}} + } + case overloads.StartsWithString, overloads.EndsWithString: + if len(args) == 1 { + return CallEstimate{CostEstimate: c.sizeEstimate(args[0]).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } case overloads.InList: // If a list is composed entirely of constant values this is O(1), but we don't account for that here. // We just assume all list containment checks are O(n). if len(args) == 2 { +<<<<<<< HEAD return CallEstimate{CostEstimate: c.sizeOrUnknown(args[1]).MultiplyByCostFactor(1).Add(argCostSum())} +======= + return CallEstimate{CostEstimate: c.sizeEstimate(args[1]).MultiplyByCostFactor(1).Add(argCostSum())} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // O(nm) functions case overloads.MatchesString: @@ -790,19 +1057,32 @@ func (c *coster) functionCost(e ast.Expr, function, overloadID string, target *A if target != nil && len(args) == 1 { // Add one to string length for purposes of cost calculation to prevent product of string and regex to be 0 // in case where string is empty but regex is still expensive. +<<<<<<< HEAD strCost := c.sizeOrUnknown(*target).Add(SizeEstimate{Min: 1, Max: 1}).MultiplyByCostFactor(common.StringTraversalCostFactor) +======= + strCost := c.sizeEstimate(*target).Add(SizeEstimate{Min: 1, Max: 1}).MultiplyByCostFactor(common.StringTraversalCostFactor) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // We don't know how many expressions are in the regex, just the string length (a huge // improvement here would be to somehow get a count the number of expressions in the regex or // how many states are in the regex state machine and use that to measure regex cost). // For now, we're making a guess that each expression in a regex is typically at least 4 chars // in length. 
+<<<<<<< HEAD regexCost := c.sizeOrUnknown(args[0]).MultiplyByCostFactor(common.RegexStringLengthCostFactor) +======= + regexCost := c.sizeEstimate(args[0]).MultiplyByCostFactor(common.RegexStringLengthCostFactor) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return CallEstimate{CostEstimate: strCost.Multiply(regexCost).Add(argCostSum())} } case overloads.ContainsString: if target != nil && len(args) == 1 { +<<<<<<< HEAD strCost := c.sizeOrUnknown(*target).MultiplyByCostFactor(common.StringTraversalCostFactor) substrCost := c.sizeOrUnknown(args[0]).MultiplyByCostFactor(common.StringTraversalCostFactor) +======= + strCost := c.sizeEstimate(*target).MultiplyByCostFactor(common.StringTraversalCostFactor) + substrCost := c.sizeEstimate(args[0]).MultiplyByCostFactor(common.StringTraversalCostFactor) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return CallEstimate{CostEstimate: strCost.Multiply(substrCost).Add(argCostSum())} } case overloads.LogicalOr, overloads.LogicalAnd: @@ -812,9 +1092,13 @@ func (c *coster) functionCost(e ast.Expr, function, overloadID string, target *A argCost := CostEstimate{Min: lhs.Min, Max: lhs.Add(rhs).Max} return CallEstimate{CostEstimate: argCost} case overloads.Conditional: +<<<<<<< HEAD size := c.sizeOrUnknown(args[1]).Union(c.sizeOrUnknown(args[2])) resultEntrySize := c.computeEntrySize(args[1].Expr()).union(c.computeEntrySize(args[2].Expr())) c.setEntrySize(e, resultEntrySize) +======= + size := c.sizeEstimate(args[1]).Union(c.sizeEstimate(args[2])) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) conditionalCost := argCosts[0] ifTrueCost := argCosts[1] ifFalseCost := argCosts[2] @@ -822,6 +1106,7 @@ func (c *coster) functionCost(e ast.Expr, function, overloadID string, target *A return CallEstimate{CostEstimate: argCost, ResultSize: &size} case overloads.AddString, overloads.AddBytes, overloads.AddList: if len(args) == 2 { +<<<<<<< HEAD lhsSize := c.sizeOrUnknown(args[0]) rhsSize := c.sizeOrUnknown(args[1]) resultSize := lhsSize.Add(rhsSize) @@ -835,6 +1120,15 @@ func (c *coster) functionCost(e ast.Expr, function, overloadID string, target *A case overloads.AddList: // list concatenation is O(1), but we handle it here to track size return CallEstimate{CostEstimate: FixedCostEstimate(1).Add(argCostSum()), ResultSize: &resultSize} +======= + lhsSize := c.sizeEstimate(args[0]) + rhsSize := c.sizeEstimate(args[1]) + resultSize := lhsSize.Add(rhsSize) + switch overloadID { + case overloads.AddList: + // list concatenation is O(1), but we handle it here to track size + return CallEstimate{CostEstimate: CostEstimate{Min: 1, Max: 1}.Add(argCostSum()), ResultSize: &resultSize} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: return CallEstimate{CostEstimate: resultSize.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &resultSize} } @@ -842,8 +1136,13 @@ func (c *coster) functionCost(e ast.Expr, function, overloadID string, target *A case overloads.LessString, overloads.GreaterString, overloads.LessEqualsString, overloads.GreaterEqualsString, overloads.LessBytes, overloads.GreaterBytes, overloads.LessEqualsBytes, overloads.GreaterEqualsBytes, overloads.Equals, overloads.NotEquals: +<<<<<<< HEAD lhsCost := c.sizeOrUnknown(args[0]) rhsCost := c.sizeOrUnknown(args[1]) +======= + lhsCost := c.sizeEstimate(args[0]) + rhsCost := c.sizeEstimate(args[1]) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) min := uint64(0) smallestMax := lhsCost.Max if rhsCost.Max < smallestMax { @@ 
-853,16 +1152,24 @@ func (c *coster) functionCost(e ast.Expr, function, overloadID string, target *A min = 1 } // equality of 2 scalar values results in a cost of 1 +<<<<<<< HEAD return CallEstimate{ CostEstimate: CostEstimate{Min: min, Max: smallestMax}.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), } +======= + return CallEstimate{CostEstimate: CostEstimate{Min: min, Max: smallestMax}.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // O(1) functions // See CostTracker.costCall for more details about O(1) cost calculations // Benchmarks suggest that most of the other operations take +/- 50% of a base cost unit // which on an Intel xeon 2.20GHz CPU is 50ns. +<<<<<<< HEAD return CallEstimate{CostEstimate: FixedCostEstimate(1).Add(argCostSum())} +======= + return CallEstimate{CostEstimate: CostEstimate{Min: 1, Max: 1}.Add(argCostSum())} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (c *coster) getType(e ast.Expr) *types.Type { @@ -870,6 +1177,7 @@ func (c *coster) getType(e ast.Expr) *types.Type { } func (c *coster) getPath(e ast.Expr) []string { +<<<<<<< HEAD if e.Kind() == ast.IdentKind { if v, found := c.peekLocalVar(e.AsIdent()); found { return v.path[:] @@ -884,18 +1192,37 @@ func (c *coster) addPath(e ast.Expr, path []string) { func isAccumulatorVar(name string) bool { return name == parser.AccumulatorName || name == parser.HiddenAccumulatorName +======= + return c.exprPath[e.ID()] +} + +func (c *coster) addPath(e ast.Expr, path []string) { + c.exprPath[e.ID()] = path +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (c *coster) newAstNode(e ast.Expr) *astNode { path := c.getPath(e) +<<<<<<< HEAD if len(path) > 0 && isAccumulatorVar(path[0]) { // only provide paths to root vars; omit accumulator vars path = nil } +======= + if len(path) > 0 && path[0] == parser.AccumulatorName { + // only provide paths to root vars; omit accumulator vars + path = nil + } + var derivedSize *SizeEstimate + if size, ok := c.computedSizes[e.ID()]; ok { + derivedSize = &size + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return &astNode{ path: path, t: c.getType(e), expr: e, +<<<<<<< HEAD derivedSize: c.computeSize(e)} } @@ -1009,6 +1336,9 @@ func computeTypeSize(t *types.Type) *SizeEstimate { return &cost } return nil +======= + derivedSize: derivedSize} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // isScalar returns true if the given type is known to be of a constant size at @@ -1018,16 +1348,20 @@ func isScalar(t *types.Type) bool { switch t.Kind() { case types.BoolKind, types.DoubleKind, types.DurationKind, types.IntKind, types.TimestampKind, types.UintKind: return true +<<<<<<< HEAD case types.OpaqueKind: if t.TypeName() == "optional_type" { return isScalar(t.Parameters()[0]) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return false } var ( doubleTwoTo64 = math.Ldexp(1.0, 64) +<<<<<<< HEAD unknownSizeEstimate = SizeEstimate{Min: 0, Max: math.MaxUint64} unknownCostEstimate = unknownSizeEstimate.MultiplyByCostFactor(1) @@ -1038,4 +1372,6 @@ var ( createListBaseCost = FixedCostEstimate(common.ListCreateBaseCost) createMapBaseCost = FixedCostEstimate(common.MapCreateBaseCost) createMessageBaseCost = FixedCostEstimate(common.StructCreateBaseCost) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) diff --git a/vendor/github.com/google/cel-go/checker/standard.go 
b/vendor/github.com/google/cel-go/checker/standard.go
new file mode 100644
index 0000000000..11b35b80ee
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/standard.go
@@ -0,0 +1,35 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+	"github.com/google/cel-go/common/stdlib"
+
+	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// StandardFunctions returns the Decls for all functions in the evaluator.
+//
+// Deprecated: prefer stdlib.FunctionExprDecls()
+func StandardFunctions() []*exprpb.Decl {
+	return stdlib.FunctionExprDecls()
+}
+
+// StandardTypes returns the set of type identifiers for standard library types.
+//
+// Deprecated: prefer stdlib.TypeExprDecls()
+func StandardTypes() []*exprpb.Decl {
+	return stdlib.TypeExprDecls()
+}
diff --git a/vendor/github.com/google/cel-go/common/BUILD.bazel b/vendor/github.com/google/cel-go/common/BUILD.bazel
index eef7f281be..61ae36273f 100644
--- a/vendor/github.com/google/cel-go/common/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/common/BUILD.bazel
@@ -18,6 +18,10 @@ go_library(
     deps = [
         "//common/runes:go_default_library",
         "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+<<<<<<< HEAD
+=======
+        "@org_golang_x_text//width:go_default_library",
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
     ],
 )
 
diff --git a/vendor/github.com/google/cel-go/common/ast/BUILD.bazel b/vendor/github.com/google/cel-go/common/ast/BUILD.bazel
index 9824f57a9f..eb0340777a 100644
--- a/vendor/github.com/google/cel-go/common/ast/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/common/ast/BUILD.bazel
@@ -15,6 +15,7 @@ go_library(
         "navigable.go",
     ],
     importpath = "github.com/google/cel-go/common/ast",
+<<<<<<< HEAD
     deps = [
         "//common:go_default_library",
         "//common/types:go_default_library",
@@ -22,6 +23,13 @@ go_library(
         "@dev_cel_expr//:expr",
         "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
         "@org_golang_google_protobuf//proto:go_default_library",
+=======
+    deps = [
+        "//common:go_default_library",
+        "//common/types:go_default_library",
+        "//common/types/ref:go_default_library",
+        "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
         "@org_golang_google_protobuf//types/known/structpb:go_default_library",
     ],
 )
@@ -37,13 +45,20 @@ go_test(
     embed = [
         ":go_default_library",
     ],
+<<<<<<< HEAD
     deps = [
+=======
+    deps = [
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
         "//checker:go_default_library",
         "//checker/decls:go_default_library",
         "//common:go_default_library",
         "//common/containers:go_default_library",
         "//common/decls:go_default_library",
+<<<<<<< HEAD
         "//common/operators:go_default_library",
+=======
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
         "//common/overloads:go_default_library",
         "//common/stdlib:go_default_library",
         "//common/types:go_default_library",
diff --git
a/vendor/github.com/google/cel-go/common/ast/ast.go b/vendor/github.com/google/cel-go/common/ast/ast.go index b807669d49..b3e7cb5995 100644 --- a/vendor/github.com/google/cel-go/common/ast/ast.go +++ b/vendor/github.com/google/cel-go/common/ast/ast.go @@ -310,6 +310,7 @@ func (s *SourceInfo) SetOffsetRange(id int64, o OffsetRange) { s.offsetRanges[id] = o } +<<<<<<< HEAD // ClearOffsetRange removes the OffsetRange for the given expression id. func (s *SourceInfo) ClearOffsetRange(id int64) { if s != nil { @@ -317,11 +318,27 @@ func (s *SourceInfo) ClearOffsetRange(id int64) { } } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // GetStartLocation calculates the human-readable 1-based line and 0-based column of the first character // of the expression node at the id. func (s *SourceInfo) GetStartLocation(id int64) common.Location { if o, found := s.GetOffsetRange(id); found { +<<<<<<< HEAD return s.GetLocationByOffset(o.Start) +======= + line := 1 + col := int(o.Start) + for _, lineOffset := range s.LineOffsets() { + if lineOffset < o.Start { + line++ + col = int(o.Start - lineOffset) + } else { + break + } + } + return common.NewLocation(line, col) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return common.NoLocation } @@ -333,11 +350,26 @@ func (s *SourceInfo) GetStartLocation(id int64) common.Location { // be identical to the start location for the expression. func (s *SourceInfo) GetStopLocation(id int64) common.Location { if o, found := s.GetOffsetRange(id); found { +<<<<<<< HEAD return s.GetLocationByOffset(o.Stop) +======= + line := 1 + col := int(o.Stop) + for _, lineOffset := range s.LineOffsets() { + if lineOffset < o.Stop { + line++ + col = int(o.Stop - lineOffset) + } else { + break + } + } + return common.NewLocation(line, col) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return common.NoLocation } +<<<<<<< HEAD // GetLocationByOffset returns the line and column information for a given character offset. func (s *SourceInfo) GetLocationByOffset(offset int32) common.Location { line := 1 @@ -352,6 +384,8 @@ func (s *SourceInfo) GetLocationByOffset(offset int32) common.Location { return common.NewLocation(line, col) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // ComputeOffset calculates the 0-based character offset from a 1-based line and 0-based column. func (s *SourceInfo) ComputeOffset(line, col int32) int32 { if s != nil { diff --git a/vendor/github.com/google/cel-go/common/ast/conversion.go b/vendor/github.com/google/cel-go/common/ast/conversion.go index 435d8f6547..c846b3f72e 100644 --- a/vendor/github.com/google/cel-go/common/ast/conversion.go +++ b/vendor/github.com/google/cel-go/common/ast/conversion.go @@ -17,6 +17,7 @@ package ast import ( "fmt" +<<<<<<< HEAD "google.golang.org/protobuf/proto" "github.com/google/cel-go/common/types" @@ -25,6 +26,14 @@ import ( celpb "cel.dev/expr" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" structpb "google.golang.org/protobuf/types/known/structpb" +======= + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + + structpb "google.golang.org/protobuf/types/known/structpb" + + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // ToProto converts an AST to a CheckedExpr protobouf. 
@@ -175,10 +184,16 @@ func exprComprehension(factory ExprFactory, id int64, comp *exprpb.Expr_Comprehe if err != nil { return nil, err } +<<<<<<< HEAD return factory.NewComprehensionTwoVar(id, iterRange, comp.GetIterVar(), comp.GetIterVar2(), +======= + return factory.NewComprehension(id, + iterRange, + comp.GetIterVar(), +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) comp.GetAccuVar(), accuInit, loopCond, @@ -366,7 +381,10 @@ func protoComprehension(id int64, comp ComprehensionExpr) (*exprpb.Expr, error) ExprKind: &exprpb.Expr_ComprehensionExpr{ ComprehensionExpr: &exprpb.Expr_Comprehension{ IterVar: comp.IterVar(), +<<<<<<< HEAD IterVar2: comp.IterVar2(), +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) IterRange: iterRange, AccuVar: comp.AccuVar(), AccuInit: accuInit, @@ -613,6 +631,7 @@ func ValToConstant(v ref.Val) (*exprpb.Constant, error) { // ConstantToVal converts a protobuf Constant to a CEL-native ref.Val. func ConstantToVal(c *exprpb.Constant) (ref.Val, error) { +<<<<<<< HEAD return AlphaProtoConstantAsVal(c) } @@ -644,10 +663,30 @@ func ProtoConstantAsVal(c *celpb.Constant) (ref.Val, error) { case *celpb.Constant_StringValue: return types.String(c.GetStringValue()), nil case *celpb.Constant_Uint64Value: +======= + if c == nil { + return nil, nil + } + switch c.GetConstantKind().(type) { + case *exprpb.Constant_BoolValue: + return types.Bool(c.GetBoolValue()), nil + case *exprpb.Constant_BytesValue: + return types.Bytes(c.GetBytesValue()), nil + case *exprpb.Constant_DoubleValue: + return types.Double(c.GetDoubleValue()), nil + case *exprpb.Constant_Int64Value: + return types.Int(c.GetInt64Value()), nil + case *exprpb.Constant_NullValue: + return types.NullValue, nil + case *exprpb.Constant_StringValue: + return types.String(c.GetStringValue()), nil + case *exprpb.Constant_Uint64Value: +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return types.Uint(c.GetUint64Value()), nil } return nil, fmt.Errorf("unsupported constant kind: %v", c.GetConstantKind()) } +<<<<<<< HEAD func convertProto(src, dst proto.Message) error { pb, err := proto.Marshal(src) @@ -657,3 +696,5 @@ func convertProto(src, dst proto.Message) error { err = proto.Unmarshal(pb, dst) return err } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/google/cel-go/common/ast/expr.go b/vendor/github.com/google/cel-go/common/ast/expr.go index 9f55cb3b9f..a7386ab564 100644 --- a/vendor/github.com/google/cel-go/common/ast/expr.go +++ b/vendor/github.com/google/cel-go/common/ast/expr.go @@ -158,7 +158,11 @@ type EntryExpr interface { // IDGenerator produces unique ids suitable for tagging expression nodes type IDGenerator func(originalID int64) int64 +<<<<<<< HEAD // CallExpr defines an interface for inspecting a function call and its arguments. +======= +// CallExpr defines an interface for inspecting a function call and its arugments. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type CallExpr interface { // FunctionName returns the name of the function. FunctionName() string @@ -269,6 +273,7 @@ type ComprehensionExpr interface { IterRange() Expr // IterVar returns the iteration variable name. +<<<<<<< HEAD // // For one-variable comprehensions, the iter var refers to the element value // when iterating over a list, or the map key when iterating over a map. @@ -285,6 +290,10 @@ type ComprehensionExpr interface { // HasIterVar2 returns true if the second iteration variable is non-empty. 
HasIterVar2() bool +======= + IterVar() string + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // AccuVar returns the accumulation variable name. AccuVar() string @@ -411,7 +420,10 @@ func (e *expr) SetKindCase(other Expr) { e.exprKindCase = &baseComprehensionExpr{ iterRange: c.IterRange(), iterVar: c.IterVar(), +<<<<<<< HEAD iterVar2: c.IterVar2(), +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) accuVar: c.AccuVar(), accuInit: c.AccuInit(), loopCond: c.LoopCondition(), @@ -520,7 +532,10 @@ var _ ComprehensionExpr = &baseComprehensionExpr{} type baseComprehensionExpr struct { iterRange Expr iterVar string +<<<<<<< HEAD iterVar2 string +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) accuVar string accuInit Expr loopCond Expr @@ -543,6 +558,7 @@ func (e *baseComprehensionExpr) IterVar() string { return e.iterVar } +<<<<<<< HEAD func (e *baseComprehensionExpr) IterVar2() string { return e.iterVar2 } @@ -551,6 +567,8 @@ func (e *baseComprehensionExpr) HasIterVar2() bool { return e.iterVar2 != "" } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (e *baseComprehensionExpr) AccuVar() string { return e.accuVar } diff --git a/vendor/github.com/google/cel-go/common/ast/factory.go b/vendor/github.com/google/cel-go/common/ast/factory.go index d4dcde4d94..e891684782 100644 --- a/vendor/github.com/google/cel-go/common/ast/factory.go +++ b/vendor/github.com/google/cel-go/common/ast/factory.go @@ -27,12 +27,18 @@ type ExprFactory interface { // NewCall creates an Expr value representing a global function call. NewCall(id int64, function string, args ...Expr) Expr +<<<<<<< HEAD // NewComprehension creates an Expr value representing a one-variable comprehension over a value range. NewComprehension(id int64, iterRange Expr, iterVar, accuVar string, accuInit, loopCondition, loopStep, result Expr) Expr // NewComprehensionTwoVar creates an Expr value representing a two-variable comprehension over a value range. NewComprehensionTwoVar(id int64, iterRange Expr, iterVar, iterVar2, accuVar string, accuInit, loopCondition, loopStep, result Expr) Expr +======= + // NewComprehension creates an Expr value representing a comprehension over a value range. + NewComprehension(id int64, iterRange Expr, iterVar, accuVar string, accuInit, loopCondition, loopStep, result Expr) Expr + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // NewMemberCall creates an Expr value representing a member function call. NewMemberCall(id int64, function string, receiver Expr, args ...Expr) Expr @@ -40,18 +46,28 @@ type ExprFactory interface { NewIdent(id int64, name string) Expr // NewAccuIdent creates an Expr value representing an accumulator identifier within a +<<<<<<< HEAD // comprehension. NewAccuIdent(id int64) Expr // AccuIdentName reports the name of the accumulator variable to be used within a comprehension. AccuIdentName() string +======= + //comprehension. + NewAccuIdent(id int64) Expr + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // NewLiteral creates an Expr value representing a literal value, such as a string or integer. NewLiteral(id int64, value ref.Val) Expr // NewList creates an Expr value representing a list literal expression with optional indices. // +<<<<<<< HEAD // Optional indices will typically be empty unless the CEL optional types are enabled. +======= + // Optional indicies will typically be empty unless the CEL optional types are enabled. 
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) NewList(id int64, elems []Expr, optIndices []int32) Expr // NewMap creates an Expr value representing a map literal expression @@ -81,6 +97,7 @@ type ExprFactory interface { isExprFactory() } +<<<<<<< HEAD type baseExprFactory struct { accumulatorName string } @@ -98,6 +115,13 @@ func NewExprFactoryWithAccumulator(id string) ExprFactory { return &baseExprFactory{ id, } +======= +type baseExprFactory struct{} + +// NewExprFactory creates an ExprFactory instance. +func NewExprFactory() ExprFactory { + return &baseExprFactory{} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (fac *baseExprFactory) NewCall(id int64, function string, args ...Expr) Expr { @@ -129,17 +153,23 @@ func (fac *baseExprFactory) NewMemberCall(id int64, function string, target Expr } func (fac *baseExprFactory) NewComprehension(id int64, iterRange Expr, iterVar, accuVar string, accuInit, loopCond, loopStep, result Expr) Expr { +<<<<<<< HEAD // Set the iter_var2 to empty string to indicate the second variable is omitted return fac.NewComprehensionTwoVar(id, iterRange, iterVar, "", accuVar, accuInit, loopCond, loopStep, result) } func (fac *baseExprFactory) NewComprehensionTwoVar(id int64, iterRange Expr, iterVar, iterVar2, accuVar string, accuInit, loopCond, loopStep, result Expr) Expr { +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return fac.newExpr( id, &baseComprehensionExpr{ iterRange: iterRange, iterVar: iterVar, +<<<<<<< HEAD iterVar2: iterVar2, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) accuVar: accuVar, accuInit: accuInit, loopCond: loopCond, @@ -153,11 +183,15 @@ func (fac *baseExprFactory) NewIdent(id int64, name string) Expr { } func (fac *baseExprFactory) NewAccuIdent(id int64) Expr { +<<<<<<< HEAD return fac.NewIdent(id, fac.AccuIdentName()) } func (fac *baseExprFactory) AccuIdentName() string { return fac.accumulatorName +======= + return fac.NewIdent(id, "__result__") +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (fac *baseExprFactory) NewLiteral(id int64, value ref.Val) Expr { @@ -251,10 +285,16 @@ func (fac *baseExprFactory) CopyExpr(e Expr) Expr { return fac.NewMemberCall(e.ID(), c.FunctionName(), fac.CopyExpr(c.Target()), argsCopy...) 
case ComprehensionKind: compre := e.AsComprehension() +<<<<<<< HEAD return fac.NewComprehensionTwoVar(e.ID(), fac.CopyExpr(compre.IterRange()), compre.IterVar(), compre.IterVar2(), +======= + return fac.NewComprehension(e.ID(), + fac.CopyExpr(compre.IterRange()), + compre.IterVar(), +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) compre.AccuVar(), fac.CopyExpr(compre.AccuInit()), fac.CopyExpr(compre.LoopCondition()), diff --git a/vendor/github.com/google/cel-go/common/ast/navigable.go b/vendor/github.com/google/cel-go/common/ast/navigable.go index d7a90fb7c3..278e4dd3fd 100644 --- a/vendor/github.com/google/cel-go/common/ast/navigable.go +++ b/vendor/github.com/google/cel-go/common/ast/navigable.go @@ -390,6 +390,7 @@ func (comp navigableComprehensionImpl) IterVar() string { return comp.Expr.AsComprehension().IterVar() } +<<<<<<< HEAD func (comp navigableComprehensionImpl) IterVar2() string { return comp.Expr.AsComprehension().IterVar2() } @@ -398,6 +399,8 @@ func (comp navigableComprehensionImpl) HasIterVar2() bool { return comp.Expr.AsComprehension().HasIterVar2() } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (comp navigableComprehensionImpl) AccuVar() string { return comp.Expr.AsComprehension().AccuVar() } diff --git a/vendor/github.com/google/cel-go/common/containers/container.go b/vendor/github.com/google/cel-go/common/containers/container.go index 3097a3f785..d7fbb4e6e3 100644 --- a/vendor/github.com/google/cel-go/common/containers/container.go +++ b/vendor/github.com/google/cel-go/common/containers/container.go @@ -19,7 +19,10 @@ package containers import ( "fmt" "strings" +<<<<<<< HEAD "unicode" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/google/cel-go/common/ast" ) @@ -213,6 +216,7 @@ type ContainerOption func(*Container) (*Container, error) func Abbrevs(qualifiedNames ...string) ContainerOption { return func(c *Container) (*Container, error) { for _, qn := range qualifiedNames { +<<<<<<< HEAD qn = strings.TrimSpace(qn) for _, r := range qn { if !isIdentifierChar(r) { @@ -220,6 +224,8 @@ func Abbrevs(qualifiedNames ...string) ContainerOption { "invalid qualified name: %s, wanted name of the form 'qualified.name'", qn) } } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ind := strings.LastIndex(qn, ".") if ind <= 0 || ind >= len(qn)-1 { return nil, fmt.Errorf( @@ -286,10 +292,13 @@ func aliasAs(kind, qualifiedName, alias string) ContainerOption { } } +<<<<<<< HEAD func isIdentifierChar(r rune) bool { return r <= unicode.MaxASCII && (r == '.' || r == '_' || unicode.IsLetter(r) || unicode.IsNumber(r)) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Name sets the fully-qualified name of the Container. 
func Name(name string) ContainerOption { return func(c *Container) (*Container, error) { diff --git a/vendor/github.com/google/cel-go/common/debug/debug.go b/vendor/github.com/google/cel-go/common/debug/debug.go index 75f5f0d636..b447cb015e 100644 --- a/vendor/github.com/google/cel-go/common/debug/debug.go +++ b/vendor/github.com/google/cel-go/common/debug/debug.go @@ -215,11 +215,14 @@ func (w *debugWriter) appendComprehension(comprehension ast.ComprehensionExpr) { w.append(comprehension.IterVar()) w.append(",") w.appendLine() +<<<<<<< HEAD if comprehension.HasIterVar2() { w.append(comprehension.IterVar2()) w.append(",") w.appendLine() } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) w.append("// Target") w.appendLine() w.Buffer(comprehension.IterRange()) @@ -257,7 +260,11 @@ func formatLiteral(c ref.Val) string { case types.Bool: return fmt.Sprintf("%t", v) case types.Bytes: +<<<<<<< HEAD return fmt.Sprintf("b%s", strconv.Quote(string(v))) +======= + return fmt.Sprintf("b\"%s\"", string(v)) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) case types.Double: return fmt.Sprintf("%v", float64(v)) case types.Int: diff --git a/vendor/github.com/google/cel-go/common/decls/decls.go b/vendor/github.com/google/cel-go/common/decls/decls.go index bfeb52c515..d486bcaf91 100644 --- a/vendor/github.com/google/cel-go/common/decls/decls.go +++ b/vendor/github.com/google/cel-go/common/decls/decls.go @@ -162,9 +162,13 @@ func (f *FunctionDecl) AddOverload(overload *OverloadDecl) error { if oID == overload.ID() { if o.SignatureEquals(overload) && o.IsNonStrict() == overload.IsNonStrict() { // Allow redefinition of an overload implementation so long as the signatures match. +<<<<<<< HEAD if overload.hasBinding() { f.overloads[oID] = overload } +======= + f.overloads[oID] = overload +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } return fmt.Errorf("overload redefinition in function. %s: %s has multiple definitions", f.Name(), oID) @@ -251,6 +255,7 @@ func (f *FunctionDecl) Bindings() ([]*functions.Overload, error) { // are preserved in order to assist with the function resolution step. switch len(args) { case 1: +<<<<<<< HEAD if o.unaryOp != nil && o.matchesRuntimeSignature(f.disableTypeGuards, args...) { return o.unaryOp(args[0]) } @@ -260,6 +265,17 @@ func (f *FunctionDecl) Bindings() ([]*functions.Overload, error) { } } if o.functionOp != nil && o.matchesRuntimeSignature(f.disableTypeGuards, args...) { +======= + if o.unaryOp != nil && o.matchesRuntimeSignature( /* disableTypeGuards=*/ false, args...) { + return o.unaryOp(args[0]) + } + case 2: + if o.binaryOp != nil && o.matchesRuntimeSignature( /* disableTypeGuards=*/ false, args...) { + return o.binaryOp(args[0], args[1]) + } + } + if o.functionOp != nil && o.matchesRuntimeSignature( /* disableTypeGuards=*/ false, args...) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return o.functionOp(args...) } // eventually this will fall through to the noSuchOverload below. 
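// A minimal sketch of the arity-based dispatch shown in the Bindings hunk
// above: a specialized unary or binary implementation is preferred when one
// is bound, and a variadic functionOp acts as the fallback (the runtime
// signature checks are elided here). The types are illustrative stand-ins
// for the vendored decls/functions packages.

package main

import (
	"errors"
	"fmt"
)

type overload struct {
	unaryOp    func(arg any) (any, error)
	binaryOp   func(lhs, rhs any) (any, error)
	functionOp func(args ...any) (any, error)
}

func (o *overload) invoke(args ...any) (any, error) {
	switch len(args) {
	case 1:
		if o.unaryOp != nil {
			return o.unaryOp(args[0])
		}
	case 2:
		if o.binaryOp != nil {
			return o.binaryOp(args[0], args[1])
		}
	}
	if o.functionOp != nil {
		return o.functionOp(args...)
	}
	return nil, errors.New("no such overload")
}

func main() {
	add := &overload{binaryOp: func(lhs, rhs any) (any, error) {
		return lhs.(int) + rhs.(int), nil
	}}
	fmt.Println(add.invoke(1, 2)) // 3 <nil>
}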
@@ -777,6 +793,7 @@ func (v *VariableDecl) DeclarationIsEquivalent(other *VariableDecl) bool { return v.Name() == other.Name() && v.Type().IsEquivalentType(other.Type()) } +<<<<<<< HEAD // TypeVariable creates a new type identifier for use within a types.Provider func TypeVariable(t *types.Type) *VariableDecl { return NewVariable(t.TypeName(), types.NewTypeTypeWithParam(t)) @@ -789,6 +806,10 @@ func VariableDeclToExprDecl(v *VariableDecl) (*exprpb.Decl, error) { // variableDeclToExprDecl converts a go-native variable declaration into a protobuf-type variable declaration. func variableDeclToExprDecl(v *VariableDecl) (*exprpb.Decl, error) { +======= +// VariableDeclToExprDecl converts a go-native variable declaration into a protobuf-type variable declaration. +func VariableDeclToExprDecl(v *VariableDecl) (*exprpb.Decl, error) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) varType, err := types.TypeToExprType(v.Type()) if err != nil { return nil, err @@ -796,6 +817,7 @@ func variableDeclToExprDecl(v *VariableDecl) (*exprpb.Decl, error) { return chkdecls.NewVar(v.Name(), varType), nil } +<<<<<<< HEAD // FunctionDeclToExprDecl converts a go-native function declaration into a protobuf-typed function declaration. func FunctionDeclToExprDecl(f *FunctionDecl) (*exprpb.Decl, error) { return functionDeclToExprDecl(f) @@ -803,6 +825,15 @@ func FunctionDeclToExprDecl(f *FunctionDecl) (*exprpb.Decl, error) { // functionDeclToExprDecl converts a go-native function declaration into a protobuf-typed function declaration. func functionDeclToExprDecl(f *FunctionDecl) (*exprpb.Decl, error) { +======= +// TypeVariable creates a new type identifier for use within a types.Provider +func TypeVariable(t *types.Type) *VariableDecl { + return NewVariable(t.TypeName(), types.NewTypeTypeWithParam(t)) +} + +// FunctionDeclToExprDecl converts a go-native function declaration into a protobuf-typed function declaration. +func FunctionDeclToExprDecl(f *FunctionDecl) (*exprpb.Decl, error) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) overloads := make([]*exprpb.Decl_FunctionDecl_Overload, len(f.overloads)) for i, oID := range f.overloadOrdinals { o := f.overloads[oID] diff --git a/vendor/github.com/google/cel-go/common/error.go b/vendor/github.com/google/cel-go/common/error.go index 0cf21345e6..f0be04eb2b 100644 --- a/vendor/github.com/google/cel-go/common/error.go +++ b/vendor/github.com/google/cel-go/common/error.go @@ -18,6 +18,11 @@ import ( "fmt" "strings" "unicode/utf8" +<<<<<<< HEAD +======= + + "golang.org/x/text/width" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // NewError creates an error associated with an expression id with the given message at the given location. @@ -33,15 +38,28 @@ type Error struct { } const ( +<<<<<<< HEAD dot = "." ind = "^" wideDot = "\uff0e" wideInd = "\uff3e" +======= + dot = "." + ind = "^" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // maxSnippetLength is the largest number of characters which can be rendered in an error message snippet. maxSnippetLength = 16384 ) +<<<<<<< HEAD +======= +var ( + wideDot = width.Widen.String(dot) + wideInd = width.Widen.String(ind) +) + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // ToDisplayString decorates the error message with the source location. 
func (e *Error) ToDisplayString(source Source) string { var result = fmt.Sprintf("ERROR: %s:%d:%d: %s", diff --git a/vendor/github.com/google/cel-go/common/errors.go b/vendor/github.com/google/cel-go/common/errors.go index c8865df8cd..f4338630e7 100644 --- a/vendor/github.com/google/cel-go/common/errors.go +++ b/vendor/github.com/google/cel-go/common/errors.go @@ -30,6 +30,7 @@ type Errors struct { // NewErrors creates a new instance of the Errors type. func NewErrors(source Source) *Errors { +<<<<<<< HEAD src := source if src == nil { src = NewTextSource("") @@ -37,6 +38,11 @@ func NewErrors(source Source) *Errors { return &Errors{ errors: []*Error{}, source: src, +======= + return &Errors{ + errors: []*Error{}, + source: source, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) maxErrorsToReport: 100, } } @@ -46,11 +52,14 @@ func (e *Errors) ReportError(l Location, format string, args ...any) { e.ReportErrorAtID(0, l, format, args...) } +<<<<<<< HEAD // ReportErrorString records an error at a source location. func (e *Errors) ReportErrorString(l Location, message string) { e.ReportErrorAtID(0, l, "%s", message) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // ReportErrorAtID records an error at a source location and expression id. func (e *Errors) ReportErrorAtID(id int64, l Location, format string, args ...any) { e.numErrors++ diff --git a/vendor/github.com/google/cel-go/common/runes/buffer.go b/vendor/github.com/google/cel-go/common/runes/buffer.go index 021198224d..d13679c044 100644 --- a/vendor/github.com/google/cel-go/common/runes/buffer.go +++ b/vendor/github.com/google/cel-go/common/runes/buffer.go @@ -127,6 +127,7 @@ var nilBuffer = &emptyBuffer{} // elements of the byte or uint16 array, and continue. The underlying storage is an rune array // containing any Unicode character. 
func NewBuffer(data string) Buffer { +<<<<<<< HEAD buf, _ := newBuffer(data, false) return buf } @@ -159,16 +160,31 @@ func newBuffer(data string, lines bool) (Buffer, []int32) { buf16 []uint16 buf32 []rune offs []int32 +======= + if len(data) == 0 { + return nilBuffer + } + var ( + idx = 0 + buf8 = make([]byte, 0, len(data)) + buf16 []uint16 + buf32 []rune +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) for idx < len(data) { r, s := utf8.DecodeRuneInString(data[idx:]) idx += s +<<<<<<< HEAD if lines && r == '\n' { offs = append(offs, off+1) } if r < utf8.RuneSelf { buf8 = append(buf8, byte(r)) off++ +======= + if r < utf8.RuneSelf { + buf8 = append(buf8, byte(r)) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) continue } if r <= 0xffff { @@ -178,7 +194,10 @@ func newBuffer(data string, lines bool) (Buffer, []int32) { } buf8 = nil buf16 = append(buf16, uint16(r)) +<<<<<<< HEAD off++ +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) goto copy16 } buf32 = make([]rune, len(buf8), len(data)) @@ -187,6 +206,7 @@ func newBuffer(data string, lines bool) (Buffer, []int32) { } buf8 = nil buf32 = append(buf32, r) +<<<<<<< HEAD off++ goto copy32 } @@ -196,16 +216,28 @@ func newBuffer(data string, lines bool) (Buffer, []int32) { return &asciiBuffer{ arr: buf8, }, offs +======= + goto copy32 + } + return &asciiBuffer{ + arr: buf8, + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) copy16: for idx < len(data) { r, s := utf8.DecodeRuneInString(data[idx:]) idx += s +<<<<<<< HEAD if lines && r == '\n' { offs = append(offs, off+1) } if r <= 0xffff { buf16 = append(buf16, uint16(r)) off++ +======= + if r <= 0xffff { + buf16 = append(buf16, uint16(r)) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) continue } buf32 = make([]rune, len(buf16), len(data)) @@ -214,6 +246,7 @@ copy16: } buf16 = nil buf32 = append(buf32, r) +<<<<<<< HEAD off++ goto copy32 } @@ -223,10 +256,18 @@ copy16: return &basicBuffer{ arr: buf16, }, offs +======= + goto copy32 + } + return &basicBuffer{ + arr: buf16, + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) copy32: for idx < len(data) { r, s := utf8.DecodeRuneInString(data[idx:]) idx += s +<<<<<<< HEAD if lines && r == '\n' { offs = append(offs, off+1) } @@ -239,4 +280,11 @@ copy32: return &supplementalBuffer{ arr: buf32, }, offs +======= + buf32 = append(buf32, r) + } + return &supplementalBuffer{ + arr: buf32, + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/google/cel-go/common/source.go b/vendor/github.com/google/cel-go/common/source.go index ec79cb5454..3d387a457c 100644 --- a/vendor/github.com/google/cel-go/common/source.go +++ b/vendor/github.com/google/cel-go/common/source.go @@ -15,6 +15,12 @@ package common import ( +<<<<<<< HEAD +======= + "strings" + "unicode/utf8" + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/google/cel-go/common/runes" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" @@ -77,11 +83,25 @@ func NewTextSource(text string) Source { // NewStringSource creates a new Source from the given contents and description. func NewStringSource(contents string, description string) Source { // Compute line offsets up front as they are referred to frequently. 
+<<<<<<< HEAD buf, offs := runes.NewBufferAndLineOffsets(contents) return &sourceImpl{ Buffer: buf, description: description, lineOffsets: offs, +======= + lines := strings.Split(contents, "\n") + offsets := make([]int32, len(lines)) + var offset int32 + for i, line := range lines { + offset = offset + int32(utf8.RuneCountInString(line)) + 1 + offsets[int32(i)] = offset + } + return &sourceImpl{ + Buffer: runes.NewBuffer(contents), + description: description, + lineOffsets: offsets, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -163,8 +183,14 @@ func (s *sourceImpl) findLine(characterOffset int32) (int32, int32) { for _, lineOffset := range s.lineOffsets { if lineOffset > characterOffset { break +<<<<<<< HEAD } line++ +======= + } else { + line++ + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if line == 1 { return line, 0 diff --git a/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel b/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel index b55f452156..1dea5de7ce 100644 --- a/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel +++ b/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel @@ -12,6 +12,10 @@ go_library( ], importpath = "github.com/google/cel-go/common/stdlib", deps = [ +<<<<<<< HEAD +======= + "//checker/decls:go_default_library", +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "//common/decls:go_default_library", "//common/functions:go_default_library", "//common/operators:go_default_library", @@ -19,5 +23,9 @@ go_library( "//common/types:go_default_library", "//common/types/ref:go_default_library", "//common/types/traits:go_default_library", +<<<<<<< HEAD +======= + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ], ) \ No newline at end of file diff --git a/vendor/github.com/google/cel-go/common/stdlib/standard.go b/vendor/github.com/google/cel-go/common/stdlib/standard.go index 1550c17863..fc3e2e1f3c 100644 --- a/vendor/github.com/google/cel-go/common/stdlib/standard.go +++ b/vendor/github.com/google/cel-go/common/stdlib/standard.go @@ -23,11 +23,22 @@ import ( "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" +<<<<<<< HEAD +======= + + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) var ( stdFunctions []*decls.FunctionDecl +<<<<<<< HEAD + stdTypes []*decls.VariableDecl +======= + stdFnDecls []*exprpb.Decl stdTypes []*decls.VariableDecl + stdTypeDecls []*exprpb.Decl +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) func init() { @@ -51,6 +62,18 @@ func init() { decls.TypeVariable(types.UintType), } +<<<<<<< HEAD +======= + stdTypeDecls = make([]*exprpb.Decl, 0, len(stdTypes)) + for _, stdType := range stdTypes { + typeVar, err := decls.VariableDeclToExprDecl(stdType) + if err != nil { + panic(err) + } + stdTypeDecls = append(stdTypeDecls, typeVar) + } + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) stdFunctions = []*decls.FunctionDecl{ // Logical operators. Special-cased within the interpreter. // Note, the singleton binding prevents extensions from overriding the operator behavior. 
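// A minimal sketch of the init-time caching pattern the WIP side uses in
// standard.go above: native declarations are converted to the legacy
// protobuf form once, inside init, and the deprecated accessors then return
// the cached slice. The two struct types and the conversion step below are
// placeholders for decls.VariableDecl/decls.FunctionDecl and the
// decl-to-proto conversions, which can fail and therefore panic at init in
// the vendored code.

package main

import "fmt"

type nativeDecl struct{ name string }
type legacyDecl struct{ Name string }

var (
	stdNatives = []nativeDecl{{"size"}, {"contains"}}
	stdLegacy  []legacyDecl
)

func init() {
	stdLegacy = make([]legacyDecl, 0, len(stdNatives))
	for _, n := range stdNatives {
		// Stand-in for the real conversion (decls.FunctionDeclToExprDecl).
		stdLegacy = append(stdLegacy, legacyDecl{Name: n.name})
	}
}

// FunctionExprDecls mirrors the deprecated accessor: it serves the cached
// conversion instead of recomputing it on every call.
func FunctionExprDecls() []legacyDecl { return stdLegacy }

func main() { fmt.Println(FunctionExprDecls()) }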
@@ -563,6 +586,21 @@ func init() { decls.MemberOverload(overloads.DurationToMilliseconds, argTypes(types.DurationType), types.IntType)), } +<<<<<<< HEAD +======= + + stdFnDecls = make([]*exprpb.Decl, 0, len(stdFunctions)) + for _, fn := range stdFunctions { + if fn.IsDeclarationDisabled() { + continue + } + ed, err := decls.FunctionDeclToExprDecl(fn) + if err != nil { + panic(err) + } + stdFnDecls = append(stdFnDecls, ed) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Functions returns the set of standard library function declarations and definitions for CEL. @@ -570,11 +608,33 @@ func Functions() []*decls.FunctionDecl { return stdFunctions } +<<<<<<< HEAD +======= +// FunctionExprDecls returns the legacy style protobuf-typed declarations for all functions and overloads +// in the CEL standard environment. +// +// Deprecated: use Functions +func FunctionExprDecls() []*exprpb.Decl { + return stdFnDecls +} + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Types returns the set of standard library types for CEL. func Types() []*decls.VariableDecl { return stdTypes } +<<<<<<< HEAD +======= +// TypeExprDecls returns the legacy style protobuf-typed declarations for all types in the CEL +// standard environment. +// +// Deprecated: use Types +func TypeExprDecls() []*exprpb.Decl { + return stdTypeDecls +} + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func notStrictlyFalse(value ref.Val) ref.Val { if types.IsBool(value) { return value diff --git a/vendor/github.com/google/cel-go/common/types/BUILD.bazel b/vendor/github.com/google/cel-go/common/types/BUILD.bazel index 8f010fae44..822ce2f367 100644 --- a/vendor/github.com/google/cel-go/common/types/BUILD.bazel +++ b/vendor/github.com/google/cel-go/common/types/BUILD.bazel @@ -40,12 +40,18 @@ go_library( "//common/types/ref:go_default_library", "//common/types/traits:go_default_library", "@com_github_stoewer_go_strcase//:go_default_library", +<<<<<<< HEAD "@dev_cel_expr//:expr", +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//encoding/protojson:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//reflect/protoreflect:go_default_library", +<<<<<<< HEAD "@org_golang_google_protobuf//types/dynamicpb:go_default_library", +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "@org_golang_google_protobuf//types/known/anypb:go_default_library", "@org_golang_google_protobuf//types/known/durationpb:go_default_library", "@org_golang_google_protobuf//types/known/structpb:go_default_library", diff --git a/vendor/github.com/google/cel-go/common/types/bytes.go b/vendor/github.com/google/cel-go/common/types/bytes.go index 7e813e291b..2950c3557e 100644 --- a/vendor/github.com/google/cel-go/common/types/bytes.go +++ b/vendor/github.com/google/cel-go/common/types/bytes.go @@ -58,6 +58,7 @@ func (b Bytes) Compare(other ref.Val) ref.Val { // ConvertToNative implements the ref.Val interface method. 
func (b Bytes) ConvertToNative(typeDesc reflect.Type) (any, error) { switch typeDesc.Kind() { +<<<<<<< HEAD case reflect.Array: if len(b) != typeDesc.Len() { return nil, fmt.Errorf("[%d]byte not assignable to [%d]byte array", len(b), typeDesc.Len()) @@ -69,6 +70,9 @@ func (b Bytes) ConvertToNative(typeDesc reflect.Type) (any, error) { } return refArr.Interface(), nil case reflect.Slice: +======= + case reflect.Array, reflect.Slice: +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return reflect.ValueOf(b).Convert(typeDesc).Interface(), nil case reflect.Ptr: switch typeDesc { diff --git a/vendor/github.com/google/cel-go/common/types/err.go b/vendor/github.com/google/cel-go/common/types/err.go index 17ab1a95e5..5738db7af1 100644 --- a/vendor/github.com/google/cel-go/common/types/err.go +++ b/vendor/github.com/google/cel-go/common/types/err.go @@ -62,12 +62,15 @@ func NewErr(format string, args ...any) ref.Val { return &Err{error: fmt.Errorf(format, args...)} } +<<<<<<< HEAD // NewErrFromString creates a new Err with the provided message. // TODO: Audit the use of this function and standardize the error messages and codes. func NewErrFromString(message string) ref.Val { return &Err{error: errors.New(message)} } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // NewErrWithNodeID creates a new Err described by the format string and args. // TODO: Audit the use of this function and standardize the error messages and codes. func NewErrWithNodeID(id int64, format string, args ...any) ref.Val { diff --git a/vendor/github.com/google/cel-go/common/types/list.go b/vendor/github.com/google/cel-go/common/types/list.go index 7e68a5daf3..c665e8e100 100644 --- a/vendor/github.com/google/cel-go/common/types/list.go +++ b/vendor/github.com/google/cel-go/common/types/list.go @@ -243,7 +243,11 @@ func (l *baseList) Equal(other ref.Val) ref.Val { func (l *baseList) Get(index ref.Val) ref.Val { ind, err := IndexOrError(index) if err != nil { +<<<<<<< HEAD return ValOrErr(index, "%v", err) +======= + return ValOrErr(index, err.Error()) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if ind < 0 || ind >= l.size { return NewErr("index '%d' out of range in list size '%d'", ind, l.Size()) @@ -256,6 +260,7 @@ func (l *baseList) IsZeroValue() bool { return l.size == 0 } +<<<<<<< HEAD // Fold calls the FoldEntry method for each (index, value) pair in the list. func (l *baseList) Fold(f traits.Folder) { for i := 0; i < l.size; i++ { @@ -265,6 +270,8 @@ func (l *baseList) Fold(f traits.Folder) { } } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Iterator implements the traits.Iterable interface method. func (l *baseList) Iterator() traits.Iterator { return newListIterator(l) @@ -427,7 +434,11 @@ func (l *concatList) Equal(other ref.Val) ref.Val { func (l *concatList) Get(index ref.Val) ref.Val { ind, err := IndexOrError(index) if err != nil { +<<<<<<< HEAD return ValOrErr(index, "%v", err) +======= + return ValOrErr(index, err.Error()) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } i := Int(ind) if i < l.prevList.Size().(Int) { @@ -442,6 +453,7 @@ func (l *concatList) IsZeroValue() bool { return l.Size().(Int) == 0 } +<<<<<<< HEAD // Fold calls the FoldEntry method for each (index, value) pair in the list. 
func (l *concatList) Fold(f traits.Folder) { for i := Int(0); i < l.Size().(Int); i++ { @@ -451,6 +463,8 @@ func (l *concatList) Fold(f traits.Folder) { } } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Iterator implements the traits.Iterable interface method. func (l *concatList) Iterator() traits.Iterator { return newListIterator(l) @@ -545,6 +559,7 @@ func IndexOrError(index ref.Val) (int, error) { return -1, fmt.Errorf("unsupported index type '%s' in list", index.Type()) } } +<<<<<<< HEAD // ToFoldableList will create a Foldable version of a list suitable for key-value pair iteration. // @@ -572,3 +587,5 @@ func (l interopFoldableList) Fold(f traits.Folder) { } } } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/google/cel-go/common/types/map.go b/vendor/github.com/google/cel-go/common/types/map.go index cb6cce78b0..44378d6086 100644 --- a/vendor/github.com/google/cel-go/common/types/map.go +++ b/vendor/github.com/google/cel-go/common/types/map.go @@ -94,6 +94,7 @@ func NewProtoMap(adapter Adapter, value *pb.Map) traits.Mapper { } } +<<<<<<< HEAD // NewMutableMap constructs a mutable map from an adapter and a set of map values. func NewMutableMap(adapter Adapter, mutableValues map[ref.Val]ref.Val) traits.MutableMapper { mutableCopy := make(map[ref.Val]ref.Val, len(mutableValues)) @@ -112,6 +113,8 @@ func NewMutableMap(adapter Adapter, mutableValues map[ref.Val]ref.Val) traits.Mu return m } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // mapAccessor is a private interface for finding values within a map and iterating over the keys. // This interface implements portions of the API surface area required by the traits.Mapper // interface. @@ -123,9 +126,12 @@ type mapAccessor interface { // Iterator returns an Iterator over the map key set. Iterator() traits.Iterator +<<<<<<< HEAD // Fold calls the FoldEntry method for each (key, value) pair in the map. Fold(traits.Folder) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // baseMap is a reflection based map implementation designed to handle a variety of map-like types. @@ -328,6 +334,7 @@ func (m *baseMap) Value() any { return m.value } +<<<<<<< HEAD // mutableMap holds onto a set of mutable values which are used for intermediate computations. type mutableMap struct { *baseMap @@ -350,6 +357,8 @@ func (m *mutableMap) ToImmutableMap() traits.Mapper { return NewRefValMap(m.Adapter, m.mutableValues) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func newJSONStructAccessor(adapter Adapter, st map[string]*structpb.Value) mapAccessor { return &jsonStructAccessor{ Adapter: adapter, @@ -393,6 +402,7 @@ func (a *jsonStructAccessor) Iterator() traits.Iterator { } } +<<<<<<< HEAD // Fold calls the FoldEntry method for each (key, value) pair in the map. func (a *jsonStructAccessor) Fold(f traits.Folder) { for k, v := range a.st { @@ -402,6 +412,8 @@ func (a *jsonStructAccessor) Fold(f traits.Folder) { } } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func newReflectMapAccessor(adapter Adapter, value reflect.Value) mapAccessor { keyType := value.Type().Key() return &reflectMapAccessor{ @@ -476,6 +488,7 @@ func (m *reflectMapAccessor) Iterator() traits.Iterator { } } +<<<<<<< HEAD // Fold calls the FoldEntry method for each (key, value) pair in the map. 
func (m *reflectMapAccessor) Fold(f traits.Folder) { mapRange := m.refValue.MapRange() @@ -486,6 +499,8 @@ func (m *reflectMapAccessor) Fold(f traits.Folder) { } } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func newRefValMapAccessor(mapVal map[ref.Val]ref.Val) mapAccessor { return &refValMapAccessor{mapVal: mapVal} } @@ -539,6 +554,7 @@ func (a *refValMapAccessor) Iterator() traits.Iterator { } } +<<<<<<< HEAD // Fold calls the FoldEntry method for each (key, value) pair in the map. func (a *refValMapAccessor) Fold(f traits.Folder) { for k, v := range a.mapVal { @@ -548,6 +564,8 @@ func (a *refValMapAccessor) Fold(f traits.Folder) { } } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func newStringMapAccessor(strMap map[string]string) mapAccessor { return &stringMapAccessor{mapVal: strMap} } @@ -586,6 +604,7 @@ func (a *stringMapAccessor) Iterator() traits.Iterator { } } +<<<<<<< HEAD // Fold calls the FoldEntry method for each (key, value) pair in the map. func (a *stringMapAccessor) Fold(f traits.Folder) { for k, v := range a.mapVal { @@ -595,6 +614,8 @@ func (a *stringMapAccessor) Fold(f traits.Folder) { } } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func newStringIfaceMapAccessor(adapter Adapter, mapVal map[string]any) mapAccessor { return &stringIfaceMapAccessor{ Adapter: adapter, @@ -637,6 +658,7 @@ func (a *stringIfaceMapAccessor) Iterator() traits.Iterator { } } +<<<<<<< HEAD // Fold calls the FoldEntry method for each (key, value) pair in the map. func (a *stringIfaceMapAccessor) Fold(f traits.Folder) { for k, v := range a.mapVal { @@ -646,6 +668,8 @@ func (a *stringIfaceMapAccessor) Fold(f traits.Folder) { } } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // protoMap is a specialized, separate implementation of the traits.Mapper interfaces tailored to // accessing protoreflect.Map values. type protoMap struct { @@ -858,6 +882,7 @@ func (m *protoMap) Iterator() traits.Iterator { } } +<<<<<<< HEAD // Fold calls the FoldEntry method for each (key, value) pair in the map. func (m *protoMap) Fold(f traits.Folder) { m.value.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { @@ -865,6 +890,8 @@ func (m *protoMap) Fold(f traits.Folder) { }) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Size returns the number of entries in the protoreflect.Map. func (m *protoMap) Size() ref.Val { return Int(m.value.Len()) @@ -948,6 +975,7 @@ func (it *stringKeyIterator) Next() ref.Val { } return nil } +<<<<<<< HEAD // ToFoldableMap will create a Foldable version of a map suitable for key-value pair iteration. // @@ -1000,3 +1028,5 @@ func InsertMapKeyValue(m traits.Mapper, k, v ref.Val) ref.Val { } return NewErr("insert failed: key %v already exists", k) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/google/cel-go/common/types/null.go b/vendor/github.com/google/cel-go/common/types/null.go index 36514ff200..0a330ca4ec 100644 --- a/vendor/github.com/google/cel-go/common/types/null.go +++ b/vendor/github.com/google/cel-go/common/types/null.go @@ -35,8 +35,11 @@ var ( // golang reflect type for Null values. nullReflectType = reflect.TypeOf(NullValue) +<<<<<<< HEAD protoIfaceType = reflect.TypeOf((*proto.Message)(nil)).Elem() +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // ConvertToNative implements ref.Val.ConvertToNative. 
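// A minimal sketch of the Fold pattern the HEAD side threads through the
// list and map hunks above: a Folder visits each (key, value) entry and
// returns false to stop the fold early. The interfaces below are local
// stand-ins for traits.Folder and the Foldable accessors.

package main

import "fmt"

type folder interface {
	FoldEntry(key, val any) bool
}

type foldableMap map[string]int

func (m foldableMap) Fold(f folder) {
	for k, v := range m {
		if !f.FoldEntry(k, v) {
			break
		}
	}
}

// keyCollector gathers keys and halts the fold once max entries are seen.
type keyCollector struct {
	keys []string
	max  int
}

func (c *keyCollector) FoldEntry(key, _ any) bool {
	c.keys = append(c.keys, key.(string))
	return len(c.keys) < c.max
}

func main() {
	c := &keyCollector{max: 2}
	foldableMap{"a": 1, "b": 2, "c": 3}.Fold(c)
	fmt.Println(len(c.keys)) // 2
}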
@@ -63,6 +66,7 @@ func (n Null) ConvertToNative(typeDesc reflect.Type) (any, error) { return structpb.NewNullValue(), nil case boolWrapperType, byteWrapperType, doubleWrapperType, floatWrapperType, int32WrapperType, int64WrapperType, stringWrapperType, uint32WrapperType, +<<<<<<< HEAD uint64WrapperType, durationValueType, timestampValueType, protoIfaceType: return nil, nil case jsonListValueType, jsonStructType: @@ -71,6 +75,10 @@ func (n Null) ConvertToNative(typeDesc reflect.Type) (any, error) { if typeDesc.Implements(protoIfaceType) { return nil, nil } +======= + uint64WrapperType: + return nil, nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } case reflect.Interface: nv := n.Value() diff --git a/vendor/github.com/google/cel-go/common/types/object.go b/vendor/github.com/google/cel-go/common/types/object.go index 5377bff8de..b664c27b69 100644 --- a/vendor/github.com/google/cel-go/common/types/object.go +++ b/vendor/github.com/google/cel-go/common/types/object.go @@ -151,7 +151,11 @@ func (o *protoObj) Get(index ref.Val) ref.Val { } fv, err := fd.GetFrom(o.value) if err != nil { +<<<<<<< HEAD return NewErrFromString(err.Error()) +======= + return NewErr(err.Error()) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return o.NativeToValue(fv) } diff --git a/vendor/github.com/google/cel-go/common/types/pb/type.go b/vendor/github.com/google/cel-go/common/types/pb/type.go index bdd474c95a..d6458a858a 100644 --- a/vendor/github.com/google/cel-go/common/types/pb/type.go +++ b/vendor/github.com/google/cel-go/common/types/pb/type.go @@ -427,6 +427,7 @@ func unwrap(desc description, msg proto.Message) (any, bool, error) { return structpb.NullValue_NULL_VALUE, true, nil } case *wrapperspb.BoolValue: +<<<<<<< HEAD if v == nil { return nil, true, nil } @@ -470,6 +471,24 @@ func unwrap(desc description, msg proto.Message) (any, bool, error) { if v == nil { return nil, true, nil } +======= + return v.GetValue(), true, nil + case *wrapperspb.BytesValue: + return v.GetValue(), true, nil + case *wrapperspb.DoubleValue: + return v.GetValue(), true, nil + case *wrapperspb.FloatValue: + return float64(v.GetValue()), true, nil + case *wrapperspb.Int32Value: + return int64(v.GetValue()), true, nil + case *wrapperspb.Int64Value: + return v.GetValue(), true, nil + case *wrapperspb.StringValue: + return v.GetValue(), true, nil + case *wrapperspb.UInt32Value: + return uint64(v.GetValue()), true, nil + case *wrapperspb.UInt64Value: +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return v.GetValue(), true, nil } return msg, false, nil diff --git a/vendor/github.com/google/cel-go/common/types/provider.go b/vendor/github.com/google/cel-go/common/types/provider.go index 936a4e28b2..4a89b97f3b 100644 --- a/vendor/github.com/google/cel-go/common/types/provider.go +++ b/vendor/github.com/google/cel-go/common/types/provider.go @@ -585,6 +585,7 @@ func nativeToValue(a Adapter, value any) (ref.Val, bool) { refKind := refValue.Kind() switch refKind { case reflect.Array, reflect.Slice: +<<<<<<< HEAD if refValue.Type().Elem() == reflect.TypeOf(byte(0)) { if refValue.CanAddr() { return Bytes(refValue.Bytes()), true @@ -593,6 +594,8 @@ func nativeToValue(a Adapter, value any) (ref.Val, bool) { tmp.Elem().Set(refValue) return Bytes(tmp.Elem().Bytes()), true } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return NewDynamicList(a, v), true case reflect.Map: return NewDynamicMap(a, v), true diff --git a/vendor/github.com/google/cel-go/common/types/traits/iterator.go 
b/vendor/github.com/google/cel-go/common/types/traits/iterator.go index 91c10f08fc..dc325ee34a 100644 --- a/vendor/github.com/google/cel-go/common/types/traits/iterator.go +++ b/vendor/github.com/google/cel-go/common/types/traits/iterator.go @@ -34,6 +34,7 @@ type Iterator interface { // Next returns the next element. Next() ref.Val } +<<<<<<< HEAD // Foldable aggregate types support iteration over (key, value) or (index, value) pairs. type Foldable interface { @@ -47,3 +48,5 @@ type Folder interface { // If the output is true, continue folding. Otherwise, terminate the fold. FoldEntry(key, val any) bool } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/google/cel-go/common/types/traits/lister.go b/vendor/github.com/google/cel-go/common/types/traits/lister.go index e54781a602..6256a4ed52 100644 --- a/vendor/github.com/google/cel-go/common/types/traits/lister.go +++ b/vendor/github.com/google/cel-go/common/types/traits/lister.go @@ -27,9 +27,12 @@ type Lister interface { } // MutableLister interface which emits an immutable result after an intermediate computation. +<<<<<<< HEAD // // Note, this interface is intended only to be used within Comprehensions where the mutable // value is not directly observable within the user-authored CEL expression. +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type MutableLister interface { Lister ToImmutableList() Lister diff --git a/vendor/github.com/google/cel-go/common/types/traits/mapper.go b/vendor/github.com/google/cel-go/common/types/traits/mapper.go index d13333f3f6..f844e75c5f 100644 --- a/vendor/github.com/google/cel-go/common/types/traits/mapper.go +++ b/vendor/github.com/google/cel-go/common/types/traits/mapper.go @@ -31,6 +31,7 @@ type Mapper interface { // (Unknown|Err, false). Find(key ref.Val) (ref.Val, bool) } +<<<<<<< HEAD // MutableMapper interface which emits an immutable result after an intermediate computation. // @@ -46,3 +47,5 @@ type MutableMapper interface { // ToImmutableMap converts a mutable map into an immutable map. ToImmutableMap() Mapper } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/google/cel-go/common/types/traits/traits.go b/vendor/github.com/google/cel-go/common/types/traits/traits.go index 51a09df564..6e3f8bbb00 100644 --- a/vendor/github.com/google/cel-go/common/types/traits/traits.go +++ b/vendor/github.com/google/cel-go/common/types/traits/traits.go @@ -59,6 +59,7 @@ const ( // SizerType types support the size() method. SizerType +<<<<<<< HEAD // SubtractorType types support '-' operations. SubtractorType @@ -76,4 +77,8 @@ const ( // // The MapperType is syntactic sugar and not intended to be a perfect reflection of all Map operators. MapperType = ContainerType | IndexerType | IterableType | SizerType +======= + // SubtractorType type support '-' operations. 
+ SubtractorType +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) diff --git a/vendor/github.com/google/cel-go/common/types/types.go b/vendor/github.com/google/cel-go/common/types/types.go index f419beabd0..85ddf981aa 100644 --- a/vendor/github.com/google/cel-go/common/types/types.go +++ b/vendor/github.com/google/cel-go/common/types/types.go @@ -19,13 +19,19 @@ import ( "reflect" "strings" +<<<<<<< HEAD "google.golang.org/protobuf/proto" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) chkdecls "github.com/google/cel-go/checker/decls" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" +<<<<<<< HEAD celpb "cel.dev/expr" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) @@ -669,6 +675,7 @@ func TypeToExprType(t *Type) (*exprpb.Type, error) { // ExprTypeToType converts a protobuf CEL type representation to a CEL-native type representation. func ExprTypeToType(t *exprpb.Type) (*Type, error) { +<<<<<<< HEAD return AlphaProtoAsType(t) } @@ -690,28 +697,52 @@ func ProtoAsType(t *celpb.Type) (*Type, error) { paramTypes := make([]*Type, len(t.GetAbstractType().GetParameterTypes())) for i, p := range t.GetAbstractType().GetParameterTypes() { pt, err := ProtoAsType(p) +======= + switch t.GetTypeKind().(type) { + case *exprpb.Type_Dyn: + return DynType, nil + case *exprpb.Type_AbstractType_: + paramTypes := make([]*Type, len(t.GetAbstractType().GetParameterTypes())) + for i, p := range t.GetAbstractType().GetParameterTypes() { + pt, err := ExprTypeToType(p) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } paramTypes[i] = pt } return NewOpaqueType(t.GetAbstractType().GetName(), paramTypes...), nil +<<<<<<< HEAD case *celpb.Type_ListType_: et, err := ProtoAsType(t.GetListType().GetElemType()) +======= + case *exprpb.Type_ListType_: + et, err := ExprTypeToType(t.GetListType().GetElemType()) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } return NewListType(et), nil +<<<<<<< HEAD case *celpb.Type_MapType_: kt, err := ProtoAsType(t.GetMapType().GetKeyType()) if err != nil { return nil, err } vt, err := ProtoAsType(t.GetMapType().GetValueType()) +======= + case *exprpb.Type_MapType_: + kt, err := ExprTypeToType(t.GetMapType().GetKeyType()) + if err != nil { + return nil, err + } + vt, err := ExprTypeToType(t.GetMapType().GetValueType()) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } return NewMapType(kt, vt), nil +<<<<<<< HEAD case *celpb.Type_MessageType: return NewObjectType(t.GetMessageType()), nil case *celpb.Type_Null: @@ -729,21 +760,49 @@ func ProtoAsType(t *celpb.Type) (*Type, error) { case celpb.Type_STRING: return StringType, nil case celpb.Type_UINT64: +======= + case *exprpb.Type_MessageType: + return NewObjectType(t.GetMessageType()), nil + case *exprpb.Type_Null: + return NullType, nil + case *exprpb.Type_Primitive: + switch t.GetPrimitive() { + case exprpb.Type_BOOL: + return BoolType, nil + case exprpb.Type_BYTES: + return BytesType, nil + case exprpb.Type_DOUBLE: + return DoubleType, nil + case exprpb.Type_INT64: + return IntType, nil + case exprpb.Type_STRING: + return StringType, nil + case exprpb.Type_UINT64: +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return UintType, nil default: return nil, fmt.Errorf("unsupported primitive type: %v", t) } +<<<<<<< HEAD case *celpb.Type_TypeParam: return 
NewTypeParamType(t.GetTypeParam()), nil case *celpb.Type_Type: if t.GetType().GetTypeKind() != nil { p, err := ProtoAsType(t.GetType()) +======= + case *exprpb.Type_TypeParam: + return NewTypeParamType(t.GetTypeParam()), nil + case *exprpb.Type_Type: + if t.GetType().GetTypeKind() != nil { + p, err := ExprTypeToType(t.GetType()) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } return NewTypeTypeWithParam(p), nil } return TypeType, nil +<<<<<<< HEAD case *celpb.Type_WellKnown: switch t.GetWellKnown() { case celpb.Type_ANY: @@ -751,23 +810,42 @@ func ProtoAsType(t *celpb.Type) (*Type, error) { case celpb.Type_DURATION: return DurationType, nil case celpb.Type_TIMESTAMP: +======= + case *exprpb.Type_WellKnown: + switch t.GetWellKnown() { + case exprpb.Type_ANY: + return AnyType, nil + case exprpb.Type_DURATION: + return DurationType, nil + case exprpb.Type_TIMESTAMP: +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return TimestampType, nil default: return nil, fmt.Errorf("unsupported well-known type: %v", t) } +<<<<<<< HEAD case *celpb.Type_Wrapper: t, err := ProtoAsType(&celpb.Type{TypeKind: &celpb.Type_Primitive{Primitive: t.GetWrapper()}}) +======= + case *exprpb.Type_Wrapper: + t, err := ExprTypeToType(&exprpb.Type{TypeKind: &exprpb.Type_Primitive{Primitive: t.GetWrapper()}}) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } return NewNullableType(t), nil +<<<<<<< HEAD case *celpb.Type_Error: +======= + case *exprpb.Type_Error: +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ErrorType, nil default: return nil, fmt.Errorf("unsupported type: %v", t) } } +<<<<<<< HEAD // TypeToProto converts from a CEL-native type representation to canonical CEL celpb.Type protobuf type. func TypeToProto(t *Type) (*celpb.Type, error) { exprType, err := TypeToExprType(t) @@ -781,6 +859,8 @@ func TypeToProto(t *Type) (*celpb.Type, error) { return &pbtype, nil } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func maybeWrapper(t *Type, pbType *exprpb.Type) *exprpb.Type { if t.IsAssignableType(NullType) { return chkdecls.NewWrapperType(pbType) @@ -806,6 +886,7 @@ func maybeForeignType(t ref.Type) *Type { return NewObjectType(t.TypeName(), traitMask) } +<<<<<<< HEAD func convertProto(src, dst proto.Message) error { pb, err := proto.Marshal(src) if err != nil { @@ -823,6 +904,8 @@ func primitiveType(primitive celpb.Type_PrimitiveType) *celpb.Type { } } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var ( checkedWellKnowns = map[string]*Type{ // Wrapper types. 
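// A minimal sketch of the convertProto helper the HEAD side adds in types.go
// above: two messages with identical wire layouts are converted by
// marshaling one and unmarshaling the bytes into the other. The wrapper
// messages below are merely wire-compatible stand-ins (both encode field 1
// as a varint) for the exprpb.Type/celpb.Type pair the vendored code
// round-trips.

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func convertProto(src, dst proto.Message) error {
	pb, err := proto.Marshal(src)
	if err != nil {
		return err
	}
	return proto.Unmarshal(pb, dst)
}

func main() {
	src := wrapperspb.UInt64(42)
	dst := &wrapperspb.Int64Value{}
	if err := convertProto(src, dst); err != nil {
		panic(err)
	}
	fmt.Println(dst.GetValue()) // 42
}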
diff --git a/vendor/github.com/google/cel-go/parser/gen/CELLexer.interp b/vendor/github.com/google/cel-go/parser/gen/CELLexer.interp
index 162d52188c..a7a6f50efd 100644
--- a/vendor/github.com/google/cel-go/parser/gen/CELLexer.interp
+++ b/vendor/github.com/google/cel-go/parser/gen/CELLexer.interp
@@ -136,4 +145,8 @@ mode names:
 DEFAULT_MODE
 
 atn:
-[4, 0, 37, 435, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2,
70e0318b1 ([WIP] add archivista storage backend)
 }
 
 // CELLexerInit initializes any static state used to implement CELLexer. By default the
 // antlr4 tool generates code that calls this automatically through the use of a sync.Once
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_parser.go b/vendor/github.com/google/cel-go/parser/gen/cel_parser.go
index 38693df58d..310f1a6c53 100644
--- a/vendor/github.com/google/cel-go/parser/gen/cel_parser.go
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_parser.go
NewCalcContext(p, _parentctx, _parentState) p.PushNewRecursionContext(localctx, _startState, CELParserRULE_calc) +<<<<<<< HEAD p.SetState(75) +======= + p.SetState(73) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !(p.Precpred(p.GetParserRuleContext(), 2)) { p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 2)", "")) goto errorExit } { +<<<<<<< HEAD p.SetState(76) +======= + p.SetState(74) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _lt = p.GetTokenStream().LT(1) @@ -1670,7 +2529,11 @@ func (p *CELParser) calc(_p int) (localctx ICalcContext) { _la = p.GetTokenStream().LA(1) +<<<<<<< HEAD if !((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&58720256) != 0) { +======= + if !(((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 58720256) != 0)) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _ri = p.GetErrorHandler().RecoverInline(p) localctx.(*CalcContext).op = _ri @@ -1680,6 +2543,7 @@ func (p *CELParser) calc(_p int) (localctx ICalcContext) { } } { +<<<<<<< HEAD p.SetState(77) p.calc(3) } @@ -1688,13 +2552,28 @@ func (p *CELParser) calc(_p int) (localctx ICalcContext) { localctx = NewCalcContext(p, _parentctx, _parentState) p.PushNewRecursionContext(localctx, _startState, CELParserRULE_calc) p.SetState(78) +======= + p.SetState(75) + p.calc(3) + } + + + case 2: + localctx = NewCalcContext(p, _parentctx, _parentState) + p.PushNewRecursionContext(localctx, _startState, CELParserRULE_calc) + p.SetState(76) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !(p.Precpred(p.GetParserRuleContext(), 1)) { p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", "")) goto errorExit } { +<<<<<<< HEAD p.SetState(79) +======= + p.SetState(77) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _lt = p.GetTokenStream().LT(1) @@ -1712,7 +2591,11 @@ func (p *CELParser) calc(_p int) (localctx ICalcContext) { } } { +<<<<<<< HEAD p.SetState(80) +======= + p.SetState(78) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.calc(2) } @@ -1721,18 +2604,32 @@ func (p *CELParser) calc(_p int) (localctx ICalcContext) { } } +<<<<<<< HEAD p.SetState(85) p.GetErrorHandler().Sync(p) if p.HasError() { goto errorExit } +======= + p.SetState(83) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 5, p.GetParserRuleContext()) if p.HasError() { goto errorExit } } +<<<<<<< HEAD errorExit: +======= + + + errorExit: +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if p.HasError() { v := p.GetError() localctx.SetException(v) @@ -1745,6 +2642,10 @@ errorExit: goto errorExit // Trick to prevent compiler error if the label is not used } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // IUnaryContext is an interface to support dynamic dispatch. 
type IUnaryContext interface { antlr.ParserRuleContext @@ -1767,7 +2668,11 @@ func NewEmptyUnaryContext() *UnaryContext { return p } +<<<<<<< HEAD func InitEmptyUnaryContext(p *UnaryContext) { +======= +func InitEmptyUnaryContext(p *UnaryContext) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_unary } @@ -1799,6 +2704,12 @@ func (s *UnaryContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) return antlr.TreesStringTree(s, ruleNames, recog) } +<<<<<<< HEAD +======= + + + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type LogicalNotContext struct { UnaryContext s19 antlr.Token @@ -1815,12 +2726,25 @@ func NewLogicalNotContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *Log return p } +<<<<<<< HEAD func (s *LogicalNotContext) GetS19() antlr.Token { return s.s19 } func (s *LogicalNotContext) SetS19(v antlr.Token) { s.s19 = v } func (s *LogicalNotContext) GetOps() []antlr.Token { return s.ops } +======= + +func (s *LogicalNotContext) GetS19() antlr.Token { return s.s19 } + + +func (s *LogicalNotContext) SetS19(v antlr.Token) { s.s19 = v } + + +func (s *LogicalNotContext) GetOps() []antlr.Token { return s.ops } + + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *LogicalNotContext) SetOps(v []antlr.Token) { s.ops = v } func (s *LogicalNotContext) GetRuleContext() antlr.RuleContext { @@ -1828,10 +2752,17 @@ func (s *LogicalNotContext) GetRuleContext() antlr.RuleContext { } func (s *LogicalNotContext) Member() IMemberContext { +<<<<<<< HEAD var t antlr.RuleContext for _, ctx := range s.GetChildren() { if _, ok := ctx.(IMemberContext); ok { t = ctx.(antlr.RuleContext) +======= + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IMemberContext); ok { + t = ctx.(antlr.RuleContext); +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) break } } @@ -1851,6 +2782,10 @@ func (s *LogicalNotContext) EXCLAM(i int) antlr.TerminalNode { return s.GetToken(CELParserEXCLAM, i) } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *LogicalNotContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterLogicalNot(s) @@ -1873,6 +2808,10 @@ func (s *LogicalNotContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type MemberExprContext struct { UnaryContext } @@ -1892,10 +2831,17 @@ func (s *MemberExprContext) GetRuleContext() antlr.RuleContext { } func (s *MemberExprContext) Member() IMemberContext { +<<<<<<< HEAD var t antlr.RuleContext for _, ctx := range s.GetChildren() { if _, ok := ctx.(IMemberContext); ok { t = ctx.(antlr.RuleContext) +======= + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IMemberContext); ok { + t = ctx.(antlr.RuleContext); +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) break } } @@ -1907,6 +2853,10 @@ func (s *MemberExprContext) Member() IMemberContext { return t.(IMemberContext) } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *MemberExprContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterMemberExpr(s) @@ -1929,6 +2879,10 @@ func (s *MemberExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 
([WIP] add archivista storage backend) type NegateContext struct { UnaryContext s18 antlr.Token @@ -1945,12 +2899,25 @@ func NewNegateContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *NegateC return p } +<<<<<<< HEAD +func (s *NegateContext) GetS18() antlr.Token { return s.s18 } + +func (s *NegateContext) SetS18(v antlr.Token) { s.s18 = v } + +func (s *NegateContext) GetOps() []antlr.Token { return s.ops } + +======= + func (s *NegateContext) GetS18() antlr.Token { return s.s18 } + func (s *NegateContext) SetS18(v antlr.Token) { s.s18 = v } + func (s *NegateContext) GetOps() []antlr.Token { return s.ops } + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *NegateContext) SetOps(v []antlr.Token) { s.ops = v } func (s *NegateContext) GetRuleContext() antlr.RuleContext { @@ -1958,10 +2925,17 @@ func (s *NegateContext) GetRuleContext() antlr.RuleContext { } func (s *NegateContext) Member() IMemberContext { +<<<<<<< HEAD var t antlr.RuleContext for _, ctx := range s.GetChildren() { if _, ok := ctx.(IMemberContext); ok { t = ctx.(antlr.RuleContext) +======= + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IMemberContext); ok { + t = ctx.(antlr.RuleContext); +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) break } } @@ -1981,6 +2955,10 @@ func (s *NegateContext) MINUS(i int) antlr.TerminalNode { return s.GetToken(CELParserMINUS, i) } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *NegateContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterNegate(s) @@ -2003,6 +2981,11 @@ func (s *NegateContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } +<<<<<<< HEAD +======= + + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (p *CELParser) Unary() (localctx IUnaryContext) { localctx = NewUnaryContext(p, p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 12, CELParserRULE_unary) @@ -2010,7 +2993,11 @@ func (p *CELParser) Unary() (localctx IUnaryContext) { var _alt int +<<<<<<< HEAD p.SetState(99) +======= + p.SetState(97) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.GetErrorHandler().Sync(p) if p.HasError() { goto errorExit @@ -2021,6 +3008,7 @@ func (p *CELParser) Unary() (localctx IUnaryContext) { localctx = NewMemberExprContext(p, localctx) p.EnterOuterAlt(localctx, 1) { +<<<<<<< HEAD p.SetState(86) p.member(0) } @@ -2029,26 +3017,50 @@ func (p *CELParser) Unary() (localctx IUnaryContext) { localctx = NewLogicalNotContext(p, localctx) p.EnterOuterAlt(localctx, 2) p.SetState(88) +======= + p.SetState(84) + p.member(0) + } + + + case 2: + localctx = NewLogicalNotContext(p, localctx) + p.EnterOuterAlt(localctx, 2) + p.SetState(86) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.GetErrorHandler().Sync(p) if p.HasError() { goto errorExit } _la = p.GetTokenStream().LA(1) +<<<<<<< HEAD for ok := true; ok; ok = _la == CELParserEXCLAM { { p.SetState(87) +======= + + for ok := true; ok; ok = _la == CELParserEXCLAM { + { + p.SetState(85) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _m = p.Match(CELParserEXCLAM) localctx.(*LogicalNotContext).s19 = _m if p.HasError() { +<<<<<<< HEAD // Recognition error - abort rule goto errorExit +======= + // Recognition error - abort rule + goto errorExit +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } localctx.(*LogicalNotContext).ops = append(localctx.(*LogicalNotContext).ops, 
			p.SetState(90)
			p.GetErrorHandler().Sync(p)
			if p.HasError() {
@@ -2065,6 +3077,26 @@ func (p *CELParser) Unary() (localctx IUnaryContext) {
	case 3:
		localctx = NewNegateContext(p, localctx)
		p.EnterOuterAlt(localctx, 3)
		p.SetState(94)
		p.GetErrorHandler().Sync(p)
		if p.HasError() {
			goto errorExit
		}
		_alt = 1
		for ok := true; ok; ok = _alt != 2 && _alt != antlr.ATNInvalidAltNumber {
			switch _alt {
			case 1:
				{
					p.SetState(93)

					var _m = p.Match(CELParserMINUS)

					localctx.(*NegateContext).s18 = _m
					if p.HasError() {
						// Recognition error - abort rule
						goto errorExit
					}
				}
				localctx.(*NegateContext).ops = append(localctx.(*NegateContext).ops, localctx.(*NegateContext).s18)

			default:
				p.SetError(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil))
				goto errorExit
			}

			p.SetState(96)
			p.GetErrorHandler().Sync(p)
			_alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 7, p.GetParserRuleContext())
			if p.HasError() {
@@ -2099,7 +3153,11 @@ func (p *CELParser) Unary() (localctx IUnaryContext) {
			}
		}
		{
			p.SetState(98)
			p.member(0)
		}

@@ -2107,6 +3165,10 @@ func (p *CELParser) Unary() (localctx IUnaryContext) {
		goto errorExit
	}

errorExit:
	if p.HasError() {
		v := p.GetError()
@@ -2120,6 +3182,10 @@ errorExit:
	goto errorExit // Trick to prevent compiler error if the label is not used
}

// IMemberContext is an interface to support dynamic dispatch.
type IMemberContext interface {
	antlr.ParserRuleContext
@@ -2142,7 +3208,11 @@ func NewEmptyMemberContext() *MemberContext {
	return p
}

func InitEmptyMemberContext(p *MemberContext) {
	antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
	p.RuleIndex = CELParserRULE_member
}
@@ -2174,12 +3244,25 @@ func (s *MemberContext) ToStringTree(ruleNames []string, recog antlr.Recognizer)
	return antlr.TreesStringTree(s, ruleNames, recog)
}

type MemberCallContext struct {
	MemberContext
	op   antlr.Token
	id   antlr.Token
	open antlr.Token
	args IExprListContext
}

func NewMemberCallContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *MemberCallContext {
@@ -2192,20 +3275,35 @@ func NewMemberCallContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *Mem
	return p
}

func (s *MemberCallContext) GetOp() antlr.Token { return s.op }

func (s *MemberCallContext) GetId() antlr.Token { return s.id }

func (s *MemberCallContext) GetOpen() antlr.Token { return s.open }

func (s *MemberCallContext) SetOp(v antlr.Token) { s.op = v }

func (s *MemberCallContext) SetId(v antlr.Token) { s.id = v }

func (s *MemberCallContext) SetOpen(v antlr.Token) { s.open = v }

func (s *MemberCallContext) GetArgs() IExprListContext { return s.args }

func (s *MemberCallContext) SetArgs(v IExprListContext) { s.args = v }

func (s *MemberCallContext) GetRuleContext() antlr.RuleContext {
@@ -2213,10 +3311,17 @@ func (s *MemberCallContext) GetRuleContext() antlr.RuleContext {
}

func (s *MemberCallContext) Member() IMemberContext {
	var t antlr.RuleContext
	for _, ctx := range s.GetChildren() {
		if _, ok := ctx.(IMemberContext); ok {
			t = ctx.(antlr.RuleContext)
			break
		}
	}
@@ -2245,10 +3350,17 @@ func (s *MemberCallContext) LPAREN() antlr.TerminalNode {
}

func (s *MemberCallContext) ExprList() IExprListContext {
	var t antlr.RuleContext
	for _, ctx := range s.GetChildren() {
		if _, ok := ctx.(IExprListContext); ok {
			t = ctx.(antlr.RuleContext)
			break
		}
	}
@@ -2260,6 +3372,10 @@ func (s *MemberCallContext) ExprList() IExprListContext {
	return t.(IExprListContext)
}

func (s *MemberCallContext) EnterRule(listener antlr.ParseTreeListener) {
	if listenerT, ok := listener.(CELListener); ok {
		listenerT.EnterMemberCall(s)
@@ -2282,11 +3398,20 @@ func (s *MemberCallContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
	}
}

type SelectContext struct {
	MemberContext
	op  antlr.Token
	opt antlr.Token
	id  IEscapeIdentContext
}

func NewSelectContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *SelectContext {
@@ -2299,27 +3424,48 @@ func NewSelectContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *SelectC
	return p
}

func (s *SelectContext) GetOp() antlr.Token { return s.op }

func (s *SelectContext) GetOpt() antlr.Token { return s.opt }

func (s *SelectContext) SetOp(v antlr.Token) { s.op = v }

func (s *SelectContext) SetOpt(v antlr.Token) { s.opt = v }

func (s *SelectContext) GetId() IEscapeIdentContext { return s.id }

func (s *SelectContext) SetId(v IEscapeIdentContext) { s.id = v }

func (s *SelectContext) GetRuleContext() antlr.RuleContext {
	return s
}

func (s *SelectContext) Member() IMemberContext {
	var t antlr.RuleContext
	for _, ctx := range s.GetChildren() {
		if _, ok := ctx.(IMemberContext); ok {
			t = ctx.(antlr.RuleContext)
			break
		}
	}
@@ -2335,6 +3481,7 @@ func (s *SelectContext) DOT() antlr.TerminalNode {
	return s.GetToken(CELParserDOT, 0)
}

func (s *SelectContext) EscapeIdent() IEscapeIdentContext {
	var t antlr.RuleContext
	for _, ctx := range s.GetChildren() {
		if _, ok := ctx.(IEscapeIdentContext); ok {
			t = ctx.(antlr.RuleContext)
@@ -2349,12 +3496,20 @@ func (s *SelectContext) EscapeIdent() IEscapeIdentContext {
			break
		}
	}

	return t.(IEscapeIdentContext)
}

func (s *SelectContext) QUESTIONMARK() antlr.TerminalNode {
	return s.GetToken(CELParserQUESTIONMARK, 0)
}

func (s *SelectContext) EnterRule(listener antlr.ParseTreeListener) {
	if listenerT, ok := listener.(CELListener); ok {
		listenerT.EnterSelect(s)
@@ -2377,6 +3532,10 @@ func (s *SelectContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
	}
}

type PrimaryExprContext struct {
	MemberContext
}
@@ -2396,10 +3555,17 @@ func (s *PrimaryExprContext) GetRuleContext() antlr.RuleContext {
}

func (s *PrimaryExprContext) Primary() IPrimaryContext {
	var t antlr.RuleContext
	for _, ctx := range s.GetChildren() {
		if _, ok := ctx.(IPrimaryContext); ok {
			t = ctx.(antlr.RuleContext)
			break
		}
	}
@@ -2411,6 +3577,10 @@ func (s *PrimaryExprContext) Primary() IPrimaryContext {
	return t.(IPrimaryContext)
}

func (s *PrimaryExprContext) EnterRule(listener antlr.ParseTreeListener) {
	if listenerT, ok := listener.(CELListener); ok {
		listenerT.EnterPrimaryExpr(s)
@@ -2433,11 +3603,20 @@ func (s *PrimaryExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{}
	}
}

type IndexContext struct {
	MemberContext
	op    antlr.Token
	opt   antlr.Token
	index IExprContext
}

func NewIndexContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *IndexContext {
@@ -2450,16 +3629,31 @@ func NewIndexContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *IndexCon
	return p
}

func (s *IndexContext) GetOp() antlr.Token { return s.op }

func (s *IndexContext) GetOpt() antlr.Token { return s.opt }

func (s *IndexContext) SetOp(v antlr.Token) { s.op = v }

func (s *IndexContext) SetOpt(v antlr.Token) { s.opt = v }

func (s *IndexContext) GetIndex() IExprContext { return s.index }

func (s *IndexContext) SetIndex(v IExprContext) { s.index = v }

func (s *IndexContext) GetRuleContext() antlr.RuleContext {
@@ -2467,10 +3661,17 @@ func (s *IndexContext) GetRuleContext() antlr.RuleContext {
}

func (s *IndexContext) Member() IMemberContext {
	var t antlr.RuleContext
	for _, ctx := range s.GetChildren() {
		if _, ok := ctx.(IMemberContext); ok {
			t = ctx.(antlr.RuleContext)
			break
		}
	}
@@ -2491,10 +3692,17 @@ func (s *IndexContext) LBRACKET() antlr.TerminalNode {
}

func (s *IndexContext) Expr() IExprContext {
	var t antlr.RuleContext
	for _, ctx := range s.GetChildren() {
		if _, ok := ctx.(IExprContext); ok {
			t = ctx.(antlr.RuleContext)
			break
		}
	}
@@ -2510,6 +3718,10 @@ func (s *IndexContext) QUESTIONMARK() antlr.TerminalNode {
	return s.GetToken(CELParserQUESTIONMARK, 0)
}

func (s *IndexContext) EnterRule(listener antlr.ParseTreeListener) {
	if listenerT, ok := listener.(CELListener); ok {
		listenerT.EnterIndex(s)
@@ -2532,6 +3744,11 @@ func (s *IndexContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
	}
}

func (p *CELParser) Member() (localctx IMemberContext) {
	return p.member(0)
}
@@ -2555,12 +3772,20 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) {
	_prevctx = localctx

	{
		p.SetState(102)
		p.Primary()
	}

	p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1))
	p.SetState(128)
	p.GetErrorHandler().Sync(p)
	if p.HasError() {
		goto errorExit
@@ -2575,7 +3800,11 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) {
				p.TriggerExitRuleEvent()
			}
			_prevctx = localctx
			p.SetState(126)
			p.GetErrorHandler().Sync(p)
			if p.HasError() {
				goto errorExit
			}

@@ -2585,45 +3814,74 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) {
			case 1:
				localctx = NewSelectContext(p, NewMemberContext(p, _parentctx, _parentState))
				p.PushNewRecursionContext(localctx, _startState, CELParserRULE_member)
				p.SetState(104)

				if !(p.Precpred(p.GetParserRuleContext(), 3)) {
					p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 3)", ""))
					goto errorExit
				}
				{
					p.SetState(105)

					var _m = p.Match(CELParserDOT)

					localctx.(*SelectContext).op = _m
					if p.HasError() {
						// Recognition error - abort rule
						goto errorExit
					}
				}
				p.SetState(107)
				p.GetErrorHandler().Sync(p)
				if p.HasError() {
					goto errorExit
				}
				_la = p.GetTokenStream().LA(1)

				if _la == CELParserQUESTIONMARK {
					{
						p.SetState(106)

						var _m = p.Match(CELParserQUESTIONMARK)

						localctx.(*SelectContext).opt = _m
						if p.HasError() {
							// Recognition error - abort rule
							goto errorExit
						}
					}
				}
				{
					p.SetState(109)

					var _x = p.EscapeIdent()

					localctx.(*SelectContext).id = _x
				}

@@ -2635,62 +3893,122 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) {
			case 2:
				localctx = NewMemberCallContext(p, NewMemberContext(p, _parentctx, _parentState))
				p.PushNewRecursionContext(localctx, _startState, CELParserRULE_member)
				p.SetState(110)

				if !(p.Precpred(p.GetParserRuleContext(), 2)) {
					p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 2)", ""))
					goto errorExit
				}
				{
					p.SetState(111)

					var _m = p.Match(CELParserDOT)

					localctx.(*MemberCallContext).op = _m
					if p.HasError() {
						// Recognition error - abort rule
						goto errorExit
					}
				}
				{
					p.SetState(112)

					var _m = p.Match(CELParserIDENTIFIER)

					localctx.(*MemberCallContext).id = _m
					if p.HasError() {
						// Recognition error - abort rule
						goto errorExit
					}
				}
				{
					p.SetState(113)

					var _m = p.Match(CELParserLPAREN)

					localctx.(*MemberCallContext).open = _m
					if p.HasError() {
						// Recognition error - abort rule
						goto errorExit
					}
				}
				p.SetState(115)
				p.GetErrorHandler().Sync(p)
				if p.HasError() {
					goto errorExit
				}
				_la = p.GetTokenStream().LA(1)

				if (int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&135762105344) != 0 {
					{
						p.SetState(114)

						var _x = p.ExprList()

						localctx.(*MemberCallContext).args = _x
					}
				}
				{
					p.SetState(117)
					p.Match(CELParserRPAREN)
					if p.HasError() {
						// Recognition error - abort rule
						goto errorExit
					}
				}

@@ -2703,44 +4021,84 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) {
			case 3:
				localctx = NewIndexContext(p, NewMemberContext(p, _parentctx, _parentState))
				p.PushNewRecursionContext(localctx, _startState, CELParserRULE_member)
				p.SetState(118)

				if !(p.Precpred(p.GetParserRuleContext(), 1)) {
					p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", ""))
					goto errorExit
				}
				{
					p.SetState(119)

					var _m = p.Match(CELParserLBRACKET)

					localctx.(*IndexContext).op = _m
					if p.HasError() {
						// Recognition error - abort rule
						goto errorExit
					}
				}
				p.SetState(121)
				p.GetErrorHandler().Sync(p)
				if p.HasError() {
					goto errorExit
				}
				_la = p.GetTokenStream().LA(1)

				if _la == CELParserQUESTIONMARK {
					{
						p.SetState(120)

						var _m = p.Match(CELParserQUESTIONMARK)

						localctx.(*IndexContext).opt = _m
						if p.HasError() {
							// Recognition error - abort rule
							goto errorExit
						}
					}
				}
				{
					p.SetState(123)

					var _x = p.Expr()

@@ -2753,6 +4111,21 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) {
					localctx.(*IndexContext).index = _x
				}
				{
					p.SetState(124)
					p.Match(CELParserRPRACKET)
					if p.HasError() {
						// Recognition error - abort rule
						goto errorExit
					}
				}

@@ -2761,18 +4134,32 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) {
			}
		}
		p.SetState(130)
		p.GetErrorHandler().Sync(p)
		if p.HasError() {
			goto errorExit
		}
		_alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 13, p.GetParserRuleContext())
		if p.HasError() {
			goto errorExit
		}
	}
errorExit:
	if p.HasError() {
		v := p.GetError()
		localctx.SetException(v)
@@ -2785,6 +4172,10 @@ errorExit:
	goto errorExit // Trick to prevent compiler error if the label is not used
}

// IPrimaryContext is an interface to support dynamic dispatch.
type IPrimaryContext interface {
	antlr.ParserRuleContext
@@ -2807,7 +4198,11 @@ func NewEmptyPrimaryContext() *PrimaryContext {
	return p
}

func InitEmptyPrimaryContext(p *PrimaryContext) {
	antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
	p.RuleIndex = CELParserRULE_primary
}
@@ -2839,10 +4234,20 @@ func (s *PrimaryContext) ToStringTree(ruleNames []string, recog antlr.Recognizer
	return antlr.TreesStringTree(s, ruleNames, recog)
}

type CreateListContext struct {
	PrimaryContext
	op    antlr.Token
	elems IListInitContext
}

func NewCreateListContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *CreateListContext {
@@ -2855,12 +4260,25 @@ func NewCreateListContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *Cre
	return p
}

func (s *CreateListContext) GetOp() antlr.Token { return s.op }

func (s *CreateListContext) SetOp(v antlr.Token) { s.op = v }

func (s *CreateListContext) GetElems() IListInitContext { return s.elems }

func (s *CreateListContext) SetElems(v IListInitContext) { s.elems = v }

func (s *CreateListContext) GetRuleContext() antlr.RuleContext {
@@ -2880,10 +4298,17 @@ func (s *CreateListContext) COMMA() antlr.TerminalNode {
}

func (s *CreateListContext) ListInit() IListInitContext {
	var t antlr.RuleContext
	for _, ctx := range s.GetChildren() {
		if _, ok := ctx.(IListInitContext); ok {
			t = ctx.(antlr.RuleContext)
			break
		}
	}
@@ -2895,6 +4320,10 @@ func (s *CreateListContext) ListInit() IListInitContext {
	return t.(IListInitContext)
}

func (s *CreateListContext) EnterRule(listener antlr.ParseTreeListener) {
	if listenerT, ok := listener.(CELListener); ok {
		listenerT.EnterCreateList(s)
@@ -2917,6 +4346,7 @@ func (s *CreateListContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
	}
}

type IdentContext struct {
	PrimaryContext
	leadingDot antlr.Token
@@ -2979,6 +4409,13 @@ type CreateStructContext struct {
	PrimaryContext
	op      antlr.Token
	entries IMapInitializerListContext
}

func NewCreateStructContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *CreateStructContext {
@@ -2991,12 +4428,25 @@ func NewCreateStructContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *C
	return p
}

func (s *CreateStructContext) GetOp() antlr.Token { return s.op }

func (s *CreateStructContext) SetOp(v antlr.Token) { s.op = v }

func (s *CreateStructContext) GetEntries() IMapInitializerListContext { return s.entries }

func (s *CreateStructContext) SetEntries(v IMapInitializerListContext) { s.entries = v }

func (s *CreateStructContext) GetRuleContext() antlr.RuleContext {
@@ -3016,10 +4466,17 @@ func (s *CreateStructContext) COMMA() antlr.TerminalNode {
}

func (s *CreateStructContext) MapInitializerList() IMapInitializerListContext {
	var t antlr.RuleContext
	for _, ctx := range s.GetChildren() {
		if _, ok := ctx.(IMapInitializerListContext); ok {
			t = ctx.(antlr.RuleContext)
			break
		}
	}
@@ -3031,6 +4488,10 @@ func (s *CreateStructContext) MapInitializerList() IMapInitializerListContext {
	return t.(IMapInitializerListContext)
}

func (s *CreateStructContext) EnterRule(listener antlr.ParseTreeListener) {
	if listenerT, ok := listener.(CELListener); ok {
		listenerT.EnterCreateStruct(s)
@@ -3053,6 +4514,10 @@ func (s *CreateStructContext) Accept(visitor antlr.ParseTreeVisitor) interface{}
	}
}

type ConstantLiteralContext struct {
	PrimaryContext
}
@@ -3072,10 +4537,17 @@ func (s *ConstantLiteralContext) GetRuleContext() antlr.RuleContext {
}

func (s *ConstantLiteralContext) Literal() ILiteralContext {
	var t antlr.RuleContext
	for _, ctx := range s.GetChildren() {
		if _, ok := ctx.(ILiteralContext); ok {
			t = ctx.(antlr.RuleContext)
			break
		}
	}
@@ -3087,6 +4559,10 @@ func (s *ConstantLiteralContext) Literal() ILiteralContext {
	return t.(ILiteralContext)
}

func (s *ConstantLiteralContext) EnterRule(listener antlr.ParseTreeListener) {
	if listenerT, ok := listener.(CELListener); ok {
		listenerT.EnterConstantLiteral(s)
@@ -3109,12 +4585,19 @@ func (s *ConstantLiteralContext) Accept(visitor antlr.ParseTreeVisitor) interfac
	}
}

type NestedContext struct {
	PrimaryContext
	e IExprContext
}

func NewNestedContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *NestedContext {
	var p = new(NestedContext)

	InitEmptyPrimaryContext(&p.PrimaryContext)
@@ -3124,8 +4607,15 @@ func NewNestedContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *NestedC
	return p
}

func (s *NestedContext) GetE() IExprContext { return s.e }

func (s *NestedContext) SetE(v IExprContext) { s.e = v }

func (s *NestedContext) GetRuleContext() antlr.RuleContext {
@@ -3141,10 +4631,17 @@ func (s *NestedContext) RPAREN() antlr.TerminalNode {
	return s.GetToken(CELParserRPAREN, 0)
}

func (s *NestedContext) Expr() IExprContext {
	var t antlr.RuleContext
	for _, ctx := range s.GetChildren() {
		if _, ok := ctx.(IExprContext); ok {
			t = ctx.(antlr.RuleContext)
			break
		}
	}
@@ -3156,6 +4653,10 @@ func (s *NestedContext) Expr() IExprContext {
	return t.(IExprContext)
}

func (s *NestedContext) EnterRule(listener antlr.ParseTreeListener) {
	if listenerT, ok := listener.(CELListener); ok {
		listenerT.EnterNested(s)
@@ -3178,6 +4679,7 @@ func (s *NestedContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
	}
}

type CreateMessageContext struct {
	PrimaryContext
	leadingDot  antlr.Token
	_IDENTIFIER antlr.Token
	ids         []antlr.Token
	s16         antlr.Token
	ops         []antlr.Token
	op          antlr.Token
	entries     IFieldInitializerListContext
}

func NewCreateMessageContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *CreateMessageContext {
@@ -3199,6 +4713,10 @@ func NewCreateMessageContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *
	return p
}

func (s *CreateMessageContext) GetLeadingDot() antlr.Token { return s.leadingDot }

func (s *CreateMessageContext) Get_IDENTIFIER() antlr.Token { return s._IDENTIFIER }
@@ -3207,6 +4725,10 @@ func (s *CreateMessageContext) GetS16() antlr.Token { return s.s16 }

func (s *CreateMessageContext) GetOp() antlr.Token { return s.op }

func (s *CreateMessageContext) SetLeadingDot(v antlr.Token) { s.leadingDot = v }

func (s *CreateMessageContext) Set_IDENTIFIER(v antlr.Token) { s._IDENTIFIER = v }
@@ -3215,16 +4737,31 @@ func (s *CreateMessageContext) SetS16(v antlr.Token) { s.s16 = v }

func (s *CreateMessageContext) SetOp(v antlr.Token) { s.op = v }

func (s *CreateMessageContext) GetIds() []antlr.Token { return s.ids }

func (s *CreateMessageContext) GetOps() []antlr.Token { return s.ops }

func (s *CreateMessageContext) SetIds(v []antlr.Token) { s.ids = v }

func (s *CreateMessageContext) SetOps(v []antlr.Token) { s.ops = v }

func (s *CreateMessageContext) GetEntries() IFieldInitializerListContext { return s.entries }

func (s *CreateMessageContext) SetEntries(v IFieldInitializerListContext) { s.entries = v }

func (s *CreateMessageContext) GetRuleContext() antlr.RuleContext {
@@ -3260,10 +4797,17 @@ func (s *CreateMessageContext) DOT(i int) antlr.TerminalNode {
}

func (s *CreateMessageContext) FieldInitializerList() IFieldInitializerListContext {
	var t antlr.RuleContext
	for _, ctx := range s.GetChildren() {
		if _, ok := ctx.(IFieldInitializerListContext); ok {
			t = ctx.(antlr.RuleContext)
			break
		}
	}
@@ -3275,6 +4819,10 @@ func (s *CreateMessageContext) FieldInitializerList() IFieldInitializerListConte
	return t.(IFieldInitializerListContext)
}

func (s *CreateMessageContext) EnterRule(listener antlr.ParseTreeListener) {
	if listenerT, ok := listener.(CELListener); ok {
		listenerT.EnterCreateMessage(s)
@@ -3297,6 +4845,7 @@ func (s *CreateMessageContext) Accept(visitor antlr.ParseTreeVisitor) interface{
	}
}

type GlobalCallContext struct {
	PrimaryContext
	leadingDot antlr.Token
	id         antlr.Token
	op         antlr.Token
	args       IExprListContext
}

func NewGlobalCallContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *GlobalCallContext {
	var p = new(GlobalCallContext)

	InitEmptyPrimaryContext(&p.PrimaryContext)
	p.parser = parser
@@ -3315,6 +4877,7 @@ func NewGlobalCallContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *Glo
	return p
}

func (s *GlobalCallContext) GetLeadingDot() antlr.Token { return s.leadingDot }

func (s *GlobalCallContext) GetId() antlr.Token { return s.id }

@@ -3356,6 +4919,53 @@ func (s *GlobalCallContext) ExprList() IExprListContext {
	for _, ctx := range s.GetChildren() {
		if _, ok := ctx.(IExprListContext); ok {
			t = ctx.(antlr.RuleContext)
			break
		}
	}
@@ -3367,6 +4977,7 @@ func (s *GlobalCallContext) ExprList() IExprListContext {
	return t.(IExprListContext)
}

func (s *GlobalCallContext) EnterRule(listener antlr.ParseTreeListener) {
	if listenerT, ok := listener.(CELListener); ok {
		listenerT.EnterGlobalCall(s)
	}
}
@@ -3383,18 +4994,46 @@ func (s *GlobalCallContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
	switch t := visitor.(type) {
	case CELVisitor:
		return t.VisitGlobalCall(s)
	default:
		return t.VisitChildren(s)
	}
}

func (p *CELParser) Primary() (localctx IPrimaryContext) {
	localctx = NewPrimaryContext(p, p.GetParserRuleContext(), p.GetState())
	p.EnterRule(localctx, 16, CELParserRULE_primary)
	var _la int

	p.SetState(184)
	p.GetErrorHandler().Sync(p)
	if p.HasError() {
		goto errorExit
@@ -3402,15 +5041,22 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) {

	switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 25, p.GetParserRuleContext()) {
	case 1:
		localctx = NewIdentContext(p, localctx)
		p.EnterOuterAlt(localctx, 1)
		p.SetState(132)
		p.GetErrorHandler().Sync(p)
		if p.HasError() {
			goto errorExit
		}
		_la = p.GetTokenStream().LA(1)

		if _la == CELParserDOT {
			{
				p.SetState(131)

				var _m = p.Match(CELParserDOT)

				localctx.(*IdentContext).leadingDot = _m
				if p.HasError() {
					// Recognition error - abort rule
					goto errorExit
				}
			}
		}
		{
			p.SetState(134)

			var _m = p.Match(CELParserIDENTIFIER)

@@ -3542,39 +5202,158 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) {
		p.EnterOuterAlt(localctx, 4)
		{
			p.SetState(148)
			var _m = p.Match(CELParserLBRACKET)

			localctx.(*CreateListContext).op = _m
			if p.HasError() {
				// Recognition error - abort rule
				goto errorExit
			}
		}
		p.SetState(150)
		p.GetErrorHandler().Sync(p)
		if p.HasError() {
			goto errorExit
		}
		_la = p.GetTokenStream().LA(1)

		if (int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&135763153920) != 0 {
			{
				p.SetState(149)

				var _x = p.ListInit()

				localctx.(*CreateListContext).elems = _x
			}
		}
		p.SetState(153)
		p.GetErrorHandler().Sync(p)
		if p.HasError() {
			goto errorExit
		}
		_la = p.GetTokenStream().LA(1)

		if _la == CELParserCOMMA {
			{
				p.SetState(152)
				p.Match(CELParserCOMMA)
				if p.HasError() {
					// Recognition error - abort rule
					goto errorExit
				}
			}
		}
		{
			p.SetState(155)
			p.Match(CELParserRPRACKET)
			if p.HasError() {
@@ -3600,32 +5390,102 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) {
		p.EnterOuterAlt(localctx, 5)
		{
			p.SetState(156)

			var _m = p.Match(CELParserLBRACE)

			localctx.(*CreateStructContext).op = _m
			if p.HasError() {
				// Recognition error - abort rule
				goto errorExit
			}
		}
		p.SetState(158)
		p.GetErrorHandler().Sync(p)
		if p.HasError() {
			goto errorExit
		}
		_la = p.GetTokenStream().LA(1)

		if (int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&135763153920) != 0 {
			{
				p.SetState(157)

				var _x = p.MapInitializerList()

				localctx.(*CreateStructContext).entries = _x
			}
		}
		p.SetState(161)
		p.GetErrorHandler().Sync(p)
		if p.HasError() {
@@ -3633,6 +5493,7 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) {
		}
		_la = p.GetTokenStream().LA(1)

		if _la == CELParserCOMMA {
			{
				p.SetState(160)
				p.Match(CELParserCOMMA)
				if p.HasError() {
					// Recognition error - abort rule
					goto errorExit
				}
			}
		}
		{
			p.SetState(163)
			p.Match(CELParserRBRACE)
			if p.HasError() {
				// Recognition error - abort rule
				goto errorExit
			}
		}

@@ -3679,91 +5554,160 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) {
		}
		{
			p.SetState(167)

			var _m = p.Match(CELParserIDENTIFIER)

			localctx.(*CreateMessageContext)._IDENTIFIER = _m
			if p.HasError() {
				// Recognition error - abort rule
				goto errorExit
			}
		}
		localctx.(*CreateMessageContext).ids = append(localctx.(*CreateMessageContext).ids, localctx.(*CreateMessageContext)._IDENTIFIER)
		p.SetState(172)
		p.GetErrorHandler().Sync(p)
		if p.HasError() {
			goto errorExit
		}
		_la = p.GetTokenStream().LA(1)

		for _la == CELParserDOT {
			{
				p.SetState(168)

				var _m = p.Match(CELParserDOT)

				localctx.(*CreateMessageContext).s16 = _m
				if p.HasError() {
					// Recognition error - abort rule
					goto errorExit
				}
			}
			localctx.(*CreateMessageContext).ops = append(localctx.(*CreateMessageContext).ops, localctx.(*CreateMessageContext).s16)
			{
				p.SetState(169)

				var _m = p.Match(CELParserIDENTIFIER)

				localctx.(*CreateMessageContext)._IDENTIFIER = _m
				if p.HasError() {
					// Recognition error - abort rule
					goto errorExit
				}
			}
			localctx.(*CreateMessageContext).ids = append(localctx.(*CreateMessageContext).ids, localctx.(*CreateMessageContext)._IDENTIFIER)

			p.SetState(174)
			p.GetErrorHandler().Sync(p)
			if p.HasError() {
				goto errorExit
			}
			_la = p.GetTokenStream().LA(1)
		}
		{
			p.SetState(175)

			var _m = p.Match(CELParserLBRACE)

			localctx.(*CreateMessageContext).op = _m
			if p.HasError() {
				// Recognition error - abort rule
				goto errorExit
			}
		}
		p.SetState(177)
		p.GetErrorHandler().Sync(p)
		if p.HasError() {
			goto errorExit
		}
		_la = p.GetTokenStream().LA(1)

		if (int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&206159478784) != 0 {
			{
				p.SetState(176)

				var _x = p.FieldInitializerList()

				localctx.(*CreateMessageContext).entries = _x
			}
		}
		p.SetState(180)
		p.GetErrorHandler().Sync(p)
		if p.HasError() {
			goto errorExit
		}
		_la = p.GetTokenStream().LA(1)

		if _la == CELParserCOMMA {
			{
				p.SetState(179)
				p.Match(CELParserCOMMA)
				if p.HasError() {
					// Recognition error - abort rule
					goto errorExit
				}
			}
		}
		{
			p.SetState(182)
			p.Match(CELParserRBRACE)
			if p.HasError() {
				// Recognition error - abort rule
				goto errorExit
			}
		}

	case 7:
		localctx = NewConstantLiteralContext(p, localctx)
		p.EnterOuterAlt(localctx, 7)
		{
			p.SetState(183)
			p.Literal()
		}

@@ -3796,6 +5767,10 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) {
		goto errorExit
	}

errorExit:
	if p.HasError() {
		v := p.GetError()
@@ -3809,6 +5784,10 @@ errorExit:
	goto errorExit // Trick to prevent compiler error if the label is not used
}

// IExprListContext is an interface to support dynamic dispatch.
type IExprListContext interface {
	antlr.ParserRuleContext

	GetParser() antlr.Parser

	// Get_expr returns the _expr rule contexts.
	Get_expr() IExprContext

	// Set_expr sets the _expr rule contexts.
	Set_expr(IExprContext)

	// GetE returns the e rule context list.
	GetE() []IExprContext

	// SetE sets the e rule context list.
	SetE([]IExprContext)

	// Getter signatures
	AllExpr() []IExprContext
@@ -3841,8 +5835,13 @@ type IExprListContext interface {
type ExprListContext struct {
	antlr.BaseParserRuleContext
	parser antlr.Parser
	_expr  IExprContext
	e      []IExprContext
}

func NewEmptyExprListContext() *ExprListContext {
@@ -3852,7 +5851,11 @@ func NewEmptyExprListContext() *ExprListContext {
	return p
}

func InitEmptyExprListContext(p *ExprListContext) {
	antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
	p.RuleIndex = CELParserRULE_exprList
}
@@ -3874,12 +5877,25 @@ func (s *ExprListContext) GetParser() antlr.Parser { return s.parser }

func (s *ExprListContext) Get_expr() IExprContext { return s._expr }

func (s *ExprListContext) Set_expr(v IExprContext) { s._expr = v }

func (s *ExprListContext) GetE() []IExprContext { return s.e }

func (s *ExprListContext) SetE(v []IExprContext) { s.e = v }

func (s *ExprListContext) AllExpr() []IExprContext {
	children := s.GetChildren()
	len := 0
@@ -3902,12 +5918,20 @@ func (s *ExprListContext) AllExpr() []IExprContext {
}

func (s *ExprListContext) Expr(i int) IExprContext {
	var t antlr.RuleContext
	j := 0
	for _, ctx := range s.GetChildren() {
		if _, ok := ctx.(IExprContext); ok {
			if j == i {
				t = ctx.(antlr.RuleContext)
				break
			}
			j++
@@ -3937,6 +5961,10 @@ func (s *ExprListContext) ToStringTree(ruleNames []string, recog antlr.Recognize
	return antlr.TreesStringTree(s, ruleNames, recog)
}

func (s *ExprListContext) EnterRule(listener antlr.ParseTreeListener) {
	if listenerT, ok := listener.(CELListener); ok {
		listenerT.EnterExprList(s)
@@ -3959,6 +5987,12 @@ func (s *ExprListContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
	}
}

func (p *CELParser) ExprList() (localctx IExprListContext) {
	localctx = NewExprListContext(p, p.GetParserRuleContext(), p.GetState())
	p.EnterRule(localctx, 18, CELParserRULE_exprList)
@@ -3966,6 +6000,7 @@ func (p *CELParser) ExprList() (localctx IExprListContext) {

	p.EnterOuterAlt(localctx, 1)
	{
		p.SetState(186)

		var _x = p.Expr()

		localctx.(*ExprListContext)._expr = _x
	}
	localctx.(*ExprListContext).e = append(localctx.(*ExprListContext).e, localctx.(*ExprListContext)._expr)
	p.SetState(191)
archivista storage backend) p.GetErrorHandler().Sync(p) if p.HasError() { goto errorExit } _la = p.GetTokenStream().LA(1) +<<<<<<< HEAD for _la == CELParserCOMMA { { p.SetState(187) @@ -3994,10 +6041,29 @@ func (p *CELParser) ExprList() (localctx IExprListContext) { var _x = p.Expr() +======= + + for _la == CELParserCOMMA { + { + p.SetState(183) + p.Match(CELParserCOMMA) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + { + p.SetState(184) + + var _x = p.Expr() + + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) localctx.(*ExprListContext)._expr = _x } localctx.(*ExprListContext).e = append(localctx.(*ExprListContext).e, localctx.(*ExprListContext)._expr) +<<<<<<< HEAD p.SetState(193) p.GetErrorHandler().Sync(p) if p.HasError() { @@ -4006,6 +6072,19 @@ func (p *CELParser) ExprList() (localctx IExprListContext) { _la = p.GetTokenStream().LA(1) } +======= + + p.SetState(189) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + } + + + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) errorExit: if p.HasError() { v := p.GetError() @@ -4019,6 +6098,10 @@ errorExit: goto errorExit // Trick to prevent compiler error if the label is not used } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // IListInitContext is an interface to support dynamic dispatch. type IListInitContext interface { antlr.ParserRuleContext @@ -4029,6 +6112,7 @@ type IListInitContext interface { // Get_optExpr returns the _optExpr rule contexts. Get_optExpr() IOptExprContext +<<<<<<< HEAD // Set_optExpr sets the _optExpr rule contexts. Set_optExpr(IOptExprContext) @@ -4037,6 +6121,20 @@ type IListInitContext interface { // SetElems sets the elems rule context list. SetElems([]IOptExprContext) +======= + + // Set_optExpr sets the _optExpr rule contexts. + Set_optExpr(IOptExprContext) + + + // GetElems returns the elems rule context list. + GetElems() []IOptExprContext + + + // SetElems sets the elems rule context list. 
+ SetElems([]IOptExprContext) + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Getter signatures AllOptExpr() []IOptExprContext @@ -4050,9 +6148,15 @@ type IListInitContext interface { type ListInitContext struct { antlr.BaseParserRuleContext +<<<<<<< HEAD parser antlr.Parser _optExpr IOptExprContext elems []IOptExprContext +======= + parser antlr.Parser + _optExpr IOptExprContext + elems []IOptExprContext +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func NewEmptyListInitContext() *ListInitContext { @@ -4062,7 +6166,11 @@ func NewEmptyListInitContext() *ListInitContext { return p } +<<<<<<< HEAD func InitEmptyListInitContext(p *ListInitContext) { +======= +func InitEmptyListInitContext(p *ListInitContext) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_listInit } @@ -4084,12 +6192,25 @@ func (s *ListInitContext) GetParser() antlr.Parser { return s.parser } func (s *ListInitContext) Get_optExpr() IOptExprContext { return s._optExpr } +<<<<<<< HEAD +func (s *ListInitContext) Set_optExpr(v IOptExprContext) { s._optExpr = v } + +func (s *ListInitContext) GetElems() []IOptExprContext { return s.elems } + +func (s *ListInitContext) SetElems(v []IOptExprContext) { s.elems = v } + +======= + func (s *ListInitContext) Set_optExpr(v IOptExprContext) { s._optExpr = v } + func (s *ListInitContext) GetElems() []IOptExprContext { return s.elems } + func (s *ListInitContext) SetElems(v []IOptExprContext) { s.elems = v } + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *ListInitContext) AllOptExpr() []IOptExprContext { children := s.GetChildren() len := 0 @@ -4112,12 +6233,20 @@ func (s *ListInitContext) AllOptExpr() []IOptExprContext { } func (s *ListInitContext) OptExpr(i int) IOptExprContext { +<<<<<<< HEAD var t antlr.RuleContext +======= + var t antlr.RuleContext; +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(IOptExprContext); ok { if j == i { +<<<<<<< HEAD t = ctx.(antlr.RuleContext) +======= + t = ctx.(antlr.RuleContext); +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) break } j++ @@ -4147,6 +6276,10 @@ func (s *ListInitContext) ToStringTree(ruleNames []string, recog antlr.Recognize return antlr.TreesStringTree(s, ruleNames, recog) } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *ListInitContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterListInit(s) @@ -4169,6 +6302,12 @@ func (s *ListInitContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } +<<<<<<< HEAD +======= + + + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (p *CELParser) ListInit() (localctx IListInitContext) { localctx = NewListInitContext(p, p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 20, CELParserRULE_listInit) @@ -4176,6 +6315,7 @@ func (p *CELParser) ListInit() (localctx IListInitContext) { p.EnterOuterAlt(localctx, 1) { +<<<<<<< HEAD p.SetState(194) var _x = p.OptExpr() @@ -4184,6 +6324,17 @@ func (p *CELParser) ListInit() (localctx IListInitContext) { } localctx.(*ListInitContext).elems = append(localctx.(*ListInitContext).elems, localctx.(*ListInitContext)._optExpr) p.SetState(199) +======= + p.SetState(190) + + var _x = p.OptExpr() + + + localctx.(*ListInitContext)._optExpr = _x + } + localctx.(*ListInitContext).elems = 
append(localctx.(*ListInitContext).elems, localctx.(*ListInitContext)._optExpr) + p.SetState(195) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.GetErrorHandler().Sync(p) if p.HasError() { goto errorExit @@ -4195,6 +6346,7 @@ func (p *CELParser) ListInit() (localctx IListInitContext) { for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { if _alt == 1 { { +<<<<<<< HEAD p.SetState(195) p.Match(CELParserCOMMA) if p.HasError() { @@ -4207,22 +6359,52 @@ func (p *CELParser) ListInit() (localctx IListInitContext) { var _x = p.OptExpr() +======= + p.SetState(191) + p.Match(CELParserCOMMA) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + { + p.SetState(192) + + var _x = p.OptExpr() + + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) localctx.(*ListInitContext)._optExpr = _x } localctx.(*ListInitContext).elems = append(localctx.(*ListInitContext).elems, localctx.(*ListInitContext)._optExpr) +<<<<<<< HEAD } p.SetState(201) p.GetErrorHandler().Sync(p) if p.HasError() { goto errorExit } +======= + + } + p.SetState(197) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 27, p.GetParserRuleContext()) if p.HasError() { goto errorExit } } +<<<<<<< HEAD +======= + + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) errorExit: if p.HasError() { v := p.GetError() @@ -4236,6 +6418,10 @@ errorExit: goto errorExit // Trick to prevent compiler error if the label is not used } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // IFieldInitializerListContext is an interface to support dynamic dispatch. type IFieldInitializerListContext interface { antlr.ParserRuleContext @@ -4244,40 +6430,75 @@ type IFieldInitializerListContext interface { GetParser() antlr.Parser // GetS21 returns the s21 token. +<<<<<<< HEAD GetS21() antlr.Token // SetS21 sets the s21 token. SetS21(antlr.Token) +======= + GetS21() antlr.Token + + + // SetS21 sets the s21 token. + SetS21(antlr.Token) + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // GetCols returns the cols token list. GetCols() []antlr.Token +<<<<<<< HEAD + // SetCols sets the cols token list. + SetCols([]antlr.Token) + +======= + // SetCols sets the cols token list. SetCols([]antlr.Token) + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Get_optField returns the _optField rule contexts. Get_optField() IOptFieldContext // Get_expr returns the _expr rule contexts. Get_expr() IExprContext +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Set_optField sets the _optField rule contexts. Set_optField(IOptFieldContext) // Set_expr sets the _expr rule contexts. Set_expr(IExprContext) +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // GetFields returns the fields rule context list. GetFields() []IOptFieldContext // GetValues returns the values rule context list. GetValues() []IExprContext +<<<<<<< HEAD // SetFields sets the fields rule context list. SetFields([]IOptFieldContext) // SetValues sets the values rule context list. SetValues([]IExprContext) +======= + + // SetFields sets the fields rule context list. + SetFields([]IOptFieldContext) + + // SetValues sets the values rule context list. 
+ SetValues([]IExprContext) + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Getter signatures AllOptField() []IOptFieldContext @@ -4295,6 +6516,7 @@ type IFieldInitializerListContext interface { type FieldInitializerListContext struct { antlr.BaseParserRuleContext +<<<<<<< HEAD parser antlr.Parser _optField IOptFieldContext fields []IOptFieldContext @@ -4302,6 +6524,15 @@ type FieldInitializerListContext struct { cols []antlr.Token _expr IExprContext values []IExprContext +======= + parser antlr.Parser + _optField IOptFieldContext + fields []IOptFieldContext + s21 antlr.Token + cols []antlr.Token + _expr IExprContext + values []IExprContext +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func NewEmptyFieldInitializerListContext() *FieldInitializerListContext { @@ -4311,7 +6542,11 @@ func NewEmptyFieldInitializerListContext() *FieldInitializerListContext { return p } +<<<<<<< HEAD func InitEmptyFieldInitializerListContext(p *FieldInitializerListContext) { +======= +func InitEmptyFieldInitializerListContext(p *FieldInitializerListContext) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_fieldInitializerList } @@ -4333,28 +6568,57 @@ func (s *FieldInitializerListContext) GetParser() antlr.Parser { return s.parser func (s *FieldInitializerListContext) GetS21() antlr.Token { return s.s21 } +<<<<<<< HEAD +func (s *FieldInitializerListContext) SetS21(v antlr.Token) { s.s21 = v } + +func (s *FieldInitializerListContext) GetCols() []antlr.Token { return s.cols } + +func (s *FieldInitializerListContext) SetCols(v []antlr.Token) { s.cols = v } + +======= + func (s *FieldInitializerListContext) SetS21(v antlr.Token) { s.s21 = v } + func (s *FieldInitializerListContext) GetCols() []antlr.Token { return s.cols } + func (s *FieldInitializerListContext) SetCols(v []antlr.Token) { s.cols = v } + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *FieldInitializerListContext) Get_optField() IOptFieldContext { return s._optField } func (s *FieldInitializerListContext) Get_expr() IExprContext { return s._expr } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *FieldInitializerListContext) Set_optField(v IOptFieldContext) { s._optField = v } func (s *FieldInitializerListContext) Set_expr(v IExprContext) { s._expr = v } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *FieldInitializerListContext) GetFields() []IOptFieldContext { return s.fields } func (s *FieldInitializerListContext) GetValues() []IExprContext { return s.values } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *FieldInitializerListContext) SetFields(v []IOptFieldContext) { s.fields = v } func (s *FieldInitializerListContext) SetValues(v []IExprContext) { s.values = v } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *FieldInitializerListContext) AllOptField() []IOptFieldContext { children := s.GetChildren() len := 0 @@ -4377,12 +6641,20 @@ func (s *FieldInitializerListContext) AllOptField() []IOptFieldContext { } func (s *FieldInitializerListContext) OptField(i int) IOptFieldContext { +<<<<<<< HEAD var t antlr.RuleContext +======= + var t antlr.RuleContext; +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(IOptFieldContext); ok { if j == i { +<<<<<<< HEAD t = 
ctx.(antlr.RuleContext) +======= + t = ctx.(antlr.RuleContext); +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) break } j++ @@ -4426,12 +6698,20 @@ func (s *FieldInitializerListContext) AllExpr() []IExprContext { } func (s *FieldInitializerListContext) Expr(i int) IExprContext { +<<<<<<< HEAD var t antlr.RuleContext +======= + var t antlr.RuleContext; +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(IExprContext); ok { if j == i { +<<<<<<< HEAD t = ctx.(antlr.RuleContext) +======= + t = ctx.(antlr.RuleContext); +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) break } j++ @@ -4461,6 +6741,10 @@ func (s *FieldInitializerListContext) ToStringTree(ruleNames []string, recog ant return antlr.TreesStringTree(s, ruleNames, recog) } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *FieldInitializerListContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterFieldInitializerList(s) @@ -4483,6 +6767,12 @@ func (s *FieldInitializerListContext) Accept(visitor antlr.ParseTreeVisitor) int } } +<<<<<<< HEAD +======= + + + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContext) { localctx = NewFieldInitializerListContext(p, p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 22, CELParserRULE_fieldInitializerList) @@ -4490,26 +6780,44 @@ func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContex p.EnterOuterAlt(localctx, 1) { +<<<<<<< HEAD p.SetState(202) var _x = p.OptField() +======= + p.SetState(198) + + var _x = p.OptField() + + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) localctx.(*FieldInitializerListContext)._optField = _x } localctx.(*FieldInitializerListContext).fields = append(localctx.(*FieldInitializerListContext).fields, localctx.(*FieldInitializerListContext)._optField) { +<<<<<<< HEAD p.SetState(203) +======= + p.SetState(199) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _m = p.Match(CELParserCOLON) localctx.(*FieldInitializerListContext).s21 = _m if p.HasError() { +<<<<<<< HEAD // Recognition error - abort rule goto errorExit +======= + // Recognition error - abort rule + goto errorExit +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } localctx.(*FieldInitializerListContext).cols = append(localctx.(*FieldInitializerListContext).cols, localctx.(*FieldInitializerListContext).s21) { +<<<<<<< HEAD p.SetState(204) var _x = p.Expr() @@ -4518,6 +6826,17 @@ func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContex } localctx.(*FieldInitializerListContext).values = append(localctx.(*FieldInitializerListContext).values, localctx.(*FieldInitializerListContext)._expr) p.SetState(212) +======= + p.SetState(200) + + var _x = p.Expr() + + + localctx.(*FieldInitializerListContext)._expr = _x + } + localctx.(*FieldInitializerListContext).values = append(localctx.(*FieldInitializerListContext).values, localctx.(*FieldInitializerListContext)._expr) + p.SetState(208) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.GetErrorHandler().Sync(p) if p.HasError() { goto errorExit @@ -4529,6 +6848,7 @@ func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContex for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { if _alt == 1 { { +<<<<<<< HEAD p.SetState(205) p.Match(CELParserCOMMA) if p.HasError() { @@ -4541,42 
+6861,89 @@ func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContex var _x = p.OptField() +======= + p.SetState(201) + p.Match(CELParserCOMMA) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + { + p.SetState(202) + + var _x = p.OptField() + + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) localctx.(*FieldInitializerListContext)._optField = _x } localctx.(*FieldInitializerListContext).fields = append(localctx.(*FieldInitializerListContext).fields, localctx.(*FieldInitializerListContext)._optField) { +<<<<<<< HEAD p.SetState(207) +======= + p.SetState(203) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _m = p.Match(CELParserCOLON) localctx.(*FieldInitializerListContext).s21 = _m if p.HasError() { +<<<<<<< HEAD // Recognition error - abort rule goto errorExit +======= + // Recognition error - abort rule + goto errorExit +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } localctx.(*FieldInitializerListContext).cols = append(localctx.(*FieldInitializerListContext).cols, localctx.(*FieldInitializerListContext).s21) { +<<<<<<< HEAD p.SetState(208) var _x = p.Expr() +======= + p.SetState(204) + + var _x = p.Expr() + + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) localctx.(*FieldInitializerListContext)._expr = _x } localctx.(*FieldInitializerListContext).values = append(localctx.(*FieldInitializerListContext).values, localctx.(*FieldInitializerListContext)._expr) +<<<<<<< HEAD } p.SetState(214) p.GetErrorHandler().Sync(p) if p.HasError() { goto errorExit } +======= + + } + p.SetState(210) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 28, p.GetParserRuleContext()) if p.HasError() { goto errorExit } } +<<<<<<< HEAD +======= + + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) errorExit: if p.HasError() { v := p.GetError() @@ -4590,6 +6957,10 @@ errorExit: goto errorExit // Trick to prevent compiler error if the label is not used } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // IOptFieldContext is an interface to support dynamic dispatch. type IOptFieldContext interface { antlr.ParserRuleContext @@ -4598,6 +6969,7 @@ type IOptFieldContext interface { GetParser() antlr.Parser // GetOpt returns the opt token. +<<<<<<< HEAD GetOpt() antlr.Token // SetOpt sets the opt token. @@ -4605,6 +6977,17 @@ type IOptFieldContext interface { // Getter signatures EscapeIdent() IEscapeIdentContext +======= + GetOpt() antlr.Token + + + // SetOpt sets the opt token. + SetOpt(antlr.Token) + + + // Getter signatures + IDENTIFIER() antlr.TerminalNode +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) QUESTIONMARK() antlr.TerminalNode // IsOptFieldContext differentiates from other interfaces. 
@@ -4614,7 +6997,11 @@ type IOptFieldContext interface { type OptFieldContext struct { antlr.BaseParserRuleContext parser antlr.Parser +<<<<<<< HEAD opt antlr.Token +======= + opt antlr.Token +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func NewEmptyOptFieldContext() *OptFieldContext { @@ -4624,7 +7011,11 @@ func NewEmptyOptFieldContext() *OptFieldContext { return p } +<<<<<<< HEAD func InitEmptyOptFieldContext(p *OptFieldContext) { +======= +func InitEmptyOptFieldContext(p *OptFieldContext) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_optField } @@ -4646,6 +7037,7 @@ func (s *OptFieldContext) GetParser() antlr.Parser { return s.parser } func (s *OptFieldContext) GetOpt() antlr.Token { return s.opt } +<<<<<<< HEAD func (s *OptFieldContext) SetOpt(v antlr.Token) { s.opt = v } func (s *OptFieldContext) EscapeIdent() IEscapeIdentContext { @@ -4662,6 +7054,14 @@ func (s *OptFieldContext) EscapeIdent() IEscapeIdentContext { } return t.(IEscapeIdentContext) +======= + +func (s *OptFieldContext) SetOpt(v antlr.Token) { s.opt = v } + + +func (s *OptFieldContext) IDENTIFIER() antlr.TerminalNode { + return s.GetToken(CELParserIDENTIFIER, 0) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (s *OptFieldContext) QUESTIONMARK() antlr.TerminalNode { @@ -4676,6 +7076,10 @@ func (s *OptFieldContext) ToStringTree(ruleNames []string, recog antlr.Recognize return antlr.TreesStringTree(s, ruleNames, recog) } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *OptFieldContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterOptField(s) @@ -4698,38 +7102,73 @@ func (s *OptFieldContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } +<<<<<<< HEAD +======= + + + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (p *CELParser) OptField() (localctx IOptFieldContext) { localctx = NewOptFieldContext(p, p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 24, CELParserRULE_optField) var _la int p.EnterOuterAlt(localctx, 1) +<<<<<<< HEAD p.SetState(216) +======= + p.SetState(212) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.GetErrorHandler().Sync(p) if p.HasError() { goto errorExit } _la = p.GetTokenStream().LA(1) +<<<<<<< HEAD if _la == CELParserQUESTIONMARK { { p.SetState(215) +======= + + if _la == CELParserQUESTIONMARK { + { + p.SetState(211) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _m = p.Match(CELParserQUESTIONMARK) localctx.(*OptFieldContext).opt = _m if p.HasError() { +<<<<<<< HEAD // Recognition error - abort rule goto errorExit +======= + // Recognition error - abort rule + goto errorExit +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } } { +<<<<<<< HEAD p.SetState(218) p.EscapeIdent() } +======= + p.SetState(214) + p.Match(CELParserIDENTIFIER) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) errorExit: if p.HasError() { v := p.GetError() @@ -4743,6 +7182,10 @@ errorExit: goto errorExit // Trick to prevent compiler error if the label is not used } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // IMapInitializerListContext is an interface to support dynamic dispatch. 
type IMapInitializerListContext interface { antlr.ParserRuleContext @@ -4751,40 +7194,75 @@ type IMapInitializerListContext interface { GetParser() antlr.Parser // GetS21 returns the s21 token. +<<<<<<< HEAD GetS21() antlr.Token // SetS21 sets the s21 token. SetS21(antlr.Token) +======= + GetS21() antlr.Token + + + // SetS21 sets the s21 token. + SetS21(antlr.Token) + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) + + // GetCols returns the cols token list. + GetCols() []antlr.Token + +<<<<<<< HEAD + // SetCols sets the cols token list. + SetCols([]antlr.Token) - // GetCols returns the cols token list. - GetCols() []antlr.Token +======= // SetCols sets the cols token list. SetCols([]antlr.Token) + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Get_optExpr returns the _optExpr rule contexts. Get_optExpr() IOptExprContext // Get_expr returns the _expr rule contexts. Get_expr() IExprContext +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Set_optExpr sets the _optExpr rule contexts. Set_optExpr(IOptExprContext) // Set_expr sets the _expr rule contexts. Set_expr(IExprContext) +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // GetKeys returns the keys rule context list. GetKeys() []IOptExprContext // GetValues returns the values rule context list. GetValues() []IExprContext +<<<<<<< HEAD // SetKeys sets the keys rule context list. SetKeys([]IOptExprContext) // SetValues sets the values rule context list. SetValues([]IExprContext) +======= + + // SetKeys sets the keys rule context list. + SetKeys([]IOptExprContext) + + // SetValues sets the values rule context list. + SetValues([]IExprContext) + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Getter signatures AllOptExpr() []IOptExprContext @@ -4802,6 +7280,7 @@ type IMapInitializerListContext interface { type MapInitializerListContext struct { antlr.BaseParserRuleContext +<<<<<<< HEAD parser antlr.Parser _optExpr IOptExprContext keys []IOptExprContext @@ -4809,6 +7288,15 @@ type MapInitializerListContext struct { cols []antlr.Token _expr IExprContext values []IExprContext +======= + parser antlr.Parser + _optExpr IOptExprContext + keys []IOptExprContext + s21 antlr.Token + cols []antlr.Token + _expr IExprContext + values []IExprContext +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func NewEmptyMapInitializerListContext() *MapInitializerListContext { @@ -4818,7 +7306,11 @@ func NewEmptyMapInitializerListContext() *MapInitializerListContext { return p } +<<<<<<< HEAD func InitEmptyMapInitializerListContext(p *MapInitializerListContext) { +======= +func InitEmptyMapInitializerListContext(p *MapInitializerListContext) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_mapInitializerList } @@ -4840,28 +7332,57 @@ func (s *MapInitializerListContext) GetParser() antlr.Parser { return s.parser } func (s *MapInitializerListContext) GetS21() antlr.Token { return s.s21 } +<<<<<<< HEAD +func (s *MapInitializerListContext) SetS21(v antlr.Token) { s.s21 = v } + +func (s *MapInitializerListContext) GetCols() []antlr.Token { return s.cols } + +func (s *MapInitializerListContext) SetCols(v []antlr.Token) { s.cols = v } + +======= + func (s *MapInitializerListContext) SetS21(v antlr.Token) { s.s21 = v } + func (s *MapInitializerListContext) GetCols() []antlr.Token { return s.cols } + func (s *MapInitializerListContext) SetCols(v 
[]antlr.Token) { s.cols = v } + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *MapInitializerListContext) Get_optExpr() IOptExprContext { return s._optExpr } func (s *MapInitializerListContext) Get_expr() IExprContext { return s._expr } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *MapInitializerListContext) Set_optExpr(v IOptExprContext) { s._optExpr = v } func (s *MapInitializerListContext) Set_expr(v IExprContext) { s._expr = v } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *MapInitializerListContext) GetKeys() []IOptExprContext { return s.keys } func (s *MapInitializerListContext) GetValues() []IExprContext { return s.values } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *MapInitializerListContext) SetKeys(v []IOptExprContext) { s.keys = v } func (s *MapInitializerListContext) SetValues(v []IExprContext) { s.values = v } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *MapInitializerListContext) AllOptExpr() []IOptExprContext { children := s.GetChildren() len := 0 @@ -4884,12 +7405,20 @@ func (s *MapInitializerListContext) AllOptExpr() []IOptExprContext { } func (s *MapInitializerListContext) OptExpr(i int) IOptExprContext { +<<<<<<< HEAD var t antlr.RuleContext +======= + var t antlr.RuleContext; +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(IOptExprContext); ok { if j == i { +<<<<<<< HEAD t = ctx.(antlr.RuleContext) +======= + t = ctx.(antlr.RuleContext); +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) break } j++ @@ -4933,12 +7462,20 @@ func (s *MapInitializerListContext) AllExpr() []IExprContext { } func (s *MapInitializerListContext) Expr(i int) IExprContext { +<<<<<<< HEAD var t antlr.RuleContext +======= + var t antlr.RuleContext; +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(IExprContext); ok { if j == i { +<<<<<<< HEAD t = ctx.(antlr.RuleContext) +======= + t = ctx.(antlr.RuleContext); +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) break } j++ @@ -4968,6 +7505,10 @@ func (s *MapInitializerListContext) ToStringTree(ruleNames []string, recog antlr return antlr.TreesStringTree(s, ruleNames, recog) } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *MapInitializerListContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterMapInitializerList(s) @@ -4990,6 +7531,12 @@ func (s *MapInitializerListContext) Accept(visitor antlr.ParseTreeVisitor) inter } } +<<<<<<< HEAD +======= + + + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) { localctx = NewMapInitializerListContext(p, p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 26, CELParserRULE_mapInitializerList) @@ -4997,26 +7544,44 @@ func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) { p.EnterOuterAlt(localctx, 1) { +<<<<<<< HEAD p.SetState(220) var _x = p.OptExpr() +======= + p.SetState(216) + + var _x = p.OptExpr() + + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) localctx.(*MapInitializerListContext)._optExpr = _x } localctx.(*MapInitializerListContext).keys = append(localctx.(*MapInitializerListContext).keys, 
localctx.(*MapInitializerListContext)._optExpr) { +<<<<<<< HEAD p.SetState(221) +======= + p.SetState(217) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _m = p.Match(CELParserCOLON) localctx.(*MapInitializerListContext).s21 = _m if p.HasError() { +<<<<<<< HEAD // Recognition error - abort rule goto errorExit +======= + // Recognition error - abort rule + goto errorExit +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } localctx.(*MapInitializerListContext).cols = append(localctx.(*MapInitializerListContext).cols, localctx.(*MapInitializerListContext).s21) { +<<<<<<< HEAD p.SetState(222) var _x = p.Expr() @@ -5025,6 +7590,17 @@ func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) { } localctx.(*MapInitializerListContext).values = append(localctx.(*MapInitializerListContext).values, localctx.(*MapInitializerListContext)._expr) p.SetState(230) +======= + p.SetState(218) + + var _x = p.Expr() + + + localctx.(*MapInitializerListContext)._expr = _x + } + localctx.(*MapInitializerListContext).values = append(localctx.(*MapInitializerListContext).values, localctx.(*MapInitializerListContext)._expr) + p.SetState(226) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.GetErrorHandler().Sync(p) if p.HasError() { goto errorExit @@ -5036,6 +7612,7 @@ func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) { for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { if _alt == 1 { { +<<<<<<< HEAD p.SetState(223) p.Match(CELParserCOMMA) if p.HasError() { @@ -5048,42 +7625,85 @@ func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) { var _x = p.OptExpr() +======= + p.SetState(219) + p.Match(CELParserCOMMA) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + { + p.SetState(220) + + var _x = p.OptExpr() + + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) localctx.(*MapInitializerListContext)._optExpr = _x } localctx.(*MapInitializerListContext).keys = append(localctx.(*MapInitializerListContext).keys, localctx.(*MapInitializerListContext)._optExpr) { +<<<<<<< HEAD p.SetState(225) +======= + p.SetState(221) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _m = p.Match(CELParserCOLON) localctx.(*MapInitializerListContext).s21 = _m if p.HasError() { +<<<<<<< HEAD // Recognition error - abort rule goto errorExit +======= + // Recognition error - abort rule + goto errorExit +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } localctx.(*MapInitializerListContext).cols = append(localctx.(*MapInitializerListContext).cols, localctx.(*MapInitializerListContext).s21) { +<<<<<<< HEAD p.SetState(226) var _x = p.Expr() +======= + p.SetState(222) + + var _x = p.Expr() + + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) localctx.(*MapInitializerListContext)._expr = _x } localctx.(*MapInitializerListContext).values = append(localctx.(*MapInitializerListContext).values, localctx.(*MapInitializerListContext)._expr) +<<<<<<< HEAD } p.SetState(232) p.GetErrorHandler().Sync(p) if p.HasError() { goto errorExit } +======= + + } + p.SetState(228) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 30, p.GetParserRuleContext()) if p.HasError() { goto errorExit } } +<<<<<<< HEAD errorExit: if p.HasError() { v := p.GetError() @@ -5293,6 +7913,9 @@ func (p *CELParser) EscapeIdent() (localctx 
IEscapeIdentContext) { p.SetError(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) goto errorExit } +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) errorExit: if p.HasError() { @@ -5307,6 +7930,10 @@ errorExit: goto errorExit // Trick to prevent compiler error if the label is not used } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // IOptExprContext is an interface to support dynamic dispatch. type IOptExprContext interface { antlr.ParserRuleContext @@ -5315,17 +7942,34 @@ type IOptExprContext interface { GetParser() antlr.Parser // GetOpt returns the opt token. +<<<<<<< HEAD GetOpt() antlr.Token // SetOpt sets the opt token. SetOpt(antlr.Token) +======= + GetOpt() antlr.Token + + + // SetOpt sets the opt token. + SetOpt(antlr.Token) + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // GetE returns the e rule contexts. GetE() IExprContext +<<<<<<< HEAD + // SetE sets the e rule contexts. + SetE(IExprContext) + +======= + // SetE sets the e rule contexts. SetE(IExprContext) + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Getter signatures Expr() IExprContext QUESTIONMARK() antlr.TerminalNode @@ -5337,8 +7981,13 @@ type IOptExprContext interface { type OptExprContext struct { antlr.BaseParserRuleContext parser antlr.Parser +<<<<<<< HEAD opt antlr.Token e IExprContext +======= + opt antlr.Token + e IExprContext +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func NewEmptyOptExprContext() *OptExprContext { @@ -5348,7 +7997,11 @@ func NewEmptyOptExprContext() *OptExprContext { return p } +<<<<<<< HEAD func InitEmptyOptExprContext(p *OptExprContext) { +======= +func InitEmptyOptExprContext(p *OptExprContext) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_optExpr } @@ -5370,6 +8023,7 @@ func (s *OptExprContext) GetParser() antlr.Parser { return s.parser } func (s *OptExprContext) GetOpt() antlr.Token { return s.opt } +<<<<<<< HEAD func (s *OptExprContext) SetOpt(v antlr.Token) { s.opt = v } func (s *OptExprContext) GetE() IExprContext { return s.e } @@ -5381,6 +8035,23 @@ func (s *OptExprContext) Expr() IExprContext { for _, ctx := range s.GetChildren() { if _, ok := ctx.(IExprContext); ok { t = ctx.(antlr.RuleContext) +======= + +func (s *OptExprContext) SetOpt(v antlr.Token) { s.opt = v } + + +func (s *OptExprContext) GetE() IExprContext { return s.e } + + +func (s *OptExprContext) SetE(v IExprContext) { s.e = v } + + +func (s *OptExprContext) Expr() IExprContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + t = ctx.(antlr.RuleContext); +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) break } } @@ -5404,6 +8075,10 @@ func (s *OptExprContext) ToStringTree(ruleNames []string, recog antlr.Recognizer return antlr.TreesStringTree(s, ruleNames, recog) } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *OptExprContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterOptExpr(s) @@ -5426,6 +8101,7 @@ func (s *OptExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } +<<<<<<< HEAD func (p *CELParser) OptExpr() (localctx IOptExprContext) { localctx = NewOptExprContext(p, p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 30, CELParserRULE_optExpr) @@ -5433,27 +8109,52 @@ func (p *CELParser) 
OptExpr() (localctx IOptExprContext) { p.EnterOuterAlt(localctx, 1) p.SetState(238) +======= + + + +func (p *CELParser) OptExpr() (localctx IOptExprContext) { + localctx = NewOptExprContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 28, CELParserRULE_optExpr) + var _la int + + p.EnterOuterAlt(localctx, 1) + p.SetState(230) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.GetErrorHandler().Sync(p) if p.HasError() { goto errorExit } _la = p.GetTokenStream().LA(1) +<<<<<<< HEAD if _la == CELParserQUESTIONMARK { { p.SetState(237) +======= + + if _la == CELParserQUESTIONMARK { + { + p.SetState(229) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _m = p.Match(CELParserQUESTIONMARK) localctx.(*OptExprContext).opt = _m if p.HasError() { +<<<<<<< HEAD // Recognition error - abort rule goto errorExit +======= + // Recognition error - abort rule + goto errorExit +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } } { +<<<<<<< HEAD p.SetState(240) var _x = p.Expr() @@ -5461,6 +8162,18 @@ func (p *CELParser) OptExpr() (localctx IOptExprContext) { localctx.(*OptExprContext).e = _x } +======= + p.SetState(232) + + var _x = p.Expr() + + + localctx.(*OptExprContext).e = _x + } + + + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) errorExit: if p.HasError() { v := p.GetError() @@ -5474,6 +8187,10 @@ errorExit: goto errorExit // Trick to prevent compiler error if the label is not used } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // ILiteralContext is an interface to support dynamic dispatch. type ILiteralContext interface { antlr.ParserRuleContext @@ -5496,7 +8213,11 @@ func NewEmptyLiteralContext() *LiteralContext { return p } +<<<<<<< HEAD func InitEmptyLiteralContext(p *LiteralContext) { +======= +func InitEmptyLiteralContext(p *LiteralContext) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_literal } @@ -5528,6 +8249,12 @@ func (s *LiteralContext) ToStringTree(ruleNames []string, recog antlr.Recognizer return antlr.TreesStringTree(s, ruleNames, recog) } +<<<<<<< HEAD +======= + + + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type BytesContext struct { LiteralContext tok antlr.Token @@ -5543,8 +8270,15 @@ func NewBytesContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BytesCon return p } +<<<<<<< HEAD +func (s *BytesContext) GetTok() antlr.Token { return s.tok } + +======= + func (s *BytesContext) GetTok() antlr.Token { return s.tok } + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *BytesContext) SetTok(v antlr.Token) { s.tok = v } func (s *BytesContext) GetRuleContext() antlr.RuleContext { @@ -5555,6 +8289,10 @@ func (s *BytesContext) BYTES() antlr.TerminalNode { return s.GetToken(CELParserBYTES, 0) } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *BytesContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterBytes(s) @@ -5577,6 +8315,10 @@ func (s *BytesContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type UintContext struct { LiteralContext tok antlr.Token @@ -5592,8 +8334,15 @@ func NewUintContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *UintConte return p } +<<<<<<< HEAD func (s *UintContext) GetTok() antlr.Token { return s.tok } 
+======= + +func (s *UintContext) GetTok() antlr.Token { return s.tok } + + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *UintContext) SetTok(v antlr.Token) { s.tok = v } func (s *UintContext) GetRuleContext() antlr.RuleContext { @@ -5604,6 +8353,10 @@ func (s *UintContext) NUM_UINT() antlr.TerminalNode { return s.GetToken(CELParserNUM_UINT, 0) } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *UintContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterUint(s) @@ -5626,6 +8379,10 @@ func (s *UintContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type NullContext struct { LiteralContext tok antlr.Token @@ -5641,8 +8398,15 @@ func NewNullContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *NullConte return p } +<<<<<<< HEAD +func (s *NullContext) GetTok() antlr.Token { return s.tok } + +======= + func (s *NullContext) GetTok() antlr.Token { return s.tok } + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *NullContext) SetTok(v antlr.Token) { s.tok = v } func (s *NullContext) GetRuleContext() antlr.RuleContext { @@ -5653,6 +8417,10 @@ func (s *NullContext) NUL() antlr.TerminalNode { return s.GetToken(CELParserNUL, 0) } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *NullContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterNull(s) @@ -5675,6 +8443,10 @@ func (s *NullContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type BoolFalseContext struct { LiteralContext tok antlr.Token @@ -5690,8 +8462,15 @@ func NewBoolFalseContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *Bool return p } +<<<<<<< HEAD func (s *BoolFalseContext) GetTok() antlr.Token { return s.tok } +======= + +func (s *BoolFalseContext) GetTok() antlr.Token { return s.tok } + + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *BoolFalseContext) SetTok(v antlr.Token) { s.tok = v } func (s *BoolFalseContext) GetRuleContext() antlr.RuleContext { @@ -5702,6 +8481,10 @@ func (s *BoolFalseContext) CEL_FALSE() antlr.TerminalNode { return s.GetToken(CELParserCEL_FALSE, 0) } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *BoolFalseContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterBoolFalse(s) @@ -5724,6 +8507,10 @@ func (s *BoolFalseContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type StringContext struct { LiteralContext tok antlr.Token @@ -5739,8 +8526,15 @@ func NewStringContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *StringC return p } +<<<<<<< HEAD +func (s *StringContext) GetTok() antlr.Token { return s.tok } + +======= + func (s *StringContext) GetTok() antlr.Token { return s.tok } + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *StringContext) SetTok(v antlr.Token) { s.tok = v } func (s *StringContext) GetRuleContext() antlr.RuleContext { @@ -5751,6 +8545,10 @@ func (s *StringContext) STRING() antlr.TerminalNode { return s.GetToken(CELParserSTRING, 0) } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista 
storage backend) func (s *StringContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterString(s) @@ -5773,10 +8571,18 @@ func (s *StringContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } +<<<<<<< HEAD type DoubleContext struct { LiteralContext sign antlr.Token tok antlr.Token +======= + +type DoubleContext struct { + LiteralContext + sign antlr.Token + tok antlr.Token +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func NewDoubleContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *DoubleContext { @@ -5789,10 +8595,18 @@ func NewDoubleContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *DoubleC return p } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *DoubleContext) GetSign() antlr.Token { return s.sign } func (s *DoubleContext) GetTok() antlr.Token { return s.tok } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *DoubleContext) SetSign(v antlr.Token) { s.sign = v } func (s *DoubleContext) SetTok(v antlr.Token) { s.tok = v } @@ -5809,6 +8623,10 @@ func (s *DoubleContext) MINUS() antlr.TerminalNode { return s.GetToken(CELParserMINUS, 0) } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *DoubleContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterDouble(s) @@ -5831,6 +8649,10 @@ func (s *DoubleContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type BoolTrueContext struct { LiteralContext tok antlr.Token @@ -5846,8 +8668,15 @@ func NewBoolTrueContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BoolT return p } +<<<<<<< HEAD func (s *BoolTrueContext) GetTok() antlr.Token { return s.tok } +======= + +func (s *BoolTrueContext) GetTok() antlr.Token { return s.tok } + + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *BoolTrueContext) SetTok(v antlr.Token) { s.tok = v } func (s *BoolTrueContext) GetRuleContext() antlr.RuleContext { @@ -5858,6 +8687,10 @@ func (s *BoolTrueContext) CEL_TRUE() antlr.TerminalNode { return s.GetToken(CELParserCEL_TRUE, 0) } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *BoolTrueContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterBoolTrue(s) @@ -5880,10 +8713,18 @@ func (s *BoolTrueContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } +<<<<<<< HEAD type IntContext struct { LiteralContext sign antlr.Token tok antlr.Token +======= + +type IntContext struct { + LiteralContext + sign antlr.Token + tok antlr.Token +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func NewIntContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *IntContext { @@ -5896,10 +8737,18 @@ func NewIntContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *IntContext return p } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *IntContext) GetSign() antlr.Token { return s.sign } func (s *IntContext) GetTok() antlr.Token { return s.tok } +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *IntContext) SetSign(v antlr.Token) { s.sign = v } func (s *IntContext) SetTok(v antlr.Token) { s.tok = v } @@ -5916,6 +8765,10 @@ func (s *IntContext) MINUS() antlr.TerminalNode { return 
s.GetToken(CELParserMINUS, 0) }

func (s *IntContext) EnterRule(listener antlr.ParseTreeListener) {
	if listenerT, ok := listener.(CELListener); ok {
		listenerT.EnterInt(s)
	}
}

@@ -5938,64 +8791,113 @@ func (s *IntContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
	}
}

func (p *CELParser) Literal() (localctx ILiteralContext) {
	localctx = NewLiteralContext(p, p.GetParserRuleContext(), p.GetState())
	p.EnterRule(localctx, 32, CELParserRULE_literal)
	var _la int

	p.SetState(256)
	p.GetErrorHandler().Sync(p)
	if p.HasError() {
		goto errorExit
	}
	switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 35, p.GetParserRuleContext()) {
	case 1:
		localctx = NewIntContext(p, localctx)
		p.EnterOuterAlt(localctx, 1)
		p.SetState(243)
		p.GetErrorHandler().Sync(p)
		if p.HasError() {
			goto errorExit
		}
		_la = p.GetTokenStream().LA(1)

		if _la == CELParserMINUS {
			{
				p.SetState(242)

				var _m = p.Match(CELParserMINUS)

				localctx.(*IntContext).sign = _m
				if p.HasError() {
					// Recognition error - abort rule
					goto errorExit
				}
			}
		}
		{
			p.SetState(245)

			var _m = p.Match(CELParserNUM_INT)

			localctx.(*IntContext).tok = _m
			if p.HasError() {
				// Recognition error - abort rule
				goto errorExit
			}
		}

	case 2:
		localctx = NewUintContext(p, localctx)
		p.EnterOuterAlt(localctx, 2)
		{
			p.SetState(246)

			var _m = p.Match(CELParserNUM_UINT)

			localctx.(*UintContext).tok = _m
			if p.HasError() {
				// Recognition error - abort rule
				goto errorExit
			}
@@ -6005,110 +8907,208 @@ func (p *CELParser) Literal() (localctx ILiteralContext) {
		localctx = NewDoubleContext(p, localctx)
		p.EnterOuterAlt(localctx, 3)
		p.SetState(248)
		p.GetErrorHandler().Sync(p)
		if p.HasError() {
			goto errorExit
		}
		_la = p.GetTokenStream().LA(1)

		if _la == CELParserMINUS {
			{
				p.SetState(247)

				var _m = p.Match(CELParserMINUS)

				localctx.(*DoubleContext).sign = _m
				if p.HasError() {
					// Recognition error - abort rule
					goto errorExit
				}
			}
		}
		{
			p.SetState(250)

			var _m = p.Match(CELParserNUM_FLOAT)

			localctx.(*DoubleContext).tok = _m
			if p.HasError() {
				// Recognition error - abort rule
				goto errorExit
			}
		}

	case 4:
		localctx = NewStringContext(p, localctx)
		p.EnterOuterAlt(localctx, 4)
		{
			p.SetState(251)

			var _m = p.Match(CELParserSTRING)

			localctx.(*StringContext).tok = _m
			if p.HasError() {
				// Recognition error - abort rule
				goto errorExit
			}
		}

	case 5:
		localctx = NewBytesContext(p, localctx)
		p.EnterOuterAlt(localctx, 5)
		{
			p.SetState(252)

			var _m = p.Match(CELParserBYTES)

			localctx.(*BytesContext).tok = _m
			if p.HasError() {
				// Recognition error - abort rule
				goto errorExit
			}
		}

	case 6:
		localctx = NewBoolTrueContext(p, localctx)
		p.EnterOuterAlt(localctx, 6)
		{
			p.SetState(253)

			var _m = p.Match(CELParserCEL_TRUE)

			localctx.(*BoolTrueContext).tok = _m
			if p.HasError() {
				// Recognition error - abort rule
				goto errorExit
			}
		}

	case 7:
		localctx = NewBoolFalseContext(p, localctx)
		p.EnterOuterAlt(localctx, 7)
		{
			p.SetState(254)

			var _m = p.Match(CELParserCEL_FALSE)

			localctx.(*BoolFalseContext).tok = _m
			if p.HasError() {
				// Recognition error - abort rule
				goto errorExit
			}
		}

	case 8:
		localctx = NewNullContext(p, localctx)
		p.EnterOuterAlt(localctx, 8)
		{
			p.SetState(255)

			var _m = p.Match(CELParserNUL)

			localctx.(*NullContext).tok = _m
			if p.HasError() {
				// Recognition error - abort rule
				goto errorExit
			}
		}

@@ -6116,6 +9116,10 @@ func (p *CELParser) Literal() (localctx ILiteralContext) {
		goto errorExit
	}

errorExit:
	if p.HasError() {
		v := p.GetError()
@@ -6129,6 +9133,7 @@ errorExit:
	goto errorExit // Trick to prevent compiler error if the label is not used
}

func (p *CELParser) Sempred(localctx antlr.RuleContext, ruleIndex, predIndex int) bool {
	switch ruleIndex {
	case 4:
@@ -6151,6 +9156,26 @@ func (p *CELParser) Sempred(localctx antlr.RuleContext, ruleIndex, predIndex int
		t = localctx.(*MemberContext)
	}
	return p.Member_Sempred(t, predIndex)

	default:
		panic("No predicate with index: " + fmt.Sprint(ruleIndex))
@@ -6160,7 +9185,11 @@ func (p *CELParser) Sempred(localctx antlr.RuleContext, ruleIndex, predIndex int
func (p *CELParser) Relation_Sempred(localctx antlr.RuleContext, predIndex int) bool {
	switch predIndex {
	case 0:
		return p.Precpred(p.GetParserRuleContext(), 1)

	default:
		panic("No predicate with index: " + fmt.Sprint(predIndex))
@@ -6170,10 +9199,17 @@ func (p *CELParser) Relation_Sempred(localctx antlr.RuleContext, predIndex int)
func (p *CELParser) Calc_Sempred(localctx antlr.RuleContext, predIndex int) bool {
	switch predIndex {
	case 1:
		return p.Precpred(p.GetParserRuleContext(), 2)

	case 2:
		return p.Precpred(p.GetParserRuleContext(), 1)

	default:
		panic("No predicate with index: " + fmt.Sprint(predIndex))
@@ -6183,6 +9219,7 @@ func (p *CELParser) Calc_Sempred(localctx antlr.RuleContext, predIndex int) bool
func (p *CELParser) Member_Sempred(localctx antlr.RuleContext, predIndex int) bool {
	switch predIndex {
	case 3:
		return p.Precpred(p.GetParserRuleContext(), 3)

	case 4:
@@ -6190,8 +9227,21 @@ func (p *CELParser) Member_Sempred(localctx antlr.RuleContext, predIndex int) bo
	case 5:
		return p.Precpred(p.GetParserRuleContext(), 1)

	default:
		panic("No predicate with index: " + fmt.Sprint(predIndex))
	}
}
diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go
index f47c77a2ba..5c2b890a5d 100644
--- a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go
+++ b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go
@@ -297,8 +297,11 @@ const (
	ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_4 ValidatePeerCertificateChainReq_VerificationMode = 4
	// Internal use only.
	ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_5 ValidatePeerCertificateChainReq_VerificationMode = 5
	// Internal use only.
	ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_6 ValidatePeerCertificateChainReq_VerificationMode = 6
)

// Enum value maps for ValidatePeerCertificateChainReq_VerificationMode.
@@ -310,7 +313,10 @@ var ( 3: "RESERVED_CUSTOM_VERIFICATION_MODE_3", 4: "RESERVED_CUSTOM_VERIFICATION_MODE_4", 5: "RESERVED_CUSTOM_VERIFICATION_MODE_5", +<<<<<<< HEAD 6: "RESERVED_CUSTOM_VERIFICATION_MODE_6", +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } ValidatePeerCertificateChainReq_VerificationMode_value = map[string]int32{ "UNSPECIFIED": 0, @@ -319,7 +325,10 @@ var ( "RESERVED_CUSTOM_VERIFICATION_MODE_3": 3, "RESERVED_CUSTOM_VERIFICATION_MODE_4": 4, "RESERVED_CUSTOM_VERIFICATION_MODE_5": 5, +<<<<<<< HEAD "RESERVED_CUSTOM_VERIFICATION_MODE_6": 6, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } ) @@ -1982,8 +1991,13 @@ var file_internal_proto_v2_s2a_s2a_proto_rawDesc = []byte{ 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, +<<<<<<< HEAD 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x9d, 0x06, 0x0a, 0x1f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, +======= + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xf4, + 0x05, 0x0a, 0x1f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x52, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, @@ -2017,7 +2031,11 @@ var file_internal_proto_v2_s2a_s2a_proto_rawDesc = []byte{ 0x6e, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x6e, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, +<<<<<<< HEAD 0x69, 0x63, 0x79, 0x22, 0xea, 0x01, 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, +======= + 0x69, 0x63, 0x79, 0x22, 0xc1, 0x01, 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, @@ -2029,6 +2047,7 @@ var file_internal_proto_v2_s2a_s2a_proto_rawDesc = []byte{ 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x34, 0x10, 0x04, 0x12, 0x27, 0x0a, 0x23, 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, 0x44, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, +<<<<<<< HEAD 0x4f, 0x44, 0x45, 0x5f, 0x35, 0x10, 0x05, 0x12, 0x27, 0x0a, 0x23, 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, 0x44, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x36, 0x10, 0x06, @@ -2166,6 +2185,143 @@ var file_internal_proto_v2_s2a_s2a_proto_rawDesc = []byte{ 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +======= + 0x4f, 0x44, 0x45, 0x5f, 0x35, 0x10, 0x05, 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, + 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xb2, 0x02, 0x0a, 0x20, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x6c, 0x0a, 0x11, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, + 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x32, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x3d, 0x0a, 0x10, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, + 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a, + 0x07, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x02, 0x22, 0xa0, 0x05, 0x0a, 0x0a, 0x53, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x3d, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, + 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x62, 0x0a, 0x19, 0x61, 0x75, 0x74, 0x68, + 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, + 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, + 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, + 0x73, 0x6d, 0x52, 0x18, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x61, 0x0a, 0x19, + 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, + 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, + 0x77, 0x0a, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, + 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x72, 0x65, 0x71, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, + 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1d, 0x6f, 0x66, 0x66, 0x6c, 0x6f, + 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x6f, 0x66, 0x66, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, + 0x71, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, + 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x20, 0x6f, 0x66, 0x66, 0x6c, 0x6f, + 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x7d, 0x0a, 0x23, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, + 0x65, 0x71, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, + 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, + 0x71, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0xb4, 0x04, + 0x0a, 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x64, 0x0a, 0x1a, 0x67, + 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, + 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 
0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x17, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x12, 0x7a, 0x0a, 0x22, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, + 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, + 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x1e, 0x6f, + 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, + 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x83, 0x01, + 0x0a, 0x25, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, + 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, + 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, + 0x52, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, + 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x48, 0x00, 0x52, 0x20, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, + 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x42, 0x0c, 0x0a, 0x0a, 0x72, 0x65, 0x73, 0x70, 0x5f, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0xa2, 0x03, 0x0a, 0x12, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x1c, 0x0a, 0x18, 0x53, + 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, + 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, + 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x21, 0x0a, 0x1d, + 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, + 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, + 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 
0x5f, + 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, + 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, + 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, + 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x27, 0x0a, 0x23, 0x53, + 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, + 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x33, 0x38, 0x34, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, + 0x38, 0x34, 0x10, 0x05, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, + 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x35, + 0x32, 0x31, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x06, 0x12, 0x24, 0x0a, + 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, + 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, + 0x36, 0x10, 0x07, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, + 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, + 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x08, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, + 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, + 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x09, 0x12, + 0x18, 0x0a, 0x14, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, + 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x0a, 0x32, 0x57, 0x0a, 0x0a, 0x53, 0x32, 0x41, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, + 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x1a, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, + 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, + 0x30, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, + 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var ( diff --git a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go b/vendor/github.com/google/s2a-go/internal/v2/s2av2.go index 0cc78547e9..9430b5b360 100644 --- a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go +++ b/vendor/github.com/google/s2a-go/internal/v2/s2av2.go @@ -64,13 +64,21 @@ type s2av2TransportCreds struct { localIdentities []*commonpb.Identity verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode fallbackClientHandshake fallback.ClientHandshake +<<<<<<< HEAD getS2AStream stream.GetS2AStream +======= + getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) serverAuthorizationPolicy []byte } // NewClientCreds returns a client-side transport credentials object that uses // the S2Av2 to 
establish a secure connection with a server. +<<<<<<< HEAD func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentity *commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, fallbackClientHandshakeFunc fallback.ClientHandshake, getS2AStream stream.GetS2AStream, serverAuthorizationPolicy []byte) (credentials.TransportCredentials, error) { +======= +func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentity *commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, fallbackClientHandshakeFunc fallback.ClientHandshake, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error), serverAuthorizationPolicy []byte) (credentials.TransportCredentials, error) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Create an AccessTokenManager instance to use to authenticate to S2Av2. accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() @@ -101,7 +109,11 @@ func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCre // NewServerCreds returns a server-side transport credentials object that uses // the S2Av2 to establish a secure connection with a client. +<<<<<<< HEAD func NewServerCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentities []*commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, getS2AStream stream.GetS2AStream) (credentials.TransportCredentials, error) { +======= +func NewServerCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentities []*commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (credentials.TransportCredentials, error) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Create an AccessTokenManager instance to use to authenticate to S2Av2. 
accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() creds := &s2av2TransportCreds{ @@ -306,9 +318,14 @@ func NewClientTLSConfig( tokenManager tokenmanager.AccessTokenManager, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, serverName string, +<<<<<<< HEAD serverAuthorizationPolicy []byte, getStream stream.GetS2AStream) (*tls.Config, error) { s2AStream, err := createStream(ctx, s2av2Address, transportCreds, getStream) +======= + serverAuthorizationPolicy []byte) (*tls.Config, error) { + s2AStream, err := createStream(ctx, s2av2Address, transportCreds, nil) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { grpclog.Infof("Failed to connect to S2Av2: %v", err) return nil, err @@ -351,7 +368,11 @@ func (x s2AGrpcStream) CloseSend() error { return x.stream.CloseSend() } +<<<<<<< HEAD func createStream(ctx context.Context, s2av2Address string, transportCreds credentials.TransportCredentials, getS2AStream stream.GetS2AStream) (stream.S2AStream, error) { +======= +func createStream(ctx context.Context, s2av2Address string, transportCreds credentials.TransportCredentials, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (stream.S2AStream, error) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if getS2AStream != nil { return getS2AStream(ctx, s2av2Address) } diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go index 6ca75f5608..54898f4ad6 100644 --- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go +++ b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go @@ -75,7 +75,11 @@ func GetTLSConfigurationForClient(serverHostname string, s2AStream stream.S2AStr return nil, fmt.Errorf("failed to get TLS configuration from S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) } +<<<<<<< HEAD // Extract TLS configuration from SessionResp. +======= + // Extract TLS configiguration from SessionResp. 
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) tlsConfig := resp.GetGetTlsConfigurationResp().GetClientTlsConfiguration() var cert tls.Certificate diff --git a/vendor/github.com/google/s2a-go/s2a.go b/vendor/github.com/google/s2a-go/s2a.go index c52fccddf8..139bb2d025 100644 --- a/vendor/github.com/google/s2a-go/s2a.go +++ b/vendor/github.com/google/s2a-go/s2a.go @@ -35,7 +35,10 @@ import ( "github.com/google/s2a-go/internal/tokenmanager" "github.com/google/s2a-go/internal/v2" "github.com/google/s2a-go/retry" +<<<<<<< HEAD "github.com/google/s2a-go/stream" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/protobuf/proto" @@ -331,7 +334,10 @@ func NewTLSClientConfigFactory(opts *ClientOptions) (TLSClientConfigFactory, err tokenManager: nil, verificationMode: getVerificationMode(opts.VerificationMode), serverAuthorizationPolicy: opts.serverAuthorizationPolicy, +<<<<<<< HEAD getStream: opts.getS2AStream, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }, nil } return &s2aTLSClientConfigFactory{ @@ -340,7 +346,10 @@ func NewTLSClientConfigFactory(opts *ClientOptions) (TLSClientConfigFactory, err tokenManager: tokenManager, verificationMode: getVerificationMode(opts.VerificationMode), serverAuthorizationPolicy: opts.serverAuthorizationPolicy, +<<<<<<< HEAD getStream: opts.getS2AStream, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }, nil } @@ -350,7 +359,10 @@ type s2aTLSClientConfigFactory struct { tokenManager tokenmanager.AccessTokenManager verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode serverAuthorizationPolicy []byte +<<<<<<< HEAD getStream stream.GetS2AStream +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (f *s2aTLSClientConfigFactory) Build( @@ -359,7 +371,11 @@ func (f *s2aTLSClientConfigFactory) Build( if opts != nil && opts.ServerName != "" { serverName = opts.ServerName } +<<<<<<< HEAD return v2.NewClientTLSConfig(ctx, f.s2av2Address, f.transportCreds, f.tokenManager, f.verificationMode, serverName, f.serverAuthorizationPolicy, f.getStream) +======= + return v2.NewClientTLSConfig(ctx, f.s2av2Address, f.transportCreds, f.tokenManager, f.verificationMode, serverName, f.serverAuthorizationPolicy) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func getVerificationMode(verificationMode VerificationModeType) s2av2pb.ValidatePeerCertificateChainReq_VerificationMode { @@ -374,8 +390,11 @@ func getVerificationMode(verificationMode VerificationModeType) s2av2pb.Validate return s2av2pb.ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_4 case ReservedCustomVerificationMode5: return s2av2pb.ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_5 +<<<<<<< HEAD case ReservedCustomVerificationMode6: return s2av2pb.ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_6 +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: return s2av2pb.ValidatePeerCertificateChainReq_UNSPECIFIED } diff --git a/vendor/github.com/google/s2a-go/s2a_options.go b/vendor/github.com/google/s2a-go/s2a_options.go index b7a277f9e3..c5c35e935b 100644 --- a/vendor/github.com/google/s2a-go/s2a_options.go +++ b/vendor/github.com/google/s2a-go/s2a_options.go @@ -19,6 +19,10 @@ package s2a import ( +<<<<<<< HEAD +======= + "context" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "crypto/tls" "errors" "sync" @@ -27,7 +31,11 @@ import ( 
"github.com/google/s2a-go/stream" "google.golang.org/grpc/credentials" +<<<<<<< HEAD s2av1pb "github.com/google/s2a-go/internal/proto/common_go_proto" +======= + s2apbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) s2apb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" ) @@ -35,6 +43,7 @@ import ( type Identity interface { // Name returns the name of the identity. Name() string +<<<<<<< HEAD Attributes() map[string]string } @@ -46,6 +55,8 @@ func (u *UnspecifiedID) Name() string { return "" } func (u *UnspecifiedID) Attributes() map[string]string { return u.Attr +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type spiffeID struct { @@ -54,10 +65,17 @@ type spiffeID struct { func (s *spiffeID) Name() string { return s.spiffeID } +<<<<<<< HEAD func (spiffeID) Attributes() map[string]string { return nil } // NewSpiffeID creates a SPIFFE ID from id. func NewSpiffeID(id string) Identity { return &spiffeID{spiffeID: id} } +======= +// NewSpiffeID creates a SPIFFE ID from id. +func NewSpiffeID(id string) Identity { + return &spiffeID{spiffeID: id} +} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type hostname struct { hostname string @@ -65,10 +83,17 @@ type hostname struct { func (h *hostname) Name() string { return h.hostname } +<<<<<<< HEAD func (hostname) Attributes() map[string]string { return nil } // NewHostname creates a hostname from name. func NewHostname(name string) Identity { return &hostname{hostname: name} } +======= +// NewHostname creates a hostname from name. +func NewHostname(name string) Identity { + return &hostname{hostname: name} +} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type uid struct { uid string @@ -76,10 +101,17 @@ type uid struct { func (h *uid) Name() string { return h.uid } +<<<<<<< HEAD func (uid) Attributes() map[string]string { return nil } // NewUID creates a UID from name. func NewUID(name string) Identity { return &uid{uid: name} } +======= +// NewUID creates a UID from name. +func NewUID(name string) Identity { + return &uid{uid: name} +} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // VerificationModeType specifies the mode that S2A must use to verify the peer // certificate chain. @@ -93,7 +125,10 @@ const ( ReservedCustomVerificationMode3 ReservedCustomVerificationMode4 ReservedCustomVerificationMode5 +<<<<<<< HEAD ReservedCustomVerificationMode6 +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // ClientOptions contains the client-side options used to establish a secure @@ -148,7 +183,11 @@ type ClientOptions struct { FallbackOpts *FallbackOptions // Generates an S2AStream interface for talking to the S2A server. +<<<<<<< HEAD getS2AStream stream.GetS2AStream +======= + getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Serialized user specified policy for server authorization. serverAuthorizationPolicy []byte @@ -202,7 +241,11 @@ type ServerOptions struct { VerificationMode VerificationModeType // Generates an S2AStream interface for talking to the S2A server. +<<<<<<< HEAD getS2AStream stream.GetS2AStream +======= + getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // DefaultServerOptions returns the default server options. 
@@ -213,12 +256,17 @@ func DefaultServerOptions(s2aAddress string) *ServerOptions { } } +<<<<<<< HEAD func toProtoIdentity(identity Identity) (*s2av1pb.Identity, error) { +======= +func toProtoIdentity(identity Identity) (*s2apbv1.Identity, error) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if identity == nil { return nil, nil } switch id := identity.(type) { case *spiffeID: +<<<<<<< HEAD return &s2av1pb.Identity{ IdentityOneof: &s2av1pb.Identity_SpiffeId{SpiffeId: id.Name()}, Attributes: id.Attributes(), @@ -237,6 +285,13 @@ func toProtoIdentity(identity Identity) (*s2av1pb.Identity, error) { return &s2av1pb.Identity{ Attributes: id.Attributes(), }, nil +======= + return &s2apbv1.Identity{IdentityOneof: &s2apbv1.Identity_SpiffeId{SpiffeId: id.Name()}}, nil + case *hostname: + return &s2apbv1.Identity{IdentityOneof: &s2apbv1.Identity_Hostname{Hostname: id.Name()}}, nil + case *uid: + return &s2apbv1.Identity{IdentityOneof: &s2apbv1.Identity_Uid{Uid: id.Name()}}, nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: return nil, errors.New("unrecognized identity type") } @@ -248,6 +303,7 @@ func toV2ProtoIdentity(identity Identity) (*s2apb.Identity, error) { } switch id := identity.(type) { case *spiffeID: +<<<<<<< HEAD return &s2apb.Identity{ IdentityOneof: &s2apb.Identity_SpiffeId{SpiffeId: id.Name()}, Attributes: id.Attributes(), @@ -266,6 +322,13 @@ func toV2ProtoIdentity(identity Identity) (*s2apb.Identity, error) { return &s2apb.Identity{ Attributes: id.Attributes(), }, nil +======= + return &s2apb.Identity{IdentityOneof: &s2apb.Identity_SpiffeId{SpiffeId: id.Name()}}, nil + case *hostname: + return &s2apb.Identity{IdentityOneof: &s2apb.Identity_Hostname{Hostname: id.Name()}}, nil + case *uid: + return &s2apb.Identity{IdentityOneof: &s2apb.Identity_Uid{Uid: id.Name()}}, nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: return nil, errors.New("unrecognized identity type") } diff --git a/vendor/github.com/google/s2a-go/stream/s2a_stream.go b/vendor/github.com/google/s2a-go/stream/s2a_stream.go index ae2d5eb4c1..7f1d232dd7 100644 --- a/vendor/github.com/google/s2a-go/stream/s2a_stream.go +++ b/vendor/github.com/google/s2a-go/stream/s2a_stream.go @@ -20,8 +20,11 @@ package stream import ( +<<<<<<< HEAD "context" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" ) @@ -34,6 +37,9 @@ type S2AStream interface { // Closes the channel to the S2A server. CloseSend() error } +<<<<<<< HEAD // GetS2AStream type is for generating an S2AStream interface for talking to the S2A server. 
type GetS2AStream func(ctx context.Context, s2av2Address string, opts ...string) (S2AStream, error) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json index a8c082dd61..6c74a69143 100644 --- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -1,3 +1,7 @@ { +<<<<<<< HEAD "v2": "2.14.1" +======= + "v2": "2.14.0" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md index 17cced15ec..45f6146368 100644 --- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -1,5 +1,6 @@ # Changelog +<<<<<<< HEAD ## [2.14.1](https://github.com/googleapis/gax-go/compare/v2.14.0...v2.14.1) (2024-12-19) @@ -12,6 +13,8 @@ * fix godoc to refer to the proper envvar ([#387](https://github.com/googleapis/gax-go/issues/387)) ([dc6baf7](https://github.com/googleapis/gax-go/commit/dc6baf75c1a737233739630b5af6c9759f08abcd)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## [2.14.0](https://github.com/googleapis/gax-go/compare/v2.13.0...v2.14.0) (2024-11-13) diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go index 2b284a24a4..d0b40e56b2 100644 --- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go +++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -30,4 +30,8 @@ package internal // Version is the current tagged release of the library. +<<<<<<< HEAD const Version = "2.14.1" +======= +const Version = "2.14.0" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/googleapis/gax-go/v2/internallog/internallog.go b/vendor/github.com/googleapis/gax-go/v2/internallog/internallog.go index e47ab32acc..fb696a3220 100644 --- a/vendor/github.com/googleapis/gax-go/v2/internallog/internallog.go +++ b/vendor/github.com/googleapis/gax-go/v2/internallog/internallog.go @@ -44,7 +44,11 @@ import ( // New returns a new [slog.Logger] default logger, or the provided logger if // non-nil. The returned logger will be a no-op logger unless the environment +<<<<<<< HEAD // variable GOOGLE_SDK_GO_LOGGING_LEVEL is set. +======= +// variable GOOGLE_SDK_DEBUG_LOGGING is set. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func New(l *slog.Logger) *slog.Logger { if l != nil { return l diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go index 2e50082ad1..ee799210ca 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go @@ -94,7 +94,11 @@ func Int64(val string) (int64, error) { } // Int64Slice converts 'val' where individual integers are separated by +<<<<<<< HEAD // 'sep' into an int64 slice. +======= +// 'sep' into a int64 slice. 
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func Int64Slice(val, sep string) ([]int64, error) { s := strings.Split(val, sep) values := make([]int64, len(s)) @@ -118,7 +122,11 @@ func Int32(val string) (int32, error) { } // Int32Slice converts 'val' where individual integers are separated by +<<<<<<< HEAD // 'sep' into an int32 slice. +======= +// 'sep' into a int32 slice. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func Int32Slice(val, sep string) ([]int32, error) { s := strings.Split(val, sep) values := make([]int32, len(s)) @@ -190,7 +198,11 @@ func Bytes(val string) ([]byte, error) { } // BytesSlice converts 'val' where individual bytes sequences, encoded in URL-safe +<<<<<<< HEAD // base64 without padding, are separated by 'sep' into a slice of byte slices. +======= +// base64 without padding, are separated by 'sep' into a slice of bytes slices slice. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func BytesSlice(val, sep string) ([][]byte, error) { s := strings.Split(val, sep) values := make([][]byte, len(s)) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go index 41cd4f5030..355de3293a 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go @@ -81,6 +81,7 @@ func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.R mux.errorHandler(ctx, mux, marshaler, w, r, err) } +<<<<<<< HEAD // HTTPStreamError uses the mux-configured stream error handler to notify error to the client without closing the connection. func HTTPStreamError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { st := mux.streamErrorHandler(ctx, err) @@ -96,6 +97,8 @@ func HTTPStreamError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w } } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // DefaultHTTPErrorHandler is the default error handler. // If "err" is a gRPC Status, the function replies with the status code mapped by HTTPStatusFromCode. // If "err" is a HTTPStatusError, the function replies with the status code provide by that struct. 
This is diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go index 2fcd7af3c4..b03924697e 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go @@ -155,7 +155,11 @@ func buildPathsBlindly(name string, in interface{}) []string { return paths } +<<<<<<< HEAD // fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask +======= +// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type fieldMaskPathItem struct { // the list of prior fields leading up to node connected by dots path string diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go index 0fa9076566..fa156307fd 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go @@ -64,6 +64,7 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal } if !wroteHeader { +<<<<<<< HEAD var contentType string if sct, ok := marshaler.(StreamContentType); ok { contentType = sct.StreamContentType(respRw) @@ -71,6 +72,9 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal contentType = marshaler.ContentType(respRw) } w.Header().Set("Content-Type", contentType) +======= + w.Header().Set("Content-Type", marshaler.ContentType(respRw)) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var buf []byte @@ -200,7 +204,11 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha w.Header().Set("Content-Length", strconv.Itoa(len(buf))) } +<<<<<<< HEAD if _, err = w.Write(buf); err != nil && !errors.Is(err, http.ErrBodyNotAllowed) { +======= + if _, err = w.Write(buf); err != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) grpclog.Errorf("Failed to write response: %v", err) } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go index b1dfc37af9..0cd66b13bc 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go @@ -48,6 +48,7 @@ type Delimited interface { // Delimiter returns the record separator for the stream. Delimiter() []byte } +<<<<<<< HEAD // StreamContentType defines the streaming content type. type StreamContentType interface { @@ -56,3 +57,5 @@ type StreamContentType interface { // in the case of a streamed response. StreamContentType(v interface{}) string } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go index 07c28112c8..f8c366e023 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go @@ -86,8 +86,13 @@ func (m marshalerRegistry) add(mime string, marshaler Marshaler) error { // It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces. 
// // For example, you could allow the client to specify the use of the runtime.JSONPb marshaler +<<<<<<< HEAD // with an "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler // with an "application/json" Content-Type. +======= +// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler +// with a "application/json" Content-Type. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // "*" can be used to match any Content-Type. // This can be attached to a ServerMux with the marshaler option. func makeMarshalerMIMERegistry() marshalerRegistry { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go index f710036b35..5a6897fd5d 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go @@ -40,7 +40,11 @@ func Float32P(val string) (*float32, error) { } // Int64P parses the given string representation of an integer +<<<<<<< HEAD // and returns a pointer to an int64 whose value is same as the parsed integer. +======= +// and returns a pointer to a int64 whose value is same as the parsed integer. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func Int64P(val string) (*int64, error) { i, err := Int64(val) if err != nil { @@ -50,7 +54,11 @@ func Int64P(val string) (*int64, error) { } // Int32P parses the given string representation of an integer +<<<<<<< HEAD // and returns a pointer to an int32 whose value is same as the parsed integer. +======= +// and returns a pointer to a int32 whose value is same as the parsed integer. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func Int32P(val string) (*int32, error) { i, err := Int32(val) if err != nil { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go index 0a1ca7e06f..8fa2f66bef 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go @@ -141,7 +141,11 @@ func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []strin } // Check if oneof already set +<<<<<<< HEAD if of := fieldDescriptor.ContainingOneof(); of != nil && !of.IsSynthetic() { +======= + if of := fieldDescriptor.ContainingOneof(); of != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if f := msgValue.WhichOneof(of); f != nil { return fmt.Errorf("field already set for oneof %q", of.FullName().Name()) } @@ -291,11 +295,15 @@ func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (p if err != nil { return protoreflect.Value{}, err } +<<<<<<< HEAD timestamp := timestamppb.New(t) if ok := timestamp.IsValid(); !ok { return protoreflect.Value{}, fmt.Errorf("%s before 0001-01-01", value) } msg = timestamp +======= + msg = timestamppb.New(t) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) case "google.protobuf.Duration": d, err := time.ParseDuration(value) if err != nil { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go index 38ca39cc53..2844a697aa 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go @@ -1,6 +1,10 @@ package 
utilities +<<<<<<< HEAD // OpCode is an opcode of compiled path patterns. +======= +// An OpCode is a opcode of compiled path patterns. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type OpCode int // These constants are the valid values of OpCode. diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go index 66aa5f2dcc..120cdae689 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go @@ -5,7 +5,11 @@ import ( "strings" ) +<<<<<<< HEAD // flagInterface is a cut down interface to `flag` +======= +// flagInterface is an cut down interface to `flag` +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type flagInterface interface { Var(value flag.Value, name string, usage string) } diff --git a/vendor/github.com/in-toto/archivista/LICENSE b/vendor/github.com/in-toto/archivista/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/in-toto/archivista/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/in-toto/archivista/pkg/api/client.go b/vendor/github.com/in-toto/archivista/pkg/api/client.go new file mode 100644 index 0000000000..bab00e2058 --- /dev/null +++ b/vendor/github.com/in-toto/archivista/pkg/api/client.go @@ -0,0 +1,126 @@ +// client.go +package api + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "io" + "net/http" + "net/url" + + "github.com/in-toto/go-witness/dsse" +) + +// Client wraps HTTP calls to an Archivista service. +type Client struct { + baseURL string + httpClient *http.Client +} + +// NewClient creates a new Archivista API client. +func NewClient(baseURL string) (*Client, error) { + // Validate baseURL. + _, err := url.ParseRequestURI(baseURL) + if err != nil { + return nil, err + } + return &Client{ + baseURL: baseURL, + httpClient: &http.Client{}, + }, nil +} + +// UploadResponse represents the response from Archivista after a successful upload. + +// UploadDSSE uploads a DSSE envelope to Archivista. 
+// Note that this method now accepts a dsse.Envelope rather than a pointer to an UploadRequest. +func (c *Client) UploadDSSE(ctx context.Context, envelope dsse.Envelope) (*UploadResponse, error) { + uploadURL, err := url.JoinPath(c.baseURL, "upload") + if err != nil { + return nil, err + } + + bodyBytes, err := json.Marshal(envelope) + if err != nil { + return nil, err + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadURL, bytes.NewReader(bodyBytes)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + respBytes, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, errors.New(string(respBytes)) + } + + var uploadResp UploadResponse + if err := json.Unmarshal(respBytes, &uploadResp); err != nil { + return nil, err + } + return &uploadResp, nil +} + +// Artifact represents a retrieved artifact from Archivista. +type Artifact struct { + Payload []byte `json:"payload"` + Signature []byte `json:"signature"` +} + +// GetArtifact retrieves a DSSE envelope by key from Archivista, +// decodes it as a dsse.Envelope, and converts it into an Artifact. +// It uses the envelope's payload and (if available) the first signature. +func (c *Client) GetArtifact(ctx context.Context, key string) (*Artifact, error) { + downloadURL, err := url.JoinPath(c.baseURL, "download", key) + if err != nil { + return nil, err + } + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, downloadURL, nil) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + respBytes, _ := io.ReadAll(resp.Body) + return nil, errors.New(string(respBytes)) + } + + // Decode the response into a DSSE envelope. + var envelope dsse.Envelope + if err := json.NewDecoder(resp.Body).Decode(&envelope); err != nil { + return nil, err + } + + // Ensure that at least one signature exists. + if len(envelope.Signatures) == 0 { + return nil, errors.New("no signatures in DSSE envelope") + } + + // Create an Artifact using the envelope's payload and the first signature. + artifact := &Artifact{ + Payload: envelope.Payload, + Signature: envelope.Signatures[0].Signature, + } + return artifact, nil +} diff --git a/vendor/github.com/in-toto/archivista/pkg/api/download.go b/vendor/github.com/in-toto/archivista/pkg/api/download.go new file mode 100644 index 0000000000..12db87f07f --- /dev/null +++ b/vendor/github.com/in-toto/archivista/pkg/api/download.go @@ -0,0 +1,128 @@ +<<<<<<< HEAD +// Copyright 2023 The Witness Contributors +======= +// Copyright 2023-2024 The Witness Contributors +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
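For orientation, a minimal usage sketch of the Client added in client.go above. The base URL and gitoid are placeholders, and the empty envelope is for brevity only; a real caller would pass a signed DSSE envelope:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/in-toto/archivista/pkg/api"
	"github.com/in-toto/go-witness/dsse"
)

func main() {
	ctx := context.Background()

	client, err := api.NewClient("http://localhost:8082") // placeholder URL
	if err != nil {
		log.Fatal(err)
	}

	// Store an envelope (payload and signatures elided here).
	resp, err := client.UploadDSSE(ctx, dsse.Envelope{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("upload response: %+v\n", resp)

	// Retrieve an artifact back by its gitoid key.
	artifact, err := client.GetArtifact(ctx, "<gitoid>") // placeholder key
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("payload: %d bytes, signature: %d bytes\n",
		len(artifact.Payload), len(artifact.Signature))
}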
+ +package api + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "io" + "net/http" + "net/url" + + "github.com/in-toto/go-witness/dsse" +) + +<<<<<<< HEAD +func Download(ctx context.Context, baseUrl string, gitoid string) (dsse.Envelope, error) { + buf := &bytes.Buffer{} + if err := DownloadWithWriter(ctx, baseUrl, gitoid, buf); err != nil { +======= +func DownloadReadCloser(ctx context.Context, baseURL string, gitoid string) (io.ReadCloser, error) { + return DownloadReadCloserWithHTTPClient(ctx, &http.Client{}, baseURL, gitoid) +} + +func DownloadReadCloserWithHTTPClient(ctx context.Context, client *http.Client, baseURL string, gitoid string) (io.ReadCloser, error) { + downloadURL, err := url.JoinPath(baseURL, "download", gitoid) + if err != nil { + return nil, err + } + req, err := http.NewRequestWithContext(ctx, http.MethodGet, downloadURL, nil) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + resp, err := client.Do(req) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + // NOTE: attempt to read body on error and + // only close if an error occurs + defer resp.Body.Close() + errMsg, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + return nil, errors.New(string(errMsg)) + } + return resp.Body, nil +} + +func Download(ctx context.Context, baseURL string, gitoid string) (dsse.Envelope, error) { + buf := &bytes.Buffer{} + if err := DownloadWithWriter(ctx, baseURL, gitoid, buf); err != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) + return dsse.Envelope{}, err + } + + env := dsse.Envelope{} + dec := json.NewDecoder(buf) + if err := dec.Decode(&env); err != nil { + return env, err + } + + return env, nil +} + +<<<<<<< HEAD +func DownloadWithWriter(ctx context.Context, baseUrl, gitoid string, dst io.Writer) error { + downloadUrl, err := url.JoinPath(baseUrl, "download", gitoid) +======= +func DownloadWithWriter(ctx context.Context, baseURL string, gitoid string, dst io.Writer) error { + return DownloadWithWriterWithHTTPClient(ctx, &http.Client{}, baseURL, gitoid, dst) +} + +func DownloadWithWriterWithHTTPClient(ctx context.Context, client *http.Client, baseURL string, gitoid string, dst io.Writer) error { + downloadUrl, err := url.JoinPath(baseURL, "download", gitoid) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) + if err != nil { + return err + } + +<<<<<<< HEAD + req, err := http.NewRequestWithContext(ctx, "GET", downloadUrl, nil) +======= + req, err := http.NewRequestWithContext(ctx, http.MethodGet, downloadUrl, nil) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) + if err != nil { + return err + } + + req.Header.Set("Content-Type", "application/json") + hc := &http.Client{} + resp, err := hc.Do(req) + if err != nil { + return err + } + + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + errMsg, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + + return errors.New(string(errMsg)) + } + + _, err = io.Copy(dst, resp.Body) + return err +} diff --git a/vendor/github.com/in-toto/archivista/pkg/api/graphql.go b/vendor/github.com/in-toto/archivista/pkg/api/graphql.go new file mode 100644 index 0000000000..e7023cd6bf --- /dev/null +++ b/vendor/github.com/in-toto/archivista/pkg/api/graphql.go @@ -0,0 +1,174 @@ +<<<<<<< HEAD +// Copyright 2023 The Witness Contributors +======= +// Copyright 2023-2024 The Witness Contributors +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) +// +// Licensed under the Apache 
diff --git a/vendor/github.com/in-toto/archivista/pkg/api/graphql.go b/vendor/github.com/in-toto/archivista/pkg/api/graphql.go
new file mode 100644
index 0000000000..e7023cd6bf
--- /dev/null
+++ b/vendor/github.com/in-toto/archivista/pkg/api/graphql.go
@@ -0,0 +1,174 @@
+// Copyright 2023-2024 The Witness Contributors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package api
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+)
+
+const RetrieveSubjectsQuery = `query($gitoid: String!) {
+	subjects(
+		where: {
+			hasStatementWith:{
+				hasDsseWith:{
+					gitoidSha256: $gitoid
+				}
+			}
+		}
+	) {
+		edges {
+			node{
+				name
+				subjectDigests{
+					algorithm
+					value
+				}
+			}
+		}
+	}
+}`
+
+const SearchQuery = `query($algo: String!, $digest: String!) {
+	dsses(
+		where: {
+			hasStatementWith: {
+				hasSubjectsWith: {
+					hasSubjectDigestsWith: {
+						value: $digest,
+						algorithm: $algo
+					}
+				}
+			}
+		}
+	) {
+		edges {
+			node {
+				gitoidSha256
+				statement {
+					attestationCollections {
+						name
+						attestations {
+							type
+						}
+					}
+				}
+			}
+		}
+	}
+}`
+
+func GraphQlQuery[TRes any, TVars any](ctx context.Context, baseUrl, query string, vars TVars) (TRes, error) {
+	return GraphQlQueryWithHeaders[TRes, TVars](ctx, baseUrl, query, vars, nil)
+}
+
+func GraphQlQueryWithHeaders[TRes any, TVars any](ctx context.Context, baseUrl, query string, vars TVars, headers map[string]string) (TRes, error) {
+	var response TRes
+	queryUrl, err := url.JoinPath(baseUrl, "query")
+	if err != nil {
+		return response, err
+	}
+
+	requestBody := GraphQLRequestBodyGeneric[TVars]{
+		Query:     query,
+		Variables: vars,
+	}
+
+	reqBody, err := json.Marshal(requestBody)
+	if err != nil {
+		return response, err
+	}
+
+	req, err := http.NewRequestWithContext(ctx, http.MethodPost, queryUrl, bytes.NewReader(reqBody))
+	if err != nil {
+		return response, err
+	}
+
+	for k, v := range headers {
+		req.Header.Set(k, v)
+	}
+
+	req.Header.Set("Content-Type", "application/json")
+	hc := &http.Client{}
+	res, err := hc.Do(req)
+	if err != nil {
+		return response, err
+	}
+
+	defer res.Body.Close()
+	if res.StatusCode != http.StatusOK {
+		errMsg, err := io.ReadAll(res.Body)
+		if err != nil {
+			return response, err
+		}
+
+		return response, errors.New(string(errMsg))
+	}
+
+	dec := json.NewDecoder(res.Body)
+	gqlRes := GraphQLResponseGeneric[TRes]{}
+	if err := dec.Decode(&gqlRes); err != nil {
+		return response, err
+	}
+
+	if len(gqlRes.Errors) > 0 {
+		return response, fmt.Errorf("graph ql query failed: %v", gqlRes.Errors)
+	}
+
+	return gqlRes.Data, nil
+}
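Usage sketch (illustrative, not part of the patch): running RetrieveSubjectsQuery through the generic helper above; the variable and result types come from structs.go below, and ctx, baseURL, and gitoid are assumed in scope.

subjects, err := api.GraphQlQuery[api.RetrieveSubjectResults](
	ctx, baseURL, api.RetrieveSubjectsQuery,
	api.RetrieveSubjectVars{Gitoid: gitoid},
)
if err != nil {
	return err
}
for _, edge := range subjects.Subjects.Edges {
	fmt.Println(edge.Node.Name)
}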
diff --git a/vendor/github.com/in-toto/archivista/pkg/api/structs.go b/vendor/github.com/in-toto/archivista/pkg/api/structs.go
new file mode 100644
index 0000000000..fcf295a40f
--- /dev/null
+++ b/vendor/github.com/in-toto/archivista/pkg/api/structs.go
@@ -0,0 +1,90 @@
+// Copyright 2024 The Archivista Contributors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package api
+
+type GraphQLError struct {
+	Message string `json:"message"`
+}
+
+type GraphQLResponseGeneric[T any] struct {
+	Data   T              `json:"data,omitempty"`
+	Errors []GraphQLError `json:"errors,omitempty"`
+}
+
+type GraphQLRequestBodyGeneric[TVars any] struct {
+	Query     string `json:"query"`
+	Variables TVars  `json:"variables,omitempty"`
+}
+
+type RetrieveSubjectVars struct {
+	Gitoid string `json:"gitoid"`
+}
+
+type SearchVars struct {
+	Algorithm string `json:"algo"`
+	Digest    string `json:"digest"`
+}
+
+type RetrieveSubjectResults struct {
+	Subjects Subjects `json:"subjects"`
+}
+
+type Subjects struct {
+	Edges []SubjectEdge `json:"edges"`
+}
+
+type SubjectEdge struct {
+	Node SubjectNode `json:"node"`
+}
+
+type SubjectNode struct {
+	Name           string          `json:"name"`
+	SubjectDigests []SubjectDigest `json:"subjectDigests"`
+}
+
+type SubjectDigest struct {
+	Algorithm string `json:"algorithm"`
+	Value     string `json:"value"`
+}
+
+type SearchResults struct {
+	Dsses DSSES `json:"dsses"`
+}
+
+type DSSES struct {
+	Edges []SearchEdge `json:"edges"`
+}
+
+type SearchEdge struct {
+	Node SearchNode `json:"node"`
+}
+
+type SearchNode struct {
+	GitoidSha256 string    `json:"gitoidSha256"`
+	Statement    Statement `json:"statement"`
+}
+
+type Statement struct {
+	AttestationCollection AttestationCollection `json:"attestationCollections"`
+}
+
+type AttestationCollection struct {
+	Name         string        `json:"name"`
+	Attestations []Attestation `json:"attestations"`
+}
+
+type Attestation struct {
+	Type string `json:"type"`
+}
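Usage sketch (illustrative, not part of the patch): pairing SearchVars with SearchResults to look up envelopes by subject digest; ctx, baseURL, and digest are assumed in scope.

res, err := api.GraphQlQuery[api.SearchResults](
	ctx, baseURL, api.SearchQuery,
	api.SearchVars{Algorithm: "sha256", Digest: digest},
)
if err != nil {
	return err
}
for _, edge := range res.Dsses.Edges {
	fmt.Println(edge.Node.GitoidSha256, edge.Node.Statement.AttestationCollection.Name)
}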
diff --git a/vendor/github.com/in-toto/archivista/pkg/api/upload.go b/vendor/github.com/in-toto/archivista/pkg/api/upload.go
new file mode 100644
index 0000000000..83ffca0144
--- /dev/null
+++ b/vendor/github.com/in-toto/archivista/pkg/api/upload.go
@@ -0,0 +1,115 @@
+// Copyright 2023-2024 The Archivista Contributors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package api
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"io"
+	"net/http"
+	"net/url"
+
+	"github.com/in-toto/go-witness/dsse"
+)
+
+type UploadResponse struct {
+	Gitoid string `json:"gitoid"`
+}
+
+// Deprecated: Use UploadResponse instead. It will be removed in version >= v0.6.0
+type StoreResponse = UploadResponse
+
+// Deprecated: Use Store instead. It will be removed in version >= v0.6.0
+func Upload(ctx context.Context, baseURL string, envelope dsse.Envelope) (UploadResponse, error) {
+	return Store(ctx, baseURL, envelope)
+}
+
+func Store(ctx context.Context, baseURL string, envelope dsse.Envelope) (StoreResponse, error) {
+	buf := &bytes.Buffer{}
+	enc := json.NewEncoder(buf)
+	if err := enc.Encode(envelope); err != nil {
+		return StoreResponse{}, err
+	}
+
+	return StoreWithReader(ctx, baseURL, buf)
+}
+
+func StoreWithReader(ctx context.Context, baseURL string, r io.Reader) (StoreResponse, error) {
+	return StoreWithReaderWithHTTPClient(ctx, &http.Client{}, baseURL, r)
+}
+
+func StoreWithReaderWithHTTPClient(ctx context.Context, client *http.Client, baseURL string, r io.Reader) (StoreResponse, error) {
+	uploadPath, err := url.JoinPath(baseURL, "upload")
+	if err != nil {
+		return UploadResponse{}, err
+	}
+
+	req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadPath, r)
+	if err != nil {
+		return UploadResponse{}, err
+	}
+
+	req.Header.Set("Content-Type", "application/json")
+	// Use the caller-provided client rather than constructing a new one.
+	resp, err := client.Do(req)
+	if err != nil {
+		return UploadResponse{}, err
+	}
+
+	defer resp.Body.Close()
+	bodyBytes, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return UploadResponse{}, err
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		return UploadResponse{}, errors.New(string(bodyBytes))
+	}
+
+	uploadResp := UploadResponse{}
+	if err := json.Unmarshal(bodyBytes, &uploadResp); err != nil {
+		return UploadResponse{}, err
+	}
+
+	return uploadResp, nil
+}
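Usage sketch (illustrative, not part of the patch): storing an envelope and keeping the returned gitoid for a later download; ctx, baseURL, and envelope are assumed in scope.

resp, err := api.Store(ctx, baseURL, envelope)
if err != nil {
	return err
}
fmt.Println("stored with gitoid:", resp.Gitoid)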
diff --git a/vendor/github.com/in-toto/archivista/pkg/http-client/client.go b/vendor/github.com/in-toto/archivista/pkg/http-client/client.go
new file mode 100644
index 0000000000..22b87c1a9a
--- /dev/null
+++ b/vendor/github.com/in-toto/archivista/pkg/http-client/client.go
@@ -0,0 +1,216 @@
+// Copyright 2024 The Archivista Contributors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httpclient
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+
+	"github.com/in-toto/archivista/pkg/api"
+	"github.com/in-toto/go-witness/dsse"
+)
+
+type ArchivistaClient struct {
+	BaseURL    string
+	GraphQLURL string
+	*http.Client
+}
+
+type HttpClienter interface {
+	DownloadDSSE(ctx context.Context, gitoid string) (dsse.Envelope, error)
+	DownloadReadCloser(ctx context.Context, gitoid string) (io.ReadCloser, error)
+	DownloadWithWriter(ctx context.Context, gitoid string, dst io.Writer) error
+	Store(ctx context.Context, envelope dsse.Envelope) (api.UploadResponse, error)
+	StoreWithReader(ctx context.Context, r io.Reader) (api.UploadResponse, error)
+	GraphQLRetrieveSubjectResults(ctx context.Context, gitoid string) (api.RetrieveSubjectResults, error)
+	GraphQLRetrieveSearchResults(ctx context.Context, algo string, digest string) (api.SearchResults, error)
+	GraphQLQueryIface(ctx context.Context, query string, variables interface{}) (*GraphQLResponseInterface, error)
+	GraphQLQueryToDst(ctx context.Context, query string, variables interface{}, dst interface{}) error
+	GraphQLQueryReadCloser(ctx context.Context, query string, variables interface{}) (io.ReadCloser, error)
+}
+
+func CreateArchivistaClient(httpClient *http.Client, baseURL string) (*ArchivistaClient, error) {
+	client := ArchivistaClient{
+		BaseURL: baseURL,
+		Client:  http.DefaultClient,
+	}
+	if httpClient != nil {
+		client.Client = httpClient
+	}
+	var err error
+	client.GraphQLURL, err = url.JoinPath(client.BaseURL, "query")
+	if err != nil {
+		return nil, err
+	}
+	return &client, nil
+}
+
+func (ac *ArchivistaClient) DownloadDSSE(ctx context.Context, gitoid string) (dsse.Envelope, error) {
+	reader, err := api.DownloadReadCloserWithHTTPClient(ctx, ac.Client, ac.BaseURL, gitoid)
+	if err != nil {
+		return dsse.Envelope{}, err
+	}
+	env := dsse.Envelope{}
+	if err := json.NewDecoder(reader).Decode(&env); err != nil {
+		return dsse.Envelope{}, err
+	}
+	return env, nil
+}
+
+func (ac *ArchivistaClient) DownloadReadCloser(ctx context.Context, gitoid string) (io.ReadCloser, error) {
+	return api.DownloadReadCloserWithHTTPClient(ctx, ac.Client, ac.BaseURL, gitoid)
+}
+
+func (ac *ArchivistaClient) DownloadWithWriter(ctx context.Context, gitoid string, dst io.Writer) error {
+	return api.DownloadWithWriterWithHTTPClient(ctx, ac.Client, ac.BaseURL, gitoid, dst)
+}
+
+func (ac *ArchivistaClient) Store(ctx context.Context, envelope dsse.Envelope) (api.UploadResponse, error) {
+	return api.Store(ctx, ac.BaseURL, envelope)
+}
+
+func (ac *ArchivistaClient) StoreWithReader(ctx context.Context, r io.Reader) (api.UploadResponse, error) {
+	return api.StoreWithReader(ctx, ac.BaseURL, r)
+}
+
+type GraphQLRequestBodyInterface struct {
+	Query     string      `json:"query"`
+	Variables interface{} `json:"variables,omitempty"`
+}
+
+type GraphQLResponseInterface struct {
+	Data   interface{}
+	Errors []api.GraphQLError `json:"errors,omitempty"`
+}
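Usage sketch (illustrative, not part of the patch): constructing the client and downloading an envelope. Passing nil for the *http.Client falls back to http.DefaultClient, as CreateArchivistaClient shows; the URL and gitoid are placeholders, and ctx is assumed in scope.

client, err := httpclient.CreateArchivistaClient(nil, "http://localhost:8082")
if err != nil {
	return err
}
env, err := client.DownloadDSSE(ctx, gitoid)
if err != nil {
	return err
}
fmt.Println("signatures:", len(env.Signatures))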
+// GraphQLRetrieveSubjectResults retrieves the subjects for a given gitoid.
+func (ac *ArchivistaClient) GraphQLRetrieveSubjectResults(
+	ctx context.Context,
+	gitoid string,
+) (api.RetrieveSubjectResults, error) {
+	return api.GraphQlQuery[api.RetrieveSubjectResults](
+		ctx,
+		ac.BaseURL,
+		api.RetrieveSubjectsQuery,
+		api.RetrieveSubjectVars{Gitoid: gitoid},
+	)
+}
+
+// GraphQLRetrieveSearchResults retrieves the search results for a given algorithm and digest.
+func (ac *ArchivistaClient) GraphQLRetrieveSearchResults(
+	ctx context.Context,
+	algo string,
+	digest string,
+) (api.SearchResults, error) {
+	return api.GraphQlQuery[api.SearchResults](
+		ctx,
+		ac.BaseURL,
+		api.SearchQuery,
+		api.SearchVars{Algorithm: algo, Digest: digest},
+	)
+}
+
+// GraphQLQueryIface executes a GraphQL query against the Archivista API and returns the response as an interface.
+//
+// Parameters:
+// - ctx: The context to control the query's lifecycle, such as cancellations or deadlines.
+// - query: A string representing the GraphQL query to be executed.
+// - variables: A map or struct containing variables to parameterize the query.
+//
+// Returns:
+// - A pointer to a GraphQLResponseInterface containing the query's result or errors.
+// - An error if the query execution or response parsing fails.
+//
+// Example:
+//
+//	response, err := client.GraphQLQueryIface(ctx, query, variables)
+//	if err != nil {
+//	    log.Fatalf("GraphQL query failed: %v", err)
+//	}
+//	fmt.Printf("Response data: %+v\n", response.Data)
+func (ac *ArchivistaClient) GraphQLQueryIface(
+	ctx context.Context,
+	query string,
+	variables interface{},
+) (*GraphQLResponseInterface, error) {
+	reader, err := ac.GraphQLQueryReadCloser(ctx, query, variables)
+	if err != nil {
+		return nil, err
+	}
+	defer reader.Close()
+	gqlRes := GraphQLResponseInterface{}
+	dec := json.NewDecoder(reader)
+	if err := dec.Decode(&gqlRes); err != nil {
+		return nil, err
+	}
+	if len(gqlRes.Errors) > 0 {
+		return nil, fmt.Errorf("graph ql query failed: %v", gqlRes.Errors)
+	}
+	return &gqlRes, nil
+}
+
+// GraphQLQueryToDst executes a GraphQL query against the Archivista API and unmarshals the response into a destination object.
+func (ac *ArchivistaClient) GraphQLQueryToDst(ctx context.Context, query string, variables interface{}, dst interface{}) error {
+	reader, err := ac.GraphQLQueryReadCloser(ctx, query, variables)
+	if err != nil {
+		return err
+	}
+	defer reader.Close()
+	dec := json.NewDecoder(reader)
+	if err := dec.Decode(&dst); err != nil {
+		return err
+	}
+	return nil
+}
+
+// GraphQLQueryReadCloser executes a GraphQL query against the Archivista API and returns the response as an io.ReadCloser.
+func (ac *ArchivistaClient) GraphQLQueryReadCloser( + ctx context.Context, + query string, + variables interface{}, +) (io.ReadCloser, error) { + requestBodyMap := GraphQLRequestBodyInterface{ + Query: query, + Variables: variables, + } + requestBodyJSON, err := json.Marshal(requestBodyMap) + if err != nil { + return nil, err + } + req, err := http.NewRequestWithContext(ctx, http.MethodPost, ac.GraphQLURL, bytes.NewReader(requestBodyJSON)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + res, err := ac.Do(req) + if err != nil { + return nil, err + } + if res.StatusCode != http.StatusOK { + defer res.Body.Close() + errMsg, err := io.ReadAll(res.Body) + if err != nil { + return nil, err + } + return nil, errors.New(string(errMsg)) + } + return res.Body, nil +} diff --git a/vendor/github.com/in-toto/attestation/go/predicates/provenance/v1/provenance.pb.go b/vendor/github.com/in-toto/attestation/go/predicates/provenance/v1/provenance.pb.go index 9903990df2..84eab8182e 100644 --- a/vendor/github.com/in-toto/attestation/go/predicates/provenance/v1/provenance.pb.go +++ b/vendor/github.com/in-toto/attestation/go/predicates/provenance/v1/provenance.pb.go @@ -1,8 +1,8 @@ -// Keep in sync with schema at https://github.com/slsa-framework/slsa/blob/main/docs/spec/v1.0/schema/provenance.proto +// Keep in sync with schema at https://github.com/slsa-framework/slsa/blob/main/docs/provenance/schema/v1/provenance.proto // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.3 +// protoc-gen-go v1.34.1 // protoc v4.24.4 // source: in_toto_attestation/predicates/provenance/v1/provenance.proto @@ -28,18 +28,21 @@ const ( // Proto representation of predicate type https://slsa.dev/provenance/v1 // Validation of all fields is left to the users of this proto. 
type Provenance struct { - state protoimpl.MessageState `protogen:"open.v1"` - BuildDefinition *BuildDefinition `protobuf:"bytes,1,opt,name=build_definition,json=buildDefinition,proto3" json:"build_definition,omitempty"` - RunDetails *RunDetails `protobuf:"bytes,2,opt,name=run_details,json=runDetails,proto3" json:"run_details,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BuildDefinition *BuildDefinition `protobuf:"bytes,1,opt,name=build_definition,json=buildDefinition,proto3" json:"build_definition,omitempty"` + RunDetails *RunDetails `protobuf:"bytes,2,opt,name=run_details,json=runDetails,proto3" json:"run_details,omitempty"` } func (x *Provenance) Reset() { *x = Provenance{} - mi := &file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *Provenance) String() string { @@ -50,7 +53,7 @@ func (*Provenance) ProtoMessage() {} func (x *Provenance) ProtoReflect() protoreflect.Message { mi := &file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[0] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -80,20 +83,23 @@ func (x *Provenance) GetRunDetails() *RunDetails { } type BuildDefinition struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + BuildType string `protobuf:"bytes,1,opt,name=build_type,json=buildType,proto3" json:"build_type,omitempty"` ExternalParameters *structpb.Struct `protobuf:"bytes,2,opt,name=external_parameters,json=externalParameters,proto3" json:"external_parameters,omitempty"` InternalParameters *structpb.Struct `protobuf:"bytes,3,opt,name=internal_parameters,json=internalParameters,proto3" json:"internal_parameters,omitempty"` ResolvedDependencies []*v1.ResourceDescriptor `protobuf:"bytes,4,rep,name=resolved_dependencies,json=resolvedDependencies,proto3" json:"resolved_dependencies,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *BuildDefinition) Reset() { *x = BuildDefinition{} - mi := &file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *BuildDefinition) String() string { @@ -104,7 +110,7 @@ func (*BuildDefinition) ProtoMessage() {} func (x *BuildDefinition) ProtoReflect() protoreflect.Message { mi := &file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[1] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -148,19 +154,22 @@ func (x *BuildDefinition) GetResolvedDependencies() []*v1.ResourceDescriptor { } type RunDetails struct { - state 
protoimpl.MessageState `protogen:"open.v1"` - Builder *Builder `protobuf:"bytes,1,opt,name=builder,proto3" json:"builder,omitempty"` - Metadata *BuildMetadata `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` - Byproducts []*v1.ResourceDescriptor `protobuf:"bytes,3,rep,name=byproducts,proto3" json:"byproducts,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Builder *Builder `protobuf:"bytes,1,opt,name=builder,proto3" json:"builder,omitempty"` + Metadata *BuildMetadata `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` + Byproducts []*v1.ResourceDescriptor `protobuf:"bytes,3,rep,name=byproducts,proto3" json:"byproducts,omitempty"` } func (x *RunDetails) Reset() { *x = RunDetails{} - mi := &file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *RunDetails) String() string { @@ -171,7 +180,7 @@ func (*RunDetails) ProtoMessage() {} func (x *RunDetails) ProtoReflect() protoreflect.Message { mi := &file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[2] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -208,19 +217,22 @@ func (x *RunDetails) GetByproducts() []*v1.ResourceDescriptor { } type Builder struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Version map[string]string `protobuf:"bytes,2,rep,name=version,proto3" json:"version,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Version map[string]string `protobuf:"bytes,2,rep,name=version,proto3" json:"version,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` BuilderDependencies []*v1.ResourceDescriptor `protobuf:"bytes,3,rep,name=builder_dependencies,json=builderDependencies,proto3" json:"builder_dependencies,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *Builder) Reset() { *x = Builder{} - mi := &file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *Builder) String() string { @@ -231,7 +243,7 @@ func (*Builder) ProtoMessage() {} func (x *Builder) ProtoReflect() protoreflect.Message { mi := &file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[3] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -268,19 +280,22 @@ func (x *Builder) GetBuilderDependencies() []*v1.ResourceDescriptor { } type BuildMetadata struct { - state protoimpl.MessageState 
`protogen:"open.v1"` - InvocationId string `protobuf:"bytes,1,opt,name=invocation_id,json=invocationId,proto3" json:"invocation_id,omitempty"` - StartedOn *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=started_on,json=startedOn,proto3" json:"started_on,omitempty"` - FinishedOn *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=finished_on,json=finishedOn,proto3" json:"finished_on,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + InvocationId string `protobuf:"bytes,1,opt,name=invocation_id,json=invocationId,proto3" json:"invocation_id,omitempty"` + StartedOn *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=started_on,json=startedOn,proto3" json:"started_on,omitempty"` + FinishedOn *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=finished_on,json=finishedOn,proto3" json:"finished_on,omitempty"` } func (x *BuildMetadata) Reset() { *x = BuildMetadata{} - mi := &file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *BuildMetadata) String() string { @@ -291,7 +306,7 @@ func (*BuildMetadata) ProtoMessage() {} func (x *BuildMetadata) ProtoReflect() protoreflect.Message { mi := &file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[4] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -443,7 +458,7 @@ func file_in_toto_attestation_predicates_provenance_v1_provenance_proto_rawDescG } var file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes = make([]protoimpl.MessageInfo, 6) -var file_in_toto_attestation_predicates_provenance_v1_provenance_proto_goTypes = []any{ +var file_in_toto_attestation_predicates_provenance_v1_provenance_proto_goTypes = []interface{}{ (*Provenance)(nil), // 0: in_toto_attestation.predicates.provenance.v1.Provenance (*BuildDefinition)(nil), // 1: in_toto_attestation.predicates.provenance.v1.BuildDefinition (*RunDetails)(nil), // 2: in_toto_attestation.predicates.provenance.v1.RunDetails @@ -479,6 +494,68 @@ func file_in_toto_attestation_predicates_provenance_v1_provenance_proto_init() { if File_in_toto_attestation_predicates_provenance_v1_provenance_proto != nil { return } + if !protoimpl.UnsafeEnabled { + file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Provenance); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BuildDefinition); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RunDetails); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Builder); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_in_toto_attestation_predicates_provenance_v1_provenance_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BuildMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.go b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.go index ae912f0d1e..51654e954f 100644 --- a/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.go +++ b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.go @@ -16,83 +16,16 @@ var ( ErrRDRequiredField = errors.New("at least one of name, URI, or digest are required") ) -type HashAlgorithm string - -const ( - AlgorithmMD5 HashAlgorithm = "md5" - AlgorithmSHA1 HashAlgorithm = "sha1" - AlgorithmSHA224 HashAlgorithm = "sha224" - AlgorithmSHA512_224 HashAlgorithm = "sha512_224" - AlgorithmSHA256 HashAlgorithm = "sha256" - AlgorithmSHA512_256 HashAlgorithm = "sha512_256" - AlgorithmSHA384 HashAlgorithm = "sha384" - AlgorithmSHA512 HashAlgorithm = "sha512" - AlgorithmSHA3_224 HashAlgorithm = "sha3_224" - AlgorithmSHA3_256 HashAlgorithm = "sha3_256" - AlgorithmSHA3_384 HashAlgorithm = "sha3_384" - AlgorithmSHA3_512 HashAlgorithm = "sha3_512" - AlgorithmGitBlob HashAlgorithm = "gitBlob" - AlgorithmGitCommit HashAlgorithm = "gitCommit" - AlgorithmGitTag HashAlgorithm = "gitTag" - AlgorithmGitTree HashAlgorithm = "gitTree" - AlgorithmDirHash HashAlgorithm = "dirHash" -) - -// HashAlgorithms indexes the known algorithms in a dictionary -// by their string value -var HashAlgorithms = map[string]HashAlgorithm{ - "md5": AlgorithmMD5, - "sha1": AlgorithmSHA1, - "sha224": AlgorithmSHA224, - "sha512_224": AlgorithmSHA512_224, - "sha256": AlgorithmSHA256, - "sha512_256": AlgorithmSHA512_256, - "sha384": AlgorithmSHA384, - "sha512": AlgorithmSHA512, - "sha3_224": AlgorithmSHA3_224, - "sha3_256": AlgorithmSHA3_256, - "sha3_384": AlgorithmSHA3_384, - "sha3_512": AlgorithmSHA3_512, - "gitBlob": AlgorithmGitBlob, - "gitCommit": AlgorithmGitCommit, - "gitTag": AlgorithmGitTag, - "gitTree": AlgorithmGitTree, - "dirHash": AlgorithmDirHash, -} - -// HexLength returns the expected length of an algorithm's hash when hexencoded -func (algo HashAlgorithm) HexLength() int { - switch algo { - case AlgorithmMD5: - return 16 - case AlgorithmSHA1, AlgorithmGitBlob, AlgorithmGitCommit, AlgorithmGitTag, AlgorithmGitTree: - return 20 - case AlgorithmSHA224, AlgorithmSHA512_224, AlgorithmSHA3_224: - return 28 - case AlgorithmSHA256, AlgorithmSHA512_256, AlgorithmSHA3_256, AlgorithmDirHash: - return 32 - case AlgorithmSHA384, AlgorithmSHA3_384: - return 48 - case AlgorithmSHA512, AlgorithmSHA3_512: - return 64 - default: - return 0 - } -} - -// String returns the hash algorithm name as a string -func (algo HashAlgorithm) String() string { - return string(algo) -} - // Indicates if a given fixed-size hash algorithm is supported by default and returns the algorithm's // digest size in bytes, if supported. We assume gitCommit and dirHash are aliases for sha1 and sha256, respectively. 
// // SHA digest sizes from https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf // MD5 digest size from https://www.rfc-editor.org/rfc/rfc1321.html#section-1 -func isSupportedFixedSizeAlgorithm(algString string) (bool, int) { - algo := HashAlgorithm(algString) - return algo.HexLength() > 0, algo.HexLength() +func isSupportedFixedSizeAlgorithm(alg string) (bool, int) { + algos := map[string]int{"md5": 16, "sha1": 20, "sha224": 28, "sha512_224": 28, "sha256": 32, "sha512_256": 32, "sha384": 48, "sha512": 64, "sha3_224": 28, "sha3_256": 32, "sha3_384": 48, "sha3_512": 64, "gitCommit": 20, "dirHash": 32} + + size, ok := algos[alg] + return ok, size } func (d *ResourceDescriptor) Validate() error { diff --git a/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go index 44dca29b5c..3e59869b10 100644 --- a/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go +++ b/vendor/github.com/in-toto/attestation/go/v1/resource_descriptor.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.3 +// protoc-gen-go v1.34.1 // protoc v4.24.4 // source: in_toto_attestation/v1/resource_descriptor.proto @@ -25,29 +25,32 @@ const ( // https://github.com/in-toto/attestation/blob/main/spec/v1/resource_descriptor.md // Validation of all fields is left to the users of this proto. type ResourceDescriptor struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Uri string `protobuf:"bytes,2,opt,name=uri,proto3" json:"uri,omitempty"` - Digest map[string]string `protobuf:"bytes,3,rep,name=digest,proto3" json:"digest,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - Content []byte `protobuf:"bytes,4,opt,name=content,proto3" json:"content,omitempty"` - DownloadLocation string `protobuf:"bytes,5,opt,name=download_location,json=downloadLocation,proto3" json:"download_location,omitempty"` - MediaType string `protobuf:"bytes,6,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Uri string `protobuf:"bytes,2,opt,name=uri,proto3" json:"uri,omitempty"` + Digest map[string]string `protobuf:"bytes,3,rep,name=digest,proto3" json:"digest,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Content []byte `protobuf:"bytes,4,opt,name=content,proto3" json:"content,omitempty"` + DownloadLocation string `protobuf:"bytes,5,opt,name=download_location,json=downloadLocation,proto3" json:"download_location,omitempty"` + MediaType string `protobuf:"bytes,6,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` // Per the Struct protobuf spec, this type corresponds to // a JSON Object, which is truly a map under the hood. // So, the Struct a) is still consistent with our specification for // the `annotations` field, and b) has native support in some language // bindings making their use easier in implementations. 
// See: https://pkg.go.dev/google.golang.org/protobuf/types/known/structpb#Struct - Annotations *structpb.Struct `protobuf:"bytes,7,opt,name=annotations,proto3" json:"annotations,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Annotations *structpb.Struct `protobuf:"bytes,7,opt,name=annotations,proto3" json:"annotations,omitempty"` } func (x *ResourceDescriptor) Reset() { *x = ResourceDescriptor{} - mi := &file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ResourceDescriptor) String() string { @@ -58,7 +61,7 @@ func (*ResourceDescriptor) ProtoMessage() {} func (x *ResourceDescriptor) ProtoReflect() protoreflect.Message { mi := &file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes[0] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -175,7 +178,7 @@ func file_in_toto_attestation_v1_resource_descriptor_proto_rawDescGZIP() []byte } var file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_in_toto_attestation_v1_resource_descriptor_proto_goTypes = []any{ +var file_in_toto_attestation_v1_resource_descriptor_proto_goTypes = []interface{}{ (*ResourceDescriptor)(nil), // 0: in_toto_attestation.v1.ResourceDescriptor nil, // 1: in_toto_attestation.v1.ResourceDescriptor.DigestEntry (*structpb.Struct)(nil), // 2: google.protobuf.Struct @@ -195,6 +198,20 @@ func file_in_toto_attestation_v1_resource_descriptor_proto_init() { if File_in_toto_attestation_v1_resource_descriptor_proto != nil { return } + if !protoimpl.UnsafeEnabled { + file_in_toto_attestation_v1_resource_descriptor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceDescriptor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go b/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go index bb5c4567d9..a2bd2c2d7d 100644 --- a/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go +++ b/vendor/github.com/in-toto/attestation/go/v1/statement.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.3 +// protoc-gen-go v1.34.1 // protoc v4.24.4 // source: in_toto_attestation/v1/statement.proto @@ -25,21 +25,24 @@ const ( // https://github.com/in-toto/attestation/tree/main/spec/v1 // Validation of all fields is left to the users of this proto. 
type Statement struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Expected to always be "https://in-toto.io/Statement/v1" Type string `protobuf:"bytes,1,opt,name=type,json=_type,proto3" json:"type,omitempty"` Subject []*ResourceDescriptor `protobuf:"bytes,2,rep,name=subject,proto3" json:"subject,omitempty"` PredicateType string `protobuf:"bytes,3,opt,name=predicate_type,json=predicateType,proto3" json:"predicate_type,omitempty"` Predicate *structpb.Struct `protobuf:"bytes,4,opt,name=predicate,proto3" json:"predicate,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *Statement) Reset() { *x = Statement{} - mi := &file_in_toto_attestation_v1_statement_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_in_toto_attestation_v1_statement_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *Statement) String() string { @@ -50,7 +53,7 @@ func (*Statement) ProtoMessage() {} func (x *Statement) ProtoReflect() protoreflect.Message { mi := &file_in_toto_attestation_v1_statement_proto_msgTypes[0] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -138,7 +141,7 @@ func file_in_toto_attestation_v1_statement_proto_rawDescGZIP() []byte { } var file_in_toto_attestation_v1_statement_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_in_toto_attestation_v1_statement_proto_goTypes = []any{ +var file_in_toto_attestation_v1_statement_proto_goTypes = []interface{}{ (*Statement)(nil), // 0: in_toto_attestation.v1.Statement (*ResourceDescriptor)(nil), // 1: in_toto_attestation.v1.ResourceDescriptor (*structpb.Struct)(nil), // 2: google.protobuf.Struct @@ -159,6 +162,20 @@ func file_in_toto_attestation_v1_statement_proto_init() { return } file_in_toto_attestation_v1_resource_descriptor_proto_init() + if !protoimpl.UnsafeEnabled { + file_in_toto_attestation_v1_statement_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Statement); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/github.com/in-toto/go-witness/LICENSE b/vendor/github.com/in-toto/go-witness/LICENSE new file mode 100644 index 0000000000..c54e7d1566 --- /dev/null +++ b/vendor/github.com/in-toto/go-witness/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2021 TestifySec, LLC. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/in-toto/go-witness/cryptoutil/digestset.go b/vendor/github.com/in-toto/go-witness/cryptoutil/digestset.go new file mode 100644 index 0000000000..3b91a8a08e --- /dev/null +++ b/vendor/github.com/in-toto/go-witness/cryptoutil/digestset.go @@ -0,0 +1,292 @@ +// Copyright 2022 The Witness Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cryptoutil + +import ( + "bytes" + "crypto" + "encoding/json" + "fmt" + "hash" + "io" + "os" + + "golang.org/x/mod/sumdb/dirhash" +) + +var ( + hashNames = map[DigestValue]string{ + { + Hash: crypto.SHA256, + GitOID: false, + DirHash: false, + }: "sha256", + { + Hash: crypto.SHA1, + GitOID: false, + DirHash: false, + }: "sha1", + { + Hash: crypto.SHA256, + GitOID: true, + DirHash: false, + }: "gitoid:sha256", + { + Hash: crypto.SHA1, + GitOID: true, + DirHash: false, + }: "gitoid:sha1", + { + Hash: crypto.SHA256, + GitOID: false, + DirHash: true, + }: "dirHash", + } + + hashesByName = map[string]DigestValue{ + "sha256": { + crypto.SHA256, + false, + false, + }, + "sha1": { + crypto.SHA1, + false, + false, + }, + "gitoid:sha256": { + crypto.SHA256, + true, + false, + }, + "gitoid:sha1": { + crypto.SHA1, + true, + false, + }, + "dirHash": { + crypto.SHA256, + false, + true, + }, + } +) + +type ErrUnsupportedHash string + +func (e ErrUnsupportedHash) Error() string { + return fmt.Sprintf("unsupported hash function: %v", string(e)) +} + +type DigestValue struct { + crypto.Hash + GitOID bool + DirHash bool +} + +func (dv DigestValue) New() hash.Hash { + if dv.GitOID { + return &gitoidHasher{hash: dv.Hash, buf: &bytes.Buffer{}} + } + + return dv.Hash.New() +} + +type DigestSet map[DigestValue]string + +func HashToString(h crypto.Hash) (string, error) { + if name, ok := hashNames[DigestValue{Hash: h}]; ok { + return name, nil + } + + return "", ErrUnsupportedHash(h.String()) +} + +func HashFromString(name string) (crypto.Hash, error) { + if hash, ok := hashesByName[name]; ok { + return hash.Hash, nil + } + + return crypto.Hash(0), ErrUnsupportedHash(name) +} + +// Equal returns true if every digest for hash functions both artifacts have in common are equal. +// If the two artifacts don't have any digests from common hash functions, equal will return false. +// If any digest from common hash functions differ between the two artifacts, equal will return false. 
+func (ds *DigestSet) Equal(second DigestSet) bool {
+	hasMatchingDigest := false
+	for hash, digest := range *ds {
+		otherDigest, ok := second[hash]
+		if !ok {
+			continue
+		}
+
+		if digest == otherDigest {
+			hasMatchingDigest = true
+		} else {
+			return false
+		}
+	}
+
+	return hasMatchingDigest
+}
+
+func (ds *DigestSet) ToNameMap() (map[string]string, error) {
+	nameMap := make(map[string]string)
+	for hash, digest := range *ds {
+		name, ok := hashNames[hash]
+		if !ok {
+			return nameMap, ErrUnsupportedHash(hash.String())
+		}
+
+		nameMap[name] = digest
+	}
+
+	return nameMap, nil
+}
+
+func NewDigestSet(digestsByName map[string]string) (DigestSet, error) {
+	ds := make(DigestSet)
+	for hashName, digest := range digestsByName {
+		hash, ok := hashesByName[hashName]
+		if !ok {
+			return ds, ErrUnsupportedHash(hashName)
+		}
+
+		ds[hash] = digest
+	}
+
+	return ds, nil
+}
+
+func CalculateDigestSet(r io.Reader, digestValues []DigestValue) (DigestSet, error) {
+	digestSet := make(DigestSet)
+	writers := []io.Writer{}
+	hashfuncs := map[DigestValue]hash.Hash{}
+	for _, digestValue := range digestValues {
+		hashfunc := digestValue.New()
+		hashfuncs[digestValue] = hashfunc
+		writers = append(writers, hashfunc)
+	}
+
+	multiwriter := io.MultiWriter(writers...)
+	if _, err := io.Copy(multiwriter, r); err != nil {
+		return digestSet, err
+	}
+
+	for digestValue, hashfunc := range hashfuncs {
+		// gitoids are somewhat special... we're using a custom implementation of hash.Hash
+		// to wrap the gitoid library. Sum will return a gitoid URI, so we don't want to hex
+		// encode it as it's already a string with a hex encoded hash.
+		if digestValue.GitOID {
+			digestSet[digestValue] = string(hashfunc.Sum(nil))
+			continue
+		}
+
+		digestSet[digestValue] = string(HexEncode(hashfunc.Sum(nil)))
+	}
+
+	return digestSet, nil
+}
+
+func CalculateDigestSetFromBytes(data []byte, hashes []DigestValue) (DigestSet, error) {
+	return CalculateDigestSet(bytes.NewReader(data), hashes)
+}
+
+func CalculateDigestSetFromFile(path string, hashes []DigestValue) (DigestSet, error) {
+	file, err := os.Open(path)
+	if err != nil {
+		return DigestSet{}, err
+	}
+	// Close the file on all return paths, including the early error returns below.
+	defer file.Close()
+
+	hashable, err := isHashableFile(file)
+	if err != nil {
+		return DigestSet{}, err
+	}
+
+	if !hashable {
+		return DigestSet{}, fmt.Errorf("%s is not a hashable file", path)
+	}
+
+	return CalculateDigestSet(file, hashes)
+}
+
+func CalculateDigestSetFromDir(dir string, hashes []DigestValue) (DigestSet, error) {
+	dirHash, err := dirhash.HashDir(dir, "", DirhHashSha256)
+	if err != nil {
+		return nil, err
+	}
+
+	digestSetByName := make(map[string]string)
+	digestSetByName["dirHash"] = dirHash
+
+	return NewDigestSet(digestSetByName)
+}
+
+func (ds DigestSet) MarshalJSON() ([]byte, error) {
+	nameMap, err := ds.ToNameMap()
+	if err != nil {
+		return nil, err
+	}
+
+	return json.Marshal(nameMap)
+}
+
+func (ds *DigestSet) UnmarshalJSON(data []byte) error {
+	nameMap := make(map[string]string)
+	err := json.Unmarshal(data, &nameMap)
+	if err != nil {
+		return err
+	}
+
+	newDs, err := NewDigestSet(nameMap)
+	if err != nil {
+		return err
+	}
+
+	*ds = newDs
+	return nil
+}
+
+func isHashableFile(f *os.File) (bool, error) {
+	stat, err := f.Stat()
+	if err != nil {
+		return false, err
+	}
+
+	mode := stat.Mode()
+
+	isSpecial := stat.Mode()&os.ModeCharDevice != 0
+
+	if isSpecial {
+		return false, nil
+	}
+
+	if mode.IsRegular() {
+		return true, nil
+	}
+
+	// Check the full mode, not mode.Perm(): Perm masks off the type bits,
+	// so Perm().IsDir() can never report true.
+	if mode.IsDir() {
+		return true, nil
+	}
+
+	// os.ModeSymlink is a high type bit; test it with a mask rather than
+	// comparing the masked value to 1, which can never match.
+	if mode&os.ModeSymlink != 0 {
+		return true, nil
+	}
+
+	return false, nil
+}
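Usage sketch (illustrative, not part of the patch): computing a sha256 DigestSet for a file and reading it back by name; the path is a placeholder, and crypto and cryptoutil are assumed imported.

ds, err := cryptoutil.CalculateDigestSetFromFile(
	"artifact.tar.gz",
	[]cryptoutil.DigestValue{{Hash: crypto.SHA256}},
)
if err != nil {
	return err
}
names, err := ds.ToNameMap()
if err != nil {
	return err
}
fmt.Println("sha256:", names["sha256"])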
nil + } + + return false, nil +} diff --git a/vendor/github.com/in-toto/go-witness/cryptoutil/dirhash.go b/vendor/github.com/in-toto/go-witness/cryptoutil/dirhash.go new file mode 100644 index 0000000000..044a2b1519 --- /dev/null +++ b/vendor/github.com/in-toto/go-witness/cryptoutil/dirhash.go @@ -0,0 +1,61 @@ +// Copyright 2022 The Witness Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cryptoutil + +import ( + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + "io" + "sort" + "strings" +) + +// DirHashSha256 is the "h1:" directory hash function, using SHA-256. +// +// DirHashSha256 returns a SHA-256 hash of a summary +// prepared as if by the Unix command: +// +// sha256sum $(find . -type f | sort) | sha256sum +// +// More precisely, the hashed summary contains a single line for each file in the list, +// ordered by sort.Strings applied to the file names, where each line consists of +// the hexadecimal SHA-256 hash of the file content, +// two spaces (U+0020), the file name, and a newline (U+000A). +// +// File names with newlines (U+000A) are disallowed. +func DirhHashSha256(files []string, open func(string) (io.ReadCloser, error)) (string, error) { + h := sha256.New() + files = append([]string(nil), files...) + sort.Strings(files) + for _, file := range files { + if strings.Contains(file, "\n") { + return "", errors.New("dirhash: filenames with newlines are not supported") + } + r, err := open(file) + if err != nil { + return "", err + } + hf := sha256.New() + _, err = io.Copy(hf, r) + r.Close() + if err != nil { + return "", err + } + fmt.Fprintf(h, "%x %s\n", hf.Sum(nil), file) + } + return hex.EncodeToString(h.Sum(nil)), nil +} diff --git a/vendor/github.com/in-toto/go-witness/cryptoutil/ecdsa.go b/vendor/github.com/in-toto/go-witness/cryptoutil/ecdsa.go new file mode 100644 index 0000000000..172ad97831 --- /dev/null +++ b/vendor/github.com/in-toto/go-witness/cryptoutil/ecdsa.go @@ -0,0 +1,85 @@ +// Copyright 2021 The Witness Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cryptoutil + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "io" +) + +type ErrVerifyFailed struct{} + +func (e ErrVerifyFailed) Error() string { + return "verification failed" +} + +type ECDSASigner struct { + priv *ecdsa.PrivateKey + hash crypto.Hash +} + +func NewECDSASigner(priv *ecdsa.PrivateKey, hash crypto.Hash) *ECDSASigner { + return &ECDSASigner{priv, hash} +} + +func (s *ECDSASigner) KeyID() (string, error) { + return GeneratePublicKeyID(&s.priv.PublicKey, s.hash) +} + +func (s *ECDSASigner) Sign(r io.Reader) ([]byte, error) { + digest, err := Digest(r, s.hash) + if err != nil { + return nil, err + } + + return ecdsa.SignASN1(rand.Reader, s.priv, digest) +} + +func (s *ECDSASigner) Verifier() (Verifier, error) { + return NewECDSAVerifier(&s.priv.PublicKey, s.hash), nil +} + +type ECDSAVerifier struct { + pub *ecdsa.PublicKey + hash crypto.Hash +} + +func NewECDSAVerifier(pub *ecdsa.PublicKey, hash crypto.Hash) *ECDSAVerifier { + return &ECDSAVerifier{pub, hash} +} + +func (v *ECDSAVerifier) KeyID() (string, error) { + return GeneratePublicKeyID(v.pub, v.hash) +} + +func (v *ECDSAVerifier) Verify(data io.Reader, sig []byte) error { + digest, err := Digest(data, v.hash) + if err != nil { + return err + } + + verified := ecdsa.VerifyASN1(v.pub, digest, sig) + if !verified { + return ErrVerifyFailed{} + } + + return nil +} + +func (v *ECDSAVerifier) Bytes() ([]byte, error) { + return PublicPemBytes(v.pub) +} diff --git a/vendor/github.com/in-toto/go-witness/cryptoutil/ed25519.go b/vendor/github.com/in-toto/go-witness/cryptoutil/ed25519.go new file mode 100644 index 0000000000..35f3741300 --- /dev/null +++ b/vendor/github.com/in-toto/go-witness/cryptoutil/ed25519.go @@ -0,0 +1,83 @@ +// Copyright 2021 The Witness Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package cryptoutil
+
+import (
+	"crypto"
+	"crypto/ed25519"
+	"fmt"
+	"io"
+)
+
+type ED25519Signer struct {
+	priv ed25519.PrivateKey
+}
+
+func NewED25519Signer(priv ed25519.PrivateKey) *ED25519Signer {
+	return &ED25519Signer{priv}
+}
+
+func (s *ED25519Signer) KeyID() (string, error) {
+	return GeneratePublicKeyID(s.priv.Public(), crypto.SHA256)
+}
+
+func (s *ED25519Signer) Sign(r io.Reader) ([]byte, error) {
+	msg, err := io.ReadAll(r)
+	if err != nil {
+		return nil, err
+	}
+
+	return ed25519.Sign(s.priv, msg), nil
+}
+
+func (s *ED25519Signer) Verifier() (Verifier, error) {
+	pubKey := s.priv.Public()
+	edPubKey, ok := pubKey.(ed25519.PublicKey)
+	if !ok {
+		return nil, ErrUnsupportedKeyType{t: fmt.Sprintf("%T", pubKey)}
+	}
+
+	return NewED25519Verifier(edPubKey), nil
+}
+
+type ED25519Verifier struct {
+	pub ed25519.PublicKey
+}
+
+func NewED25519Verifier(pub ed25519.PublicKey) *ED25519Verifier {
+	return &ED25519Verifier{pub}
+}
+
+func (v *ED25519Verifier) KeyID() (string, error) {
+	return GeneratePublicKeyID(v.pub, crypto.SHA256)
+}
+
+func (v *ED25519Verifier) Verify(r io.Reader, sig []byte) error {
+	msg, err := io.ReadAll(r)
+	if err != nil {
+		return err
+	}
+
+	verified := ed25519.Verify(v.pub, msg, sig)
+	if !verified {
+		return ErrVerifyFailed{}
+	}
+
+	return nil
+}
+
+func (v *ED25519Verifier) Bytes() ([]byte, error) {
+	return PublicPemBytes(v.pub)
+}
diff --git a/vendor/github.com/in-toto/go-witness/cryptoutil/gitoid.go b/vendor/github.com/in-toto/go-witness/cryptoutil/gitoid.go
new file mode 100644
index 0000000000..f3fe365ca1
--- /dev/null
+++ b/vendor/github.com/in-toto/go-witness/cryptoutil/gitoid.go
@@ -0,0 +1,85 @@
+// Copyright 2023 The Witness Contributors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cryptoutil
+
+import (
+	"bytes"
+	"crypto"
+	"encoding/hex"
+	"fmt"
+
+	"github.com/edwarnicke/gitoid"
+)
+
+// gitoidHasher implements io.Writer so we can generate gitoids with our CalculateDigestSet function.
+// CalculateDigestSet takes in an io.Reader pointing to some data we want to hash, and writes it to a
+// MultiWriter that forwards it to writers for each hash we wish to calculate.
+// This is a bit hacky -- it maintains an internal buffer, and when asked for the Sum it calculates
+// the gitoid. We may be able to contribute to the gitoid library to make this smoother.
+type gitoidHasher struct {
+	buf  *bytes.Buffer
+	hash crypto.Hash
+}
+
+// Write implements the io.Writer interface, and writes to the internal buffer
+func (gh *gitoidHasher) Write(p []byte) (n int, err error) {
+	return gh.buf.Write(p)
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+// It does not change the underlying hash state.
+func (gh *gitoidHasher) Sum(b []byte) []byte {
+	opts := []gitoid.Option{}
+	if gh.hash == crypto.SHA256 {
+		opts = append(opts, gitoid.WithSha256())
+	}
+
+	g, err := gitoid.New(gh.buf, opts...)
+	if err != nil {
+		return []byte{}
+	}
+
+	return append(b, []byte(g.URI())...)
+}
+
+// Reset resets the Hash to its initial state.
+func (gh *gitoidHasher) Reset() {
+	gh.buf = &bytes.Buffer{}
+}
+
+// Size returns the number of bytes Sum will return.
+func (gh *gitoidHasher) Size() int {
+	hashName, err := HashToString(gh.hash)
+	if err != nil {
+		return 0
+	}
+
+	// this is somewhat fragile and knows too much about the internals of the gitoid code...
+	// we're assuming that the default gitoid content type will remain BLOB, that our
+	// string representations of hash functions will remain consistent with theirs,
+	// and that the URI format will remain consistent.
+	// this should probably be changed, and this entire thing could maybe be upstreamed to the
+	// gitoid library.
+	return len(fmt.Sprintf("gitoid:%s:%s:", gitoid.BLOB, hashName)) + hex.EncodedLen(gh.hash.Size())
+}
+
+// BlockSize returns the hash's underlying block size.
+// The Write method must be able to accept any amount
+// of data, but it may operate more efficiently if all writes
+// are a multiple of the block size.
+func (gh *gitoidHasher) BlockSize() int {
+	hf := gh.hash.New()
+	return hf.BlockSize()
+}
diff --git a/vendor/github.com/in-toto/go-witness/cryptoutil/rsa.go b/vendor/github.com/in-toto/go-witness/cryptoutil/rsa.go
new file mode 100644
index 0000000000..a3d617fcd4
--- /dev/null
+++ b/vendor/github.com/in-toto/go-witness/cryptoutil/rsa.go
@@ -0,0 +1,89 @@
+// Copyright 2021 The Witness Contributors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package cryptoutil + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "io" +) + +type RSASigner struct { + priv *rsa.PrivateKey + hash crypto.Hash +} + +func NewRSASigner(priv *rsa.PrivateKey, hash crypto.Hash) *RSASigner { + return &RSASigner{priv, hash} +} + +func (s *RSASigner) KeyID() (string, error) { + return GeneratePublicKeyID(&s.priv.PublicKey, s.hash) +} + +func (s *RSASigner) Sign(r io.Reader) ([]byte, error) { + digest, err := Digest(r, s.hash) + if err != nil { + return nil, err + } + + opts := &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: s.hash, + } + + return rsa.SignPSS(rand.Reader, s.priv, s.hash, digest, opts) +} + +func (s *RSASigner) Verifier() (Verifier, error) { + return NewRSAVerifier(&s.priv.PublicKey, s.hash), nil +} + +type RSAVerifier struct { + pub *rsa.PublicKey + hash crypto.Hash +} + +func NewRSAVerifier(pub *rsa.PublicKey, hash crypto.Hash) *RSAVerifier { + return &RSAVerifier{pub, hash} +} + +func (v *RSAVerifier) KeyID() (string, error) { + return GeneratePublicKeyID(v.pub, v.hash) +} + +func (v *RSAVerifier) Verify(data io.Reader, sig []byte) error { + digest, err := Digest(data, v.hash) + if err != nil { + return err + } + + pssOpts := &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: v.hash, + } + + // AWS KMS introduces the chance that attestations get signed by PKCS1v15 instead of PSS + if err := rsa.VerifyPSS(v.pub, v.hash, digest, sig, pssOpts); err != nil { + return rsa.VerifyPKCS1v15(v.pub, v.hash, digest, sig) + } + + return nil +} + +func (v *RSAVerifier) Bytes() ([]byte, error) { + return PublicPemBytes(v.pub) +} diff --git a/vendor/github.com/in-toto/go-witness/cryptoutil/signer.go b/vendor/github.com/in-toto/go-witness/cryptoutil/signer.go new file mode 100644 index 0000000000..7c9dfbcaa2 --- /dev/null +++ b/vendor/github.com/in-toto/go-witness/cryptoutil/signer.go @@ -0,0 +1,121 @@ +// Copyright 2021 The Witness Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cryptoutil + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/x509" + "fmt" + "io" +) + +type ErrUnsupportedKeyType struct { + t string +} + +func (e ErrUnsupportedKeyType) Error() string { + return fmt.Sprintf("unsupported signer key type: %v", e.t) +} + +type Signer interface { + KeyIdentifier + Sign(r io.Reader) ([]byte, error) + Verifier() (Verifier, error) +} + +type KeyIdentifier interface { + KeyID() (string, error) +} + +type TrustBundler interface { + Certificate() *x509.Certificate + Intermediates() []*x509.Certificate + Roots() []*x509.Certificate +} + +type SignerOption func(*signerOptions) + +type signerOptions struct { + cert *x509.Certificate + intermediates []*x509.Certificate + roots []*x509.Certificate + hash crypto.Hash +} + +func SignWithCertificate(cert *x509.Certificate) SignerOption { + return func(so *signerOptions) { + so.cert = cert + } +} + +func SignWithIntermediates(intermediates []*x509.Certificate) SignerOption { + return func(so *signerOptions) { + so.intermediates = intermediates + } +} + +func SignWithRoots(roots []*x509.Certificate) SignerOption { + return func(so *signerOptions) { + so.roots = roots + } +} + +func SignWithHash(h crypto.Hash) SignerOption { + return func(so *signerOptions) { + so.hash = h + } +} + +func NewSigner(priv interface{}, opts ...SignerOption) (Signer, error) { + options := &signerOptions{ + hash: crypto.SHA256, + } + + for _, opt := range opts { + opt(options) + } + + var signer Signer + switch key := priv.(type) { + case *rsa.PrivateKey: + signer = NewRSASigner(key, options.hash) + case *ecdsa.PrivateKey: + signer = NewECDSASigner(key, options.hash) + case ed25519.PrivateKey: + signer = NewED25519Signer(key) + default: + return nil, ErrUnsupportedKeyType{ + t: fmt.Sprintf("%T", priv), + } + } + + if options.cert != nil { + return NewX509Signer(signer, options.cert, options.intermediates, options.roots) + } + + return signer, nil +} + +func NewSignerFromReader(r io.Reader, opts ...SignerOption) (Signer, error) { + key, err := TryParseKeyFromReader(r) + if err != nil { + return nil, err + } + + return NewSigner(key, opts...) +} diff --git a/vendor/github.com/in-toto/go-witness/cryptoutil/util.go b/vendor/github.com/in-toto/go-witness/cryptoutil/util.go new file mode 100644 index 0000000000..2b96280be9 --- /dev/null +++ b/vendor/github.com/in-toto/go-witness/cryptoutil/util.go @@ -0,0 +1,201 @@ +// Copyright 2021 The Witness Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package cryptoutil
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/x509"
+	"encoding/hex"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// PEMType is a specific type for string constants used during PEM encoding and decoding
+type PEMType string
+
+const (
+	// PublicKeyPEMType is the string "PUBLIC KEY" to be used during PEM encoding and decoding
+	PublicKeyPEMType PEMType = "PUBLIC KEY"
+	// PKCS1PublicKeyPEMType is the string "RSA PUBLIC KEY" used to parse PKCS#1-encoded public keys
+	PKCS1PublicKeyPEMType PEMType = "RSA PUBLIC KEY"
+)
+
+type ErrUnsupportedPEM struct {
+	t string
+}
+
+func (e ErrUnsupportedPEM) Error() string {
+	return fmt.Sprintf("unsupported pem type: %v", e.t)
+}
+
+type ErrInvalidPemBlock struct{}
+
+func (e ErrInvalidPemBlock) Error() string {
+	return "invalid pem block"
+}
+
+func DigestBytes(data []byte, hash crypto.Hash) ([]byte, error) {
+	return Digest(bytes.NewReader(data), hash)
+}
+
+func Digest(r io.Reader, hash crypto.Hash) ([]byte, error) {
+	hashFunc := hash.New()
+	if _, err := io.Copy(hashFunc, r); err != nil {
+		return nil, err
+	}
+
+	return hashFunc.Sum(nil), nil
+}
+
+func HexEncode(src []byte) []byte {
+	dst := make([]byte, hex.EncodedLen(len(src)))
+	hex.Encode(dst, src)
+	return dst
+}
+
+func GeneratePublicKeyID(pub interface{}, hash crypto.Hash) (string, error) {
+	pemBytes, err := PublicPemBytes(pub)
+	if err != nil {
+		return "", err
+	}
+
+	digest, err := DigestBytes(pemBytes, hash)
+	if err != nil {
+		return "", err
+	}
+
+	return string(HexEncode(digest)), nil
+}
+
+func PublicPemBytes(pub interface{}) ([]byte, error) {
+	keyBytes, err := x509.MarshalPKIXPublicKey(pub)
+	if err != nil {
+		return nil, err
+	}
+
+	pemBytes := pem.EncodeToMemory(&pem.Block{Type: "PUBLIC KEY", Bytes: keyBytes})
+	return pemBytes, nil
+}
+
+// UnmarshalPEMToPublicKey converts a PEM-encoded byte slice into a crypto.PublicKey
+func UnmarshalPEMToPublicKey(pemBytes []byte) (crypto.PublicKey, error) {
+	derBytes, _ := pem.Decode(pemBytes)
+	if derBytes == nil {
+		return nil, errors.New("PEM decoding failed")
+	}
+	switch derBytes.Type {
+	case string(PublicKeyPEMType):
+		return x509.ParsePKIXPublicKey(derBytes.Bytes)
+	case string(PKCS1PublicKeyPEMType):
+		return x509.ParsePKCS1PublicKey(derBytes.Bytes)
+	default:
+		return nil, fmt.Errorf("unknown public key PEM file type: %v. Are you passing the correct public key?",
+			derBytes.Type)
+	}
+}
+
+func TryParsePEMBlock(block *pem.Block) (interface{}, error) {
+	if block == nil {
+		return nil, ErrInvalidPemBlock{}
+	}
+
+	key, err := x509.ParsePKCS8PrivateKey(block.Bytes)
+	if err == nil {
+		return key, err
+	}
+
+	key, err = x509.ParsePKCS1PrivateKey(block.Bytes)
+	if err == nil {
+		return key, err
+	}
+
+	key, err = x509.ParseECPrivateKey(block.Bytes)
+	if err == nil {
+		return key, err
+	}
+
+	key, err = x509.ParsePKIXPublicKey(block.Bytes)
+	if err == nil {
+		return key, err
+	}
+
+	key, err = x509.ParsePKCS1PublicKey(block.Bytes)
+	if err == nil {
+		return key, err
+	}
+
+	key, err = x509.ParseCertificate(block.Bytes)
+	if err == nil {
+		return key, err
+	}
+
+	return nil, ErrUnsupportedPEM{block.Type}
+}
+
+func TryParseKeyFromReader(r io.Reader) (interface{}, error) {
+	bytes, err := io.ReadAll(r)
+	if err != nil {
+		return nil, err
+	}
+
+	// we may want to handle files with multiple pem blocks in them, but for now...
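+	// only the first PEM block returned by pem.Decode is parsed.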
+	pemBlock, _ := pem.Decode(bytes)
+	return TryParsePEMBlock(pemBlock)
+}
+
+func TryParseCertificate(data []byte) (*x509.Certificate, error) {
+	possibleCert, err := TryParseKeyFromReader(bytes.NewReader(data))
+	if err != nil {
+		return nil, err
+	}
+
+	cert, ok := possibleCert.(*x509.Certificate)
+	if !ok {
+		return nil, fmt.Errorf("data was a valid PEM block but not a certificate")
+	}
+
+	return cert, nil
+}
+
+// ComputeDigest calculates the digest value for the specified message using the supplied hash function
+func ComputeDigest(rawMessage io.Reader, hashFunc crypto.Hash, supportedHashFuncs []crypto.Hash) ([]byte, crypto.Hash, error) {
+	var cryptoSignerOpts crypto.SignerOpts = hashFunc
+	hashedWith := cryptoSignerOpts.HashFunc()
+	if !isSupportedAlg(hashedWith, supportedHashFuncs) {
+		return nil, crypto.Hash(0), fmt.Errorf("unsupported hash algorithm: %q not in %v", hashedWith.String(), supportedHashFuncs)
+	}
+
+	digest, err := Digest(rawMessage, hashedWith)
+	return digest, hashedWith, err
+}
+
+func isSupportedAlg(alg crypto.Hash, supportedAlgs []crypto.Hash) bool {
+	if supportedAlgs == nil {
+		return true
+	}
+	for _, supportedAlg := range supportedAlgs {
+		if alg == supportedAlg {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/in-toto/go-witness/cryptoutil/verifier.go b/vendor/github.com/in-toto/go-witness/cryptoutil/verifier.go
new file mode 100644
index 0000000000..b243e286ec
--- /dev/null
+++ b/vendor/github.com/in-toto/go-witness/cryptoutil/verifier.go
@@ -0,0 +1,99 @@
+// Copyright 2021 The Witness Contributors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package cryptoutil + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/x509" + "fmt" + "io" + "time" +) + +type Verifier interface { + KeyIdentifier + Verify(body io.Reader, sig []byte) error + Bytes() ([]byte, error) +} + +type VerifierOption func(*verifierOptions) + +type verifierOptions struct { + roots []*x509.Certificate + intermediates []*x509.Certificate + hash crypto.Hash + trustedTime time.Time +} + +func VerifyWithRoots(roots []*x509.Certificate) VerifierOption { + return func(vo *verifierOptions) { + vo.roots = roots + } +} + +func VerifyWithIntermediates(intermediates []*x509.Certificate) VerifierOption { + return func(vo *verifierOptions) { + vo.intermediates = intermediates + } +} + +func VerifyWithHash(h crypto.Hash) VerifierOption { + return func(vo *verifierOptions) { + vo.hash = h + } +} + +func VerifyWithTrustedTime(t time.Time) VerifierOption { + return func(vo *verifierOptions) { + vo.trustedTime = t + } +} + +func NewVerifier(pub interface{}, opts ...VerifierOption) (Verifier, error) { + options := &verifierOptions{ + hash: crypto.SHA256, + } + + for _, opt := range opts { + opt(options) + } + + switch key := pub.(type) { + case *rsa.PublicKey: + return NewRSAVerifier(key, options.hash), nil + case *ecdsa.PublicKey: + return NewECDSAVerifier(key, options.hash), nil + case ed25519.PublicKey: + return NewED25519Verifier(key), nil + case *x509.Certificate: + return NewX509Verifier(key, options.intermediates, options.roots, options.trustedTime) + default: + return nil, ErrUnsupportedKeyType{ + t: fmt.Sprintf("%T", pub), + } + } +} + +func NewVerifierFromReader(r io.Reader, opts ...VerifierOption) (Verifier, error) { + key, err := TryParseKeyFromReader(r) + if err != nil { + return nil, err + } + + return NewVerifier(key, opts...) +} diff --git a/vendor/github.com/in-toto/go-witness/cryptoutil/x509.go b/vendor/github.com/in-toto/go-witness/cryptoutil/x509.go new file mode 100644 index 0000000000..4bf2217c59 --- /dev/null +++ b/vendor/github.com/in-toto/go-witness/cryptoutil/x509.go @@ -0,0 +1,173 @@ +// Copyright 2021 The Witness Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cryptoutil + +import ( + "crypto/x509" + "encoding/pem" + "io" + "time" +) + +type X509Verifier struct { + cert *x509.Certificate + roots []*x509.Certificate + intermediates []*x509.Certificate + verifier Verifier + trustedTime time.Time +} + +func NewX509Verifier(cert *x509.Certificate, intermediates, roots []*x509.Certificate, trustedTime time.Time) (*X509Verifier, error) { + verifier, err := NewVerifier(cert.PublicKey) + if err != nil { + return nil, err + } + + return &X509Verifier{ + cert: cert, + roots: roots, + intermediates: intermediates, + verifier: verifier, + trustedTime: trustedTime, + }, nil +} + +func (v *X509Verifier) KeyID() (string, error) { + return v.verifier.KeyID() +} + +func (v *X509Verifier) Verify(body io.Reader, sig []byte) error { + rootPool := certificatesToPool(v.roots) + intermediatePool := certificatesToPool(v.intermediates) + if _, err := v.cert.Verify(x509.VerifyOptions{ + CurrentTime: v.trustedTime, + Roots: rootPool, + Intermediates: intermediatePool, + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + }); err != nil { + return err + } + + return v.verifier.Verify(body, sig) +} + +func (v *X509Verifier) BelongsToRoot(root *x509.Certificate) error { + rootPool := certificatesToPool([]*x509.Certificate{root}) + intermediatePool := certificatesToPool(v.intermediates) + _, err := v.cert.Verify(x509.VerifyOptions{ + Roots: rootPool, + Intermediates: intermediatePool, + CurrentTime: v.trustedTime, + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + }) + + return err +} + +func (v *X509Verifier) Bytes() ([]byte, error) { + pemBytes := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: v.cert.Raw}) + return pemBytes, nil +} + +func (v *X509Verifier) Certificate() *x509.Certificate { + return v.cert +} + +func (v *X509Verifier) Intermediates() []*x509.Certificate { + return v.intermediates +} + +func (v *X509Verifier) Roots() []*x509.Certificate { + return v.roots +} + +type X509Signer struct { + cert *x509.Certificate + roots []*x509.Certificate + intermediates []*x509.Certificate + signer Signer +} + +type ErrInvalidSigner struct{} + +func (e ErrInvalidSigner) Error() string { + return "signer must not be nil" +} + +type ErrInvalidCertificate struct{} + +func (e ErrInvalidCertificate) Error() string { + return "certificate must not be nil" +} + +func NewX509Signer(signer Signer, cert *x509.Certificate, intermediates, roots []*x509.Certificate) (*X509Signer, error) { + if signer == nil { + return nil, ErrInvalidSigner{} + } + + if cert == nil { + return nil, ErrInvalidCertificate{} + } + + return &X509Signer{ + signer: signer, + cert: cert, + roots: roots, + intermediates: intermediates, + }, nil +} + +func (s *X509Signer) KeyID() (string, error) { + return s.signer.KeyID() +} + +func (s *X509Signer) Sign(r io.Reader) ([]byte, error) { + return s.signer.Sign(r) +} + +func (s *X509Signer) Verifier() (Verifier, error) { + verifier, err := s.signer.Verifier() + if err != nil { + return nil, err + } + + return &X509Verifier{ + verifier: verifier, + cert: s.cert, + roots: s.roots, + intermediates: s.intermediates, + }, nil +} + +func (s *X509Signer) Certificate() *x509.Certificate { + return s.cert +} + +func (s *X509Signer) Intermediates() []*x509.Certificate { + return s.intermediates +} + +func (s *X509Signer) Roots() []*x509.Certificate { + return s.roots +} + +func certificatesToPool(certs []*x509.Certificate) *x509.CertPool { + pool := x509.NewCertPool() + for _, cert := range certs { + pool.AddCert(cert) + } + + return pool +} diff 
--git a/vendor/github.com/in-toto/go-witness/dsse/dsse.go b/vendor/github.com/in-toto/go-witness/dsse/dsse.go
new file mode 100644
index 0000000000..81d13e2249
--- /dev/null
+++ b/vendor/github.com/in-toto/go-witness/dsse/dsse.go
@@ -0,0 +1,96 @@
+// Copyright 2021 The Witness Contributors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dsse
+
+import (
+	"fmt"
+
+	"github.com/in-toto/go-witness/log"
+)
+
+type ErrNoSignatures struct{}
+
+func (e ErrNoSignatures) Error() string {
+	return "no signatures in dsse envelope"
+}
+
+type ErrNoMatchingSigs struct {
+	Verifiers []CheckedVerifier
+}
+
+func (e ErrNoMatchingSigs) Error() string {
+	mess := "no valid signatures for the provided verifiers found for keyids:\n"
+	for _, v := range e.Verifiers {
+		if v.Error != nil {
+			kid, err := v.Verifier.KeyID()
+			if err != nil {
+				log.Warnf("failed to get key id from verifier: %w", err)
+			}
+
+			s := fmt.Sprintf(" %s: %v\n", kid, v.Error)
+			mess += s
+		}
+	}
+
+	return mess
+}
+
+type ErrThresholdNotMet struct {
+	Threshold int
+	Actual    int
+}
+
+func (e ErrThresholdNotMet) Error() string {
+	return fmt.Sprintf("envelope did not meet verifier threshold. expected %v valid verifiers but got %v", e.Threshold, e.Actual)
+}
+
+type ErrInvalidThreshold int
+
+func (e ErrInvalidThreshold) Error() string {
+	return fmt.Sprintf("invalid threshold (%v). thresholds must be greater than 0", int(e))
+}
+
+const PemTypeCertificate = "CERTIFICATE"
+
+type Envelope struct {
+	Payload     []byte      `json:"payload"`
+	PayloadType string      `json:"payloadType"`
+	Signatures  []Signature `json:"signatures"`
+}
+
+type Signature struct {
+	KeyID         string               `json:"keyid"`
+	Signature     []byte               `json:"sig"`
+	Certificate   []byte               `json:"certificate,omitempty"`
+	Intermediates [][]byte             `json:"intermediates,omitempty"`
+	Timestamps    []SignatureTimestamp `json:"timestamps,omitempty"`
+}
+
+type SignatureTimestampType string
+
+const TimestampRFC3161 SignatureTimestampType = "tsp"
+
+type SignatureTimestamp struct {
+	Type SignatureTimestampType `json:"type"`
+	Data []byte                 `json:"data"`
+}
+
+// preauthEncode wraps the data to be signed or verified and its type in the DSSE protocol's
+// pre-authentication encoding as detailed at https://github.com/secure-systems-lab/dsse/blob/master/protocol.md
+// PAE(type, body) = "DSSEv1" + SP + LEN(type) + SP + type + SP + LEN(body) + SP + body
+func preauthEncode(bodyType string, body []byte) []byte {
+	const dsseVersion = "DSSEv1"
+	return []byte(fmt.Sprintf("%s %d %s %d %s", dsseVersion, len(bodyType), bodyType, len(body), body))
+}
diff --git a/vendor/github.com/in-toto/go-witness/dsse/sign.go b/vendor/github.com/in-toto/go-witness/dsse/sign.go
new file mode 100644
index 0000000000..267ec079b3
--- /dev/null
+++ b/vendor/github.com/in-toto/go-witness/dsse/sign.go
@@ -0,0 +1,115 @@
+// Copyright 2022 The Witness Contributors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dsse + +import ( + "bytes" + "context" + "encoding/pem" + "fmt" + "io" + + "github.com/in-toto/go-witness/cryptoutil" + "github.com/in-toto/go-witness/timestamp" +) + +type signOptions struct { + signers []cryptoutil.Signer + timestampers []timestamp.Timestamper +} + +type SignOption func(*signOptions) + +func SignWithSigners(signers ...cryptoutil.Signer) SignOption { + return func(so *signOptions) { + so.signers = signers + } +} + +func SignWithTimestampers(timestampers ...timestamp.Timestamper) SignOption { + return func(so *signOptions) { + so.timestampers = timestampers + } +} + +func Sign(bodyType string, body io.Reader, opts ...SignOption) (Envelope, error) { + so := &signOptions{} + env := Envelope{} + for _, opt := range opts { + opt(so) + } + + if len(so.signers) == 0 { + return env, fmt.Errorf("must have at least one signer, have %v", len(so.signers)) + } + + bodyBytes, err := io.ReadAll(body) + if err != nil { + return env, err + } + + env.PayloadType = bodyType + env.Payload = bodyBytes + env.Signatures = make([]Signature, 0) + pae := preauthEncode(bodyType, bodyBytes) + for _, signer := range so.signers { + if signer == nil { + continue + } + + sig, err := signer.Sign(bytes.NewReader(pae)) + if err != nil { + return env, err + } + + keyID, err := signer.KeyID() + if err != nil { + return env, err + } + + dsseSig := Signature{ + KeyID: keyID, + Signature: sig, + } + + for _, timestamper := range so.timestampers { + timestamp, err := timestamper.Timestamp(context.TODO(), bytes.NewReader(sig)) + if err != nil { + return env, err + } + + dsseSig.Timestamps = append(dsseSig.Timestamps, SignatureTimestamp{ + Type: TimestampRFC3161, + Data: timestamp, + }) + } + + if trustBundler, ok := signer.(cryptoutil.TrustBundler); ok { + leaf := trustBundler.Certificate() + intermediates := trustBundler.Intermediates() + if leaf != nil { + dsseSig.Certificate = pem.EncodeToMemory(&pem.Block{Type: PemTypeCertificate, Bytes: leaf.Raw}) + } + + for _, intermediate := range intermediates { + dsseSig.Intermediates = append(dsseSig.Intermediates, pem.EncodeToMemory(&pem.Block{Type: PemTypeCertificate, Bytes: intermediate.Raw})) + } + } + + env.Signatures = append(env.Signatures, dsseSig) + } + + return env, nil +} diff --git a/vendor/github.com/in-toto/go-witness/dsse/verify.go b/vendor/github.com/in-toto/go-witness/dsse/verify.go new file mode 100644 index 0000000000..9c94a7446c --- /dev/null +++ b/vendor/github.com/in-toto/go-witness/dsse/verify.go @@ -0,0 +1,201 @@ +// Copyright 2022 The Witness Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package dsse
+
+import (
+	"bytes"
+	"context"
+	"crypto/x509"
+	"fmt"
+	"time"
+
+	"github.com/in-toto/go-witness/cryptoutil"
+	"github.com/in-toto/go-witness/log"
+	"github.com/in-toto/go-witness/timestamp"
+)
+
+type verificationOptions struct {
+	roots              []*x509.Certificate
+	intermediates      []*x509.Certificate
+	verifiers          []cryptoutil.Verifier
+	threshold          int
+	timestampVerifiers []timestamp.TimestampVerifier
+}
+
+type VerificationOption func(*verificationOptions)
+
+func VerifyWithRoots(roots ...*x509.Certificate) VerificationOption {
+	return func(vo *verificationOptions) {
+		vo.roots = roots
+	}
+}
+
+func VerifyWithIntermediates(intermediates ...*x509.Certificate) VerificationOption {
+	return func(vo *verificationOptions) {
+		vo.intermediates = intermediates
+	}
+}
+
+func VerifyWithVerifiers(verifiers ...cryptoutil.Verifier) VerificationOption {
+	return func(vo *verificationOptions) {
+		vo.verifiers = verifiers
+	}
+}
+
+func VerifyWithThreshold(threshold int) VerificationOption {
+	return func(vo *verificationOptions) {
+		vo.threshold = threshold
+	}
+}
+
+func VerifyWithTimestampVerifiers(verifiers ...timestamp.TimestampVerifier) VerificationOption {
+	return func(vo *verificationOptions) {
+		vo.timestampVerifiers = verifiers
+	}
+}
+
+type CheckedVerifier struct {
+	Verifier           cryptoutil.Verifier
+	TimestampVerifiers []timestamp.TimestampVerifier
+	Error              error
+}
+
+func (e Envelope) Verify(opts ...VerificationOption) ([]CheckedVerifier, error) {
+	options := &verificationOptions{
+		threshold: 1,
+	}
+
+	for _, opt := range opts {
+		opt(options)
+	}
+
+	if options.threshold <= 0 {
+		return nil, ErrInvalidThreshold(options.threshold)
+	}
+
+	pae := preauthEncode(e.PayloadType, e.Payload)
+	if len(e.Signatures) == 0 {
+		return nil, ErrNoSignatures{}
+	}
+
+	checkedVerifiers := make([]CheckedVerifier, 0)
+	verified := 0
+	for _, sig := range e.Signatures {
+		if len(sig.Certificate) > 0 {
+			cert, err := cryptoutil.TryParseCertificate(sig.Certificate)
+			if err != nil {
+				continue
+			}
+
+			sigIntermediates := make([]*x509.Certificate, 0)
+			for _, intBytes := range sig.Intermediates {
+				intCert, err := cryptoutil.TryParseCertificate(intBytes)
+				if err != nil {
+					continue
+				}
+
+				sigIntermediates = append(sigIntermediates, intCert)
+			}
+
+			sigIntermediates = append(sigIntermediates, options.intermediates...)
+			if len(options.timestampVerifiers) == 0 {
+				if verifier, err := verifyX509Time(cert, sigIntermediates, options.roots, pae, sig.Signature, time.Now()); err == nil {
+					checkedVerifiers = append(checkedVerifiers, CheckedVerifier{Verifier: verifier})
+					verified += 1
+				} else {
+					checkedVerifiers = append(checkedVerifiers, CheckedVerifier{Verifier: verifier, Error: err})
+					log.Debugf("failed to verify with x509 verifier: %w", err)
+				}
+			} else {
+				var passedVerifier cryptoutil.Verifier
+				failed := []cryptoutil.Verifier{}
+				passedTimestampVerifiers := []timestamp.TimestampVerifier{}
+				failedTimestampVerifiers := []timestamp.TimestampVerifier{}
+
+				for _, timestampVerifier := range options.timestampVerifiers {
+					for _, sigTimestamp := range sig.Timestamps {
+						tsTime, err := timestampVerifier.Verify(context.TODO(), bytes.NewReader(sigTimestamp.Data), bytes.NewReader(sig.Signature))
+						if err != nil {
+							continue
+						}
+
+						if verifier, err := verifyX509Time(cert, sigIntermediates, options.roots, pae, sig.Signature, tsTime); err == nil {
+							// NOTE: do we not want to save all the passed verifiers?
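+							// As written, only the most recently passing verifier is
+							// retained in passedVerifier; every passing timestamp
+							// verifier is collected below.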
+							passedVerifier = verifier
+							passedTimestampVerifiers = append(passedTimestampVerifiers, timestampVerifier)
+						} else {
+							failed = append(failed, verifier)
+							failedTimestampVerifiers = append(failedTimestampVerifiers, timestampVerifier)
+							log.Debugf("failed to verify with timestamp verifier: %w", err)
+						}
+					}
+				}
+
+				if len(passedTimestampVerifiers) > 0 {
+					verified += 1
+					checkedVerifiers = append(checkedVerifiers, CheckedVerifier{
+						Verifier:           passedVerifier,
+						TimestampVerifiers: passedTimestampVerifiers,
+					})
+				} else {
+					for _, v := range failed {
+						checkedVerifiers = append(checkedVerifiers, CheckedVerifier{
+							Verifier:           v,
+							TimestampVerifiers: failedTimestampVerifiers,
+							Error:              fmt.Errorf("no valid timestamps found"),
+						})
+					}
+				}
+			}
+		}
+
+		for _, verifier := range options.verifiers {
+			if verifier != nil {
+				kid, err := verifier.KeyID()
+				if err != nil {
+					log.Warnf("failed to get key id from verifier: %v", err)
+				}
+				log.Debug("verifying with verifier with KeyID ", kid)
+
+				if err := verifier.Verify(bytes.NewReader(pae), sig.Signature); err == nil {
+					verified += 1
+					checkedVerifiers = append(checkedVerifiers, CheckedVerifier{Verifier: verifier})
+				} else {
+					checkedVerifiers = append(checkedVerifiers, CheckedVerifier{Verifier: verifier, Error: err})
+				}
+			}
+		}
+	}
+
+	if verified == 0 {
+		return nil, ErrNoMatchingSigs{Verifiers: checkedVerifiers}
+	} else if verified < options.threshold {
+		return checkedVerifiers, ErrThresholdNotMet{Threshold: options.threshold, Actual: verified}
+	}
+
+	return checkedVerifiers, nil
+}
+
+func verifyX509Time(cert *x509.Certificate, sigIntermediates, roots []*x509.Certificate, pae, sig []byte, trustedTime time.Time) (cryptoutil.Verifier, error) {
+	verifier, err := cryptoutil.NewX509Verifier(cert, sigIntermediates, roots, trustedTime)
+	if err != nil {
+		return nil, err
+	}
+
+	err = verifier.Verify(bytes.NewReader(pae), sig)
+
+	return verifier, err
+}
diff --git a/vendor/github.com/in-toto/go-witness/log/log.go b/vendor/github.com/in-toto/go-witness/log/log.go
new file mode 100644
index 0000000000..31396dc17a
--- /dev/null
+++ b/vendor/github.com/in-toto/go-witness/log/log.go
@@ -0,0 +1,94 @@
+// Copyright 2022 The Witness Contributors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package log
+
+import (
+	"fmt"
+)
+
+var log Logger = SilentLogger{}
+
+// Logger is used by witness library code to print out relevant information at runtime.
+type Logger interface {
+	Errorf(format string, args ...interface{})
+	Error(args ...interface{})
+	Warnf(format string, args ...interface{})
+	Warn(args ...interface{})
+	Debugf(format string, args ...interface{})
+	Debug(args ...interface{})
+	Infof(format string, args ...interface{})
+	Info(args ...interface{})
+}
+
+// SetLogger will set the Logger instance that all Witness library code will use as logging output.
+// The default is a SilentLogger that will output nothing.
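+//
+// Illustrative usage from a caller's side, assuming the caller already depends
+// on logrus (any type implementing Logger works):
+//
+//	log.SetLogger(logrus.StandardLogger())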
+func SetLogger(l Logger) { + log = l +} + +// GetLogger returns the Logger instance currently being used by Witness library code. +func GetLogger() Logger { + return log +} + +func Errorf(format string, args ...interface{}) { + err := fmt.Errorf(format, args...) + log.Error(err) +} + +func Error(args ...interface{}) { + log.Error(args...) +} + +func Warnf(format string, args ...interface{}) { + // We want to wrap the error if there is one. + for _, a := range args { + if _, ok := a.(error); ok { + err := fmt.Errorf(format, args...) + log.Warn(err) + return + } + } + + log.Warnf(format, args...) +} + +func Warn(args ...interface{}) { + log.Warn(args...) +} + +func Debugf(format string, args ...interface{}) { + for _, a := range args { + if _, ok := a.(error); ok { + err := fmt.Errorf(format, args...) + log.Debug(err) + return + } + } + + log.Debugf(format, args...) +} + +func Debug(args ...interface{}) { + log.Debug(args...) +} + +func Infof(format string, args ...interface{}) { + log.Infof(format, args...) +} + +func Info(args ...interface{}) { + log.Info(args...) +} diff --git a/vendor/github.com/in-toto/go-witness/log/silent.go b/vendor/github.com/in-toto/go-witness/log/silent.go new file mode 100644 index 0000000000..000236c064 --- /dev/null +++ b/vendor/github.com/in-toto/go-witness/log/silent.go @@ -0,0 +1,30 @@ +// Copyright 2022 The Witness Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package log + +// SilentLogger is an implementation of the Logger interface that suppresses +// all logging output. This is the default logger when using Witness as a +// library, so that we don't interfere with the caller's stdout/stderr. Callers +// should supply their own Logger to capture Witness logging if desired. +type SilentLogger struct{} + +func (l SilentLogger) Errorf(format string, args ...interface{}) {} +func (l SilentLogger) Error(args ...interface{}) {} +func (l SilentLogger) Warnf(format string, args ...interface{}) {} +func (l SilentLogger) Warn(args ...interface{}) {} +func (l SilentLogger) Debugf(format string, args ...interface{}) {} +func (l SilentLogger) Debug(args ...interface{}) {} +func (l SilentLogger) Infof(format string, args ...interface{}) {} +func (l SilentLogger) Info(args ...interface{}) {} diff --git a/vendor/github.com/in-toto/go-witness/timestamp/fake.go b/vendor/github.com/in-toto/go-witness/timestamp/fake.go new file mode 100644 index 0000000000..1d50954683 --- /dev/null +++ b/vendor/github.com/in-toto/go-witness/timestamp/fake.go @@ -0,0 +1,43 @@ +// Copyright 2022 The Witness Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package timestamp + +import ( + "context" + "fmt" + "io" + "time" +) + +type FakeTimestamper struct { + T time.Time +} + +func (ft FakeTimestamper) Timestamp(context.Context, io.Reader) ([]byte, error) { + return []byte(ft.T.Format(time.RFC3339)), nil +} + +func (ft FakeTimestamper) Verify(ctx context.Context, ts io.Reader, sig io.Reader) (time.Time, error) { + b, err := io.ReadAll(ts) + if err != nil { + return time.Time{}, err + } + + if string(b) != ft.T.Format(time.RFC3339) { + return time.Time{}, fmt.Errorf("mismatched time") + } + + return ft.T, nil +} diff --git a/vendor/github.com/in-toto/go-witness/timestamp/timestamp.go b/vendor/github.com/in-toto/go-witness/timestamp/timestamp.go new file mode 100644 index 0000000000..6408190056 --- /dev/null +++ b/vendor/github.com/in-toto/go-witness/timestamp/timestamp.go @@ -0,0 +1,29 @@ +// Copyright 2022 The Witness Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package timestamp + +import ( + "context" + "io" + "time" +) + +type TimestampVerifier interface { + Verify(context.Context, io.Reader, io.Reader) (time.Time, error) +} + +type Timestamper interface { + Timestamp(context.Context, io.Reader) ([]byte, error) +} diff --git a/vendor/github.com/in-toto/go-witness/timestamp/tsp.go b/vendor/github.com/in-toto/go-witness/timestamp/tsp.go new file mode 100644 index 0000000000..e8a1e596d4 --- /dev/null +++ b/vendor/github.com/in-toto/go-witness/timestamp/tsp.go @@ -0,0 +1,176 @@ +// Copyright 2022 The Witness Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package timestamp
+
+import (
+	"bytes"
+	"context"
+	"crypto"
+	"crypto/x509"
+	"fmt"
+	"io"
+	"net/http"
+	"time"
+
+	"github.com/digitorus/pkcs7"
+	"github.com/digitorus/timestamp"
+	"github.com/in-toto/go-witness/cryptoutil"
+)
+
+type TSPTimestamper struct {
+	url                string
+	hash               crypto.Hash
+	requestCertificate bool
+}
+
+type TSPTimestamperOption func(*TSPTimestamper)
+
+func TimestampWithUrl(url string) TSPTimestamperOption {
+	return func(t *TSPTimestamper) {
+		t.url = url
+	}
+}
+
+func TimestampWithHash(h crypto.Hash) TSPTimestamperOption {
+	return func(t *TSPTimestamper) {
+		t.hash = h
+	}
+}
+
+func TimestampWithRequestCertificate(requestCertificate bool) TSPTimestamperOption {
+	return func(t *TSPTimestamper) {
+		t.requestCertificate = requestCertificate
+	}
+}
+
+func NewTimestamper(opts ...TSPTimestamperOption) TSPTimestamper {
+	t := TSPTimestamper{
+		hash:               crypto.SHA256,
+		requestCertificate: true,
+	}
+
+	for _, opt := range opts {
+		opt(&t)
+	}
+
+	return t
+}
+
+func (t TSPTimestamper) Timestamp(ctx context.Context, r io.Reader) ([]byte, error) {
+	tsq, err := timestamp.CreateRequest(r, &timestamp.RequestOptions{
+		Hash:         t.hash,
+		Certificates: t.requestCertificate,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	req, err := http.NewRequestWithContext(ctx, "POST", t.url, bytes.NewReader(tsq))
+	if err != nil {
+		return nil, err
+	}
+
+	req.Header.Add("Content-Type", "application/timestamp-query")
+	client := http.Client{}
+	resp, err := client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+
+	// close the response body on every return path, including the error return below
+	defer resp.Body.Close()
+
+	switch resp.StatusCode {
+	case http.StatusOK, http.StatusCreated, http.StatusAccepted:
+	default:
+		return nil, fmt.Errorf("request to timestamp authority failed: %v", resp.Status)
+	}
+
+	bodyBytes, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	tsr, err := timestamp.ParseResponse(bodyBytes)
+	if err != nil {
+		return nil, err
+	}
+
+	return tsr.RawToken, nil
+}
+
+type TSPVerifier struct {
+	certChain *x509.CertPool
+	hash      crypto.Hash
+}
+
+type TSPVerifierOption func(*TSPVerifier)
+
+func VerifyWithCerts(certs []*x509.Certificate) TSPVerifierOption {
+	return func(t *TSPVerifier) {
+		t.certChain = x509.NewCertPool()
+		for _, cert := range certs {
+			t.certChain.AddCert(cert)
+		}
+	}
+}
+
+func VerifyWithHash(h crypto.Hash) TSPVerifierOption {
+	return func(t *TSPVerifier) {
+		t.hash = h
+	}
+}
+
+func NewVerifier(opts ...TSPVerifierOption) TSPVerifier {
+	v := TSPVerifier{
+		hash: crypto.SHA256,
+	}
+
+	for _, opt := range opts {
+		opt(&v)
+	}
+
+	return v
+}
+
+func (v TSPVerifier) Verify(ctx context.Context, tsrData, signedData io.Reader) (time.Time, error) {
+	tsrBytes, err := io.ReadAll(tsrData)
+	if err != nil {
+		return time.Time{}, err
+	}
+
+	ts, err := timestamp.Parse(tsrBytes)
+	if err != nil {
+		return time.Time{}, err
+	}
+
+	hashedData, err := cryptoutil.Digest(signedData, v.hash)
+	if err != nil {
+		return time.Time{}, err
+	}
+
+	if !bytes.Equal(ts.HashedMessage, hashedData) {
+		return time.Time{}, fmt.Errorf("signed payload does not match timestamped payload")
+	}
+
+	p7, err := pkcs7.Parse(tsrBytes)
+	if err != nil {
+		return time.Time{}, err
+	}
+
+	if err := p7.VerifyWithChain(v.certChain); err != nil {
+		return time.Time{}, err
+	}
+
+	return ts.Time, nil
+}
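+
+// Illustrative usage (hypothetical TSA URL and signature bytes, not part of the
+// original file): request an RFC 3161 token for a signature.
+//
+//	t := NewTimestamper(TimestampWithUrl("https://tsa.example.com/tsr"))
+//	token, err := t.Timestamp(context.Background(), bytes.NewReader(sig))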
b/vendor/github.com/jjti/go-spancheck/.gitignore @@ -17,5 +17,8 @@ # Dependency directories (remove the comment below to include it) # vendor/ src/ +<<<<<<< HEAD -.vscode \ No newline at end of file +.vscode +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/jjti/go-spancheck/.golangci.yml b/vendor/github.com/jjti/go-spancheck/.golangci.yml index 5d6ab12875..7daf588696 100644 --- a/vendor/github.com/jjti/go-spancheck/.golangci.yml +++ b/vendor/github.com/jjti/go-spancheck/.golangci.yml @@ -17,6 +17,10 @@ linters: - errcheck - errname - errorlint +<<<<<<< HEAD +======= + - exhaustive # checks exhaustiveness of enum switch statements +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) - exportloopref # checks for pointers to enclosing loop variables - gci - gochecknoinits # checks that no init functions are present in Go code @@ -58,6 +62,15 @@ linters-settings: - standard # Standard section: captures all standard packages. - default # Default section: contains all imports that could not be matched to another section type. - prefix(github.com/jjti) +<<<<<<< HEAD +======= + exhaustive: + # Program elements to check for exhaustiveness. + # Default: [ switch ] + check: + - switch + - map +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) gocritic: settings: captLocal: diff --git a/vendor/github.com/jjti/go-spancheck/go.work b/vendor/github.com/jjti/go-spancheck/go.work index ff04ca17e2..9394068d30 100644 --- a/vendor/github.com/jjti/go-spancheck/go.work +++ b/vendor/github.com/jjti/go-spancheck/go.work @@ -1,4 +1,8 @@ +<<<<<<< HEAD go 1.22.1 +======= +go 1.20 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) use ( . diff --git a/vendor/github.com/jjti/go-spancheck/go.work.sum b/vendor/github.com/jjti/go-spancheck/go.work.sum index c96d590d61..ea65c356d4 100644 --- a/vendor/github.com/jjti/go-spancheck/go.work.sum +++ b/vendor/github.com/jjti/go-spancheck/go.work.sum @@ -1,5 +1,6 @@ github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= +<<<<<<< HEAD golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= @@ -9,3 +10,7 @@ golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0= +======= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/jjti/go-spancheck/spancheck.go b/vendor/github.com/jjti/go-spancheck/spancheck.go index 49e5817285..20375c8e4e 100644 --- a/vendor/github.com/jjti/go-spancheck/spancheck.go +++ b/vendor/github.com/jjti/go-spancheck/spancheck.go @@ -309,11 +309,14 @@ outer: } seen[b] = true +<<<<<<< HEAD // Skip successors that are not nested within this current block. if _, ok := nestedBlockTypes[b.Kind]; !ok { continue } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Prune the search if the block uses v. 
diff --git a/vendor/github.com/magiconair/properties/CHANGELOG.md b/vendor/github.com/magiconair/properties/CHANGELOG.md
new file mode 100644
index 0000000000..842e8e24fb
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/CHANGELOG.md
@@ -0,0 +1,205 @@
+## Changelog
+
+### [1.8.7](https://github.com/magiconair/properties/tree/v1.8.7) - 08 Dec 2022
+
+ * [PR #65](https://github.com/magiconair/properties/pull/65): Speedup Merge
+
+   Thanks to [@AdityaVallabh](https://github.com/AdityaVallabh) for the patch.
+
+ * [PR #66](https://github.com/magiconair/properties/pull/66): use github actions
+
+### [1.8.6](https://github.com/magiconair/properties/tree/v1.8.6) - 23 Feb 2022
+
+ * [PR #57](https://github.com/magiconair/properties/pull/57): Fix "unreachable code" lint error
+
+   Thanks to [@ellie](https://github.com/ellie) for the patch.
+
+ * [PR #63](https://github.com/magiconair/properties/pull/63): Make TestMustGetParsedDuration backwards compatible
+
+   This patch ensures that the `TestMustGetParsedDuration` still works with `go1.3` to make the
+   author happy until it affects real users.
+
+   Thanks to [@maage](https://github.com/maage) for the patch.
+
+### [1.8.5](https://github.com/magiconair/properties/tree/v1.8.5) - 24 Mar 2021
+
+ * [PR #55](https://github.com/magiconair/properties/pull/55): Fix: Encoding Bug in Comments
+
+   When reading comments \ are loaded correctly, but when writing they are then
+   replaced by \\. This leads to wrong comments when writing and reading multiple times.
+
+   Thanks to [@doxsch](https://github.com/doxsch) for the patch.
+
+### [1.8.4](https://github.com/magiconair/properties/tree/v1.8.4) - 23 Sep 2020
+
+ * [PR #50](https://github.com/magiconair/properties/pull/50): enhance error message for circular references
+
+   Thanks to [@sriv](https://github.com/sriv) for the patch.
+
+### [1.8.3](https://github.com/magiconair/properties/tree/v1.8.3) - 14 Sep 2020
+
+ * [PR #49](https://github.com/magiconair/properties/pull/49): Include the key in error message causing the circular reference
+
+   The change is to include the key in the error message which is causing the circular
+   reference when parsing/loading the properties files.
+
+   Thanks to [@haroon-sheikh](https://github.com/haroon-sheikh) for the patch.
+
+### [1.8.2](https://github.com/magiconair/properties/tree/v1.8.2) - 25 Aug 2020
+
+ * [PR #36](https://github.com/magiconair/properties/pull/36): Escape backslash on write
+
+   This patch ensures that backslashes are escaped on write. Existing applications which
+   rely on the old behavior may need to be updated.
+
+   Thanks to [@apesternikov](https://github.com/apesternikov) for the patch.
+
+ * [PR #42](https://github.com/magiconair/properties/pull/42): Made Content-Type check whitespace agnostic in LoadURL()
+
+   Thanks to [@aliras1](https://github.com/aliras1) for the patch.
+
+ * [PR #41](https://github.com/magiconair/properties/pull/41): Make key/value separator configurable on Write()
+
+   Thanks to [@mkjor](https://github.com/mkjor) for the patch.
+
+ * [PR #40](https://github.com/magiconair/properties/pull/40): Add method to return a sorted list of keys
+
+   Thanks to [@mkjor](https://github.com/mkjor) for the patch.
+
+### [1.8.1](https://github.com/magiconair/properties/tree/v1.8.1) - 10 May 2019
+
+ * [PR #35](https://github.com/magiconair/properties/pull/35): Close body always after request
+
+   This patch ensures that in `LoadURL` the response body is always closed.
+
+   Thanks to [@liubog2008](https://github.com/liubog2008) for the patch.
+
+### [1.8](https://github.com/magiconair/properties/tree/v1.8) - 15 May 2018
+
+ * [PR #26](https://github.com/magiconair/properties/pull/26): Disable expansion during loading
+
+   This adds the option to disable property expansion during loading.
+
+   Thanks to [@kmala](https://github.com/kmala) for the patch.
+
+### [1.7.6](https://github.com/magiconair/properties/tree/v1.7.6) - 14 Feb 2018
+
+ * [PR #29](https://github.com/magiconair/properties/pull/29): Reworked expansion logic to handle more complex cases.
+
+   See PR for an example.
+
+   Thanks to [@yobert](https://github.com/yobert) for the fix.
+
+### [1.7.5](https://github.com/magiconair/properties/tree/v1.7.5) - 13 Feb 2018
+
+ * [PR #28](https://github.com/magiconair/properties/pull/28): Support duplicate expansions in the same value
+
+   Values which expand the same key multiple times (e.g. `key=${a} ${a}`) will no longer fail
+   with a `circular reference error`.
+
+   Thanks to [@yobert](https://github.com/yobert) for the fix.
+
+### [1.7.4](https://github.com/magiconair/properties/tree/v1.7.4) - 31 Oct 2017
+
+ * [Issue #23](https://github.com/magiconair/properties/issues/23): Ignore blank lines with whitespaces
+
+ * [PR #24](https://github.com/magiconair/properties/pull/24): Update keys when DisableExpansion is enabled
+
+   Thanks to [@mgurov](https://github.com/mgurov) for the fix.
+
+### [1.7.3](https://github.com/magiconair/properties/tree/v1.7.3) - 10 Jul 2017
+
+ * [Issue #17](https://github.com/magiconair/properties/issues/17): Add [SetValue()](http://godoc.org/github.com/magiconair/properties#Properties.SetValue) method to set values generically
+ * [Issue #22](https://github.com/magiconair/properties/issues/22): Add [LoadMap()](http://godoc.org/github.com/magiconair/properties#LoadMap) function to load properties from a string map
+
+### [1.7.2](https://github.com/magiconair/properties/tree/v1.7.2) - 20 Mar 2017
+
+ * [Issue #15](https://github.com/magiconair/properties/issues/15): Drop gocheck dependency
+ * [PR #21](https://github.com/magiconair/properties/pull/21): Add [Map()](http://godoc.org/github.com/magiconair/properties#Properties.Map) and [FilterFunc()](http://godoc.org/github.com/magiconair/properties#Properties.FilterFunc)
+
+### [1.7.1](https://github.com/magiconair/properties/tree/v1.7.1) - 13 Jan 2017
+
+ * [Issue #14](https://github.com/magiconair/properties/issues/14): Decouple TestLoadExpandedFile from `$USER`
+ * [PR #12](https://github.com/magiconair/properties/pull/12): Load from files and URLs
+ * [PR #16](https://github.com/magiconair/properties/pull/16): Keep gofmt happy
+ * [PR #18](https://github.com/magiconair/properties/pull/18): Fix Delete() function
+
+### [1.7.0](https://github.com/magiconair/properties/tree/v1.7.0) - 20 Mar 2016
+
+ * [Issue #10](https://github.com/magiconair/properties/issues/10): Add [LoadURL,LoadURLs,MustLoadURL,MustLoadURLs](http://godoc.org/github.com/magiconair/properties#LoadURL) method to load properties from a URL.
+ * [Issue #11](https://github.com/magiconair/properties/issues/11): Add [LoadString,MustLoadString](http://godoc.org/github.com/magiconair/properties#LoadString) method to load properties from a UTF8 string.
+ * [PR #8](https://github.com/magiconair/properties/pull/8): Add [MustFlag](http://godoc.org/github.com/magiconair/properties#Properties.MustFlag) method to provide overrides via command line flags. (@pascaldekloe)
+
+### [1.6.0](https://github.com/magiconair/properties/tree/v1.6.0) - 11 Dec 2015
+
+ * Add [Decode](http://godoc.org/github.com/magiconair/properties#Properties.Decode) method to populate struct from properties via tags.
+
+### [1.5.6](https://github.com/magiconair/properties/tree/v1.5.6) - 18 Oct 2015
+
+ * Vendored in gopkg.in/check.v1
+
+### [1.5.5](https://github.com/magiconair/properties/tree/v1.5.5) - 31 Jul 2015
+
+ * [PR #6](https://github.com/magiconair/properties/pull/6): Add [Delete](http://godoc.org/github.com/magiconair/properties#Properties.Delete) method to remove keys including comments. (@gerbenjacobs)
+
+### [1.5.4](https://github.com/magiconair/properties/tree/v1.5.4) - 23 Jun 2015
+
+ * [Issue #5](https://github.com/magiconair/properties/issues/5): Allow disabling of property expansion [DisableExpansion](http://godoc.org/github.com/magiconair/properties#Properties.DisableExpansion). When property expansion is disabled Properties become a simple key/value store and don't check for circular references.
+
+### [1.5.3](https://github.com/magiconair/properties/tree/v1.5.3) - 02 Jun 2015
+
+ * [Issue #4](https://github.com/magiconair/properties/issues/4): Maintain key order in [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) and [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp)
+
+### [1.5.2](https://github.com/magiconair/properties/tree/v1.5.2) - 10 Apr 2015
+
+ * [Issue #3](https://github.com/magiconair/properties/issues/3): Don't print comments in [WriteComment()](http://godoc.org/github.com/magiconair/properties#Properties.WriteComment) if they are all empty
+ * Add clickable links to README
+
+### [1.5.1](https://github.com/magiconair/properties/tree/v1.5.1) - 08 Dec 2014
+
+ * Added [GetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.GetParsedDuration) and [MustGetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.MustGetParsedDuration) for values specified compatible with
+   [time.ParseDuration()](http://golang.org/pkg/time/#ParseDuration).
+
+### [1.5.0](https://github.com/magiconair/properties/tree/v1.5.0) - 18 Nov 2014
+
+ * Added support for single and multi-line comments (reading, writing and updating)
+ * The order of keys is now preserved
+ * Calling [Set()](http://godoc.org/github.com/magiconair/properties#Properties.Set) with an empty key now silently ignores the call and does not create a new entry
+ * Added a [MustSet()](http://godoc.org/github.com/magiconair/properties#Properties.MustSet) method
+ * Migrated test library from launchpad.net/gocheck to [gopkg.in/check.v1](http://gopkg.in/check.v1)
+
+### [1.4.2](https://github.com/magiconair/properties/tree/v1.4.2) - 15 Nov 2014
+
+ * [Issue #2](https://github.com/magiconair/properties/issues/2): Fixed goroutine leak in parser which created two lexers but cleaned up only one
+
+### [1.4.1](https://github.com/magiconair/properties/tree/v1.4.1) - 13 Nov 2014
+
+ * [Issue #1](https://github.com/magiconair/properties/issues/1): Fixed bug in Keys() method which returned an empty string
+
+### [1.4.0](https://github.com/magiconair/properties/tree/v1.4.0) - 23 Sep 2014
+
+ * Added [Keys()](http://godoc.org/github.com/magiconair/properties#Properties.Keys) to get the keys
+ * Added [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp) and [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) to get a subset of the properties
+
+### [1.3.0](https://github.com/magiconair/properties/tree/v1.3.0) - 18 Mar 2014
+
+* Added support for time.Duration
+* Made MustXXX() failure behavior configurable (log.Fatal, panic, custom)
+* Changed default of MustXXX() failure from panic to log.Fatal
+
+### [1.2.0](https://github.com/magiconair/properties/tree/v1.2.0) - 05 Mar 2014
+
+* Added MustGet... functions
+* Added support for int and uint with range checks on 32 bit platforms
+
+### [1.1.0](https://github.com/magiconair/properties/tree/v1.1.0) - 20 Jan 2014
+
+* Renamed from goproperties to properties
+* Added support for expansion of environment vars in
+  filenames and value expressions
+* Fixed bug where value expressions were not at the
+  start of the string
+
+### [1.0.0](https://github.com/magiconair/properties/tree/v1.0.0) - 7 Jan 2014
+
+* Initial release
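For orientation, a minimal usage sketch of the features this vendored changelog refers to: recursive `${key}` expansion, the typed `GetParsedDuration()` getter (v1.5.1), and the tag-driven `Decode()` method (v1.6.0). This is illustrative only, assumes the documented magiconair/properties API, and is not part of the patch itself:

```go
// Illustrative sketch only (not part of this patch): exercises the
// magiconair/properties features referenced in the changelog above.
package main

import (
	"fmt"
	"time"

	"github.com/magiconair/properties"
)

func main() {
	// Spring-style recursive expansion: ${host} is resolved when the value is read.
	p := properties.MustLoadString(
		"host = example.com\nurl = https://${host}/api\ntimeout = 2s\n")

	fmt.Println(p.MustGetString("url")) // https://example.com/api

	// Typed getter with a fallback default, added in v1.5.1.
	fmt.Println(p.GetParsedDuration("timeout", 5*time.Second)) // 2s

	// Decode (v1.6.0) populates a struct from properties via struct tags.
	var cfg struct {
		Host    string        `properties:"host"`
		Timeout time.Duration `properties:"timeout,default=5s"`
	}
	if err := p.Decode(&cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {Host:example.com Timeout:2s}
}
```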
Deprecated and ignored: use --forbid-focus-container instead") + a.Flags.Var(&config.ForbidFocus, "forbid-focus-container", "trigger a warning for ginkgo focus containers like FDescribe, FContext, FWhen or FIt; default = false.") + a.Flags.Var(&config.ForbidSpecPollution, "forbid-spec-pollution", "trigger a warning for variable assignments in ginkgo containers like Describe, Context and When, instead of in BeforeEach(); default = false.") + a.Flags.Var(&config.ForceSucceedForFuncs, "force-succeed", "force using the Succeed matcher for error functions, and the HaveOccurred matcher for non-function error values") +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return a } diff --git a/vendor/github.com/nunnatsa/ginkgolinter/doc.go b/vendor/github.com/nunnatsa/ginkgolinter/doc.go index 2a935e9b34..de1225acf3 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/doc.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/doc.go @@ -94,7 +94,11 @@ For example: Eventually(func() bool { return true }, time.Second*10, 500*time.Millisecond).ProbeEvery(time.Millisecond * 500).Should(BeTrue()) * async timing interval: non-time.Duration intervals [Style] +<<<<<<< HEAD gomega supports a few formats for timeout and polling intervals, when using the old format (the last two parameters of Eventually and Consistently): +======= +gomega supports a few formats for timeout and polling intervals, when using the old format (the last two parameters of Eventually and Consistently): +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) * time.Duration * any kind of numeric value, as number of seconds * duration string like "12s" diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actual.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actual.go index 5bd6dd6e7e..b278afac52 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actual.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actual.go @@ -21,8 +21,18 @@ type Actual struct { actualOffset int } +<<<<<<< HEAD func New(origExpr, cloneExpr *ast.CallExpr, orig *ast.CallExpr, clone *ast.CallExpr, pass *analysis.Pass, timePkg string, info *gomegahandler.GomegaBasicInfo) (*Actual, bool) { arg, actualOffset := getActualArgPayload(orig, clone, pass, info) +======= +func New(origExpr, cloneExpr *ast.CallExpr, orig *ast.CallExpr, clone *ast.CallExpr, pass *analysis.Pass, handler gomegahandler.Handler, timePkg string, errMethodExists bool) (*Actual, bool) { + funcName, ok := handler.GetActualFuncName(orig) + if !ok { + return nil, false + } + + arg, actualOffset := getActualArgPayload(orig, clone, pass, funcName, errMethodExists) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if arg == nil { return nil, false } @@ -40,7 +50,11 @@ func New(origExpr, cloneExpr *ast.CallExpr, orig *ast.CallExpr, clone *ast.CallE isTuple = tpl.Len() > 1 } +<<<<<<< HEAD isAsyncExpr := gomegainfo.IsAsyncActualMethod(info.MethodName) +======= + isAsyncExpr := gomegainfo.IsAsyncActualMethod(funcName) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var asyncArg *AsyncArg if isAsyncExpr { diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actualarg.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actualarg.go index 5b6cfbbc44..efe1ee1f0d 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actualarg.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actualarg.go @@ -8,7 
+8,10 @@ import ( "golang.org/x/tools/go/analysis" "github.com/nunnatsa/ginkgolinter/internal/expression/value" +<<<<<<< HEAD "github.com/nunnatsa/ginkgolinter/internal/gomegahandler" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/nunnatsa/ginkgolinter/internal/gomegainfo" "github.com/nunnatsa/ginkgolinter/internal/reverseassertion" ) @@ -41,15 +44,24 @@ func (a ArgType) Is(val ArgType) bool { return a&val != 0 } +<<<<<<< HEAD func getActualArgPayload(origActualExpr, actualExprClone *ast.CallExpr, pass *analysis.Pass, info *gomegahandler.GomegaBasicInfo) (ArgPayload, int) { origArgExpr, argExprClone, actualOffset, isGomegaExpr := getActualArg(origActualExpr, actualExprClone, info.MethodName, pass) +======= +func getActualArgPayload(origActualExpr, actualExprClone *ast.CallExpr, pass *analysis.Pass, actualMethodName string, errMethodExists bool) (ArgPayload, int) { + origArgExpr, argExprClone, actualOffset, isGomegaExpr := getActualArg(origActualExpr, actualExprClone, actualMethodName, pass) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !isGomegaExpr { return nil, 0 } var arg ArgPayload +<<<<<<< HEAD if info.HasErrorMethod { +======= + if errMethodExists { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) arg = &ErrorMethodPayload{} } else if value.IsExprError(pass, origArgExpr) { arg = newErrPayload(origArgExpr, argExprClone, pass) diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/expression.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/expression.go index 6e8e0db6ac..86db59cfce 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/expression.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/expression.go @@ -27,8 +27,12 @@ type GomegaExpression struct { origAssertionFuncName string actualFuncName string +<<<<<<< HEAD isAsync bool isUsingGomegaVar bool +======= + isAsync bool +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) actual *actual.Actual matcher *matcher.Matcher @@ -37,8 +41,13 @@ type GomegaExpression struct { } func New(origExpr *ast.CallExpr, pass *analysis.Pass, handler gomegahandler.Handler, timePkg string) (*GomegaExpression, bool) { +<<<<<<< HEAD info, ok := handler.GetGomegaBasicInfo(origExpr) if !ok || !gomegainfo.IsActualMethod(info.MethodName) { +======= + actualMethodName, ok := handler.GetActualFuncName(origExpr) + if !ok || !gomegainfo.IsActualMethod(actualMethodName) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil, false } @@ -46,14 +55,24 @@ func New(origExpr *ast.CallExpr, pass *analysis.Pass, handler gomegahandler.Hand if !ok || !gomegainfo.IsAssertionFunc(origSel.Sel.Name) { return &GomegaExpression{ orig: origExpr, +<<<<<<< HEAD actualFuncName: info.MethodName, +======= + actualFuncName: actualMethodName, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }, true } exprClone := astcopy.CallExpr(origExpr) selClone := exprClone.Fun.(*ast.SelectorExpr) +<<<<<<< HEAD origActual := handler.GetActualExpr(origSel) +======= + errMethodExists := false + + origActual := handler.GetActualExpr(origSel, &errMethodExists) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if origActual == nil { return nil, false } @@ -63,7 +82,11 @@ func New(origExpr *ast.CallExpr, pass *analysis.Pass, handler gomegahandler.Hand return nil, false } +<<<<<<< HEAD actl, ok := actual.New(origExpr, exprClone, origActual, actualClone, pass, timePkg, info) +======= + actl, ok := actual.New(origExpr, exprClone, 
origActual, actualClone, pass, handler, timePkg, errMethodExists) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !ok { return nil, false } @@ -88,10 +111,16 @@ func New(origExpr *ast.CallExpr, pass *analysis.Pass, handler gomegahandler.Hand assertionFuncName: origSel.Sel.Name, origAssertionFuncName: origSel.Sel.Name, +<<<<<<< HEAD actualFuncName: info.MethodName, isAsync: actl.IsAsync(), isUsingGomegaVar: info.UseGomegaVar, +======= + actualFuncName: actualMethodName, + + isAsync: actl.IsAsync(), +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) actual: actl, matcher: mtchr, @@ -135,10 +164,13 @@ func (e *GomegaExpression) IsAsync() bool { return e.isAsync } +<<<<<<< HEAD func (e *GomegaExpression) IsUsingGomegaVar() bool { return e.isUsingGomegaVar } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (e *GomegaExpression) ReverseAssertionFuncLogic() { assertionFunc := e.clone.Fun.(*ast.SelectorExpr).Sel newName := reverseassertion.ChangeAssertionLogic(assertionFunc.Name) diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/matcher/matcher.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/matcher/matcher.go index 7a983cc9e8..5444ea1505 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/matcher/matcher.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/matcher/matcher.go @@ -41,13 +41,22 @@ func New(origMatcher, matcherClone *ast.CallExpr, pass *analysis.Pass, handler g reverse := false var assertFuncName string for { +<<<<<<< HEAD info, ok := handler.GetGomegaBasicInfo(origMatcher) +======= + ok := false + assertFuncName, ok = handler.GetActualFuncName(origMatcher) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !ok { return nil, false } +<<<<<<< HEAD if info.MethodName != "Not" { assertFuncName = info.MethodName +======= + if assertFuncName != "Not" { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) break } diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/value/value.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/value/value.go index ba74722d27..ef855830f7 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/value/value.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/value/value.go @@ -194,10 +194,13 @@ func IsExprError(pass *analysis.Pass, expr ast.Expr) bool { return interfaces.ImplementsError(actualArgType) case *gotypes.Pointer: +<<<<<<< HEAD if interfaces.ImplementsError(t) { return true } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if tt, ok := t.Elem().(*gotypes.Named); ok { return interfaces.ImplementsError(tt) } diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/ginkgohandler/handling.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/ginkgohandler/handling.go index 322bbc4533..ead016b18c 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/ginkgohandler/handling.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/ginkgohandler/handling.go @@ -20,11 +20,19 @@ const ( func handleGinkgoSpecs(expr ast.Expr, config types.Config, pass *analysis.Pass, ginkgoHndlr Handler) bool { goDeeper := false if exp, ok := expr.(*ast.CallExpr); ok { +<<<<<<< HEAD if config.ForbidFocus && checkFocusContainer(pass, ginkgoHndlr, exp) { goDeeper = true } if config.ForbidSpecPollution && checkAssignmentsInContainer(pass, ginkgoHndlr, exp) { +======= + if bool(config.ForbidFocus) && checkFocusContainer(pass, ginkgoHndlr, exp) { + 
goDeeper = true + } + + if bool(config.ForbidSpecPollution) && checkAssignmentsInContainer(pass, ginkgoHndlr, exp) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) goDeeper = true } } diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/dothandler.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/dothandler.go index 8ab87c76e9..50a90eacc3 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/dothandler.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/dothandler.go @@ -14,6 +14,7 @@ type dotHandler struct { pass *analysis.Pass } +<<<<<<< HEAD // GetGomegaBasicInfo returns the name of the gomega function, e.g. `Expect` + some additional info func (h dotHandler) GetGomegaBasicInfo(expr *ast.CallExpr) (*GomegaBasicInfo, bool) { info := &GomegaBasicInfo{} @@ -42,6 +43,26 @@ func (h dotHandler) GetGomegaBasicInfo(expr *ast.CallExpr) (*GomegaBasicInfo, bo return nil, false } } +======= +// GetActualFuncName returns the name of the gomega function, e.g. `Expect` +func (h dotHandler) GetActualFuncName(expr *ast.CallExpr) (string, bool) { + switch actualFunc := expr.Fun.(type) { + case *ast.Ident: + return actualFunc.Name, true + case *ast.SelectorExpr: + if h.isGomegaVar(actualFunc.X) { + return actualFunc.Sel.Name, true + } + + if x, ok := actualFunc.X.(*ast.CallExpr); ok { + return h.GetActualFuncName(x) + } + + case *ast.CallExpr: + return h.GetActualFuncName(actualFunc) + } + return "", false +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // ReplaceFunction replaces the function with another one, for fix suggestions @@ -61,7 +82,11 @@ func (dotHandler) GetNewWrapperMatcher(name string, existing *ast.CallExpr) *ast } } +<<<<<<< HEAD func (h dotHandler) GetActualExpr(assertionFunc *ast.SelectorExpr) *ast.CallExpr { +======= +func (h dotHandler) GetActualExpr(assertionFunc *ast.SelectorExpr, errMethodExists *bool) *ast.CallExpr { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) actualExpr, ok := assertionFunc.X.(*ast.CallExpr) if !ok { return nil @@ -76,7 +101,15 @@ func (h dotHandler) GetActualExpr(assertionFunc *ast.SelectorExpr) *ast.CallExpr return actualExpr } } else { +<<<<<<< HEAD return h.GetActualExpr(fun) +======= + if fun.Sel.Name == "Error" { + *errMethodExists = true + } + + return h.GetActualExpr(fun, errMethodExists) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } return nil diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/handler.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/handler.go index 881ec87896..970c7c4e13 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/handler.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/handler.go @@ -14,23 +14,34 @@ const ( // in imported with "." name, custom name or without any name. type Handler interface { // GetActualFuncName returns the name of the gomega function, e.g. 
`Expect` +<<<<<<< HEAD GetGomegaBasicInfo(*ast.CallExpr) (*GomegaBasicInfo, bool) // ReplaceFunction replaces the function with another one, for fix suggestions ReplaceFunction(*ast.CallExpr, *ast.Ident) GetActualExpr(assertionFunc *ast.SelectorExpr) *ast.CallExpr +======= + GetActualFuncName(*ast.CallExpr) (string, bool) + // ReplaceFunction replaces the function with another one, for fix suggestions + ReplaceFunction(*ast.CallExpr, *ast.Ident) + + GetActualExpr(assertionFunc *ast.SelectorExpr, errMethodExists *bool) *ast.CallExpr +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) GetActualExprClone(origFunc, funcClone *ast.SelectorExpr) *ast.CallExpr GetNewWrapperMatcher(name string, existing *ast.CallExpr) *ast.CallExpr } +<<<<<<< HEAD type GomegaBasicInfo struct { MethodName string UseGomegaVar bool HasErrorMethod bool } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // GetGomegaHandler returns a gomega handler according to the way gomega was imported in the specific file func GetGomegaHandler(file *ast.File, pass *analysis.Pass) Handler { for _, imp := range file.Imports { diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/namedhandler.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/namedhandler.go index 61c471f4c2..62e27e15b8 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/namedhandler.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/namedhandler.go @@ -18,6 +18,7 @@ type nameHandler struct { pass *analysis.Pass } +<<<<<<< HEAD // GetGomegaBasicInfo returns the name of the gomega function, e.g. `Expect` + some additional info func (g nameHandler) GetGomegaBasicInfo(expr *ast.CallExpr) (*GomegaBasicInfo, bool) { info := &GomegaBasicInfo{} @@ -51,6 +52,30 @@ func (g nameHandler) GetGomegaBasicInfo(expr *ast.CallExpr) (*GomegaBasicInfo, b return nil, false } } +======= +// GetActualFuncName returns the name of the gomega function, e.g. 
`Expect` +func (g nameHandler) GetActualFuncName(expr *ast.CallExpr) (string, bool) { + selector, ok := expr.Fun.(*ast.SelectorExpr) + if !ok { + return "", false + } + + switch x := selector.X.(type) { + case *ast.Ident: + if x.Name != g.name { + if !g.isGomegaVar(x) { + return "", false + } + } + + return selector.Sel.Name, true + + case *ast.CallExpr: + return g.GetActualFuncName(x) + } + + return "", false +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // ReplaceFunction replaces the function with another one, for fix suggestions @@ -62,7 +87,11 @@ func (g nameHandler) isGomegaVar(x ast.Expr) bool { return gomegainfo.IsGomegaVar(x, g.pass) } +<<<<<<< HEAD func (g nameHandler) GetActualExpr(assertionFunc *ast.SelectorExpr) *ast.CallExpr { +======= +func (g nameHandler) GetActualExpr(assertionFunc *ast.SelectorExpr, errMethodExists *bool) *ast.CallExpr { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) actualExpr, ok := assertionFunc.X.(*ast.CallExpr) if !ok { return nil @@ -80,7 +109,14 @@ func (g nameHandler) GetActualExpr(assertionFunc *ast.SelectorExpr) *ast.CallExp return actualExpr } } else { +<<<<<<< HEAD return g.GetActualExpr(fun) +======= + if fun.Sel.Name == "Error" { + *errMethodExists = true + } + return g.GetActualExpr(fun, errMethodExists) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } return nil diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/asyncfunccallrule.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/asyncfunccallrule.go index 307cd2d125..a7b5bb0003 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/asyncfunccallrule.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/asyncfunccallrule.go @@ -19,11 +19,19 @@ const valueInEventually = "use a function call in %[1]s. 
This actually checks no type AsyncFuncCallRule struct{} func (r AsyncFuncCallRule) isApplied(gexp *expression.GomegaExpression, config types.Config) bool { +<<<<<<< HEAD if config.SuppressAsync || !gexp.IsAsync() { return false } if asyncArg := gexp.GetAsyncActualArg(); asyncArg != nil { +======= + if bool(config.SuppressAsync) || !gexp.IsAsync() { + return false + } + + if asyncArg := gexp.GetAsyncActualArg(); asyncArg != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return !asyncArg.IsValid() } diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/asynctimeintervalsrule.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/asynctimeintervalsrule.go index ca5c326195..e754be8f0c 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/asynctimeintervalsrule.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/asynctimeintervalsrule.go @@ -20,7 +20,11 @@ const ( type AsyncTimeIntervalsRule struct{} func (r AsyncTimeIntervalsRule) isApplied(gexp *expression.GomegaExpression, config types.Config) bool { +<<<<<<< HEAD return !config.SuppressAsync && config.ValidateAsyncIntervals && gexp.IsAsync() +======= + return !bool(config.SuppressAsync) && bool(config.ValidateAsyncIntervals) && gexp.IsAsync() +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (r AsyncTimeIntervalsRule) Apply(gexp *expression.GomegaExpression, config types.Config, reportBuilder *reports.Builder) bool { diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/equaldifferenttypesrule.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/equaldifferenttypesrule.go index 4b6eafdda0..437b8a199d 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/equaldifferenttypesrule.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/equaldifferenttypesrule.go @@ -14,7 +14,11 @@ const compareDifferentTypes = "use %[1]s with different types: Comparing %[2]s w type EqualDifferentTypesRule struct{} func (r EqualDifferentTypesRule) isApplied(config types.Config) bool { +<<<<<<< HEAD return !config.SuppressTypeCompare +======= + return !bool(config.SuppressTypeCompare) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (r EqualDifferentTypesRule) Apply(gexp *expression.GomegaExpression, config types.Config, reportBuilder *reports.Builder) bool { diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/equalnilrule.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/equalnilrule.go index f27dfb0d88..275b226910 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/equalnilrule.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/equalnilrule.go @@ -12,7 +12,11 @@ import ( type EqualNilRule struct{} func (r EqualNilRule) isApplied(gexp *expression.GomegaExpression, config types.Config) bool { +<<<<<<< HEAD return !config.SuppressNil && +======= + return !bool(config.SuppressNil) && +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) gexp.MatcherTypeIs(matcher.EqualValueMatcherType) } diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/havelen0.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/havelen0.go index 159fb615a0..25ab42f39a 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/havelen0.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/havelen0.go @@ -10,7 +10,11 @@ import ( type HaveLen0 struct{} func (r *HaveLen0) isApplied(gexp *expression.GomegaExpression, config types.Config) bool { +<<<<<<< HEAD 
return gexp.MatcherTypeIs(matcher.HaveLenZeroMatcherType) && !config.AllowHaveLen0 +======= + return gexp.MatcherTypeIs(matcher.HaveLenZeroMatcherType) && !bool(config.AllowHaveLen0) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (r *HaveLen0) Apply(gexp *expression.GomegaExpression, config types.Config, reportBuilder *reports.Builder) bool { diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/haveoccurredrule.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/haveoccurredrule.go index 317e22ed3d..0067c812ea 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/haveoccurredrule.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/haveoccurredrule.go @@ -24,7 +24,11 @@ func (r HaveOccurredRule) Apply(gexp *expression.GomegaExpression, config types. return true } +<<<<<<< HEAD if config.ForceSucceedForFuncs && gexp.GetActualArg().(*actual.ErrPayload).IsFunc() { +======= + if bool(config.ForceSucceedForFuncs) && gexp.GetActualArg().(*actual.ErrPayload).IsFunc() { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) gexp.ReverseAssertionFuncLogic() gexp.SetMatcherSucceed() reportBuilder.AddIssue(true, "prefer using the Succeed matcher for error function, instead of HaveOccurred") diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/nilcomparerule.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/nilcomparerule.go index 6677dce3bb..faa3ff2323 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/nilcomparerule.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/nilcomparerule.go @@ -42,9 +42,15 @@ func (r NilCompareRule) isApplied(gexp *expression.GomegaExpression, config type return false, false } +<<<<<<< HEAD isErr := actl.IsError() && !config.SuppressErr if !isErr && config.SuppressNil { +======= + isErr := actl.IsError() && !bool(config.SuppressErr) + + if !isErr && bool(config.SuppressNil) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return isErr, false } diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/succeedrule.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/succeedrule.go index 45a8d948b4..f18c39991b 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/succeedrule.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/succeedrule.go @@ -28,7 +28,11 @@ func (r SucceedRule) Apply(gexp *expression.GomegaExpression, config types.Confi return true } +<<<<<<< HEAD if config.ForceSucceedForFuncs && !gexp.GetActualArg().(*actual.ErrPayload).IsFunc() { +======= + if bool(config.ForceSucceedForFuncs) && !gexp.GetActualArg().(*actual.ErrPayload).IsFunc() { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) gexp.ReverseAssertionFuncLogic() gexp.SetMatcherHaveOccurred() diff --git a/vendor/github.com/nunnatsa/ginkgolinter/types/boolean.go b/vendor/github.com/nunnatsa/ginkgolinter/types/boolean.go new file mode 100644 index 0000000000..be510c4e95 --- /dev/null +++ b/vendor/github.com/nunnatsa/ginkgolinter/types/boolean.go @@ -0,0 +1,32 @@ +package types + +import ( + "errors" + "strings" +) + +// Boolean is a bool, implementing the flag.Value interface, to be used as a flag var. 
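+// Set accepts only the strings "true" and "false" (case-insensitive); any other value returns an error.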
+type Boolean bool + +func (b *Boolean) Set(value string) error { + if b == nil { + return errors.New("trying to set nil parameter") + } + switch strings.ToLower(value) { + case "true": + *b = true + case "false": + *b = false + default: + return errors.New(value + " is not a Boolean value") + + } + return nil +} + +func (b Boolean) String() string { + if b { + return "true" + } + return "false" +} diff --git a/vendor/github.com/nunnatsa/ginkgolinter/types/config.go b/vendor/github.com/nunnatsa/ginkgolinter/types/config.go index 81a9ebe327..6c81f365b2 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/types/config.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/types/config.go @@ -17,6 +17,7 @@ const ( ) type Config struct { +<<<<<<< HEAD SuppressLen bool SuppressNil bool SuppressErr bool @@ -33,6 +34,24 @@ type Config struct { func (s *Config) AllTrue() bool { return s.SuppressLen && s.SuppressNil && s.SuppressErr && s.SuppressCompare && s.SuppressAsync && !s.ForbidFocus +======= + SuppressLen Boolean + SuppressNil Boolean + SuppressErr Boolean + SuppressCompare Boolean + SuppressAsync Boolean + ForbidFocus Boolean + SuppressTypeCompare Boolean + AllowHaveLen0 Boolean + ForceExpectTo Boolean + ValidateAsyncIntervals Boolean + ForbidSpecPollution Boolean + ForceSucceedForFuncs Boolean +} + +func (s *Config) AllTrue() bool { + return bool(s.SuppressLen && s.SuppressNil && s.SuppressErr && s.SuppressCompare && s.SuppressAsync && !s.ForbidFocus) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (s *Config) Clone() Config { diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 1448439b7f..14a88daef8 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -45,7 +45,11 @@ func ResponseFormat(h http.Header) Format { mediatype, params, err := mime.ParseMediaType(ct) if err != nil { +<<<<<<< HEAD return FmtUnknown +======= + return fmtUnknown +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } const textType = "text/plain" @@ -53,6 +57,7 @@ func ResponseFormat(h http.Header) Format { switch mediatype { case ProtoType: if p, ok := params["proto"]; ok && p != ProtoProtocol { +<<<<<<< HEAD return FmtUnknown } if e, ok := params["encoding"]; ok && e != "delimited" { @@ -68,6 +73,23 @@ func ResponseFormat(h http.Header) Format { } return FmtUnknown +======= + return fmtUnknown + } + if e, ok := params["encoding"]; ok && e != "delimited" { + return fmtUnknown + } + return fmtProtoDelim + + case textType: + if v, ok := params["version"]; ok && v != TextVersion { + return fmtUnknown + } + return fmtText + } + + return fmtUnknown +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // NewDecoder returns a new decoder based on the given input format. 
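For orientation, the two `expfmt` entry points touched in these hunks compose: `ResponseFormat` maps a scrape response's `Content-Type` header to a `Format`, and `NewDecoder` returns the parser for that format. A minimal sketch of the round trip, assuming only a reachable Prometheus-style `/metrics` endpoint (the URL below is a placeholder, not part of this patch):

```go
package main

import (
	"fmt"
	"net/http"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	// Placeholder endpoint; any server exposing Prometheus metrics works.
	resp, err := http.Get("http://localhost:2112/metrics")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// ResponseFormat inspects the Content-Type header and returns a Format.
	format := expfmt.ResponseFormat(resp.Header)

	// NewDecoder picks the text or protobuf parser matching that format.
	dec := expfmt.NewDecoder(resp.Body, format)
	for {
		var mf dto.MetricFamily
		if err := dec.Decode(&mf); err != nil {
			break // io.EOF once every metric family has been read.
		}
		fmt.Println(mf.GetName())
	}
}
```

Each `Decode` call yields one `dto.MetricFamily` until the stream is exhausted, so the negotiated format never has to be handled explicitly by the caller.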
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index d7f3d76f55..c0237d02da 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -68,7 +68,11 @@ func Negotiate(h http.Header) Format { if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { switch Format(escapeParam) { case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: +<<<<<<< HEAD escapingScheme = Format("; escaping=" + escapeParam) +======= + escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam)) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: // If the escaping parameter is unknown, ignore it. } @@ -77,6 +81,7 @@ func Negotiate(h http.Header) Format { if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": +<<<<<<< HEAD return FmtProtoDelim + escapingScheme case "text": return FmtProtoText + escapingScheme @@ -89,6 +94,20 @@ func Negotiate(h http.Header) Format { } } return FmtText + escapingScheme +======= + return fmtProtoDelim + escapingScheme + case "text": + return fmtProtoText + escapingScheme + case "compact-text": + return fmtProtoCompact + escapingScheme + } + } + if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { + return fmtText + escapingScheme + } + } + return fmtText + escapingScheme +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // NegotiateIncludingOpenMetrics works like Negotiate but includes @@ -101,7 +120,11 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { switch Format(escapeParam) { case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: +<<<<<<< HEAD escapingScheme = Format("; escaping=" + escapeParam) +======= + escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam)) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: // If the escaping parameter is unknown, ignore it. 
} @@ -110,6 +133,7 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": +<<<<<<< HEAD return FmtProtoDelim + escapingScheme case "text": return FmtProtoText + escapingScheme @@ -119,10 +143,22 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { return FmtText + escapingScheme +======= + return fmtProtoDelim + escapingScheme + case "text": + return fmtProtoText + escapingScheme + case "compact-text": + return fmtProtoCompact + escapingScheme + } + } + if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { + return fmtText + escapingScheme +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") { switch ver { case OpenMetricsVersion_1_0_0: +<<<<<<< HEAD return FmtOpenMetrics_1_0_0 + escapingScheme default: return FmtOpenMetrics_0_0_1 + escapingScheme @@ -130,6 +166,15 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { } } return FmtText + escapingScheme +======= + return fmtOpenMetrics_1_0_0 + escapingScheme + default: + return fmtOpenMetrics_0_0_1 + escapingScheme + } + } + } + return fmtText + escapingScheme +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // NewEncoder returns a new encoder based on content type negotiation. All diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index b26886560d..ff84a6ecab 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -15,7 +15,11 @@ package expfmt import ( +<<<<<<< HEAD "errors" +======= + "fmt" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "strings" "github.com/prometheus/common/model" @@ -32,15 +36,23 @@ type Format string // it on the wire, new content-type strings will have to be agreed upon and // added here. const ( +<<<<<<< HEAD TextVersion = "0.0.4" ProtoType = `application/vnd.google.protobuf` ProtoProtocol = `io.prometheus.client.MetricFamily` // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" +======= + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + protoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) OpenMetricsType = `application/openmetrics-text` OpenMetricsVersion_0_0_1 = "0.0.1" OpenMetricsVersion_1_0_0 = "1.0.0" +<<<<<<< HEAD // The Content-Type values for the different wire protocols. Do not do direct // comparisons to these constants, instead use the comparison functions. // Deprecated: Use expfmt.NewFormat(expfmt.TypeUnknown) instead. @@ -57,6 +69,18 @@ const ( FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` +======= + // The Content-Type values for the different wire protocols. Note that these + // values are now unexported. If code was relying on comparisons to these + // constants, instead use FormatType(). 
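+	// Callers outside this package obtain these values through NewFormat(FormatType).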
+ fmtUnknown Format = `` + fmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` + fmtProtoDelim Format = protoFmt + ` encoding=delimited` + fmtProtoText Format = protoFmt + ` encoding=text` + fmtProtoCompact Format = protoFmt + ` encoding=compact-text` + fmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` + fmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const ( @@ -86,6 +110,7 @@ const ( func NewFormat(t FormatType) Format { switch t { case TypeProtoCompact: +<<<<<<< HEAD return FmtProtoCompact case TypeProtoDelim: return FmtProtoDelim @@ -97,6 +122,19 @@ func NewFormat(t FormatType) Format { return FmtOpenMetrics_1_0_0 default: return FmtUnknown +======= + return fmtProtoCompact + case TypeProtoDelim: + return fmtProtoDelim + case TypeProtoText: + return fmtProtoText + case TypeTextPlain: + return fmtText + case TypeOpenMetrics: + return fmtOpenMetrics_1_0_0 + default: + return fmtUnknown +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -104,6 +142,7 @@ func NewFormat(t FormatType) Format { // specified version number. func NewOpenMetricsFormat(version string) (Format, error) { if version == OpenMetricsVersion_0_0_1 { +<<<<<<< HEAD return FmtOpenMetrics_0_0_1, nil } if version == OpenMetricsVersion_1_0_0 { @@ -133,6 +172,14 @@ func (f Format) WithEscapingScheme(s model.EscapingScheme) Format { } terms = append(terms, model.EscapingKey+"="+s.String()) return Format(strings.Join(terms, "; ")) +======= + return fmtOpenMetrics_0_0_1, nil + } + if version == OpenMetricsVersion_1_0_0 { + return fmtOpenMetrics_1_0_0, nil + } + return fmtUnknown, fmt.Errorf("unknown open metrics version string") +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // FormatType deduces an overall FormatType for the given format. diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index a21ed4ec1f..a29481261d 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -38,7 +38,11 @@ type EncoderOption func(*encoderOption) // WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder // to include _created lines (See +<<<<<<< HEAD // https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#counter-1). +======= +// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#counter-1). +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Created timestamps can improve the accuracy of series reset detection, but // come with a bandwidth cost. // @@ -102,7 +106,11 @@ func WithUnit() EncoderOption { // // - According to the OM specs, the `# UNIT` line is optional, but if populated, // the unit has to be present in the metric name as its suffix: +<<<<<<< HEAD // (see https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#unit). +======= +// (see https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#unit). 
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // However, in order to accommodate any potential scenario where such a change in the // metric name is not desirable, the users are here given the choice of either explicitly // opt in, in case they wish for the unit to be included in the output AND in the metric name @@ -152,8 +160,13 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") { compliantName = name[:len(name)-6] } +<<<<<<< HEAD if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, "_"+*in.Unit) { compliantName = compliantName + "_" + *in.Unit +======= + if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, fmt.Sprintf("_%s", *in.Unit)) { + compliantName = compliantName + fmt.Sprintf("_%s", *in.Unit) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Comments, first HELP, then TYPE. @@ -477,7 +490,11 @@ func writeOpenMetricsNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces, quoted. +<<<<<<< HEAD if !model.IsValidLegacyMetricName(name) { +======= + if !model.IsValidLegacyMetricName(model.LabelValue(name)) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) metricInsideBraces = true err := w.WriteByte(separator) written++ diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index 4b86434b33..a702476fb8 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -354,7 +354,11 @@ func writeNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces. +<<<<<<< HEAD if !model.IsValidLegacyMetricName(name) { +======= + if !model.IsValidLegacyMetricName(model.LabelValue(name)) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) metricInsideBraces = true err := w.WriteByte(separator) written++ @@ -498,7 +502,11 @@ func writeInt(w enhancedWriter, i int64) (int, error) { // writeName writes a string as-is if it complies with the legacy naming // scheme, or escapes it in double quotes if not. func writeName(w enhancedWriter, name string) (int, error) { +<<<<<<< HEAD if model.IsValidLegacyMetricName(name) { +======= + if model.IsValidLegacyMetricName(model.LabelValue(name)) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return w.WriteString(name) } var written int diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index b4607fe4d2..0aa066c462 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -22,9 +22,15 @@ import ( "math" "strconv" "strings" +<<<<<<< HEAD "unicode/utf8" dto "github.com/prometheus/client_model/go" +======= + + dto "github.com/prometheus/client_model/go" + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "google.golang.org/protobuf/proto" "github.com/prometheus/common/model" @@ -60,7 +66,10 @@ type TextParser struct { currentMF *dto.MetricFamily currentMetric *dto.Metric currentLabelPair *dto.LabelPair +<<<<<<< HEAD currentLabelPairs []*dto.LabelPair // Temporarily stores label pairs while parsing a metric line. 
+======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The remaining member variables are only used for summaries/histograms. currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' @@ -75,9 +84,12 @@ type TextParser struct { // count and sum of that summary/histogram. currentIsSummaryCount, currentIsSummarySum bool currentIsHistogramCount, currentIsHistogramSum bool +<<<<<<< HEAD // These indicate if the metric name from the current line being parsed is inside // braces and if that metric name was found respectively. currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // TextToMetricFamilies reads 'in' as the simple and flat text-based exchange @@ -141,15 +153,21 @@ func (p *TextParser) reset(in io.Reader) { } p.currentQuantile = math.NaN() p.currentBucket = math.NaN() +<<<<<<< HEAD p.currentMF = nil +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // startOfLine represents the state where the next byte read from p.buf is the // start of a line (or whitespace leading up to it). func (p *TextParser) startOfLine() stateFn { p.lineCount++ +<<<<<<< HEAD p.currentMetricIsInsideBraces = false p.currentMetricInsideBracesIsPresent = false +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if p.skipBlankTab(); p.err != nil { // This is the only place that we expect to see io.EOF, // which is not an error but the signal that we are done. @@ -165,9 +183,12 @@ func (p *TextParser) startOfLine() stateFn { return p.startComment case '\n': return p.startOfLine // Empty line, start the next one. +<<<<<<< HEAD case '{': p.currentMetricIsInsideBraces = true return p.readingLabels +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return p.readingMetricName } @@ -285,8 +306,11 @@ func (p *TextParser) startLabelName() stateFn { return nil // Unexpected end of input. } if p.currentByte == '}' { +<<<<<<< HEAD p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) p.currentLabelPairs = nil +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } @@ -299,6 +323,7 @@ func (p *TextParser) startLabelName() stateFn { p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) return nil } +<<<<<<< HEAD if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { return nil // Unexpected end of input. } @@ -338,6 +363,8 @@ func (p *TextParser) startLabelName() stateFn { p.currentLabelPairs = nil return nil } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) @@ -347,17 +374,35 @@ func (p *TextParser) startLabelName() stateFn { // labels to 'real' labels. if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { +<<<<<<< HEAD p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair) } // Check for duplicate label names. 
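+	// Names already seen are recorded in a set; a repeated name fails the parse with a duplicate-label error.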
labels := make(map[string]struct{}) for _, l := range p.currentLabelPairs { +======= + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) + } + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '=' { + p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) + return nil + } + // Check for duplicate label names. + labels := make(map[string]struct{}) + for _, l := range p.currentMetric.Label { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) lName := l.GetName() if _, exists := labels[lName]; !exists { labels[lName] = struct{}{} } else { p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) +<<<<<<< HEAD p.currentLabelPairs = nil +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } } @@ -390,7 +435,10 @@ func (p *TextParser) startLabelValue() stateFn { if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { // Create a more helpful error message. p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) +<<<<<<< HEAD p.currentLabelPairs = nil +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } } else { @@ -417,19 +465,25 @@ func (p *TextParser) startLabelValue() stateFn { return p.startLabelName case '}': +<<<<<<< HEAD if p.currentMF == nil { p.parseError("invalid metric name") return nil } p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) p.currentLabelPairs = nil +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } return p.readingValue default: p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) +<<<<<<< HEAD p.currentLabelPairs = nil +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } } @@ -638,8 +692,11 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { p.currentToken.WriteByte(p.currentByte) case 'n': p.currentToken.WriteByte('\n') +<<<<<<< HEAD case '"': p.currentToken.WriteByte('"') +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) return @@ -665,6 +722,7 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { // but not into p.currentToken. func (p *TextParser) readTokenAsMetricName() { p.currentToken.Reset() +<<<<<<< HEAD // A UTF-8 metric name must be quoted and may have escaped characters. quoted := false escaped := false @@ -704,6 +762,15 @@ func (p *TextParser) readTokenAsMetricName() { } p.currentByte, p.err = p.buf.ReadByte() if !isValidMetricNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == ' ') { +======= + if !isValidMetricNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return } } @@ -715,6 +782,7 @@ func (p *TextParser) readTokenAsMetricName() { // but not into p.currentToken. func (p *TextParser) readTokenAsLabelName() { p.currentToken.Reset() +<<<<<<< HEAD // A UTF-8 label name must be quoted and may have escaped characters. 
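+	// quoted tracks whether the scanner is inside a double-quoted name; escaped marks a pending backslash escape.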
quoted := false escaped := false @@ -754,6 +822,15 @@ func (p *TextParser) readTokenAsLabelName() { } p.currentByte, p.err = p.buf.ReadByte() if !isValidLabelNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == '=') { +======= + if !isValidLabelNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return } } @@ -779,7 +856,10 @@ func (p *TextParser) readTokenAsLabelValue() { p.currentToken.WriteByte('\n') default: p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) +<<<<<<< HEAD p.currentLabelPairs = nil +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return } escaped = false @@ -838,19 +918,32 @@ func (p *TextParser) setOrCreateCurrentMF() { } func isValidLabelNameStart(b byte) bool { +<<<<<<< HEAD return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == '"' } func isValidLabelNameContinuation(b byte, quoted bool) bool { return isValidLabelNameStart(b) || (b >= '0' && b <= '9') || (quoted && utf8.ValidString(string(b))) +======= + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' +} + +func isValidLabelNameContinuation(b byte) bool { + return isValidLabelNameStart(b) || (b >= '0' && b <= '9') +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func isValidMetricNameStart(b byte) bool { return isValidLabelNameStart(b) || b == ':' } +<<<<<<< HEAD func isValidMetricNameContinuation(b byte, quoted bool) bool { return isValidLabelNameContinuation(b, quoted) || b == ':' +======= +func isValidMetricNameContinuation(b byte) bool { + return isValidLabelNameContinuation(b) || b == ':' +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func isBlankOrTab(b byte) bool { @@ -895,7 +988,11 @@ func histogramMetricName(name string) string { func parseFloat(s string) (float64, error) { if strings.ContainsAny(s, "pP_") { +<<<<<<< HEAD return 0, errors.New("unsupported character in float") +======= + return 0, fmt.Errorf("unsupported character in float") +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return strconv.ParseFloat(s, 64) } diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go index bd3a39e3e1..cfe5680f8d 100644 --- a/vendor/github.com/prometheus/common/model/alert.go +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -14,7 +14,10 @@ package model import ( +<<<<<<< HEAD "errors" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "fmt" "time" ) @@ -90,16 +93,27 @@ func (a *Alert) StatusAt(ts time.Time) AlertStatus { // Validate checks whether the alert data is inconsistent. 
func (a *Alert) Validate() error { if a.StartsAt.IsZero() { +<<<<<<< HEAD return errors.New("start time missing") } if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) { return errors.New("start time must be before end time") +======= + return fmt.Errorf("start time missing") + } + if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) { + return fmt.Errorf("start time must be before end time") +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if err := a.Labels.Validate(); err != nil { return fmt.Errorf("invalid label set: %w", err) } if len(a.Labels) == 0 { +<<<<<<< HEAD return errors.New("at least one label pair required") +======= + return fmt.Errorf("at least one label pair required") +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if err := a.Annotations.Validate(); err != nil { return fmt.Errorf("invalid annotations: %w", err) diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go index 73b7aa3e60..8a85d43451 100644 --- a/vendor/github.com/prometheus/common/model/labels.go +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -97,21 +97,37 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") // therewith. type LabelName string +<<<<<<< HEAD // IsValid returns true iff the name matches the pattern of LabelNameRE when // NameValidationScheme is set to LegacyValidation, or valid UTF-8 if // NameValidationScheme is set to UTF8Validation. +======= +// IsValid returns true iff name matches the pattern of LabelNameRE for legacy +// names, and iff it's valid UTF-8 if NameValidationScheme is set to +// UTF8Validation. For the legacy matching, it does not use LabelNameRE for the +// check but a much faster hardcoded implementation. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (ln LabelName) IsValid() bool { if len(ln) == 0 { return false } switch NameValidationScheme { case LegacyValidation: +<<<<<<< HEAD return ln.IsValidLegacy() +======= + for i, b := range ln { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) case UTF8Validation: return utf8.ValidString(string(ln)) default: panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) } +<<<<<<< HEAD } // IsValidLegacy returns true iff name matches the pattern of LabelNameRE for @@ -126,6 +142,8 @@ func (ln LabelName) IsValidLegacy() bool { return false } } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return true } diff --git a/vendor/github.com/prometheus/common/model/labelset_string.go b/vendor/github.com/prometheus/common/model/labelset_string.go index abb2c90018..3cbeefec53 100644 --- a/vendor/github.com/prometheus/common/model/labelset_string.go +++ b/vendor/github.com/prometheus/common/model/labelset_string.go @@ -11,6 +11,11 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+<<<<<<< HEAD +======= +//go:build go1.21 + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) package model import ( diff --git a/vendor/github.com/prometheus/common/model/labelset_string_go120.go b/vendor/github.com/prometheus/common/model/labelset_string_go120.go new file mode 100644 index 0000000000..c4212685e7 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/labelset_string_go120.go @@ -0,0 +1,39 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !go1.21 + +package model + +import ( + "fmt" + "sort" + "strings" +) + +// String was optimized using functions not available for go 1.20 +// or lower. We keep the old implementation for compatibility with client_golang. +// Once client golang drops support for go 1.20 (scheduled for August 2024), this +// file can be removed. +func (l LabelSet) String() string { + labelNames := make([]string, 0, len(l)) + for name := range l { + labelNames = append(labelNames, string(name)) + } + sort.Strings(labelNames) + lstrs := make([]string, 0, len(l)) + for _, name := range labelNames { + lstrs = append(lstrs, fmt.Sprintf("%s=%q", name, l[LabelName(name)])) + } + return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) +} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index 5766107cf9..eddea85d80 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -14,11 +14,17 @@ package model import ( +<<<<<<< HEAD "errors" "fmt" "regexp" "sort" "strconv" +======= + "fmt" + "regexp" + "sort" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "strings" "unicode/utf8" @@ -28,6 +34,7 @@ import ( var ( // NameValidationScheme determines the method of name validation to be used by +<<<<<<< HEAD // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 // mode in isolation from other components that don't support UTF-8 may result // in bugs or other undefined behavior. This value can be set to @@ -43,6 +50,20 @@ var ( // is used in content negotiation to indicate that a system supports UTF-8 and // has that feature enabled. NameEscapingScheme = UnderscoreEscaping +======= + // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 mode + // in isolation from other components that don't support UTF-8 may result in + // bugs or other undefined behavior. This value is intended to be set by + // UTF-8-aware binaries as part of their startup. To avoid need for locking, + // this value should be set once, ideally in an init(), before multiple + // goroutines are started. + NameValidationScheme = LegacyValidation + + // NameEscapingScheme defines the default way that names will be + // escaped when presented to systems that do not support UTF-8 names. If the + // Content-Type "escaping" term is specified, that will override this value. 
+ NameEscapingScheme = ValueEncodingEscaping +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // ValidationScheme is a Go enum for determining how metric and label names will @@ -166,7 +187,11 @@ func (m Metric) FastFingerprint() Fingerprint { func IsValidMetricName(n LabelValue) bool { switch NameValidationScheme { case LegacyValidation: +<<<<<<< HEAD return IsValidLegacyMetricName(string(n)) +======= + return IsValidLegacyMetricName(n) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) case UTF8Validation: if len(n) == 0 { return false @@ -181,7 +206,11 @@ func IsValidMetricName(n LabelValue) bool { // legacy validation scheme regardless of the value of NameValidationScheme. // This function, however, does not use MetricNameRE for the check but a much // faster hardcoded implementation. +<<<<<<< HEAD func IsValidLegacyMetricName(n string) bool { +======= +func IsValidLegacyMetricName(n LabelValue) bool { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if len(n) == 0 { return false } @@ -213,7 +242,11 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF } // If the name is nil, copy as-is, don't try to escape. +<<<<<<< HEAD if v.Name == nil || IsValidLegacyMetricName(v.GetName()) { +======= + if v.Name == nil || IsValidLegacyMetricName(LabelValue(v.GetName())) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) out.Name = v.Name } else { out.Name = proto.String(EscapeName(v.GetName(), scheme)) @@ -235,7 +268,11 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF for _, l := range m.Label { if l.GetName() == MetricNameLabel { +<<<<<<< HEAD if l.Value == nil || IsValidLegacyMetricName(l.GetValue()) { +======= + if l.Value == nil || IsValidLegacyMetricName(LabelValue(l.GetValue())) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) escaped.Label = append(escaped.Label, l) continue } @@ -245,7 +282,11 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF }) continue } +<<<<<<< HEAD if l.Name == nil || IsValidLegacyMetricName(l.GetName()) { +======= + if l.Name == nil || IsValidLegacyMetricName(LabelValue(l.GetName())) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) escaped.Label = append(escaped.Label, l) continue } @@ -261,16 +302,30 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF func metricNeedsEscaping(m *dto.Metric) bool { for _, l := range m.Label { +<<<<<<< HEAD if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(l.GetValue()) { return true } if !IsValidLegacyMetricName(l.GetName()) { +======= + if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(LabelValue(l.GetValue())) { + return true + } + if !IsValidLegacyMetricName(LabelValue(l.GetName())) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return true } } return false } +<<<<<<< HEAD +======= +const ( + lowerhex = "0123456789abcdef" +) + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // EscapeName escapes the incoming name according to the provided escaping // scheme. Depending on the rules of escaping, this may cause no change in the // string that is returned. 
 // string that is returned. (Especially NoEscaping, which by definition is a
@@ -284,7 +286,7 @@ func EscapeName(name string, scheme EscapingScheme) string {
 	case NoEscaping:
 		return name
 	case UnderscoreEscaping:
-		if IsValidLegacyMetricName(name) {
+		if IsValidLegacyMetricName(LabelValue(name)) {
 			return name
 		}
 		for i, b := range name {
@@ -305,25 +307,32 @@ func EscapeName(name string, scheme EscapingScheme) string {
 			} else if isValidLegacyRune(b, i) {
 				escaped.WriteRune(b)
 			} else {
-				escaped.WriteString("__")
+				escaped.WriteRune('_')
 			}
 		}
 		return escaped.String()
 	case ValueEncodingEscaping:
-		if IsValidLegacyMetricName(name) {
+		if IsValidLegacyMetricName(LabelValue(name)) {
 			return name
 		}
 		escaped.WriteString("U__")
 		for i, b := range name {
-			if b == '_' {
-				escaped.WriteString("__")
-			} else if isValidLegacyRune(b, i) {
-				escaped.WriteRune(b)
-			} else if !utf8.ValidRune(b) {
-				escaped.WriteString("_FFFD_")
-			} else {
-				escaped.WriteRune('_')
-				escaped.WriteString(strconv.FormatInt(int64(b), 16))
+			if isValidLegacyRune(b, i) {
+				escaped.WriteRune(b)
+			} else if !utf8.ValidRune(b) {
+				escaped.WriteString("_FFFD_")
+			} else if b < 0x100 {
+				escaped.WriteRune('_')
+				for s := 4; s >= 0; s -= 4 {
+					escaped.WriteByte(lowerhex[b>>uint(s)&0xF])
+				}
+				escaped.WriteRune('_')
+			} else if b < 0x10000 {
+				escaped.WriteRune('_')
+				for s := 12; s >= 0; s -= 4 {
+					escaped.WriteByte(lowerhex[b>>uint(s)&0xF])
+				}
 				escaped.WriteRune('_')
 			}
 		}
@@ -381,9 +390,8 @@ func UnescapeName(name string, scheme EscapingScheme) string {
 		// We think we are in a UTF-8 code, process it.
 		var utf8Val uint
 		for j := 0; i < len(escapedName); j++ {
-			// This is too many characters for a utf8 value based on the MaxRune
-			// value of '\U0010FFFF'.
-			if j >= 6 {
+			// This is too many characters for a utf8 value.
+			if j > 4 {
 				return name
 			}
 			// Found a closing underscore, convert to a rune, check validity, and append.
@@ -436,7 +444,7 @@ func (e EscapingScheme) String() string {
 
 func ToEscapingScheme(s string) (EscapingScheme, error) {
 	if s == "" {
-		return NoEscaping, errors.New("got empty string instead of escaping scheme")
+		return NoEscaping, fmt.Errorf("got empty string instead of escaping scheme")
 	}
 	switch s {
 	case AllowUTF8:
@@ -448,6 +456,6 @@ func ToEscapingScheme(s string) (EscapingScheme, error) {
 	case EscapeValues:
 		return ValueEncodingEscaping, nil
 	default:
-		return NoEscaping, fmt.Errorf("unknown format scheme %s", s)
+		return NoEscaping, fmt.Errorf("unknown format scheme " + s)
 	}
 }
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
index 8f91a9702e..32c7095723 100644
--- a/vendor/github.com/prometheus/common/model/silence.go
+++ b/vendor/github.com/prometheus/common/model/silence.go
@@ -15,7 +15,6 @@ package model
 
 import (
 	"encoding/json"
-	"errors"
 	"fmt"
 	"regexp"
 	"time"
@@ -35,7 +34,7 @@ func (m *Matcher) UnmarshalJSON(b []byte) error {
 	}
 
 	if len(m.Name) == 0 {
-		return errors.New("label name in matcher must not be empty")
+		return fmt.Errorf("label name in matcher must not be empty")
 	}
 	if m.IsRegex {
 		if _, err := regexp.Compile(m.Value); err != nil {
@@ -78,7 +77,7 @@ type Silence struct {
 // Validate returns true iff all fields of the silence have valid values.
 func (s *Silence) Validate() error {
 	if len(s.Matchers) == 0 {
-		return errors.New("at least one matcher required")
+		return fmt.Errorf("at least one matcher required")
 	}
 	for _, m := range s.Matchers {
 		if err := m.Validate(); err != nil {
@@ -86,22 +85,22 @@ func (s *Silence) Validate() error {
 		}
 	}
 	if s.StartsAt.IsZero() {
-		return errors.New("start time missing")
+		return fmt.Errorf("start time missing")
 	}
 	if s.EndsAt.IsZero() {
-		return errors.New("end time missing")
+		return fmt.Errorf("end time missing")
 	}
 	if s.EndsAt.Before(s.StartsAt) {
-		return errors.New("start time must be before end time")
+		return fmt.Errorf("start time must be before end time")
 	}
 	if s.CreatedBy == "" {
-		return errors.New("creator information missing")
+		return fmt.Errorf("creator information missing")
 	}
 	if s.Comment == "" {
-		return errors.New("comment missing")
+		return fmt.Errorf("comment missing")
 	}
 	if s.CreatedAt.IsZero() {
-		return errors.New("creation timestamp missing")
+		return fmt.Errorf("creation timestamp missing")
 	}
 	return nil
 }
diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go
index 6bfc757d18..40b5822730 100644
--- a/vendor/github.com/prometheus/common/model/value_float.go
+++ b/vendor/github.com/prometheus/common/model/value_float.go
@@ -15,7 +15,6 @@ package model
 
 import (
 	"encoding/json"
-	"errors"
 	"fmt"
 	"math"
 	"strconv"
@@ -40,7 +39,7 @@ func (v SampleValue) MarshalJSON() ([]byte, error) {
 // UnmarshalJSON implements json.Unmarshaler.
 func (v *SampleValue) UnmarshalJSON(b []byte) error {
 	if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
-		return errors.New("sample value must be a quoted string")
+		return fmt.Errorf("sample value must be a quoted string")
 	}
 	f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
 	if err != nil {
diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go
index 895e6a3e83..45ac9f202a 100644
--- a/vendor/github.com/prometheus/common/model/value_histogram.go
+++ b/vendor/github.com/prometheus/common/model/value_histogram.go
@@ -15,7 +15,6 @@ package model
 
 import (
 	"encoding/json"
-	"errors"
 	"fmt"
 	"strconv"
 	"strings"
@@ -33,7 +32,7 @@ func (v FloatString) MarshalJSON() ([]byte, error) {
 
 func (v *FloatString) UnmarshalJSON(b []byte) error {
 	if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
-		return errors.New("float value must be a quoted string")
+		return fmt.Errorf("float value must be a quoted string")
 	}
 	f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
 	if err != nil {
@@ -142,7 +141,7 @@ type SampleHistogramPair struct {
 
 func (s SampleHistogramPair) MarshalJSON() ([]byte, error) {
 	if s.Histogram == nil {
-		return nil, errors.New("histogram is nil")
+		return nil, fmt.Errorf("histogram is nil")
 	}
 	t, err := json.Marshal(s.Timestamp)
 	if err != nil {
@@ -165,7 +164,7 @@ func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error {
 		return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
 	}
 	if s.Histogram == nil {
-		return errors.New("histogram is null")
+		return fmt.Errorf("histogram is null")
 	}
 	return nil
 }
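The hunks above pin `vendor/github.com/prometheus/common` to the pre-0.60 naming API, in which the legacy-name helpers take a `model.LabelValue` rather than a plain `string`. A minimal sketch of the calling convention the rest of this patch must compile against (illustrative only, not part of the diff; the sample metric names are invented):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// With the older vendored API, the legacy check takes a model.LabelValue.
	fmt.Println(model.IsValidLegacyMetricName(model.LabelValue("http_requests_total"))) // true

	// Dotted names only pass once UTF-8 validation is switched on; this
	// version of the package defaults to LegacyValidation.
	model.NameValidationScheme = model.UTF8Validation
	fmt.Println(model.IsValidMetricName(model.LabelValue("http.requests.total"))) // true
}
```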
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitignore b/vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitignore
new file mode 100644
index 0000000000..3c0af38259
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitignore
@@ -0,0 +1,4 @@
+.vscode
+.idea
+*.swp
+cmd/jv/jv
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitmodules b/vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitmodules
new file mode 100644
index 0000000000..314da31c5e
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "testdata/JSON-Schema-Test-Suite"]
+	path = testdata/JSON-Schema-Test-Suite
+	url = https://github.com/json-schema-org/JSON-Schema-Test-Suite.git
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/LICENSE b/vendor/github.com/santhosh-tekuri/jsonschema/v5/LICENSE
new file mode 100644
index 0000000000..19dc35b243
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/LICENSE
@@ -0,0 +1,175 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
\ No newline at end of file diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/README.md b/vendor/github.com/santhosh-tekuri/jsonschema/v5/README.md new file mode 100644 index 0000000000..b0d05054ca --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/README.md @@ -0,0 +1,220 @@ +# jsonschema v5.3.1 + +[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) +[![GoDoc](https://godoc.org/github.com/santhosh-tekuri/jsonschema?status.svg)](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5) +[![Go Report Card](https://goreportcard.com/badge/github.com/santhosh-tekuri/jsonschema/v5)](https://goreportcard.com/report/github.com/santhosh-tekuri/jsonschema/v5) +[![Build Status](https://github.com/santhosh-tekuri/jsonschema/actions/workflows/go.yaml/badge.svg?branch=master)](https://github.com/santhosh-tekuri/jsonschema/actions/workflows/go.yaml) +[![codecov](https://codecov.io/gh/santhosh-tekuri/jsonschema/branch/master/graph/badge.svg?token=JMVj1pFT2l)](https://codecov.io/gh/santhosh-tekuri/jsonschema) + +Package jsonschema provides json-schema compilation and validation. + +[Benchmarks](https://dev.to/vearutop/benchmarking-correctness-and-performance-of-go-json-schema-validators-3247) + +### Features: + - implements + [draft 2020-12](https://json-schema.org/specification-links.html#2020-12), + [draft 2019-09](https://json-schema.org/specification-links.html#draft-2019-09-formerly-known-as-draft-8), + [draft-7](https://json-schema.org/specification-links.html#draft-7), + [draft-6](https://json-schema.org/specification-links.html#draft-6), + [draft-4](https://json-schema.org/specification-links.html#draft-4) + - fully compliant with [JSON-Schema-Test-Suite](https://github.com/json-schema-org/JSON-Schema-Test-Suite), (excluding some optional) + - list of optional tests that are excluded can be found in schema_test.go(variable [skipTests](https://github.com/santhosh-tekuri/jsonschema/blob/master/schema_test.go#L24)) + - validates schemas against meta-schema + - full support of remote references + - support of recursive references between schemas + - detects infinite loop in schemas + - thread safe validation + - rich, intuitive hierarchial error messages with json-pointers to exact location + - supports output formats flag, basic and detailed + - supports enabling format and content Assertions in draft2019-09 or above + - change `Compiler.AssertFormat`, `Compiler.AssertContent` to `true` + - compiled schema can be introspected. 
easier to develop tools like generating go structs given schema + - supports user-defined keywords via [extensions](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-Extension) + - implements following formats (supports [user-defined](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-UserDefinedFormat)) + - date-time, date, time, duration, period (supports leap-second) + - uuid, hostname, email + - ip-address, ipv4, ipv6 + - uri, uriref, uri-template(limited validation) + - json-pointer, relative-json-pointer + - regex, format + - implements following contentEncoding (supports [user-defined](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-UserDefinedContent)) + - base64 + - implements following contentMediaType (supports [user-defined](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-UserDefinedContent)) + - application/json + - can load from files/http/https/[string](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-FromString)/[]byte/io.Reader (supports [user-defined](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-UserDefinedLoader)) + + +see examples in [godoc](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5) + +The schema is compiled against the version specified in `$schema` property. +If "$schema" property is missing, it uses latest draft which currently implemented +by this library. + +You can force to use specific version, when `$schema` is missing, as follows: + +```go +compiler := jsonschema.NewCompiler() +compiler.Draft = jsonschema.Draft4 +``` + +This package supports loading json-schema from filePath and fileURL. + +To load json-schema from HTTPURL, add following import: + +```go +import _ "github.com/santhosh-tekuri/jsonschema/v5/httploader" +``` + +## Rich Errors + +The ValidationError returned by Validate method contains detailed context to understand why and where the error is. + +schema.json: +```json +{ + "$ref": "t.json#/definitions/employee" +} +``` + +t.json: +```json +{ + "definitions": { + "employee": { + "type": "string" + } + } +} +``` + +doc.json: +```json +1 +``` + +assuming `err` is the ValidationError returned when `doc.json` validated with `schema.json`, +```go +fmt.Printf("%#v\n", err) // using %#v prints errors hierarchy +``` +Prints: +``` +[I#] [S#] doesn't validate with file:///Users/santhosh/jsonschema/schema.json# + [I#] [S#/$ref] doesn't validate with 'file:///Users/santhosh/jsonschema/t.json#/definitions/employee' + [I#] [S#/definitions/employee/type] expected string, but got number +``` + +Here `I` stands for instance document and `S` stands for schema document. +The json-fragments that caused error in instance and schema documents are represented using json-pointer notation. +Nested causes are printed with indent. 
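The error-hierarchy walkthrough above can be reproduced without any files on disk, using the `AddResource`/`Compile` API this vendored package exposes. A small sketch under those assumptions (the resource names `schema.json`/`t.json` and the instance value `1` are the README's own; this snippet is an editorial illustration, not part of the vendored README):

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"

	"github.com/santhosh-tekuri/jsonschema/v5"
)

func main() {
	c := jsonschema.NewCompiler()
	// Register the README's two documents as in-memory resources.
	if err := c.AddResource("schema.json", strings.NewReader(`{"$ref": "t.json#/definitions/employee"}`)); err != nil {
		panic(err)
	}
	if err := c.AddResource("t.json", strings.NewReader(`{"definitions": {"employee": {"type": "string"}}}`)); err != nil {
		panic(err)
	}
	sch, err := c.Compile("schema.json")
	if err != nil {
		panic(err)
	}

	// doc.json is just the number 1; decode it the way the package expects.
	var doc interface{}
	if err := json.Unmarshal([]byte(`1`), &doc); err != nil {
		panic(err)
	}
	if err := sch.Validate(doc); err != nil {
		fmt.Printf("%#v\n", err) // prints the error hierarchy shown above
	}
}
```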
+ +To output `err` in `flag` output format: +```go +b, _ := json.MarshalIndent(err.FlagOutput(), "", " ") +fmt.Println(string(b)) +``` +Prints: +```json +{ + "valid": false +} +``` +To output `err` in `basic` output format: +```go +b, _ := json.MarshalIndent(err.BasicOutput(), "", " ") +fmt.Println(string(b)) +``` +Prints: +```json +{ + "valid": false, + "errors": [ + { + "keywordLocation": "", + "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/schema.json#", + "instanceLocation": "", + "error": "doesn't validate with file:///Users/santhosh/jsonschema/schema.json#" + }, + { + "keywordLocation": "/$ref", + "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/schema.json#/$ref", + "instanceLocation": "", + "error": "doesn't validate with 'file:///Users/santhosh/jsonschema/t.json#/definitions/employee'" + }, + { + "keywordLocation": "/$ref/type", + "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/t.json#/definitions/employee/type", + "instanceLocation": "", + "error": "expected string, but got number" + } + ] +} +``` +To output `err` in `detailed` output format: +```go +b, _ := json.MarshalIndent(err.DetailedOutput(), "", " ") +fmt.Println(string(b)) +``` +Prints: +```json +{ + "valid": false, + "keywordLocation": "", + "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/schema.json#", + "instanceLocation": "", + "errors": [ + { + "valid": false, + "keywordLocation": "/$ref", + "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/schema.json#/$ref", + "instanceLocation": "", + "errors": [ + { + "valid": false, + "keywordLocation": "/$ref/type", + "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/t.json#/definitions/employee/type", + "instanceLocation": "", + "error": "expected string, but got number" + } + ] + } + ] +} +``` + +## CLI + +to install `go install github.com/santhosh-tekuri/jsonschema/cmd/jv@latest` + +```bash +jv [-draft INT] [-output FORMAT] [-assertformat] [-assertcontent] []... + -assertcontent + enable content assertions with draft >= 2019 + -assertformat + enable format assertions with draft >= 2019 + -draft int + draft used when '$schema' attribute is missing. valid values 4, 5, 7, 2019, 2020 (default 2020) + -output string + output format. valid values flag, basic, detailed +``` + +if no `` arguments are passed, it simply validates the ``. +if `$schema` attribute is missing in schema, it uses latest version. this can be overridden by passing `-draft` flag + +exit-code is 1, if there are any validation errors + +`jv` can also validate yaml files. It also accepts schema from yaml files. + +## Validating YAML Documents + +since yaml supports non-string keys, such yaml documents are rendered as invalid json documents. + +most yaml parser use `map[interface{}]interface{}` for object, +whereas json parser uses `map[string]interface{}`. + +so we need to manually convert them to `map[string]interface{}`. +below code shows such conversion by `toStringKeys` function. + +https://play.golang.org/p/Hhax3MrtD8r + +NOTE: if you are using `gopkg.in/yaml.v3`, then you do not need such conversion. since this library +returns `map[string]interface{}` if all keys are strings. 
\ No newline at end of file diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/compiler.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/compiler.go new file mode 100644 index 0000000000..fdb68e6480 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/compiler.go @@ -0,0 +1,812 @@ +package jsonschema + +import ( + "encoding/json" + "fmt" + "io" + "math/big" + "regexp" + "strconv" + "strings" +) + +// A Compiler represents a json-schema compiler. +type Compiler struct { + // Draft represents the draft used when '$schema' attribute is missing. + // + // This defaults to latest supported draft (currently 2020-12). + Draft *Draft + resources map[string]*resource + + // Extensions is used to register extensions. + extensions map[string]extension + + // ExtractAnnotations tells whether schema annotations has to be extracted + // in compiled Schema or not. + ExtractAnnotations bool + + // LoadURL loads the document at given absolute URL. + // + // If nil, package global LoadURL is used. + LoadURL func(s string) (io.ReadCloser, error) + + // Formats can be registered by adding to this map. Key is format name, + // value is function that knows how to validate that format. + Formats map[string]func(interface{}) bool + + // AssertFormat for specifications >= draft2019-09. + AssertFormat bool + + // Decoders can be registered by adding to this map. Key is encoding name, + // value is function that knows how to decode string in that format. + Decoders map[string]func(string) ([]byte, error) + + // MediaTypes can be registered by adding to this map. Key is mediaType name, + // value is function that knows how to validate that mediaType. + MediaTypes map[string]func([]byte) error + + // AssertContent for specifications >= draft2019-09. + AssertContent bool +} + +// Compile parses json-schema at given url returns, if successful, +// a Schema object that can be used to match against json. +// +// Returned error can be *SchemaError +func Compile(url string) (*Schema, error) { + return NewCompiler().Compile(url) +} + +// MustCompile is like Compile but panics if the url cannot be compiled to *Schema. +// It simplifies safe initialization of global variables holding compiled Schemas. +func MustCompile(url string) *Schema { + return NewCompiler().MustCompile(url) +} + +// CompileString parses and compiles the given schema with given base url. +func CompileString(url, schema string) (*Schema, error) { + c := NewCompiler() + if err := c.AddResource(url, strings.NewReader(schema)); err != nil { + return nil, err + } + return c.Compile(url) +} + +// MustCompileString is like CompileString but panics on error. +// It simplified safe initialization of global variables holding compiled Schema. +func MustCompileString(url, schema string) *Schema { + c := NewCompiler() + if err := c.AddResource(url, strings.NewReader(schema)); err != nil { + panic(err) + } + return c.MustCompile(url) +} + +// NewCompiler returns a json-schema Compiler object. +// if '$schema' attribute is missing, it is treated as draft7. to change this +// behavior change Compiler.Draft value +func NewCompiler() *Compiler { + return &Compiler{ + Draft: latest, + resources: make(map[string]*resource), + Formats: make(map[string]func(interface{}) bool), + Decoders: make(map[string]func(string) ([]byte, error)), + MediaTypes: make(map[string]func([]byte) error), + extensions: make(map[string]extension), + } +} + +// AddResource adds in-memory resource to the compiler. 
+// +// Note that url must not have fragment +func (c *Compiler) AddResource(url string, r io.Reader) error { + res, err := newResource(url, r) + if err != nil { + return err + } + c.resources[res.url] = res + return nil +} + +// MustCompile is like Compile but panics if the url cannot be compiled to *Schema. +// It simplifies safe initialization of global variables holding compiled Schemas. +func (c *Compiler) MustCompile(url string) *Schema { + s, err := c.Compile(url) + if err != nil { + panic(fmt.Sprintf("jsonschema: %#v", err)) + } + return s +} + +// Compile parses json-schema at given url returns, if successful, +// a Schema object that can be used to match against json. +// +// error returned will be of type *SchemaError +func (c *Compiler) Compile(url string) (*Schema, error) { + // make url absolute + u, err := toAbs(url) + if err != nil { + return nil, &SchemaError{url, err} + } + url = u + + sch, err := c.compileURL(url, nil, "#") + if err != nil { + err = &SchemaError{url, err} + } + return sch, err +} + +func (c *Compiler) findResource(url string) (*resource, error) { + if _, ok := c.resources[url]; !ok { + // load resource + var rdr io.Reader + if sch, ok := vocabSchemas[url]; ok { + rdr = strings.NewReader(sch) + } else { + loadURL := LoadURL + if c.LoadURL != nil { + loadURL = c.LoadURL + } + r, err := loadURL(url) + if err != nil { + return nil, err + } + defer r.Close() + rdr = r + } + if err := c.AddResource(url, rdr); err != nil { + return nil, err + } + } + + r := c.resources[url] + if r.draft != nil { + return r, nil + } + + // set draft + r.draft = c.Draft + if m, ok := r.doc.(map[string]interface{}); ok { + if sch, ok := m["$schema"]; ok { + sch, ok := sch.(string) + if !ok { + return nil, fmt.Errorf("jsonschema: invalid $schema in %s", url) + } + if !isURI(sch) { + return nil, fmt.Errorf("jsonschema: $schema must be uri in %s", url) + } + r.draft = findDraft(sch) + if r.draft == nil { + sch, _ := split(sch) + if sch == url { + return nil, fmt.Errorf("jsonschema: unsupported draft in %s", url) + } + mr, err := c.findResource(sch) + if err != nil { + return nil, err + } + r.draft = mr.draft + } + } + } + + id, err := r.draft.resolveID(r.url, r.doc) + if err != nil { + return nil, err + } + if id != "" { + r.url = id + } + + if err := r.fillSubschemas(c, r); err != nil { + return nil, err + } + + return r, nil +} + +func (c *Compiler) compileURL(url string, stack []schemaRef, ptr string) (*Schema, error) { + // if url points to a draft, return Draft.meta + if d := findDraft(url); d != nil && d.meta != nil { + return d.meta, nil + } + + b, f := split(url) + r, err := c.findResource(b) + if err != nil { + return nil, err + } + return c.compileRef(r, stack, ptr, r, f) +} + +func (c *Compiler) compileRef(r *resource, stack []schemaRef, refPtr string, res *resource, ref string) (*Schema, error) { + base := r.baseURL(res.floc) + ref, err := resolveURL(base, ref) + if err != nil { + return nil, err + } + + u, f := split(ref) + sr := r.findResource(u) + if sr == nil { + // external resource + return c.compileURL(ref, stack, refPtr) + } + + // ensure root resource is always compiled first. 
+ // this is required to get schema.meta from root resource + if r.schema == nil { + r.schema = newSchema(r.url, r.floc, r.draft, r.doc) + if _, err := c.compile(r, nil, schemaRef{"#", r.schema, false}, r); err != nil { + return nil, err + } + } + + sr, err = r.resolveFragment(c, sr, f) + if err != nil { + return nil, err + } + if sr == nil { + return nil, fmt.Errorf("jsonschema: %s not found", ref) + } + + if sr.schema != nil { + if err := checkLoop(stack, schemaRef{refPtr, sr.schema, false}); err != nil { + return nil, err + } + return sr.schema, nil + } + + sr.schema = newSchema(r.url, sr.floc, r.draft, sr.doc) + return c.compile(r, stack, schemaRef{refPtr, sr.schema, false}, sr) +} + +func (c *Compiler) compileDynamicAnchors(r *resource, res *resource) error { + if r.draft.version < 2020 { + return nil + } + + rr := r.listResources(res) + rr = append(rr, res) + for _, sr := range rr { + if m, ok := sr.doc.(map[string]interface{}); ok { + if _, ok := m["$dynamicAnchor"]; ok { + sch, err := c.compileRef(r, nil, "IGNORED", r, sr.floc) + if err != nil { + return err + } + res.schema.dynamicAnchors = append(res.schema.dynamicAnchors, sch) + } + } + } + return nil +} + +func (c *Compiler) compile(r *resource, stack []schemaRef, sref schemaRef, res *resource) (*Schema, error) { + if err := c.compileDynamicAnchors(r, res); err != nil { + return nil, err + } + + switch v := res.doc.(type) { + case bool: + res.schema.Always = &v + return res.schema, nil + default: + return res.schema, c.compileMap(r, stack, sref, res) + } +} + +func (c *Compiler) compileMap(r *resource, stack []schemaRef, sref schemaRef, res *resource) error { + m := res.doc.(map[string]interface{}) + + if err := checkLoop(stack, sref); err != nil { + return err + } + stack = append(stack, sref) + + var s = res.schema + var err error + + if r == res { // root schema + if sch, ok := m["$schema"]; ok { + sch := sch.(string) + if d := findDraft(sch); d != nil { + s.meta = d.meta + } else { + if s.meta, err = c.compileRef(r, stack, "$schema", res, sch); err != nil { + return err + } + } + } + } + + if ref, ok := m["$ref"]; ok { + s.Ref, err = c.compileRef(r, stack, "$ref", res, ref.(string)) + if err != nil { + return err + } + if r.draft.version < 2019 { + // All other properties in a "$ref" object MUST be ignored + return nil + } + } + + if r.draft.version >= 2019 { + if r == res { // root schema + if vocab, ok := m["$vocabulary"]; ok { + for url, reqd := range vocab.(map[string]interface{}) { + if reqd, ok := reqd.(bool); ok && !reqd { + continue + } + if !r.draft.isVocab(url) { + return fmt.Errorf("jsonschema: unsupported vocab %q in %s", url, res) + } + s.vocab = append(s.vocab, url) + } + } else { + s.vocab = r.draft.defaultVocab + } + } + + if ref, ok := m["$recursiveRef"]; ok { + s.RecursiveRef, err = c.compileRef(r, stack, "$recursiveRef", res, ref.(string)) + if err != nil { + return err + } + } + } + if r.draft.version >= 2020 { + if dref, ok := m["$dynamicRef"]; ok { + s.DynamicRef, err = c.compileRef(r, stack, "$dynamicRef", res, dref.(string)) + if err != nil { + return err + } + if dref, ok := dref.(string); ok { + _, frag := split(dref) + if frag != "#" && !strings.HasPrefix(frag, "#/") { + // frag is anchor + s.dynamicRefAnchor = frag[1:] + } + } + } + } + + loadInt := func(pname string) int { + if num, ok := m[pname]; ok { + i, _ := num.(json.Number).Float64() + return int(i) + } + return -1 + } + + loadRat := func(pname string) *big.Rat { + if num, ok := m[pname]; ok { + r, _ := 
new(big.Rat).SetString(string(num.(json.Number))) + return r + } + return nil + } + + if r.draft.version < 2019 || r.schema.meta.hasVocab("validation") { + if t, ok := m["type"]; ok { + switch t := t.(type) { + case string: + s.Types = []string{t} + case []interface{}: + s.Types = toStrings(t) + } + } + + if e, ok := m["enum"]; ok { + s.Enum = e.([]interface{}) + allPrimitives := true + for _, item := range s.Enum { + switch jsonType(item) { + case "object", "array": + allPrimitives = false + break + } + } + s.enumError = "enum failed" + if allPrimitives { + if len(s.Enum) == 1 { + s.enumError = fmt.Sprintf("value must be %#v", s.Enum[0]) + } else { + strEnum := make([]string, len(s.Enum)) + for i, item := range s.Enum { + strEnum[i] = fmt.Sprintf("%#v", item) + } + s.enumError = fmt.Sprintf("value must be one of %s", strings.Join(strEnum, ", ")) + } + } + } + + s.Minimum = loadRat("minimum") + if exclusive, ok := m["exclusiveMinimum"]; ok { + if exclusive, ok := exclusive.(bool); ok { + if exclusive { + s.Minimum, s.ExclusiveMinimum = nil, s.Minimum + } + } else { + s.ExclusiveMinimum = loadRat("exclusiveMinimum") + } + } + + s.Maximum = loadRat("maximum") + if exclusive, ok := m["exclusiveMaximum"]; ok { + if exclusive, ok := exclusive.(bool); ok { + if exclusive { + s.Maximum, s.ExclusiveMaximum = nil, s.Maximum + } + } else { + s.ExclusiveMaximum = loadRat("exclusiveMaximum") + } + } + + s.MultipleOf = loadRat("multipleOf") + + s.MinProperties, s.MaxProperties = loadInt("minProperties"), loadInt("maxProperties") + + if req, ok := m["required"]; ok { + s.Required = toStrings(req.([]interface{})) + } + + s.MinItems, s.MaxItems = loadInt("minItems"), loadInt("maxItems") + + if unique, ok := m["uniqueItems"]; ok { + s.UniqueItems = unique.(bool) + } + + s.MinLength, s.MaxLength = loadInt("minLength"), loadInt("maxLength") + + if pattern, ok := m["pattern"]; ok { + s.Pattern = regexp.MustCompile(pattern.(string)) + } + + if r.draft.version >= 2019 { + s.MinContains, s.MaxContains = loadInt("minContains"), loadInt("maxContains") + if s.MinContains == -1 { + s.MinContains = 1 + } + + if deps, ok := m["dependentRequired"]; ok { + deps := deps.(map[string]interface{}) + s.DependentRequired = make(map[string][]string, len(deps)) + for pname, pvalue := range deps { + s.DependentRequired[pname] = toStrings(pvalue.([]interface{})) + } + } + } + } + + compile := func(stack []schemaRef, ptr string) (*Schema, error) { + return c.compileRef(r, stack, ptr, res, r.url+res.floc+"/"+ptr) + } + + loadSchema := func(pname string, stack []schemaRef) (*Schema, error) { + if _, ok := m[pname]; ok { + return compile(stack, escape(pname)) + } + return nil, nil + } + + loadSchemas := func(pname string, stack []schemaRef) ([]*Schema, error) { + if pvalue, ok := m[pname]; ok { + pvalue := pvalue.([]interface{}) + schemas := make([]*Schema, len(pvalue)) + for i := range pvalue { + sch, err := compile(stack, escape(pname)+"/"+strconv.Itoa(i)) + if err != nil { + return nil, err + } + schemas[i] = sch + } + return schemas, nil + } + return nil, nil + } + + if r.draft.version < 2019 || r.schema.meta.hasVocab("applicator") { + if s.Not, err = loadSchema("not", stack); err != nil { + return err + } + if s.AllOf, err = loadSchemas("allOf", stack); err != nil { + return err + } + if s.AnyOf, err = loadSchemas("anyOf", stack); err != nil { + return err + } + if s.OneOf, err = loadSchemas("oneOf", stack); err != nil { + return err + } + + if props, ok := m["properties"]; ok { + props := props.(map[string]interface{}) + 
s.Properties = make(map[string]*Schema, len(props)) + for pname := range props { + s.Properties[pname], err = compile(nil, "properties/"+escape(pname)) + if err != nil { + return err + } + } + } + + if regexProps, ok := m["regexProperties"]; ok { + s.RegexProperties = regexProps.(bool) + } + + if patternProps, ok := m["patternProperties"]; ok { + patternProps := patternProps.(map[string]interface{}) + s.PatternProperties = make(map[*regexp.Regexp]*Schema, len(patternProps)) + for pattern := range patternProps { + s.PatternProperties[regexp.MustCompile(pattern)], err = compile(nil, "patternProperties/"+escape(pattern)) + if err != nil { + return err + } + } + } + + if additionalProps, ok := m["additionalProperties"]; ok { + switch additionalProps := additionalProps.(type) { + case bool: + s.AdditionalProperties = additionalProps + case map[string]interface{}: + s.AdditionalProperties, err = compile(nil, "additionalProperties") + if err != nil { + return err + } + } + } + + if deps, ok := m["dependencies"]; ok { + deps := deps.(map[string]interface{}) + s.Dependencies = make(map[string]interface{}, len(deps)) + for pname, pvalue := range deps { + switch pvalue := pvalue.(type) { + case []interface{}: + s.Dependencies[pname] = toStrings(pvalue) + default: + s.Dependencies[pname], err = compile(stack, "dependencies/"+escape(pname)) + if err != nil { + return err + } + } + } + } + + if r.draft.version >= 6 { + if s.PropertyNames, err = loadSchema("propertyNames", nil); err != nil { + return err + } + if s.Contains, err = loadSchema("contains", nil); err != nil { + return err + } + } + + if r.draft.version >= 7 { + if m["if"] != nil { + if s.If, err = loadSchema("if", stack); err != nil { + return err + } + if s.Then, err = loadSchema("then", stack); err != nil { + return err + } + if s.Else, err = loadSchema("else", stack); err != nil { + return err + } + } + } + if r.draft.version >= 2019 { + if deps, ok := m["dependentSchemas"]; ok { + deps := deps.(map[string]interface{}) + s.DependentSchemas = make(map[string]*Schema, len(deps)) + for pname := range deps { + s.DependentSchemas[pname], err = compile(stack, "dependentSchemas/"+escape(pname)) + if err != nil { + return err + } + } + } + } + + if r.draft.version >= 2020 { + if s.PrefixItems, err = loadSchemas("prefixItems", nil); err != nil { + return err + } + if s.Items2020, err = loadSchema("items", nil); err != nil { + return err + } + } else { + if items, ok := m["items"]; ok { + switch items.(type) { + case []interface{}: + s.Items, err = loadSchemas("items", nil) + if err != nil { + return err + } + if additionalItems, ok := m["additionalItems"]; ok { + switch additionalItems := additionalItems.(type) { + case bool: + s.AdditionalItems = additionalItems + case map[string]interface{}: + s.AdditionalItems, err = compile(nil, "additionalItems") + if err != nil { + return err + } + } + } + default: + s.Items, err = compile(nil, "items") + if err != nil { + return err + } + } + } + } + + } + + // unevaluatedXXX keywords were in "applicator" vocab in 2019, but moved to new vocab "unevaluated" in 2020 + if (r.draft.version == 2019 && r.schema.meta.hasVocab("applicator")) || (r.draft.version >= 2020 && r.schema.meta.hasVocab("unevaluated")) { + if s.UnevaluatedProperties, err = loadSchema("unevaluatedProperties", nil); err != nil { + return err + } + if s.UnevaluatedItems, err = loadSchema("unevaluatedItems", nil); err != nil { + return err + } + if r.draft.version >= 2020 { + // any item in an array that passes validation of the contains schema 
is considered "evaluated" + s.ContainsEval = true + } + } + + if format, ok := m["format"]; ok { + s.Format = format.(string) + if r.draft.version < 2019 || c.AssertFormat || r.schema.meta.hasVocab("format-assertion") { + if format, ok := c.Formats[s.Format]; ok { + s.format = format + } else { + s.format, _ = Formats[s.Format] + } + } + } + + if c.ExtractAnnotations { + if title, ok := m["title"]; ok { + s.Title = title.(string) + } + if description, ok := m["description"]; ok { + s.Description = description.(string) + } + s.Default = m["default"] + } + + if r.draft.version >= 6 { + if c, ok := m["const"]; ok { + s.Constant = []interface{}{c} + } + } + + if r.draft.version >= 7 { + if encoding, ok := m["contentEncoding"]; ok { + s.ContentEncoding = encoding.(string) + if decoder, ok := c.Decoders[s.ContentEncoding]; ok { + s.decoder = decoder + } else { + s.decoder, _ = Decoders[s.ContentEncoding] + } + } + if mediaType, ok := m["contentMediaType"]; ok { + s.ContentMediaType = mediaType.(string) + if mediaType, ok := c.MediaTypes[s.ContentMediaType]; ok { + s.mediaType = mediaType + } else { + s.mediaType, _ = MediaTypes[s.ContentMediaType] + } + if s.ContentSchema, err = loadSchema("contentSchema", stack); err != nil { + return err + } + } + if c.ExtractAnnotations { + if comment, ok := m["$comment"]; ok { + s.Comment = comment.(string) + } + if readOnly, ok := m["readOnly"]; ok { + s.ReadOnly = readOnly.(bool) + } + if writeOnly, ok := m["writeOnly"]; ok { + s.WriteOnly = writeOnly.(bool) + } + if examples, ok := m["examples"]; ok { + s.Examples = examples.([]interface{}) + } + } + } + + if r.draft.version >= 2019 { + if !c.AssertContent { + s.decoder = nil + s.mediaType = nil + s.ContentSchema = nil + } + if c.ExtractAnnotations { + if deprecated, ok := m["deprecated"]; ok { + s.Deprecated = deprecated.(bool) + } + } + } + + for name, ext := range c.extensions { + es, err := ext.compiler.Compile(CompilerContext{c, r, stack, res}, m) + if err != nil { + return err + } + if es != nil { + if s.Extensions == nil { + s.Extensions = make(map[string]ExtSchema) + } + s.Extensions[name] = es + } + } + + return nil +} + +func (c *Compiler) validateSchema(r *resource, v interface{}, vloc string) error { + validate := func(meta *Schema) error { + if meta == nil { + return nil + } + return meta.validateValue(v, vloc) + } + + if err := validate(r.draft.meta); err != nil { + return err + } + for _, ext := range c.extensions { + if err := validate(ext.meta); err != nil { + return err + } + } + return nil +} + +func toStrings(arr []interface{}) []string { + s := make([]string, len(arr)) + for i, v := range arr { + s[i] = v.(string) + } + return s +} + +// SchemaRef captures schema and the path referring to it. 
+type schemaRef struct { + path string // relative-json-pointer to schema + schema *Schema // target schema + discard bool // true when scope left +} + +func (sr schemaRef) String() string { + return fmt.Sprintf("(%s)%v", sr.path, sr.schema) +} + +func checkLoop(stack []schemaRef, sref schemaRef) error { + for _, ref := range stack { + if ref.schema == sref.schema { + return infiniteLoopError(stack, sref) + } + } + return nil +} + +func keywordLocation(stack []schemaRef, path string) string { + var loc string + for _, ref := range stack[1:] { + loc += "/" + ref.path + } + if path != "" { + loc = loc + "/" + path + } + return loc +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/content.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/content.go new file mode 100644 index 0000000000..7570b8b5a9 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/content.go @@ -0,0 +1,29 @@ +package jsonschema + +import ( + "encoding/base64" + "encoding/json" +) + +// Decoders is a registry of functions, which know how to decode +// string encoded in specific format. +// +// New Decoders can be registered by adding to this map. Key is encoding name, +// value is function that knows how to decode string in that format. +var Decoders = map[string]func(string) ([]byte, error){ + "base64": base64.StdEncoding.DecodeString, +} + +// MediaTypes is a registry of functions, which know how to validate +// whether the bytes represent data of that mediaType. +// +// New mediaTypes can be registered by adding to this map. Key is mediaType name, +// value is function that knows how to validate that mediaType. +var MediaTypes = map[string]func([]byte) error{ + "application/json": validateJSON, +} + +func validateJSON(b []byte) error { + var v interface{} + return json.Unmarshal(b, &v) +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/doc.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/doc.go new file mode 100644 index 0000000000..a124262a51 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/doc.go @@ -0,0 +1,49 @@ +/* +Package jsonschema provides json-schema compilation and validation. + +Features: + - implements draft 2020-12, 2019-09, draft-7, draft-6, draft-4 + - fully compliant with JSON-Schema-Test-Suite, (excluding some optional) + - list of optional tests that are excluded can be found in schema_test.go(variable skipTests) + - validates schemas against meta-schema + - full support of remote references + - support of recursive references between schemas + - detects infinite loop in schemas + - thread safe validation + - rich, intuitive hierarchial error messages with json-pointers to exact location + - supports output formats flag, basic and detailed + - supports enabling format and content Assertions in draft2019-09 or above + - change Compiler.AssertFormat, Compiler.AssertContent to true + - compiled schema can be introspected. 
easier to develop tools like generating go structs given schema + - supports user-defined keywords via extensions + - implements following formats (supports user-defined) + - date-time, date, time, duration (supports leap-second) + - uuid, hostname, email + - ip-address, ipv4, ipv6 + - uri, uriref, uri-template(limited validation) + - json-pointer, relative-json-pointer + - regex, format + - implements following contentEncoding (supports user-defined) + - base64 + - implements following contentMediaType (supports user-defined) + - application/json + - can load from files/http/https/string/[]byte/io.Reader (supports user-defined) + +The schema is compiled against the version specified in "$schema" property. +If "$schema" property is missing, it uses latest draft which currently implemented +by this library. + +You can force to use specific draft, when "$schema" is missing, as follows: + + compiler := jsonschema.NewCompiler() + compiler.Draft = jsonschema.Draft4 + +This package supports loading json-schema from filePath and fileURL. + +To load json-schema from HTTPURL, add following import: + + import _ "github.com/santhosh-tekuri/jsonschema/v5/httploader" + +you can validate yaml documents. see https://play.golang.org/p/sJy1qY7dXgA +*/ +package jsonschema diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/draft.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/draft.go new file mode 100644 index 0000000000..154fa5837d --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/draft.go @@ -0,0 +1,1454 @@ +package jsonschema + +import ( + "fmt" + "strconv" + "strings" +) + +// A Draft represents json-schema draft +type Draft struct { + version int + meta *Schema + id string // property name used to represent schema id. + boolSchema bool // is boolean valid schema + vocab []string // built-in vocab + defaultVocab []string // vocabs when $vocabulary is not used + subschemas map[string]position +} + +func (d *Draft) URL() string { + switch d.version { + case 2020: + return "https://json-schema.org/draft/2020-12/schema" + case 2019: + return "https://json-schema.org/draft/2019-09/schema" + case 7: + return "https://json-schema.org/draft-07/schema" + case 6: + return "https://json-schema.org/draft-06/schema" + case 4: + return "https://json-schema.org/draft-04/schema" + } + return "" +} + +func (d *Draft) String() string { + return fmt.Sprintf("Draft%d", d.version) +} + +func (d *Draft) loadMeta(url, schema string) { + c := NewCompiler() + c.AssertFormat = true + if err := c.AddResource(url, strings.NewReader(schema)); err != nil { + panic(err) + } + d.meta = c.MustCompile(url) + d.meta.meta = d.meta +} + +func (d *Draft) getID(sch interface{}) string { + m, ok := sch.(map[string]interface{}) + if !ok { + return "" + } + if _, ok := m["$ref"]; ok && d.version <= 7 { + // $ref prevents a sibling id from changing the base uri + return "" + } + v, ok := m[d.id] + if !ok { + return "" + } + id, ok := v.(string) + if !ok { + return "" + } + return id +} + +func (d *Draft) resolveID(base string, sch interface{}) (string, error) { + id, _ := split(d.getID(sch)) // strip fragment + if id == "" { + return "", nil + } + url, err := resolveURL(base, id) + url, _ = split(url) // strip fragment + return url, err +} + +func (d *Draft) anchors(sch interface{}) []string { + m, ok := sch.(map[string]interface{}) + if !ok { + return nil + } + + var anchors []string + + // before draft2019, anchor is specified in id + _, f := split(d.getID(m)) + if f != "#" { + anchors = append(anchors, f[1:]) 
+ } + + if v, ok := m["$anchor"]; ok && d.version >= 2019 { + anchors = append(anchors, v.(string)) + } + if v, ok := m["$dynamicAnchor"]; ok && d.version >= 2020 { + anchors = append(anchors, v.(string)) + } + return anchors +} + +// listSubschemas collects subschemas in r into rr. +func (d *Draft) listSubschemas(r *resource, base string, rr map[string]*resource) error { + add := func(loc string, sch interface{}) error { + url, err := d.resolveID(base, sch) + if err != nil { + return err + } + floc := r.floc + "/" + loc + sr := &resource{url: url, floc: floc, doc: sch} + rr[floc] = sr + + base := base + if url != "" { + base = url + } + return d.listSubschemas(sr, base, rr) + } + + sch, ok := r.doc.(map[string]interface{}) + if !ok { + return nil + } + for kw, pos := range d.subschemas { + v, ok := sch[kw] + if !ok { + continue + } + if pos&self != 0 { + switch v := v.(type) { + case map[string]interface{}: + if err := add(kw, v); err != nil { + return err + } + case bool: + if d.boolSchema { + if err := add(kw, v); err != nil { + return err + } + } + } + } + if pos&item != 0 { + if v, ok := v.([]interface{}); ok { + for i, item := range v { + if err := add(kw+"/"+strconv.Itoa(i), item); err != nil { + return err + } + } + } + } + if pos&prop != 0 { + if v, ok := v.(map[string]interface{}); ok { + for pname, pval := range v { + if err := add(kw+"/"+escape(pname), pval); err != nil { + return err + } + } + } + } + } + return nil +} + +// isVocab tells whether url is built-in vocab. +func (d *Draft) isVocab(url string) bool { + for _, v := range d.vocab { + if url == v { + return true + } + } + return false +} + +type position uint + +const ( + self position = 1 << iota + prop + item +) + +// supported drafts +var ( + Draft4 = &Draft{version: 4, id: "id", boolSchema: false} + Draft6 = &Draft{version: 6, id: "$id", boolSchema: true} + Draft7 = &Draft{version: 7, id: "$id", boolSchema: true} + Draft2019 = &Draft{ + version: 2019, + id: "$id", + boolSchema: true, + vocab: []string{ + "https://json-schema.org/draft/2019-09/vocab/core", + "https://json-schema.org/draft/2019-09/vocab/applicator", + "https://json-schema.org/draft/2019-09/vocab/validation", + "https://json-schema.org/draft/2019-09/vocab/meta-data", + "https://json-schema.org/draft/2019-09/vocab/format", + "https://json-schema.org/draft/2019-09/vocab/content", + }, + defaultVocab: []string{ + "https://json-schema.org/draft/2019-09/vocab/core", + "https://json-schema.org/draft/2019-09/vocab/applicator", + "https://json-schema.org/draft/2019-09/vocab/validation", + }, + } + Draft2020 = &Draft{ + version: 2020, + id: "$id", + boolSchema: true, + vocab: []string{ + "https://json-schema.org/draft/2020-12/vocab/core", + "https://json-schema.org/draft/2020-12/vocab/applicator", + "https://json-schema.org/draft/2020-12/vocab/unevaluated", + "https://json-schema.org/draft/2020-12/vocab/validation", + "https://json-schema.org/draft/2020-12/vocab/meta-data", + "https://json-schema.org/draft/2020-12/vocab/format-annotation", + "https://json-schema.org/draft/2020-12/vocab/format-assertion", + "https://json-schema.org/draft/2020-12/vocab/content", + }, + defaultVocab: []string{ + "https://json-schema.org/draft/2020-12/vocab/core", + "https://json-schema.org/draft/2020-12/vocab/applicator", + "https://json-schema.org/draft/2020-12/vocab/unevaluated", + "https://json-schema.org/draft/2020-12/vocab/validation", + }, + } + + latest = Draft2020 +) + +func findDraft(url string) *Draft { + if strings.HasPrefix(url, "http://") { + url = "https://" + 
strings.TrimPrefix(url, "http://") + } + if strings.HasSuffix(url, "#") || strings.HasSuffix(url, "#/") { + url = url[:strings.IndexByte(url, '#')] + } + switch url { + case "https://json-schema.org/schema": + return latest + case "https://json-schema.org/draft/2020-12/schema": + return Draft2020 + case "https://json-schema.org/draft/2019-09/schema": + return Draft2019 + case "https://json-schema.org/draft-07/schema": + return Draft7 + case "https://json-schema.org/draft-06/schema": + return Draft6 + case "https://json-schema.org/draft-04/schema": + return Draft4 + } + return nil +} + +func init() { + subschemas := map[string]position{ + // type agnostic + "definitions": prop, + "not": self, + "allOf": item, + "anyOf": item, + "oneOf": item, + // object + "properties": prop, + "additionalProperties": self, + "patternProperties": prop, + // array + "items": self | item, + "additionalItems": self, + "dependencies": prop, + } + Draft4.subschemas = clone(subschemas) + + subschemas["propertyNames"] = self + subschemas["contains"] = self + Draft6.subschemas = clone(subschemas) + + subschemas["if"] = self + subschemas["then"] = self + subschemas["else"] = self + Draft7.subschemas = clone(subschemas) + + subschemas["$defs"] = prop + subschemas["dependentSchemas"] = prop + subschemas["unevaluatedProperties"] = self + subschemas["unevaluatedItems"] = self + subschemas["contentSchema"] = self + Draft2019.subschemas = clone(subschemas) + + subschemas["prefixItems"] = item + Draft2020.subschemas = clone(subschemas) + + Draft4.loadMeta("http://json-schema.org/draft-04/schema", `{ + "$schema": "http://json-schema.org/draft-04/schema#", + "description": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "positiveInteger": { + "type": "integer", + "minimum": 0 + }, + "positiveIntegerDefault0": { + "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] + }, + "simpleTypes": { + "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "minItems": 1, + "uniqueItems": true + } + }, + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uriref" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "minimum": 0, + "exclusiveMinimum": true + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "boolean", + "default": false + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "boolean", + "default": false + }, + "maxLength": { "$ref": "#/definitions/positiveInteger" }, + "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/positiveInteger" }, + "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxProperties": { "$ref": "#/definitions/positiveInteger" }, + "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + 
"additionalProperties": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "regexProperties": true, + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "regexProperties": { "type": "boolean" }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" }, + "format": { "type": "string" }, + "$ref": { "type": "string" } + }, + "dependencies": { + "exclusiveMaximum": [ "maximum" ], + "exclusiveMinimum": [ "minimum" ] + }, + "default": {} + }`) + Draft6.loadMeta("http://json-schema.org/draft-06/schema", `{ + "$schema": "http://json-schema.org/draft-06/schema#", + "$id": "http://json-schema.org/draft-06/schema#", + "title": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "allOf": [ + { "$ref": "#/definitions/nonNegativeInteger" }, + { "default": 0 } + ] + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + }, + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, + "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { "$ref": "#" }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, + "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "contains": { "$ref": "#" }, + "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, + "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { "$ref": "#" }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + 
"type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "regexProperties": true, + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "propertyNames": { "$ref": "#" }, + "const": {}, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "default": {} + }`) + Draft7.loadMeta("http://json-schema.org/draft-07/schema", `{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://json-schema.org/draft-07/schema#", + "title": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "allOf": [ + { "$ref": "#/definitions/nonNegativeInteger" }, + { "default": 0 } + ] + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + }, + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "$comment": { + "type": "string" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "readOnly": { + "type": "boolean", + "default": false + }, + "writeOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + }, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, + "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { "$ref": "#" }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": true + }, + "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, + "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "contains": { "$ref": "#" }, + "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, + "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { "$ref": "#" }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + 
"additionalProperties": { "$ref": "#" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "propertyNames": { "$ref": "#" }, + "const": true, + "enum": { + "type": "array", + "items": true, + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "contentMediaType": { "type": "string" }, + "contentEncoding": { "type": "string" }, + "if": { "$ref": "#" }, + "then": { "$ref": "#" }, + "else": { "$ref": "#" }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "default": true + }`) + Draft2019.loadMeta("https://json-schema.org/draft/2019-09/schema", `{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/schema", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/core": true, + "https://json-schema.org/draft/2019-09/vocab/applicator": true, + "https://json-schema.org/draft/2019-09/vocab/validation": true, + "https://json-schema.org/draft/2019-09/vocab/meta-data": true, + "https://json-schema.org/draft/2019-09/vocab/format": false, + "https://json-schema.org/draft/2019-09/vocab/content": true + }, + "$recursiveAnchor": true, + + "title": "Core and Validation specifications meta-schema", + "allOf": [ + {"$ref": "meta/core"}, + {"$ref": "meta/applicator"}, + {"$ref": "meta/validation"}, + {"$ref": "meta/meta-data"}, + {"$ref": "meta/format"}, + {"$ref": "meta/content"} + ], + "type": ["object", "boolean"], + "properties": { + "definitions": { + "$comment": "While no longer an official keyword as it is replaced by $defs, this keyword is retained in the meta-schema to prevent incompatible extensions as it remains in common use.", + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "default": {} + }, + "dependencies": { + "$comment": "\"dependencies\" is no longer a keyword, but schema authors should avoid redefining it to facilitate a smooth transition to \"dependentSchemas\" and \"dependentRequired\"", + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$recursiveRef": "#" }, + { "$ref": "meta/validation#/$defs/stringArray" } + ] + } + } + } + }`) + Draft2020.loadMeta("https://json-schema.org/draft/2020-12/schema", `{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/schema", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/core": true, + "https://json-schema.org/draft/2020-12/vocab/applicator": true, + "https://json-schema.org/draft/2020-12/vocab/unevaluated": true, + "https://json-schema.org/draft/2020-12/vocab/validation": true, + "https://json-schema.org/draft/2020-12/vocab/meta-data": true, + "https://json-schema.org/draft/2020-12/vocab/format-annotation": true, + "https://json-schema.org/draft/2020-12/vocab/content": true + }, + "$dynamicAnchor": "meta", + + "title": "Core and Validation specifications meta-schema", + "allOf": [ + {"$ref": "meta/core"}, + {"$ref": "meta/applicator"}, + {"$ref": "meta/unevaluated"}, + {"$ref": "meta/validation"}, + {"$ref": "meta/meta-data"}, + {"$ref": 
"meta/format-annotation"}, + {"$ref": "meta/content"} + ], + "type": ["object", "boolean"], + "$comment": "This meta-schema also defines keywords that have appeared in previous drafts in order to prevent incompatible extensions as they remain in common use.", + "properties": { + "definitions": { + "$comment": "\"definitions\" has been replaced by \"$defs\".", + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "deprecated": true, + "default": {} + }, + "dependencies": { + "$comment": "\"dependencies\" has been split and replaced by \"dependentSchemas\" and \"dependentRequired\" in order to serve their differing semantics.", + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$dynamicRef": "#meta" }, + { "$ref": "meta/validation#/$defs/stringArray" } + ] + }, + "deprecated": true, + "default": {} + }, + "$recursiveAnchor": { + "$comment": "\"$recursiveAnchor\" has been replaced by \"$dynamicAnchor\".", + "$ref": "meta/core#/$defs/anchorString", + "deprecated": true + }, + "$recursiveRef": { + "$comment": "\"$recursiveRef\" has been replaced by \"$dynamicRef\".", + "$ref": "meta/core#/$defs/uriReferenceString", + "deprecated": true + } + } + }`) +} + +var vocabSchemas = map[string]string{ + "https://json-schema.org/draft/2019-09/meta/core": `{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/core", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/core": true + }, + "$recursiveAnchor": true, + + "title": "Core vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference", + "$comment": "Non-empty fragments not allowed.", + "pattern": "^[^#]*#?$" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$anchor": { + "type": "string", + "pattern": "^[A-Za-z][-A-Za-z0-9.:_]*$" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "$recursiveRef": { + "type": "string", + "format": "uri-reference" + }, + "$recursiveAnchor": { + "type": "boolean", + "default": false + }, + "$vocabulary": { + "type": "object", + "propertyNames": { + "type": "string", + "format": "uri" + }, + "additionalProperties": { + "type": "boolean" + } + }, + "$comment": { + "type": "string" + }, + "$defs": { + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "default": {} + } + } + }`, + "https://json-schema.org/draft/2019-09/meta/applicator": `{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/applicator", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/applicator": true + }, + "$recursiveAnchor": true, + + "title": "Applicator vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "additionalItems": { "$recursiveRef": "#" }, + "unevaluatedItems": { "$recursiveRef": "#" }, + "items": { + "anyOf": [ + { "$recursiveRef": "#" }, + { "$ref": "#/$defs/schemaArray" } + ] + }, + "contains": { "$recursiveRef": "#" }, + "additionalProperties": { "$recursiveRef": "#" }, + "unevaluatedProperties": { "$recursiveRef": "#" }, + "properties": { + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependentSchemas": { + "type": "object", + "additionalProperties": { + "$recursiveRef": "#" + } + }, + 
"propertyNames": { "$recursiveRef": "#" }, + "if": { "$recursiveRef": "#" }, + "then": { "$recursiveRef": "#" }, + "else": { "$recursiveRef": "#" }, + "allOf": { "$ref": "#/$defs/schemaArray" }, + "anyOf": { "$ref": "#/$defs/schemaArray" }, + "oneOf": { "$ref": "#/$defs/schemaArray" }, + "not": { "$recursiveRef": "#" } + }, + "$defs": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$recursiveRef": "#" } + } + } + }`, + "https://json-schema.org/draft/2019-09/meta/validation": `{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/validation", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/validation": true + }, + "$recursiveAnchor": true, + + "title": "Validation vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/$defs/nonNegativeInteger" }, + "minLength": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "maxItems": { "$ref": "#/$defs/nonNegativeInteger" }, + "minItems": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxContains": { "$ref": "#/$defs/nonNegativeInteger" }, + "minContains": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 1 + }, + "maxProperties": { "$ref": "#/$defs/nonNegativeInteger" }, + "minProperties": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/$defs/stringArray" }, + "dependentRequired": { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/stringArray" + } + }, + "const": true, + "enum": { + "type": "array", + "items": true + }, + "type": { + "anyOf": [ + { "$ref": "#/$defs/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/$defs/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + } + }, + "$defs": { + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 0 + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + } + }`, + "https://json-schema.org/draft/2019-09/meta/meta-data": `{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/meta-data", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/meta-data": true + }, + "$recursiveAnchor": true, + + "title": "Meta-data vocabulary meta-schema", + + "type": ["object", "boolean"], + "properties": { + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "deprecated": { + "type": "boolean", + "default": false + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "writeOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + } + } + }`, + "https://json-schema.org/draft/2019-09/meta/format": `{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/format", + "$vocabulary": { + 
"https://json-schema.org/draft/2019-09/vocab/format": true + }, + "$recursiveAnchor": true, + + "title": "Format vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "format": { "type": "string" } + } + }`, + "https://json-schema.org/draft/2019-09/meta/content": `{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/content", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/content": true + }, + "$recursiveAnchor": true, + + "title": "Content vocabulary meta-schema", + + "type": ["object", "boolean"], + "properties": { + "contentMediaType": { "type": "string" }, + "contentEncoding": { "type": "string" }, + "contentSchema": { "$recursiveRef": "#" } + } + }`, + "https://json-schema.org/draft/2020-12/meta/core": `{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/core", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/core": true + }, + "$dynamicAnchor": "meta", + + "title": "Core vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "$id": { + "$ref": "#/$defs/uriReferenceString", + "$comment": "Non-empty fragments not allowed.", + "pattern": "^[^#]*#?$" + }, + "$schema": { "$ref": "#/$defs/uriString" }, + "$ref": { "$ref": "#/$defs/uriReferenceString" }, + "$anchor": { "$ref": "#/$defs/anchorString" }, + "$dynamicRef": { "$ref": "#/$defs/uriReferenceString" }, + "$dynamicAnchor": { "$ref": "#/$defs/anchorString" }, + "$vocabulary": { + "type": "object", + "propertyNames": { "$ref": "#/$defs/uriString" }, + "additionalProperties": { + "type": "boolean" + } + }, + "$comment": { + "type": "string" + }, + "$defs": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" } + } + }, + "$defs": { + "anchorString": { + "type": "string", + "pattern": "^[A-Za-z_][-A-Za-z0-9._]*$" + }, + "uriString": { + "type": "string", + "format": "uri" + }, + "uriReferenceString": { + "type": "string", + "format": "uri-reference" + } + } + }`, + "https://json-schema.org/draft/2020-12/meta/applicator": `{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/applicator", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/applicator": true + }, + "$dynamicAnchor": "meta", + + "title": "Applicator vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "prefixItems": { "$ref": "#/$defs/schemaArray" }, + "items": { "$dynamicRef": "#meta" }, + "contains": { "$dynamicRef": "#meta" }, + "additionalProperties": { "$dynamicRef": "#meta" }, + "properties": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependentSchemas": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "default": {} + }, + "propertyNames": { "$dynamicRef": "#meta" }, + "if": { "$dynamicRef": "#meta" }, + "then": { "$dynamicRef": "#meta" }, + "else": { "$dynamicRef": "#meta" }, + "allOf": { "$ref": "#/$defs/schemaArray" }, + "anyOf": { "$ref": "#/$defs/schemaArray" }, + "oneOf": { "$ref": "#/$defs/schemaArray" }, + "not": { "$dynamicRef": "#meta" } + }, + "$defs": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$dynamicRef": "#meta" } + } + } + }`, + 
"https://json-schema.org/draft/2020-12/meta/unevaluated": `{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/unevaluated", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/unevaluated": true + }, + "$dynamicAnchor": "meta", + + "title": "Unevaluated applicator vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "unevaluatedItems": { "$dynamicRef": "#meta" }, + "unevaluatedProperties": { "$dynamicRef": "#meta" } + } + }`, + "https://json-schema.org/draft/2020-12/meta/validation": `{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/validation", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/validation": true + }, + "$dynamicAnchor": "meta", + + "title": "Validation vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "type": { + "anyOf": [ + { "$ref": "#/$defs/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/$defs/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "const": true, + "enum": { + "type": "array", + "items": true + }, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/$defs/nonNegativeInteger" }, + "minLength": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "maxItems": { "$ref": "#/$defs/nonNegativeInteger" }, + "minItems": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxContains": { "$ref": "#/$defs/nonNegativeInteger" }, + "minContains": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 1 + }, + "maxProperties": { "$ref": "#/$defs/nonNegativeInteger" }, + "minProperties": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/$defs/stringArray" }, + "dependentRequired": { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/stringArray" + } + } + }, + "$defs": { + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 0 + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + } + }`, + "https://json-schema.org/draft/2020-12/meta/meta-data": `{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/meta-data", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/meta-data": true + }, + "$dynamicAnchor": "meta", + + "title": "Meta-data vocabulary meta-schema", + + "type": ["object", "boolean"], + "properties": { + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "deprecated": { + "type": "boolean", + "default": false + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "writeOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + } + } + }`, + "https://json-schema.org/draft/2020-12/meta/format-annotation": `{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": 
"https://json-schema.org/draft/2020-12/meta/format-annotation", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/format-annotation": true + }, + "$dynamicAnchor": "meta", + + "title": "Format vocabulary meta-schema for annotation results", + "type": ["object", "boolean"], + "properties": { + "format": { "type": "string" } + } + }`, + "https://json-schema.org/draft/2020-12/meta/format-assertion": `{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/format-assertion", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/format-assertion": true + }, + "$dynamicAnchor": "meta", + + "title": "Format vocabulary meta-schema for assertion results", + "type": ["object", "boolean"], + "properties": { + "format": { "type": "string" } + } + }`, + "https://json-schema.org/draft/2020-12/meta/content": `{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/content", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/content": true + }, + "$dynamicAnchor": "meta", + + "title": "Content vocabulary meta-schema", + + "type": ["object", "boolean"], + "properties": { + "contentEncoding": { "type": "string" }, + "contentMediaType": { "type": "string" }, + "contentSchema": { "$dynamicRef": "#meta" } + } + }`, +} + +func clone(m map[string]position) map[string]position { + mm := make(map[string]position) + for k, v := range m { + mm[k] = v + } + return mm +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/errors.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/errors.go new file mode 100644 index 0000000000..deaded89f7 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/errors.go @@ -0,0 +1,129 @@ +package jsonschema + +import ( + "fmt" + "strings" +) + +// InvalidJSONTypeError is the error type returned by ValidateInterface. +// this tells that specified go object is not valid jsonType. +type InvalidJSONTypeError string + +func (e InvalidJSONTypeError) Error() string { + return fmt.Sprintf("jsonschema: invalid jsonType: %s", string(e)) +} + +// InfiniteLoopError is returned by Compile/Validate. +// this gives url#keywordLocation that lead to infinity loop. +type InfiniteLoopError string + +func (e InfiniteLoopError) Error() string { + return "jsonschema: infinite loop " + string(e) +} + +func infiniteLoopError(stack []schemaRef, sref schemaRef) InfiniteLoopError { + var path string + for _, ref := range stack { + if path == "" { + path += ref.schema.Location + } else { + path += "/" + ref.path + } + } + return InfiniteLoopError(path + "/" + sref.path) +} + +// SchemaError is the error type returned by Compile. +type SchemaError struct { + // SchemaURL is the url to json-schema that filed to compile. + // This is helpful, if your schema refers to external schemas + SchemaURL string + + // Err is the error that occurred during compilation. 
+ // It could be ValidationError, because compilation validates + // given schema against the json meta-schema + Err error +} + +func (se *SchemaError) Unwrap() error { + return se.Err +} + +func (se *SchemaError) Error() string { + s := fmt.Sprintf("jsonschema %s compilation failed", se.SchemaURL) + if se.Err != nil { + return fmt.Sprintf("%s: %v", s, strings.TrimPrefix(se.Err.Error(), "jsonschema: ")) + } + return s +} + +func (se *SchemaError) GoString() string { + if _, ok := se.Err.(*ValidationError); ok { + return fmt.Sprintf("jsonschema %s compilation failed\n%#v", se.SchemaURL, se.Err) + } + return se.Error() +} + +// ValidationError is the error type returned by Validate. +type ValidationError struct { + KeywordLocation string // validation path of validating keyword or schema + AbsoluteKeywordLocation string // absolute location of validating keyword or schema + InstanceLocation string // location of the json value within the instance being validated + Message string // describes error + Causes []*ValidationError // nested validation errors +} + +func (ve *ValidationError) add(causes ...error) error { + for _, cause := range causes { + ve.Causes = append(ve.Causes, cause.(*ValidationError)) + } + return ve +} + +func (ve *ValidationError) causes(err error) error { + if err := err.(*ValidationError); err.Message == "" { + ve.Causes = err.Causes + } else { + ve.add(err) + } + return ve +} + +func (ve *ValidationError) Error() string { + leaf := ve + for len(leaf.Causes) > 0 { + leaf = leaf.Causes[0] + } + u, _ := split(ve.AbsoluteKeywordLocation) + return fmt.Sprintf("jsonschema: %s does not validate with %s: %s", quote(leaf.InstanceLocation), u+"#"+leaf.KeywordLocation, leaf.Message) +} + +func (ve *ValidationError) GoString() string { + sloc := ve.AbsoluteKeywordLocation + sloc = sloc[strings.IndexByte(sloc, '#')+1:] + msg := fmt.Sprintf("[I#%s] [S#%s] %s", ve.InstanceLocation, sloc, ve.Message) + for _, c := range ve.Causes { + for _, line := range strings.Split(c.GoString(), "\n") { + msg += "\n " + line + } + } + return msg +} + +func joinPtr(ptr1, ptr2 string) string { + if len(ptr1) == 0 { + return ptr2 + } + if len(ptr2) == 0 { + return ptr1 + } + return ptr1 + "/" + ptr2 +} + +// quote returns single-quoted string +func quote(s string) string { + s = fmt.Sprintf("%q", s) + s = strings.ReplaceAll(s, `\"`, `"`) + s = strings.ReplaceAll(s, `'`, `\'`) + return "'" + s[1:len(s)-1] + "'" +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/extension.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/extension.go new file mode 100644 index 0000000000..452ba118c5 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/extension.go @@ -0,0 +1,116 @@ +package jsonschema + +// ExtCompiler compiles custom keyword(s) into ExtSchema. +type ExtCompiler interface { + // Compile compiles the custom keywords in schema m and returns its compiled representation. + // if the schema m does not contain the keywords defined by this extension, + // compiled representation nil should be returned. + Compile(ctx CompilerContext, m map[string]interface{}) (ExtSchema, error) +} + +// ExtSchema is schema representation of custom keyword(s) +type ExtSchema interface { + // Validate validates the json value v with this ExtSchema. + // Returned error must be *ValidationError. + Validate(ctx ValidationContext, v interface{}) error +} + +type extension struct { + meta *Schema + compiler ExtCompiler +} + +// RegisterExtension registers custom keyword(s) into this compiler. 
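+// A worked sketch of an extension appears at the end of this file, just
+// above the Group helper.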
+//
+// name is the extension name, used only to avoid name collisions.
+// meta captures the metaschema for the new keywords.
+// This is used to validate the schema before calling ext.Compile.
+func (c *Compiler) RegisterExtension(name string, meta *Schema, ext ExtCompiler) {
+ c.extensions[name] = extension{meta, ext}
+}
+
+// CompilerContext ---
+
+// CompilerContext provides additional context required in compiling for extension.
+type CompilerContext struct {
+ c *Compiler
+ r *resource
+ stack []schemaRef
+ res *resource
+}
+
+// Compile compiles the given value at ptr into a *Schema. This is useful in
+// implementing keywords like allOf/not/patternProperties.
+//
+// schPath is the relative-json-pointer to the schema to be compiled from parent schema.
+//
+// applicableOnSameInstance tells whether the current schema and the given schema
+// are applied to the same instance value. This is used to detect infinite loops in the schema.
+func (ctx CompilerContext) Compile(schPath string, applicableOnSameInstance bool) (*Schema, error) {
+ var stack []schemaRef
+ if applicableOnSameInstance {
+ stack = ctx.stack
+ }
+ return ctx.c.compileRef(ctx.r, stack, schPath, ctx.res, ctx.r.url+ctx.res.floc+"/"+schPath)
+}
+
+// CompileRef compiles the schema referenced by the ref uri.
+//
+// refPath is the relative-json-pointer to ref.
+//
+// applicableOnSameInstance tells whether the current schema and the given schema
+// are applied to the same instance value. This is used to detect infinite loops in the schema.
+func (ctx CompilerContext) CompileRef(ref string, refPath string, applicableOnSameInstance bool) (*Schema, error) {
+ var stack []schemaRef
+ if applicableOnSameInstance {
+ stack = ctx.stack
+ }
+ return ctx.c.compileRef(ctx.r, stack, refPath, ctx.res, ref)
+}
+
+// ValidationContext ---
+
+// ValidationContext provides additional context required in validating for extension.
+type ValidationContext struct {
+ result validationResult
+ validate func(sch *Schema, schPath string, v interface{}, vpath string) error
+ validateInplace func(sch *Schema, schPath string) error
+ validationError func(keywordPath string, format string, a ...interface{}) *ValidationError
+}
+
+// EvaluatedProp marks given property of object as evaluated.
+func (ctx ValidationContext) EvaluatedProp(prop string) {
+ delete(ctx.result.unevalProps, prop)
+}
+
+// EvaluatedItem marks given index of array as evaluated.
+func (ctx ValidationContext) EvaluatedItem(index int) {
+ delete(ctx.result.unevalItems, index)
+}
+
+// Validate validates schema s with value v. Extensions must use this method instead of
+// the *Schema.Validate method. This is useful in implementing keywords like
+// allOf/oneOf.
+//
+// spath is the relative-json-pointer to s.
+// vpath is the relative-json-pointer to v.
+func (ctx ValidationContext) Validate(s *Schema, spath string, v interface{}, vpath string) error {
+ if vpath == "" {
+ return ctx.validateInplace(s, spath)
+ }
+ return ctx.validate(s, spath, v, vpath)
+}
+
+// Error is used by extensions to construct a validation error.
+//
+// keywordPath is the relative-json-pointer to the keyword.
+func (ctx ValidationContext) Error(keywordPath string, format string, a ...interface{}) *ValidationError {
+ return ctx.validationError(keywordPath, format, a...)
+}
+
+// Group is used by extensions to group multiple errors as causes to parent error.
+// This is useful in implementing keywords like allOf where each schema specified
+// in allOf can result in a ValidationError.
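+//
+// A worked editorial sketch of an extension implementing a hypothetical
+// "powerOf" keyword (hedged; powSchema, powCompiler and powMeta are not
+// part of this library, and instance numbers are assumed to decode as
+// json.Number):
+//
+//	type powSchema int64
+//
+//	func (s powSchema) Validate(ctx jsonschema.ValidationContext, v interface{}) error {
+//		n, ok := v.(json.Number)
+//		if !ok {
+//			return nil // the keyword constrains numbers only
+//		}
+//		i, err := n.Int64()
+//		pow := int64(s)
+//		if err != nil || pow < 2 {
+//			return nil
+//		}
+//		for i > 1 && i%pow == 0 {
+//			i /= pow
+//		}
+//		if i != 1 {
+//			return ctx.Error("powerOf", "%v is not a power of %d", v, pow)
+//		}
+//		return nil
+//	}
+//
+//	type powCompiler struct{}
+//
+//	func (powCompiler) Compile(ctx jsonschema.CompilerContext, m map[string]interface{}) (jsonschema.ExtSchema, error) {
+//		if pow, ok := m["powerOf"]; ok {
+//			n, _ := pow.(json.Number).Int64()
+//			return powSchema(n), nil
+//		}
+//		return nil, nil // keyword absent: nil compiled representation
+//	}
+//
+// Registration (powMeta is assumed to be a *Schema compiled from a small
+// meta-schema for the keyword):
+//
+//	c := jsonschema.NewCompiler()
+//	c.RegisterExtension("powerOf", powMeta, powCompiler{})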
+func (ValidationError) Group(parent *ValidationError, causes ...error) error { + return parent.add(causes...) +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/format.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/format.go new file mode 100644 index 0000000000..05686073f0 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/format.go @@ -0,0 +1,567 @@ +package jsonschema + +import ( + "errors" + "net" + "net/mail" + "net/url" + "regexp" + "strconv" + "strings" + "time" +) + +// Formats is a registry of functions, which know how to validate +// a specific format. +// +// New Formats can be registered by adding to this map. Key is format name, +// value is function that knows how to validate that format. +var Formats = map[string]func(interface{}) bool{ + "date-time": isDateTime, + "date": isDate, + "time": isTime, + "duration": isDuration, + "period": isPeriod, + "hostname": isHostname, + "email": isEmail, + "ip-address": isIPV4, + "ipv4": isIPV4, + "ipv6": isIPV6, + "uri": isURI, + "iri": isURI, + "uri-reference": isURIReference, + "uriref": isURIReference, + "iri-reference": isURIReference, + "uri-template": isURITemplate, + "regex": isRegex, + "json-pointer": isJSONPointer, + "relative-json-pointer": isRelativeJSONPointer, + "uuid": isUUID, +} + +// isDateTime tells whether given string is a valid date representation +// as defined by RFC 3339, section 5.6. +// +// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6, for details +func isDateTime(v interface{}) bool { + s, ok := v.(string) + if !ok { + return true + } + if len(s) < 20 { // yyyy-mm-ddThh:mm:ssZ + return false + } + if s[10] != 'T' && s[10] != 't' { + return false + } + return isDate(s[:10]) && isTime(s[11:]) +} + +// isDate tells whether given string is a valid full-date production +// as defined by RFC 3339, section 5.6. +// +// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6, for details +func isDate(v interface{}) bool { + s, ok := v.(string) + if !ok { + return true + } + _, err := time.Parse("2006-01-02", s) + return err == nil +} + +// isTime tells whether given string is a valid full-time production +// as defined by RFC 3339, section 5.6. +// +// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6, for details +func isTime(v interface{}) bool { + str, ok := v.(string) + if !ok { + return true + } + + // golang time package does not support leap seconds. + // so we are parsing it manually here. + + // hh:mm:ss + // 01234567 + if len(str) < 9 || str[2] != ':' || str[5] != ':' { + return false + } + isInRange := func(str string, min, max int) (int, bool) { + n, err := strconv.Atoi(str) + if err != nil { + return 0, false + } + if n < min || n > max { + return 0, false + } + return n, true + } + var h, m, s int + if h, ok = isInRange(str[0:2], 0, 23); !ok { + return false + } + if m, ok = isInRange(str[3:5], 0, 59); !ok { + return false + } + if s, ok = isInRange(str[6:8], 0, 60); !ok { + return false + } + str = str[8:] + + // parse secfrac if present + if str[0] == '.' 
{ + // dot following more than one digit + str = str[1:] + var numDigits int + for str != "" { + if str[0] < '0' || str[0] > '9' { + break + } + numDigits++ + str = str[1:] + } + if numDigits == 0 { + return false + } + } + + if len(str) == 0 { + return false + } + + if str[0] == 'z' || str[0] == 'Z' { + if len(str) != 1 { + return false + } + } else { + // time-numoffset + // +hh:mm + // 012345 + if len(str) != 6 || str[3] != ':' { + return false + } + + var sign int + if str[0] == '+' { + sign = -1 + } else if str[0] == '-' { + sign = +1 + } else { + return false + } + + var zh, zm int + if zh, ok = isInRange(str[1:3], 0, 23); !ok { + return false + } + if zm, ok = isInRange(str[4:6], 0, 59); !ok { + return false + } + + // apply timezone offset + hm := (h*60 + m) + sign*(zh*60+zm) + if hm < 0 { + hm += 24 * 60 + } + h, m = hm/60, hm%60 + } + + // check leapsecond + if s == 60 { // leap second + if h != 23 || m != 59 { + return false + } + } + + return true +} + +// isDuration tells whether given string is a valid duration format +// from the ISO 8601 ABNF as given in Appendix A of RFC 3339. +// +// see https://datatracker.ietf.org/doc/html/rfc3339#appendix-A, for details +func isDuration(v interface{}) bool { + s, ok := v.(string) + if !ok { + return true + } + if len(s) == 0 || s[0] != 'P' { + return false + } + s = s[1:] + parseUnits := func() (units string, ok bool) { + for len(s) > 0 && s[0] != 'T' { + digits := false + for { + if len(s) == 0 { + break + } + if s[0] < '0' || s[0] > '9' { + break + } + digits = true + s = s[1:] + } + if !digits || len(s) == 0 { + return units, false + } + units += s[:1] + s = s[1:] + } + return units, true + } + units, ok := parseUnits() + if !ok { + return false + } + if units == "W" { + return len(s) == 0 // P_W + } + if len(units) > 0 { + if strings.Index("YMD", units) == -1 { + return false + } + if len(s) == 0 { + return true // "P" dur-date + } + } + if len(s) == 0 || s[0] != 'T' { + return false + } + s = s[1:] + units, ok = parseUnits() + return ok && len(s) == 0 && len(units) > 0 && strings.Index("HMS", units) != -1 +} + +// isPeriod tells whether given string is a valid period format +// from the ISO 8601 ABNF as given in Appendix A of RFC 3339. +// +// see https://datatracker.ietf.org/doc/html/rfc3339#appendix-A, for details +func isPeriod(v interface{}) bool { + s, ok := v.(string) + if !ok { + return true + } + slash := strings.IndexByte(s, '/') + if slash == -1 { + return false + } + start, end := s[:slash], s[slash+1:] + if isDateTime(start) { + return isDateTime(end) || isDuration(end) + } + return isDuration(start) && isDateTime(end) +} + +// isHostname tells whether given string is a valid representation +// for an Internet host name, as defined by RFC 1034 section 3.1 and +// RFC 1123 section 2.1. +// +// See https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names, for details. 
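+//
+// Entries in the public Formats registry can be exercised directly, and new
+// formats registered by assignment; a hedged sketch (the "even" format is
+// hypothetical, and numbers are assumed to decode as json.Number):
+//
+//	ok := jsonschema.Formats["hostname"]("example.com") // true
+//
+//	jsonschema.Formats["even"] = func(v interface{}) bool {
+//		n, ok := v.(json.Number)
+//		if !ok {
+//			return true // convention: non-applicable types pass
+//		}
+//		i, err := n.Int64()
+//		return err == nil && i%2 == 0
+//	}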
+func isHostname(v interface{}) bool {
+ s, ok := v.(string)
+ if !ok {
+ return true
+ }
+ // entire hostname (including the delimiting dots but not a trailing dot) has a maximum of 253 ASCII characters
+ s = strings.TrimSuffix(s, ".")
+ if len(s) > 253 {
+ return false
+ }
+
+ // Hostnames are composed of a series of labels concatenated with dots, as are all domain names
+ for _, label := range strings.Split(s, ".") {
+ // Each label must be from 1 to 63 characters long
+ if labelLen := len(label); labelLen < 1 || labelLen > 63 {
+ return false
+ }
+
+ // labels must not start with a hyphen
+ // RFC 1123 section 2.1: restriction on the first character
+ // is relaxed to allow either a letter or a digit
+ if first := label[0]; first == '-' {
+ return false
+ }
+
+ // must not end with a hyphen
+ if label[len(label)-1] == '-' {
+ return false
+ }
+
+ // labels may contain only the ASCII letters 'a' through 'z' (in a case-insensitive manner),
+ // the digits '0' through '9', and the hyphen ('-')
+ for _, c := range label {
+ if valid := (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || (c == '-'); !valid {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+// isEmail tells whether given string is a valid Internet email address
+// as defined by RFC 5322, section 3.4.1.
+//
+// See https://en.wikipedia.org/wiki/Email_address, for details.
+func isEmail(v interface{}) bool {
+ s, ok := v.(string)
+ if !ok {
+ return true
+ }
+ // the entire email address must be no more than 254 characters long
+ if len(s) > 254 {
+ return false
+ }
+
+ // email address is generally recognized as having two parts joined with an at-sign
+ at := strings.LastIndexByte(s, '@')
+ if at == -1 {
+ return false
+ }
+ local := s[0:at]
+ domain := s[at+1:]
+
+ // local part may be up to 64 characters long
+ if len(local) > 64 {
+ return false
+ }
+
+ // the domain, if enclosed in brackets, must match an IP address
+ if len(domain) >= 2 && domain[0] == '[' && domain[len(domain)-1] == ']' {
+ ip := domain[1 : len(domain)-1]
+ if strings.HasPrefix(ip, "IPv6:") {
+ return isIPV6(strings.TrimPrefix(ip, "IPv6:"))
+ }
+ return isIPV4(ip)
+ }
+
+ // domain must match the requirements for a hostname
+ if !isHostname(domain) {
+ return false
+ }
+
+ _, err := mail.ParseAddress(s)
+ return err == nil
+}
+
+// isIPV4 tells whether given string is a valid representation of an IPv4 address
+// according to the "dotted-quad" ABNF syntax as defined in RFC 2673, section 3.2.
+func isIPV4(v interface{}) bool {
+ s, ok := v.(string)
+ if !ok {
+ return true
+ }
+ groups := strings.Split(s, ".")
+ if len(groups) != 4 {
+ return false
+ }
+ for _, group := range groups {
+ n, err := strconv.Atoi(group)
+ if err != nil {
+ return false
+ }
+ if n < 0 || n > 255 {
+ return false
+ }
+ if n != 0 && group[0] == '0' {
+ return false // leading zeroes should be rejected, as they are treated as octals
+ }
+ }
+ return true
+}
+
+// isIPV6 tells whether given string is a valid representation of an IPv6 address
+// as defined in RFC 2373, section 2.2.
+func isIPV6(v interface{}) bool {
+ s, ok := v.(string)
+ if !ok {
+ return true
+ }
+ if !strings.Contains(s, ":") {
+ return false
+ }
+ return net.ParseIP(s) != nil
+}
+
+// isURI tells whether the given string is a valid URI, according to RFC 3986.
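+//
+// A hedged sketch of exercising it through the public Formats registry:
+//
+//	jsonschema.Formats["uri"]("https://example.com/a#b") // true: absolute
+//	jsonschema.Formats["uri"]("/relative/only")          // false: not absolute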
+func isURI(v interface{}) bool { + s, ok := v.(string) + if !ok { + return true + } + u, err := urlParse(s) + return err == nil && u.IsAbs() +} + +func urlParse(s string) (*url.URL, error) { + u, err := url.Parse(s) + if err != nil { + return nil, err + } + + // if hostname is ipv6, validate it + hostname := u.Hostname() + if strings.IndexByte(hostname, ':') != -1 { + if strings.IndexByte(u.Host, '[') == -1 || strings.IndexByte(u.Host, ']') == -1 { + return nil, errors.New("ipv6 address is not enclosed in brackets") + } + if !isIPV6(hostname) { + return nil, errors.New("invalid ipv6 address") + } + } + return u, nil +} + +// isURIReference tells whether given string is a valid URI Reference +// (either a URI or a relative-reference), according to RFC 3986. +func isURIReference(v interface{}) bool { + s, ok := v.(string) + if !ok { + return true + } + _, err := urlParse(s) + return err == nil && !strings.Contains(s, `\`) +} + +// isURITemplate tells whether given string is a valid URI Template +// according to RFC6570. +// +// Current implementation does minimal validation. +func isURITemplate(v interface{}) bool { + s, ok := v.(string) + if !ok { + return true + } + u, err := urlParse(s) + if err != nil { + return false + } + for _, item := range strings.Split(u.RawPath, "/") { + depth := 0 + for _, ch := range item { + switch ch { + case '{': + depth++ + if depth != 1 { + return false + } + case '}': + depth-- + if depth != 0 { + return false + } + } + } + if depth != 0 { + return false + } + } + return true +} + +// isRegex tells whether given string is a valid regular expression, +// according to the ECMA 262 regular expression dialect. +// +// The implementation uses go-lang regexp package. +func isRegex(v interface{}) bool { + s, ok := v.(string) + if !ok { + return true + } + _, err := regexp.Compile(s) + return err == nil +} + +// isJSONPointer tells whether given string is a valid JSON Pointer. +// +// Note: It returns false for JSON Pointer URI fragments. +func isJSONPointer(v interface{}) bool { + s, ok := v.(string) + if !ok { + return true + } + if s != "" && !strings.HasPrefix(s, "/") { + return false + } + for _, item := range strings.Split(s, "/") { + for i := 0; i < len(item); i++ { + if item[i] == '~' { + if i == len(item)-1 { + return false + } + switch item[i+1] { + case '0', '1': + // valid + default: + return false + } + } + } + } + return true +} + +// isRelativeJSONPointer tells whether given string is a valid Relative JSON Pointer. +// +// see https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01#section-3 +func isRelativeJSONPointer(v interface{}) bool { + s, ok := v.(string) + if !ok { + return true + } + if s == "" { + return false + } + if s[0] == '0' { + s = s[1:] + } else if s[0] >= '0' && s[0] <= '9' { + for s != "" && s[0] >= '0' && s[0] <= '9' { + s = s[1:] + } + } else { + return false + } + return s == "#" || isJSONPointer(s) +} + +// isUUID tells whether given string is a valid uuid format +// as specified in RFC4122. 
+//
+// see https://datatracker.ietf.org/doc/html/rfc4122#page-4, for details
+func isUUID(v interface{}) bool {
+ s, ok := v.(string)
+ if !ok {
+ return true
+ }
+ parseHex := func(n int) bool {
+ for n > 0 {
+ if len(s) == 0 {
+ return false
+ }
+ hex := (s[0] >= '0' && s[0] <= '9') || (s[0] >= 'a' && s[0] <= 'f') || (s[0] >= 'A' && s[0] <= 'F')
+ if !hex {
+ return false
+ }
+ s = s[1:]
+ n--
+ }
+ return true
+ }
+ groups := []int{8, 4, 4, 4, 12}
+ for i, numDigits := range groups {
+ if !parseHex(numDigits) {
+ return false
+ }
+ if i == len(groups)-1 {
+ break
+ }
+ if len(s) == 0 || s[0] != '-' {
+ return false
+ }
+ s = s[1:]
+ }
+ return len(s) == 0
+}
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/httploader/httploader.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/httploader/httploader.go
new file mode 100644
index 0000000000..4198cfe37c
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/httploader/httploader.go
@@ -0,0 +1,38 @@
+// Package httploader implements loader.Loader for http/https urls.
+//
+// The package is typically only imported for the side effect of
+// registering its Loaders.
+//
+// To use httploader, link this package into your program:
+//
+// import _ "github.com/santhosh-tekuri/jsonschema/v5/httploader"
+package httploader
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/santhosh-tekuri/jsonschema/v5"
+)
+
+// Client is the default HTTP Client used to Get the resource.
+var Client = http.DefaultClient
+
+// Load loads the resource from the given http(s) url.
+func Load(url string) (io.ReadCloser, error) {
+ resp, err := Client.Get(url)
+ if err != nil {
+ return nil, err
+ }
+ if resp.StatusCode != http.StatusOK {
+ _ = resp.Body.Close()
+ return nil, fmt.Errorf("%s returned status code %d", url, resp.StatusCode)
+ }
+ return resp.Body, nil
+}
+
+func init() {
+ jsonschema.Loaders["http"] = Load
+ jsonschema.Loaders["https"] = Load
+}
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/loader.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/loader.go
new file mode 100644
index 0000000000..c94195c335
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/loader.go
@@ -0,0 +1,60 @@
+package jsonschema
+
+import (
+ "fmt"
+ "io"
+ "net/url"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+)
+
+func loadFileURL(s string) (io.ReadCloser, error) {
+ u, err := url.Parse(s)
+ if err != nil {
+ return nil, err
+ }
+ f := u.Path
+ if runtime.GOOS == "windows" {
+ f = strings.TrimPrefix(f, "/")
+ f = filepath.FromSlash(f)
+ }
+ return os.Open(f)
+}
+
+// Loaders is a registry of functions that know how to load
+// an absolute url of a specific scheme.
+//
+// New loaders can be registered by adding to this map. The key is the URL
+// scheme, the value is a function that knows how to load urls of that scheme.
+var Loaders = map[string]func(url string) (io.ReadCloser, error){
+ "file": loadFileURL,
+}
+
+// LoaderNotFoundError is the error type returned by the Load function.
+// It indicates that no Loader is registered for that URL scheme.
+type LoaderNotFoundError string
+
+func (e LoaderNotFoundError) Error() string {
+ return fmt.Sprintf("jsonschema: no Loader found for %s", string(e))
+}
+
+// LoadURL loads the document at the given absolute URL. The default
+// implementation uses the Loaders registry to look up by scheme and uses
+// that loader.
+//
+// Users can change this variable if they would like to take complete
+// responsibility for loading the given URL. Used by Compiler if its LoadURL
+// field is nil.
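+//
+// A hedged sketch of registering an additional loader (the "mem" scheme and
+// its backing map are hypothetical):
+//
+//	var memDocs = map[string]string{
+//		"mem://a.json": `{"type": "object"}`,
+//	}
+//
+//	jsonschema.Loaders["mem"] = func(u string) (io.ReadCloser, error) {
+//		doc, ok := memDocs[u]
+//		if !ok {
+//			return nil, fmt.Errorf("mem: %q not found", u)
+//		}
+//		return io.NopCloser(strings.NewReader(doc)), nil
+//	}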
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/loader.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/loader.go
new file mode 100644
index 0000000000..c94195c335
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/loader.go
@@ -0,0 +1,60 @@
+package jsonschema
+
+import (
+	"fmt"
+	"io"
+	"net/url"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+)
+
+func loadFileURL(s string) (io.ReadCloser, error) {
+	u, err := url.Parse(s)
+	if err != nil {
+		return nil, err
+	}
+	f := u.Path
+	if runtime.GOOS == "windows" {
+		f = strings.TrimPrefix(f, "/")
+		f = filepath.FromSlash(f)
+	}
+	return os.Open(f)
+}
+
+// Loaders is a registry of functions that know how to load an
+// absolute url with a specific scheme.
+//
+// New loaders can be registered by adding to this map. Key is the URL
+// scheme; value is a function that knows how to load urls of that scheme.
+var Loaders = map[string]func(url string) (io.ReadCloser, error){
+	"file": loadFileURL,
+}
+
+// LoaderNotFoundError is the error type returned by the LoadURL function.
+// It indicates that no Loader is registered for that URL scheme.
+type LoaderNotFoundError string
+
+func (e LoaderNotFoundError) Error() string {
+	return fmt.Sprintf("jsonschema: no Loader found for %s", string(e))
+}
+
+// LoadURL loads the document at the given absolute URL. The default
+// implementation looks up a loader by scheme in the Loaders registry
+// and delegates to it.
+//
+// Users can change this variable if they would like to take complete
+// responsibility for loading a given URL. Used by Compiler if its LoadURL
+// field is nil.
+var LoadURL = func(s string) (io.ReadCloser, error) {
+	u, err := url.Parse(s)
+	if err != nil {
+		return nil, err
+	}
+	loader, ok := Loaders[u.Scheme]
+	if !ok {
+		return nil, LoaderNotFoundError(s)
+	}
+	return loader(s)
+}
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/output.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/output.go
new file mode 100644
index 0000000000..d65ae2a929
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/output.go
@@ -0,0 +1,77 @@
+package jsonschema
+
+// Flag is output format with simple boolean property valid.
+type Flag struct {
+	Valid bool `json:"valid"`
+}
+
+// FlagOutput returns output in flag format
+func (ve *ValidationError) FlagOutput() Flag {
+	return Flag{}
+}
+
+// Basic ---
+
+// Basic is output format with flat list of output units.
+type Basic struct {
+	Valid  bool         `json:"valid"`
+	Errors []BasicError `json:"errors"`
+}
+
+// BasicError is output unit in basic format.
+type BasicError struct {
+	KeywordLocation         string `json:"keywordLocation"`
+	AbsoluteKeywordLocation string `json:"absoluteKeywordLocation"`
+	InstanceLocation        string `json:"instanceLocation"`
+	Error                   string `json:"error"`
+}
+
+// BasicOutput returns output in basic format
+func (ve *ValidationError) BasicOutput() Basic {
+	var errors []BasicError
+	var flatten func(*ValidationError)
+	flatten = func(ve *ValidationError) {
+		errors = append(errors, BasicError{
+			KeywordLocation:         ve.KeywordLocation,
+			AbsoluteKeywordLocation: ve.AbsoluteKeywordLocation,
+			InstanceLocation:        ve.InstanceLocation,
+			Error:                   ve.Message,
+		})
+		for _, cause := range ve.Causes {
+			flatten(cause)
+		}
+	}
+	flatten(ve)
+	return Basic{Errors: errors}
+}
+
+// Detailed ---
+
+// Detailed is output format based on structure of schema.
+type Detailed struct {
+	Valid                   bool       `json:"valid"`
+	KeywordLocation         string     `json:"keywordLocation"`
+	AbsoluteKeywordLocation string     `json:"absoluteKeywordLocation"`
+	InstanceLocation        string     `json:"instanceLocation"`
+	Error                   string     `json:"error,omitempty"`
+	Errors                  []Detailed `json:"errors,omitempty"`
+}
+
+// DetailedOutput returns output in detailed format
+func (ve *ValidationError) DetailedOutput() Detailed {
+	var errors []Detailed
+	for _, cause := range ve.Causes {
+		errors = append(errors, cause.DetailedOutput())
+	}
+	var message = ve.Message
+	if len(ve.Causes) > 0 {
+		message = ""
+	}
+	return Detailed{
+		KeywordLocation:         ve.KeywordLocation,
+		AbsoluteKeywordLocation: ve.AbsoluteKeywordLocation,
+		InstanceLocation:        ve.InstanceLocation,
+		Error:                   message,
+		Errors:                  errors,
+	}
+}
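A short sketch (not part of the patch) of consuming the three output formats defined in output.go above; MustCompileString is the package's own helper, and the schema and instance are invented for illustration.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/santhosh-tekuri/jsonschema/v5"
)

func main() {
	sch := jsonschema.MustCompileString("demo.json", `{"type": "number"}`)
	err := sch.Validate("not a number")
	ve, ok := err.(*jsonschema.ValidationError)
	if !ok {
		return
	}
	fmt.Println(ve.FlagOutput().Valid) // false: flag format carries only validity
	basic, _ := json.MarshalIndent(ve.BasicOutput(), "", "  ")
	fmt.Println(string(basic)) // flat list of output units
	detailed, _ := json.MarshalIndent(ve.DetailedOutput(), "", "  ")
	fmt.Println(string(detailed)) // nested form, mirrors the schema structure
}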
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/resource.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/resource.go
new file mode 100644
index 0000000000..18349daac7
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/resource.go
@@ -0,0 +1,280 @@
+package jsonschema
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/url"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"strings"
+)
+
+type resource struct {
+	url          string // base url of resource. can be empty
+	floc         string // fragment with json-pointer from root resource
+	doc          interface{}
+	draft        *Draft
+	subresources map[string]*resource // key is floc. only applicable for root resource
+	schema       *Schema
+}
+
+func (r *resource) String() string {
+	return r.url + r.floc
+}
+
+func newResource(url string, r io.Reader) (*resource, error) {
+	if strings.IndexByte(url, '#') != -1 {
+		panic(fmt.Sprintf("BUG: newResource(%q)", url))
+	}
+	doc, err := unmarshal(r)
+	if err != nil {
+		return nil, fmt.Errorf("jsonschema: invalid json %s: %v", url, err)
+	}
+	url, err = toAbs(url)
+	if err != nil {
+		return nil, err
+	}
+	return &resource{
+		url:  url,
+		floc: "#",
+		doc:  doc,
+	}, nil
+}
+
+// fillSubschemas fills subschemas in res into r.subresources
+func (r *resource) fillSubschemas(c *Compiler, res *resource) error {
+	if err := c.validateSchema(r, res.doc, res.floc[1:]); err != nil {
+		return err
+	}
+
+	if r.subresources == nil {
+		r.subresources = make(map[string]*resource)
+	}
+	if err := r.draft.listSubschemas(res, r.baseURL(res.floc), r.subresources); err != nil {
+		return err
+	}
+
+	// ensure subresource.url uniqueness
+	url2floc := make(map[string]string)
+	for _, sr := range r.subresources {
+		if sr.url != "" {
+			if floc, ok := url2floc[sr.url]; ok {
+				return fmt.Errorf("jsonschema: %q and %q in %s have same canonical-uri", floc[1:], sr.floc[1:], r.url)
+			}
+			url2floc[sr.url] = sr.floc
+		}
+	}
+
+	return nil
+}
+
+// listResources lists all subresources in res
+func (r *resource) listResources(res *resource) []*resource {
+	var result []*resource
+	prefix := res.floc + "/"
+	for _, sr := range r.subresources {
+		if strings.HasPrefix(sr.floc, prefix) {
+			result = append(result, sr)
+		}
+	}
+	return result
+}
+
+func (r *resource) findResource(url string) *resource {
+	if r.url == url {
+		return r
+	}
+	for _, res := range r.subresources {
+		if res.url == url {
+			return res
+		}
+	}
+	return nil
+}
+
+// resolve fragment f with sr as base
+func (r *resource) resolveFragment(c *Compiler, sr *resource, f string) (*resource, error) {
+	if f == "#" || f == "#/" {
+		return sr, nil
+	}
+
+	// resolve by anchor
+	if !strings.HasPrefix(f, "#/") {
+		// check in given resource
+		for _, anchor := range r.draft.anchors(sr.doc) {
+			if anchor == f[1:] {
+				return sr, nil
+			}
+		}
+
+		// check in subresources that have the same base url
+		prefix := sr.floc + "/"
+		for _, res := range r.subresources {
+			if strings.HasPrefix(res.floc, prefix) && r.baseURL(res.floc) == sr.url {
+				for _, anchor := range r.draft.anchors(res.doc) {
+					if anchor == f[1:] {
+						return res, nil
+					}
+				}
+			}
+		}
+		return nil, nil
+	}
+
+	// resolve by ptr
+	floc := sr.floc + f[1:]
+	if res, ok := r.subresources[floc]; ok {
+		return res, nil
+	}
+
+	// non-standard location
+	doc := r.doc
+	for _, item := range strings.Split(floc[2:], "/") {
+		item = strings.Replace(item, "~1", "/", -1)
+		item = strings.Replace(item, "~0", "~", -1)
+		item, err := url.PathUnescape(item)
+		if err != nil {
+			return nil, err
+		}
+		switch d := doc.(type) {
+		case map[string]interface{}:
+			if _, ok := d[item]; !ok {
+				return nil, nil
+			}
+			doc = d[item]
+		case []interface{}:
+			index, err := strconv.Atoi(item)
+			if err != nil {
+				return nil, err
+			}
+			if index < 0 || index >= len(d) {
+				return nil, nil
+			}
+			doc = d[index]
+		default:
+			return nil, nil
+		}
+	}
+
+	id, err := r.draft.resolveID(r.baseURL(floc), doc)
+	if err != nil {
+		return nil, err
+	}
+	res := &resource{url: id, floc: floc, doc: doc}
+	r.subresources[floc] = res
+	if err := r.fillSubschemas(c, res); err != nil {
+		return nil, err
+	}
+	return res, nil
+}
+
+func (r *resource) baseURL(floc string) string {
+	for {
+		if sr, ok :=
r.subresources[floc]; ok { + if sr.url != "" { + return sr.url + } + } + slash := strings.LastIndexByte(floc, '/') + if slash == -1 { + break + } + floc = floc[:slash] + } + return r.url +} + +// url helpers --- + +func toAbs(s string) (string, error) { + // if windows absolute file path, convert to file url + // because: net/url parses driver name as scheme + if runtime.GOOS == "windows" && len(s) >= 3 && s[1:3] == `:\` { + s = "file:///" + filepath.ToSlash(s) + } + + u, err := url.Parse(s) + if err != nil { + return "", err + } + if u.IsAbs() { + return s, nil + } + + // s is filepath + if s, err = filepath.Abs(s); err != nil { + return "", err + } + if runtime.GOOS == "windows" { + s = "file:///" + filepath.ToSlash(s) + } else { + s = "file://" + s + } + u, err = url.Parse(s) // to fix spaces in filepath + return u.String(), err +} + +func resolveURL(base, ref string) (string, error) { + if ref == "" { + return base, nil + } + if strings.HasPrefix(ref, "urn:") { + return ref, nil + } + + refURL, err := url.Parse(ref) + if err != nil { + return "", err + } + if refURL.IsAbs() { + return ref, nil + } + + if strings.HasPrefix(base, "urn:") { + base, _ = split(base) + return base + ref, nil + } + + baseURL, err := url.Parse(base) + if err != nil { + return "", err + } + return baseURL.ResolveReference(refURL).String(), nil +} + +func split(uri string) (string, string) { + hash := strings.IndexByte(uri, '#') + if hash == -1 { + return uri, "#" + } + f := uri[hash:] + if f == "#/" { + f = "#" + } + return uri[0:hash], f +} + +func (s *Schema) url() string { + u, _ := split(s.Location) + return u +} + +func (s *Schema) loc() string { + _, f := split(s.Location) + return f[1:] +} + +func unmarshal(r io.Reader) (interface{}, error) { + decoder := json.NewDecoder(r) + decoder.UseNumber() + var doc interface{} + if err := decoder.Decode(&doc); err != nil { + return nil, err + } + if t, _ := decoder.Token(); t != nil { + return nil, fmt.Errorf("invalid character %v after top-level value", t) + } + return doc, nil +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/schema.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/schema.go new file mode 100644 index 0000000000..688f0a6fee --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/schema.go @@ -0,0 +1,900 @@ +package jsonschema + +import ( + "bytes" + "encoding/json" + "fmt" + "hash/maphash" + "math/big" + "net/url" + "regexp" + "sort" + "strconv" + "strings" + "unicode/utf8" +) + +// A Schema represents compiled version of json-schema. +type Schema struct { + Location string // absolute location + + Draft *Draft // draft used by schema. + meta *Schema + vocab []string + dynamicAnchors []*Schema + + // type agnostic validations + Format string + format func(interface{}) bool + Always *bool // always pass/fail. used when booleans are used as schemas in draft-07. + Ref *Schema + RecursiveAnchor bool + RecursiveRef *Schema + DynamicAnchor string + DynamicRef *Schema + dynamicRefAnchor string + Types []string // allowed types. + Constant []interface{} // first element in slice is constant value. note: slice is used to capture nil constant. + Enum []interface{} // allowed values. + enumError string // error message for enum fail. captured here to avoid constructing error message every time. + Not *Schema + AllOf []*Schema + AnyOf []*Schema + OneOf []*Schema + If *Schema + Then *Schema // nil, when If is nil. + Else *Schema // nil, when If is nil. + + // object validations + MinProperties int // -1 if not specified. 
+	MaxProperties         int      // -1 if not specified.
+	Required              []string // list of required properties.
+	Properties            map[string]*Schema
+	PropertyNames         *Schema
+	RegexProperties       bool // property names must be valid regex. used only in draft4 as workaround in metaschema.
+	PatternProperties     map[*regexp.Regexp]*Schema
+	AdditionalProperties  interface{}            // nil or bool or *Schema.
+	Dependencies          map[string]interface{} // map value is *Schema or []string.
+	DependentRequired     map[string][]string
+	DependentSchemas      map[string]*Schema
+	UnevaluatedProperties *Schema
+
+	// array validations
+	MinItems         int // -1 if not specified.
+	MaxItems         int // -1 if not specified.
+	UniqueItems      bool
+	Items            interface{} // nil or *Schema or []*Schema
+	AdditionalItems  interface{} // nil or bool or *Schema.
+	PrefixItems      []*Schema
+	Items2020        *Schema // items keyword reintroduced in draft 2020-12
+	Contains         *Schema
+	ContainsEval     bool // whether any item in an array that passes validation of the contains schema is considered "evaluated"
+	MinContains      int  // 1 if not specified
+	MaxContains      int  // -1 if not specified
+	UnevaluatedItems *Schema
+
+	// string validations
+	MinLength        int // -1 if not specified.
+	MaxLength        int // -1 if not specified.
+	Pattern          *regexp.Regexp
+	ContentEncoding  string
+	decoder          func(string) ([]byte, error)
+	ContentMediaType string
+	mediaType        func([]byte) error
+	ContentSchema    *Schema
+
+	// number validators
+	Minimum          *big.Rat
+	ExclusiveMinimum *big.Rat
+	Maximum          *big.Rat
+	ExclusiveMaximum *big.Rat
+	MultipleOf       *big.Rat
+
+	// annotations. captured only when Compiler.ExtractAnnotations is true.
+	Title       string
+	Description string
+	Default     interface{}
+	Comment     string
+	ReadOnly    bool
+	WriteOnly   bool
+	Examples    []interface{}
+	Deprecated  bool
+
+	// user defined extensions
+	Extensions map[string]ExtSchema
+}
+
+func (s *Schema) String() string {
+	return s.Location
+}
+
+func newSchema(url, floc string, draft *Draft, doc interface{}) *Schema {
+	// fill with default values
+	s := &Schema{
+		Location:      url + floc,
+		Draft:         draft,
+		MinProperties: -1,
+		MaxProperties: -1,
+		MinItems:      -1,
+		MaxItems:      -1,
+		MinContains:   1,
+		MaxContains:   -1,
+		MinLength:     -1,
+		MaxLength:     -1,
+	}
+
+	if doc, ok := doc.(map[string]interface{}); ok {
+		if ra, ok := doc["$recursiveAnchor"]; ok {
+			if ra, ok := ra.(bool); ok {
+				s.RecursiveAnchor = ra
+			}
+		}
+		if da, ok := doc["$dynamicAnchor"]; ok {
+			if da, ok := da.(string); ok {
+				s.DynamicAnchor = da
+			}
+		}
+	}
+	return s
+}
+
+func (s *Schema) hasVocab(name string) bool {
+	if s == nil { // during bootstrap
+		return true
+	}
+	if name == "core" {
+		return true
+	}
+	for _, url := range s.vocab {
+		if url == "https://json-schema.org/draft/2019-09/vocab/"+name {
+			return true
+		}
+		if url == "https://json-schema.org/draft/2020-12/vocab/"+name {
+			return true
+		}
+	}
+	return false
+}
+
+// Validate validates given doc, against the json-schema s.
+//
+// the v must be the raw json value. for number precision
+// unmarshal with json.UseNumber().
+//
+// returns *ValidationError if v does not conform to schema s.
+// returns InfiniteLoopError if it detects loop during validation.
+// returns InvalidJSONTypeError if it detects any non json value in v.
+func (s *Schema) Validate(v interface{}) (err error) { + return s.validateValue(v, "") +} + +func (s *Schema) validateValue(v interface{}, vloc string) (err error) { + defer func() { + if r := recover(); r != nil { + switch r := r.(type) { + case InfiniteLoopError, InvalidJSONTypeError: + err = r.(error) + default: + panic(r) + } + } + }() + if _, err := s.validate(nil, 0, "", v, vloc); err != nil { + ve := ValidationError{ + KeywordLocation: "", + AbsoluteKeywordLocation: s.Location, + InstanceLocation: vloc, + Message: fmt.Sprintf("doesn't validate with %s", s.Location), + } + return ve.causes(err) + } + return nil +} + +// validate validates given value v with this schema. +func (s *Schema) validate(scope []schemaRef, vscope int, spath string, v interface{}, vloc string) (result validationResult, err error) { + validationError := func(keywordPath string, format string, a ...interface{}) *ValidationError { + return &ValidationError{ + KeywordLocation: keywordLocation(scope, keywordPath), + AbsoluteKeywordLocation: joinPtr(s.Location, keywordPath), + InstanceLocation: vloc, + Message: fmt.Sprintf(format, a...), + } + } + + sref := schemaRef{spath, s, false} + if err := checkLoop(scope[len(scope)-vscope:], sref); err != nil { + panic(err) + } + scope = append(scope, sref) + vscope++ + + // populate result + switch v := v.(type) { + case map[string]interface{}: + result.unevalProps = make(map[string]struct{}) + for pname := range v { + result.unevalProps[pname] = struct{}{} + } + case []interface{}: + result.unevalItems = make(map[int]struct{}) + for i := range v { + result.unevalItems[i] = struct{}{} + } + } + + validate := func(sch *Schema, schPath string, v interface{}, vpath string) error { + vloc := vloc + if vpath != "" { + vloc += "/" + vpath + } + _, err := sch.validate(scope, 0, schPath, v, vloc) + return err + } + + validateInplace := func(sch *Schema, schPath string) error { + vr, err := sch.validate(scope, vscope, schPath, v, vloc) + if err == nil { + // update result + for pname := range result.unevalProps { + if _, ok := vr.unevalProps[pname]; !ok { + delete(result.unevalProps, pname) + } + } + for i := range result.unevalItems { + if _, ok := vr.unevalItems[i]; !ok { + delete(result.unevalItems, i) + } + } + } + return err + } + + if s.Always != nil { + if !*s.Always { + return result, validationError("", "not allowed") + } + return result, nil + } + + if len(s.Types) > 0 { + vType := jsonType(v) + matched := false + for _, t := range s.Types { + if vType == t { + matched = true + break + } else if t == "integer" && vType == "number" { + num, _ := new(big.Rat).SetString(fmt.Sprint(v)) + if num.IsInt() { + matched = true + break + } + } + } + if !matched { + return result, validationError("type", "expected %s, but got %s", strings.Join(s.Types, " or "), vType) + } + } + + var errors []error + + if len(s.Constant) > 0 { + if !equals(v, s.Constant[0]) { + switch jsonType(s.Constant[0]) { + case "object", "array": + errors = append(errors, validationError("const", "const failed")) + default: + errors = append(errors, validationError("const", "value must be %#v", s.Constant[0])) + } + } + } + + if len(s.Enum) > 0 { + matched := false + for _, item := range s.Enum { + if equals(v, item) { + matched = true + break + } + } + if !matched { + errors = append(errors, validationError("enum", s.enumError)) + } + } + + if s.format != nil && !s.format(v) { + var val = v + if v, ok := v.(string); ok { + val = quote(v) + } + errors = append(errors, validationError("format", "%v is not valid 
%s", val, quote(s.Format))) + } + + switch v := v.(type) { + case map[string]interface{}: + if s.MinProperties != -1 && len(v) < s.MinProperties { + errors = append(errors, validationError("minProperties", "minimum %d properties allowed, but found %d properties", s.MinProperties, len(v))) + } + if s.MaxProperties != -1 && len(v) > s.MaxProperties { + errors = append(errors, validationError("maxProperties", "maximum %d properties allowed, but found %d properties", s.MaxProperties, len(v))) + } + if len(s.Required) > 0 { + var missing []string + for _, pname := range s.Required { + if _, ok := v[pname]; !ok { + missing = append(missing, quote(pname)) + } + } + if len(missing) > 0 { + errors = append(errors, validationError("required", "missing properties: %s", strings.Join(missing, ", "))) + } + } + + for pname, sch := range s.Properties { + if pvalue, ok := v[pname]; ok { + delete(result.unevalProps, pname) + if err := validate(sch, "properties/"+escape(pname), pvalue, escape(pname)); err != nil { + errors = append(errors, err) + } + } + } + + if s.PropertyNames != nil { + for pname := range v { + if err := validate(s.PropertyNames, "propertyNames", pname, escape(pname)); err != nil { + errors = append(errors, err) + } + } + } + + if s.RegexProperties { + for pname := range v { + if !isRegex(pname) { + errors = append(errors, validationError("", "patternProperty %s is not valid regex", quote(pname))) + } + } + } + for pattern, sch := range s.PatternProperties { + for pname, pvalue := range v { + if pattern.MatchString(pname) { + delete(result.unevalProps, pname) + if err := validate(sch, "patternProperties/"+escape(pattern.String()), pvalue, escape(pname)); err != nil { + errors = append(errors, err) + } + } + } + } + if s.AdditionalProperties != nil { + if allowed, ok := s.AdditionalProperties.(bool); ok { + if !allowed && len(result.unevalProps) > 0 { + errors = append(errors, validationError("additionalProperties", "additionalProperties %s not allowed", result.unevalPnames())) + } + } else { + schema := s.AdditionalProperties.(*Schema) + for pname := range result.unevalProps { + if pvalue, ok := v[pname]; ok { + if err := validate(schema, "additionalProperties", pvalue, escape(pname)); err != nil { + errors = append(errors, err) + } + } + } + } + result.unevalProps = nil + } + for dname, dvalue := range s.Dependencies { + if _, ok := v[dname]; ok { + switch dvalue := dvalue.(type) { + case *Schema: + if err := validateInplace(dvalue, "dependencies/"+escape(dname)); err != nil { + errors = append(errors, err) + } + case []string: + for i, pname := range dvalue { + if _, ok := v[pname]; !ok { + errors = append(errors, validationError("dependencies/"+escape(dname)+"/"+strconv.Itoa(i), "property %s is required, if %s property exists", quote(pname), quote(dname))) + } + } + } + } + } + for dname, dvalue := range s.DependentRequired { + if _, ok := v[dname]; ok { + for i, pname := range dvalue { + if _, ok := v[pname]; !ok { + errors = append(errors, validationError("dependentRequired/"+escape(dname)+"/"+strconv.Itoa(i), "property %s is required, if %s property exists", quote(pname), quote(dname))) + } + } + } + } + for dname, sch := range s.DependentSchemas { + if _, ok := v[dname]; ok { + if err := validateInplace(sch, "dependentSchemas/"+escape(dname)); err != nil { + errors = append(errors, err) + } + } + } + + case []interface{}: + if s.MinItems != -1 && len(v) < s.MinItems { + errors = append(errors, validationError("minItems", "minimum %d items required, but found %d items", s.MinItems, 
len(v))) + } + if s.MaxItems != -1 && len(v) > s.MaxItems { + errors = append(errors, validationError("maxItems", "maximum %d items required, but found %d items", s.MaxItems, len(v))) + } + if s.UniqueItems { + if len(v) <= 20 { + outer1: + for i := 1; i < len(v); i++ { + for j := 0; j < i; j++ { + if equals(v[i], v[j]) { + errors = append(errors, validationError("uniqueItems", "items at index %d and %d are equal", j, i)) + break outer1 + } + } + } + } else { + m := make(map[uint64][]int) + var h maphash.Hash + outer2: + for i, item := range v { + h.Reset() + hash(item, &h) + k := h.Sum64() + if err != nil { + panic(err) + } + arr, ok := m[k] + if ok { + for _, j := range arr { + if equals(v[j], item) { + errors = append(errors, validationError("uniqueItems", "items at index %d and %d are equal", j, i)) + break outer2 + } + } + } + arr = append(arr, i) + m[k] = arr + } + } + } + + // items + additionalItems + switch items := s.Items.(type) { + case *Schema: + for i, item := range v { + if err := validate(items, "items", item, strconv.Itoa(i)); err != nil { + errors = append(errors, err) + } + } + result.unevalItems = nil + case []*Schema: + for i, item := range v { + if i < len(items) { + delete(result.unevalItems, i) + if err := validate(items[i], "items/"+strconv.Itoa(i), item, strconv.Itoa(i)); err != nil { + errors = append(errors, err) + } + } else if sch, ok := s.AdditionalItems.(*Schema); ok { + delete(result.unevalItems, i) + if err := validate(sch, "additionalItems", item, strconv.Itoa(i)); err != nil { + errors = append(errors, err) + } + } else { + break + } + } + if additionalItems, ok := s.AdditionalItems.(bool); ok { + if additionalItems { + result.unevalItems = nil + } else if len(v) > len(items) { + errors = append(errors, validationError("additionalItems", "only %d items are allowed, but found %d items", len(items), len(v))) + } + } + } + + // prefixItems + items + for i, item := range v { + if i < len(s.PrefixItems) { + delete(result.unevalItems, i) + if err := validate(s.PrefixItems[i], "prefixItems/"+strconv.Itoa(i), item, strconv.Itoa(i)); err != nil { + errors = append(errors, err) + } + } else if s.Items2020 != nil { + delete(result.unevalItems, i) + if err := validate(s.Items2020, "items", item, strconv.Itoa(i)); err != nil { + errors = append(errors, err) + } + } else { + break + } + } + + // contains + minContains + maxContains + if s.Contains != nil && (s.MinContains != -1 || s.MaxContains != -1) { + matched := 0 + var causes []error + for i, item := range v { + if err := validate(s.Contains, "contains", item, strconv.Itoa(i)); err != nil { + causes = append(causes, err) + } else { + matched++ + if s.ContainsEval { + delete(result.unevalItems, i) + } + } + } + if s.MinContains != -1 && matched < s.MinContains { + errors = append(errors, validationError("minContains", "valid must be >= %d, but got %d", s.MinContains, matched).add(causes...)) + } + if s.MaxContains != -1 && matched > s.MaxContains { + errors = append(errors, validationError("maxContains", "valid must be <= %d, but got %d", s.MaxContains, matched)) + } + } + + case string: + // minLength + maxLength + if s.MinLength != -1 || s.MaxLength != -1 { + length := utf8.RuneCount([]byte(v)) + if s.MinLength != -1 && length < s.MinLength { + errors = append(errors, validationError("minLength", "length must be >= %d, but got %d", s.MinLength, length)) + } + if s.MaxLength != -1 && length > s.MaxLength { + errors = append(errors, validationError("maxLength", "length must be <= %d, but got %d", s.MaxLength, 
length)) + } + } + + if s.Pattern != nil && !s.Pattern.MatchString(v) { + errors = append(errors, validationError("pattern", "does not match pattern %s", quote(s.Pattern.String()))) + } + + // contentEncoding + contentMediaType + if s.decoder != nil || s.mediaType != nil { + decoded := s.ContentEncoding == "" + var content []byte + if s.decoder != nil { + b, err := s.decoder(v) + if err != nil { + errors = append(errors, validationError("contentEncoding", "value is not %s encoded", s.ContentEncoding)) + } else { + content, decoded = b, true + } + } + if decoded && s.mediaType != nil { + if s.decoder == nil { + content = []byte(v) + } + if err := s.mediaType(content); err != nil { + errors = append(errors, validationError("contentMediaType", "value is not of mediatype %s", quote(s.ContentMediaType))) + } + } + if decoded && s.ContentSchema != nil { + contentJSON, err := unmarshal(bytes.NewReader(content)) + if err != nil { + errors = append(errors, validationError("contentSchema", "value is not valid json")) + } else { + err := validate(s.ContentSchema, "contentSchema", contentJSON, "") + if err != nil { + errors = append(errors, err) + } + } + } + } + + case json.Number, float32, float64, int, int8, int32, int64, uint, uint8, uint32, uint64: + // lazy convert to *big.Rat to avoid allocation + var numVal *big.Rat + num := func() *big.Rat { + if numVal == nil { + numVal, _ = new(big.Rat).SetString(fmt.Sprint(v)) + } + return numVal + } + f64 := func(r *big.Rat) float64 { + f, _ := r.Float64() + return f + } + if s.Minimum != nil && num().Cmp(s.Minimum) < 0 { + errors = append(errors, validationError("minimum", "must be >= %v but found %v", f64(s.Minimum), v)) + } + if s.ExclusiveMinimum != nil && num().Cmp(s.ExclusiveMinimum) <= 0 { + errors = append(errors, validationError("exclusiveMinimum", "must be > %v but found %v", f64(s.ExclusiveMinimum), v)) + } + if s.Maximum != nil && num().Cmp(s.Maximum) > 0 { + errors = append(errors, validationError("maximum", "must be <= %v but found %v", f64(s.Maximum), v)) + } + if s.ExclusiveMaximum != nil && num().Cmp(s.ExclusiveMaximum) >= 0 { + errors = append(errors, validationError("exclusiveMaximum", "must be < %v but found %v", f64(s.ExclusiveMaximum), v)) + } + if s.MultipleOf != nil { + if q := new(big.Rat).Quo(num(), s.MultipleOf); !q.IsInt() { + errors = append(errors, validationError("multipleOf", "%v not multipleOf %v", v, f64(s.MultipleOf))) + } + } + } + + // $ref + $recursiveRef + $dynamicRef + validateRef := func(sch *Schema, refPath string) error { + if sch != nil { + if err := validateInplace(sch, refPath); err != nil { + var url = sch.Location + if s.url() == sch.url() { + url = sch.loc() + } + return validationError(refPath, "doesn't validate with %s", quote(url)).causes(err) + } + } + return nil + } + if err := validateRef(s.Ref, "$ref"); err != nil { + errors = append(errors, err) + } + if s.RecursiveRef != nil { + sch := s.RecursiveRef + if sch.RecursiveAnchor { + // recursiveRef based on scope + for _, e := range scope { + if e.schema.RecursiveAnchor { + sch = e.schema + break + } + } + } + if err := validateRef(sch, "$recursiveRef"); err != nil { + errors = append(errors, err) + } + } + if s.DynamicRef != nil { + sch := s.DynamicRef + if s.dynamicRefAnchor != "" && sch.DynamicAnchor == s.dynamicRefAnchor { + // dynamicRef based on scope + for i := len(scope) - 1; i >= 0; i-- { + sr := scope[i] + if sr.discard { + break + } + for _, da := range sr.schema.dynamicAnchors { + if da.DynamicAnchor == s.DynamicRef.DynamicAnchor && da != 
s.DynamicRef { + sch = da + break + } + } + } + } + if err := validateRef(sch, "$dynamicRef"); err != nil { + errors = append(errors, err) + } + } + + if s.Not != nil && validateInplace(s.Not, "not") == nil { + errors = append(errors, validationError("not", "not failed")) + } + + for i, sch := range s.AllOf { + schPath := "allOf/" + strconv.Itoa(i) + if err := validateInplace(sch, schPath); err != nil { + errors = append(errors, validationError(schPath, "allOf failed").add(err)) + } + } + + if len(s.AnyOf) > 0 { + matched := false + var causes []error + for i, sch := range s.AnyOf { + if err := validateInplace(sch, "anyOf/"+strconv.Itoa(i)); err == nil { + matched = true + } else { + causes = append(causes, err) + } + } + if !matched { + errors = append(errors, validationError("anyOf", "anyOf failed").add(causes...)) + } + } + + if len(s.OneOf) > 0 { + matched := -1 + var causes []error + for i, sch := range s.OneOf { + if err := validateInplace(sch, "oneOf/"+strconv.Itoa(i)); err == nil { + if matched == -1 { + matched = i + } else { + errors = append(errors, validationError("oneOf", "valid against schemas at indexes %d and %d", matched, i)) + break + } + } else { + causes = append(causes, err) + } + } + if matched == -1 { + errors = append(errors, validationError("oneOf", "oneOf failed").add(causes...)) + } + } + + // if + then + else + if s.If != nil { + err := validateInplace(s.If, "if") + // "if" leaves dynamic scope + scope[len(scope)-1].discard = true + if err == nil { + if s.Then != nil { + if err := validateInplace(s.Then, "then"); err != nil { + errors = append(errors, validationError("then", "if-then failed").add(err)) + } + } + } else { + if s.Else != nil { + if err := validateInplace(s.Else, "else"); err != nil { + errors = append(errors, validationError("else", "if-else failed").add(err)) + } + } + } + // restore dynamic scope + scope[len(scope)-1].discard = false + } + + for _, ext := range s.Extensions { + if err := ext.Validate(ValidationContext{result, validate, validateInplace, validationError}, v); err != nil { + errors = append(errors, err) + } + } + + // unevaluatedProperties + unevaluatedItems + switch v := v.(type) { + case map[string]interface{}: + if s.UnevaluatedProperties != nil { + for pname := range result.unevalProps { + if pvalue, ok := v[pname]; ok { + if err := validate(s.UnevaluatedProperties, "unevaluatedProperties", pvalue, escape(pname)); err != nil { + errors = append(errors, err) + } + } + } + result.unevalProps = nil + } + case []interface{}: + if s.UnevaluatedItems != nil { + for i := range result.unevalItems { + if err := validate(s.UnevaluatedItems, "unevaluatedItems", v[i], strconv.Itoa(i)); err != nil { + errors = append(errors, err) + } + } + result.unevalItems = nil + } + } + + switch len(errors) { + case 0: + return result, nil + case 1: + return result, errors[0] + default: + return result, validationError("", "").add(errors...) // empty message, used just for wrapping + } +} + +type validationResult struct { + unevalProps map[string]struct{} + unevalItems map[int]struct{} +} + +func (vr validationResult) unevalPnames() string { + pnames := make([]string, 0, len(vr.unevalProps)) + for pname := range vr.unevalProps { + pnames = append(pnames, quote(pname)) + } + return strings.Join(pnames, ", ") +} + +// jsonType returns the json type of given value v. 
+//
+// It panics if the given value is not a valid json value.
+func jsonType(v interface{}) string {
+	switch v.(type) {
+	case nil:
+		return "null"
+	case bool:
+		return "boolean"
+	case json.Number, float32, float64, int, int8, int32, int64, uint, uint8, uint32, uint64:
+		return "number"
+	case string:
+		return "string"
+	case []interface{}:
+		return "array"
+	case map[string]interface{}:
+		return "object"
+	}
+	panic(InvalidJSONTypeError(fmt.Sprintf("%T", v)))
+}
+
+// equals tells if given two json values are equal or not.
+func equals(v1, v2 interface{}) bool {
+	v1Type := jsonType(v1)
+	if v1Type != jsonType(v2) {
+		return false
+	}
+	switch v1Type {
+	case "array":
+		arr1, arr2 := v1.([]interface{}), v2.([]interface{})
+		if len(arr1) != len(arr2) {
+			return false
+		}
+		for i := range arr1 {
+			if !equals(arr1[i], arr2[i]) {
+				return false
+			}
+		}
+		return true
+	case "object":
+		obj1, obj2 := v1.(map[string]interface{}), v2.(map[string]interface{})
+		if len(obj1) != len(obj2) {
+			return false
+		}
+		for k, v1 := range obj1 {
+			if v2, ok := obj2[k]; ok {
+				if !equals(v1, v2) {
+					return false
+				}
+			} else {
+				return false
+			}
+		}
+		return true
+	case "number":
+		num1, _ := new(big.Rat).SetString(fmt.Sprint(v1))
+		num2, _ := new(big.Rat).SetString(fmt.Sprint(v2))
+		return num1.Cmp(num2) == 0
+	default:
+		return v1 == v2
+	}
+}
+
+func hash(v interface{}, h *maphash.Hash) {
+	switch v := v.(type) {
+	case nil:
+		h.WriteByte(0)
+	case bool:
+		h.WriteByte(1)
+		if v {
+			h.WriteByte(1)
+		} else {
+			h.WriteByte(0)
+		}
+	case json.Number, float32, float64, int, int8, int32, int64, uint, uint8, uint32, uint64:
+		h.WriteByte(2)
+		num, _ := new(big.Rat).SetString(fmt.Sprint(v))
+		h.Write(num.Num().Bytes())
+		h.Write(num.Denom().Bytes())
+	case string:
+		h.WriteByte(3)
+		h.WriteString(v)
+	case []interface{}:
+		h.WriteByte(4)
+		for _, item := range v {
+			hash(item, h)
+		}
+	case map[string]interface{}:
+		h.WriteByte(5)
+		props := make([]string, 0, len(v))
+		for prop := range v {
+			props = append(props, prop)
+		}
+		sort.Slice(props, func(i, j int) bool {
+			return props[i] < props[j]
+		})
+		for _, prop := range props {
+			hash(prop, h)
+			hash(v[prop], h)
+		}
+	default:
+		panic(InvalidJSONTypeError(fmt.Sprintf("%T", v)))
+	}
+}
+
+// escape converts given token to valid json-pointer token
+func escape(token string) string {
+	token = strings.ReplaceAll(token, "~", "~0")
+	token = strings.ReplaceAll(token, "/", "~1")
+	return url.PathEscape(token)
+}
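The Validate doc comment above asks callers to decode with UseNumber for number precision; a minimal end-to-end sketch under that contract (not part of the patch; schema and instance are invented):

package main

import (
	"encoding/json"
	"fmt"
	"strings"

	"github.com/santhosh-tekuri/jsonschema/v5"
)

func main() {
	sch := jsonschema.MustCompileString("sample.json", `{"type": "integer", "minimum": 1}`)

	// decode with UseNumber, as the Validate doc comment requires,
	// so numbers arrive as json.Number rather than float64
	dec := json.NewDecoder(strings.NewReader(`42`))
	dec.UseNumber()
	var doc interface{}
	if err := dec.Decode(&doc); err != nil {
		panic(err)
	}

	if err := sch.Validate(doc); err != nil {
		fmt.Println("invalid:", err) // a *jsonschema.ValidationError
		return
	}
	fmt.Println("valid")
}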
diff --git a/vendor/github.com/sashamelentyev/usestdlibvars/pkg/analyzer/analyzer.go b/vendor/github.com/sashamelentyev/usestdlibvars/pkg/analyzer/analyzer.go
index c9e2dabd11..f9f0587344 100644
--- a/vendor/github.com/sashamelentyev/usestdlibvars/pkg/analyzer/analyzer.go
+++ b/vendor/github.com/sashamelentyev/usestdlibvars/pkg/analyzer/analyzer.go
@@ -2,7 +2,6 @@ package analyzer
 
 import (
 	"flag"
-	"fmt"
 	"go/ast"
 	"go/token"
 	"strings"
@@ -365,7 +364,7 @@ func checkHTTPMethod(pass *analysis.Pass, basicLit *ast.BasicLit) {
 	key := strings.ToUpper(currentVal)
 
 	if newVal, ok := mapping.HTTPMethod[key]; ok {
-		report(pass, basicLit, currentVal, newVal)
+		report(pass, basicLit.Pos(), currentVal, newVal)
 	}
 }
 
@@ -373,7 +372,7 @@ func checkHTTPStatusCode(pass *analysis.Pass, basicLit *ast.BasicLit) {
 	currentVal := getBasicLitValue(basicLit)
 
 	if newVal, ok := mapping.HTTPStatusCode[currentVal]; ok {
-		report(pass, basicLit, currentVal, newVal)
+		report(pass, basicLit.Pos(), currentVal, newVal)
 	}
 }
 
@@ -381,7 +380,7 @@ func checkTimeWeekday(pass *analysis.Pass, basicLit *ast.BasicLit) {
 	currentVal := getBasicLitValue(basicLit)
 
 	if newVal, ok := mapping.TimeWeekday[currentVal]; ok {
-		report(pass, basicLit, currentVal, newVal)
+		report(pass, basicLit.Pos(), currentVal, newVal)
 	}
 }
 
@@ -389,7 +388,7 @@ func checkTimeMonth(pass *analysis.Pass, basicLit *ast.BasicLit) {
 	currentVal := getBasicLitValue(basicLit)
 
 	if newVal, ok := mapping.TimeMonth[currentVal]; ok {
-		report(pass, basicLit, currentVal, newVal)
+		report(pass, basicLit.Pos(), currentVal, newVal)
 	}
 }
 
@@ -397,7 +396,7 @@ func checkTimeLayout(pass *analysis.Pass, basicLit *ast.BasicLit) {
 	currentVal := getBasicLitValue(basicLit)
 
 	if newVal, ok := mapping.TimeLayout[currentVal]; ok {
-		report(pass, basicLit, currentVal, newVal)
+		report(pass, basicLit.Pos(), currentVal, newVal)
 	}
 }
 
@@ -405,7 +404,7 @@ func checkCryptoHash(pass *analysis.Pass, basicLit *ast.BasicLit) {
 	currentVal := getBasicLitValue(basicLit)
 
 	if newVal, ok := mapping.CryptoHash[currentVal]; ok {
-		report(pass, basicLit, currentVal, newVal)
+		report(pass, basicLit.Pos(), currentVal, newVal)
 	}
 }
 
@@ -413,7 +412,7 @@ func checkRPCDefaultPath(pass *analysis.Pass, basicLit *ast.BasicLit) {
 	currentVal := getBasicLitValue(basicLit)
 
 	if newVal, ok := mapping.RPCDefaultPath[currentVal]; ok {
-		report(pass, basicLit, currentVal, newVal)
+		report(pass, basicLit.Pos(), currentVal, newVal)
 	}
 }
 
@@ -423,7 +422,7 @@ func checkSQLIsolationLevel(pass *analysis.Pass, basicLit *ast.BasicLit) {
 	currentVal := getBasicLitValue(basicLit)
 
 	if newVal, ok := mapping.SQLIsolationLevel[currentVal]; ok {
-		report(pass, basicLit, currentVal, newVal)
+		report(pass, basicLit.Pos(), currentVal, newVal)
 	}
 }
 
@@ -431,7 +430,7 @@ func checkTLSSignatureScheme(pass *analysis.Pass, basicLit *ast.BasicLit) {
 	currentVal := getBasicLitValue(basicLit)
 
 	if newVal, ok := mapping.TLSSignatureScheme[currentVal]; ok {
-		report(pass, basicLit, currentVal, newVal)
+		report(pass, basicLit.Pos(), currentVal, newVal)
 	}
 }
 
@@ -439,7 +438,7 @@ func checkConstantKind(pass *analysis.Pass, basicLit *ast.BasicLit) {
 	currentVal := getBasicLitValue(basicLit)
 
 	if newVal, ok := mapping.ConstantKind[currentVal]; ok {
-		report(pass, basicLit, currentVal, newVal)
+		report(pass, basicLit.Pos(), currentVal, newVal)
 	}
 }
 
@@ -515,6 +514,4 @@ func getBasicLitValue(basicLit *ast.BasicLit) string {
 	return val.String()
 }
 
-func report(pass *analysis.Pass, rg analysis.Range, currentVal, newVal string) {
-	pass.Report(analysis.Diagnostic{
-		Pos: rg.Pos(),
+func report(pass *analysis.Pass, pos token.Pos, currentVal, newVal string) {
+	pass.Reportf(pos, "%q can be replaced by %s", currentVal, newVal)
@@ -527,4 +524,1 @@ func report(pass *analysis.Pass, rg analysis.Range, currentVal, newVal string) {
-		}},
-		}},
-	})
 }
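For the dsse change below, a sketch (not part of the patch) of how EnvelopeVerifier.Verify is typically driven. The function name is invented, and the dsse.Verifier implementation is left to the caller, for instance the ECDSA signer-verifier patched further down.

package dsseexample

import (
	"context"
	"fmt"

	"github.com/secure-systems-lab/go-securesystemslib/dsse"
)

// verifyEnvelope is illustrative only: it wraps any dsse.Verifier in an
// EnvelopeVerifier and checks the envelope's signatures against it.
func verifyEnvelope(ctx context.Context, v dsse.Verifier, env *dsse.Envelope) error {
	ev, err := dsse.NewEnvelopeVerifier(v)
	if err != nil {
		return err
	}
	accepted, err := ev.Verify(ctx, env)
	if err != nil {
		return err
	}
	fmt.Printf("accepted %d signature(s)\n", len(accepted))
	return nil
}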
diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go
index d04246747c..2616f75c23 100644
--- a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go
+++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go
@@ -43,8 +43,7 @@ func (ev *EnvelopeVerifier) Verify(ctx context.Context, e *Envelope) ([]Accepted
 	// If *any* signature is found to be incorrect, it is skipped
 	var acceptedKeys []AcceptedKey
 	usedKeyids := make(map[string]string)
-	unverified_providers := make([]Verifier, len(ev.providers))
-	copy(unverified_providers, ev.providers)
+	unverified_providers := ev.providers
 	for _, s := range e.Signatures {
 		sig, err := b64Decode(s.Sig)
 		if err != nil {
diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ecdsa.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ecdsa.go
index 691091af99..af0d52d97f 100644
--- a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ecdsa.go
+++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ecdsa.go
@@ -11,10 +11,7 @@ import (
 	"os"
 )
 
-const (
-	ECDSAKeyType   = "ecdsa"
-	ECDSAKeyScheme = "ecdsa-sha2-nistp256"
-)
+const ECDSAKeyType = "ecdsa"
 
 // ECDSASignerVerifier is a dsse.SignerVerifier compliant interface to sign and
 // verify signatures using ECDSA keys.
@@ -92,11 +89,6 @@ func (sv *ECDSASignerVerifier) Public() crypto.PublicKey {
 
 // LoadECDSAKeyFromFile returns an SSLibKey instance for an ECDSA key stored in
 // a file in the custom securesystemslib format.
-//
-// Deprecated: use LoadKey(). The custom serialization format has been
-// deprecated. Use
-// https://github.com/secure-systems-lab/securesystemslib/blob/main/docs/migrate_key.py
-// to convert your key.
 func LoadECDSAKeyFromFile(path string) (*SSLibKey, error) {
 	contents, err := os.ReadFile(path)
 	if err != nil {
diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ed25519.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ed25519.go
index d954e14b74..34dc8a710b 100644
--- a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ed25519.go
+++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ed25519.go
@@ -88,11 +88,6 @@ func (sv *ED25519SignerVerifier) Public() crypto.PublicKey {
 
 // LoadED25519KeyFromFile returns an SSLibKey instance for an ED25519 key stored
 // in a file in the custom securesystemslib format.
-//
-// Deprecated: use LoadKey(). The custom serialization format has been
-// deprecated. Use
-// https://github.com/secure-systems-lab/securesystemslib/blob/main/docs/migrate_key.py
-// to convert your key.
 func LoadED25519KeyFromFile(path string) (*SSLibKey, error) {
 	contents, err := os.ReadFile(path)
 	if err != nil {
diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/rsa.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/rsa.go
index 2abfcb27c4..07a7710be6 100644
--- a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/rsa.go
+++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/rsa.go
@@ -94,11 +94,6 @@ func (sv *RSAPSSSignerVerifier) Public() crypto.PublicKey {
 
 // LoadRSAPSSKeyFromFile returns an SSLibKey instance for an RSA key stored in a
 // file.
-//
-// Deprecated: use LoadKey(). The custom serialization format has been
-// deprecated. Use
-// https://github.com/secure-systems-lab/securesystemslib/blob/main/docs/migrate_key.py
-// to convert your key.
 func LoadRSAPSSKeyFromFile(path string) (*SSLibKey, error) {
 	contents, err := os.ReadFile(path)
 	if err != nil {
@@ -108,13 +103,8 @@ func LoadRSAPSSKeyFromFile(path string) (*SSLibKey, error) {
 	return LoadRSAPSSKeyFromBytes(contents)
 }
 
-// LoadRSAPSSKeyFromBytes is a function that takes a byte array as input. This
-// byte array should represent a PEM encoded RSA key, as PEM encoding is
-// required. The function returns an SSLibKey instance, which is a struct that
-// holds the key data.
-//
-// Deprecated: use LoadKey() for all key types, RSA is no longer the only key
-// that uses PEM serialization.
+// LoadRSAPSSKeyFromBytes is a function that takes a byte array as input. This byte array should represent a PEM encoded RSA key, as PEM encoding is required.
+// The function returns an SSLibKey instance, which is a struct that holds the key data.
+
 func LoadRSAPSSKeyFromBytes(contents []byte) (*SSLibKey, error) {
 	pemData, keyObj, err := decodeAndParsePEM(contents)
 	if err != nil {
diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/signerverifier.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/signerverifier.go
index 3a8259dfda..03ae596d43 100644
--- a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/signerverifier.go
+++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/signerverifier.go
@@ -1,13 +1,7 @@
 package signerverifier
 
 import (
-	"crypto/ecdsa"
-	"crypto/ed25519"
-	"crypto/rsa"
-	"crypto/x509"
-	"encoding/hex"
-	"errors"
-	"strings"
+	"errors"
 )
 
 var KeyIDHashAlgorithms = []string{"sha256", "sha512"}
@@ -18,7 +12,6 @@ var (
 	ErrUnknownKeyType   = errors.New("unknown key type")
 	ErrInvalidThreshold = errors.New("threshold is either less than 1 or greater than number of provided public keys")
 	ErrInvalidKey       = errors.New("key object has no value")
-	ErrInvalidPEM       = errors.New("unable to parse PEM block")
 )
 
 const (
@@ -41,9 +34,4 @@ type KeyVal struct {
 	Identity string `json:"identity,omitempty"`
 	Issuer   string `json:"issuer,omitempty"`
 }
-
-// LoadKey returns an SSLibKey object when provided a PEM encoded key.
-// Currently, RSA, ED25519, and ECDSA keys are supported.
@@ -144,3 +137,1 @@ func LoadKey(keyBytes []byte) (*SSLibKey, error) {
-	return key, nil
-}
diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/utils.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/utils.go
index e8a30b59fa..c82c9dde5b 100644
--- a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/utils.go
+++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/utils.go
@@ -27,9 +27,6 @@ var (
 // LoadKeyFromSSLibBytes returns a pointer to a Key instance created from the
 // contents of the bytes. The key contents are expected to be in the custom
 // securesystemslib format.
-//
-// Deprecated: use LoadKey() for all key types, RSA is no longer the only key
-// that uses PEM serialization.
 func LoadKeyFromSSLibBytes(contents []byte) (*SSLibKey, error) {
 	var key *SSLibKey
 	if err := json.Unmarshal(contents, &key); err != nil {
diff --git a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/fulcio/fulcio.go b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/fulcio/fulcio.go
index 4a6d753cc8..5b388c8ee0 100644
--- a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/fulcio/fulcio.go
+++ b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/fulcio/fulcio.go
@@ -24,7 +24,6 @@ import (
 	"os"
 	"strings"
 
-	"github.com/go-jose/go-jose/v3/jwt"
 	"github.com/sigstore/cosign/v2/cmd/cosign/cli/options"
 	"github.com/sigstore/cosign/v2/cmd/cosign/cli/sign/privacy"
 	"github.com/sigstore/cosign/v2/internal/pkg/cosign/fulcio/fulcioroots"
@@ -34,6 +33,7 @@ import (
 	"github.com/sigstore/sigstore/pkg/cryptoutils"
 	"github.com/sigstore/sigstore/pkg/oauthflow"
 	"github.com/sigstore/sigstore/pkg/signature"
+	"go.step.sm/crypto/jose"
 	"golang.org/x/term"
 )
 
@@ -209,7 +208,7 @@ func NewClient(fulcioURL string) (api.LegacyClient, error) {
 // or a path to an identity token via the --identity-token flag
 func idToken(s string) (string, error) {
 	// If this is a valid raw token or is empty, just return it
-	if _, err := jwt.ParseSigned(s); err == nil || s == "" {
+	if _, err := jose.ParseSigned(s); err == nil || s == "" {
 		return s, nil
 	}
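The idToken hunk above switches JWT parsing to go.step.sm/crypto/jose. A sketch of the resulting behaviour (not part of the patch), using only the call shown in the diff; the file-path fallback is paraphrased from surrounding code that the hunk does not show, so treat it as an assumption.

package main

import (
	"fmt"
	"os"

	"go.step.sm/crypto/jose"
)

// idTokenValue mirrors the patched check: a string that parses as a signed
// JWT (or an empty string) is used verbatim; anything else is assumed to be
// a path to a file containing the token.
func idTokenValue(s string) (string, error) {
	if _, err := jose.ParseSigned(s); err == nil || s == "" {
		return s, nil
	}
	contents, err := os.ReadFile(s)
	if err != nil {
		return "", err
	}
	return string(contents), nil
}

func main() {
	tok, err := idTokenValue("")
	fmt.Printf("token=%q err=%v\n", tok, err)
}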
diff --git a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/copy.go b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/copy.go
index 9408186371..c94ad478d9 100644
--- a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/copy.go
+++ b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/copy.go
@@ -35,7 +35,7 @@ func (o *CopyOptions) AddFlags(cmd *cobra.Command) {
 	o.Registry.AddFlags(cmd)
 
 	cmd.Flags().StringVar(&o.CopyOnly, "only", "",
-		"custom string array to only copy specific items, this flag is comma delimited. ex: --only=sig,att,sbom")
+		"custom string array to only copy specific items, this flag is comma delimited. ex: --only=sbom,sign,att")
 
 	cmd.Flags().BoolVar(&o.SignatureOnly, "sig-only", false,
 		"[DEPRECATED] only copy the image signature")
diff --git a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/deprecate.go b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/deprecate.go
index 39900375f9..38bfd617eb 100644
--- a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/deprecate.go
+++ b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/deprecate.go
@@ -19,8 +19,4 @@ const SBOMAttachmentDeprecation = "WARNING: SBOM attachments are deprecated " +
 	"and support will be removed in a Cosign release soon after 2024-02-22 " +
 	"(see https://github.com/sigstore/cosign/issues/2755). " +
 	"Instead, please use SBOM attestations."
-const RootWithoutChecksumDeprecation = "WARNING: Fetching initial root from URL " +
-	"without providing its checksum is deprecated and will be disallowed in " +
-	"a future Cosign release. Please provide the initial root checksum " +
-	"via the --root-checksum argument."
diff --git a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/initialize.go b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/initialize.go
index 9af970e0ad..38f7d3c05a 100644
--- a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/initialize.go
+++ b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/initialize.go
@@ -22,9 +22,8 @@ import (
 
 // InitializeOptions is the top level wrapper for the initialize command.
 type InitializeOptions struct {
-	Mirror       string
-	Root         string
-	RootChecksum string
+	Mirror string
+	Root   string
 }
 
 var _ Interface = (*InitializeOptions)(nil)
@@ -37,7 +36,4 @@ func (o *InitializeOptions) AddFlags(cmd *cobra.Command) {
 	cmd.Flags().StringVar(&o.Root, "root", "",
 		"path to trusted initial root. defaults to embedded root")
 	_ = cmd.Flags().SetAnnotation("root", cobra.BashCompSubdirsInDir, []string{})
-
-	cmd.Flags().StringVar(&o.RootChecksum, "root-checksum", "",
-		"checksum of the initial root, required if root is downloaded via http(s). expects sha256 by default, can be changed to sha512 by providing sha512:")
 }
diff --git a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/registry.go b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/registry.go
index 9d22b4ea8b..99d90d7293 100644
--- a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/registry.go
+++ b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/registry.go
@@ -17,12 +17,10 @@ package options
 import (
 	"context"
 	"crypto/tls"
-	"crypto/x509"
 	"errors"
 	"fmt"
 	"io"
 	"net/http"
-	"os"
 
 	ecr "github.com/awslabs/amazon-ecr-credential-helper/ecr-login"
 	"github.com/chrismellard/docker-credential-acr-env/pkg/credhelper"
@@ -47,10 +45,6 @@ type RegistryOptions struct {
 	RefOpts    ReferenceOptions
 	Keychain   Keychain
 	AuthConfig authn.AuthConfig
-	RegistryCACert     string
-	RegistryClientCert string
-	RegistryClientKey  string
-	RegistryServerName string
 
 	// RegistryClientOpts allows overriding the result of GetRegistryClientOpts.
 	RegistryClientOpts []remote.Option
@@ -78,6 +74,4 @@ func (o *RegistryOptions) AddFlags(cmd *cobra.Command) {
 	cmd.Flags().StringVar(&o.AuthConfig.RegistryToken, "registry-token", "",
 		"registry bearer auth token")
 
-	cmd.Flags().StringVar(&o.RegistryCACert, "registry-cacert", "",
-		"path to the X.509 CA certificate file in PEM format to be used for the connection to the registry")
@@ -90,6 +84,2 @@ func (o *RegistryOptions) AddFlags(cmd *cobra.Command) {
-	cmd.Flags().StringVar(&o.RegistryServerName, "registry-server-name", "",
-		"SAN name to use as the 'ServerName' tls.Config field to verify the mTLS connection to the registry")
 
 	o.RefOpts.AddFlags(cmd)
 }
@@ -149,11 +141,8 @@ func (o *RegistryOptions) GetRegistryClientOpts(ctx context.Context) []remote.Op
 		opts = append(opts, remote.WithAuthFromKeychain(authn.DefaultKeychain))
 	}
 
-	tlsConfig, err := o.getTLSConfig()
-	if err == nil {
-		tr := http.DefaultTransport.(*http.Transport).Clone()
-		tr.TLSClientConfig = tlsConfig
-		opts = append(opts, remote.WithTransport(tr))
+	if o.AllowInsecure {
+		opts = append(opts, remote.WithTransport(&http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}})) // #nosec G402
 	}
 
 	// Reuse a remote.Pusher and a remote.Puller for all operations that use these opts.
@@ -214,6 +204,3 @@ func (o *RegistryExperimentalOptions) AddFlags(cmd *cobra.Command) {
 	cmd.Flags().Var(&o.RegistryReferrersMode, "registry-referrers-mode",
 		"mode for fetching references from the registry. allowed: legacy, oci-1-1")
 }
-
-func (o *RegistryOptions) getTLSConfig() (*tls.Config, error) {
-	var tlsConfig tls.Config
@@ -252,3 +239,0 @@ func (o *RegistryOptions) getTLSConfig() (*tls.Config, error) {
-	return &tlsConfig, nil
-}
diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/blob/load.go b/vendor/github.com/sigstore/cosign/v2/pkg/blob/load.go
index 8ee624e93a..0a3b24e7df 100644
--- a/vendor/github.com/sigstore/cosign/v2/pkg/blob/load.go
+++ b/vendor/github.com/sigstore/cosign/v2/pkg/blob/load.go
@@ -15,9 +15,6 @@ package blob
 
 import (
-	"crypto/sha256"
-	"crypto/sha512"
-	"encoding/hex"
 	"fmt"
 	"io"
 	"net/http"
@@ -75,6 +72,3 @@ func LoadFileOrURL(fileRef string) ([]byte, error) {
 	}
 	return raw, nil
 }
-
-func LoadFileOrURLWithChecksum(fileRef string, checksum string) ([]byte, error) {
-	checksumParts := strings.Split(checksum, ":")
@@ -107,3 +101,0 @@ func LoadFileOrURLWithChecksum(fileRef string, checksum string) ([]byte, error)
-	return fileContent, nil
-}
diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/git/gitlab/gitlab.go b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/git/gitlab/gitlab.go
index b124691321..c5424e6102 100644
--- a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/git/gitlab/gitlab.go
+++ b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/git/gitlab/gitlab.go
@@ -24,7 +24,7 @@ import (
 	"github.com/sigstore/cosign/v2/internal/ui"
 	"github.com/sigstore/cosign/v2/pkg/cosign"
 	"github.com/sigstore/cosign/v2/pkg/cosign/env"
-	gitlab "gitlab.com/gitlab-org/api/client-go"
+	"github.com/xanzy/go-gitlab"
 )
 
 const (
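The verify.go hunk below swaps the stdlib errors import for github.com/pkg/errors. For orientation (not part of the patch), the two wrapping styles involved, side by side; message text is invented:

package main

import (
	"fmt"

	pkgerrors "github.com/pkg/errors"
)

func main() {
	base := fmt.Errorf("tlog entry not found")

	// github.com/pkg/errors style, as used by the reverted vendored code:
	wrapped := pkgerrors.Wrap(base, "verifying signature")

	// stdlib-equivalent wrapping for comparison; both unwrap to base:
	stdWrapped := fmt.Errorf("verifying signature: %w", base)

	fmt.Println(wrapped)
	fmt.Println(stdWrapped)
}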
diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/verify.go b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/verify.go
index 3a6ee79b46..cad3d24ef9 100644
--- a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/verify.go
+++ b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/verify.go
@@ -34,7 +34,7 @@ import (
 	"strings"
 	"time"
 
-	"errors"
+	"github.com/pkg/errors"
 
 	"github.com/digitorus/timestamp"
 	"github.com/go-openapi/runtime"
@@ -152,8 +152,6 @@ type CheckOpts struct {
 	TSARootCertificates []*x509.Certificate
 	// TSAIntermediateCertificates are the set of intermediates for chain building
 	TSAIntermediateCertificates []*x509.Certificate
-	// UseSignedTimestamps enables timestamp verification using a TSA
-	UseSignedTimestamps bool
 
 	// IgnoreTlog skip tlog verification
 	IgnoreTlog bool
@@ -668,6 +666,3 @@ func verifyInternal(ctx context.Context, sig oci.Signature, h v1.Hash,
 	bundleVerified bool, err error) {
 	var acceptableRFC3161Time, acceptableRekorBundleTime *time.Time // Timestamps for the signature we accept, or nil if not applicable.
 
-	var acceptableRFC3161Timestamp *timestamp.Timestamp
-	if co.UseSignedTimestamps {
-		acceptableRFC3161Timestamp, err = VerifyRFC3161Timestamp(sig, co)
@@ -677,6 +672,9 @@ func verifyInternal(ctx context.Context, sig oci.Signature, h v1.Hash,
-		if acceptableRFC3161Timestamp != nil {
-			acceptableRFC3161Time = &acceptableRFC3161Timestamp.Time
-		}
+	acceptableRFC3161Timestamp, err := VerifyRFC3161Timestamp(sig, co)
+	if err != nil {
+		return false, fmt.Errorf("unable to verify RFC3161 timestamp bundle: %w", err)
+	}
+	if acceptableRFC3161Timestamp != nil {
+		acceptableRFC3161Time = &acceptableRFC3161Timestamp.Time
 	}
 
 	if !co.IgnoreTlog {
diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/options.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/options.go
index 6eeaadd010..1c7243ba96 100644
--- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/options.go
+++ b/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/options.go
@@ -113,6 +113,3 @@ func WithRemoteOptions(opts ...remote.Option) Option {
 	}
 }
 
-// WithMoreRemoteOptions is a functional option for adding to the default
-// remote options already specified
-func WithMoreRemoteOptions(opts ...remote.Option) Option {
@@ -121,6 +116,3 @@ func WithMoreRemoteOptions(opts ...remote.Option) Option {
-	}
-}
-
 // WithTargetRepository is a functional option for overriding the default
 // target repository hosting the signature and attestation tags.
 func WithTargetRepository(repo name.Repository) Option {
diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1/sigstore_bundle.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1/sigstore_bundle.pb.go
index 80198530de..ab346e1e99 100644
--- a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1/sigstore_bundle.pb.go
+++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1/sigstore_bundle.pb.go
@@ -14,8 +14,8 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1/sigstore_bundle.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1/sigstore_bundle.pb.go index 80198530de..ab346e1e99 100644 --- a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1/sigstore_bundle.pb.go +++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1/sigstore_bundle.pb.go @@ -14,8 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.36.3 // protoc v5.29.3 // source: sigstore_bundle.proto package v1 @@ -42,22 +47,40 @@ const ( // Currently only RFC3161 signatures are provided. More formats may be added // in the future. type TimestampVerificationData struct { state protoimpl.MessageState `protogen:"open.v1"` // A list of RFC3161 signed timestamps provided by the user. // This can be used when the entry has not been stored on a // transparency log, or in conjunction for a stronger trust model. // Clients MUST verify the hashed message in the message imprint // against the signature in the bundle. Rfc3161Timestamps []*v1.RFC3161SignedTimestamp `protobuf:"bytes,1,rep,name=rfc3161_timestamps,json=rfc3161Timestamps,proto3" json:"rfc3161_timestamps,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *TimestampVerificationData) Reset() { *x = TimestampVerificationData{} mi := &file_sigstore_bundle_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *TimestampVerificationData) String() string { @@ -68,7 +91,11 @@ func (*TimestampVerificationData) ProtoMessage() {} func (x *TimestampVerificationData) ProtoReflect() protoreflect.Message { mi := &file_sigstore_bundle_proto_msgTypes[0] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -100,7 +127,14 @@ func (x *TimestampVerificationData) GetRfc3161Timestamps() []*v1.RFC3161SignedTi // the key identifier, it MUST match the `keyid` field of the signature the // extension is attached to. type VerificationMaterial struct { state protoimpl.MessageState `protogen:"open.v1"` // The key material for verification purposes. // // This allows key material to be conveyed in one of three forms: @@ -135,7 +169,11 @@ type VerificationMaterial struct { // When used in a `0.3` bundle with the PGI and "keyless" signing, // form (3) MUST be used. // // Types that are valid to be assigned to Content: // // *VerificationMaterial_PublicKey // *VerificationMaterial_X509CertificateChain @@ -152,15 +190,26 @@ type VerificationMaterial struct { // Timestamp may also come from // tlog_entries.inclusion_promise.signed_entry_timestamp. TimestampVerificationData *TimestampVerificationData `protobuf:"bytes,4,opt,name=timestamp_verification_data,json=timestampVerificationData,proto3" json:"timestamp_verification_data,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *VerificationMaterial) Reset() { *x = VerificationMaterial{} mi := &file_sigstore_bundle_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *VerificationMaterial) String() string { @@ -171,7 +220,11 @@ func (*VerificationMaterial) ProtoMessage() {} func (x *VerificationMaterial) ProtoReflect() protoreflect.Message { mi := &file_sigstore_bundle_proto_msgTypes[1] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -186,36 +239,57 @@ func (*VerificationMaterial) Descriptor() ([]byte, []int) { return file_sigstore_bundle_proto_rawDescGZIP(), []int{1} } func (x *VerificationMaterial) GetContent() isVerificationMaterial_Content { if x != nil { return x.Content } return nil } func (x *VerificationMaterial) GetPublicKey() *v1.PublicKeyIdentifier { if x != nil { if x, ok := x.Content.(*VerificationMaterial_PublicKey); ok { return x.PublicKey } } return nil } func (x *VerificationMaterial) GetX509CertificateChain() *v1.X509CertificateChain { if x != nil { if x, ok := x.Content.(*VerificationMaterial_X509CertificateChain); ok { return x.X509CertificateChain } } return nil } func (x *VerificationMaterial) GetCertificate() *v1.X509Certificate { if x != nil { if x, ok := x.Content.(*VerificationMaterial_Certificate); ok { return x.Certificate } } return nil } @@ -257,7 +331,14 @@ func (*VerificationMaterial_X509CertificateChain) isVerificationMaterial_Content func (*VerificationMaterial_Certificate) isVerificationMaterial_Content() {} type Bundle struct { state protoimpl.MessageState `protogen:"open.v1"` // MUST be application/vnd.dev.sigstore.bundle.v0.3+json when // when encoded as JSON. // Clients must to be able to accept media type using the previously @@ -276,6 +357,7 @@ type Bundle struct { // MUST be exactly the same in the verification material and in the // DSSE envelope. VerificationMaterial *VerificationMaterial `protobuf:"bytes,2,opt,name=verification_material,json=verificationMaterial,proto3" json:"verification_material,omitempty"` // Types that are valid to be assigned to Content: // // *Bundle_MessageSignature @@ -283,13 +365,28 @@ type Bundle struct { // *Bundle_DsseEnvelope Content isBundle_Content `protobuf_oneof:"content"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *Bundle) Reset() { *x = Bundle{} mi := &file_sigstore_bundle_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *Bundle) String() string { @@ -300,7 +397,11 @@ func (*Bundle) ProtoMessage() {} func (x *Bundle) ProtoReflect() protoreflect.Message { mi := &file_sigstore_bundle_proto_msgTypes[2] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -329,27 +430,43 @@ func (x *Bundle) GetVerificationMaterial() *VerificationMaterial { return nil } func (x *Bundle) GetContent() isBundle_Content { if x != nil { return x.Content } return nil } func (x *Bundle) GetMessageSignature() *v1.MessageSignature { if x != nil { if x, ok := x.Content.(*Bundle_MessageSignature); ok { return x.MessageSignature } } return nil } func (x *Bundle) GetDsseEnvelope() *dsse.Envelope { if x != nil { if x, ok := x.Content.(*Bundle_DsseEnvelope); ok { return x.DsseEnvelope } } return nil } @@ -368,10 +485,17 @@ type Bundle_DsseEnvelope struct { // supported and expected type. This is part of the DSSE // protocol which is defined here: // // DSSE envelopes in a bundle MUST have exactly one signature. // This is a limitation from the DSSE spec, as it can contain // multiple signatures. There are two primary reasons: // 1. It simplifies the verification logic and policy // 2. The bundle (currently) can only contain a single // instance of the required verification materials // @@ -479,7 +603,11 @@ func file_sigstore_bundle_proto_rawDescGZIP() []byte { } var file_sigstore_bundle_proto_msgTypes = make([]protoimpl.MessageInfo, 3) var file_sigstore_bundle_proto_goTypes = []any{ (*TimestampVerificationData)(nil), // 0: dev.sigstore.bundle.v1.TimestampVerificationData (*VerificationMaterial)(nil), // 1: dev.sigstore.bundle.v1.VerificationMaterial (*Bundle)(nil), // 2: dev.sigstore.bundle.v1.Bundle @@ -513,12 +641,58 @@ func file_sigstore_bundle_proto_init() { if File_sigstore_bundle_proto != nil { return } file_sigstore_bundle_proto_msgTypes[1].OneofWrappers = []any{ (*VerificationMaterial_PublicKey)(nil), (*VerificationMaterial_X509CertificateChain)(nil), (*VerificationMaterial_Certificate)(nil), } file_sigstore_bundle_proto_msgTypes[2].OneofWrappers = []any{ (*Bundle_MessageSignature)(nil), (*Bundle_DsseEnvelope)(nil), }
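The generated Bundle accessors above make the content oneof safe to branch on: each accessor returns nil when its variant is not set. A short sketch of consumer-side handling, assuming the vendored protobuf-specs import path; the helper name is illustrative:

```go
package main

import (
	"errors"

	bundlev1 "github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1"
)

// bundlePayload extracts the signed payload from a bundle, or explains why
// it cannot.
func bundlePayload(b *bundlev1.Bundle) ([]byte, error) {
	if env := b.GetDsseEnvelope(); env != nil {
		// DSSE case: per the comment above, exactly one signature is allowed.
		return env.GetPayload(), nil
	}
	if b.GetMessageSignature() != nil {
		// MessageSignature bundles carry a digest of the artifact, not the
		// artifact itself.
		return nil, errors.New("message-signature bundles carry only a digest, not the payload")
	}
	return nil, errors.New("bundle content is empty")
}
```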
diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go index 2c5c99efde..6c8639f035 100644 --- a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go +++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go @@ -14,8 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.36.3 // protoc v5.29.3 // source: sigstore_common.proto package v1 @@ -123,6 +128,7 @@ const ( PublicKeyDetails_PUBLIC_KEY_DETAILS_UNSPECIFIED PublicKeyDetails = 0 // RSA // // Deprecated: Marked as deprecated in sigstore_common.proto. PublicKeyDetails_PKCS1_RSA_PKCS1V5 PublicKeyDetails = 1 // See RFC8017 // Deprecated: Marked as deprecated in sigstore_common.proto. @@ -130,6 +136,15 @@ ( // Deprecated: Marked as deprecated in sigstore_common.proto. PublicKeyDetails_PKIX_RSA_PKCS1V5 PublicKeyDetails = 3 // Deprecated: Marked as deprecated in sigstore_common.proto. PublicKeyDetails_PKIX_RSA_PSS PublicKeyDetails = 4 // RSA public key in PKIX format, PKCS#1v1.5 signature PublicKeyDetails_PKIX_RSA_PKCS1V15_2048_SHA256 PublicKeyDetails = 9 @@ -141,7 +156,11 @@ const ( PublicKeyDetails_PKIX_RSA_PSS_4096_SHA256 PublicKeyDetails = 18 // ECDSA // // Deprecated: Marked as deprecated in sigstore_common.proto. PublicKeyDetails_PKIX_ECDSA_P256_HMAC_SHA_256 PublicKeyDetails = 6 // See RFC6979 PublicKeyDetails_PKIX_ECDSA_P256_SHA_256 PublicKeyDetails = 5 // See NIST FIPS 186-4 PublicKeyDetails_PKIX_ECDSA_P384_SHA_384 PublicKeyDetails = 12 @@ -297,6 +316,7 @@ func (SubjectAlternativeNameType) EnumDescriptor() ([]byte, []int) { // HashOutput captures a digest of a 'message' (generic octet sequence) // and the corresponding hash algorithm used. type HashOutput struct { state protoimpl.MessageState `protogen:"open.v1"` Algorithm HashAlgorithm `protobuf:"varint,1,opt,name=algorithm,proto3,enum=dev.sigstore.common.v1.HashAlgorithm" json:"algorithm,omitempty"` // This is the raw octets of the message digest as computed by @@ -304,13 +324,31 @@ type HashOutput struct { Digest []byte `protobuf:"bytes,2,opt,name=digest,proto3" json:"digest,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *HashOutput) Reset() { *x = HashOutput{} mi := &file_sigstore_common_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *HashOutput) String() string { @@ -321,7 +359,11 @@ func (*HashOutput) ProtoMessage() {} func (x *HashOutput) ProtoReflect() protoreflect.Message { mi := &file_sigstore_common_proto_msgTypes[0] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -352,7 +394,14 @@ func (x *HashOutput) GetDigest() []byte { // MessageSignature stores the computed signature over a message. type MessageSignature struct { state protoimpl.MessageState `protogen:"open.v1"` // Message digest can be used to identify the artifact. // Clients MUST NOT attempt to use this digest to verify the associated // signature; it is intended solely for identification. @@ -365,16 +414,28 @@ type MessageSignature struct { // algorithm. // When using a key pair, the algorithm MUST be part of the public // key, which MUST be communicated out-of-band. Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *MessageSignature) Reset() { *x = MessageSignature{} mi := &file_sigstore_common_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *MessageSignature) String() string { @@ -385,7 +446,11 @@ func (*MessageSignature) ProtoMessage() {} func (x *MessageSignature) ProtoReflect() protoreflect.Message { mi := &file_sigstore_common_proto_msgTypes[1] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -416,18 +481,35 @@ func (x *MessageSignature) GetSignature() []byte { // LogId captures the identity of a transparency log. type LogId struct { state protoimpl.MessageState `protogen:"open.v1"` // The unique identity of the log, represented by its public key. KeyId []byte `protobuf:"bytes,1,opt,name=key_id,json=keyId,proto3" json:"key_id,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *LogId) Reset() { *x = LogId{} mi := &file_sigstore_common_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *LogId) String() string { @@ -438,7 +520,11 @@ func (*LogId) ProtoMessage() {} func (x *LogId) ProtoReflect() protoreflect.Message { mi := &file_sigstore_common_proto_msgTypes[2] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -462,19 +548,37 @@ func (x *LogId) GetKeyId() []byte { // This message holds a RFC 3161 timestamp. type RFC3161SignedTimestamp struct { state protoimpl.MessageState `protogen:"open.v1"` // Signed timestamp is the DER encoded TimeStampResponse. // See https://www.rfc-editor.org/rfc/rfc3161.html#section-2.4.2 SignedTimestamp []byte `protobuf:"bytes,1,opt,name=signed_timestamp,json=signedTimestamp,proto3" json:"signed_timestamp,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *RFC3161SignedTimestamp) Reset() { *x = RFC3161SignedTimestamp{} mi := &file_sigstore_common_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *RFC3161SignedTimestamp) String() string { @@ -485,7 +589,11 @@ func (*RFC3161SignedTimestamp) ProtoMessage() {} func (x *RFC3161SignedTimestamp) ProtoReflect() protoreflect.Message { mi := &file_sigstore_common_proto_msgTypes[3] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -508,23 +616,42 @@ func (x *RFC3161SignedTimestamp) GetSignedTimestamp() []byte { } type PublicKey struct { state protoimpl.MessageState `protogen:"open.v1"` // DER-encoded public key, encoding method is specified by the // key_details attribute. RawBytes []byte `protobuf:"bytes,1,opt,name=raw_bytes,json=rawBytes,proto3,oneof" json:"raw_bytes,omitempty"` // Key encoding and signature algorithm to use for this key. KeyDetails PublicKeyDetails `protobuf:"varint,2,opt,name=key_details,json=keyDetails,proto3,enum=dev.sigstore.common.v1.PublicKeyDetails" json:"key_details,omitempty"` // Optional validity period for this key, *inclusive* of the endpoints. ValidFor *TimeRange `protobuf:"bytes,3,opt,name=valid_for,json=validFor,proto3,oneof" json:"valid_for,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *PublicKey) Reset() { *x = PublicKey{} mi := &file_sigstore_common_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *PublicKey) String() string { @@ -535,7 +662,11 @@ func (*PublicKey) ProtoMessage() {} func (x *PublicKey) ProtoReflect() protoreflect.Message { mi := &file_sigstore_common_proto_msgTypes[4] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -574,7 +705,14 @@ func (x *PublicKey) GetValidFor() *TimeRange { // PublicKeyIdentifier can be used to identify an (out of band) delivered // key, to verify a signature. type PublicKeyIdentifier struct { state protoimpl.MessageState `protogen:"open.v1"` // Optional unauthenticated hint on which key to use. // The format of the hint must be agreed upon out of band by the // signer and the verifiers, and so is not subject to this @@ -584,16 +722,28 @@ type PublicKeyIdentifier struct { // Implementors are RECOMMENDED to derive the value from the public // key as described in RFC 6962. // See: Hint string `protobuf:"bytes,1,opt,name=hint,proto3" json:"hint,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *PublicKeyIdentifier) Reset() { *x = PublicKeyIdentifier{} mi := &file_sigstore_common_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *PublicKeyIdentifier) String() string { @@ -604,7 +754,11 @@ func (*PublicKeyIdentifier) ProtoMessage() {} func (x *PublicKeyIdentifier) ProtoReflect() protoreflect.Message { mi := &file_sigstore_common_proto_msgTypes[5] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -628,17 +782,33 @@ func (x *PublicKeyIdentifier) GetHint() string { // An ASN.1 OBJECT IDENTIFIER type ObjectIdentifier struct { state protoimpl.MessageState `protogen:"open.v1"` Id []int32 `protobuf:"varint,1,rep,packed,name=id,proto3" json:"id,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *ObjectIdentifier) Reset() { *x = ObjectIdentifier{} mi := &file_sigstore_common_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *ObjectIdentifier) String() string { @@ -649,7 +819,11 @@ func (*ObjectIdentifier) ProtoMessage() {} func (x *ObjectIdentifier) ProtoReflect() protoreflect.Message { mi := &file_sigstore_common_proto_msgTypes[6] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -673,18 +847,35 @@ func (x *ObjectIdentifier) GetId() []int32 { // An OID and the corresponding (byte) value. type ObjectIdentifierValuePair struct { state protoimpl.MessageState `protogen:"open.v1"` Oid *ObjectIdentifier `protobuf:"bytes,1,opt,name=oid,proto3" json:"oid,omitempty"` Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *ObjectIdentifierValuePair) Reset() { *x = ObjectIdentifierValuePair{} mi := &file_sigstore_common_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *ObjectIdentifierValuePair) String() string { @@ -695,7 +886,11 @@ func (*ObjectIdentifierValuePair) ProtoMessage() {} func (x *ObjectIdentifierValuePair) ProtoReflect() protoreflect.Message { mi := &file_sigstore_common_proto_msgTypes[7] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -725,18 +920,35 @@ func (x *ObjectIdentifierValuePair) GetValue() []byte { } type DistinguishedName struct { state protoimpl.MessageState `protogen:"open.v1"` Organization string `protobuf:"bytes,1,opt,name=organization,proto3" json:"organization,omitempty"` CommonName string `protobuf:"bytes,2,opt,name=common_name,json=commonName,proto3" json:"common_name,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *DistinguishedName) Reset() { *x = DistinguishedName{} mi := &file_sigstore_common_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *DistinguishedName) String() string { @@ -747,7 +959,11 @@ func (*DistinguishedName) ProtoMessage() {} func (x *DistinguishedName) ProtoReflect() protoreflect.Message { mi := &file_sigstore_common_proto_msgTypes[8] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -777,18 +993,35 @@ func (x *DistinguishedName) GetCommonName() string { } type X509Certificate struct { state protoimpl.MessageState `protogen:"open.v1"` // DER-encoded X.509 certificate. RawBytes []byte `protobuf:"bytes,1,opt,name=raw_bytes,json=rawBytes,proto3" json:"raw_bytes,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *X509Certificate) Reset() { *x = X509Certificate{} mi := &file_sigstore_common_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *X509Certificate) String() string { @@ -799,7 +1032,11 @@ func (*X509Certificate) ProtoMessage() {} func (x *X509Certificate) ProtoReflect() protoreflect.Message { mi := &file_sigstore_common_proto_msgTypes[9] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -822,6 +1059,7 @@ func (x *X509Certificate) GetRawBytes() []byte { } type SubjectAlternativeName struct { state protoimpl.MessageState `protogen:"open.v1"` Type SubjectAlternativeNameType `protobuf:"varint,1,opt,name=type,proto3,enum=dev.sigstore.common.v1.SubjectAlternativeNameType" json:"type,omitempty"` // Types that are valid to be assigned to Identity: @@ -831,13 +1069,33 @@ type SubjectAlternativeName struct { Identity isSubjectAlternativeName_Identity `protobuf_oneof:"identity"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *SubjectAlternativeName) Reset() { *x = SubjectAlternativeName{} mi := &file_sigstore_common_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *SubjectAlternativeName) String() string { @@ -848,7 +1106,11 @@ func (*SubjectAlternativeName) ProtoMessage() {} func (x *SubjectAlternativeName) ProtoReflect() protoreflect.Message { mi := &file_sigstore_common_proto_msgTypes[10] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -870,27 +1132,43 @@ func (x *SubjectAlternativeName) GetType() SubjectAlternativeNameType { return SubjectAlternativeNameType_SUBJECT_ALTERNATIVE_NAME_TYPE_UNSPECIFIED } func (x *SubjectAlternativeName) GetIdentity() isSubjectAlternativeName_Identity { if x != nil { return x.Identity } return nil } func (x *SubjectAlternativeName) GetRegexp() string { if x != nil { if x, ok := x.Identity.(*SubjectAlternativeName_Regexp); ok { return x.Regexp } } return "" } func (x *SubjectAlternativeName) GetValue() string { if x != nil { if x, ok := x.Identity.(*SubjectAlternativeName_Value); ok { return x.Value } } return "" } @@ -920,22 +1198,41 @@ func (*SubjectAlternativeName_Value) isSubjectAlternativeName_Identity() {} // certificate within a TUF root of trust or multiple untrusted certificates for // the purpose of chain building. type X509CertificateChain struct { state protoimpl.MessageState `protogen:"open.v1"` // One or more DER-encoded certificates. // // In some contexts (such as `VerificationMaterial.x509_certificate_chain`), this sequence // has an imposed order. Unless explicitly specified, there is otherwise no // guaranteed order. Certificates []*X509Certificate `protobuf:"bytes,1,rep,name=certificates,proto3" json:"certificates,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *X509CertificateChain) Reset() { *x = X509CertificateChain{} mi := &file_sigstore_common_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *X509CertificateChain) String() string { @@ -946,7 +1243,11 @@ func (*X509CertificateChain) ProtoMessage() {} func (x *X509CertificateChain) ProtoReflect() protoreflect.Message { mi := &file_sigstore_common_proto_msgTypes[11] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -973,18 +1274,35 @@ func (x *X509CertificateChain) GetCertificates() []*X509Certificate { // End is optional to be able to capture a period that has started but // has no known end. type TimeRange struct { state protoimpl.MessageState `protogen:"open.v1"` Start *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=start,proto3" json:"start,omitempty"` End *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=end,proto3,oneof" json:"end,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *TimeRange) Reset() { *x = TimeRange{} mi := &file_sigstore_common_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *TimeRange) String() string { @@ -995,7 +1313,11 @@ func (*TimeRange) ProtoMessage() {} func (x *TimeRange) ProtoReflect() protoreflect.Message { mi := &file_sigstore_common_proto_msgTypes[12] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1189,7 +1511,11 @@ func file_sigstore_common_proto_rawDescGZIP() []byte { } var file_sigstore_common_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_sigstore_common_proto_msgTypes = make([]protoimpl.MessageInfo, 13) var file_sigstore_common_proto_goTypes = []any{ (HashAlgorithm)(0), // 0: dev.sigstore.common.v1.HashAlgorithm (PublicKeyDetails)(0), // 1: dev.sigstore.common.v1.PublicKeyDetails (SubjectAlternativeNameType)(0), // 2: dev.sigstore.common.v1.SubjectAlternativeNameType @@ -1230,12 +1556,179 @@ func file_sigstore_common_proto_init() { if File_sigstore_common_proto != nil { return } file_sigstore_common_proto_msgTypes[4].OneofWrappers = []any{} file_sigstore_common_proto_msgTypes[10].OneofWrappers = []any{ (*SubjectAlternativeName_Regexp)(nil), (*SubjectAlternativeName_Value)(nil), } file_sigstore_common_proto_msgTypes[12].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{
diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/dsse/envelope.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/dsse/envelope.pb.go index 16e581ebe0..dea54e9b03 100644 --- a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/dsse/envelope.pb.go +++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/dsse/envelope.pb.go @@ -14,8 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.36.3 // protoc v5.29.3 // source: envelope.proto package dsse @@ -36,7 +41,14 @@ const ( // An authenticated message of arbitrary type. type Envelope struct { state protoimpl.MessageState `protogen:"open.v1"` // Message to be signed. (In JSON, this is encoded as base64.) // REQUIRED. Payload []byte `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` @@ -54,16 +66,28 @@ type Envelope struct { // "DSSEv1" = ASCII [0x44, 0x53, 0x53, 0x45, 0x76, 0x31] // LEN(s) = ASCII decimal encoding of the byte length of s, with no leading zeros // REQUIRED (length >= 1). Signatures []*Signature `protobuf:"bytes,3,rep,name=signatures,proto3" json:"signatures,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *Envelope) Reset() { *x = Envelope{} mi := &file_envelope_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *Envelope) String() string { @@ -74,7 +98,11 @@ func (*Envelope) ProtoMessage() {} func (x *Envelope) ProtoReflect() protoreflect.Message { mi := &file_envelope_proto_msgTypes[0] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -111,22 +139,41 @@ func (x *Envelope) GetSignatures() []*Signature { } type Signature struct { state protoimpl.MessageState `protogen:"open.v1"` // Signature itself. (In JSON, this is encoded as base64.) // REQUIRED. Sig []byte `protobuf:"bytes,1,opt,name=sig,proto3" json:"sig,omitempty"` // *Unauthenticated* hint identifying which public key was used. // OPTIONAL. Keyid string `protobuf:"bytes,2,opt,name=keyid,proto3" json:"keyid,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *Signature) Reset() { *x = Signature{} mi := &file_envelope_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } func (x *Signature) String() string { @@ -137,7 +184,11 @@ func (*Signature) ProtoMessage() {} func (x *Signature) ProtoReflect() protoreflect.Message { mi := &file_envelope_proto_msgTypes[1] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -202,7 +253,11 @@ func file_envelope_proto_rawDescGZIP() []byte { } var file_envelope_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_envelope_proto_goTypes = []any{ (*Envelope)(nil), // 0: io.intoto.Envelope (*Signature)(nil), // 1: io.intoto.Signature } @@ -220,6 +275,35 @@ func file_envelope_proto_init() { if File_envelope_proto != nil { return } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{
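The Envelope comment above defines the DSSE pre-authentication encoding (PAE): the signature covers PAE(payloadType, payload), not the raw payload. A small worked example of that construction; this is a standalone helper following the quoted rules, not part of the generated package:

```go
package main

import "fmt"

// pae builds "DSSEv1" SP LEN(type) SP type SP LEN(payload) SP payload,
// where LEN(s) is the ASCII decimal byte length of s with no leading zeros.
func pae(payloadType string, payload []byte) []byte {
	return []byte(fmt.Sprintf("DSSEv1 %d %s %d %s",
		len(payloadType), payloadType, len(payload), payload))
}

func main() {
	fmt.Printf("%s\n", pae("application/vnd.in-toto+json", []byte("hello world")))
	// Output: DSSEv1 28 application/vnd.in-toto+json 11 hello world
}
```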
// See here for a list: https://github.com/sigstore/rekor/tree/main/pkg/types Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` // The specific api version of the type. +<<<<<<< HEAD Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache +======= + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *KindVersion) Reset() { *x = KindVersion{} +<<<<<<< HEAD mi := &file_sigstore_rekor_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_sigstore_rekor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *KindVersion) String() string { @@ -63,7 +87,11 @@ func (*KindVersion) ProtoMessage() {} func (x *KindVersion) ProtoReflect() protoreflect.Message { mi := &file_sigstore_rekor_proto_msgTypes[0] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -103,17 +131,33 @@ func (x *KindVersion) GetVersion() string { // and https://github.com/C2SP/C2SP/blob/main/tlog-checkpoint.md. // An example implementation can be found in https://github.com/sigstore/rekor/blob/main/pkg/util/signed_note.go type Checkpoint struct { +<<<<<<< HEAD state protoimpl.MessageState `protogen:"open.v1"` Envelope string `protobuf:"bytes,1,opt,name=envelope,proto3" json:"envelope,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache +======= + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Envelope string `protobuf:"bytes,1,opt,name=envelope,proto3" json:"envelope,omitempty"` +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Checkpoint) Reset() { *x = Checkpoint{} +<<<<<<< HEAD mi := &file_sigstore_rekor_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_sigstore_rekor_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Checkpoint) String() string { @@ -124,7 +168,11 @@ func (*Checkpoint) ProtoMessage() {} func (x *Checkpoint) ProtoReflect() protoreflect.Message { mi := &file_sigstore_rekor_proto_msgTypes[1] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -149,7 +197,14 @@ func (x *Checkpoint) GetEnvelope() string { // InclusionProof is the proof returned from the transparency log. Can // be used for offline or online verification against the log. type InclusionProof struct { +<<<<<<< HEAD state protoimpl.MessageState `protogen:"open.v1"` +======= + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The index of the entry in the tree it was written to. 
	LogIndex int64 `protobuf:"varint,1,opt,name=log_index,json=logIndex,proto3" json:"log_index,omitempty"`
	// The hash digest stored at the root of the merkle tree at the time
@@ -165,16 +220,28 @@ type InclusionProof struct {
	Hashes [][]byte `protobuf:"bytes,4,rep,name=hashes,proto3" json:"hashes,omitempty"`
	// Signature of the tree head, as of the time this proof was
	// generated. See above info on 'Checkpoint' for more details.
	Checkpoint    *Checkpoint `protobuf:"bytes,5,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *InclusionProof) Reset() {
	*x = InclusionProof{}
	mi := &file_sigstore_rekor_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *InclusionProof) String() string {
@@ -185,7 +252,11 @@ func (*InclusionProof) ProtoMessage() {}

func (x *InclusionProof) ProtoReflect() protoreflect.Message {
	mi := &file_sigstore_rekor_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
@@ -246,17 +317,33 @@ func (x *InclusionProof) GetCheckpoint() *Checkpoint {
// This is used to verify the integration timestamp's value and that the log
// has promised to include the entry.
type InclusionPromise struct {
	state                protoimpl.MessageState `protogen:"open.v1"`
	SignedEntryTimestamp []byte                 `protobuf:"bytes,1,opt,name=signed_entry_timestamp,json=signedEntryTimestamp,proto3" json:"signed_entry_timestamp,omitempty"`
	unknownFields        protoimpl.UnknownFields
	sizeCache            protoimpl.SizeCache
}

func (x *InclusionPromise) Reset() {
	*x = InclusionPromise{}
	mi := &file_sigstore_rekor_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *InclusionPromise) String() string {
@@ -267,7 +354,11 @@ func (*InclusionPromise) ProtoMessage() {}

func (x *InclusionPromise) ProtoReflect() protoreflect.Message {
	mi := &file_sigstore_rekor_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
@@ -298,7 +389,14 @@ func (x *InclusionPromise) GetSignedEntryTimestamp() []byte {
// the response from Rekor) is similar to a Signed Certificate Timestamp
// as described here https://www.rfc-editor.org/rfc/rfc6962.html#section-3.2.
type TransparencyLogEntry struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// The global index of the entry, used when querying the log by index.
	LogIndex int64 `protobuf:"varint,1,opt,name=log_index,json=logIndex,proto3" json:"log_index,omitempty"`
	// The unique identifier of the log.
@@ -308,6 +406,7 @@ type TransparencyLogEntry struct {
	// verification.
	KindVersion *KindVersion `protobuf:"bytes,3,opt,name=kind_version,json=kindVersion,proto3" json:"kind_version,omitempty"`
	// The UNIX timestamp from the log when the entry was persisted.
	// The integration time MUST NOT be trusted if inclusion_promise
	// is omitted.
	IntegratedTime int64 `protobuf:"varint,4,opt,name=integrated_time,json=integratedTime,proto3" json:"integrated_time,omitempty"`
@@ -318,6 +417,13 @@ type TransparencyLogEntry struct {
	// or the current system time for long-lived certificates).
	// MUST be verified if no other suitable source of time is present,
	// and SHOULD be verified otherwise.
	InclusionPromise *InclusionPromise `protobuf:"bytes,5,opt,name=inclusion_promise,json=inclusionPromise,proto3" json:"inclusion_promise,omitempty"`
	// The inclusion proof can be used for offline or online verification
	// that the entry was appended to the log, and that the log has not been
@@ -342,15 +448,26 @@ type TransparencyLogEntry struct {
	// If not set, clients are responsible for constructing an equivalent
	// payload from other sources to verify the signature.
	CanonicalizedBody []byte `protobuf:"bytes,7,opt,name=canonicalized_body,json=canonicalizedBody,proto3" json:"canonicalized_body,omitempty"`
	unknownFields     protoimpl.UnknownFields
	sizeCache         protoimpl.SizeCache
}

func (x *TransparencyLogEntry) Reset() {
	*x = TransparencyLogEntry{}
	mi := &file_sigstore_rekor_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *TransparencyLogEntry) String() string {
@@ -361,7 +478,11 @@ func (*TransparencyLogEntry) ProtoMessage() {}

func (x *TransparencyLogEntry) ProtoReflect() protoreflect.Message {
	mi := &file_sigstore_rekor_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
@@ -513,7 +634,11 @@ func file_sigstore_rekor_proto_rawDescGZIP() []byte {
}

var file_sigstore_rekor_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
var file_sigstore_rekor_proto_goTypes = []any{
	(*KindVersion)(nil),    // 0: dev.sigstore.rekor.v1.KindVersion
	(*Checkpoint)(nil),     // 1: dev.sigstore.rekor.v1.Checkpoint
	(*InclusionProof)(nil), // 2: dev.sigstore.rekor.v1.InclusionProof
@@ -539,6 +664,71 @@ func file_sigstore_rekor_proto_init() {
	if File_sigstore_rekor_proto != nil {
		return
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
diff --git a/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/hashedrekord_v0_0_1_schema.json b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/hashedrekord_v0_0_1_schema.json
index 3d536eb49e..0aba7f3926 100644
--- a/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/hashedrekord_v0_0_1_schema.json
+++ b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/hashedrekord_v0_0_1_schema.json
@@ -47,7 +47,11 @@
                },
                "required": [ "algorithm", "value" ]
            }
        }
    }
},
"required": [ "signature", "data" ]
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/README.md b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/README.md
index 6bfe65b04e..6a36d799da 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/README.md
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/README.md
@@ -50,6 +50,7 @@ cosign verify --key azurekms://[Key Vault Name].vault.azure.net/[Key Name] [Container Reference]

## Authentication

This module uses the [`DefaultAzureCredential` type](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential)
to authenticate. This type supports the following authentication methods:
@@ -61,6 +62,19 @@ to authenticate. This type supports the following authentication methods:

See the [official documentation](https://learn.microsoft.com/en-us/azure/developer/go/azure-sdk-authentication?tabs=bash) for more information.
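For reference, a minimal sketch of the credential flow this README describes, using the same `NewDefaultAzureCredential` and `azkeys.NewClient` calls that `newAzureKMS` uses in the next file; the vault URL and key name are placeholders, not values from this patch:

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
)

func main() {
	// DefaultAzureCredential walks the documented chain (environment,
	// workload identity, managed identity, Azure CLI, ...).
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		panic(err)
	}

	// Placeholder vault URL; replace with your Key Vault endpoint.
	client, err := azkeys.NewClient("https://myvault.vault.azure.net", cred, nil)
	if err != nil {
		panic(err)
	}

	// An empty version string selects the latest version of the key.
	key, err := client.GetKey(context.Background(), "my-key", "", nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(*key.Key.KID)
}
```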
If you would like to use a cloud other than the Azure public cloud, configure `AZURE_ENVIRONMENT`. The following values are accepted:
- `AZUREUSGOVERNMENT`, `AZUREUSGOVERNMENTCLOUD` uses the Azure US Government Cloud
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/client.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/client.go
index 38c788c8c6..1080a11f28 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/client.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/client.go
@@ -33,6 +33,10 @@ import (

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
	"github.com/go-jose/go-jose/v3"
	"github.com/jellydator/ttlcache/v3"

@@ -116,6 +120,7 @@ func newAzureKMS(keyResourceID string) (*azureVaultClient, error) {
		return nil, err
	}

	opts := getAzClientOpts()
	cred, err := azidentity.NewDefaultAzureCredential(&azidentity.DefaultAzureCredentialOptions{ClientOptions: opts})
	if err != nil {
@@ -123,6 +128,9 @@ func newAzureKMS(keyResourceID string) (*azureVaultClient, error) {
	}

	client, err := azkeys.NewClient(vaultURL, cred, nil)
	if err != nil {
		return nil, fmt.Errorf("new azure kms client: %w", err)
	}
@@ -140,6 +148,47 @@ func newAzureKMS(keyResourceID string) (*azureVaultClient, error) {
	return azClient, nil
}

func getAzClientOpts() azcore.ClientOptions {
	envName := os.Getenv("AZURE_ENVIRONMENT")
	switch envName {
@@ -154,6 +203,77 @@ func getAzClientOpts() azcore.ClientOptions {
	}
}

func (a *azureVaultClient) fetchPublicKey(ctx context.Context) (crypto.PublicKey, error) {
	keyBundle, err := a.getKey(ctx)
	if err != nil {
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/kms.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/kms.go
index 7baf9504b8..41fc883d82 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/kms.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/kms.go
@@ -19,6 +19,7 @@ package kms
import (
	"context"
	"crypto"
	"errors"
	"fmt"
	"os/exec"
@@ -26,6 +27,12 @@
	"github.com/sigstore/sigstore/pkg/signature"
	"github.com/sigstore/sigstore/pkg/signature/kms/cliplugin"
)

// ProviderNotFoundError indicates that no matching KMS provider was found
@@ -51,6 +58,7 @@ func AddProvider(keyResourceID string, init ProviderInit) {
var providersMap = map[string]ProviderInit{}

// Get returns a KMS SignerVerifier for the given resource string and hash function.
// If no matching built-in provider is found, it will try to use the plugin system as a provider.
// It returns a ProviderNotFoundError in these situations:
//   - keyResourceID doesn't match any of our hard-coded providers' schemas,
@@ -72,6 +80,17 @@
		return nil, fmt.Errorf("%w: %w", &ProviderNotFoundError{ref: keyResourceID}, err)
	}
	return sv, err
}

// SupportedProviders returns list of initialized providers
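A hedged usage sketch of the `Get` entry point documented above; the `azurekms://` reference is illustrative, and the blank import assumes the usual init-time registration each provider package performs:

```go
package main

import (
	"context"
	"crypto"
	"fmt"

	"github.com/sigstore/sigstore/pkg/signature/kms"
	// Providers register themselves in an init func, so the package for the
	// desired scheme must be imported for its prefix to resolve.
	_ "github.com/sigstore/sigstore/pkg/signature/kms/azure"
)

func main() {
	// The scheme prefix of the key reference selects the provider.
	sv, err := kms.Get(context.Background(), "azurekms://myvault.vault.azure.net/my-key", crypto.SHA256)
	if err != nil {
		fmt.Println("no usable provider:", err)
		return
	}
	fmt.Printf("got SignerVerifier: %T\n", sv)
}
```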
diff --git a/vendor/github.com/sigstore/timestamp-authority/pkg/verification/verify.go b/vendor/github.com/sigstore/timestamp-authority/pkg/verification/verify.go
index 82bebdd2f8..88aaf389eb 100644
--- a/vendor/github.com/sigstore/timestamp-authority/pkg/verification/verify.go
+++ b/vendor/github.com/sigstore/timestamp-authority/pkg/verification/verify.go
@@ -250,7 +250,11 @@ func verifyTSRWithChain(ts *timestamp.Timestamp, opts VerifyOpts) error {
		return fmt.Errorf("error parsing hashed message: %w", err)
	}

	if len(opts.Roots) == 0 {
		return fmt.Errorf("no root certificates provided for verifying the certificate chain")
	}

diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go
index 7c058de374..9cd8df117c 100644
--- a/vendor/github.com/spf13/pflag/flag.go
+++ b/vendor/github.com/spf13/pflag/flag.go
@@ -160,7 +160,11 @@ type FlagSet struct {
	args []string // arguments after flags
	argsLenAtDash int // len(args) when a '--' was located when parsing, or -1 if no --
	errorHandling ErrorHandling
	output io.Writer // nil means stderr; use Output() accessor
	interspersed bool // allow interspersed option/non-option args
	normalizeNameFunc func(f *FlagSet, name string) NormalizedName
@@ -255,20 +259,27 @@ func (f *FlagSet) normalizeFlagName(name string) NormalizedName {
	return n(f, name)
}

// Output returns the destination for usage and error messages. os.Stderr is returned if
// output was not set or was set to nil.
func (f *FlagSet) Output() io.Writer {
	if f.output == nil {
		return os.Stderr
	}
	return f.output
}

// Name returns the name of the flag set.
func (f *FlagSet) Name() string {
	return f.name
}

// SetOutput sets the destination for usage and error messages.
// If output is nil, os.Stderr is used.
func (f *FlagSet) SetOutput(output io.Writer) {
@@ -365,7 +376,11 @@ func (f *FlagSet) ShorthandLookup(name string) *Flag {
	}
	if len(name) > 1 {
		msg := fmt.Sprintf("can not look up shorthand which is more than one ASCII character: %q", name)
		fmt.Fprintf(f.Output(), msg)
		panic(msg)
	}
	c := name[0]
@@ -489,7 +504,11 @@ func (f *FlagSet) Set(name, value string) error {
	}

	if flag.Deprecated != "" {
		fmt.Fprintf(f.Output(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
	}
	return nil
}
@@ -530,7 +549,11 @@ func Set(name, value string) error {
// otherwise, the default values of all defined flags in the set.
func (f *FlagSet) PrintDefaults() {
	usages := f.FlagUsages()
	fmt.Fprint(f.Output(), usages)
}

// defaultIsZeroValue returns true if the default value for this flag represents
@@ -765,7 +788,11 @@ func PrintDefaults() {

// defaultUsage is the default function to print a usage message.
func defaultUsage(f *FlagSet) {
	fmt.Fprintf(f.Output(), "Usage of %s:\n", f.name)
	f.PrintDefaults()
}

@@ -851,7 +878,11 @@ func (f *FlagSet) AddFlag(flag *Flag) {
	_, alreadyThere := f.formal[normalizedFlagName]
	if alreadyThere {
		msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name)
		fmt.Fprintln(f.Output(), msg)
		panic(msg) // Happens only if flags are declared with identical names
	}
	if f.formal == nil {
@@ -867,7 +898,11 @@ func (f *FlagSet) AddFlag(flag *Flag) {
	}
	if len(flag.Shorthand) > 1 {
		msg := fmt.Sprintf("%q shorthand is more than one ASCII character", flag.Shorthand)
		fmt.Fprintf(f.Output(), msg)
		panic(msg)
	}
	if f.shorthands == nil {
@@ -877,7 +912,11 @@ func (f *FlagSet) AddFlag(flag *Flag) {
	used, alreadyThere := f.shorthands[c]
	if alreadyThere {
		msg := fmt.Sprintf("unable to redefine %q shorthand in %q flagset: it's already used for %q flag", c, f.name, used.Name)
		fmt.Fprintf(f.Output(), msg)
		panic(msg)
	}
	f.shorthands[c] = flag
@@ -916,7 +955,11 @@ func VarP(value Value, name, shorthand, usage string) {
func (f *FlagSet) failf(format string, a ...interface{}) error {
	err := fmt.Errorf(format, a...)
	if f.errorHandling != ContinueOnError {
		fmt.Fprintln(f.Output(), err)
		f.usage()
	}
	return err
@@ -1067,7 +1110,11 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outArgs []string, outErr error) {
	}

	if flag.ShorthandDeprecated != "" {
		fmt.Fprintf(f.Output(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated)
	}

	err = fn(flag, value)
diff --git a/vendor/github.com/spf13/pflag/ip.go b/vendor/github.com/spf13/pflag/ip.go
index 06b8bcb572..41c2bce29a 100644
--- a/vendor/github.com/spf13/pflag/ip.go
+++ b/vendor/github.com/spf13/pflag/ip.go
@@ -16,9 +16,12 @@ func newIPValue(val net.IP, p *net.IP) *ipValue {
func (i *ipValue) String() string { return net.IP(*i).String() }
func (i *ipValue) Set(s string) error {
	if s == "" {
		return nil
	}
	ip := net.ParseIP(strings.TrimSpace(s))
	if ip == nil {
		return fmt.Errorf("failed to parse IP: %q", s)
diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go
index d1ff0a96ba..0ab2b13385 100644
--- a/vendor/github.com/spf13/pflag/string_array.go
+++ b/vendor/github.com/spf13/pflag/string_array.go
@@ -31,7 +31,15 @@ func (s *stringArrayValue) Append(val string) error {
func (s *stringArrayValue) Replace(val []string) error {
	out := make([]string, len(val))
	for i, d := range val {
		out[i] = d
	}
	*s.value = out
	return nil
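A small sketch of the `Output` and `Name` accessors shown in the flag.go hunks above, assuming the exported methods introduced there; the flag set name and flag are illustrative:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.String("mode", "fast", "processing mode")

	// Route usage and error messages to a buffer instead of os.Stderr.
	var buf bytes.Buffer
	fs.SetOutput(&buf)

	// Output() returns the configured writer (os.Stderr when unset).
	fmt.Fprintf(fs.Output(), "Usage of %s:\n", fs.Name())
	fs.PrintDefaults()
	fmt.Print(buf.String())
}
```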
diff --git a/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/bundle.go b/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/bundle.go
index ebd3cacd47..7f211724b7 100644
--- a/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/bundle.go
+++ b/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/bundle.go
@@ -3,7 +3,10 @@ package jwtbundle
import (
	"crypto"
	"encoding/json"
	"errors"
	"io"
	"os"
	"sync"
@@ -70,7 +73,11 @@ func Parse(trustDomain spiffeid.TrustDomain, bundleBytes []byte) (*Bundle, error) {
	bundle := New(trustDomain)
	for i, key := range jwks.Keys {
		if err := bundle.AddJWTAuthority(key.KeyID, key.Key); err != nil {
			return nil, jwtbundleErr.New("error adding authority %d of JWKS: %v", i, errors.Unwrap(err))
		}
	}

diff --git a/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/doc.go b/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/doc.go
index 394878e1b2..44c67b1fd3 100644
--- a/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/doc.go
+++ b/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/doc.go
@@ -5,17 +5,29 @@
//
// You can create a new bundle for a specific trust domain:
//
//	td := spiffeid.RequireTrustDomainFromString("example.org")
//	bundle := jwtbundle.New(td)
//
// Or you can load it from disk:
//
//	td := spiffeid.RequireTrustDomainFromString("example.org")
//	bundle := jwtbundle.Load(td, "bundle.jwks")
//
// The bundle can be initialized with JWT authorities:
//
//	td := spiffeid.RequireTrustDomainFromString("example.org")
//	var jwtAuthorities map[string]crypto.PublicKey = ...
//	bundle := jwtbundle.FromJWTAuthorities(td, jwtAuthorities)
//
diff --git a/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/bundle.go b/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/bundle.go
index 13b103e24c..9f807acfe2 100644
--- a/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/bundle.go
+++ b/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/bundle.go
@@ -4,7 +4,10 @@ import (
	"crypto"
	"crypto/x509"
	"encoding/json"
	"errors"
	"io"
	"os"
	"sync"
@@ -107,7 +110,11 @@ func Parse(trustDomain spiffeid.TrustDomain, bundleBytes []byte) (*Bundle, error) {
			bundle.AddX509Authority(key.Certificates[0])
		case jwtSVIDUse:
			if err := bundle.AddJWTAuthority(key.KeyID, key.Key); err != nil {
				return nil, spiffebundleErr.New("error adding authority %d of JWKS: %v", i, errors.Unwrap(err))
			}
		}
	}
diff --git a/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/doc.go b/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/doc.go
index db9dcde31f..02ac3bf510 100644
--- a/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/doc.go
+++ b/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/doc.go
@@ -5,17 +5,29 @@
//
// You can create a new bundle for a specific trust domain:
//
//	td := spiffeid.RequireTrustDomainFromString("example.org")
//	bundle := spiffebundle.New(td)
//
// Or you can load it from disk:
//
//	td := spiffeid.RequireTrustDomainFromString("example.org")
//	bundle := spiffebundle.Load(td, "bundle.json")
//
// The bundle can be initialized with X.509 or JWT authorities:
//
//	td := spiffeid.RequireTrustDomainFromString("example.org")
//
//	var x509Authorities []*x509.Certificate = ...
//	bundle := spiffebundle.FromX509Authorities(td, x509Authorities)
diff --git a/vendor/github.com/spiffe/go-spiffe/v2/bundle/x509bundle/doc.go b/vendor/github.com/spiffe/go-spiffe/v2/bundle/x509bundle/doc.go
index 889554f822..40869fb265 100644
--- a/vendor/github.com/spiffe/go-spiffe/v2/bundle/x509bundle/doc.go
+++ b/vendor/github.com/spiffe/go-spiffe/v2/bundle/x509bundle/doc.go
@@ -5,17 +5,29 @@
//
// You can create a new bundle for a specific trust domain:
//
//	td := spiffeid.RequireTrustDomainFromString("example.org")
//	bundle := x509bundle.New(td)
//
// Or you can load it from disk:
//
//	td := spiffeid.RequireTrustDomainFromString("example.org")
//	bundle := x509bundle.Load(td, "bundle.pem")
//
// The bundle can be initialized with X.509 authorities:
//
//	td := spiffeid.RequireTrustDomainFromString("example.org")
//	var x509Authorities []*x509.Certificate = ...
//	bundle := x509bundle.FromX509Authorities(td, x509Authorities)
//
diff --git a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/addr.go b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/addr.go
index a0039b114c..147f5feb1b 100644
--- a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/addr.go
+++ b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/addr.go
@@ -22,6 +22,7 @@ func GetDefaultAddress() (string, bool) {
// a Workload API endpoint exposed as either a Unix
// Domain Socket or TCP socket.
func ValidateAddress(addr string) error {
	_, err := TargetFromAddress(addr)
	return err
}

// TargetFromAddress parses the endpoint address and returns a gRPC target
// string for dialing.
func TargetFromAddress(addr string) (string, error) {
	u, err := url.Parse(addr)
	if err != nil {
		return "", errors.New("workload endpoint socket is not a valid URI: " + err.Error())
diff --git a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client_posix.go b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client_posix.go
index 58738b42e4..9e11950062 100644
--- a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client_posix.go
+++ b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client_posix.go
@@ -24,6 +24,10 @@ func (c *Client) setAddress() error {
	}

	var err error
	c.config.address, err = TargetFromAddress(c.config.address)
	return err
}
diff --git a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client_windows.go b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client_windows.go
index 0a14266682..49f8031a51 100644
--- a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client_windows.go
+++ b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client_windows.go
@@ -45,7 +45,11 @@ func (c *Client) setAddress() error {
		c.config.dialOptions = append(c.config.dialOptions, grpc.WithContextDialer(winio.DialPipeContext))
	}

	c.config.address, err = TargetFromAddress(c.config.address)
	return err
}
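A hedged sketch of the exported `TargetFromAddress` helper kept above; the socket path is a placeholder:

```go
package main

import (
	"fmt"

	"github.com/spiffe/go-spiffe/v2/workloadapi"
)

func main() {
	// Converts a Workload API endpoint address into a gRPC dial target.
	target, err := workloadapi.TargetFromAddress("unix:///tmp/spire-agent/public/api.sock")
	if err != nil {
		panic(err)
	}
	fmt.Println(target)
}
```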
diff --git a/vendor/github.com/stbenjam/no-sprintf-host-port/pkg/analyzer/analyzer.go b/vendor/github.com/stbenjam/no-sprintf-host-port/pkg/analyzer/analyzer.go
index ce57a4c210..6dcd54c9b0 100644
--- a/vendor/github.com/stbenjam/no-sprintf-host-port/pkg/analyzer/analyzer.go
+++ b/vendor/github.com/stbenjam/no-sprintf-host-port/pkg/analyzer/analyzer.go
@@ -29,7 +29,11 @@ func run(pass *analysis.Pass) (interface{}, error) {
		callExpr := node.(*ast.CallExpr)
		if p, f, ok := getCallExprFunction(callExpr); ok && p == "fmt" && f == "Sprintf" {
			if err := checkForHostPortConstruction(callExpr); err != nil {
				pass.Reportf(node.Pos(), "%s", err.Error())
			}
		}
	})
@@ -52,7 +56,11 @@ func getCallExprFunction(callExpr *ast.CallExpr) (pkg string, fn string, result

// getStringLiteral returns the value at a position if it's a string literal.
func getStringLiteral(args []ast.Expr, pos int) (string, bool) {
	if len(args) < pos+1 {
		return "", false
	}

@@ -72,9 +80,15 @@ func getStringLiteral(args []ast.Expr, pos int) (string, bool) {
// essentially scheme://%s:, or scheme://user:pass@%s:.
//
// Matching requirements:
//   - Scheme as per RFC3986 is ALPHA *( ALPHA / DIGIT / "+" / "-" / "." )
//   - A format string substitution in the host portion, preceded by an optional username/password@
//   - A colon indicating a port will be specified
func checkForHostPortConstruction(sprintf *ast.CallExpr) error {
	fs, ok := getStringLiteral(sprintf.Args, 0)
	if !ok {
@@ -93,4 +107,8 @@ func checkForHostPortConstruction(sprintf *ast.CallExpr) error {
	}

	return nil
}
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go
index 3bb5e02ab5..8bb51e9da1 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go
@@ -51,9 +51,12 @@ const (
	DefaultImagePullBackOffTimeout = 0 * time.Minute

	// Default maximum resolution timeout used by the resolution controller before timing out when exceeded
	DefaultMaximumResolutionTimeout = 1 * time.Minute

	defaultTimeoutMinutesKey = "default-timeout-minutes"
	defaultServiceAccountKey = "default-service-account"
	defaultManagedByLabelValueKey = "default-managed-by-label-value"
@@ -66,7 +69,10 @@ const (
	defaultResolverTypeKey = "default-resolver-type"
	defaultContainerResourceRequirementsKey = "default-container-resource-requirements"
	defaultImagePullBackOffTimeout = "default-imagepullbackoff-timeout"
	defaultMaximumResolutionTimeout = "default-maximum-resolution-timeout"
)

// DefaultConfig holds all the default configurations for the config.
@@ -87,7 +93,10 @@ type Defaults struct {
	DefaultResolverType string
	DefaultContainerResourceRequirements map[string]corev1.ResourceRequirements
	DefaultImagePullBackOffTimeout time.Duration
	DefaultMaximumResolutionTimeout time.Duration
}

// GetDefaultsConfigName returns the name of the configmap containing all
@@ -119,7 +128,10 @@ func (cfg *Defaults) Equals(other *Defaults) bool {
		other.DefaultMaxMatrixCombinationsCount == cfg.DefaultMaxMatrixCombinationsCount &&
		other.DefaultResolverType == cfg.DefaultResolverType &&
		other.DefaultImagePullBackOffTimeout == cfg.DefaultImagePullBackOffTimeout &&
		other.DefaultMaximumResolutionTimeout == cfg.DefaultMaximumResolutionTimeout &&
		reflect.DeepEqual(other.DefaultForbiddenEnv, cfg.DefaultForbiddenEnv)
}

@@ -133,13 +145,20 @@ func NewDefaultsFromMap(cfgMap map[string]string) (*Defaults, error) {
		DefaultMaxMatrixCombinationsCount: DefaultMaxMatrixCombinationsCount,
		DefaultResolverType: DefaultResolverTypeValue,
		DefaultImagePullBackOffTimeout: DefaultImagePullBackOffTimeout,
		DefaultMaximumResolutionTimeout: DefaultMaximumResolutionTimeout,
	}

	if defaultTimeoutMin, ok := cfgMap[defaultTimeoutMinutesKey]; ok {
		timeout, err := strconv.ParseInt(defaultTimeoutMin, 10, 0)
		if err != nil {
			return nil, fmt.Errorf("failed parsing default config %q", defaultTimeoutMinutesKey)
		}
		tc.DefaultTimeoutMinutes = int(timeout)
	}
@@ -179,7 +198,11 @@ func NewDefaultsFromMap(cfgMap map[string]string) (*Defaults, error) {
	if defaultMaxMatrixCombinationsCount, ok := cfgMap[defaultMaxMatrixCombinationsCountKey]; ok {
		matrixCombinationsCount, err := strconv.ParseInt(defaultMaxMatrixCombinationsCount, 10, 0)
		if err != nil {
			return nil, fmt.Errorf("failed parsing default config %q", defaultMaxMatrixCombinationsCountKey)
		}
		tc.DefaultMaxMatrixCombinationsCount = int(matrixCombinationsCount)
	}
@@ -207,11 +230,16 @@ func NewDefaultsFromMap(cfgMap map[string]string) (*Defaults, error) {
	if defaultImagePullBackOff, ok := cfgMap[defaultImagePullBackOffTimeout]; ok {
		timeout, err := time.ParseDuration(defaultImagePullBackOff)
		if err != nil {
			return nil, fmt.Errorf("failed parsing default config %q", defaultImagePullBackOffTimeout)
		}
		tc.DefaultImagePullBackOffTimeout = timeout
	}

	if defaultMaximumResolutionTimeout, ok := cfgMap[defaultMaximumResolutionTimeout]; ok {
		timeout, err := time.ParseDuration(defaultMaximumResolutionTimeout)
		if err != nil {
@@ -220,6 +248,8 @@ func NewDefaultsFromMap(cfgMap map[string]string) (*Defaults, error) {
		tc.DefaultMaximumResolutionTimeout = timeout
	}

	return &tc, nil
}
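A sketch of driving `NewDefaultsFromMap` as resolved above; the keys mirror the configmap keys in the hunk, and the values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/tektoncd/pipeline/pkg/apis/config"
)

func main() {
	// Keys match the configmap keys parsed in NewDefaultsFromMap.
	defaults, err := config.NewDefaultsFromMap(map[string]string{
		"default-timeout-minutes":            "30",
		"default-service-account":            "tekton-sa",
		"default-imagepullbackoff-timeout":   "5m",
		"default-maximum-resolution-timeout": "2m",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(defaults.DefaultTimeoutMinutes, defaults.DefaultMaximumResolutionTimeout)
}
```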
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/param_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/param_types.go
index 286ed06d80..e001f21515 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/param_types.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/param_types.go
@@ -146,7 +146,11 @@ func (ps ParamSpecs) ValidateNoDuplicateNames() *apis.FieldError {
	return errs
}

// validateParamEnums validates feature flag, duplication and allowed types for Param Enum
func (ps ParamSpecs) validateParamEnums(ctx context.Context) *apis.FieldError {
	var errs *apis.FieldError
	for _, p := range ps {
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/stepaction_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/stepaction_validation.go
index 66b8daab06..474691bdaa 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/stepaction_validation.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/stepaction_validation.go
@@ -15,7 +15,10 @@ package v1alpha1

import (
	"context"
	"fmt"
	"strings"

	"github.com/tektoncd/pipeline/pkg/apis/config"
@@ -129,6 +132,7 @@ func validateParameterVariables(ctx context.Context, sas StepActionSpec, params v1.ParamSpecs) *apis.FieldError {
	stringParameterNames := sets.NewString(stringParams.GetNames()...)
	arrayParameterNames := sets.NewString(arrayParams.GetNames()...)
	errs = errs.Also(v1.ValidateNameFormat(stringParameterNames.Insert(arrayParameterNames.List()...), objectParams))
	errs = errs.Also(validateStepActionArrayUsage(sas, "params", arrayParameterNames))
	return errs.Also(validateDefaultParameterReferences(params))
}
@@ -203,6 +207,9 @@ func validateDefaultParameterReferences(params v1.ParamSpecs) *apis.FieldError {
	}

	return errs
}

// validateObjectUsage validates the usage of individual attributes of an object param and the usage of the entire object
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go
index 9844a84435..557ef24d3c 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go
@@ -137,7 +137,11 @@ func (ps ParamSpecs) validateNoDuplicateNames() *apis.FieldError {
	return errs
}

// validateParamEnums validates feature flag, duplication and allowed types for Param Enum
func (ps ParamSpecs) validateParamEnums(ctx context.Context) *apis.FieldError {
	var errs *apis.FieldError
	for _, p := range ps {
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go
index a4c0725338..ea2ad966b1 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go
@@ -640,7 +640,11 @@ func validateTaskResultsVariables(ctx context.Context, steps []Step, results []TaskResult) (errs *apis.FieldError) {
		resultsNames.Insert(r.Name)
	}
	for idx, step := range steps {
		errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariablesWithDetail(step.Script, "results", resultsNames).ViaField("script").ViaFieldIndex("steps", idx))
	}
	return errs
}
@@ -790,7 +794,11 @@ func validateStepVariables(ctx context.Context, step Step, prefix string, vars sets.String) *apis.FieldError {
	errs := substitution.ValidateNoReferencesToUnknownVariables(step.Name, prefix, vars).ViaField("name")
	errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(step.Image, prefix, vars).ViaField("image"))
	errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(step.WorkingDir, prefix, vars).ViaField("workingDir"))
	errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariablesWithDetail(step.Script, prefix, vars).ViaField("script"))
	for i, cmd := range step.Command {
		errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(cmd, prefix, vars).ViaFieldIndex("command", i))
	}
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go b/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go
index df428c31bf..6958cae036 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go
@@ -54,6 +54,7 @@ var intIndexRegex = regexp.MustCompile(intIndex)
// - prefix: the prefix of the substitutable variable, e.g. "params" or "context.pipeline"
// - vars: names of known variables
func ValidateNoReferencesToUnknownVariables(value, prefix string, vars sets.String) *apis.FieldError {
	return validateNoReferencesToUnknownVariables(value, prefix, vars, false)
}

@@ -64,6 +65,8 @@ func ValidateNoReferencesToUnknownVariablesWithDetail(value, prefix string, vars sets.String) *apis.FieldError {
}

func validateNoReferencesToUnknownVariables(value, prefix string, vars sets.String, withDetail bool) *apis.FieldError {
	if vs, present, errString := ExtractVariablesFromString(value, prefix); present {
		if errString != "" {
			return &apis.FieldError{
@@ -74,6 +77,7 @@ func validateNoReferencesToUnknownVariables(value, prefix string, vars sets.String, withDetail bool) *apis.FieldError {
		for _, v := range vs {
			v = TrimArrayIndex(v)
			if !vars.Has(v) {
				var msg string
				if withDetail {
					msg = fmt.Sprintf("non-existent variable `%s` in %q", v, value)
@@ -82,6 +86,10 @@ func validateNoReferencesToUnknownVariables(value, prefix string, vars sets.String, withDetail bool) *apis.FieldError {
				}
				return &apis.FieldError{
					Message: msg,
					// Empty path is required to make the `ViaField`, … work
					Paths: []string{""},
				}
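A sketch of the detailed validation variant kept above; the variable names are illustrative, and the error check relies on `*apis.FieldError` being nil on success:

```go
package main

import (
	"fmt"

	"github.com/tektoncd/pipeline/pkg/substitution"
	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	known := sets.NewString("foo")

	// "bar" is not declared, so the detailed variant names the offending
	// variable and the value it appears in.
	if err := substitution.ValidateNoReferencesToUnknownVariablesWithDetail(
		"echo $(params.bar)", "params", known); err != nil {
		fmt.Println(err.Message)
	}
}
```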
diff --git a/vendor/github.com/tektoncd/pipeline/test/e2e-tests.sh b/vendor/github.com/tektoncd/pipeline/test/e2e-tests.sh
index 49f7c72a51..16d8b4a319 100644
--- a/vendor/github.com/tektoncd/pipeline/test/e2e-tests.sh
+++ b/vendor/github.com/tektoncd/pipeline/test/e2e-tests.sh
@@ -44,10 +44,15 @@ fi

header "Setting up environment"

set -x
install_pipeline_crd
export SYSTEM_NAMESPACE=tekton-pipelines
set +x

failed=0
diff --git a/vendor/github.com/tektoncd/pipeline/test/featureflags.go b/vendor/github.com/tektoncd/pipeline/test/featureflags.go
index be0944eb71..d79115f4a9 100644
--- a/vendor/github.com/tektoncd/pipeline/test/featureflags.go
+++ b/vendor/github.com/tektoncd/pipeline/test/featureflags.go
@@ -70,7 +70,11 @@ func requireAnyGate(gates map[string]string) func(context.Context, *testing.T, *clients, string) {
	}
}

// requireAllGates returns a setup func that will skip the current
// test if all of the feature-flags in the given map don't match
// what's in the feature-flags ConfigMap. It will fatally fail
// the test if it cannot get the feature-flag configmap.
diff --git a/vendor/github.com/tektoncd/pipeline/test/presubmit-tests.sh b/vendor/github.com/tektoncd/pipeline/test/presubmit-tests.sh
index ba72739e3d..cccbf807f0 100644
--- a/vendor/github.com/tektoncd/pipeline/test/presubmit-tests.sh
+++ b/vendor/github.com/tektoncd/pipeline/test/presubmit-tests.sh
@@ -29,6 +29,23 @@ export DISABLE_YAML_LINTING=1

source $(git rev-parse --show-toplevel)/vendor/github.com/tektoncd/plumbing/scripts/presubmit-tests.sh

function check_yaml_lint() {
	header "Testing if yamllint has been done"

@@ -63,6 +80,10 @@ EOF
}

function post_build_tests() {
	check_yaml_lint
	ko_resolve
}
diff --git a/vendor/github.com/tektoncd/plumbing/.gitignore b/vendor/github.com/tektoncd/plumbing/.gitignore
index acab30f0bd..608903f21b 100644
--- a/vendor/github.com/tektoncd/plumbing/.gitignore
+++ b/vendor/github.com/tektoncd/plumbing/.gitignore
@@ -13,9 +13,12 @@
**/.bin
**/.DS_Store

# Release
**/source.tar.gz
tekton/**/.ko.yaml
tekton/**/vendor/
-.release-*/
\ No newline at end of file
+.release-*/
diff --git a/vendor/github.com/tektoncd/plumbing/OWNERS b/vendor/github.com/tektoncd/plumbing/OWNERS
index 500e269817..7235cb16d0 100644
--- a/vendor/github.com/tektoncd/plumbing/OWNERS
+++ b/vendor/github.com/tektoncd/plumbing/OWNERS
@@ -3,6 +3,7 @@ approvers:
- abayer
- afrittoli
- AlanGreene
- bobcatfish
- chitrangpatel
@@ -16,3 +17,13 @@ approvers:
# Alumni ❤️
# - nikhil-thomas
# - dlorenc
diff --git a/vendor/github.com/tetafro/godot/.goreleaser.yml b/vendor/github.com/tetafro/godot/.goreleaser.yml
index 2f0c2466a5..bfb25164e7 100644
--- a/vendor/github.com/tetafro/godot/.goreleaser.yml
+++ b/vendor/github.com/tetafro/godot/.goreleaser.yml
@@ -1,5 +1,8 @@
version: 2
builds:
  - dir: ./cmd/godot
checksum:
diff --git a/vendor/github.com/tetafro/godot/getters.go b/vendor/github.com/tetafro/godot/getters.go
index eff836b405..d5b3ba1a53 100644
--- a/vendor/github.com/tetafro/godot/getters.go
+++ b/vendor/github.com/tetafro/godot/getters.go
@@ -6,7 +6,10 @@ import (
	"go/ast"
	"go/token"
	"os"
	"path/filepath"
	"regexp"
	"strings"
)
@@ -37,10 +40,16 @@ func newParsedFile(file *ast.File, fset *token.FileSet) (*parsedFile, error) {
		file: file,
	}

	// Read original file. This is necessary for making replacements for
	// inline comments. I couldn't find a better way to get the original line
	// with code and comment without reading the file. Function `Format`
	// from "go/format" won't help here if the original file is not gofmt-ed.

	filename := getFilename(fset, file)

@@ -51,10 +60,37 @@ func newParsedFile(file *ast.File, fset *token.FileSet) (*parsedFile, error) {
	var err error
	pf.lines, err = readFile(filename)
	if err != nil {
		return nil, fmt.Errorf("read file: %w", err)
	}

	return &pf, nil
}

@@ -231,12 +267,21 @@ func getText(comment *ast.CommentGroup, exclude []*regexp.Regexp) (s string) {
}

// readFile reads file and returns its lines as strings.
func readFile(filename string) ([]string, error) {
	f, err := os.ReadFile(filepath.Clean(filename))
	if err != nil {
		return nil, err //nolint:wrapcheck
	}
	return strings.Split(string(f), "\n"), nil
}

@@ -262,6 +307,7 @@ func matchAny(s string, rr []*regexp.Regexp) bool {
	return false
}

func getFilename(fset *token.FileSet, file *ast.File) string {
	filename := fset.PositionFor(file.Pos(), true).Filename
	if !strings.HasSuffix(filename, ".go") {
@@ -269,4 +315,13 @@ func getFilename(fset *token.FileSet, file *ast.File) string {
	}

	return filename
}
diff --git a/vendor/github.com/timakin/bodyclose/passes/bodyclose/bodyclose.go b/vendor/github.com/timakin/bodyclose/passes/bodyclose/bodyclose.go
index ae860d728c..0cccd0ab19 100644
--- a/vendor/github.com/timakin/bodyclose/passes/bodyclose/bodyclose.go
+++ b/vendor/github.com/timakin/bodyclose/passes/bodyclose/bodyclose.go
@@ -23,7 +23,11 @@ var Analyzer = &analysis.Analyzer{
}

const (
	Doc = "checks whether HTTP response body is closed successfully"

	nethttpPath = "net/http"
	closeMethod = "Close"
@@ -114,6 +118,7 @@ func (r *runner) isopen(b *ssa.BasicBlock, i int) bool {
	if len(*call.Referrers()) == 0 {
		return true
	}

	if instr, ok := b.Instrs[i].(*ssa.Call); ok {
		// httptest.ResponseRecorder does not need its response body closed because it is a no-op.
@@ -126,6 +131,8 @@ func (r *runner) isopen(b *ssa.BasicBlock, i int) bool {
		}
	}

	cRefs := *call.Referrers()
	for _, cRef := range cRefs {
		val, ok := r.getResVal(cRef)
@@ -161,6 +168,7 @@ func (r *runner) isopen(b *ssa.BasicBlock, i int) bool {
				return r.calledInFunc(f, called)
			}

			// Case when calling Close() from struct field or method
			if s, ok := aref.(*ssa.Store); ok {
				if f, ok := s.Addr.(*ssa.FieldAddr); ok {
@@ -177,6 +185,8 @@ func (r *runner) isopen(b *ssa.BasicBlock, i int) bool {
					}
				}
			}
		}
	case *ssa.Call, *ssa.Defer: // Indirect function call
		// Hacky way to extract CommonCall
@@ -223,6 +233,7 @@ func (r *runner) isopen(b *ssa.BasicBlock, i int) bool {
				}
			}
		}
	case *ssa.Phi: // Called in the higher-level block
		if resRef.Referrers() == nil {
			return true
@@ -251,6 +262,8 @@ func (r *runner) isopen(b *ssa.BasicBlock, i int) bool {
				}
			}
		}
	}
}
@@ -263,9 +276,13 @@ func (r *runner) getReqCall(instr ssa.Instruction) (*ssa.Call, bool) {
	if !ok {
		return nil, false
	}
	callType := call.Type().String()

	if !strings.Contains(callType, r.resTyp.String()) ||
		strings.Contains(callType, "net/http.ResponseController") {
		return nil, false
	}
	return call, true
diff --git a/vendor/github.com/tomarrell/wrapcheck/v2/wrapcheck/wrapcheck.go b/vendor/github.com/tomarrell/wrapcheck/v2/wrapcheck/wrapcheck.go
index 127f7efd84..5ccac50fd7 100644
--- a/vendor/github.com/tomarrell/wrapcheck/v2/wrapcheck/wrapcheck.go
+++ b/vendor/github.com/tomarrell/wrapcheck/v2/wrapcheck/wrapcheck.go
@@ -44,10 +44,13 @@ type WrapcheckConfig struct {
	// list to your config.
	IgnoreSigs []string `mapstructure:"ignoreSigs" yaml:"ignoreSigs"`

	// ExtraIgnoreSigs defines an additional list of signatures to ignore, on
	// top of IgnoreSigs.
	ExtraIgnoreSigs []string `mapstructure:"extraIgnoreSigs" yaml:"extraIgnoreSigs"`

	// IgnoreSigRegexps defines a list of regular expressions which if matched
	// to the signature of the function call returning the error, will be ignored. This
	// allows you to specify functions that wrapcheck will not report as
@@ -280,7 +283,11 @@ func reportUnwrapped(
	// Check for ignored signatures
	fnSig := pass.TypesInfo.ObjectOf(sel.Sel).String()
	if contains(cfg.IgnoreSigs, fnSig) || contains(cfg.ExtraIgnoreSigs, fnSig) {
		return
	} else if containsMatch(regexpsSig, fnSig) {
		return
diff --git a/vendor/github.com/ultraware/funlen/README.md b/vendor/github.com/ultraware/funlen/README.md
index 8bbe7eab68..019836afd5 100644
--- a/vendor/github.com/ultraware/funlen/README.md
+++ b/vendor/github.com/ultraware/funlen/README.md
@@ -16,6 +16,7 @@ The default values are used internally, but might to be adjusted for your specif

Funlen is included in [golangci-lint](https://github.com/golangci/golangci-lint/). Install it and enable funlen.
+<<<<<<< HEAD ## Configuration Available configuration options: @@ -36,6 +37,8 @@ linters-settings: ignore-comments: false ``` +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) # Exclude for tests golangci-lint offers a way to exclude linters in certain cases. More info can be found here: https://golangci-lint.run/usage/configuration/#issues-configuration. diff --git a/vendor/github.com/ultraware/funlen/main.go b/vendor/github.com/ultraware/funlen/main.go new file mode 100644 index 0000000000..b68ddb926f --- /dev/null +++ b/vendor/github.com/ultraware/funlen/main.go @@ -0,0 +1,124 @@ +package funlen + +import ( + "fmt" + "go/ast" + "go/token" + "reflect" +) + +const ( + defaultLineLimit = 60 + defaultStmtLimit = 40 +) + +// Run runs this linter on the provided code +func Run(file *ast.File, fset *token.FileSet, lineLimit int, stmtLimit int, ignoreComments bool) []Message { + if lineLimit == 0 { + lineLimit = defaultLineLimit + } + if stmtLimit == 0 { + stmtLimit = defaultStmtLimit + } + + cmap := ast.NewCommentMap(fset, file, file.Comments) + + var msgs []Message + for _, f := range file.Decls { + decl, ok := f.(*ast.FuncDecl) + if !ok || decl.Body == nil { // decl.Body can be nil for e.g. cgo + continue + } + + if stmtLimit > 0 { + if stmts := parseStmts(decl.Body.List); stmts > stmtLimit { + msgs = append(msgs, makeStmtMessage(fset, decl.Name, stmts, stmtLimit)) + continue + } + } + + if lineLimit > 0 { + if lines := getLines(fset, decl, cmap.Filter(decl), ignoreComments); lines > lineLimit { + msgs = append(msgs, makeLineMessage(fset, decl.Name, lines, lineLimit)) + } + } + } + + return msgs +} + +// Message contains a message +type Message struct { + Pos token.Position + Message string +} + +func makeLineMessage(fset *token.FileSet, funcInfo *ast.Ident, lines, lineLimit int) Message { + return Message{ + fset.Position(funcInfo.Pos()), + fmt.Sprintf("Function '%s' is too long (%d > %d)\n", funcInfo.Name, lines, lineLimit), + } +} + +func makeStmtMessage(fset *token.FileSet, funcInfo *ast.Ident, stmts, stmtLimit int) Message { + return Message{ + fset.Position(funcInfo.Pos()), + fmt.Sprintf("Function '%s' has too many statements (%d > %d)\n", funcInfo.Name, stmts, stmtLimit), + } +} + +func getLines(fset *token.FileSet, f *ast.FuncDecl, cmap ast.CommentMap, ignoreComments bool) int { // nolint: interfacer + var lineCount int + var commentCount int + + lineCount = fset.Position(f.End()).Line - fset.Position(f.Pos()).Line - 1 + + if !ignoreComments { + return lineCount + } + + for _, c := range cmap.Comments() { + // If the CommenGroup's lines are inside the function + // count how many comments are in the CommentGroup + if (fset.Position(c.Pos()).Line > fset.Position(f.Pos()).Line) && + (fset.Position(c.End()).Line < fset.Position(f.End()).Line) { + commentCount += len(c.List) + } + } + + return lineCount - commentCount +} + +func parseStmts(s []ast.Stmt) (total int) { + for _, v := range s { + total++ + switch stmt := v.(type) { + case *ast.BlockStmt: + total += parseStmts(stmt.List) - 1 + case *ast.ForStmt, *ast.RangeStmt, *ast.IfStmt, + *ast.SwitchStmt, *ast.TypeSwitchStmt, *ast.SelectStmt: + total += parseBodyListStmts(stmt) + case *ast.CaseClause: + total += parseStmts(stmt.Body) + case *ast.AssignStmt: + total += checkInlineFunc(stmt.Rhs[0]) + case *ast.GoStmt: + total += checkInlineFunc(stmt.Call.Fun) + case *ast.DeferStmt: + total += checkInlineFunc(stmt.Call.Fun) + } + } + return +} + +func checkInlineFunc(stmt ast.Expr) int { + if block, ok := 
stmt.(*ast.FuncLit); ok { + return parseStmts(block.Body.List) + } + return 0 +} + +func parseBodyListStmts(t interface{}) int { + i := reflect.ValueOf(t).Elem().FieldByName(`Body`).Elem().FieldByName(`List`).Interface() + return parseStmts(i.([]ast.Stmt)) +} diff --git a/vendor/github.com/ultraware/whitespace/README.md b/vendor/github.com/ultraware/whitespace/README.md index 660c13d78c..4a69805049 100644 --- a/vendor/github.com/ultraware/whitespace/README.md +++ b/vendor/github.com/ultraware/whitespace/README.md @@ -4,6 +4,10 @@ Whitespace is a linter that checks for unnecessary newlines at the start and end ## Installation guide To install as a standalone linter, run `go install github.com/ultraware/whitespace/cmd/whitespace@latest`. Whitespace is also included in [golangci-lint](https://github.com/golangci/golangci-lint/). Install it and enable whitespace. diff --git a/vendor/github.com/ultraware/whitespace/whitespace.go b/vendor/github.com/ultraware/whitespace/whitespace.go index 44e68124c3..bec0531110 100644 --- a/vendor/github.com/ultraware/whitespace/whitespace.go +++ b/vendor/github.com/ultraware/whitespace/whitespace.go @@ -9,8 +9,58 @@ import ( "golang.org/x/tools/go/analysis" ) // Settings contains settings for edge-cases. type Settings struct { MultiIf bool MultiFunc bool } @@ -41,16 +91,25 @@ func flags(settings *Settings) flag.FlagSet { return *flags } func Run(pass *analysis.Pass, settings *Settings) { for _, file := range pass.Files { filename := pass.Fset.Position(file.Pos()).Filename if !strings.HasSuffix(filename, ".go") { continue } fileMessages := runFile(file, pass.Fset, *settings) for _, message := range fileMessages { pass.Report(message) } @@ -59,6 +118,38 @@ func Run(pass *analysis.Pass, settings *Settings) { func runFile(file *ast.File, fset *token.FileSet, settings Settings) []analysis.Diagnostic { var messages []analysis.Diagnostic for _, f := range file.Decls { decl, ok := f.(*ast.FuncDecl) @@ -78,7 +169,11 @@ func runFile(file *ast.File, fset *token.FileSet, settings Settings) []analysis. 
type visitor struct { comments []*ast.CommentGroup fset *token.FileSet messages []analysis.Diagnostic wantNewline map[*ast.BlockStmt]bool settings Settings } @@ -112,6 +207,7 @@ func (v *visitor) Visit(node ast.Node) ast.Visitor { startMsg := checkStart(v.fset, opening, first) if wantNewline && startMsg == nil && len(stmt.List) >= 1 { v.messages = append(v.messages, analysis.Diagnostic{ Pos: opening, Message: "multi-line statement should be followed by a newline", @@ -122,6 +218,15 @@ func (v *visitor) Visit(node ast.Node) ast.Visitor { NewText: []byte("\n"), }}, }}, }) } else if !wantNewline && startMsg != nil { v.messages = append(v.messages, *startMsg) @@ -144,7 +249,11 @@ func checkMultiLine(v *visitor, body *ast.BlockStmt, stmtStart ast.Node) { } func posLine(fset *token.FileSet, pos token.Pos) int { return fset.Position(pos).Line } func firstAndLast(comments []*ast.CommentGroup, fset *token.FileSet, stmt *ast.BlockStmt) (token.Pos, ast.Node, ast.Node) { @@ -191,12 +300,17 @@ func firstAndLast(comments []*ast.CommentGroup, fset *token.FileSet, stmt *ast.B return openingPos, first, last } func checkStart(fset *token.FileSet, start token.Pos, first ast.Node) *analysis.Diagnostic { if first == nil { return nil } if posLine(fset, start)+1 < posLine(fset, first.Pos()) { return &analysis.Diagnostic{ Pos: start, Message: "unnecessary leading newline", @@ -207,18 +321,32 @@ func checkStart(fset *token.FileSet, start token.Pos, first ast.Node) *analysis. NewText: []byte("\n"), }}, }}, } } return nil } func checkEnd(fset *token.FileSet, end token.Pos, last ast.Node) *analysis.Diagnostic { if last == nil { return nil } if posLine(fset, end)-1 > posLine(fset, last.End()) { return &analysis.Diagnostic{ Pos: end, Message: "unnecessary trailing newline", @@ -229,8 +357,32 @@ func checkEnd(fset *token.FileSet, end token.Pos, last ast.Node) *analysis.Diagn NewText: []byte("\n"), }}, }}, } } return nil } diff --git a/vendor/github.com/uudashr/gocognit/README.md b/vendor/github.com/uudashr/gocognit/README.md index 415e81e739..925b9ce33a 100644 --- a/vendor/github.com/uudashr/gocognit/README.md +++ b/vendor/github.com/uudashr/gocognit/README.md @@ -147,14 +147,24 @@ The following structures receive a nesting increment commensurate with their nes ## Installation ```shell go install github.com/uudashr/gocognit/cmd/gocognit@latest ``` or ```shell go get github.com/uudashr/gocognit/cmd/gocognit ``` ## Usage @@ -169,6 +179,7 @@ Usage: Flags: -over N show functions with complexity > N only and return exit code 1 if the output is non-empty -top N show the top N most complex functions only -avg show the average complexity over all functions, not depending on whether -over or -top are set -json encode the output as JSON -d enable diagnostic output -f format string the format to use (default "{{.Complexity}} {{.PkgName}} {{.FuncName}} {{.Pos}}") -ignore expr ignore files matching the given regexp The (default) output fields for each line are: @@ -194,6 +215,7 @@ or equal to The struct being passed to the template is: type Stat struct { PkgName string FuncName string Complexity int @@ -212,6 +234,12 @@ The struct being passed to the template is: Offset int Line int Column int } ``` @@ -240,6 +268,7 @@ func IgnoreMe() { } ``` ## Diagnostic To understand how the complexity is calculated, we can enable diagnostic output with the `-d` flag. @@ -310,6 +339,8 @@ It will show the diagnostic output in JSON format ``` ## Related project - [Gocyclo](https://github.com/fzipp/gocyclo) where the code is based on. - [Cognitive Complexity: A new way of measuring understandability](https://www.sonarsource.com/docs/CognitiveComplexity.pdf) white paper by G. Ann Campbell. diff --git a/vendor/github.com/uudashr/gocognit/doc.go b/vendor/github.com/uudashr/gocognit/doc.go index 797b192282..036fbe86fe 100644 --- a/vendor/github.com/uudashr/gocognit/doc.go +++ b/vendor/github.com/uudashr/gocognit/doc.go @@ -1,3 +1,7 @@ // Package gocognit defines Analyzer other utilities to checks and calculate // the complexity of function based on "cognitive complexity" methods. package gocognit diff --git a/vendor/github.com/uudashr/gocognit/gocognit.go b/vendor/github.com/uudashr/gocognit/gocognit.go index e51ee2a042..d9759a8d48 100644 --- a/vendor/github.com/uudashr/gocognit/gocognit.go +++ b/vendor/github.com/uudashr/gocognit/gocognit.go @@ -4,7 +4,10 @@ import ( "fmt" "go/ast" "go/token" "strconv" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" @@ -13,6 +16,7 @@ import ( // Stat is statistic of the complexity. type Stat struct { PkgName string FuncName string Complexity int @@ -65,6 +69,12 @@ func (d Diagnostic) String() string { } return fmt.Sprintf("+%d (nesting=%d)", d.Inc, d.Nesting) } func (s Stat) String() string { @@ -73,11 +83,14 @@ func (s Stat) String() string { // ComplexityStats builds the complexity statistics. func ComplexityStats(f *ast.File, fset *token.FileSet, stats []Stat) []Stat { return ComplexityStatsWithDiagnostic(f, fset, stats, false) } // ComplexityStatsWithDiagnostic builds the complexity statistics with diagnostic. func ComplexityStatsWithDiagnostic(f *ast.File, fset *token.FileSet, stats []Stat, enableDiagnostics bool) []Stat { for _, decl := range f.Decls { if fn, ok := decl.(*ast.FuncDecl); ok { d := parseDirective(fn.Doc) @@ -85,6 +98,7 @@ func ComplexityStatsWithDiagnostic(f *ast.File, fset *token.FileSet, stats []Sta continue } res := ScanComplexity(fn, enableDiagnostics) stats = append(stats, Stat{ @@ -122,6 +136,19 @@ func generateDiagnostics(fset *token.FileSet, diags []diagnostic) []Diagnostic { return out } type directive struct { Ignore bool } @@ -146,16 +173,23 @@ func funcName(fn *ast.FuncDecl) string { if fn.Recv != nil { if fn.Recv.NumFields() > 0 { typ := fn.Recv.List[0].Type return fmt.Sprintf("(%s).%s", recvString(typ), fn.Name) } } return fn.Name.Name } // Complexity calculates the cognitive complexity of a function. func Complexity(fn *ast.FuncDecl) int { res := ScanComplexity(fn, false) return res.Complexity @@ -186,6 +220,14 @@ type diagnostic struct { Nesting int Text string Pos token.Pos } type complexityVisitor struct { @@ -194,9 +236,12 @@ type complexityVisitor struct { nesting int elseNodes map[ast.Node]bool calculatedExprs map[ast.Expr]bool diagnosticsEnabled bool diagnostics []diagnostic } func (v *complexityVisitor) incNesting() { @@ -207,6 +252,7 @@ func (v *complexityVisitor) decNesting() { v.nesting-- } func (v *complexityVisitor) incComplexity(text string, pos token.Pos) { v.complexity++ @@ -234,6 +280,14 @@ func (v *complexityVisitor) nestIncComplexity(text string, pos token.Pos) { Text: text, Pos: pos, }) } func (v *complexityVisitor) markAsElseNode(n ast.Node) { @@ -292,12 +346,19 @@ func (v *complexityVisitor) Visit(n ast.Node) ast.Visitor { case *ast.CallExpr: return v.visitCallExpr(n) } return v } func (v *complexityVisitor) visitIfStmt(n *ast.IfStmt) ast.Visitor { v.incIfComplexity(n, "if", n.Pos()) if n := n.Init; n != nil { ast.Walk(v, n) @@ -305,12 +366,26 @@ func (v *complexityVisitor) visitIfStmt(n *ast.IfStmt) ast.Visitor { ast.Walk(v, n.Cond) v.incNesting() ast.Walk(v, n.Body) v.decNesting() if _, ok := n.Else.(*ast.BlockStmt); ok { v.incComplexity("else", n.Else.Pos()) ast.Walk(v, n.Else) } else if _, ok := n.Else.(*ast.IfStmt); ok { @@ -322,7 +397,11 @@ func (v *complexityVisitor) visitIfStmt(n *ast.IfStmt) ast.Visitor { } func (v *complexityVisitor) visitSwitchStmt(n *ast.SwitchStmt) ast.Visitor { v.nestIncComplexity("switch", n.Pos()) if n := n.Init; n != nil { ast.Walk(v, n) @@ -335,12 +414,19 @@ func (v *complexityVisitor) visitSwitchStmt(n *ast.SwitchStmt) ast.Visitor { v.incNesting() ast.Walk(v, n.Body) v.decNesting() return nil } func (v *complexityVisitor) visitTypeSwitchStmt(n *ast.TypeSwitchStmt) ast.Visitor { v.nestIncComplexity("switch", n.Pos()) if n := n.Init; n != nil { ast.Walk(v, n) @@ -353,22 +439,36 @@ func (v *complexityVisitor) visitTypeSwitchStmt(n *ast.TypeSwitchStmt) ast.Visit v.incNesting() ast.Walk(v, n.Body) v.decNesting() return nil } func (v *complexityVisitor) visitSelectStmt(n *ast.SelectStmt) ast.Visitor { v.nestIncComplexity("select", n.Pos()) v.incNesting() ast.Walk(v, n.Body) v.decNesting() return nil } func (v *complexityVisitor) visitForStmt(n *ast.ForStmt) ast.Visitor { v.nestIncComplexity("for", n.Pos()) if n := n.Init; n != nil { ast.Walk(v, n) @@ -385,12 +485,19 @@ func (v *complexityVisitor) visitForStmt(n *ast.ForStmt) ast.Visitor { v.incNesting() ast.Walk(v, n.Body) v.decNesting() return nil } func (v *complexityVisitor) visitRangeStmt(n *ast.RangeStmt) ast.Visitor { v.nestIncComplexity("for", n.Pos()) if n := n.Key; n != nil { ast.Walk(v, n) @@ -405,7 +512,10 @@ func (v *complexityVisitor) visitRangeStmt(n *ast.RangeStmt) ast.Visitor { v.incNesting() ast.Walk(v, n.Body) v.decNesting() return nil } @@ -415,15 +525,23 @@ func (v *complexityVisitor) visitFuncLit(n *ast.FuncLit) ast.Visitor { v.incNesting() ast.Walk(v, n.Body) v.decNesting() return nil } func (v *complexityVisitor) visitBranchStmt(n *ast.BranchStmt) ast.Visitor { if n.Label != nil { v.incComplexity(n.Tok.String(), n.Pos()) } return v } @@ -434,12 +552,19 @@ func (v *complexityVisitor) visitBinaryExpr(n *ast.BinaryExpr) ast.Visitor { var lastOp token.Token for _, op := range ops { if lastOp != op { v.incComplexity(op.String(), n.OpPos) lastOp = op } } } return v } @@ -448,38 +573,61 @@ func (v 
*complexityVisitor) visitCallExpr(n *ast.CallExpr) ast.Visitor { obj, name := callIdent.Obj, callIdent.Name if obj == v.name.Obj && name == v.name.Name { // called by same function directly (direct recursion) v.incComplexity(name, n.Pos()) } } return v } func (v *complexityVisitor) collectBinaryOps(exp ast.Expr) []token.Token { v.markCalculated(exp) if exp, ok := exp.(*ast.BinaryExpr); ok { return mergeBinaryOps(v.collectBinaryOps(exp.X), exp.Op, v.collectBinaryOps(exp.Y)) } return nil } func (v *complexityVisitor) incIfComplexity(n *ast.IfStmt, text string, pos token.Pos) { if v.markedAsElseNode(n) { v.incComplexity(text, pos) } else { v.nestIncComplexity(text, pos) } } func mergeBinaryOps(x []token.Token, op token.Token, y []token.Token) []token.Token { var out []token.Token out = append(out, x...) if isBinaryLogicalOp(op) { out = append(out, op) } out = append(out, y...) return out } diff --git a/vendor/github.com/uudashr/gocognit/recv.go b/vendor/github.com/uudashr/gocognit/recv.go index eaf3c9762d..6b7a6b3479 100644 --- a/vendor/github.com/uudashr/gocognit/recv.go +++ b/vendor/github.com/uudashr/gocognit/recv.go @@ -20,6 +20,9 @@ func recvString(recv ast.Expr) string { case *ast.IndexListExpr: return recvString(t.X) } return "BADRECV" } diff --git a/vendor/github.com/uudashr/gocognit/recv_pre118.go b/vendor/github.com/uudashr/gocognit/recv_pre118.go index a47ba1bd5a..0bbc62434f 100644 --- a/vendor/github.com/uudashr/gocognit/recv_pre118.go +++ b/vendor/github.com/uudashr/gocognit/recv_pre118.go @@ -16,6 +16,9 @@ func recvString(recv ast.Expr) string { case *ast.StarExpr: return "*" + recvString(t.X) } return "BADRECV" } diff --git a/vendor/github.com/uudashr/iface/opaque/opaque.go b/vendor/github.com/uudashr/iface/opaque/opaque.go index 59e406322f..e0b53f43b2 100644 --- a/vendor/github.com/uudashr/iface/opaque/opaque.go +++ b/vendor/github.com/uudashr/iface/opaque/opaque.go @@ -274,11 +274,17 @@ func (r *runner) run(pass *analysis.Pass) (interface{}, error) { stmtTypName = removePkgPrefix(stmtTypName) } msg := fmt.Sprintf("%s function return %s interface at the %s result, abstract a single concrete implementation of %s", funcDecl.Name.Name, retTypeName, positionStr(currentIdx), stmtTypName) pass.Report(analysis.Diagnostic{ Pos: result.Pos(), @@ -296,6 +302,8 @@ func (r *runner) run(pass *analysis.Pass) (interface{}, error) { }, }, }) } }) diff --git a/vendor/github.com/uudashr/iface/unused/unused.go b/vendor/github.com/uudashr/iface/unused/unused.go index 9c301ae673..b4080adb0c 100644 --- a/vendor/github.com/uudashr/iface/unused/unused.go +++ b/vendor/github.com/uudashr/iface/unused/unused.go @@ -48,8 +48,12 @@ func (r *runner) run(pass *analysis.Pass) (interface{}, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) // Collect all interface type declarations ifaceDecls := make(map[string]*ast.TypeSpec) genDecls := make(map[string]*ast.GenDecl) // ifaceName -> GenDecl nodeFilter := []ast.Node{ (*ast.GenDecl)(nil), @@ -81,7 +85,11 @@ func (r *runner) run(pass *analysis.Pass) (interface{}, error) { _, ok = ts.Type.(*ast.InterfaceType) if !ok { continue } if r.debug { @@ -94,8 +102,12 @@ func (r *runner) run(pass *analysis.Pass) (interface{}, error) { continue } ifaceDecls[ts.Name.Name] = ts genDecls[ts.Name.Name] = decl } }) @@ -119,24 +131,33 @@ func (r *runner) run(pass *analysis.Pass) (interface{}, error) { return } ts, ok := ifaceDecls[ident.Name] if !ok { return } if ts.Pos() == ident.Pos() { // The identifier is the interface type declaration return } delete(ifaceDecls, ident.Name) delete(genDecls, ident.Name) }) if r.debug { fmt.Printf("Package %s %s\n", pass.Pkg.Path(), pass.Pkg.Name()) } for name, ts := range ifaceDecls { decl := genDecls[name] @@ -164,6 +185,10 @@ func (r *runner) run(pass *analysis.Pass) (interface{}, error) { }, }, }) } return nil, nil diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go index 6a6b3e0182..3b6eaab29c 100644 --- a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go +++ b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go @@ -56,11 +56,14 @@ func (tr *Reader) RawBytes() []byte { } // ExpectedPadding returns the number of bytes of padding expected after the last header returned by Next() func (tr *Reader) ExpectedPadding() int64 { return tr.pad } // NewReader creates a new Reader reading from r. 
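//
// A typical read loop over the archive's entries (an illustrative sketch;
// f stands in for any io.Reader supplying the tar stream):
//
//	tr := tar.NewReader(f)
//	for {
//		hdr, err := tr.Next()
//		if err == io.EOF {
//			break // end of archive
//		}
//		if err != nil {
//			log.Fatal(err)
//		}
//		fmt.Println(hdr.Name)
//	}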
func NewReader(r io.Reader) *Reader { return &Reader{r: r, curr: &regFileReader{r, 0}} diff --git a/vendor/github.com/xanzy/go-gitlab/.gitignore b/vendor/github.com/xanzy/go-gitlab/.gitignore new file mode 100644 index 0000000000..76a9f4df79 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/.gitignore @@ -0,0 +1,33 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# IDE specific files and folders +.idea +*.iml +*.swp +*.swo + +# vendor +vendor diff --git a/vendor/github.com/xanzy/go-gitlab/.golangci.yml b/vendor/github.com/xanzy/go-gitlab/.golangci.yml new file mode 100644 index 0000000000..7c05feeefc --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/.golangci.yml @@ -0,0 +1,56 @@ +# This file contains all available configuration options +# with their default values. + +# Options for analysis running +run: + concurrency: 4 + timeout: 10m + issues-exit-code: 1 + tests: true + +# Output configuration options +output: + formats: + - format: line-number + +# All available settings of specific linters +linters-settings: + misspell: + locale: US + ignore-words: + - noteable + +linters: + enable: + - asciicheck + - dogsled + - errorlint + - exportloopref + - goconst + - gosimple + - govet + - ineffassign + - megacheck + - misspell + - nakedret + - nolintlint + - staticcheck + - typecheck + - unconvert + - unused + - whitespace + disable: + - errcheck + disable-all: false + fast: false + +issues: + # List of regexps of issue texts to exclude. + exclude: + - "^.*, make it a constant$" + + # Maximum issues count per one linter (set to 0 to disable) + max-issues-per-linter: 0 + + # Maximum count of issues with the same text (set to 0 to disable) + max-same-issues: 0 diff --git a/vendor/github.com/xanzy/go-gitlab/CONTRIBUTING.md b/vendor/github.com/xanzy/go-gitlab/CONTRIBUTING.md new file mode 100644 index 0000000000..32bd822745 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/CONTRIBUTING.md @@ -0,0 +1,53 @@ +# How to Contribute + +We want to make contributing to this project as easy as possible. + +## Reporting Issues + +If you have an issue, please report it on the [issue tracker](https://github.com/xanzy/go-gitlab/issues). + +When you are up for writing a PR to solve the issue you encountered, it's not +needed to first open a separate issue. In that case only opening a PR with a +description of the issue you are trying to solve is just fine. + +## Contributing Code + +Pull requests are always welcome. When in doubt if your contribution fits within +the rest of the project, feel free to first open an issue to discuss your idea. + +This is not needed when fixing a bug or adding an enhancement, as long as the +enhancement you are trying to add can be found in the public GitLab API docs as +this project only supports what is in the public API docs. + +## Coding style + +We try to follow the Go best practices, where it makes sense, and use +[`gofumpt`](https://github.com/mvdan/gofumpt) to format code in this project. +As a general rule of thumb we prefer to keep line width for comments below 80 +chars and for code (where possible and sensible) below 100 chars. + +Before making a PR, please look at the rest of this package and try to make sure +your contribution is consistent with the rest of the coding style.
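+
+For example, a typical pre-commit pass looks like this (an illustrative
+sketch; the `make fmt` and `make test` steps below wrap the same tools):
+
+```sh
+gofumpt -l -w .
+golangci-lint run
+```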
+ +New struct field or methods should be placed (as much as possible) in the same +order as the ordering used in the public API docs. The idea is that this makes it +easier to find things. + +### Setting up your local development environment to Contribute to `go-gitlab` + +1. [Fork](https://github.com/xanzy/go-gitlab/fork), then clone the repository. + ```sh + git clone https://github.com//go-gitlab.git + # or via ssh + git clone git@github.com:/go-gitlab.git + ``` +1. Install dependencies: + ```sh + make setup + ``` +1. Make your changes on your feature branch +1. Run the tests and `gofumpt` + ```sh + make test && make fmt + ``` +1. Open up your pull request diff --git a/vendor/github.com/xanzy/go-gitlab/LICENSE b/vendor/github.com/xanzy/go-gitlab/LICENSE new file mode 100644 index 0000000000..8dada3edaf --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/xanzy/go-gitlab/Makefile b/vendor/github.com/xanzy/go-gitlab/Makefile new file mode 100644 index 0000000000..749cb2953e --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/Makefile @@ -0,0 +1,22 @@ +##@ General + +.PHONY: help +help: ## Display this help + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +##@ Development + +fmt: ## Format code + @gofumpt -l -w . + +lint: ## Run linter + @golangci-lint run + +setup: ## Setup your local environment + go mod tidy + @go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest + @go install mvdan.cc/gofumpt@latest +.PHONY: setup + +test: ## Run tests + go test ./... -race diff --git a/vendor/github.com/xanzy/go-gitlab/README.md b/vendor/github.com/xanzy/go-gitlab/README.md new file mode 100644 index 0000000000..fa5a049a3b --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/README.md @@ -0,0 +1,208 @@ +# go-gitlab + +A GitLab API client enabling Go programs to interact with GitLab in a simple and uniform way + +[![Build Status](https://github.com/xanzy/go-gitlab/workflows/Lint%20and%20Test/badge.svg)](https://github.com/xanzy/go-gitlab/actions?workflow=Lint%20and%20Test) +[![Sourcegraph](https://sourcegraph.com/github.com/xanzy/go-gitlab/-/badge.svg)](https://sourcegraph.com/github.com/xanzy/go-gitlab?badge) +[![GoDoc](https://godoc.org/github.com/xanzy/go-gitlab?status.svg)](https://godoc.org/github.com/xanzy/go-gitlab) +[![Go Report Card](https://goreportcard.com/badge/github.com/xanzy/go-gitlab)](https://goreportcard.com/report/github.com/xanzy/go-gitlab) +[![Coverage](https://github.com/xanzy/go-gitlab/wiki/coverage.svg)](https://raw.githack.com/wiki/xanzy/go-gitlab/coverage.html) + +## NOTE + +Release v0.6.0 (released on 25-08-2017) no longer supports the older V3 GitLab API. If +you need V3 support, please use the `f-api-v3` branch. This release contains some backwards +incompatible changes that were needed to fully support the V4 GitLab API. + +## Coverage + +This API client package covers most of the existing GitLab API calls and is updated regularly +to add new and/or missing endpoints. 
Currently, the following services are supported: + +- [x] Applications +- [x] Award Emojis +- [x] Branches +- [x] Broadcast Messages +- [x] Commits +- [x] Container Registry +- [x] Custom Attributes +- [x] Deploy Keys +- [x] Deployments +- [x] Discussions (threaded comments) +- [x] Environments +- [x] Epic Issues +- [x] Epics +- [x] Error Tracking +- [x] Events +- [x] Feature Flags +- [x] Geo Nodes +- [x] Generic Packages +- [x] GitLab CI Config Templates +- [x] Gitignores Templates +- [x] Group Access Requests +- [x] Group Issue Boards +- [x] Group Members +- [x] Group Milestones +- [x] Group Wikis +- [x] Group-Level Variables +- [x] Groups +- [x] Instance Clusters +- [x] Invites +- [x] Issue Boards +- [x] Issues +- [x] Jobs +- [x] Keys +- [x] Labels +- [x] License +- [x] Markdown +- [x] Merge Request Approvals +- [x] Merge Requests +- [x] Namespaces +- [x] Notes (comments) +- [x] Notification Settings +- [x] Open Source License Templates +- [x] Packages +- [x] Pages +- [x] Pages Domains +- [x] Personal Access Tokens +- [x] Pipeline Schedules +- [x] Pipeline Triggers +- [x] Pipelines +- [x] Plan limits +- [x] Project Access Requests +- [x] Project Badges +- [x] Project Clusters +- [x] Project Import/export +- [x] Project Members +- [x] Project Milestones +- [x] Project Repository Storage Moves +- [x] Project Snippets +- [x] Project Vulnerabilities +- [x] Project-Level Variables +- [x] Projects (including setting Webhooks) +- [x] Protected Branches +- [x] Protected Environments +- [x] Protected Tags +- [x] Repositories +- [x] Repository Files +- [x] Repository Submodules +- [x] Runners +- [x] Search +- [x] Services +- [x] Settings +- [x] Sidekiq Metrics +- [x] System Hooks +- [x] Tags +- [x] Todos +- [x] Topics +- [x] Users +- [x] Validate CI Configuration +- [x] Version +- [x] Wikis + +## Usage + +```go +import "github.com/xanzy/go-gitlab" +``` + +Construct a new GitLab client, then use the various services on the client to +access different parts of the GitLab API. For example, to list all +users: + +```go +git, err := gitlab.NewClient("yourtokengoeshere") +if err != nil { + log.Fatalf("Failed to create client: %v", err) +} +users, _, err := git.Users.ListUsers(&gitlab.ListUsersOptions{}) +``` + +There are a few `With...` option functions that can be used to customize +the API client. For example, to set a custom base URL: + +```go +git, err := gitlab.NewClient("yourtokengoeshere", gitlab.WithBaseURL("https://git.mydomain.com/api/v4")) +if err != nil { + log.Fatalf("Failed to create client: %v", err) +} +users, _, err := git.Users.ListUsers(&gitlab.ListUsersOptions{}) +``` + +Some API methods have optional parameters that can be passed. 
For example, +to list all projects for user "svanharmelen": + +```go +git, err := gitlab.NewClient("yourtokengoeshere") +if err != nil { + log.Fatalf("Failed to create client: %v", err) +} +opt := &gitlab.ListProjectsOptions{Search: gitlab.Ptr("svanharmelen")} +projects, _, err := git.Projects.ListProjects(opt) +``` + +### Examples + +The [examples](https://github.com/xanzy/go-gitlab/tree/master/examples) directory +contains a couple of clear examples, of which one is partially listed here as well: + +```go +package main + +import ( + "log" + + "github.com/xanzy/go-gitlab" +) + +func main() { + git, err := gitlab.NewClient("yourtokengoeshere") + if err != nil { + log.Fatalf("Failed to create client: %v", err) + } + + // Create new project + p := &gitlab.CreateProjectOptions{ + Name: gitlab.Ptr("My Project"), + Description: gitlab.Ptr("Just a test project to play with"), + MergeRequestsAccessLevel: gitlab.Ptr(gitlab.EnabledAccessControl), + SnippetsAccessLevel: gitlab.Ptr(gitlab.EnabledAccessControl), + Visibility: gitlab.Ptr(gitlab.PublicVisibility), + } + project, _, err := git.Projects.CreateProject(p) + if err != nil { + log.Fatal(err) + } + + // Add a new snippet + s := &gitlab.CreateProjectSnippetOptions{ + Title: gitlab.Ptr("Dummy Snippet"), + FileName: gitlab.Ptr("snippet.go"), + Content: gitlab.Ptr("package main...."), + Visibility: gitlab.Ptr(gitlab.PublicVisibility), + } + _, _, err = git.ProjectSnippets.CreateSnippet(project.ID, s) + if err != nil { + log.Fatal(err) + } +} +``` + +For complete usage of go-gitlab, see the full [package docs](https://godoc.org/github.com/xanzy/go-gitlab). + +## ToDo + +- The biggest thing this package still needs is tests :disappointed: + +## Issues + +- If you have an issue: report it on the [issue tracker](https://github.com/xanzy/go-gitlab/issues) + +## Author + +Sander van Harmelen () + +## Contributing + +Contributions are always welcome. For more information, check out the [contributing guide](https://github.com/xanzy/go-gitlab/blob/master/CONTRIBUTING.md) + +## License + +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/vendor/github.com/xanzy/go-gitlab/access_requests.go b/vendor/github.com/xanzy/go-gitlab/access_requests.go new file mode 100644 index 0000000000..2e07187e58 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/access_requests.go @@ -0,0 +1,253 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" + "time" +) + +// AccessRequest represents an access request for a group or project. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/access_requests.html +type AccessRequest struct { + ID int `json:"id"` + Username string `json:"username"` + Name string `json:"name"` + State string `json:"state"` + CreatedAt *time.Time `json:"created_at"` + RequestedAt *time.Time `json:"requested_at"` + AccessLevel AccessLevelValue `json:"access_level"` +} + +// AccessRequestsService handles communication with the project/group +// access requests related methods of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/access_requests.html +type AccessRequestsService struct { + client *Client +} + +// ListAccessRequestsOptions represents the available +// ListProjectAccessRequests() or ListGroupAccessRequests() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/access_requests.html#list-access-requests-for-a-group-or-project +type ListAccessRequestsOptions ListOptions + +// ListProjectAccessRequests gets a list of access requests +// viewable by the authenticated user. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/access_requests.html#list-access-requests-for-a-group-or-project +func (s *AccessRequestsService) ListProjectAccessRequests(pid interface{}, opt *ListAccessRequestsOptions, options ...RequestOptionFunc) ([]*AccessRequest, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/access_requests", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var ars []*AccessRequest + resp, err := s.client.Do(req, &ars) + if err != nil { + return nil, resp, err + } + + return ars, resp, nil +} + +// ListGroupAccessRequests gets a list of access requests +// viewable by the authenticated user. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/access_requests.html#list-access-requests-for-a-group-or-project +func (s *AccessRequestsService) ListGroupAccessRequests(gid interface{}, opt *ListAccessRequestsOptions, options ...RequestOptionFunc) ([]*AccessRequest, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/access_requests", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var ars []*AccessRequest + resp, err := s.client.Do(req, &ars) + if err != nil { + return nil, resp, err + } + + return ars, resp, nil +} + +// RequestProjectAccess requests access for the authenticated user +// to a group or project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/access_requests.html#request-access-to-a-group-or-project +func (s *AccessRequestsService) RequestProjectAccess(pid interface{}, options ...RequestOptionFunc) (*AccessRequest, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/access_requests", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPost, u, nil, options) + if err != nil { + return nil, nil, err + } + + ar := new(AccessRequest) + resp, err := s.client.Do(req, ar) + if err != nil { + return nil, resp, err + } + + return ar, resp, nil +} + +// RequestGroupAccess requests access for the authenticated user +// to a group or project. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/access_requests.html#request-access-to-a-group-or-project +func (s *AccessRequestsService) RequestGroupAccess(gid interface{}, options ...RequestOptionFunc) (*AccessRequest, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/access_requests", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodPost, u, nil, options) + if err != nil { + return nil, nil, err + } + + ar := new(AccessRequest) + resp, err := s.client.Do(req, ar) + if err != nil { + return nil, resp, err + } + + return ar, resp, nil +} + +// ApproveAccessRequestOptions represents the available +// ApproveProjectAccessRequest() and ApproveGroupAccessRequest() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/access_requests.html#approve-an-access-request +type ApproveAccessRequestOptions struct { + AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` +} + +// ApproveProjectAccessRequest approves an access request for the given user. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/access_requests.html#approve-an-access-request +func (s *AccessRequestsService) ApproveProjectAccessRequest(pid interface{}, user int, opt *ApproveAccessRequestOptions, options ...RequestOptionFunc) (*AccessRequest, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/access_requests/%d/approve", PathEscape(project), user) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, nil, err + } + + ar := new(AccessRequest) + resp, err := s.client.Do(req, ar) + if err != nil { + return nil, resp, err + } + + return ar, resp, nil +} + +// ApproveGroupAccessRequest approves an access request for the given user. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/access_requests.html#approve-an-access-request +func (s *AccessRequestsService) ApproveGroupAccessRequest(gid interface{}, user int, opt *ApproveAccessRequestOptions, options ...RequestOptionFunc) (*AccessRequest, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/access_requests/%d/approve", PathEscape(group), user) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, nil, err + } + + ar := new(AccessRequest) + resp, err := s.client.Do(req, ar) + if err != nil { + return nil, resp, err + } + + return ar, resp, nil +} + +// DenyProjectAccessRequest denies an access request for the given user. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/access_requests.html#deny-an-access-request +func (s *AccessRequestsService) DenyProjectAccessRequest(pid interface{}, user int, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/access_requests/%d", PathEscape(project), user) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// DenyGroupAccessRequest denies an access request for the given user. 
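+//
+// A sketch of typical use (assuming an authenticated *gitlab.Client named
+// git, whose AccessRequests field exposes this service):
+//
+//	resp, err := git.AccessRequests.DenyGroupAccessRequest("my-group", 42)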
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/access_requests.html#deny-an-access-request +func (s *AccessRequestsService) DenyGroupAccessRequest(gid interface{}, user int, options ...RequestOptionFunc) (*Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("groups/%s/access_requests/%d", PathEscape(group), user) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/appearance.go b/vendor/github.com/xanzy/go-gitlab/appearance.go new file mode 100644 index 0000000000..f21893c0e0 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/appearance.go @@ -0,0 +1,110 @@ +// +// Copyright 2023, 徐晓伟 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import "net/http" + +// AppearanceService handles communication with appearance of the Gitlab API. +// +// Gitlab API docs : https://docs.gitlab.com/ee/api/appearance.html +type AppearanceService struct { + client *Client +} + +// Appearance represents a GitLab appearance. +// +// Gitlab API docs : https://docs.gitlab.com/ee/api/appearance.html +type Appearance struct { + Title string `json:"title"` + Description string `json:"description"` + PWAName string `json:"pwa_name"` + PWAShortName string `json:"pwa_short_name"` + PWADescription string `json:"pwa_description"` + PWAIcon string `json:"pwa_icon"` + Logo string `json:"logo"` + HeaderLogo string `json:"header_logo"` + Favicon string `json:"favicon"` + NewProjectGuidelines string `json:"new_project_guidelines"` + ProfileImageGuidelines string `json:"profile_image_guidelines"` + HeaderMessage string `json:"header_message"` + FooterMessage string `json:"footer_message"` + MessageBackgroundColor string `json:"message_background_color"` + MessageFontColor string `json:"message_font_color"` + EmailHeaderAndFooterEnabled bool `json:"email_header_and_footer_enabled"` +} + +// GetAppearance gets the current appearance configuration of the GitLab instance. +// +// Gitlab API docs: +// https://docs.gitlab.com/ee/api/appearance.html#get-current-appearance-configuration +func (s *AppearanceService) GetAppearance(options ...RequestOptionFunc) (*Appearance, *Response, error) { + req, err := s.client.NewRequest(http.MethodGet, "application/appearance", nil, options) + if err != nil { + return nil, nil, err + } + + as := new(Appearance) + resp, err := s.client.Do(req, as) + if err != nil { + return nil, resp, err + } + + return as, resp, nil +} + +// ChangeAppearanceOptions represents the available ChangeAppearance() options. 
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/appearance.html#change-appearance-configuration
+type ChangeAppearanceOptions struct {
+	Title                       *string `url:"title,omitempty" json:"title,omitempty"`
+	Description                 *string `url:"description,omitempty" json:"description,omitempty"`
+	PWAName                     *string `url:"pwa_name,omitempty" json:"pwa_name,omitempty"`
+	PWAShortName                *string `url:"pwa_short_name,omitempty" json:"pwa_short_name,omitempty"`
+	PWADescription              *string `url:"pwa_description,omitempty" json:"pwa_description,omitempty"`
+	PWAIcon                     *string `url:"pwa_icon,omitempty" json:"pwa_icon,omitempty"`
+	Logo                        *string `url:"logo,omitempty" json:"logo,omitempty"`
+	HeaderLogo                  *string `url:"header_logo,omitempty" json:"header_logo,omitempty"`
+	Favicon                     *string `url:"favicon,omitempty" json:"favicon,omitempty"`
+	NewProjectGuidelines        *string `url:"new_project_guidelines,omitempty" json:"new_project_guidelines,omitempty"`
+	ProfileImageGuidelines      *string `url:"profile_image_guidelines,omitempty" json:"profile_image_guidelines,omitempty"`
+	HeaderMessage               *string `url:"header_message,omitempty" json:"header_message,omitempty"`
+	FooterMessage               *string `url:"footer_message,omitempty" json:"footer_message,omitempty"`
+	MessageBackgroundColor      *string `url:"message_background_color,omitempty" json:"message_background_color,omitempty"`
+	MessageFontColor            *string `url:"message_font_color,omitempty" json:"message_font_color,omitempty"`
+	EmailHeaderAndFooterEnabled *bool   `url:"email_header_and_footer_enabled,omitempty" json:"email_header_and_footer_enabled,omitempty"`
+	URL                         *string `url:"url,omitempty" json:"url,omitempty"`
+}
+
+// ChangeAppearance changes the appearance configuration.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/appearance.html#change-appearance-configuration
+func (s *AppearanceService) ChangeAppearance(opt *ChangeAppearanceOptions, options ...RequestOptionFunc) (*Appearance, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodPut, "application/appearance", opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	as := new(Appearance)
+	resp, err := s.client.Do(req, as)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return as, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/applications.go b/vendor/github.com/xanzy/go-gitlab/applications.go
new file mode 100644
index 0000000000..5335f6cffc
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/applications.go
@@ -0,0 +1,106 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+)
+
+// ApplicationsService handles communication with the administrable
+// applications methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/applications.html
+type ApplicationsService struct {
+	client *Client
+}
+
+// Application represents a GitLab application.
+type Application struct {
+	ID              int    `json:"id"`
+	ApplicationID   string `json:"application_id"`
+	ApplicationName string `json:"application_name"`
+	Secret          string `json:"secret"`
+	CallbackURL     string `json:"callback_url"`
+	Confidential    bool   `json:"confidential"`
+}
+
+// CreateApplicationOptions represents the available CreateApplication() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/applications.html#create-an-application
+type CreateApplicationOptions struct {
+	Name         *string `url:"name,omitempty" json:"name,omitempty"`
+	RedirectURI  *string `url:"redirect_uri,omitempty" json:"redirect_uri,omitempty"`
+	Scopes       *string `url:"scopes,omitempty" json:"scopes,omitempty"`
+	Confidential *bool   `url:"confidential,omitempty" json:"confidential,omitempty"`
+}
+
+// CreateApplication creates a new application owned by the authenticated user.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/applications.html#create-an-application
+func (s *ApplicationsService) CreateApplication(opt *CreateApplicationOptions, options ...RequestOptionFunc) (*Application, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodPost, "applications", opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	a := new(Application)
+	resp, err := s.client.Do(req, a)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return a, resp, nil
+}
+
+// ListApplicationsOptions represents the available
+// ListApplications() options.
+type ListApplicationsOptions ListOptions
+
+// ListApplications gets a list of administrable applications for the
+// authenticated user.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/applications.html#list-all-applications
+func (s *ApplicationsService) ListApplications(opt *ListApplicationsOptions, options ...RequestOptionFunc) ([]*Application, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodGet, "applications", opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var as []*Application
+	resp, err := s.client.Do(req, &as)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return as, resp, nil
+}
+
+// DeleteApplication removes a specific application.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/applications.html#delete-an-application
+func (s *ApplicationsService) DeleteApplication(application int, options ...RequestOptionFunc) (*Response, error) {
+	u := fmt.Sprintf("applications/%d", application)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/audit_events.go b/vendor/github.com/xanzy/go-gitlab/audit_events.go
new file mode 100644
index 0000000000..de312e5606
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/audit_events.go
@@ -0,0 +1,202 @@
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// AuditEvent represents an audit event for a group, a project or the instance.
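+//
+// Events are retrieved through AuditEventsService; a paging sketch, assuming
+// an AuditEvents field on the client and administrator credentials:
+//
+//	opt := &gitlab.ListAuditEventsOptions{
+//		ListOptions: gitlab.ListOptions{PerPage: 20},
+//	}
+//	events, _, err := git.AuditEvents.ListInstanceAuditEvents(opt)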
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/audit_events.html +type AuditEvent struct { + ID int `json:"id"` + AuthorID int `json:"author_id"` + EntityID int `json:"entity_id"` + EntityType string `json:"entity_type"` + Details AuditEventDetails `json:"details"` + CreatedAt *time.Time `json:"created_at"` + EventType string `json:"event_type"` +} + +// AuditEventDetails represents the details portion of an audit event for +// a group, a project or the instance. The exact fields that are returned +// for an audit event depend on the action being recorded. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/audit_events.html +type AuditEventDetails struct { + With string `json:"with"` + Add string `json:"add"` + As string `json:"as"` + Change string `json:"change"` + From string `json:"from"` + To string `json:"to"` + Remove string `json:"remove"` + CustomMessage string `json:"custom_message"` + AuthorName string `json:"author_name"` + AuthorEmail string `json:"author_email"` + AuthorClass string `json:"author_class"` + TargetID interface{} `json:"target_id"` + TargetType string `json:"target_type"` + TargetDetails string `json:"target_details"` + IPAddress string `json:"ip_address"` + EntityPath string `json:"entity_path"` + FailedLogin string `json:"failed_login"` +} + +// AuditEventsService handles communication with the project/group/instance +// audit event related methods of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/audit_events.html +type AuditEventsService struct { + client *Client +} + +// ListAuditEventsOptions represents the available ListProjectAuditEvents(), +// ListGroupAuditEvents() or ListInstanceAuditEvents() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/audit_events.html +type ListAuditEventsOptions struct { + ListOptions + CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"` + CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` +} + +// ListInstanceAuditEvents gets a list of audit events for instance. +// Authentication as Administrator is required. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/audit_events.html#retrieve-all-instance-audit-events +func (s *AuditEventsService) ListInstanceAuditEvents(opt *ListAuditEventsOptions, options ...RequestOptionFunc) ([]*AuditEvent, *Response, error) { + req, err := s.client.NewRequest(http.MethodGet, "audit_events", opt, options) + if err != nil { + return nil, nil, err + } + + var aes []*AuditEvent + resp, err := s.client.Do(req, &aes) + if err != nil { + return nil, resp, err + } + + return aes, resp, nil +} + +// GetInstanceAuditEvent gets a specific instance audit event. +// Authentication as Administrator is required. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/audit_events.html#retrieve-single-instance-audit-event +func (s *AuditEventsService) GetInstanceAuditEvent(event int, options ...RequestOptionFunc) (*AuditEvent, *Response, error) { + u := fmt.Sprintf("audit_events/%d", event) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + ae := new(AuditEvent) + resp, err := s.client.Do(req, ae) + if err != nil { + return nil, resp, err + } + + return ae, resp, nil +} + +// ListGroupAuditEvents gets a list of audit events for the specified group +// viewable by the authenticated user. 
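+//
+// Sketch: gid may be a numeric group ID or a full path string (values here
+// are hypothetical; a nil options pointer requests the default page):
+//
+//	events, _, err := git.AuditEvents.ListGroupAuditEvents("my-group", nil)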
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/audit_events.html#retrieve-all-group-audit-events +func (s *AuditEventsService) ListGroupAuditEvents(gid interface{}, opt *ListAuditEventsOptions, options ...RequestOptionFunc) ([]*AuditEvent, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/audit_events", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var aes []*AuditEvent + resp, err := s.client.Do(req, &aes) + if err != nil { + return nil, resp, err + } + + return aes, resp, nil +} + +// GetGroupAuditEvent gets a specific group audit event. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/audit_events.html#retrieve-a-specific-group-audit-event +func (s *AuditEventsService) GetGroupAuditEvent(gid interface{}, event int, options ...RequestOptionFunc) (*AuditEvent, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/audit_events/%d", PathEscape(group), event) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + ae := new(AuditEvent) + resp, err := s.client.Do(req, ae) + if err != nil { + return nil, resp, err + } + + return ae, resp, nil +} + +// ListProjectAuditEvents gets a list of audit events for the specified project +// viewable by the authenticated user. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/audit_events.html#retrieve-all-project-audit-events +func (s *AuditEventsService) ListProjectAuditEvents(pid interface{}, opt *ListAuditEventsOptions, options ...RequestOptionFunc) ([]*AuditEvent, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/audit_events", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var aes []*AuditEvent + resp, err := s.client.Do(req, &aes) + if err != nil { + return nil, resp, err + } + + return aes, resp, nil +} + +// GetProjectAuditEvent gets a specific project audit event. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/audit_events.html#retrieve-a-specific-project-audit-event +func (s *AuditEventsService) GetProjectAuditEvent(pid interface{}, event int, options ...RequestOptionFunc) (*AuditEvent, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/audit_events/%d", PathEscape(project), event) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + ae := new(AuditEvent) + resp, err := s.client.Do(req, ae) + if err != nil { + return nil, resp, err + } + + return ae, resp, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/avatar.go b/vendor/github.com/xanzy/go-gitlab/avatar.go new file mode 100644 index 0000000000..1a7b923f3d --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/avatar.go @@ -0,0 +1,64 @@ +// +// Copyright 2021, Pavel Kostohrys +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "net/http" +) + +// AvatarRequestsService handles communication with the avatar related methods +// of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/avatar.html +type AvatarRequestsService struct { + client *Client +} + +// Avatar represents a GitLab avatar. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/avatar.html +type Avatar struct { + AvatarURL string `json:"avatar_url"` +} + +// GetAvatarOptions represents the available GetAvatar() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/avatar.html#get-a-single-avatar-url +type GetAvatarOptions struct { + Email *string `url:"email,omitempty" json:"email,omitempty"` + Size *int `url:"size,omitempty" json:"size,omitempty"` +} + +// GetAvatar gets the avatar URL for a user with the given email address. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/avatar.html#get-a-single-avatar-url +func (s *AvatarRequestsService) GetAvatar(opt *GetAvatarOptions, options ...RequestOptionFunc) (*Avatar, *Response, error) { + req, err := s.client.NewRequest(http.MethodGet, "avatar", opt, options) + if err != nil { + return nil, nil, err + } + + avatar := new(Avatar) + response, err := s.client.Do(req, avatar) + if err != nil { + return nil, response, err + } + + return avatar, response, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/award_emojis.go b/vendor/github.com/xanzy/go-gitlab/award_emojis.go new file mode 100644 index 0000000000..f7673a3a5a --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/award_emojis.go @@ -0,0 +1,468 @@ +// +// Copyright 2021, Arkbriar +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" + "time" +) + +// AwardEmojiService handles communication with the emoji awards related methods +// of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/award_emoji.html +type AwardEmojiService struct { + client *Client +} + +// AwardEmoji represents a GitLab Award Emoji. 
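+//
+// A listing sketch, assuming an AwardEmoji field on the client (project path
+// and issue IID are illustrative):
+//
+//	emoji, _, err := git.AwardEmoji.ListIssueAwardEmoji("group/project", 7, nil)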
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/award_emoji.html
type AwardEmoji struct {
+	ID   int    `json:"id"`
+	Name string `json:"name"`
+	User struct {
+		Name      string `json:"name"`
+		Username  string `json:"username"`
+		ID        int    `json:"id"`
+		State     string `json:"state"`
+		AvatarURL string `json:"avatar_url"`
+		WebURL    string `json:"web_url"`
+	} `json:"user"`
+	CreatedAt     *time.Time `json:"created_at"`
+	UpdatedAt     *time.Time `json:"updated_at"`
+	AwardableID   int        `json:"awardable_id"`
+	AwardableType string     `json:"awardable_type"`
+}
+
+const (
+	awardMergeRequest = "merge_requests"
+	awardIssue        = "issues"
+	awardSnippets     = "snippets"
+)
+
+// ListAwardEmojiOptions represents the available options for listing emoji
+// for each resource.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/award_emoji.html
+type ListAwardEmojiOptions ListOptions
+
+// ListMergeRequestAwardEmoji gets a list of all award emoji on the merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/award_emoji.html#list-an-awardables-award-emojis
+func (s *AwardEmojiService) ListMergeRequestAwardEmoji(pid interface{}, mergeRequestIID int, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) {
+	return s.listAwardEmoji(pid, awardMergeRequest, mergeRequestIID, opt, options...)
+}
+
+// ListIssueAwardEmoji gets a list of all award emoji on the issue.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/award_emoji.html#list-an-awardables-award-emojis
+func (s *AwardEmojiService) ListIssueAwardEmoji(pid interface{}, issueIID int, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) {
+	return s.listAwardEmoji(pid, awardIssue, issueIID, opt, options...)
+}
+
+// ListSnippetAwardEmoji gets a list of all award emoji on the snippet.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/award_emoji.html#list-an-awardables-award-emojis
+func (s *AwardEmojiService) ListSnippetAwardEmoji(pid interface{}, snippetID int, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) {
+	return s.listAwardEmoji(pid, awardSnippets, snippetID, opt, options...)
+}
+
+func (s *AwardEmojiService) listAwardEmoji(pid interface{}, resource string, resourceID int, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/%s/%d/award_emoji",
+		PathEscape(project),
+		resource,
+		resourceID,
+	)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var as []*AwardEmoji
+	resp, err := s.client.Do(req, &as)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return as, resp, nil
+}
+
+// GetMergeRequestAwardEmoji gets an award emoji from a merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/award_emoji.html#get-single-award-emoji
+func (s *AwardEmojiService) GetMergeRequestAwardEmoji(pid interface{}, mergeRequestIID, awardID int, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) {
+	return s.getAwardEmoji(pid, awardMergeRequest, mergeRequestIID, awardID, options...)
+}
+
+// GetIssueAwardEmoji gets an award emoji from an issue.
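+//
+// Sketch (project path, issue IID, and award ID are illustrative):
+//
+//	e, _, err := git.AwardEmoji.GetIssueAwardEmoji("group/project", 7, 42)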
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/award_emoji.html#get-single-award-emoji
+func (s *AwardEmojiService) GetIssueAwardEmoji(pid interface{}, issueIID, awardID int, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) {
+	return s.getAwardEmoji(pid, awardIssue, issueIID, awardID, options...)
+}
+
+// GetSnippetAwardEmoji gets an award emoji from a snippet.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/award_emoji.html#get-single-award-emoji
+func (s *AwardEmojiService) GetSnippetAwardEmoji(pid interface{}, snippetID, awardID int, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) {
+	return s.getAwardEmoji(pid, awardSnippets, snippetID, awardID, options...)
+}
+
+func (s *AwardEmojiService) getAwardEmoji(pid interface{}, resource string, resourceID, awardID int, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/%s/%d/award_emoji/%d",
+		PathEscape(project),
+		resource,
+		resourceID,
+		awardID,
+	)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	a := new(AwardEmoji)
+	resp, err := s.client.Do(req, &a)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return a, resp, nil
+}
+
+// CreateAwardEmojiOptions represents the available options for awarding emoji
+// on a resource.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/award_emoji.html#award-a-new-emoji
+type CreateAwardEmojiOptions struct {
+	Name string `json:"name"`
+}
+
+// CreateMergeRequestAwardEmoji awards a new emoji on a merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/award_emoji.html#award-a-new-emoji
+func (s *AwardEmojiService) CreateMergeRequestAwardEmoji(pid interface{}, mergeRequestIID int, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) {
+	return s.createAwardEmoji(pid, awardMergeRequest, mergeRequestIID, opt, options...)
+}
+
+// CreateIssueAwardEmoji awards a new emoji on an issue.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/award_emoji.html#award-a-new-emoji
+func (s *AwardEmojiService) CreateIssueAwardEmoji(pid interface{}, issueIID int, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) {
+	return s.createAwardEmoji(pid, awardIssue, issueIID, opt, options...)
+}
+
+// CreateSnippetAwardEmoji awards a new emoji on a snippet.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/award_emoji.html#award-a-new-emoji
+func (s *AwardEmojiService) CreateSnippetAwardEmoji(pid interface{}, snippetID int, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) {
+	return s.createAwardEmoji(pid, awardSnippets, snippetID, opt, options...)
+}
+
+func (s *AwardEmojiService) createAwardEmoji(pid interface{}, resource string, resourceID int, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/%s/%d/award_emoji",
+		PathEscape(project),
+		resource,
+		resourceID,
+	)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	a := new(AwardEmoji)
+	resp, err := s.client.Do(req, &a)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return a, resp, nil
+}
+
+// DeleteIssueAwardEmoji deletes an award emoji from an issue.
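+//
+// Deletion needs the award ID, typically obtained from an earlier List or
+// Create call; a sketch with illustrative IDs:
+//
+//	_, err := git.AwardEmoji.DeleteIssueAwardEmoji("group/project", 7, 42)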
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/award_emoji.html#delete-an-award-emoji
+func (s *AwardEmojiService) DeleteIssueAwardEmoji(pid interface{}, issueIID, awardID int, options ...RequestOptionFunc) (*Response, error) {
+	return s.deleteAwardEmoji(pid, awardIssue, issueIID, awardID, options...)
+}
+
+// DeleteMergeRequestAwardEmoji deletes an award emoji from a merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/award_emoji.html#delete-an-award-emoji
+func (s *AwardEmojiService) DeleteMergeRequestAwardEmoji(pid interface{}, mergeRequestIID, awardID int, options ...RequestOptionFunc) (*Response, error) {
+	return s.deleteAwardEmoji(pid, awardMergeRequest, mergeRequestIID, awardID, options...)
+}
+
+// DeleteSnippetAwardEmoji deletes an award emoji from a snippet.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/award_emoji.html#delete-an-award-emoji
+func (s *AwardEmojiService) DeleteSnippetAwardEmoji(pid interface{}, snippetID, awardID int, options ...RequestOptionFunc) (*Response, error) {
+	return s.deleteAwardEmoji(pid, awardSnippets, snippetID, awardID, options...)
+}
+
+// deleteAwardEmoji deletes an award emoji on the specified resource.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/award_emoji.html#delete-an-award-emoji
+func (s *AwardEmojiService) deleteAwardEmoji(pid interface{}, resource string, resourceID, awardID int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/%s/%d/award_emoji/%d", PathEscape(project), resource,
+		resourceID, awardID)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+	return s.client.Do(req, nil)
+}
+
+// ListIssuesAwardEmojiOnNote gets a list of all award emoji on a note from the
+// issue.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/award_emoji.html#list-a-comments-award-emojis
+func (s *AwardEmojiService) ListIssuesAwardEmojiOnNote(pid interface{}, issueID, noteID int, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) {
+	return s.listAwardEmojiOnNote(pid, awardIssue, issueID, noteID, opt, options...)
+}
+
+// ListMergeRequestAwardEmojiOnNote gets a list of all award emoji on a note
+// from the merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/award_emoji.html#list-a-comments-award-emojis
+func (s *AwardEmojiService) ListMergeRequestAwardEmojiOnNote(pid interface{}, mergeRequestIID, noteID int, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) {
+	return s.listAwardEmojiOnNote(pid, awardMergeRequest, mergeRequestIID, noteID, opt, options...)
+}
+
+// ListSnippetAwardEmojiOnNote gets a list of all award emoji on a note from the
+// snippet.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/award_emoji.html#list-a-comments-award-emojis
+func (s *AwardEmojiService) ListSnippetAwardEmojiOnNote(pid interface{}, snippetIID, noteID int, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) {
+	return s.listAwardEmojiOnNote(pid, awardSnippets, snippetIID, noteID, opt, options...)
+}
+
+func (s *AwardEmojiService) listAwardEmojiOnNote(pid interface{}, resource string, resourceID, noteID int, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/%s/%d/notes/%d/award_emoji", PathEscape(project), resource,
+		resourceID, noteID)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var as []*AwardEmoji
+	resp, err := s.client.Do(req, &as)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return as, resp, nil
+}
+
+// GetIssuesAwardEmojiOnNote gets an award emoji on a note from an issue.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/award_emoji.html#get-an-award-emoji-for-a-comment
+func (s *AwardEmojiService) GetIssuesAwardEmojiOnNote(pid interface{}, issueID, noteID, awardID int, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) {
+	return s.getSingleNoteAwardEmoji(pid, awardIssue, issueID, noteID, awardID, options...)
+}
+
+// GetMergeRequestAwardEmojiOnNote gets an award emoji on a note from a
+// merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/award_emoji.html#get-an-award-emoji-for-a-comment
+func (s *AwardEmojiService) GetMergeRequestAwardEmojiOnNote(pid interface{}, mergeRequestIID, noteID, awardID int, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) {
+	return s.getSingleNoteAwardEmoji(pid, awardMergeRequest, mergeRequestIID, noteID, awardID,
+		options...)
+}
+
+// GetSnippetAwardEmojiOnNote gets an award emoji on a note from a snippet.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/award_emoji.html#get-an-award-emoji-for-a-comment
+func (s *AwardEmojiService) GetSnippetAwardEmojiOnNote(pid interface{}, snippetIID, noteID, awardID int, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) {
+	return s.getSingleNoteAwardEmoji(pid, awardSnippets, snippetIID, noteID, awardID, options...)
+}
+
+func (s *AwardEmojiService) getSingleNoteAwardEmoji(pid interface{}, resource string, resourceID, noteID, awardID int, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/%s/%d/notes/%d/award_emoji/%d",
+		PathEscape(project),
+		resource,
+		resourceID,
+		noteID,
+		awardID,
+	)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	a := new(AwardEmoji)
+	resp, err := s.client.Do(req, &a)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return a, resp, nil
+}
+
+// CreateIssuesAwardEmojiOnNote awards a new emoji on a note from an issue.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/award_emoji.html#award-a-new-emoji-on-a-comment
+func (s *AwardEmojiService) CreateIssuesAwardEmojiOnNote(pid interface{}, issueID, noteID int, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) {
+	return s.createAwardEmojiOnNote(pid, awardIssue, issueID, noteID, opt, options...)
+}
+
+// CreateMergeRequestAwardEmojiOnNote awards a new emoji on a note from a
+// merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/award_emoji.html#award-a-new-emoji-on-a-comment
+func (s *AwardEmojiService) CreateMergeRequestAwardEmojiOnNote(pid interface{}, mergeRequestIID, noteID int, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) {
+	return s.createAwardEmojiOnNote(pid, awardMergeRequest, mergeRequestIID, noteID, opt, options...)
+}
+
+// CreateSnippetAwardEmojiOnNote awards a new emoji on a note from a snippet.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/award_emoji.html#award-a-new-emoji-on-a-comment
+func (s *AwardEmojiService) CreateSnippetAwardEmojiOnNote(pid interface{}, snippetIID, noteID int, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) {
+	return s.createAwardEmojiOnNote(pid, awardSnippets, snippetIID, noteID, opt, options...)
+}
+
+// createAwardEmojiOnNote awards a new emoji on a note.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/award_emoji.html#award-a-new-emoji-on-a-comment
+func (s *AwardEmojiService) createAwardEmojiOnNote(pid interface{}, resource string, resourceID, noteID int, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/%s/%d/notes/%d/award_emoji",
+		PathEscape(project),
+		resource,
+		resourceID,
+		noteID,
+	)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	a := new(AwardEmoji)
+	resp, err := s.client.Do(req, &a)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return a, resp, nil
+}
+
+// DeleteIssuesAwardEmojiOnNote deletes an award emoji on a note from an issue.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/award_emoji.html#delete-an-award-emoji-from-a-comment
+func (s *AwardEmojiService) DeleteIssuesAwardEmojiOnNote(pid interface{}, issueID, noteID, awardID int, options ...RequestOptionFunc) (*Response, error) {
+	return s.deleteAwardEmojiOnNote(pid, awardIssue, issueID, noteID, awardID, options...)
+}
+
+// DeleteMergeRequestAwardEmojiOnNote deletes an award emoji on a note from a
+// merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/award_emoji.html#delete-an-award-emoji-from-a-comment
+func (s *AwardEmojiService) DeleteMergeRequestAwardEmojiOnNote(pid interface{}, mergeRequestIID, noteID, awardID int, options ...RequestOptionFunc) (*Response, error) {
+	return s.deleteAwardEmojiOnNote(pid, awardMergeRequest, mergeRequestIID, noteID, awardID,
+		options...)
+}
+
+// DeleteSnippetAwardEmojiOnNote deletes an award emoji on a note from a snippet.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/award_emoji.html#delete-an-award-emoji-from-a-comment
+func (s *AwardEmojiService) DeleteSnippetAwardEmojiOnNote(pid interface{}, snippetIID, noteID, awardID int, options ...RequestOptionFunc) (*Response, error) {
+	return s.deleteAwardEmojiOnNote(pid, awardSnippets, snippetIID, noteID, awardID, options...)
+} + +func (s *AwardEmojiService) deleteAwardEmojiOnNote(pid interface{}, resource string, resourceID, noteID, awardID int, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/%s/%d/notes/%d/award_emoji/%d", + PathEscape(project), + resource, + resourceID, + noteID, + awardID, + ) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/boards.go b/vendor/github.com/xanzy/go-gitlab/boards.go new file mode 100644 index 0000000000..22e2cd7d9a --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/boards.go @@ -0,0 +1,367 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" +) + +// IssueBoardsService handles communication with the issue board related +// methods of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html +type IssueBoardsService struct { + client *Client +} + +// IssueBoard represents a GitLab issue board. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html +type IssueBoard struct { + ID int `json:"id"` + Name string `json:"name"` + Project *Project `json:"project"` + Milestone *Milestone `json:"milestone"` + Assignee *struct { + ID int `json:"id"` + Username string `json:"username"` + Name string `json:"name"` + State string `json:"state"` + AvatarURL string `json:"avatar_url"` + WebURL string `json:"web_url"` + } `json:"assignee"` + Lists []*BoardList `json:"lists"` + Weight int `json:"weight"` + Labels []*LabelDetails `json:"labels"` +} + +func (b IssueBoard) String() string { + return Stringify(b) +} + +// BoardList represents a GitLab board list. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html +type BoardList struct { + ID int `json:"id"` + Assignee *struct { + ID int `json:"id"` + Name string `json:"name"` + Username string `json:"username"` + } `json:"assignee"` + Iteration *ProjectIteration `json:"iteration"` + Label *Label `json:"label"` + MaxIssueCount int `json:"max_issue_count"` + MaxIssueWeight int `json:"max_issue_weight"` + Milestone *Milestone `json:"milestone"` + Position int `json:"position"` +} + +func (b BoardList) String() string { + return Stringify(b) +} + +// CreateIssueBoardOptions represents the available CreateIssueBoard() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html#create-an-issue-board +type CreateIssueBoardOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` +} + +// CreateIssueBoard creates a new issue board. 
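+//
+// A sketch, assuming the boards service is exposed on the client as Boards
+// and using this package's gitlab.String helper (values illustrative):
+//
+//	board, _, err := git.Boards.CreateIssueBoard("group/project",
+//		&gitlab.CreateIssueBoardOptions{Name: gitlab.String("Sprint board")},
+//	)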
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html#create-an-issue-board +func (s *IssueBoardsService) CreateIssueBoard(pid interface{}, opt *CreateIssueBoardOptions, options ...RequestOptionFunc) (*IssueBoard, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/boards", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + board := new(IssueBoard) + resp, err := s.client.Do(req, board) + if err != nil { + return nil, resp, err + } + + return board, resp, nil +} + +// UpdateIssueBoardOptions represents the available UpdateIssueBoard() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html#update-an-issue-board +type UpdateIssueBoardOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` + MilestoneID *int `url:"milestone_id,omitempty" json:"milestone_id,omitempty"` + Labels *LabelOptions `url:"labels,omitempty" json:"labels,omitempty"` + Weight *int `url:"weight,omitempty" json:"weight,omitempty"` +} + +// UpdateIssueBoard update an issue board. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html#update-an-issue-board +func (s *IssueBoardsService) UpdateIssueBoard(pid interface{}, board int, opt *UpdateIssueBoardOptions, options ...RequestOptionFunc) (*IssueBoard, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/boards/%d", PathEscape(project), board) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, nil, err + } + + is := new(IssueBoard) + resp, err := s.client.Do(req, is) + if err != nil { + return nil, resp, err + } + + return is, resp, nil +} + +// DeleteIssueBoard deletes an issue board. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html#delete-an-issue-board +func (s *IssueBoardsService) DeleteIssueBoard(pid interface{}, board int, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/boards/%d", PathEscape(project), board) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// ListIssueBoardsOptions represents the available ListIssueBoards() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html#list-project-issue-boards +type ListIssueBoardsOptions ListOptions + +// ListIssueBoards gets a list of all issue boards in a project. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html#list-project-issue-boards +func (s *IssueBoardsService) ListIssueBoards(pid interface{}, opt *ListIssueBoardsOptions, options ...RequestOptionFunc) ([]*IssueBoard, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/boards", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var is []*IssueBoard + resp, err := s.client.Do(req, &is) + if err != nil { + return nil, resp, err + } + + return is, resp, nil +} + +// GetIssueBoard gets a single issue board of a project. 
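+//
+// Sketch (board ID illustrative):
+//
+//	board, _, err := git.Boards.GetIssueBoard("group/project", 1)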
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html#show-a-single-issue-board +func (s *IssueBoardsService) GetIssueBoard(pid interface{}, board int, options ...RequestOptionFunc) (*IssueBoard, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/boards/%d", PathEscape(project), board) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + ib := new(IssueBoard) + resp, err := s.client.Do(req, ib) + if err != nil { + return nil, resp, err + } + + return ib, resp, nil +} + +// GetIssueBoardListsOptions represents the available GetIssueBoardLists() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html#list-board-lists-in-a-project-issue-board +type GetIssueBoardListsOptions ListOptions + +// GetIssueBoardLists gets a list of the issue board's lists. Does not include +// backlog and closed lists. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html#list-board-lists-in-a-project-issue-board +func (s *IssueBoardsService) GetIssueBoardLists(pid interface{}, board int, opt *GetIssueBoardListsOptions, options ...RequestOptionFunc) ([]*BoardList, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/boards/%d/lists", PathEscape(project), board) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var bl []*BoardList + resp, err := s.client.Do(req, &bl) + if err != nil { + return nil, resp, err + } + + return bl, resp, nil +} + +// GetIssueBoardList gets a single issue board list. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html#show-a-single-board-list +func (s *IssueBoardsService) GetIssueBoardList(pid interface{}, board, list int, options ...RequestOptionFunc) (*BoardList, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/boards/%d/lists/%d", + PathEscape(project), + board, + list, + ) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + bl := new(BoardList) + resp, err := s.client.Do(req, bl) + if err != nil { + return nil, resp, err + } + + return bl, resp, nil +} + +// CreateIssueBoardListOptions represents the available CreateIssueBoardList() +// options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html#create-a-board-list +type CreateIssueBoardListOptions struct { + LabelID *int `url:"label_id,omitempty" json:"label_id,omitempty"` + AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` + MilestoneID *int `url:"milestone_id,omitempty" json:"milestone_id,omitempty"` + IterationID *int `url:"iteration_id,omitempty" json:"iteration_id,omitempty"` +} + +// CreateIssueBoardList creates a new issue board list. 
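+//
+// A board list is usually backed by a single label, assignee, milestone or
+// iteration; a label-backed sketch (board and label IDs illustrative):
+//
+//	list, _, err := git.Boards.CreateIssueBoardList("group/project", 1,
+//		&gitlab.CreateIssueBoardListOptions{LabelID: gitlab.Int(25)},
+//	)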
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html#create-a-board-list +func (s *IssueBoardsService) CreateIssueBoardList(pid interface{}, board int, opt *CreateIssueBoardListOptions, options ...RequestOptionFunc) (*BoardList, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/boards/%d/lists", PathEscape(project), board) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + bl := new(BoardList) + resp, err := s.client.Do(req, bl) + if err != nil { + return nil, resp, err + } + + return bl, resp, nil +} + +// UpdateIssueBoardListOptions represents the available UpdateIssueBoardList() +// options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html#reorder-a-list-in-a-board +type UpdateIssueBoardListOptions struct { + Position *int `url:"position" json:"position"` +} + +// UpdateIssueBoardList updates the position of an existing issue board list. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html#reorder-a-list-in-a-board +func (s *IssueBoardsService) UpdateIssueBoardList(pid interface{}, board, list int, opt *UpdateIssueBoardListOptions, options ...RequestOptionFunc) (*BoardList, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/boards/%d/lists/%d", + PathEscape(project), + board, + list, + ) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, nil, err + } + + bl := new(BoardList) + resp, err := s.client.Do(req, bl) + if err != nil { + return nil, resp, err + } + + return bl, resp, nil +} + +// DeleteIssueBoardList soft deletes an issue board list. Only for admins and +// project owners. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/boards.html#delete-a-board-list-from-a-board +func (s *IssueBoardsService) DeleteIssueBoardList(pid interface{}, board, list int, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/boards/%d/lists/%d", + PathEscape(project), + board, + list, + ) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/branches.go b/vendor/github.com/xanzy/go-gitlab/branches.go new file mode 100644 index 0000000000..2ff5b81ea8 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/branches.go @@ -0,0 +1,252 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" + "net/url" +) + +// BranchesService handles communication with the branch related methods +// of the GitLab API. 
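+//
+// A search sketch, assuming a Branches field on the client (the search term
+// is hypothetical):
+//
+//	branches, _, err := git.Branches.ListBranches("group/project",
+//		&gitlab.ListBranchesOptions{Search: gitlab.String("release-")},
+//	)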
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/branches.html +type BranchesService struct { + client *Client +} + +// Branch represents a GitLab branch. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/branches.html +type Branch struct { + Commit *Commit `json:"commit"` + Name string `json:"name"` + Protected bool `json:"protected"` + Merged bool `json:"merged"` + Default bool `json:"default"` + CanPush bool `json:"can_push"` + DevelopersCanPush bool `json:"developers_can_push"` + DevelopersCanMerge bool `json:"developers_can_merge"` + WebURL string `json:"web_url"` +} + +func (b Branch) String() string { + return Stringify(b) +} + +// ListBranchesOptions represents the available ListBranches() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/branches.html#list-repository-branches +type ListBranchesOptions struct { + ListOptions + Search *string `url:"search,omitempty" json:"search,omitempty"` + Regex *string `url:"regex,omitempty" json:"regex,omitempty"` +} + +// ListBranches gets a list of repository branches from a project, sorted by +// name alphabetically. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/branches.html#list-repository-branches +func (s *BranchesService) ListBranches(pid interface{}, opts *ListBranchesOptions, options ...RequestOptionFunc) ([]*Branch, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/repository/branches", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opts, options) + if err != nil { + return nil, nil, err + } + + var b []*Branch + resp, err := s.client.Do(req, &b) + if err != nil { + return nil, resp, err + } + + return b, resp, nil +} + +// GetBranch gets a single project repository branch. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/branches.html#get-single-repository-branch +func (s *BranchesService) GetBranch(pid interface{}, branch string, options ...RequestOptionFunc) (*Branch, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/repository/branches/%s", PathEscape(project), url.PathEscape(branch)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + b := new(Branch) + resp, err := s.client.Do(req, b) + if err != nil { + return nil, resp, err + } + + return b, resp, nil +} + +// ProtectBranchOptions represents the available ProtectBranch() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/branches.html#protect-repository-branch +type ProtectBranchOptions struct { + DevelopersCanPush *bool `url:"developers_can_push,omitempty" json:"developers_can_push,omitempty"` + DevelopersCanMerge *bool `url:"developers_can_merge,omitempty" json:"developers_can_merge,omitempty"` +} + +// ProtectBranch protects a single project repository branch. This is an +// idempotent function, protecting an already protected repository branch +// still returns a 200 OK status code. 
+// +// Deprecated: This endpoint has been replaced by +// ProtectedBranchesService.ProtectRepositoryBranches() +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/branches.html#protect-repository-branch +func (s *BranchesService) ProtectBranch(pid interface{}, branch string, opts *ProtectBranchOptions, options ...RequestOptionFunc) (*Branch, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/repository/branches/%s/protect", PathEscape(project), url.PathEscape(branch)) + + req, err := s.client.NewRequest(http.MethodPut, u, opts, options) + if err != nil { + return nil, nil, err + } + + b := new(Branch) + resp, err := s.client.Do(req, b) + if err != nil { + return nil, resp, err + } + + return b, resp, nil +} + +// UnprotectBranch unprotects a single project repository branch. This is an +// idempotent function, unprotecting an already unprotected repository branch +// still returns a 200 OK status code. +// +// Deprecated: This endpoint has been replaced by +// ProtectedBranchesService.UnprotectRepositoryBranches() +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/branches.html#unprotect-repository-branch +func (s *BranchesService) UnprotectBranch(pid interface{}, branch string, options ...RequestOptionFunc) (*Branch, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/repository/branches/%s/unprotect", PathEscape(project), url.PathEscape(branch)) + + req, err := s.client.NewRequest(http.MethodPut, u, nil, options) + if err != nil { + return nil, nil, err + } + + b := new(Branch) + resp, err := s.client.Do(req, b) + if err != nil { + return nil, resp, err + } + + return b, resp, nil +} + +// CreateBranchOptions represents the available CreateBranch() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/branches.html#create-repository-branch +type CreateBranchOptions struct { + Branch *string `url:"branch,omitempty" json:"branch,omitempty"` + Ref *string `url:"ref,omitempty" json:"ref,omitempty"` +} + +// CreateBranch creates branch from commit SHA or existing branch. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/branches.html#create-repository-branch +func (s *BranchesService) CreateBranch(pid interface{}, opt *CreateBranchOptions, options ...RequestOptionFunc) (*Branch, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/repository/branches", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + b := new(Branch) + resp, err := s.client.Do(req, b) + if err != nil { + return nil, resp, err + } + + return b, resp, nil +} + +// DeleteBranch deletes an existing branch. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/branches.html#delete-repository-branch +func (s *BranchesService) DeleteBranch(pid interface{}, branch string, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/repository/branches/%s", PathEscape(project), url.PathEscape(branch)) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// DeleteMergedBranches deletes all branches that are merged into the project's default branch. 
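+//
+// This operation is destructive, so double-check the target project; sketch
+// (project path hypothetical):
+//
+//	_, err := git.Branches.DeleteMergedBranches("group/project")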
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/branches.html#delete-merged-branches
+func (s *BranchesService) DeleteMergedBranches(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/repository/merged_branches", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/broadcast_messages.go b/vendor/github.com/xanzy/go-gitlab/broadcast_messages.go
new file mode 100644
index 0000000000..3d0c61d9fc
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/broadcast_messages.go
@@ -0,0 +1,191 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// BroadcastMessagesService handles communication with the broadcast
+// messages methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/broadcast_messages.html
+type BroadcastMessagesService struct {
+	client *Client
+}
+
+// BroadcastMessage represents a GitLab broadcast message.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/broadcast_messages.html#get-all-broadcast-messages
+type BroadcastMessage struct {
+	Message            string             `json:"message"`
+	StartsAt           *time.Time         `json:"starts_at"`
+	EndsAt             *time.Time         `json:"ends_at"`
+	Font               string             `json:"font"`
+	ID                 int                `json:"id"`
+	Active             bool               `json:"active"`
+	TargetAccessLevels []AccessLevelValue `json:"target_access_levels"`
+	TargetPath         string             `json:"target_path"`
+	BroadcastType      string             `json:"broadcast_type"`
+	Dismissable        bool               `json:"dismissable"`
+
+	// Deprecated: This parameter was removed in GitLab 15.6.
+	Color string `json:"color"`
+}
+
+// ListBroadcastMessagesOptions represents the available ListBroadcastMessages()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/broadcast_messages.html#get-all-broadcast-messages
+type ListBroadcastMessagesOptions ListOptions
+
+// ListBroadcastMessages gets a list of all broadcast messages.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/broadcast_messages.html#get-all-broadcast-messages
+func (s *BroadcastMessagesService) ListBroadcastMessages(opt *ListBroadcastMessagesOptions, options ...RequestOptionFunc) ([]*BroadcastMessage, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodGet, "broadcast_messages", opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var bs []*BroadcastMessage
+	resp, err := s.client.Do(req, &bs)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return bs, resp, nil
+}
+
+// GetBroadcastMessage gets a single broadcast message.
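+//
+// Sketch, assuming the service is exposed on the client as BroadcastMessage
+// (message ID illustrative):
+//
+//	msg, _, err := git.BroadcastMessage.GetBroadcastMessage(3)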
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/broadcast_messages.html#get-a-specific-broadcast-message
+func (s *BroadcastMessagesService) GetBroadcastMessage(broadcast int, options ...RequestOptionFunc) (*BroadcastMessage, *Response, error) {
+	u := fmt.Sprintf("broadcast_messages/%d", broadcast)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	b := new(BroadcastMessage)
+	resp, err := s.client.Do(req, &b)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return b, resp, nil
+}
+
+// CreateBroadcastMessageOptions represents the available CreateBroadcastMessage()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/broadcast_messages.html#create-a-broadcast-message
+type CreateBroadcastMessageOptions struct {
+	Message            *string            `url:"message" json:"message"`
+	StartsAt           *time.Time         `url:"starts_at,omitempty" json:"starts_at,omitempty"`
+	EndsAt             *time.Time         `url:"ends_at,omitempty" json:"ends_at,omitempty"`
+	Font               *string            `url:"font,omitempty" json:"font,omitempty"`
+	TargetAccessLevels []AccessLevelValue `url:"target_access_levels,omitempty" json:"target_access_levels,omitempty"`
+	TargetPath         *string            `url:"target_path,omitempty" json:"target_path,omitempty"`
+	BroadcastType      *string            `url:"broadcast_type,omitempty" json:"broadcast_type,omitempty"`
+	Dismissable        *bool              `url:"dismissable,omitempty" json:"dismissable,omitempty"`
+
+	// Deprecated: This parameter was removed in GitLab 15.6.
+	Color *string `url:"color,omitempty" json:"color,omitempty"`
+}
+
+// CreateBroadcastMessage creates a message to broadcast.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/broadcast_messages.html#create-a-broadcast-message
+func (s *BroadcastMessagesService) CreateBroadcastMessage(opt *CreateBroadcastMessageOptions, options ...RequestOptionFunc) (*BroadcastMessage, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodPost, "broadcast_messages", opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	b := new(BroadcastMessage)
+	resp, err := s.client.Do(req, &b)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return b, resp, nil
+}
+
+// UpdateBroadcastMessageOptions represents the available UpdateBroadcastMessage()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/broadcast_messages.html#update-a-broadcast-message
+type UpdateBroadcastMessageOptions struct {
+	Message            *string            `url:"message,omitempty" json:"message,omitempty"`
+	StartsAt           *time.Time         `url:"starts_at,omitempty" json:"starts_at,omitempty"`
+	EndsAt             *time.Time         `url:"ends_at,omitempty" json:"ends_at,omitempty"`
+	Font               *string            `url:"font,omitempty" json:"font,omitempty"`
+	TargetAccessLevels []AccessLevelValue `url:"target_access_levels,omitempty" json:"target_access_levels,omitempty"`
+	TargetPath         *string            `url:"target_path,omitempty" json:"target_path,omitempty"`
+	BroadcastType      *string            `url:"broadcast_type,omitempty" json:"broadcast_type,omitempty"`
+	Dismissable        *bool              `url:"dismissable,omitempty" json:"dismissable,omitempty"`
+
+	// Deprecated: This parameter was removed in GitLab 15.6.
+	Color *string `url:"color,omitempty" json:"color,omitempty"`
+}
+
+// UpdateBroadcastMessage updates a broadcast message.
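+//
+// Sketch (message ID and text illustrative):
+//
+//	msg, _, err := git.BroadcastMessage.UpdateBroadcastMessage(3,
+//		&gitlab.UpdateBroadcastMessageOptions{Message: gitlab.String("Maintenance done")},
+//	)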
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/broadcast_messages.html#update-a-broadcast-message
+func (s *BroadcastMessagesService) UpdateBroadcastMessage(broadcast int, opt *UpdateBroadcastMessageOptions, options ...RequestOptionFunc) (*BroadcastMessage, *Response, error) {
+	u := fmt.Sprintf("broadcast_messages/%d", broadcast)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	b := new(BroadcastMessage)
+	resp, err := s.client.Do(req, &b)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return b, resp, nil
+}
+
+// DeleteBroadcastMessage deletes a broadcast message.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/broadcast_messages.html#delete-a-broadcast-message
+func (s *BroadcastMessagesService) DeleteBroadcastMessage(broadcast int, options ...RequestOptionFunc) (*Response, error) {
+	u := fmt.Sprintf("broadcast_messages/%d", broadcast)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/ci_yml_templates.go b/vendor/github.com/xanzy/go-gitlab/ci_yml_templates.go
new file mode 100644
index 0000000000..992fe11802
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/ci_yml_templates.go
@@ -0,0 +1,95 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+)
+
+// CIYMLTemplatesService handles communication with the GitLab CI YML
+// templates related methods of the GitLab API.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/templates/gitlab_ci_ymls.html
+type CIYMLTemplatesService struct {
+	client *Client
+}
+
+// CIYMLTemplate represents a GitLab CI YML template.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/templates/gitlab_ci_ymls.html
+type CIYMLTemplate struct {
+	Name    string `json:"name"`
+	Content string `json:"content"`
+}
+
+// CIYMLTemplateListItem represents a GitLab CI YML template from the list.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/templates/gitlab_ci_ymls.html
+type CIYMLTemplateListItem struct {
+	Key  string `json:"key"`
+	Name string `json:"name"`
+}
+
+// ListCIYMLTemplatesOptions represents the available ListAllTemplates() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/templates/gitlab_ci_ymls.html#list-gitlab-ci-yaml-templates
+type ListCIYMLTemplatesOptions ListOptions
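A brief usage sketch of the template service whose methods follow just below; `svc` is assumed to come from a configured `*gitlab.Client`, and the "Go" template key is only an example value.

```go
package examples

import (
	"fmt"

	gitlab "github.com/xanzy/go-gitlab"
)

// printCITemplate fetches a single canned CI template by key (for
// example "Go") and prints its YAML content.
func printCITemplate(svc *gitlab.CIYMLTemplatesService, key string) error {
	tmpl, _, err := svc.GetTemplate(key)
	if err != nil {
		return err
	}
	fmt.Println(tmpl.Content)
	return nil
}
```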
+// ListAllTemplates gets all GitLab CI YML templates.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/templates/gitlab_ci_ymls.html#list-gitlab-ci-yaml-templates
+func (s *CIYMLTemplatesService) ListAllTemplates(opt *ListCIYMLTemplatesOptions, options ...RequestOptionFunc) ([]*CIYMLTemplateListItem, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodGet, "templates/gitlab_ci_ymls", opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var cts []*CIYMLTemplateListItem
+	resp, err := s.client.Do(req, &cts)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return cts, resp, nil
+}
+
+// GetTemplate gets a single GitLab CI YML template.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/templates/gitlab_ci_ymls.html#single-gitlab-ci-yaml-template
+func (s *CIYMLTemplatesService) GetTemplate(key string, options ...RequestOptionFunc) (*CIYMLTemplate, *Response, error) {
+	u := fmt.Sprintf("templates/gitlab_ci_ymls/%s", PathEscape(key))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	ct := new(CIYMLTemplate)
+	resp, err := s.client.Do(req, ct)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ct, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/client_options.go b/vendor/github.com/xanzy/go-gitlab/client_options.go
new file mode 100644
index 0000000000..2ff7bab9bf
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/client_options.go
@@ -0,0 +1,142 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"net/http"
+	"time"
+
+	retryablehttp "github.com/hashicorp/go-retryablehttp"
+)
+
+// ClientOptionFunc can be used to customize a new GitLab API client.
+type ClientOptionFunc func(*Client) error
+
+// WithBaseURL sets the base URL for API requests to a custom endpoint.
+func WithBaseURL(urlStr string) ClientOptionFunc {
+	return func(c *Client) error {
+		return c.setBaseURL(urlStr)
+	}
+}
+
+// WithCustomBackoff can be used to configure a custom backoff policy.
+func WithCustomBackoff(backoff retryablehttp.Backoff) ClientOptionFunc {
+	return func(c *Client) error {
+		c.client.Backoff = backoff
+		return nil
+	}
+}
+
+// WithCustomLeveledLogger can be used to configure a custom retryablehttp
+// leveled logger.
+func WithCustomLeveledLogger(leveledLogger retryablehttp.LeveledLogger) ClientOptionFunc {
+	return func(c *Client) error {
+		c.client.Logger = leveledLogger
+		return nil
+	}
+}
+
+// WithCustomLimiter injects a custom rate limiter into the client.
+func WithCustomLimiter(limiter RateLimiter) ClientOptionFunc {
+	return func(c *Client) error {
+		c.configureLimiterOnce.Do(func() {})
+		c.limiter = limiter
+		return nil
+	}
+}
+
+// WithCustomLogger can be used to configure a custom retryablehttp logger.
+func WithCustomLogger(logger retryablehttp.Logger) ClientOptionFunc {
+	return func(c *Client) error {
+		c.client.Logger = logger
+		return nil
+	}
+}
+
+// WithCustomRetry can be used to configure a custom retry policy.
+func WithCustomRetry(checkRetry retryablehttp.CheckRetry) ClientOptionFunc { + return func(c *Client) error { + c.client.CheckRetry = checkRetry + return nil + } +} + +// WithCustomRetryMax can be used to configure a custom maximum number of retries. +func WithCustomRetryMax(retryMax int) ClientOptionFunc { + return func(c *Client) error { + c.client.RetryMax = retryMax + return nil + } +} + +// WithCustomRetryWaitMinMax can be used to configure a custom minimum and +// maximum time to wait between retries. +func WithCustomRetryWaitMinMax(waitMin, waitMax time.Duration) ClientOptionFunc { + return func(c *Client) error { + c.client.RetryWaitMin = waitMin + c.client.RetryWaitMax = waitMax + return nil + } +} + +// WithErrorHandler can be used to configure a custom error handler. +func WithErrorHandler(handler retryablehttp.ErrorHandler) ClientOptionFunc { + return func(c *Client) error { + c.client.ErrorHandler = handler + return nil + } +} + +// WithHTTPClient can be used to configure a custom HTTP client. +func WithHTTPClient(httpClient *http.Client) ClientOptionFunc { + return func(c *Client) error { + c.client.HTTPClient = httpClient + return nil + } +} + +// WithRequestLogHook can be used to configure a custom request log hook. +func WithRequestLogHook(hook retryablehttp.RequestLogHook) ClientOptionFunc { + return func(c *Client) error { + c.client.RequestLogHook = hook + return nil + } +} + +// WithResponseLogHook can be used to configure a custom response log hook. +func WithResponseLogHook(hook retryablehttp.ResponseLogHook) ClientOptionFunc { + return func(c *Client) error { + c.client.ResponseLogHook = hook + return nil + } +} + +// WithoutRetries disables the default retry logic. +func WithoutRetries() ClientOptionFunc { + return func(c *Client) error { + c.disableRetries = true + return nil + } +} + +// WithRequestOptions can be used to configure default request options applied to every request. +func WithRequestOptions(options ...RequestOptionFunc) ClientOptionFunc { + return func(c *Client) error { + c.defaultRequestOptions = append(c.defaultRequestOptions, options...) + return nil + } +} diff --git a/vendor/github.com/xanzy/go-gitlab/cluster_agents.go b/vendor/github.com/xanzy/go-gitlab/cluster_agents.go new file mode 100644 index 0000000000..1153feae68 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/cluster_agents.go @@ -0,0 +1,294 @@ +// +// Copyright 2022, Timo Furrer +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" + "time" +) + +// ClusterAgentsService handles communication with the cluster agents related +// methods of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/cluster_agents.html +type ClusterAgentsService struct { + client *Client +} + +// Agent represents a GitLab agent for Kubernetes. 
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/cluster_agents.html
+type Agent struct {
+	ID              int           `json:"id"`
+	Name            string        `json:"name"`
+	CreatedAt       *time.Time    `json:"created_at"`
+	CreatedByUserID int           `json:"created_by_user_id"`
+	ConfigProject   ConfigProject `json:"config_project"`
+}
+
+type ConfigProject struct {
+	ID                int        `json:"id"`
+	Description       string     `json:"description"`
+	Name              string     `json:"name"`
+	NameWithNamespace string     `json:"name_with_namespace"`
+	Path              string     `json:"path"`
+	PathWithNamespace string     `json:"path_with_namespace"`
+	CreatedAt         *time.Time `json:"created_at"`
+}
+
+func (a Agent) String() string {
+	return Stringify(a)
+}
+
+// AgentToken represents a GitLab agent token.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/cluster_agents.html#list-tokens-for-an-agent
+type AgentToken struct {
+	ID              int        `json:"id"`
+	Name            string     `json:"name"`
+	Description     string     `json:"description"`
+	AgentID         int        `json:"agent_id"`
+	Status          string     `json:"status"`
+	CreatedAt       *time.Time `json:"created_at"`
+	CreatedByUserID int        `json:"created_by_user_id"`
+	LastUsedAt      *time.Time `json:"last_used_at"`
+	Token           string     `json:"token"`
+}
+
+func (a AgentToken) String() string {
+	return Stringify(a)
+}
+
+// ListAgentsOptions represents the available ListAgents() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/cluster_agents.html#list-the-agents-for-a-project
+type ListAgentsOptions ListOptions
+
+// ListAgents returns a list of agents registered for the project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/cluster_agents.html#list-the-agents-for-a-project
+func (s *ClusterAgentsService) ListAgents(pid interface{}, opt *ListAgentsOptions, options ...RequestOptionFunc) ([]*Agent, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	uri := fmt.Sprintf("projects/%s/cluster_agents", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, uri, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var as []*Agent
+	resp, err := s.client.Do(req, &as)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return as, resp, nil
+}
+
+// GetAgent gets the details of a single agent.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/cluster_agents.html#get-details-about-an-agent
+func (s *ClusterAgentsService) GetAgent(pid interface{}, id int, options ...RequestOptionFunc) (*Agent, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	uri := fmt.Sprintf("projects/%s/cluster_agents/%d", PathEscape(project), id)
+
+	req, err := s.client.NewRequest(http.MethodGet, uri, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	a := new(Agent)
+	resp, err := s.client.Do(req, a)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return a, resp, nil
+}
+
+// RegisterAgentOptions represents the available RegisterAgent()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/cluster_agents.html#register-an-agent-with-a-project
+type RegisterAgentOptions struct {
+	Name *string `url:"name,omitempty" json:"name,omitempty"`
+}
+
+// RegisterAgent registers an agent with a project.
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/cluster_agents.html#register-an-agent-with-a-project +func (s *ClusterAgentsService) RegisterAgent(pid interface{}, opt *RegisterAgentOptions, options ...RequestOptionFunc) (*Agent, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + uri := fmt.Sprintf("projects/%s/cluster_agents", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPost, uri, opt, options) + if err != nil { + return nil, nil, err + } + + a := new(Agent) + resp, err := s.client.Do(req, a) + if err != nil { + return nil, resp, err + } + + return a, resp, nil +} + +// DeleteAgent deletes an existing agent registration. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/cluster_agents.html#delete-a-registered-agent +func (s *ClusterAgentsService) DeleteAgent(pid interface{}, id int, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + uri := fmt.Sprintf("projects/%s/cluster_agents/%d", PathEscape(project), id) + + req, err := s.client.NewRequest(http.MethodDelete, uri, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// ListAgentTokensOptions represents the available ListAgentTokens() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/cluster_agents.html#list-tokens-for-an-agent +type ListAgentTokensOptions ListOptions + +// ListAgentTokens returns a list of tokens for an agent. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/cluster_agents.html#list-tokens-for-an-agent +func (s *ClusterAgentsService) ListAgentTokens(pid interface{}, aid int, opt *ListAgentTokensOptions, options ...RequestOptionFunc) ([]*AgentToken, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + uri := fmt.Sprintf("projects/%s/cluster_agents/%d/tokens", PathEscape(project), aid) + + req, err := s.client.NewRequest(http.MethodGet, uri, opt, options) + if err != nil { + return nil, nil, err + } + + var ats []*AgentToken + resp, err := s.client.Do(req, &ats) + if err != nil { + return nil, resp, err + } + + return ats, resp, nil +} + +// GetAgentToken gets a single agent token. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/cluster_agents.html#get-a-single-agent-token +func (s *ClusterAgentsService) GetAgentToken(pid interface{}, aid int, id int, options ...RequestOptionFunc) (*AgentToken, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + uri := fmt.Sprintf("projects/%s/cluster_agents/%d/tokens/%d", PathEscape(project), aid, id) + + req, err := s.client.NewRequest(http.MethodGet, uri, nil, options) + if err != nil { + return nil, nil, err + } + + at := new(AgentToken) + resp, err := s.client.Do(req, at) + if err != nil { + return nil, resp, err + } + + return at, resp, nil +} + +// CreateAgentTokenOptions represents the available CreateAgentToken() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/cluster_agents.html#create-an-agent-token +type CreateAgentTokenOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` +} + +// CreateAgentToken creates a new token for an agent. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/cluster_agents.html#create-an-agent-token +func (s *ClusterAgentsService) CreateAgentToken(pid interface{}, aid int, opt *CreateAgentTokenOptions, options ...RequestOptionFunc) (*AgentToken, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + uri := fmt.Sprintf("projects/%s/cluster_agents/%d/tokens", PathEscape(project), aid) + + req, err := s.client.NewRequest(http.MethodPost, uri, opt, options) + if err != nil { + return nil, nil, err + } + + at := new(AgentToken) + resp, err := s.client.Do(req, at) + if err != nil { + return nil, resp, err + } + + return at, resp, nil +} + +// RevokeAgentToken revokes an agent token. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/cluster_agents.html#revoke-an-agent-token +func (s *ClusterAgentsService) RevokeAgentToken(pid interface{}, aid int, id int, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + uri := fmt.Sprintf("projects/%s/cluster_agents/%d/tokens/%d", PathEscape(project), aid, id) + + req, err := s.client.NewRequest(http.MethodDelete, uri, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/commits.go b/vendor/github.com/xanzy/go-gitlab/commits.go new file mode 100644 index 0000000000..c1a9ef3d60 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/commits.go @@ -0,0 +1,610 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" + "net/url" + "time" +) + +// CommitsService handles communication with the commit related methods +// of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html +type CommitsService struct { + client *Client +} + +// Commit represents a GitLab commit. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html +type Commit struct { + ID string `json:"id"` + ShortID string `json:"short_id"` + Title string `json:"title"` + AuthorName string `json:"author_name"` + AuthorEmail string `json:"author_email"` + AuthoredDate *time.Time `json:"authored_date"` + CommitterName string `json:"committer_name"` + CommitterEmail string `json:"committer_email"` + CommittedDate *time.Time `json:"committed_date"` + CreatedAt *time.Time `json:"created_at"` + Message string `json:"message"` + ParentIDs []string `json:"parent_ids"` + Stats *CommitStats `json:"stats"` + Status *BuildStateValue `json:"status"` + LastPipeline *PipelineInfo `json:"last_pipeline"` + ProjectID int `json:"project_id"` + Trailers map[string]string `json:"trailers"` + ExtendedTrailers map[string]string `json:"extended_trailers"` + WebURL string `json:"web_url"` +} + +// CommitStats represents the number of added and deleted files in a commit. 
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html +type CommitStats struct { + Additions int `json:"additions"` + Deletions int `json:"deletions"` + Total int `json:"total"` +} + +func (c Commit) String() string { + return Stringify(c) +} + +// ListCommitsOptions represents the available ListCommits() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#list-repository-commits +type ListCommitsOptions struct { + ListOptions + RefName *string `url:"ref_name,omitempty" json:"ref_name,omitempty"` + Since *time.Time `url:"since,omitempty" json:"since,omitempty"` + Until *time.Time `url:"until,omitempty" json:"until,omitempty"` + Path *string `url:"path,omitempty" json:"path,omitempty"` + Author *string `url:"author,omitempty" json:"author,omitempty"` + All *bool `url:"all,omitempty" json:"all,omitempty"` + WithStats *bool `url:"with_stats,omitempty" json:"with_stats,omitempty"` + FirstParent *bool `url:"first_parent,omitempty" json:"first_parent,omitempty"` + Trailers *bool `url:"trailers,omitempty" json:"trailers,omitempty"` +} + +// ListCommits gets a list of repository commits in a project. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#list-repository-commits +func (s *CommitsService) ListCommits(pid interface{}, opt *ListCommitsOptions, options ...RequestOptionFunc) ([]*Commit, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/repository/commits", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var c []*Commit + resp, err := s.client.Do(req, &c) + if err != nil { + return nil, resp, err + } + + return c, resp, nil +} + +// CommitRef represents the reference of branches/tags in a commit. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/commits.html#get-references-a-commit-is-pushed-to +type CommitRef struct { + Type string `json:"type"` + Name string `json:"name"` +} + +// GetCommitRefsOptions represents the available GetCommitRefs() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/commits.html#get-references-a-commit-is-pushed-to +type GetCommitRefsOptions struct { + ListOptions + Type *string `url:"type,omitempty" json:"type,omitempty"` +} + +// GetCommitRefs gets all references (from branches or tags) a commit is pushed to +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/commits.html#get-references-a-commit-is-pushed-to +func (s *CommitsService) GetCommitRefs(pid interface{}, sha string, opt *GetCommitRefsOptions, options ...RequestOptionFunc) ([]*CommitRef, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/repository/commits/%s/refs", PathEscape(project), url.PathEscape(sha)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var cs []*CommitRef + resp, err := s.client.Do(req, &cs) + if err != nil { + return nil, resp, err + } + + return cs, resp, nil +} + +// GetCommitOptions represents the available GetCommit() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/commits.html#get-a-single-commit +type GetCommitOptions struct { + Stats *bool `url:"stats,omitempty" json:"stats,omitempty"` +} + +// GetCommit gets a specific commit identified by the commit hash or name of a +// branch or tag. 
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#get-a-single-commit +func (s *CommitsService) GetCommit(pid interface{}, sha string, opt *GetCommitOptions, options ...RequestOptionFunc) (*Commit, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + if sha == "" { + return nil, nil, fmt.Errorf("SHA must be a non-empty string") + } + u := fmt.Sprintf("projects/%s/repository/commits/%s", PathEscape(project), url.PathEscape(sha)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + c := new(Commit) + resp, err := s.client.Do(req, c) + if err != nil { + return nil, resp, err + } + + return c, resp, nil +} + +// CreateCommitOptions represents the available options for a new commit. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#create-a-commit-with-multiple-files-and-actions +type CreateCommitOptions struct { + Branch *string `url:"branch,omitempty" json:"branch,omitempty"` + CommitMessage *string `url:"commit_message,omitempty" json:"commit_message,omitempty"` + StartBranch *string `url:"start_branch,omitempty" json:"start_branch,omitempty"` + StartSHA *string `url:"start_sha,omitempty" json:"start_sha,omitempty"` + StartProject *string `url:"start_project,omitempty" json:"start_project,omitempty"` + Actions []*CommitActionOptions `url:"actions" json:"actions"` + AuthorEmail *string `url:"author_email,omitempty" json:"author_email,omitempty"` + AuthorName *string `url:"author_name,omitempty" json:"author_name,omitempty"` + Stats *bool `url:"stats,omitempty" json:"stats,omitempty"` + Force *bool `url:"force,omitempty" json:"force,omitempty"` +} + +// CommitActionOptions represents the available options for a new single +// file action. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#create-a-commit-with-multiple-files-and-actions +type CommitActionOptions struct { + Action *FileActionValue `url:"action,omitempty" json:"action,omitempty"` + FilePath *string `url:"file_path,omitempty" json:"file_path,omitempty"` + PreviousPath *string `url:"previous_path,omitempty" json:"previous_path,omitempty"` + Content *string `url:"content,omitempty" json:"content,omitempty"` + Encoding *string `url:"encoding,omitempty" json:"encoding,omitempty"` + LastCommitID *string `url:"last_commit_id,omitempty" json:"last_commit_id,omitempty"` + ExecuteFilemode *bool `url:"execute_filemode,omitempty" json:"execute_filemode,omitempty"` +} + +// CreateCommit creates a commit with multiple files and actions. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#create-a-commit-with-multiple-files-and-actions +func (s *CommitsService) CreateCommit(pid interface{}, opt *CreateCommitOptions, options ...RequestOptionFunc) (*Commit, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/repository/commits", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + c := new(Commit) + resp, err := s.client.Do(req, &c) + if err != nil { + return nil, resp, err + } + + return c, resp, nil +} + +// Diff represents a GitLab diff. 
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html
+type Diff struct {
+	Diff        string `json:"diff"`
+	NewPath     string `json:"new_path"`
+	OldPath     string `json:"old_path"`
+	AMode       string `json:"a_mode"`
+	BMode       string `json:"b_mode"`
+	NewFile     bool   `json:"new_file"`
+	RenamedFile bool   `json:"renamed_file"`
+	DeletedFile bool   `json:"deleted_file"`
+}
+
+func (d Diff) String() string {
+	return Stringify(d)
+}
+
+// GetCommitDiffOptions represents the available GetCommitDiff() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/commits.html#get-the-diff-of-a-commit
+type GetCommitDiffOptions struct {
+	ListOptions
+	Unidiff *bool `url:"unidiff,omitempty" json:"unidiff,omitempty"`
+}
+
+// GetCommitDiff gets the diff of a commit in a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/commits.html#get-the-diff-of-a-commit
+func (s *CommitsService) GetCommitDiff(pid interface{}, sha string, opt *GetCommitDiffOptions, options ...RequestOptionFunc) ([]*Diff, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/repository/commits/%s/diff", PathEscape(project), url.PathEscape(sha))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var d []*Diff
+	resp, err := s.client.Do(req, &d)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return d, resp, nil
+}
+
+// CommitComment represents a GitLab commit comment.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html
+type CommitComment struct {
+	Note     string `json:"note"`
+	Path     string `json:"path"`
+	Line     int    `json:"line"`
+	LineType string `json:"line_type"`
+	Author   Author `json:"author"`
+}
+
+// Author represents a GitLab commit author.
+type Author struct {
+	ID        int        `json:"id"`
+	Username  string     `json:"username"`
+	Email     string     `json:"email"`
+	Name      string     `json:"name"`
+	State     string     `json:"state"`
+	Blocked   bool       `json:"blocked"`
+	CreatedAt *time.Time `json:"created_at"`
+}
+
+func (c CommitComment) String() string {
+	return Stringify(c)
+}
+
+// GetCommitCommentsOptions represents the available GetCommitComments() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/commits.html#get-the-comments-of-a-commit
+type GetCommitCommentsOptions ListOptions
+
+// GetCommitComments gets the comments of a commit in a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/commits.html#get-the-comments-of-a-commit
+func (s *CommitsService) GetCommitComments(pid interface{}, sha string, opt *GetCommitCommentsOptions, options ...RequestOptionFunc) ([]*CommitComment, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/repository/commits/%s/comments", PathEscape(project), url.PathEscape(sha))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var c []*CommitComment
+	resp, err := s.client.Do(req, &c)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return c, resp, nil
+}
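A short usage sketch for the commit diff helper above; `svc`, the project identifier and the SHA are placeholders supplied by the caller, not values from this patch.

```go
package examples

import (
	"fmt"

	gitlab "github.com/xanzy/go-gitlab"
)

// printChangedFiles lists the paths touched by a single commit.
func printChangedFiles(svc *gitlab.CommitsService, pid interface{}, sha string) error {
	diffs, _, err := svc.GetCommitDiff(pid, sha, &gitlab.GetCommitDiffOptions{})
	if err != nil {
		return err
	}
	for _, d := range diffs {
		fmt.Printf("%s -> %s (new=%t deleted=%t)\n", d.OldPath, d.NewPath, d.NewFile, d.DeletedFile)
	}
	return nil
}
```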
+// PostCommitCommentOptions represents the available PostCommitComment()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/commits.html#post-comment-to-commit
+type PostCommitCommentOptions struct {
+	Note     *string `url:"note,omitempty" json:"note,omitempty"`
+	Path     *string `url:"path" json:"path"`
+	Line     *int    `url:"line" json:"line"`
+	LineType *string `url:"line_type" json:"line_type"`
+}
+
+// PostCommitComment adds a comment to a commit. Optionally, you can post a
+// comment on a specific line of a commit; in that case the path, line and
+// line_type fields are required.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/commits.html#post-comment-to-commit
+func (s *CommitsService) PostCommitComment(pid interface{}, sha string, opt *PostCommitCommentOptions, options ...RequestOptionFunc) (*CommitComment, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/repository/commits/%s/comments", PathEscape(project), url.PathEscape(sha))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	c := new(CommitComment)
+	resp, err := s.client.Do(req, c)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return c, resp, nil
+}
+
+// GetCommitStatusesOptions represents the available GetCommitStatuses() options.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#list-the-statuses-of-a-commit
+type GetCommitStatusesOptions struct {
+	ListOptions
+	Ref   *string `url:"ref,omitempty" json:"ref,omitempty"`
+	Stage *string `url:"stage,omitempty" json:"stage,omitempty"`
+	Name  *string `url:"name,omitempty" json:"name,omitempty"`
+	All   *bool   `url:"all,omitempty" json:"all,omitempty"`
+}
+
+// CommitStatus represents a GitLab commit status.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#commit-status
+type CommitStatus struct {
+	ID           int        `json:"id"`
+	SHA          string     `json:"sha"`
+	Ref          string     `json:"ref"`
+	Status       string     `json:"status"`
+	CreatedAt    *time.Time `json:"created_at"`
+	StartedAt    *time.Time `json:"started_at"`
+	FinishedAt   *time.Time `json:"finished_at"`
+	Name         string     `json:"name"`
+	AllowFailure bool       `json:"allow_failure"`
+	Coverage     float64    `json:"coverage"`
+	PipelineId   int        `json:"pipeline_id"`
+	Author       Author     `json:"author"`
+	Description  string     `json:"description"`
+	TargetURL    string     `json:"target_url"`
+}
+
+// GetCommitStatuses gets the statuses of a commit in a project.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#list-the-statuses-of-a-commit
+func (s *CommitsService) GetCommitStatuses(pid interface{}, sha string, opt *GetCommitStatusesOptions, options ...RequestOptionFunc) ([]*CommitStatus, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/repository/commits/%s/statuses", PathEscape(project), url.PathEscape(sha))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var cs []*CommitStatus
+	resp, err := s.client.Do(req, &cs)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return cs, resp, nil
+}
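A sketch of the typical external-CI flow using SetCommitStatus, which is defined just below. The state is assumed to be one of the `BuildStateValue` constants exported by the package (for example `gitlab.Success`); the name, description and URL are placeholders.

```go
package examples

import (
	gitlab "github.com/xanzy/go-gitlab"
)

// reportPipelineResult publishes a commit status, as an external CI
// system would after finishing a build.
func reportPipelineResult(svc *gitlab.CommitsService, pid interface{}, sha string) error {
	opt := &gitlab.SetCommitStatusOptions{
		State:       gitlab.Success, // assumed exported BuildStateValue constant
		Name:        gitlab.String("external-ci"),
		Description: gitlab.String("build passed"),
		TargetURL:   gitlab.String("https://ci.example.com/builds/42"), // placeholder
	}
	_, _, err := svc.SetCommitStatus(pid, sha, opt)
	return err
}
```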
+// SetCommitStatusOptions represents the available SetCommitStatus() options.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#set-the-pipeline-status-of-a-commit
+type SetCommitStatusOptions struct {
+	State       BuildStateValue `url:"state" json:"state"`
+	Ref         *string         `url:"ref,omitempty" json:"ref,omitempty"`
+	Name        *string         `url:"name,omitempty" json:"name,omitempty"`
+	Context     *string         `url:"context,omitempty" json:"context,omitempty"`
+	TargetURL   *string         `url:"target_url,omitempty" json:"target_url,omitempty"`
+	Description *string         `url:"description,omitempty" json:"description,omitempty"`
+	Coverage    *float64        `url:"coverage,omitempty" json:"coverage,omitempty"`
+	PipelineID  *int            `url:"pipeline_id,omitempty" json:"pipeline_id,omitempty"`
+}
+
+// SetCommitStatus sets the status of a commit in a project.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#set-the-pipeline-status-of-a-commit
+func (s *CommitsService) SetCommitStatus(pid interface{}, sha string, opt *SetCommitStatusOptions, options ...RequestOptionFunc) (*CommitStatus, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/statuses/%s", PathEscape(project), url.PathEscape(sha))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	cs := new(CommitStatus)
+	resp, err := s.client.Do(req, &cs)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return cs, resp, nil
+}
+
+// ListMergeRequestsByCommit gets the merge requests associated with a commit.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/commits.html#list-merge-requests-associated-with-a-commit
+func (s *CommitsService) ListMergeRequestsByCommit(pid interface{}, sha string, options ...RequestOptionFunc) ([]*MergeRequest, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/repository/commits/%s/merge_requests", PathEscape(project), url.PathEscape(sha))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var mrs []*MergeRequest
+	resp, err := s.client.Do(req, &mrs)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return mrs, resp, nil
+}
+
+// CherryPickCommitOptions represents the available CherryPickCommit() options.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#cherry-pick-a-commit
+type CherryPickCommitOptions struct {
+	Branch  *string `url:"branch,omitempty" json:"branch,omitempty"`
+	DryRun  *bool   `url:"dry_run,omitempty" json:"dry_run,omitempty"`
+	Message *string `url:"message,omitempty" json:"message,omitempty"`
+}
+
+// CherryPickCommit cherry picks a commit to a given branch.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#cherry-pick-a-commit
+func (s *CommitsService) CherryPickCommit(pid interface{}, sha string, opt *CherryPickCommitOptions, options ...RequestOptionFunc) (*Commit, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/repository/commits/%s/cherry_pick", PathEscape(project), url.PathEscape(sha))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	c := new(Commit)
+	resp, err := s.client.Do(req, &c)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return c, resp, nil
+}
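A sketch of a dry-run cherry-pick using the helper above; the target branch name and SHA are placeholders, and the dry-run flag means no commit is actually created.

```go
package examples

import (
	gitlab "github.com/xanzy/go-gitlab"
)

// canCherryPick checks whether a commit would apply cleanly to a branch
// by running the cherry-pick in dry-run mode.
func canCherryPick(svc *gitlab.CommitsService, pid interface{}, sha string) bool {
	opt := &gitlab.CherryPickCommitOptions{
		Branch: gitlab.String("main"), // target branch (placeholder)
		DryRun: gitlab.Bool(true),     // validate only, do not create the commit
	}
	_, _, err := svc.CherryPickCommit(pid, sha, opt)
	return err == nil
}
```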
+// RevertCommitOptions represents the available RevertCommit() options.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#revert-a-commit
+type RevertCommitOptions struct {
+	Branch *string `url:"branch,omitempty" json:"branch,omitempty"`
+}
+
+// RevertCommit reverts a commit in a given branch.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#revert-a-commit
+func (s *CommitsService) RevertCommit(pid interface{}, sha string, opt *RevertCommitOptions, options ...RequestOptionFunc) (*Commit, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/repository/commits/%s/revert", PathEscape(project), url.PathEscape(sha))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	c := new(Commit)
+	resp, err := s.client.Do(req, &c)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return c, resp, nil
+}
+
+// GPGSignature represents a GitLab commit's GPG signature.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/commits.html#get-gpg-signature-of-a-commit
+type GPGSignature struct {
+	KeyID              int    `json:"gpg_key_id"`
+	KeyPrimaryKeyID    string `json:"gpg_key_primary_keyid"`
+	KeyUserName        string `json:"gpg_key_user_name"`
+	KeyUserEmail       string `json:"gpg_key_user_email"`
+	VerificationStatus string `json:"verification_status"`
+	KeySubkeyID        int    `json:"gpg_key_subkey_id"`
+}
+
+// GetGPGSignature gets a GPG signature of a commit.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#get-gpg-signature-of-a-commit
+func (s *CommitsService) GetGPGSignature(pid interface{}, sha string, options ...RequestOptionFunc) (*GPGSignature, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/repository/commits/%s/signature", PathEscape(project), url.PathEscape(sha))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	sig := new(GPGSignature)
+	resp, err := s.client.Do(req, &sig)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return sig, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/container_registry.go b/vendor/github.com/xanzy/go-gitlab/container_registry.go
new file mode 100644
index 0000000000..bec477df65
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/container_registry.go
@@ -0,0 +1,311 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// ContainerRegistryService handles communication with the container registry
+// related methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/container_registry.html
+type ContainerRegistryService struct {
+	client *Client
+}
+
+// RegistryRepository represents a GitLab container registry repository.
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/container_registry.html +type RegistryRepository struct { + ID int `json:"id"` + Name string `json:"name"` + Path string `json:"path"` + ProjectID int `json:"project_id"` + Location string `json:"location"` + CreatedAt *time.Time `json:"created_at"` + CleanupPolicyStartedAt *time.Time `json:"cleanup_policy_started_at"` + Status *ContainerRegistryStatus `json:"status"` + TagsCount int `json:"tags_count"` + Tags []*RegistryRepositoryTag `json:"tags"` +} + +func (s RegistryRepository) String() string { + return Stringify(s) +} + +// RegistryRepositoryTag represents a GitLab registry image tag. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/container_registry.html +type RegistryRepositoryTag struct { + Name string `json:"name"` + Path string `json:"path"` + Location string `json:"location"` + Revision string `json:"revision"` + ShortRevision string `json:"short_revision"` + Digest string `json:"digest"` + CreatedAt *time.Time `json:"created_at"` + TotalSize int `json:"total_size"` +} + +func (s RegistryRepositoryTag) String() string { + return Stringify(s) +} + +// ListRegistryRepositoriesOptions represents the available +// ListRegistryRepositories() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/container_registry.html#list-registry-repositories +type ListRegistryRepositoriesOptions struct { + ListOptions + + // Deprecated: These options are deprecated for ListGroupRegistryRepositories calls. (Removed in GitLab 15.0) + Tags *bool `url:"tags,omitempty" json:"tags,omitempty"` + TagsCount *bool `url:"tags_count,omitempty" json:"tags_count,omitempty"` +} + +// ListProjectRegistryRepositories gets a list of registry repositories in a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/container_registry.html#within-a-project +func (s *ContainerRegistryService) ListProjectRegistryRepositories(pid interface{}, opt *ListRegistryRepositoriesOptions, options ...RequestOptionFunc) ([]*RegistryRepository, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/registry/repositories", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var repos []*RegistryRepository + resp, err := s.client.Do(req, &repos) + if err != nil { + return nil, resp, err + } + + return repos, resp, nil +} + +// ListGroupRegistryRepositories gets a list of registry repositories in a group. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/container_registry.html#within-a-group +func (s *ContainerRegistryService) ListGroupRegistryRepositories(gid interface{}, opt *ListRegistryRepositoriesOptions, options ...RequestOptionFunc) ([]*RegistryRepository, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/registry/repositories", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var repos []*RegistryRepository + resp, err := s.client.Do(req, &repos) + if err != nil { + return nil, resp, err + } + + return repos, resp, nil +} + +// GetSingleRegistryRepositoryOptions represents the available +// GetSingleRegistryRepository() options. 
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/container_registry.html#get-details-of-a-single-repository
+type GetSingleRegistryRepositoryOptions struct {
+	Tags      *bool `url:"tags,omitempty" json:"tags,omitempty"`
+	TagsCount *bool `url:"tags_count,omitempty" json:"tags_count,omitempty"`
+}
+
+// GetSingleRegistryRepository gets the details of a single registry repository.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/container_registry.html#get-details-of-a-single-repository
+func (s *ContainerRegistryService) GetSingleRegistryRepository(pid interface{}, opt *GetSingleRegistryRepositoryOptions, options ...RequestOptionFunc) (*RegistryRepository, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("registry/repositories/%s", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	repo := new(RegistryRepository)
+	resp, err := s.client.Do(req, repo)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return repo, resp, nil
+}
+
+// DeleteRegistryRepository deletes a repository in a registry.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/container_registry.html#delete-registry-repository
+func (s *ContainerRegistryService) DeleteRegistryRepository(pid interface{}, repository int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/registry/repositories/%d", PathEscape(project), repository)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// ListRegistryRepositoryTagsOptions represents the available
+// ListRegistryRepositoryTags() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/container_registry.html#list-registry-repository-tags
+type ListRegistryRepositoryTagsOptions ListOptions
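A usage sketch for the tag listing helper that follows below; the project identifier and repository ID are placeholders supplied by the caller.

```go
package examples

import (
	"fmt"

	gitlab "github.com/xanzy/go-gitlab"
)

// printRegistryTags lists the tags of one container registry repository
// in a project.
func printRegistryTags(svc *gitlab.ContainerRegistryService, pid interface{}, repoID int) error {
	opt := &gitlab.ListRegistryRepositoryTagsOptions{PerPage: 50}
	tags, _, err := svc.ListRegistryRepositoryTags(pid, repoID, opt)
	if err != nil {
		return err
	}
	for _, t := range tags {
		fmt.Printf("%s @ %s\n", t.Name, t.ShortRevision)
	}
	return nil
}
```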
+// ListRegistryRepositoryTags gets a list of tags for a given registry repository.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/container_registry.html#list-registry-repository-tags
+func (s *ContainerRegistryService) ListRegistryRepositoryTags(pid interface{}, repository int, opt *ListRegistryRepositoryTagsOptions, options ...RequestOptionFunc) ([]*RegistryRepositoryTag, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/registry/repositories/%d/tags",
+		PathEscape(project),
+		repository,
+	)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var tags []*RegistryRepositoryTag
+	resp, err := s.client.Do(req, &tags)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return tags, resp, nil
+}
+
+// GetRegistryRepositoryTagDetail gets the details of a registry repository tag.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/container_registry.html#get-details-of-a-registry-repository-tag
+func (s *ContainerRegistryService) GetRegistryRepositoryTagDetail(pid interface{}, repository int, tagName string, options ...RequestOptionFunc) (*RegistryRepositoryTag, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/registry/repositories/%d/tags/%s",
+		PathEscape(project),
+		repository,
+		tagName,
+	)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	tag := new(RegistryRepositoryTag)
+	resp, err := s.client.Do(req, &tag)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return tag, resp, nil
+}
+
+// DeleteRegistryRepositoryTag deletes a registry repository tag.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/container_registry.html#delete-a-registry-repository-tag
+func (s *ContainerRegistryService) DeleteRegistryRepositoryTag(pid interface{}, repository int, tagName string, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/registry/repositories/%d/tags/%s",
+		PathEscape(project),
+		repository,
+		tagName,
+	)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// DeleteRegistryRepositoryTagsOptions represents the available
+// DeleteRegistryRepositoryTags() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/container_registry.html#delete-registry-repository-tags-in-bulk
+type DeleteRegistryRepositoryTagsOptions struct {
+	NameRegexpDelete *string `url:"name_regex_delete,omitempty" json:"name_regex_delete,omitempty"`
+	NameRegexpKeep   *string `url:"name_regex_keep,omitempty" json:"name_regex_keep,omitempty"`
+	KeepN            *int    `url:"keep_n,omitempty" json:"keep_n,omitempty"`
+	OlderThan        *string `url:"older_than,omitempty" json:"older_than,omitempty"`
+
+	// Deprecated: NameRegexp is deprecated in favor of NameRegexpDelete.
+	NameRegexp *string `url:"name_regex,omitempty" json:"name_regex,omitempty"`
+}
+
+// DeleteRegistryRepositoryTags deletes repository tags in bulk based on
+// given criteria.
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/container_registry.html#delete-registry-repository-tags-in-bulk +func (s *ContainerRegistryService) DeleteRegistryRepositoryTags(pid interface{}, repository int, opt *DeleteRegistryRepositoryTagsOptions, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/registry/repositories/%d/tags", + PathEscape(project), + repository, + ) + + req, err := s.client.NewRequest(http.MethodDelete, u, opt, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/custom_attributes.go b/vendor/github.com/xanzy/go-gitlab/custom_attributes.go new file mode 100644 index 0000000000..244059db7c --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/custom_attributes.go @@ -0,0 +1,188 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" +) + +// CustomAttributesService handles communication with the group, project and +// user custom attributes related methods of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/custom_attributes.html +type CustomAttributesService struct { + client *Client +} + +// CustomAttribute struct is used to unmarshal response to api calls. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/custom_attributes.html +type CustomAttribute struct { + Key string `json:"key"` + Value string `json:"value"` +} + +// ListCustomUserAttributes lists the custom attributes of the specified user. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/custom_attributes.html#list-custom-attributes +func (s *CustomAttributesService) ListCustomUserAttributes(user int, options ...RequestOptionFunc) ([]*CustomAttribute, *Response, error) { + return s.listCustomAttributes("users", user, options...) +} + +// ListCustomGroupAttributes lists the custom attributes of the specified group. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/custom_attributes.html#list-custom-attributes +func (s *CustomAttributesService) ListCustomGroupAttributes(group int, options ...RequestOptionFunc) ([]*CustomAttribute, *Response, error) { + return s.listCustomAttributes("groups", group, options...) +} + +// ListCustomProjectAttributes lists the custom attributes of the specified project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/custom_attributes.html#list-custom-attributes +func (s *CustomAttributesService) ListCustomProjectAttributes(project int, options ...RequestOptionFunc) ([]*CustomAttribute, *Response, error) { + return s.listCustomAttributes("projects", project, options...) 
+}
+
+func (s *CustomAttributesService) listCustomAttributes(resource string, id int, options ...RequestOptionFunc) ([]*CustomAttribute, *Response, error) {
+	u := fmt.Sprintf("%s/%d/custom_attributes", resource, id)
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var cas []*CustomAttribute
+	resp, err := s.client.Do(req, &cas)
+	if err != nil {
+		return nil, resp, err
+	}
+	return cas, resp, nil
+}
+
+// GetCustomUserAttribute returns the user attribute with a specific key.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/custom_attributes.html#single-custom-attribute
+func (s *CustomAttributesService) GetCustomUserAttribute(user int, key string, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) {
+	return s.getCustomAttribute("users", user, key, options...)
+}
+
+// GetCustomGroupAttribute returns the group attribute with a specific key.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/custom_attributes.html#single-custom-attribute
+func (s *CustomAttributesService) GetCustomGroupAttribute(group int, key string, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) {
+	return s.getCustomAttribute("groups", group, key, options...)
+}
+
+// GetCustomProjectAttribute returns the project attribute with a specific key.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/custom_attributes.html#single-custom-attribute
+func (s *CustomAttributesService) GetCustomProjectAttribute(project int, key string, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) {
+	return s.getCustomAttribute("projects", project, key, options...)
+}
+
+func (s *CustomAttributesService) getCustomAttribute(resource string, id int, key string, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) {
+	u := fmt.Sprintf("%s/%d/custom_attributes/%s", resource, id, key)
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var ca *CustomAttribute
+	resp, err := s.client.Do(req, &ca)
+	if err != nil {
+		return nil, resp, err
+	}
+	return ca, resp, nil
+}
+
+// SetCustomUserAttribute sets the custom attributes of the specified user.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/custom_attributes.html#set-custom-attribute
+func (s *CustomAttributesService) SetCustomUserAttribute(user int, c CustomAttribute, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) {
+	return s.setCustomAttribute("users", user, c, options...)
+}
+
+// SetCustomGroupAttribute sets the custom attributes of the specified group.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/custom_attributes.html#set-custom-attribute
+func (s *CustomAttributesService) SetCustomGroupAttribute(group int, c CustomAttribute, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) {
+	return s.setCustomAttribute("groups", group, c, options...)
+}
+
+// SetCustomProjectAttribute sets the custom attributes of the specified project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/custom_attributes.html#set-custom-attribute
+func (s *CustomAttributesService) SetCustomProjectAttribute(project int, c CustomAttribute, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) {
+	return s.setCustomAttribute("projects", project, c, options...)
+} + +func (s *CustomAttributesService) setCustomAttribute(resource string, id int, c CustomAttribute, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) { + u := fmt.Sprintf("%s/%d/custom_attributes/%s", resource, id, c.Key) + req, err := s.client.NewRequest(http.MethodPut, u, c, options) + if err != nil { + return nil, nil, err + } + + ca := new(CustomAttribute) + resp, err := s.client.Do(req, ca) + if err != nil { + return nil, resp, err + } + return ca, resp, nil +} + +// DeleteCustomUserAttribute removes the custom attribute of the specified user. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/custom_attributes.html#delete-custom-attribute +func (s *CustomAttributesService) DeleteCustomUserAttribute(user int, key string, options ...RequestOptionFunc) (*Response, error) { + return s.deleteCustomAttribute("users", user, key, options...) +} + +// DeleteCustomGroupAttribute removes the custom attribute of the specified group. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/custom_attributes.html#delete-custom-attribute +func (s *CustomAttributesService) DeleteCustomGroupAttribute(group int, key string, options ...RequestOptionFunc) (*Response, error) { + return s.deleteCustomAttribute("groups", group, key, options...) +} + +// DeleteCustomProjectAttribute removes the custom attribute of the specified project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/custom_attributes.html#delete-custom-attribute +func (s *CustomAttributesService) DeleteCustomProjectAttribute(project int, key string, options ...RequestOptionFunc) (*Response, error) { + return s.deleteCustomAttribute("projects", project, key, options...) +} + +func (s *CustomAttributesService) deleteCustomAttribute(resource string, id int, key string, options ...RequestOptionFunc) (*Response, error) { + u := fmt.Sprintf("%s/%d/custom_attributes/%s", resource, id, key) + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/deploy_keys.go b/vendor/github.com/xanzy/go-gitlab/deploy_keys.go new file mode 100644 index 0000000000..e343bef980 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/deploy_keys.go @@ -0,0 +1,275 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" + "time" +) + +// DeployKeysService handles communication with the keys related methods +// of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/deploy_keys.html +type DeployKeysService struct { + client *Client +} + +// InstanceDeployKey represents a GitLab deploy key with the associated +// projects it has write access to. 
+type InstanceDeployKey struct {
+	ID                      int                 `json:"id"`
+	Title                   string              `json:"title"`
+	CreatedAt               *time.Time          `json:"created_at"`
+	Key                     string              `json:"key"`
+	Fingerprint             string              `json:"fingerprint"`
+	ProjectsWithWriteAccess []*DeployKeyProject `json:"projects_with_write_access"`
+}
+
+func (k InstanceDeployKey) String() string {
+	return Stringify(k)
+}
+
+// DeployKeyProject refers to a project an InstanceDeployKey has write access to.
+type DeployKeyProject struct {
+	ID                int        `json:"id"`
+	Description       string     `json:"description"`
+	Name              string     `json:"name"`
+	NameWithNamespace string     `json:"name_with_namespace"`
+	Path              string     `json:"path"`
+	PathWithNamespace string     `json:"path_with_namespace"`
+	CreatedAt         *time.Time `json:"created_at"`
+}
+
+func (k DeployKeyProject) String() string {
+	return Stringify(k)
+}
+
+// ProjectDeployKey represents a GitLab project deploy key.
+type ProjectDeployKey struct {
+	ID        int        `json:"id"`
+	Title     string     `json:"title"`
+	Key       string     `json:"key"`
+	CreatedAt *time.Time `json:"created_at"`
+	CanPush   bool       `json:"can_push"`
+}
+
+func (k ProjectDeployKey) String() string {
+	return Stringify(k)
+}
+
+// ListInstanceDeployKeysOptions represents the available ListAllDeployKeys()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/deploy_keys.html#list-all-deploy-keys
+type ListInstanceDeployKeysOptions struct {
+	ListOptions
+	Public *bool `url:"public,omitempty" json:"public,omitempty"`
+}
+
+// ListAllDeployKeys gets a list of all deploy keys.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/deploy_keys.html#list-all-deploy-keys
+func (s *DeployKeysService) ListAllDeployKeys(opt *ListInstanceDeployKeysOptions, options ...RequestOptionFunc) ([]*InstanceDeployKey, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodGet, "deploy_keys", opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var ks []*InstanceDeployKey
+	resp, err := s.client.Do(req, &ks)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ks, resp, nil
+}
+
+// ListProjectDeployKeysOptions represents the available ListProjectDeployKeys()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/deploy_keys.html#list-deploy-keys-for-project
+type ListProjectDeployKeysOptions ListOptions
+
+// ListProjectDeployKeys gets a list of a project's deploy keys.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/deploy_keys.html#list-deploy-keys-for-project
+func (s *DeployKeysService) ListProjectDeployKeys(pid interface{}, opt *ListProjectDeployKeysOptions, options ...RequestOptionFunc) ([]*ProjectDeployKey, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/deploy_keys", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var ks []*ProjectDeployKey
+	resp, err := s.client.Do(req, &ks)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ks, resp, nil
+}
+
+// GetDeployKey gets a single deploy key.
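+//
+// A hypothetical usage sketch (not part of the upstream file; assumes an
+// authenticated *Client named git, with made-up project and key IDs):
+//
+//	key, _, err := git.DeployKeys.GetDeployKey(1, 11)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("%s (can push: %t)\n", key.Title, key.CanPush)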
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/deploy_keys.html#get-a-single-deploy-key
+func (s *DeployKeysService) GetDeployKey(pid interface{}, deployKey int, options ...RequestOptionFunc) (*ProjectDeployKey, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/deploy_keys/%d", PathEscape(project), deployKey)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	k := new(ProjectDeployKey)
+	resp, err := s.client.Do(req, k)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return k, resp, nil
+}
+
+// AddDeployKeyOptions represents the available AddDeployKey() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/deploy_keys.html#add-deploy-key
+type AddDeployKeyOptions struct {
+	Title   *string `url:"title,omitempty" json:"title,omitempty"`
+	Key     *string `url:"key,omitempty" json:"key,omitempty"`
+	CanPush *bool   `url:"can_push,omitempty" json:"can_push,omitempty"`
+}
+
+// AddDeployKey creates a new deploy key for a project. If the deploy key
+// already exists in another project, it is joined to the current project,
+// but only if the original key is accessible by the same user.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/deploy_keys.html#add-deploy-key
+func (s *DeployKeysService) AddDeployKey(pid interface{}, opt *AddDeployKeyOptions, options ...RequestOptionFunc) (*ProjectDeployKey, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/deploy_keys", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	k := new(ProjectDeployKey)
+	resp, err := s.client.Do(req, k)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return k, resp, nil
+}
+
+// DeleteDeployKey deletes a deploy key from a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/deploy_keys.html#delete-deploy-key
+func (s *DeployKeysService) DeleteDeployKey(pid interface{}, deployKey int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/deploy_keys/%d", PathEscape(project), deployKey)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// EnableDeployKey enables a deploy key.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/deploy_keys.html#enable-a-deploy-key
+func (s *DeployKeysService) EnableDeployKey(pid interface{}, deployKey int, options ...RequestOptionFunc) (*ProjectDeployKey, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/deploy_keys/%d/enable", PathEscape(project), deployKey)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	k := new(ProjectDeployKey)
+	resp, err := s.client.Do(req, k)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return k, resp, nil
+}
+
+// UpdateDeployKeyOptions represents the available UpdateDeployKey() options.
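+//
+// A hypothetical sketch of toggling push access (not from upstream; assumes
+// an authenticated *Client named git and the package's Bool helper; the IDs
+// are made up):
+//
+//	opt := &UpdateDeployKeyOptions{CanPush: Bool(true)}
+//	key, _, err := git.DeployKeys.UpdateDeployKey(1, 11, opt)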
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/deploy_keys.html#update-deploy-key +type UpdateDeployKeyOptions struct { + Title *string `url:"title,omitempty" json:"title,omitempty"` + CanPush *bool `url:"can_push,omitempty" json:"can_push,omitempty"` +} + +// UpdateDeployKey updates a deploy key for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/deploy_keys.html#update-deploy-key +func (s *DeployKeysService) UpdateDeployKey(pid interface{}, deployKey int, opt *UpdateDeployKeyOptions, options ...RequestOptionFunc) (*ProjectDeployKey, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/deploy_keys/%d", PathEscape(project), deployKey) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, nil, err + } + + k := new(ProjectDeployKey) + resp, err := s.client.Do(req, k) + if err != nil { + return nil, resp, err + } + + return k, resp, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/deploy_tokens.go b/vendor/github.com/xanzy/go-gitlab/deploy_tokens.go new file mode 100644 index 0000000000..f744b0ec90 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/deploy_tokens.go @@ -0,0 +1,290 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" + "time" +) + +// DeployTokensService handles communication with the deploy tokens related methods +// of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/deploy_tokens.html +type DeployTokensService struct { + client *Client +} + +// DeployToken represents a GitLab deploy token. +type DeployToken struct { + ID int `json:"id"` + Name string `json:"name"` + Username string `json:"username"` + ExpiresAt *time.Time `json:"expires_at"` + Revoked bool `json:"revoked"` + Expired bool `json:"expired"` + Token string `json:"token,omitempty"` + Scopes []string `json:"scopes"` +} + +func (k DeployToken) String() string { + return Stringify(k) +} + +// ListAllDeployTokens gets a list of all deploy tokens. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/deploy_tokens.html#list-all-deploy-tokens +func (s *DeployTokensService) ListAllDeployTokens(options ...RequestOptionFunc) ([]*DeployToken, *Response, error) { + req, err := s.client.NewRequest(http.MethodGet, "deploy_tokens", nil, options) + if err != nil { + return nil, nil, err + } + + var ts []*DeployToken + resp, err := s.client.Do(req, &ts) + if err != nil { + return nil, resp, err + } + + return ts, resp, nil +} + +// ListProjectDeployTokensOptions represents the available ListProjectDeployTokens() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/deploy_tokens.html#list-project-deploy-tokens +type ListProjectDeployTokensOptions ListOptions + +// ListProjectDeployTokens gets a list of a project's deploy tokens. 
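+//
+// A hypothetical paging sketch (not from upstream; assumes an authenticated
+// *Client named git and a made-up project ID; ListProjectDeployTokensOptions
+// embeds the standard ListOptions paging fields):
+//
+//	opt := &ListProjectDeployTokensOptions{Page: 1, PerPage: 50}
+//	tokens, _, err := git.DeployTokens.ListProjectDeployTokens(1, opt)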
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/deploy_tokens.html#list-project-deploy-tokens +func (s *DeployTokensService) ListProjectDeployTokens(pid interface{}, opt *ListProjectDeployTokensOptions, options ...RequestOptionFunc) ([]*DeployToken, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/deploy_tokens", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var ts []*DeployToken + resp, err := s.client.Do(req, &ts) + if err != nil { + return nil, resp, err + } + + return ts, resp, nil +} + +// GetProjectDeployToken gets a single deploy token. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/deploy_tokens.html#get-a-project-deploy-token +func (s *DeployTokensService) GetProjectDeployToken(pid interface{}, deployToken int, options ...RequestOptionFunc) (*DeployToken, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/deploy_tokens/%d", PathEscape(project), deployToken) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + t := new(DeployToken) + resp, err := s.client.Do(req, t) + if err != nil { + return nil, resp, err + } + + return t, resp, nil +} + +// CreateProjectDeployTokenOptions represents the available CreateProjectDeployToken() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/deploy_tokens.html#create-a-project-deploy-token +type CreateProjectDeployTokenOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + ExpiresAt *time.Time `url:"expires_at,omitempty" json:"expires_at,omitempty"` + Username *string `url:"username,omitempty" json:"username,omitempty"` + Scopes *[]string `url:"scopes,omitempty" json:"scopes,omitempty"` +} + +// CreateProjectDeployToken creates a new deploy token for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/deploy_tokens.html#create-a-project-deploy-token +func (s *DeployTokensService) CreateProjectDeployToken(pid interface{}, opt *CreateProjectDeployTokenOptions, options ...RequestOptionFunc) (*DeployToken, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/deploy_tokens", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + t := new(DeployToken) + resp, err := s.client.Do(req, t) + if err != nil { + return nil, resp, err + } + + return t, resp, nil +} + +// DeleteProjectDeployToken removes a deploy token from the project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/deploy_tokens.html#delete-a-project-deploy-token +func (s *DeployTokensService) DeleteProjectDeployToken(pid interface{}, deployToken int, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/deploy_tokens/%d", PathEscape(project), deployToken) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// ListGroupDeployTokensOptions represents the available ListGroupDeployTokens() +// options. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/deploy_tokens.html#list-group-deploy-tokens +type ListGroupDeployTokensOptions ListOptions + +// ListGroupDeployTokens gets a list of a group’s deploy tokens. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/deploy_tokens.html#list-group-deploy-tokens +func (s *DeployTokensService) ListGroupDeployTokens(gid interface{}, opt *ListGroupDeployTokensOptions, options ...RequestOptionFunc) ([]*DeployToken, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/deploy_tokens", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var ts []*DeployToken + resp, err := s.client.Do(req, &ts) + if err != nil { + return nil, resp, err + } + + return ts, resp, nil +} + +// GetGroupDeployToken gets a single deploy token. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/deploy_tokens.html#get-a-group-deploy-token +func (s *DeployTokensService) GetGroupDeployToken(gid interface{}, deployToken int, options ...RequestOptionFunc) (*DeployToken, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/deploy_tokens/%d", PathEscape(group), deployToken) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + t := new(DeployToken) + resp, err := s.client.Do(req, t) + if err != nil { + return nil, resp, err + } + + return t, resp, nil +} + +// CreateGroupDeployTokenOptions represents the available CreateGroupDeployToken() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/deploy_tokens.html#create-a-group-deploy-token +type CreateGroupDeployTokenOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + ExpiresAt *time.Time `url:"expires_at,omitempty" json:"expires_at,omitempty"` + Username *string `url:"username,omitempty" json:"username,omitempty"` + Scopes *[]string `url:"scopes,omitempty" json:"scopes,omitempty"` +} + +// CreateGroupDeployToken creates a new deploy token for a group. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/deploy_tokens.html#create-a-group-deploy-token +func (s *DeployTokensService) CreateGroupDeployToken(gid interface{}, opt *CreateGroupDeployTokenOptions, options ...RequestOptionFunc) (*DeployToken, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/deploy_tokens", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + t := new(DeployToken) + resp, err := s.client.Do(req, t) + if err != nil { + return nil, resp, err + } + + return t, resp, nil +} + +// DeleteGroupDeployToken removes a deploy token from the group. 
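+//
+// A hypothetical sketch (not from upstream; assumes an authenticated *Client
+// named git, with made-up group ID 42 and token ID 7):
+//
+//	_, err := git.DeployTokens.DeleteGroupDeployToken(42, 7)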
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/deploy_tokens.html#delete-a-group-deploy-token
+func (s *DeployTokensService) DeleteGroupDeployToken(gid interface{}, deployToken int, options ...RequestOptionFunc) (*Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("groups/%s/deploy_tokens/%d", PathEscape(group), deployToken)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/deployments.go b/vendor/github.com/xanzy/go-gitlab/deployments.go
new file mode 100644
index 0000000000..05301acfc8
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/deployments.go
@@ -0,0 +1,260 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// DeploymentsService handles communication with the deployment related methods
+// of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/deployments.html
+type DeploymentsService struct {
+	client *Client
+}
+
+// Deployment represents a GitLab deployment.
+type Deployment struct {
+	ID          int          `json:"id"`
+	IID         int          `json:"iid"`
+	Ref         string       `json:"ref"`
+	SHA         string       `json:"sha"`
+	Status      string       `json:"status"`
+	CreatedAt   *time.Time   `json:"created_at"`
+	UpdatedAt   *time.Time   `json:"updated_at"`
+	User        *ProjectUser `json:"user"`
+	Environment *Environment `json:"environment"`
+	Deployable  struct {
+		ID         int        `json:"id"`
+		Status     string     `json:"status"`
+		Stage      string     `json:"stage"`
+		Name       string     `json:"name"`
+		Ref        string     `json:"ref"`
+		Tag        bool       `json:"tag"`
+		Coverage   float64    `json:"coverage"`
+		CreatedAt  *time.Time `json:"created_at"`
+		StartedAt  *time.Time `json:"started_at"`
+		FinishedAt *time.Time `json:"finished_at"`
+		Duration   float64    `json:"duration"`
+		User       *User      `json:"user"`
+		Commit     *Commit    `json:"commit"`
+		Pipeline   struct {
+			ID        int        `json:"id"`
+			SHA       string     `json:"sha"`
+			Ref       string     `json:"ref"`
+			Status    string     `json:"status"`
+			CreatedAt *time.Time `json:"created_at"`
+			UpdatedAt *time.Time `json:"updated_at"`
+		} `json:"pipeline"`
+		Runner *Runner `json:"runner"`
+	} `json:"deployable"`
+}
+
+// ListProjectDeploymentsOptions represents the available ListProjectDeployments() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/deployments.html#list-project-deployments
+type ListProjectDeploymentsOptions struct {
+	ListOptions
+	OrderBy     *string `url:"order_by,omitempty" json:"order_by,omitempty"`
+	Sort        *string `url:"sort,omitempty" json:"sort,omitempty"`
+	Environment *string `url:"environment,omitempty" json:"environment,omitempty"`
+	Status      *string `url:"status,omitempty" json:"status,omitempty"`
+
+	// Only for GitLab versions less than 14
+	UpdatedAfter  *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"`
+	UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"`
+
+	// Only for GitLab 14 or higher
+	FinishedAfter  *time.Time `url:"finished_after,omitempty" json:"finished_after,omitempty"`
+	FinishedBefore *time.Time `url:"finished_before,omitempty" json:"finished_before,omitempty"`
+}
+
+// ListProjectDeployments gets a list of deployments in a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/deployments.html#list-project-deployments
+func (s *DeploymentsService) ListProjectDeployments(pid interface{}, opts *ListProjectDeploymentsOptions, options ...RequestOptionFunc) ([]*Deployment, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/deployments", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opts, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var ds []*Deployment
+	resp, err := s.client.Do(req, &ds)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ds, resp, nil
+}
+
+// GetProjectDeployment gets a deployment for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/deployments.html#get-a-specific-deployment
+func (s *DeploymentsService) GetProjectDeployment(pid interface{}, deployment int, options ...RequestOptionFunc) (*Deployment, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/deployments/%d", PathEscape(project), deployment)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	d := new(Deployment)
+	resp, err := s.client.Do(req, d)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return d, resp, nil
+}
+
+// CreateProjectDeploymentOptions represents the available
+// CreateProjectDeployment() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/deployments.html#create-a-deployment
+type CreateProjectDeploymentOptions struct {
+	Environment *string                `url:"environment,omitempty" json:"environment,omitempty"`
+	Ref         *string                `url:"ref,omitempty" json:"ref,omitempty"`
+	SHA         *string                `url:"sha,omitempty" json:"sha,omitempty"`
+	Tag         *bool                  `url:"tag,omitempty" json:"tag,omitempty"`
+	Status      *DeploymentStatusValue `url:"status,omitempty" json:"status,omitempty"`
+}
+
+// CreateProjectDeployment creates a project deployment.
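+//
+// A hypothetical sketch of recording an externally managed deployment (not
+// from upstream; assumes an authenticated *Client named git, the package's
+// String/Bool helpers and DeploymentStatusSuccess constant; all values are
+// made up):
+//
+//	status := DeploymentStatusSuccess
+//	opt := &CreateProjectDeploymentOptions{
+//		Environment: String("production"),
+//		Ref:         String("main"),
+//		SHA:         String("a91957a8"),
+//		Tag:         Bool(false),
+//		Status:      &status,
+//	}
+//	deployment, _, err := git.Deployments.CreateProjectDeployment(1, opt)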
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/deployments.html#create-a-deployment
+func (s *DeploymentsService) CreateProjectDeployment(pid interface{}, opt *CreateProjectDeploymentOptions, options ...RequestOptionFunc) (*Deployment, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/deployments", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	d := new(Deployment)
+	resp, err := s.client.Do(req, &d)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return d, resp, nil
+}
+
+// UpdateProjectDeploymentOptions represents the available
+// UpdateProjectDeployment() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/deployments.html#update-a-deployment
+type UpdateProjectDeploymentOptions struct {
+	Status *DeploymentStatusValue `url:"status,omitempty" json:"status,omitempty"`
+}
+
+// UpdateProjectDeployment updates a project deployment.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/deployments.html#update-a-deployment
+func (s *DeploymentsService) UpdateProjectDeployment(pid interface{}, deployment int, opt *UpdateProjectDeploymentOptions, options ...RequestOptionFunc) (*Deployment, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/deployments/%d", PathEscape(project), deployment)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	d := new(Deployment)
+	resp, err := s.client.Do(req, &d)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return d, resp, nil
+}
+
+// ApproveOrRejectProjectDeploymentOptions represents the available
+// ApproveOrRejectProjectDeployment() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/deployments.html#approve-or-reject-a-blocked-deployment
+type ApproveOrRejectProjectDeploymentOptions struct {
+	Status        *DeploymentApprovalStatus `url:"status,omitempty" json:"status,omitempty"`
+	Comment       *string                   `url:"comment,omitempty" json:"comment,omitempty"`
+	RepresentedAs *string                   `url:"represented_as,omitempty" json:"represented_as,omitempty"`
+}
+
+// ApproveOrRejectProjectDeployment approves or rejects a blocked deployment.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/deployments.html#approve-or-reject-a-blocked-deployment
+func (s *DeploymentsService) ApproveOrRejectProjectDeployment(pid interface{}, deployment int,
+	opt *ApproveOrRejectProjectDeploymentOptions, options ...RequestOptionFunc,
+) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/deployments/%d/approval", PathEscape(project), deployment)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// DeleteProjectDeployment deletes a project deployment.
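+//
+// A hypothetical sketch (not from upstream; assumes an authenticated *Client
+// named git, with made-up project ID 1 and deployment ID 42):
+//
+//	_, err := git.Deployments.DeleteProjectDeployment(1, 42)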
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/deployments.html#delete-a-specific-deployment
+func (s *DeploymentsService) DeleteProjectDeployment(pid interface{}, deployment int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/deployments/%d", PathEscape(project), deployment)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/deployments_merge_requests.go b/vendor/github.com/xanzy/go-gitlab/deployments_merge_requests.go
new file mode 100644
index 0000000000..8417f9177f
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/deployments_merge_requests.go
@@ -0,0 +1,53 @@
+// Copyright 2022, Daniela Filipe Bento
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+)
+
+// DeploymentMergeRequestsService handles communication with the deployment's
+// merge requests related methods of the GitLab API.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/deployments.html#list-of-merge-requests-associated-with-a-deployment
+type DeploymentMergeRequestsService struct {
+	client *Client
+}
+
+// ListDeploymentMergeRequests gets the merge requests associated with a deployment.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/deployments.html#list-of-merge-requests-associated-with-a-deployment
+func (s *DeploymentMergeRequestsService) ListDeploymentMergeRequests(pid interface{}, deployment int, opts *ListMergeRequestsOptions, options ...RequestOptionFunc) ([]*MergeRequest, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/deployments/%d/merge_requests", PathEscape(project), deployment)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opts, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var mrs []*MergeRequest
+	resp, err := s.client.Do(req, &mrs)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return mrs, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/discussions.go b/vendor/github.com/xanzy/go-gitlab/discussions.go
new file mode 100644
index 0000000000..7f791c585f
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/discussions.go
@@ -0,0 +1,1143 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// + +package gitlab + +import ( + "fmt" + "net/http" + "time" +) + +// DiscussionsService handles communication with the discussions related +// methods of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/discussions.html +type DiscussionsService struct { + client *Client +} + +// Discussion represents a GitLab discussion. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/discussions.html +type Discussion struct { + ID string `json:"id"` + IndividualNote bool `json:"individual_note"` + Notes []*Note `json:"notes"` +} + +func (d Discussion) String() string { + return Stringify(d) +} + +// ListIssueDiscussionsOptions represents the available ListIssueDiscussions() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/discussions.html#list-project-issue-discussion-items +type ListIssueDiscussionsOptions ListOptions + +// ListIssueDiscussions gets a list of all discussions for a single +// issue. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/discussions.html#list-project-issue-discussion-items +func (s *DiscussionsService) ListIssueDiscussions(pid interface{}, issue int, opt *ListIssueDiscussionsOptions, options ...RequestOptionFunc) ([]*Discussion, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/issues/%d/discussions", PathEscape(project), issue) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var ds []*Discussion + resp, err := s.client.Do(req, &ds) + if err != nil { + return nil, resp, err + } + + return ds, resp, nil +} + +// GetIssueDiscussion returns a single discussion for a specific project issue. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/discussions.html#get-single-issue-discussion-item +func (s *DiscussionsService) GetIssueDiscussion(pid interface{}, issue int, discussion string, options ...RequestOptionFunc) (*Discussion, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/issues/%d/discussions/%s", + PathEscape(project), + issue, + discussion, + ) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + d := new(Discussion) + resp, err := s.client.Do(req, d) + if err != nil { + return nil, resp, err + } + + return d, resp, nil +} + +// CreateIssueDiscussionOptions represents the available CreateIssueDiscussion() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/discussions.html#create-new-issue-thread +type CreateIssueDiscussionOptions struct { + Body *string `url:"body,omitempty" json:"body,omitempty"` + CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"` +} + +// CreateIssueDiscussion creates a new discussion to a single project issue. 
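+//
+// A hypothetical sketch of starting a new thread on an issue (not from
+// upstream; assumes an authenticated *Client named git and the package's
+// String helper; project ID and issue IID are made up):
+//
+//	opt := &CreateIssueDiscussionOptions{Body: String("Needs triage")}
+//	discussion, _, err := git.Discussions.CreateIssueDiscussion(1, 5, opt)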
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#create-new-issue-thread
+func (s *DiscussionsService) CreateIssueDiscussion(pid interface{}, issue int, opt *CreateIssueDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/issues/%d/discussions", PathEscape(project), issue)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	d := new(Discussion)
+	resp, err := s.client.Do(req, d)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return d, resp, nil
+}
+
+// AddIssueDiscussionNoteOptions represents the available AddIssueDiscussionNote()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#add-note-to-existing-issue-thread
+type AddIssueDiscussionNoteOptions struct {
+	Body      *string    `url:"body,omitempty" json:"body,omitempty"`
+	CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"`
+}
+
+// AddIssueDiscussionNote adds a new note to an existing discussion of a
+// single project issue.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#add-note-to-existing-issue-thread
+func (s *DiscussionsService) AddIssueDiscussionNote(pid interface{}, issue int, discussion string, opt *AddIssueDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/issues/%d/discussions/%s/notes",
+		PathEscape(project),
+		issue,
+		discussion,
+	)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	n := new(Note)
+	resp, err := s.client.Do(req, n)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return n, resp, nil
+}
+
+// UpdateIssueDiscussionNoteOptions represents the available
+// UpdateIssueDiscussionNote() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#modify-existing-issue-thread-note
+type UpdateIssueDiscussionNoteOptions struct {
+	Body      *string    `url:"body,omitempty" json:"body,omitempty"`
+	CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"`
+}
+
+// UpdateIssueDiscussionNote modifies an existing discussion note of an issue.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#modify-existing-issue-thread-note
+func (s *DiscussionsService) UpdateIssueDiscussionNote(pid interface{}, issue int, discussion string, note int, opt *UpdateIssueDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/issues/%d/discussions/%s/notes/%d",
+		PathEscape(project),
+		issue,
+		discussion,
+		note,
+	)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	n := new(Note)
+	resp, err := s.client.Do(req, n)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return n, resp, nil
+}
+
+// DeleteIssueDiscussionNote deletes an existing discussion note of an issue.
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/discussions.html#delete-an-issue-thread-note +func (s *DiscussionsService) DeleteIssueDiscussionNote(pid interface{}, issue int, discussion string, note int, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/issues/%d/discussions/%s/notes/%d", + PathEscape(project), + issue, + discussion, + note, + ) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// ListSnippetDiscussionsOptions represents the available ListSnippetDiscussions() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/discussions.html#list-project-snippet-discussion-items +type ListSnippetDiscussionsOptions ListOptions + +// ListSnippetDiscussions gets a list of all discussions for a single +// snippet. Snippet discussions are comments users can post to a snippet. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/discussions.html#list-project-snippet-discussion-items +func (s *DiscussionsService) ListSnippetDiscussions(pid interface{}, snippet int, opt *ListSnippetDiscussionsOptions, options ...RequestOptionFunc) ([]*Discussion, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/snippets/%d/discussions", PathEscape(project), snippet) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var ds []*Discussion + resp, err := s.client.Do(req, &ds) + if err != nil { + return nil, resp, err + } + + return ds, resp, nil +} + +// GetSnippetDiscussion returns a single discussion for a given snippet. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/discussions.html#get-single-snippet-discussion-item +func (s *DiscussionsService) GetSnippetDiscussion(pid interface{}, snippet int, discussion string, options ...RequestOptionFunc) (*Discussion, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/snippets/%d/discussions/%s", + PathEscape(project), + snippet, + discussion, + ) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + d := new(Discussion) + resp, err := s.client.Do(req, d) + if err != nil { + return nil, resp, err + } + + return d, resp, nil +} + +// CreateSnippetDiscussionOptions represents the available +// CreateSnippetDiscussion() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/discussions.html#create-new-snippet-thread +type CreateSnippetDiscussionOptions struct { + Body *string `url:"body,omitempty" json:"body,omitempty"` + CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"` +} + +// CreateSnippetDiscussion creates a new discussion for a single snippet. +// Snippet discussions are comments users can post to a snippet. 
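+//
+// A hypothetical sketch (not from upstream; assumes an authenticated *Client
+// named git and the package's String helper; IDs are made up):
+//
+//	opt := &CreateSnippetDiscussionOptions{Body: String("Nice snippet!")}
+//	discussion, _, err := git.Discussions.CreateSnippetDiscussion(1, 3, opt)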
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#create-new-snippet-thread
+func (s *DiscussionsService) CreateSnippetDiscussion(pid interface{}, snippet int, opt *CreateSnippetDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/snippets/%d/discussions", PathEscape(project), snippet)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	d := new(Discussion)
+	resp, err := s.client.Do(req, d)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return d, resp, nil
+}
+
+// AddSnippetDiscussionNoteOptions represents the available
+// AddSnippetDiscussionNote() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#add-note-to-existing-snippet-thread
+type AddSnippetDiscussionNoteOptions struct {
+	Body      *string    `url:"body,omitempty" json:"body,omitempty"`
+	CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"`
+}
+
+// AddSnippetDiscussionNote adds a new note to an existing discussion of a
+// single project snippet.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#add-note-to-existing-snippet-thread
+func (s *DiscussionsService) AddSnippetDiscussionNote(pid interface{}, snippet int, discussion string, opt *AddSnippetDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/snippets/%d/discussions/%s/notes",
+		PathEscape(project),
+		snippet,
+		discussion,
+	)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	n := new(Note)
+	resp, err := s.client.Do(req, n)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return n, resp, nil
+}
+
+// UpdateSnippetDiscussionNoteOptions represents the available
+// UpdateSnippetDiscussionNote() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#modify-existing-snippet-thread-note
+type UpdateSnippetDiscussionNoteOptions struct {
+	Body      *string    `url:"body,omitempty" json:"body,omitempty"`
+	CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"`
+}
+
+// UpdateSnippetDiscussionNote modifies an existing discussion note of a snippet.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#modify-existing-snippet-thread-note
+func (s *DiscussionsService) UpdateSnippetDiscussionNote(pid interface{}, snippet int, discussion string, note int, opt *UpdateSnippetDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/snippets/%d/discussions/%s/notes/%d",
+		PathEscape(project),
+		snippet,
+		discussion,
+		note,
+	)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	n := new(Note)
+	resp, err := s.client.Do(req, n)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return n, resp, nil
+}
+
+// DeleteSnippetDiscussionNote deletes an existing discussion note of a snippet.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#delete-a-snippet-thread-note
+func (s *DiscussionsService) DeleteSnippetDiscussionNote(pid interface{}, snippet int, discussion string, note int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/snippets/%d/discussions/%s/notes/%d",
+		PathEscape(project),
+		snippet,
+		discussion,
+		note,
+	)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// ListGroupEpicDiscussionsOptions represents the available
+// ListGroupEpicDiscussions() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#list-group-epic-discussion-items
+type ListGroupEpicDiscussionsOptions ListOptions
+
+// ListGroupEpicDiscussions gets a list of all discussions for a single
+// epic. Epic discussions are comments users can post to an epic.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#list-group-epic-discussion-items
+func (s *DiscussionsService) ListGroupEpicDiscussions(gid interface{}, epic int, opt *ListGroupEpicDiscussionsOptions, options ...RequestOptionFunc) ([]*Discussion, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/epics/%d/discussions",
+		PathEscape(group),
+		epic,
+	)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var ds []*Discussion
+	resp, err := s.client.Do(req, &ds)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ds, resp, nil
+}
+
+// GetEpicDiscussion returns a single discussion for a given epic.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#get-single-epic-discussion-item
+func (s *DiscussionsService) GetEpicDiscussion(gid interface{}, epic int, discussion string, options ...RequestOptionFunc) (*Discussion, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/epics/%d/discussions/%s",
+		PathEscape(group),
+		epic,
+		discussion,
+	)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	d := new(Discussion)
+	resp, err := s.client.Do(req, d)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return d, resp, nil
+}
+
+// CreateEpicDiscussionOptions represents the available CreateEpicDiscussion()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#create-new-epic-thread
+type CreateEpicDiscussionOptions struct {
+	Body      *string    `url:"body,omitempty" json:"body,omitempty"`
+	CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"`
+}
+
+// CreateEpicDiscussion creates a new discussion for a single epic. Epic
+// discussions are comments users can post to an epic.
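+//
+// A hypothetical sketch (not from upstream; assumes an authenticated *Client
+// named git, a made-up group ID 7 and epic ID 12; epics are addressed by
+// group, not by project):
+//
+//	opt := &CreateEpicDiscussionOptions{Body: String("Kickoff notes")}
+//	discussion, _, err := git.Discussions.CreateEpicDiscussion(7, 12, opt)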
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#create-new-epic-thread
+func (s *DiscussionsService) CreateEpicDiscussion(gid interface{}, epic int, opt *CreateEpicDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/epics/%d/discussions",
+		PathEscape(group),
+		epic,
+	)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	d := new(Discussion)
+	resp, err := s.client.Do(req, d)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return d, resp, nil
+}
+
+// AddEpicDiscussionNoteOptions represents the available
+// AddEpicDiscussionNote() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#add-note-to-existing-epic-thread
+type AddEpicDiscussionNoteOptions struct {
+	Body      *string    `url:"body,omitempty" json:"body,omitempty"`
+	CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"`
+}
+
+// AddEpicDiscussionNote adds a new note to an existing discussion of a
+// single epic.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#add-note-to-existing-epic-thread
+func (s *DiscussionsService) AddEpicDiscussionNote(gid interface{}, epic int, discussion string, opt *AddEpicDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/epics/%d/discussions/%s/notes",
+		PathEscape(group),
+		epic,
+		discussion,
+	)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	n := new(Note)
+	resp, err := s.client.Do(req, n)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return n, resp, nil
+}
+
+// UpdateEpicDiscussionNoteOptions represents the available
+// UpdateEpicDiscussionNote() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#modify-existing-epic-thread-note
+type UpdateEpicDiscussionNoteOptions struct {
+	Body      *string    `url:"body,omitempty" json:"body,omitempty"`
+	CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"`
+}
+
+// UpdateEpicDiscussionNote modifies an existing discussion note of an epic.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#modify-existing-epic-thread-note
+func (s *DiscussionsService) UpdateEpicDiscussionNote(gid interface{}, epic int, discussion string, note int, opt *UpdateEpicDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/epics/%d/discussions/%s/notes/%d",
+		PathEscape(group),
+		epic,
+		discussion,
+		note,
+	)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	n := new(Note)
+	resp, err := s.client.Do(req, n)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return n, resp, nil
+}
+
+// DeleteEpicDiscussionNote deletes an existing discussion note of an epic.
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/discussions.html#delete-an-epic-thread-note +func (s *DiscussionsService) DeleteEpicDiscussionNote(gid interface{}, epic int, discussion string, note int, options ...RequestOptionFunc) (*Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("groups/%s/epics/%d/discussions/%s/notes/%d", + PathEscape(group), + epic, + discussion, + note, + ) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// ListMergeRequestDiscussionsOptions represents the available +// ListMergeRequestDiscussions() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/discussions.html#list-project-merge-request-discussion-items +type ListMergeRequestDiscussionsOptions ListOptions + +// ListMergeRequestDiscussions gets a list of all discussions for a single +// merge request. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/discussions.html#list-project-merge-request-discussion-items +func (s *DiscussionsService) ListMergeRequestDiscussions(pid interface{}, mergeRequest int, opt *ListMergeRequestDiscussionsOptions, options ...RequestOptionFunc) ([]*Discussion, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/discussions", + PathEscape(project), + mergeRequest, + ) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var ds []*Discussion + resp, err := s.client.Do(req, &ds) + if err != nil { + return nil, resp, err + } + + return ds, resp, nil +} + +// GetMergeRequestDiscussion returns a single discussion for a given merge +// request. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/discussions.html#get-single-merge-request-discussion-item +func (s *DiscussionsService) GetMergeRequestDiscussion(pid interface{}, mergeRequest int, discussion string, options ...RequestOptionFunc) (*Discussion, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/discussions/%s", + PathEscape(project), + mergeRequest, + discussion, + ) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + d := new(Discussion) + resp, err := s.client.Do(req, d) + if err != nil { + return nil, resp, err + } + + return d, resp, nil +} + +// CreateMergeRequestDiscussionOptions represents the available +// CreateMergeRequestDiscussion() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/discussions.html#create-new-merge-request-thread +type CreateMergeRequestDiscussionOptions struct { + Body *string `url:"body,omitempty" json:"body,omitempty"` + CommitID *string `url:"commit_id,omitempty" json:"commit_id,omitempty"` + CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"` + Position *PositionOptions `url:"position,omitempty" json:"position,omitempty"` +} + +// PositionOptions represents the position option of a discussion. 
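+//
+// A hypothetical sketch of a text position for a new merge request thread
+// (not from upstream; all SHAs, paths, and line numbers are made up; see
+// CreateMergeRequestDiscussionOptions for where this is used):
+//
+//	pos := &PositionOptions{
+//		BaseSHA:      String("c380d3ac"),
+//		HeadSHA:      String("2be7ddde"),
+//		StartSHA:     String("c380d3ac"),
+//		PositionType: String("text"),
+//		NewPath:      String("main.go"),
+//		NewLine:      Int(10),
+//	}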
+type PositionOptions struct { + BaseSHA *string `url:"base_sha,omitempty" json:"base_sha,omitempty"` + HeadSHA *string `url:"head_sha,omitempty" json:"head_sha,omitempty"` + StartSHA *string `url:"start_sha,omitempty" json:"start_sha,omitempty"` + NewPath *string `url:"new_path,omitempty" json:"new_path,omitempty"` + OldPath *string `url:"old_path,omitempty" json:"old_path,omitempty"` + PositionType *string `url:"position_type,omitempty" json:"position_type"` + NewLine *int `url:"new_line,omitempty" json:"new_line,omitempty"` + OldLine *int `url:"old_line,omitempty" json:"old_line,omitempty"` + LineRange *LineRangeOptions `url:"line_range,omitempty" json:"line_range,omitempty"` + Width *int `url:"width,omitempty" json:"width,omitempty"` + Height *int `url:"height,omitempty" json:"height,omitempty"` + X *float64 `url:"x,omitempty" json:"x,omitempty"` + Y *float64 `url:"y,omitempty" json:"y,omitempty"` +} + +// LineRangeOptions represents the line range option of a discussion. +type LineRangeOptions struct { + Start *LinePositionOptions `url:"start,omitempty" json:"start,omitempty"` + End *LinePositionOptions `url:"end,omitempty" json:"end,omitempty"` +} + +// LinePositionOptions represents the line position option of a discussion. +type LinePositionOptions struct { + LineCode *string `url:"line_code,omitempty" json:"line_code,omitempty"` + Type *string `url:"type,omitempty" json:"type,omitempty"` +} + +// CreateMergeRequestDiscussion creates a new discussion for a single merge +// request. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/discussions.html#create-new-merge-request-thread +func (s *DiscussionsService) CreateMergeRequestDiscussion(pid interface{}, mergeRequest int, opt *CreateMergeRequestDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/discussions", + PathEscape(project), + mergeRequest, + ) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + d := new(Discussion) + resp, err := s.client.Do(req, d) + if err != nil { + return nil, resp, err + } + + return d, resp, nil +} + +// ResolveMergeRequestDiscussionOptions represents the available +// ResolveMergeRequestDiscussion() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/discussions.html#resolve-a-merge-request-thread +type ResolveMergeRequestDiscussionOptions struct { + Resolved *bool `url:"resolved,omitempty" json:"resolved,omitempty"` +} + +// ResolveMergeRequestDiscussion resolves/unresolves whole discussion of a merge +// request. 
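+//
+// A hypothetical sketch of resolving a thread (not from upstream; assumes an
+// authenticated *Client named git, a made-up merge request IID of 8, and a
+// discussionID obtained from an earlier ListMergeRequestDiscussions call):
+//
+//	opt := &ResolveMergeRequestDiscussionOptions{Resolved: Bool(true)}
+//	discussion, _, err := git.Discussions.ResolveMergeRequestDiscussion(1, 8, discussionID, opt)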
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#resolve-a-merge-request-thread
+func (s *DiscussionsService) ResolveMergeRequestDiscussion(pid interface{}, mergeRequest int, discussion string, opt *ResolveMergeRequestDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_requests/%d/discussions/%s",
+		PathEscape(project),
+		mergeRequest,
+		discussion,
+	)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	d := new(Discussion)
+	resp, err := s.client.Do(req, d)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return d, resp, nil
+}
+
+// AddMergeRequestDiscussionNoteOptions represents the available
+// AddMergeRequestDiscussionNote() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#add-note-to-existing-merge-request-thread
+type AddMergeRequestDiscussionNoteOptions struct {
+	Body      *string    `url:"body,omitempty" json:"body,omitempty"`
+	CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"`
+}
+
+// AddMergeRequestDiscussionNote adds a new note to an existing discussion
+// of a single project merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#add-note-to-existing-merge-request-thread
+func (s *DiscussionsService) AddMergeRequestDiscussionNote(pid interface{}, mergeRequest int, discussion string, opt *AddMergeRequestDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_requests/%d/discussions/%s/notes",
+		PathEscape(project),
+		mergeRequest,
+		discussion,
+	)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	n := new(Note)
+	resp, err := s.client.Do(req, n)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return n, resp, nil
+}
+
+// UpdateMergeRequestDiscussionNoteOptions represents the available
+// UpdateMergeRequestDiscussionNote() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#modify-an-existing-merge-request-thread-note
+type UpdateMergeRequestDiscussionNoteOptions struct {
+	Body      *string    `url:"body,omitempty" json:"body,omitempty"`
+	CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"`
+	Resolved  *bool      `url:"resolved,omitempty" json:"resolved,omitempty"`
+}
+
+// UpdateMergeRequestDiscussionNote modifies an existing discussion note of
+// a merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#modify-an-existing-merge-request-thread-note
+func (s *DiscussionsService) UpdateMergeRequestDiscussionNote(pid interface{}, mergeRequest int, discussion string, note int, opt *UpdateMergeRequestDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_requests/%d/discussions/%s/notes/%d",
+		PathEscape(project),
+		mergeRequest,
+		discussion,
+		note,
+	)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	n := new(Note)
+	resp, err := s.client.Do(req, n)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return n, resp, nil
+}
+
+// DeleteMergeRequestDiscussionNote deletes an existing discussion note of a
+// merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#delete-a-merge-request-thread-note
+func (s *DiscussionsService) DeleteMergeRequestDiscussionNote(pid interface{}, mergeRequest int, discussion string, note int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_requests/%d/discussions/%s/notes/%d",
+		PathEscape(project),
+		mergeRequest,
+		discussion,
+		note,
+	)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// ListCommitDiscussionsOptions represents the available
+// ListCommitDiscussions() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#list-project-commit-discussion-items
+type ListCommitDiscussionsOptions ListOptions
+
+// ListCommitDiscussions gets a list of all discussions for a single
+// commit.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#list-project-commit-discussion-items
+func (s *DiscussionsService) ListCommitDiscussions(pid interface{}, commit string, opt *ListCommitDiscussionsOptions, options ...RequestOptionFunc) ([]*Discussion, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/repository/commits/%s/discussions",
+		PathEscape(project),
+		commit,
+	)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var ds []*Discussion
+	resp, err := s.client.Do(req, &ds)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ds, resp, nil
+}
+
+// GetCommitDiscussion returns a single discussion for a specific project
+// commit.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#get-single-commit-discussion-item
+func (s *DiscussionsService) GetCommitDiscussion(pid interface{}, commit string, discussion string, options ...RequestOptionFunc) (*Discussion, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/repository/commits/%s/discussions/%s",
+		PathEscape(project),
+		commit,
+		discussion,
+	)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	d := new(Discussion)
+	resp, err := s.client.Do(req, d)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return d, resp, nil
+}
+
+// CreateCommitDiscussionOptions represents the available
+// CreateCommitDiscussion() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#create-new-commit-thread
+type CreateCommitDiscussionOptions struct {
+	Body      *string       `url:"body,omitempty" json:"body,omitempty"`
+	CreatedAt *time.Time    `url:"created_at,omitempty" json:"created_at,omitempty"`
+	Position  *NotePosition `url:"position,omitempty" json:"position,omitempty"`
+}
+
+// CreateCommitDiscussion creates a new discussion for a single project commit.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#create-new-commit-thread
+func (s *DiscussionsService) CreateCommitDiscussion(pid interface{}, commit string, opt *CreateCommitDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/repository/commits/%s/discussions",
+		PathEscape(project),
+		commit,
+	)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	d := new(Discussion)
+	resp, err := s.client.Do(req, d)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return d, resp, nil
+}
+
+// AddCommitDiscussionNoteOptions represents the available
+// AddCommitDiscussionNote() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#add-note-to-existing-commit-thread
+type AddCommitDiscussionNoteOptions struct {
+	Body      *string    `url:"body,omitempty" json:"body,omitempty"`
+	CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"`
+}
+
+// AddCommitDiscussionNote adds a new note to an existing commit discussion.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#add-note-to-existing-commit-thread
+func (s *DiscussionsService) AddCommitDiscussionNote(pid interface{}, commit string, discussion string, opt *AddCommitDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/repository/commits/%s/discussions/%s/notes",
+		PathEscape(project),
+		commit,
+		discussion,
+	)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	n := new(Note)
+	resp, err := s.client.Do(req, n)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return n, resp, nil
+}
+
+// UpdateCommitDiscussionNoteOptions represents the available
+// UpdateCommitDiscussionNote() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#modify-an-existing-commit-thread-note
+type UpdateCommitDiscussionNoteOptions struct {
+	Body      *string    `url:"body,omitempty" json:"body,omitempty"`
+	CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"`
+}
+
+// UpdateCommitDiscussionNote modifies an existing note of a commit
+// discussion.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#modify-an-existing-commit-thread-note
+func (s *DiscussionsService) UpdateCommitDiscussionNote(pid interface{}, commit string, discussion string, note int, opt *UpdateCommitDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/repository/commits/%s/discussions/%s/notes/%d",
+		PathEscape(project),
+		commit,
+		discussion,
+		note,
+	)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	n := new(Note)
+	resp, err := s.client.Do(req, n)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return n, resp, nil
+}
+
+// DeleteCommitDiscussionNote deletes an existing note of a commit
+// discussion.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/discussions.html#delete-a-commit-thread-note
+func (s *DiscussionsService) DeleteCommitDiscussionNote(pid interface{}, commit string, discussion string, note int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/repository/commits/%s/discussions/%s/notes/%d",
+		PathEscape(project),
+		commit,
+		discussion,
+		note,
+	)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/dockerfile_templates.go b/vendor/github.com/xanzy/go-gitlab/dockerfile_templates.go
new file mode 100644
index 0000000000..3608c5a4de
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/dockerfile_templates.go
@@ -0,0 +1,93 @@
+//
+// Copyright 2022, FantasyTeddy
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+)
+
+// DockerfileTemplatesService handles communication with the Dockerfile
+// templates related methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/templates/dockerfiles.html
+type DockerfileTemplatesService struct {
+	client *Client
+}
+
+// DockerfileTemplate represents a GitLab Dockerfile template.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/templates/dockerfiles.html
+type DockerfileTemplate struct {
+	Name    string `json:"name"`
+	Content string `json:"content"`
+}
+
+// DockerfileTemplateListItem represents a GitLab Dockerfile template from the list.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/templates/dockerfiles.html
+type DockerfileTemplateListItem struct {
+	Key  string `json:"key"`
+	Name string `json:"name"`
+}
+
+// ListDockerfileTemplatesOptions represents the available ListTemplates() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/templates/dockerfiles.html#list-dockerfile-templates
+type ListDockerfileTemplatesOptions ListOptions
+
+// ListTemplates gets a list of available Dockerfile templates.
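+//
+// A minimal usage sketch (the service value is assumed to come from a
+// configured client; error handling is elided):
+//
+//	var svc *gitlab.DockerfileTemplatesService // obtained from a configured client
+//	items, _, err := svc.ListTemplates(&gitlab.ListDockerfileTemplatesOptions{PerPage: 20})
+//	if err != nil { ... }
+//	for _, item := range items {
+//		tmpl, _, err := svc.GetTemplate(item.Key)
+//		if err != nil { ... }
+//		fmt.Println(tmpl.Name)
+//	}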
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/templates/dockerfiles.html#list-dockerfile-templates
+func (s *DockerfileTemplatesService) ListTemplates(opt *ListDockerfileTemplatesOptions, options ...RequestOptionFunc) ([]*DockerfileTemplateListItem, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodGet, "templates/dockerfiles", opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var gs []*DockerfileTemplateListItem
+	resp, err := s.client.Do(req, &gs)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gs, resp, nil
+}
+
+// GetTemplate gets a single Dockerfile template.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/templates/dockerfiles.html#single-dockerfile-template
+func (s *DockerfileTemplatesService) GetTemplate(key string, options ...RequestOptionFunc) (*DockerfileTemplate, *Response, error) {
+	u := fmt.Sprintf("templates/dockerfiles/%s", url.PathEscape(key))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	g := new(DockerfileTemplate)
+	resp, err := s.client.Do(req, g)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return g, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/dora_metrics.go b/vendor/github.com/xanzy/go-gitlab/dora_metrics.go
new file mode 100644
index 0000000000..a2ad418eb1
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/dora_metrics.go
@@ -0,0 +1,110 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+)
+
+// DORAMetricsService handles communication with the DORA metrics related methods
+// of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/dora/metrics.html
+type DORAMetricsService struct {
+	client *Client
+}
+
+// DORAMetric represents a single DORA metric data point.
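+//
+// A minimal usage sketch for fetching these data points (the metric constant
+// and the client's DORAMetrics accessor are assumptions based on the rest of
+// this package; IDs are illustrative):
+//
+//	metric := gitlab.DORAMetricDeploymentFrequency
+//	metrics, _, err := client.DORAMetrics.GetProjectDORAMetrics(
+//		"my-group/my-project",
+//		gitlab.GetDORAMetricsOptions{Metric: &metric},
+//	)
+//	if err != nil { ... }
+//	for _, m := range metrics {
+//		fmt.Printf("%s: %v\n", m.Date, m.Value)
+//	}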
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/dora/metrics.html
+type DORAMetric struct {
+	Date  string  `json:"date"`
+	Value float64 `json:"value"`
+}
+
+// String gets a string representation of a DORAMetric data point.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/dora/metrics.html
+func (m DORAMetric) String() string {
+	return Stringify(m)
+}
+
+// GetDORAMetricsOptions represents the request body options for getting
+// DORA metrics.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/dora/metrics.html
+type GetDORAMetricsOptions struct {
+	Metric           *DORAMetricType     `url:"metric,omitempty" json:"metric,omitempty"`
+	EndDate          *ISOTime            `url:"end_date,omitempty" json:"end_date,omitempty"`
+	EnvironmentTiers *[]string           `url:"environment_tiers,comma,omitempty" json:"environment_tiers,omitempty"`
+	Interval         *DORAMetricInterval `url:"interval,omitempty" json:"interval,omitempty"`
+	StartDate        *ISOTime            `url:"start_date,omitempty" json:"start_date,omitempty"`
+
+	// Deprecated: use EnvironmentTiers instead.
+	EnvironmentTier *string `url:"environment_tier,omitempty" json:"environment_tier,omitempty"`
+}
+
+// GetProjectDORAMetrics gets the DORA metrics for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/dora/metrics.html#get-project-level-dora-metrics
+func (s *DORAMetricsService) GetProjectDORAMetrics(pid interface{}, opt GetDORAMetricsOptions, options ...RequestOptionFunc) ([]DORAMetric, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/dora/metrics", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var metrics []DORAMetric
+	resp, err := s.client.Do(req, &metrics)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return metrics, resp, err
+}
+
+// GetGroupDORAMetrics gets the DORA metrics for a group.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/dora/metrics.html#get-group-level-dora-metrics
+func (s *DORAMetricsService) GetGroupDORAMetrics(gid interface{}, opt GetDORAMetricsOptions, options ...RequestOptionFunc) ([]DORAMetric, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/dora/metrics", PathEscape(group))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var metrics []DORAMetric
+	resp, err := s.client.Do(req, &metrics)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return metrics, resp, err
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/draft_notes.go b/vendor/github.com/xanzy/go-gitlab/draft_notes.go
new file mode 100644
index 0000000000..376e4d0c86
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/draft_notes.go
@@ -0,0 +1,233 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+)
+
+// DraftNote represents a draft note on a merge request.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/draft_notes.html
+type DraftNote struct {
+	ID                int           `json:"id"`
+	AuthorID          int           `json:"author_id"`
+	MergeRequestID    int           `json:"merge_request_id"`
+	ResolveDiscussion bool          `json:"resolve_discussion"`
+	DiscussionID      string        `json:"discussion_id"`
+	Note              string        `json:"note"`
+	CommitID          string        `json:"commit_id"`
+	LineCode          string        `json:"line_code"`
+	Position          *NotePosition `json:"position"`
+}
+
+// DraftNotesService handles communication with the draft notes related methods
+// of the GitLab API.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/draft_notes.html#list-all-merge-request-draft-notes
+type DraftNotesService struct {
+	client *Client
+}
+
+// ListDraftNotesOptions represents the available ListDraftNotes()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/draft_notes.html#list-all-merge-request-draft-notes
+type ListDraftNotesOptions struct {
+	ListOptions
+	OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"`
+	Sort    *string `url:"sort,omitempty" json:"sort,omitempty"`
+}
+
+// ListDraftNotes gets a list of all draft notes for a merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/draft_notes.html#list-all-merge-request-draft-notes
+func (s *DraftNotesService) ListDraftNotes(pid interface{}, mergeRequest int, opt *ListDraftNotesOptions, options ...RequestOptionFunc) ([]*DraftNote, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_requests/%d/draft_notes", PathEscape(project), mergeRequest)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var n []*DraftNote
+	resp, err := s.client.Do(req, &n)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return n, resp, nil
+}
+
+// GetDraftNote gets a single draft note for a merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/draft_notes.html#get-a-single-draft-note
+func (s *DraftNotesService) GetDraftNote(pid interface{}, mergeRequest int, note int, options ...RequestOptionFunc) (*DraftNote, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_requests/%d/draft_notes/%d", PathEscape(project), mergeRequest, note)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	n := new(DraftNote)
+	resp, err := s.client.Do(req, &n)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return n, resp, nil
+}
+
+// CreateDraftNoteOptions represents the available CreateDraftNote()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/draft_notes.html#create-a-draft-note
+type CreateDraftNoteOptions struct {
+	Note                  *string          `url:"note" json:"note"`
+	CommitID              *string          `url:"commit_id,omitempty" json:"commit_id,omitempty"`
+	InReplyToDiscussionID *string          `url:"in_reply_to_discussion_id,omitempty" json:"in_reply_to_discussion_id,omitempty"`
+	ResolveDiscussion     *bool            `url:"resolve_discussion,omitempty" json:"resolve_discussion,omitempty"`
+	Position              *PositionOptions `url:"position,omitempty" json:"position,omitempty"`
+}
+
+// CreateDraftNote creates a draft note for a merge request.
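+//
+// A minimal usage sketch (the project path and merge request IID are
+// illustrative):
+//
+//	draft, _, err := client.DraftNotes.CreateDraftNote(
+//		"my-group/my-project", 42,
+//		&gitlab.CreateDraftNoteOptions{
+//			Note: gitlab.String("Consider extracting this into a helper."),
+//		},
+//	)
+//	if err != nil { ... }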
+// +// Gitlab API docs: +// https://docs.gitlab.com/ee/api/draft_notes.html#create-a-draft-note +func (s *DraftNotesService) CreateDraftNote(pid interface{}, mergeRequest int, opt *CreateDraftNoteOptions, options ...RequestOptionFunc) (*DraftNote, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/draft_notes", PathEscape(project), mergeRequest) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + n := new(DraftNote) + resp, err := s.client.Do(req, &n) + if err != nil { + return nil, resp, err + } + + return n, resp, nil +} + +// UpdateDraftNoteOptions represents the available UpdateDraftNote() +// options. +// +// Gitlab API docs: +// https://docs.gitlab.com/ee/api/draft_notes.html#create-a-draft-note +type UpdateDraftNoteOptions struct { + Note *string `url:"note,omitempty" json:"note,omitempty"` + Position *PositionOptions `url:"position,omitempty" json:"position,omitempty"` +} + +// UpdateDraftNote updates a draft note for a merge request. +// +// Gitlab API docs: https://docs.gitlab.com/ee/api/draft_notes.html#create-a-draft-note +func (s *DraftNotesService) UpdateDraftNote(pid interface{}, mergeRequest int, note int, opt *UpdateDraftNoteOptions, options ...RequestOptionFunc) (*DraftNote, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/draft_notes/%d", PathEscape(project), mergeRequest, note) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, nil, err + } + + n := new(DraftNote) + resp, err := s.client.Do(req, &n) + if err != nil { + return nil, resp, err + } + + return n, resp, nil +} + +// DeleteDraftNote deletes a single draft note for a merge request. +// +// Gitlab API docs: +// https://docs.gitlab.com/ee/api/draft_notes.html#delete-a-draft-note +func (s *DraftNotesService) DeleteDraftNote(pid interface{}, mergeRequest int, note int, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/draft_notes/%d", PathEscape(project), mergeRequest, note) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// PublishDraftNote publishes a single draft note for a merge request. +// +// Gitlab API docs: +// https://docs.gitlab.com/ee/api/draft_notes.html#publish-a-draft-note +func (s *DraftNotesService) PublishDraftNote(pid interface{}, mergeRequest int, note int, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/draft_notes/%d/publish", PathEscape(project), mergeRequest, note) + + req, err := s.client.NewRequest(http.MethodPut, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// PublishAllDraftNotes publishes all draft notes for a merge request that belong to the user. 
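+//
+// A minimal usage sketch: after queueing several draft notes with
+// CreateDraftNote, a reviewer can publish them in one step (IDs are
+// illustrative):
+//
+//	if _, err := client.DraftNotes.PublishAllDraftNotes("my-group/my-project", 42); err != nil { ... }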
+// +// Gitlab API docs: +// https://docs.gitlab.com/ee/api/draft_notes.html#publish-a-draft-note +func (s *DraftNotesService) PublishAllDraftNotes(pid interface{}, mergeRequest int, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/draft_notes/bulk_publish", PathEscape(project), mergeRequest) + + req, err := s.client.NewRequest(http.MethodPost, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/environments.go b/vendor/github.com/xanzy/go-gitlab/environments.go new file mode 100644 index 0000000000..b6d902f86f --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/environments.go @@ -0,0 +1,238 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" + "time" +) + +// EnvironmentsService handles communication with the environment related methods +// of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/environments.html +type EnvironmentsService struct { + client *Client +} + +// Environment represents a GitLab environment. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/environments.html +type Environment struct { + ID int `json:"id"` + Name string `json:"name"` + Slug string `json:"slug"` + State string `json:"state"` + Tier string `json:"tier"` + ExternalURL string `json:"external_url"` + Project *Project `json:"project"` + CreatedAt *time.Time `json:"created_at"` + UpdatedAt *time.Time `json:"updated_at"` + LastDeployment *Deployment `json:"last_deployment"` +} + +func (env Environment) String() string { + return Stringify(env) +} + +// ListEnvironmentsOptions represents the available ListEnvironments() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/environments.html#list-environments +type ListEnvironmentsOptions struct { + ListOptions + Name *string `url:"name,omitempty" json:"name,omitempty"` + Search *string `url:"search,omitempty" json:"search,omitempty"` + States *string `url:"states,omitempty" json:"states,omitempty"` +} + +// ListEnvironments gets a list of environments from a project, sorted by name +// alphabetically. 
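+//
+// A minimal usage sketch (the filter values are illustrative):
+//
+//	envs, _, err := client.Environments.ListEnvironments(
+//		"my-group/my-project",
+//		&gitlab.ListEnvironmentsOptions{States: gitlab.String("available")},
+//	)
+//	if err != nil { ... }
+//	for _, env := range envs {
+//		fmt.Println(env.Name, env.Tier)
+//	}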
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/environments.html#list-environments
+func (s *EnvironmentsService) ListEnvironments(pid interface{}, opts *ListEnvironmentsOptions, options ...RequestOptionFunc) ([]*Environment, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/environments", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opts, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var envs []*Environment
+	resp, err := s.client.Do(req, &envs)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return envs, resp, nil
+}
+
+// GetEnvironment gets a specific environment from a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/environments.html#get-a-specific-environment
+func (s *EnvironmentsService) GetEnvironment(pid interface{}, environment int, options ...RequestOptionFunc) (*Environment, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/environments/%d", PathEscape(project), environment)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	env := new(Environment)
+	resp, err := s.client.Do(req, env)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return env, resp, nil
+}
+
+// CreateEnvironmentOptions represents the available CreateEnvironment() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/environments.html#create-a-new-environment
+type CreateEnvironmentOptions struct {
+	Name        *string `url:"name,omitempty" json:"name,omitempty"`
+	ExternalURL *string `url:"external_url,omitempty" json:"external_url,omitempty"`
+	Tier        *string `url:"tier,omitempty" json:"tier,omitempty"`
+}
+
+// CreateEnvironment adds an environment to a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/environments.html#create-a-new-environment
+func (s *EnvironmentsService) CreateEnvironment(pid interface{}, opt *CreateEnvironmentOptions, options ...RequestOptionFunc) (*Environment, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/environments", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	env := new(Environment)
+	resp, err := s.client.Do(req, env)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return env, resp, nil
+}
+
+// EditEnvironmentOptions represents the available EditEnvironment() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/environments.html#update-an-existing-environment
+type EditEnvironmentOptions struct {
+	Name        *string `url:"name,omitempty" json:"name,omitempty"`
+	ExternalURL *string `url:"external_url,omitempty" json:"external_url,omitempty"`
+	Tier        *string `url:"tier,omitempty" json:"tier,omitempty"`
+}
+
+// EditEnvironment updates an existing project environment.
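+//
+// A minimal usage sketch (the environment ID and tier value are
+// illustrative):
+//
+//	env, _, err := client.Environments.EditEnvironment(
+//		"my-group/my-project", 12,
+//		&gitlab.EditEnvironmentOptions{Tier: gitlab.String("production")},
+//	)
+//	if err != nil { ... }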
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/environments.html#update-an-existing-environment
+func (s *EnvironmentsService) EditEnvironment(pid interface{}, environment int, opt *EditEnvironmentOptions, options ...RequestOptionFunc) (*Environment, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/environments/%d", PathEscape(project), environment)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	env := new(Environment)
+	resp, err := s.client.Do(req, env)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return env, resp, nil
+}
+
+// DeleteEnvironment removes an environment from a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/environments.html#delete-an-environment
+func (s *EnvironmentsService) DeleteEnvironment(pid interface{}, environment int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/environments/%d", PathEscape(project), environment)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// StopEnvironmentOptions represents the available StopEnvironment() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/environments.html#stop-an-environment
+type StopEnvironmentOptions struct {
+	Force *bool `url:"force,omitempty" json:"force,omitempty"`
+}
+
+// StopEnvironment stops an environment within a specific project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/environments.html#stop-an-environment
+func (s *EnvironmentsService) StopEnvironment(pid interface{}, environmentID int, opt *StopEnvironmentOptions, options ...RequestOptionFunc) (*Environment, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/environments/%d/stop", PathEscape(project), environmentID)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	env := new(Environment)
+	resp, err := s.client.Do(req, env)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return env, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/epic_issues.go b/vendor/github.com/xanzy/go-gitlab/epic_issues.go
new file mode 100644
index 0000000000..545357bd78
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/epic_issues.go
@@ -0,0 +1,152 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+)
+
+// EpicIssuesService handles communication with the epic issue related methods
+// of the GitLab API.
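+//
+// A minimal usage sketch (the group path, epic IID, and issue ID are
+// illustrative):
+//
+//	assignment, _, err := client.EpicIssues.AssignEpicIssue("my-group", 3, 512)
+//	if err != nil { ... }
+//	fmt.Println(assignment.Epic.Title, assignment.Issue.Title)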
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/epic_issues.html +type EpicIssuesService struct { + client *Client +} + +// EpicIssueAssignment contains both the epic and issue objects returned from +// Gitlab with the assignment ID. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/epic_issues.html +type EpicIssueAssignment struct { + ID int `json:"id"` + Epic *Epic `json:"epic"` + Issue *Issue `json:"issue"` +} + +// ListEpicIssues get a list of epic issues. +// +// Gitlab API docs: +// https://docs.gitlab.com/ee/api/epic_issues.html#list-issues-for-an-epic +func (s *EpicIssuesService) ListEpicIssues(gid interface{}, epic int, opt *ListOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/epics/%d/issues", PathEscape(group), epic) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var is []*Issue + resp, err := s.client.Do(req, &is) + if err != nil { + return nil, resp, err + } + + return is, resp, nil +} + +// AssignEpicIssue assigns an existing issue to an epic. +// +// Gitlab API Docs: +// https://docs.gitlab.com/ee/api/epic_issues.html#assign-an-issue-to-the-epic +func (s *EpicIssuesService) AssignEpicIssue(gid interface{}, epic, issue int, options ...RequestOptionFunc) (*EpicIssueAssignment, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/epics/%d/issues/%d", PathEscape(group), epic, issue) + + req, err := s.client.NewRequest(http.MethodPost, u, nil, options) + if err != nil { + return nil, nil, err + } + + a := new(EpicIssueAssignment) + resp, err := s.client.Do(req, a) + if err != nil { + return nil, resp, err + } + + return a, resp, nil +} + +// RemoveEpicIssue removes an issue from an epic. +// +// Gitlab API Docs: +// https://docs.gitlab.com/ee/api/epic_issues.html#remove-an-issue-from-the-epic +func (s *EpicIssuesService) RemoveEpicIssue(gid interface{}, epic, epicIssue int, options ...RequestOptionFunc) (*EpicIssueAssignment, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/epics/%d/issues/%d", PathEscape(group), epic, epicIssue) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, nil, err + } + + a := new(EpicIssueAssignment) + resp, err := s.client.Do(req, a) + if err != nil { + return nil, resp, err + } + + return a, resp, nil +} + +// UpdateEpicIsssueAssignmentOptions describes the UpdateEpicIssueAssignment() +// options. +// +// Gitlab API Docs: +// https://docs.gitlab.com/ee/api/epic_issues.html#update-epic---issue-association +type UpdateEpicIsssueAssignmentOptions struct { + *ListOptions + MoveBeforeID *int `url:"move_before_id,omitempty" json:"move_before_id,omitempty"` + MoveAfterID *int `url:"move_after_id,omitempty" json:"move_after_id,omitempty"` +} + +// UpdateEpicIssueAssignment moves an issue before or after another issue in an +// epic issue list. 
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/epic_issues.html#update-epic---issue-association
+func (s *EpicIssuesService) UpdateEpicIssueAssignment(gid interface{}, epic, epicIssue int, opt *UpdateEpicIsssueAssignmentOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/epics/%d/issues/%d", PathEscape(group), epic, epicIssue)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var is []*Issue
+	resp, err := s.client.Do(req, &is)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return is, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/epics.go b/vendor/github.com/xanzy/go-gitlab/epics.go
new file mode 100644
index 0000000000..684ffb3343
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/epics.go
@@ -0,0 +1,275 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// EpicsService handles communication with the epic related methods
+// of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/epics.html
+type EpicsService struct {
+	client *Client
+}
+
+// EpicAuthor represents an author of the epic.
+type EpicAuthor struct {
+	ID        int    `json:"id"`
+	State     string `json:"state"`
+	WebURL    string `json:"web_url"`
+	Name      string `json:"name"`
+	AvatarURL string `json:"avatar_url"`
+	Username  string `json:"username"`
+}
+
+// Epic represents a GitLab epic.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/epics.html
+type Epic struct {
+	ID                      int         `json:"id"`
+	IID                     int         `json:"iid"`
+	GroupID                 int         `json:"group_id"`
+	ParentID                int         `json:"parent_id"`
+	Title                   string      `json:"title"`
+	Description             string      `json:"description"`
+	State                   string      `json:"state"`
+	Confidential            bool        `json:"confidential"`
+	WebURL                  string      `json:"web_url"`
+	Author                  *EpicAuthor `json:"author"`
+	StartDate               *ISOTime    `json:"start_date"`
+	StartDateIsFixed        bool        `json:"start_date_is_fixed"`
+	StartDateFixed          *ISOTime    `json:"start_date_fixed"`
+	StartDateFromMilestones *ISOTime    `json:"start_date_from_milestones"`
+	DueDate                 *ISOTime    `json:"due_date"`
+	DueDateIsFixed          bool        `json:"due_date_is_fixed"`
+	DueDateFixed            *ISOTime    `json:"due_date_fixed"`
+	DueDateFromMilestones   *ISOTime    `json:"due_date_from_milestones"`
+	CreatedAt               *time.Time  `json:"created_at"`
+	UpdatedAt               *time.Time  `json:"updated_at"`
+	ClosedAt                *time.Time  `json:"closed_at"`
+	Labels                  []string    `json:"labels"`
+	Upvotes                 int         `json:"upvotes"`
+	Downvotes               int         `json:"downvotes"`
+	UserNotesCount          int         `json:"user_notes_count"`
+	URL                     string      `json:"url"`
+}
+
+func (e Epic) String() string {
+	return Stringify(e)
+}
+
+// ListGroupEpicsOptions represents the available ListGroupEpics() options.
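+//
+// A minimal usage sketch (the filter values are illustrative):
+//
+//	epics, _, err := client.Epics.ListGroupEpics(
+//		"my-group",
+//		&gitlab.ListGroupEpicsOptions{
+//			ListOptions: gitlab.ListOptions{PerPage: 50},
+//			State:       gitlab.String("opened"),
+//		},
+//	)
+//	if err != nil { ... }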
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/epics.html#list-epics-for-a-group +type ListGroupEpicsOptions struct { + ListOptions + AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` + Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` + WithLabelDetails *bool `url:"with_labels_details,omitempty" json:"with_labels_details,omitempty"` + OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` + Sort *string `url:"sort,omitempty" json:"sort,omitempty"` + Search *string `url:"search,omitempty" json:"search,omitempty"` + State *string `url:"state,omitempty" json:"state,omitempty"` + CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"` + CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` + UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"` + UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"` + IncludeAncestorGroups *bool `url:"include_ancestor_groups,omitempty" json:"include_ancestor_groups,omitempty"` + IncludeDescendantGroups *bool `url:"include_descendant_groups,omitempty" json:"include_descendant_groups,omitempty"` + MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"` +} + +// ListGroupEpics gets a list of group epics. This function accepts pagination +// parameters page and per_page to return the list of group epics. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/epics.html#list-epics-for-a-group +func (s *EpicsService) ListGroupEpics(gid interface{}, opt *ListGroupEpicsOptions, options ...RequestOptionFunc) ([]*Epic, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/epics", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var es []*Epic + resp, err := s.client.Do(req, &es) + if err != nil { + return nil, resp, err + } + + return es, resp, nil +} + +// GetEpic gets a single group epic. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/epics.html#single-epic +func (s *EpicsService) GetEpic(gid interface{}, epic int, options ...RequestOptionFunc) (*Epic, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/epics/%d", PathEscape(group), epic) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + e := new(Epic) + resp, err := s.client.Do(req, e) + if err != nil { + return nil, resp, err + } + + return e, resp, nil +} + +// GetEpicLinks gets all child epics of an epic. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/epic_links.html +func (s *EpicsService) GetEpicLinks(gid interface{}, epic int, options ...RequestOptionFunc) ([]*Epic, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/epics/%d/epics", PathEscape(group), epic) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + var e []*Epic + resp, err := s.client.Do(req, &e) + if err != nil { + return nil, resp, err + } + + return e, resp, nil +} + +// CreateEpicOptions represents the available CreateEpic() options. 
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/epics.html#new-epic +type CreateEpicOptions struct { + Title *string `url:"title,omitempty" json:"title,omitempty"` + Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + Color *string `url:"color,omitempty" json:"color,omitempty"` + Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` + CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"` + StartDateIsFixed *bool `url:"start_date_is_fixed,omitempty" json:"start_date_is_fixed,omitempty"` + StartDateFixed *ISOTime `url:"start_date_fixed,omitempty" json:"start_date_fixed,omitempty"` + DueDateIsFixed *bool `url:"due_date_is_fixed,omitempty" json:"due_date_is_fixed,omitempty"` + DueDateFixed *ISOTime `url:"due_date_fixed,omitempty" json:"due_date_fixed,omitempty"` + ParentID *int `url:"parent_id,omitempty" json:"parent_id,omitempty"` +} + +// CreateEpic creates a new group epic. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/epics.html#new-epic +func (s *EpicsService) CreateEpic(gid interface{}, opt *CreateEpicOptions, options ...RequestOptionFunc) (*Epic, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/epics", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + e := new(Epic) + resp, err := s.client.Do(req, e) + if err != nil { + return nil, resp, err + } + + return e, resp, nil +} + +// UpdateEpicOptions represents the available UpdateEpic() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/epics.html#update-epic +type UpdateEpicOptions struct { + AddLabels *LabelOptions `url:"add_labels,omitempty" json:"add_labels,omitempty"` + Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + DueDateFixed *ISOTime `url:"due_date_fixed,omitempty" json:"due_date_fixed,omitempty"` + DueDateIsFixed *bool `url:"due_date_is_fixed,omitempty" json:"due_date_is_fixed,omitempty"` + Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` + ParentID *int `url:"parent_id,omitempty" json:"parent_id,omitempty"` + RemoveLabels *LabelOptions `url:"remove_labels,omitempty" json:"remove_labels,omitempty"` + StartDateFixed *ISOTime `url:"start_date_fixed,omitempty" json:"start_date_fixed,omitempty"` + StartDateIsFixed *bool `url:"start_date_is_fixed,omitempty" json:"start_date_is_fixed,omitempty"` + StateEvent *string `url:"state_event,omitempty" json:"state_event,omitempty"` + Title *string `url:"title,omitempty" json:"title,omitempty"` + UpdatedAt *time.Time `url:"updated_at,omitempty" json:"updated_at,omitempty"` + Color *string `url:"color,omitempty" json:"color,omitempty"` +} + +// UpdateEpic updates an existing group epic. This function is also used +// to mark an epic as closed. 
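+//
+// A minimal sketch of closing an epic by sending the "close" state event
+// (IDs are illustrative):
+//
+//	epic, _, err := client.Epics.UpdateEpic(
+//		"my-group", 3,
+//		&gitlab.UpdateEpicOptions{StateEvent: gitlab.String("close")},
+//	)
+//	if err != nil { ... }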
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/epics.html#update-epic +func (s *EpicsService) UpdateEpic(gid interface{}, epic int, opt *UpdateEpicOptions, options ...RequestOptionFunc) (*Epic, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/epics/%d", PathEscape(group), epic) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, nil, err + } + + e := new(Epic) + resp, err := s.client.Do(req, e) + if err != nil { + return nil, resp, err + } + + return e, resp, nil +} + +// DeleteEpic deletes a single group epic. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/epics.html#delete-epic +func (s *EpicsService) DeleteEpic(gid interface{}, epic int, options ...RequestOptionFunc) (*Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("groups/%s/epics/%d", PathEscape(group), epic) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/error_tracking.go b/vendor/github.com/xanzy/go-gitlab/error_tracking.go new file mode 100644 index 0000000000..fed334a0f8 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/error_tracking.go @@ -0,0 +1,196 @@ +// +// Copyright 2022, Ryan Glab +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" +) + +// ErrorTrackingService handles communication with the error tracking +// methods of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/error_tracking.html +type ErrorTrackingService struct { + client *Client +} + +// ErrorTrackingClientKey represents an error tracking client key. +// +// GitLab docs: +// https://docs.gitlab.com/ee/api/error_tracking.html#error-tracking-client-keys +type ErrorTrackingClientKey struct { + ID int `json:"id"` + Active bool `json:"active"` + PublicKey string `json:"public_key"` + SentryDsn string `json:"sentry_dsn"` +} + +func (p ErrorTrackingClientKey) String() string { + return Stringify(p) +} + +// ErrorTrackingSettings represents error tracking settings for a GitLab project. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/error_tracking.html#error-tracking-project-settings +type ErrorTrackingSettings struct { + Active bool `json:"active"` + ProjectName string `json:"project_name"` + SentryExternalURL string `json:"sentry_external_url"` + APIURL string `json:"api_url"` + Integrated bool `json:"integrated"` +} + +func (p ErrorTrackingSettings) String() string { + return Stringify(p) +} + +// GetErrorTrackingSettings gets error tracking settings. 
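+//
+// A minimal usage sketch (the project path is illustrative):
+//
+//	settings, _, err := client.ErrorTracking.GetErrorTrackingSettings("my-group/my-project")
+//	if err != nil { ... }
+//	fmt.Println(settings.Active, settings.ProjectName)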
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/error_tracking.html#get-error-tracking-settings +func (s *ErrorTrackingService) GetErrorTrackingSettings(pid interface{}, options ...RequestOptionFunc) (*ErrorTrackingSettings, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/error_tracking/settings", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + ets := new(ErrorTrackingSettings) + resp, err := s.client.Do(req, ets) + if err != nil { + return nil, resp, err + } + + return ets, resp, nil +} + +// EnableDisableErrorTrackingOptions represents the available +// EnableDisableErrorTracking() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/error_tracking.html#enable-or-disable-the-error-tracking-project-settings +type EnableDisableErrorTrackingOptions struct { + Active *bool `url:"active,omitempty" json:"active,omitempty"` + Integrated *bool `url:"integrated,omitempty" json:"integrated,omitempty"` +} + +// EnableDisableErrorTracking allows you to enable or disable the error tracking +// settings for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/error_tracking.html#enable-or-disable-the-error-tracking-project-settings +func (s *ErrorTrackingService) EnableDisableErrorTracking(pid interface{}, opt *EnableDisableErrorTrackingOptions, options ...RequestOptionFunc) (*ErrorTrackingSettings, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/error_tracking/settings", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPatch, u, opt, options) + if err != nil { + return nil, nil, err + } + + ets := new(ErrorTrackingSettings) + resp, err := s.client.Do(req, &ets) + if err != nil { + return nil, resp, err + } + + return ets, resp, nil +} + +// ListClientKeysOptions represents the available ListClientKeys() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/error_tracking.html#list-project-client-keys +type ListClientKeysOptions ListOptions + +// ListClientKeys lists error tracking project client keys. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/error_tracking.html#list-project-client-keys +func (s *ErrorTrackingService) ListClientKeys(pid interface{}, opt *ListClientKeysOptions, options ...RequestOptionFunc) ([]*ErrorTrackingClientKey, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/error_tracking/client_keys", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var cks []*ErrorTrackingClientKey + resp, err := s.client.Do(req, &cks) + if err != nil { + return nil, resp, err + } + + return cks, resp, nil +} + +// CreateClientKey creates a new client key for a project. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/error_tracking.html#create-a-client-key +func (s *ErrorTrackingService) CreateClientKey(pid interface{}, options ...RequestOptionFunc) (*ErrorTrackingClientKey, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/error_tracking/client_keys", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPost, u, nil, options) + if err != nil { + return nil, nil, err + } + + ck := new(ErrorTrackingClientKey) + resp, err := s.client.Do(req, ck) + if err != nil { + return nil, resp, err + } + + return ck, resp, nil +} + +// DeleteClientKey removes a client key from the project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/error_tracking.html#delete-a-client-key +func (s *ErrorTrackingService) DeleteClientKey(pid interface{}, keyID int, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/error_tracking/client_keys/%d", PathEscape(project), keyID) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/event_parsing.go b/vendor/github.com/xanzy/go-gitlab/event_parsing.go new file mode 100644 index 0000000000..0f474211d3 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/event_parsing.go @@ -0,0 +1,312 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "encoding/json" + "fmt" + "net/http" +) + +// EventType represents a Gitlab event type. +type EventType string + +// List of available event types. 
+const ( + EventConfidentialIssue EventType = "Confidential Issue Hook" + EventConfidentialNote EventType = "Confidential Note Hook" + EventTypeBuild EventType = "Build Hook" + EventTypeDeployment EventType = "Deployment Hook" + EventTypeFeatureFlag EventType = "Feature Flag Hook" + EventTypeIssue EventType = "Issue Hook" + EventTypeJob EventType = "Job Hook" + EventTypeMember EventType = "Member Hook" + EventTypeMergeRequest EventType = "Merge Request Hook" + EventTypeNote EventType = "Note Hook" + EventTypePipeline EventType = "Pipeline Hook" + EventTypePush EventType = "Push Hook" + EventTypeRelease EventType = "Release Hook" + EventTypeResourceAccessToken EventType = "Resource Access Token Hook" + EventTypeServiceHook EventType = "Service Hook" + EventTypeSubGroup EventType = "Subgroup Hook" + EventTypeSystemHook EventType = "System Hook" + EventTypeTagPush EventType = "Tag Push Hook" + EventTypeWikiPage EventType = "Wiki Page Hook" +) + +const ( + eventObjectKindPush = "push" + eventObjectKindTagPush = "tag_push" + eventObjectKindMergeRequest = "merge_request" +) + +const ( + noteableTypeCommit = "Commit" + noteableTypeIssue = "Issue" + noteableTypeMergeRequest = "MergeRequest" + noteableTypeSnippet = "Snippet" +) + +type noteEvent struct { + ObjectKind string `json:"object_kind"` + ObjectAttributes struct { + NoteableType string `json:"noteable_type"` + } `json:"object_attributes"` +} + +type serviceEvent struct { + ObjectKind string `json:"object_kind"` +} + +const eventTokenHeader = "X-Gitlab-Token" + +// HookEventToken returns the token for the given request. +func HookEventToken(r *http.Request) string { + return r.Header.Get(eventTokenHeader) +} + +const eventTypeHeader = "X-Gitlab-Event" + +// HookEventType returns the event type for the given request. +func HookEventType(r *http.Request) EventType { + return EventType(r.Header.Get(eventTypeHeader)) +} + +// ParseHook tries to parse both web- and system hooks. +// +// Example usage: +// +// func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { +// payload, err := ioutil.ReadAll(r.Body) +// if err != nil { ... } +// event, err := gitlab.ParseHook(gitlab.HookEventType(r), payload) +// if err != nil { ... } +// switch event := event.(type) { +// case *gitlab.PushEvent: +// processPushEvent(event) +// case *gitlab.MergeEvent: +// processMergeEvent(event) +// ... +// } +// } +func ParseHook(eventType EventType, payload []byte) (event interface{}, err error) { + switch eventType { + case EventTypeSystemHook: + return ParseSystemhook(payload) + default: + return ParseWebhook(eventType, payload) + } +} + +// ParseSystemhook parses the event payload. For recognized event types, a +// value of the corresponding struct type will be returned. An error will be +// returned for unrecognized event types. +// +// Example usage: +// +// func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { +// payload, err := ioutil.ReadAll(r.Body) +// if err != nil { ... } +// event, err := gitlab.ParseSystemhook(payload) +// if err != nil { ... } +// switch event := event.(type) { +// case *gitlab.PushSystemEvent: +// processPushSystemEvent(event) +// case *gitlab.MergeSystemEvent: +// processMergeSystemEvent(event) +// ... 
+// } +// } +func ParseSystemhook(payload []byte) (event interface{}, err error) { + e := &systemHookEvent{} + err = json.Unmarshal(payload, e) + if err != nil { + return nil, err + } + + switch e.EventName { + case eventObjectKindPush: + event = &PushSystemEvent{} + case eventObjectKindTagPush: + event = &TagPushSystemEvent{} + case "repository_update": + event = &RepositoryUpdateSystemEvent{} + case + "project_create", + "project_update", + "project_destroy", + "project_transfer", + "project_rename": + event = &ProjectSystemEvent{} + case + "group_create", + "group_destroy", + "group_rename": + event = &GroupSystemEvent{} + case "key_create", "key_destroy": + event = &KeySystemEvent{} + case + "user_create", + "user_destroy", + "user_rename", + "user_failed_login": + event = &UserSystemEvent{} + case + "user_add_to_group", + "user_remove_from_group", + "user_update_for_group": + event = &UserGroupSystemEvent{} + case + "user_add_to_team", + "user_remove_from_team", + "user_update_for_team": + event = &UserTeamSystemEvent{} + default: + switch e.ObjectKind { + case string(MergeRequestEventTargetType): + event = &MergeEvent{} + default: + return nil, fmt.Errorf("unexpected system hook type %s", e.EventName) + } + } + + if err := json.Unmarshal(payload, event); err != nil { + return nil, err + } + + return event, nil +} + +// WebhookEventType returns the event type for the given request. +func WebhookEventType(r *http.Request) EventType { + return EventType(r.Header.Get(eventTypeHeader)) +} + +// ParseWebhook parses the event payload. For recognized event types, a +// value of the corresponding struct type will be returned. An error will +// be returned for unrecognized event types. +// +// Example usage: +// +// func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { +// payload, err := ioutil.ReadAll(r.Body) +// if err != nil { ... } +// event, err := gitlab.ParseWebhook(gitlab.HookEventType(r), payload) +// if err != nil { ... } +// switch event := event.(type) { +// case *gitlab.PushEvent: +// processPushEvent(event) +// case *gitlab.MergeEvent: +// processMergeEvent(event) +// ... 
+// } +// } +func ParseWebhook(eventType EventType, payload []byte) (event interface{}, err error) { + switch eventType { + case EventTypeBuild: + event = &BuildEvent{} + case EventTypeDeployment: + event = &DeploymentEvent{} + case EventTypeFeatureFlag: + event = &FeatureFlagEvent{} + case EventTypeIssue, EventConfidentialIssue: + event = &IssueEvent{} + case EventTypeJob: + event = &JobEvent{} + case EventTypeMember: + event = &MemberEvent{} + case EventTypeMergeRequest: + event = &MergeEvent{} + case EventTypeNote, EventConfidentialNote: + note := ¬eEvent{} + err := json.Unmarshal(payload, note) + if err != nil { + return nil, err + } + + if note.ObjectKind != string(NoteEventTargetType) { + return nil, fmt.Errorf("unexpected object kind %s", note.ObjectKind) + } + + switch note.ObjectAttributes.NoteableType { + case noteableTypeCommit: + event = &CommitCommentEvent{} + case noteableTypeMergeRequest: + event = &MergeCommentEvent{} + case noteableTypeIssue: + event = &IssueCommentEvent{} + case noteableTypeSnippet: + event = &SnippetCommentEvent{} + default: + return nil, fmt.Errorf("unexpected noteable type %s", note.ObjectAttributes.NoteableType) + } + case EventTypePipeline: + event = &PipelineEvent{} + case EventTypePush: + event = &PushEvent{} + case EventTypeRelease: + event = &ReleaseEvent{} + case EventTypeResourceAccessToken: + data := map[string]interface{}{} + err := json.Unmarshal(payload, &data) + if err != nil { + return nil, err + } + + _, groupEvent := data["group"] + _, projectEvent := data["project"] + + switch { + case groupEvent: + event = &GroupResourceAccessTokenEvent{} + case projectEvent: + event = &ProjectResourceAccessTokenEvent{} + default: + return nil, fmt.Errorf("unexpected resource access token payload") + } + case EventTypeServiceHook: + service := &serviceEvent{} + err := json.Unmarshal(payload, service) + if err != nil { + return nil, err + } + switch service.ObjectKind { + case eventObjectKindPush: + event = &PushEvent{} + case eventObjectKindTagPush: + event = &TagEvent{} + case eventObjectKindMergeRequest: + event = &MergeEvent{} + default: + return nil, fmt.Errorf("unexpected service type %s", service.ObjectKind) + } + case EventTypeSubGroup: + event = &SubGroupEvent{} + case EventTypeTagPush: + event = &TagEvent{} + case EventTypeWikiPage: + event = &WikiPageEvent{} + default: + return nil, fmt.Errorf("unexpected event type: %s", eventType) + } + + if err := json.Unmarshal(payload, event); err != nil { + return nil, err + } + + return event, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/event_systemhook_types.go b/vendor/github.com/xanzy/go-gitlab/event_systemhook_types.go new file mode 100644 index 0000000000..a653d15b58 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/event_systemhook_types.go @@ -0,0 +1,249 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package gitlab + +import "time" + +// systemHookEvent is used to pre-process events to determine the +// system hook event type. +type systemHookEvent struct { + BaseSystemEvent + ObjectKind string `json:"object_kind"` +} + +// BaseSystemEvent contains system hook's common properties. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/administration/system_hooks.html +type BaseSystemEvent struct { + EventName string `json:"event_name"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` +} + +// ProjectSystemEvent represents a project system event. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/administration/system_hooks.html +type ProjectSystemEvent struct { + BaseSystemEvent + Name string `json:"name"` + Path string `json:"path"` + PathWithNamespace string `json:"path_with_namespace"` + ProjectID int `json:"project_id"` + OwnerName string `json:"owner_name"` + OwnerEmail string `json:"owner_email"` + ProjectVisibility string `json:"project_visibility"` + OldPathWithNamespace string `json:"old_path_with_namespace,omitempty"` +} + +// GroupSystemEvent represents a group system event. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/administration/system_hooks.html +type GroupSystemEvent struct { + BaseSystemEvent + Name string `json:"name"` + Path string `json:"path"` + PathWithNamespace string `json:"full_path"` + GroupID int `json:"group_id"` + OwnerName string `json:"owner_name"` + OwnerEmail string `json:"owner_email"` + ProjectVisibility string `json:"project_visibility"` + OldPath string `json:"old_path,omitempty"` + OldPathWithNamespace string `json:"old_full_path,omitempty"` +} + +// KeySystemEvent represents a key system event. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/administration/system_hooks.html +type KeySystemEvent struct { + BaseSystemEvent + ID int `json:"id"` + Username string `json:"username"` + Key string `json:"key"` +} + +// UserSystemEvent represents a user system event. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/administration/system_hooks.html +type UserSystemEvent struct { + BaseSystemEvent + ID int `json:"user_id"` + Name string `json:"name"` + Username string `json:"username"` + OldUsername string `json:"old_username,omitempty"` + Email string `json:"email"` + State string `json:"state,omitempty"` +} + +// UserGroupSystemEvent represents a user group system event. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/administration/system_hooks.html +type UserGroupSystemEvent struct { + BaseSystemEvent + ID int `json:"user_id"` + Name string `json:"user_name"` + Username string `json:"user_username"` + Email string `json:"user_email"` + GroupID int `json:"group_id"` + GroupName string `json:"group_name"` + GroupPath string `json:"group_path"` + GroupAccess string `json:"group_access"` +} + +// UserTeamSystemEvent represents a user team system event. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/administration/system_hooks.html +type UserTeamSystemEvent struct { + BaseSystemEvent + ID int `json:"user_id"` + Name string `json:"user_name"` + Username string `json:"user_username"` + Email string `json:"user_email"` + ProjectID int `json:"project_id"` + ProjectName string `json:"project_name"` + ProjectPath string `json:"project_path"` + ProjectPathWithNamespace string `json:"project_path_with_namespace"` + ProjectVisibility string `json:"project_visibility"` + AccessLevel string `json:"access_level"` +} + +// PushSystemEvent represents a push system event. 
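+//
+// A minimal usage sketch (payload handling as in the ParseSystemhook
+// example above; error handling elided):
+//
+//	event, err := gitlab.ParseSystemhook(payload)
+//	if err != nil { ... }
+//	if push, ok := event.(*gitlab.PushSystemEvent); ok {
+//		fmt.Printf("push to %s in %s\n", push.Ref, push.Project.PathWithNamespace)
+//	}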
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/administration/system_hooks.html#push-events +type PushSystemEvent struct { + BaseSystemEvent + Before string `json:"before"` + After string `json:"after"` + Ref string `json:"ref"` + CheckoutSHA string `json:"checkout_sha"` + UserID int `json:"user_id"` + UserName string `json:"user_name"` + UserUsername string `json:"user_username"` + UserEmail string `json:"user_email"` + UserAvatar string `json:"user_avatar"` + ProjectID int `json:"project_id"` + Project struct { + Name string `json:"name"` + Description string `json:"description"` + WebURL string `json:"web_url"` + AvatarURL string `json:"avatar_url"` + GitHTTPURL string `json:"git_http_url"` + GitSSHURL string `json:"git_ssh_url"` + Namespace string `json:"namespace"` + VisibilityLevel int `json:"visibility_level"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + Homepage string `json:"homepage"` + URL string `json:"url"` + } `json:"project"` + Commits []struct { + ID string `json:"id"` + Message string `json:"message"` + Timestamp time.Time `json:"timestamp"` + URL string `json:"url"` + Author struct { + Name string `json:"name"` + Email string `json:"email"` + } `json:"author"` + } `json:"commits"` + TotalCommitsCount int `json:"total_commits_count"` +} + +// TagPushSystemEvent represents a tag push system event. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/administration/system_hooks.html#tag-events +type TagPushSystemEvent struct { + BaseSystemEvent + Before string `json:"before"` + After string `json:"after"` + Ref string `json:"ref"` + CheckoutSHA string `json:"checkout_sha"` + UserID int `json:"user_id"` + UserName string `json:"user_name"` + UserUsername string `json:"user_username"` + UserEmail string `json:"user_email"` + UserAvatar string `json:"user_avatar"` + ProjectID int `json:"project_id"` + Project struct { + Name string `json:"name"` + Description string `json:"description"` + WebURL string `json:"web_url"` + AvatarURL string `json:"avatar_url"` + GitHTTPURL string `json:"git_http_url"` + GitSSHURL string `json:"git_ssh_url"` + Namespace string `json:"namespace"` + VisibilityLevel int `json:"visibility_level"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + Homepage string `json:"homepage"` + URL string `json:"url"` + } `json:"project"` + Commits []struct { + ID string `json:"id"` + Message string `json:"message"` + Timestamp time.Time `json:"timestamp"` + URL string `json:"url"` + Author struct { + Name string `json:"name"` + Email string `json:"email"` + } `json:"author"` + } `json:"commits"` + TotalCommitsCount int `json:"total_commits_count"` +} + +// RepositoryUpdateSystemEvent represents a repository updated system event. 
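+//
+// ParseSystemhook returns this type for payloads whose event_name is
+// repository_update. A minimal sketch (error handling elided):
+//
+//	event, err := gitlab.ParseSystemhook(payload)
+//	if err != nil { ... }
+//	if ru, ok := event.(*gitlab.RepositoryUpdateSystemEvent); ok {
+//		fmt.Printf("%d ref(s) updated in %s\n", len(ru.Changes), ru.Project.Name)
+//	}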
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/administration/system_hooks.html#repository-update-events +type RepositoryUpdateSystemEvent struct { + BaseSystemEvent + UserID int `json:"user_id"` + UserName string `json:"user_name"` + UserEmail string `json:"user_email"` + UserAvatar string `json:"user_avatar"` + ProjectID int `json:"project_id"` + Project struct { + ID int `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + WebURL string `json:"web_url"` + AvatarURL string `json:"avatar_url"` + GitHTTPURL string `json:"git_http_url"` + GitSSHURL string `json:"git_ssh_url"` + Namespace string `json:"namespace"` + VisibilityLevel int `json:"visibility_level"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + CiConfigPath string `json:"ci_config_path"` + Homepage string `json:"homepage"` + URL string `json:"url"` + } `json:"project"` + Changes []struct { + Before string `json:"before"` + After string `json:"after"` + Ref string `json:"ref"` + } `json:"changes"` + Refs []string `json:"refs"` +} diff --git a/vendor/github.com/xanzy/go-gitlab/event_webhook_types.go b/vendor/github.com/xanzy/go-gitlab/event_webhook_types.go new file mode 100644 index 0000000000..c4a8e4aeb9 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/event_webhook_types.go @@ -0,0 +1,1265 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "encoding/json" + "fmt" + "strconv" + "time" +) + +// StateID identifies the state of an issue or merge request. +// +// There are no GitLab API docs on the subject, but the mappings can be found in +// GitLab's codebase: +// https://gitlab.com/gitlab-org/gitlab-foss/-/blob/ba5be4989e/app/models/concerns/issuable.rb#L39-42 +type StateID int + +const ( + StateIDNone StateID = 0 + StateIDOpen StateID = 1 + StateIDClosed StateID = 2 + StateIDMerged StateID = 3 + StateIDLocked StateID = 4 +) + +// BuildEvent represents a build event. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#job-events +type BuildEvent struct { + ObjectKind string `json:"object_kind"` + Ref string `json:"ref"` + Tag bool `json:"tag"` + BeforeSHA string `json:"before_sha"` + SHA string `json:"sha"` + BuildID int `json:"build_id"` + BuildName string `json:"build_name"` + BuildStage string `json:"build_stage"` + BuildStatus string `json:"build_status"` + BuildCreatedAt string `json:"build_created_at"` + BuildStartedAt string `json:"build_started_at"` + BuildFinishedAt string `json:"build_finished_at"` + BuildDuration float64 `json:"build_duration"` + BuildAllowFailure bool `json:"build_allow_failure"` + ProjectID int `json:"project_id"` + ProjectName string `json:"project_name"` + User *EventUser `json:"user"` + Commit struct { + ID int `json:"id"` + SHA string `json:"sha"` + Message string `json:"message"` + AuthorName string `json:"author_name"` + AuthorEmail string `json:"author_email"` + Status string `json:"status"` + Duration int `json:"duration"` + StartedAt string `json:"started_at"` + FinishedAt string `json:"finished_at"` + } `json:"commit"` + Repository *Repository `json:"repository"` +} + +// CommitCommentEvent represents a comment on a commit event. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#comment-on-a-commit +type CommitCommentEvent struct { + ObjectKind string `json:"object_kind"` + EventType string `json:"event_type"` + User *User `json:"user"` + ProjectID int `json:"project_id"` + Project struct { + ID int `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + AvatarURL string `json:"avatar_url"` + GitSSHURL string `json:"git_ssh_url"` + GitHTTPURL string `json:"git_http_url"` + Namespace string `json:"namespace"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + Homepage string `json:"homepage"` + URL string `json:"url"` + SSHURL string `json:"ssh_url"` + HTTPURL string `json:"http_url"` + WebURL string `json:"web_url"` + Visibility VisibilityValue `json:"visibility"` + } `json:"project"` + Repository *Repository `json:"repository"` + ObjectAttributes struct { + ID int `json:"id"` + Note string `json:"note"` + NoteableType string `json:"noteable_type"` + AuthorID int `json:"author_id"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + ProjectID int `json:"project_id"` + Attachment string `json:"attachment"` + LineCode string `json:"line_code"` + CommitID string `json:"commit_id"` + NoteableID int `json:"noteable_id"` + System bool `json:"system"` + StDiff *Diff `json:"st_diff"` + Description string `json:"description"` + Action CommentEventAction `json:"action"` + URL string `json:"url"` + } `json:"object_attributes"` + Commit *struct { + ID string `json:"id"` + Title string `json:"title"` + Message string `json:"message"` + Timestamp *time.Time `json:"timestamp"` + URL string `json:"url"` + Author struct { + Name string `json:"name"` + Email string `json:"email"` + } `json:"author"` + } `json:"commit"` +} + +// DeploymentEvent represents a deployment event. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#deployment-events +type DeploymentEvent struct { + ObjectKind string `json:"object_kind"` + Status string `json:"status"` + StatusChangedAt string `json:"status_changed_at"` + DeploymentID int `json:"deployment_id"` + DeployableID int `json:"deployable_id"` + DeployableURL string `json:"deployable_url"` + Environment string `json:"environment"` + EnvironmentSlug string `json:"environment_slug"` + EnvironmentExternalURL string `json:"environment_external_url"` + Project struct { + ID int `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + WebURL string `json:"web_url"` + AvatarURL *string `json:"avatar_url"` + GitSSHURL string `json:"git_ssh_url"` + GitHTTPURL string `json:"git_http_url"` + Namespace string `json:"namespace"` + VisibilityLevel int `json:"visibility_level"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + CIConfigPath string `json:"ci_config_path"` + Homepage string `json:"homepage"` + URL string `json:"url"` + SSHURL string `json:"ssh_url"` + HTTPURL string `json:"http_url"` + } `json:"project"` + Ref string `json:"ref"` + ShortSHA string `json:"short_sha"` + User *EventUser `json:"user"` + UserURL string `json:"user_url"` + CommitURL string `json:"commit_url"` + CommitTitle string `json:"commit_title"` +} + +// FeatureFlagEvent represents a feature flag event. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#feature-flag-events +type FeatureFlagEvent struct { + ObjectKind string `json:"object_kind"` + Project struct { + ID int `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + WebURL string `json:"web_url"` + AvatarURL *string `json:"avatar_url"` + GitSSHURL string `json:"git_ssh_url"` + GitHTTPURL string `json:"git_http_url"` + Namespace string `json:"namespace"` + VisibilityLevel int `json:"visibility_level"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + CIConfigPath string `json:"ci_config_path"` + Homepage string `json:"homepage"` + URL string `json:"url"` + SSHURL string `json:"ssh_url"` + HTTPURL string `json:"http_url"` + } `json:"project"` + User *EventUser `json:"user"` + UserURL string `json:"user_url"` + ObjectAttributes struct { + ID int `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + Active bool `json:"active"` + } `json:"object_attributes"` +} + +// GroupResourceAccessTokenEvent represents a resource access token event for a +// group. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#project-and-group-access-token-events +type GroupResourceAccessTokenEvent struct { + EventName string `json:"event_name"` + ObjectKind string `json:"object_kind"` + Group struct { + GroupID int `json:"group_id"` + GroupName string `json:"group_name"` + GroupPath string `json:"group_path"` + } `json:"group"` + ObjectAttributes struct { + ID int `json:"id"` + UserID int `json:"user_id"` + Name string `json:"name"` + CreatedAt string `json:"created_at"` + ExpiresAt *ISOTime `json:"expires_at"` + } `json:"object_attributes"` +} + +// IssueCommentEvent represents a comment on an issue event. 
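+//
+// ParseWebhook dispatches note events by their noteable type, so a comment
+// on an issue is returned as this type. A minimal sketch (error handling
+// elided):
+//
+//	event, err := gitlab.ParseWebhook(gitlab.EventTypeNote, payload)
+//	if err != nil { ... }
+//	if comment, ok := event.(*gitlab.IssueCommentEvent); ok {
+//		fmt.Printf("issue %d: %s\n", comment.Issue.IID, comment.ObjectAttributes.Note)
+//	}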
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#comment-on-an-issue
+type IssueCommentEvent struct {
+	ObjectKind string `json:"object_kind"`
+	EventType  string `json:"event_type"`
+	User       *User  `json:"user"`
+	ProjectID  int    `json:"project_id"`
+	Project    struct {
+		Name              string          `json:"name"`
+		Description       string          `json:"description"`
+		AvatarURL         string          `json:"avatar_url"`
+		GitSSHURL         string          `json:"git_ssh_url"`
+		GitHTTPURL        string          `json:"git_http_url"`
+		Namespace         string          `json:"namespace"`
+		PathWithNamespace string          `json:"path_with_namespace"`
+		DefaultBranch     string          `json:"default_branch"`
+		Homepage          string          `json:"homepage"`
+		URL               string          `json:"url"`
+		SSHURL            string          `json:"ssh_url"`
+		HTTPURL           string          `json:"http_url"`
+		WebURL            string          `json:"web_url"`
+		Visibility        VisibilityValue `json:"visibility"`
+	} `json:"project"`
+	Repository       *Repository `json:"repository"`
+	ObjectAttributes struct {
+		ID           int                `json:"id"`
+		Note         string             `json:"note"`
+		NoteableType string             `json:"noteable_type"`
+		AuthorID     int                `json:"author_id"`
+		CreatedAt    string             `json:"created_at"`
+		UpdatedAt    string             `json:"updated_at"`
+		ProjectID    int                `json:"project_id"`
+		Attachment   string             `json:"attachment"`
+		LineCode     string             `json:"line_code"`
+		CommitID     string             `json:"commit_id"`
+		DiscussionID string             `json:"discussion_id"`
+		NoteableID   int                `json:"noteable_id"`
+		System       bool               `json:"system"`
+		StDiff       []*Diff            `json:"st_diff"`
+		Description  string             `json:"description"`
+		Action       CommentEventAction `json:"action"`
+		URL          string             `json:"url"`
+	} `json:"object_attributes"`
+	Issue struct {
+		ID                  int           `json:"id"`
+		IID                 int           `json:"iid"`
+		ProjectID           int           `json:"project_id"`
+		MilestoneID         int           `json:"milestone_id"`
+		AuthorID            int           `json:"author_id"`
+		Position            int           `json:"position"`
+		BranchName          string        `json:"branch_name"`
+		Description         string        `json:"description"`
+		State               string        `json:"state"`
+		Title               string        `json:"title"`
+		Labels              []*EventLabel `json:"labels"`
+		LastEditedAt        string        `json:"last_edit_at"`
+		LastEditedByID      int           `json:"last_edited_by_id"`
+		UpdatedAt           string        `json:"updated_at"`
+		UpdatedByID         int           `json:"updated_by_id"`
+		CreatedAt           string        `json:"created_at"`
+		ClosedAt            string        `json:"closed_at"`
+		DueDate             *ISOTime      `json:"due_date"`
+		URL                 string        `json:"url"`
+		TimeEstimate        int           `json:"time_estimate"`
+		Confidential        bool          `json:"confidential"`
+		TotalTimeSpent      int           `json:"total_time_spent"`
+		HumanTotalTimeSpent string        `json:"human_total_time_spent"`
+		HumanTimeEstimate   string        `json:"human_time_estimate"`
+		AssigneeIDs         []int         `json:"assignee_ids"`
+		AssigneeID          int           `json:"assignee_id"`
+	} `json:"issue"`
+}
+
+// IssueEvent represents an issue event.
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#issue-events +type IssueEvent struct { + ObjectKind string `json:"object_kind"` + EventType string `json:"event_type"` + User *EventUser `json:"user"` + Project struct { + ID int `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + AvatarURL string `json:"avatar_url"` + GitSSHURL string `json:"git_ssh_url"` + GitHTTPURL string `json:"git_http_url"` + Namespace string `json:"namespace"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + Homepage string `json:"homepage"` + URL string `json:"url"` + SSHURL string `json:"ssh_url"` + HTTPURL string `json:"http_url"` + WebURL string `json:"web_url"` + Visibility VisibilityValue `json:"visibility"` + } `json:"project"` + Repository *Repository `json:"repository"` + ObjectAttributes struct { + ID int `json:"id"` + Title string `json:"title"` + AssigneeIDs []int `json:"assignee_ids"` + AssigneeID int `json:"assignee_id"` + AuthorID int `json:"author_id"` + ProjectID int `json:"project_id"` + CreatedAt string `json:"created_at"` // Should be *time.Time (see Gitlab issue #21468) + UpdatedAt string `json:"updated_at"` // Should be *time.Time (see Gitlab issue #21468) + UpdatedByID int `json:"updated_by_id"` + LastEditedAt string `json:"last_edited_at"` + LastEditedByID int `json:"last_edited_by_id"` + RelativePosition int `json:"relative_position"` + BranchName string `json:"branch_name"` + Description string `json:"description"` + MilestoneID int `json:"milestone_id"` + StateID StateID `json:"state_id"` + Confidential bool `json:"confidential"` + DiscussionLocked bool `json:"discussion_locked"` + DueDate *ISOTime `json:"due_date"` + MovedToID int `json:"moved_to_id"` + DuplicatedToID int `json:"duplicated_to_id"` + TimeEstimate int `json:"time_estimate"` + TotalTimeSpent int `json:"total_time_spent"` + TimeChange int `json:"time_change"` + HumanTotalTimeSpent string `json:"human_total_time_spent"` + HumanTimeEstimate string `json:"human_time_estimate"` + HumanTimeChange string `json:"human_time_change"` + Weight int `json:"weight"` + IID int `json:"iid"` + URL string `json:"url"` + State string `json:"state"` + Action string `json:"action"` + Severity string `json:"severity"` + EscalationStatus string `json:"escalation_status"` + EscalationPolicy struct { + ID int `json:"id"` + Name string `json:"name"` + } `json:"escalation_policy"` + Labels []*EventLabel `json:"labels"` + } `json:"object_attributes"` + Assignee *EventUser `json:"assignee"` + Assignees *[]EventUser `json:"assignees"` + Labels []*EventLabel `json:"labels"` + Changes struct { + Assignees struct { + Previous []*EventUser `json:"previous"` + Current []*EventUser `json:"current"` + } `json:"assignees"` + Description struct { + Previous string `json:"previous"` + Current string `json:"current"` + } `json:"description"` + Labels struct { + Previous []*EventLabel `json:"previous"` + Current []*EventLabel `json:"current"` + } `json:"labels"` + Title struct { + Previous string `json:"previous"` + Current string `json:"current"` + } `json:"title"` + ClosedAt struct { + Previous string `json:"previous"` + Current string `json:"current"` + } `json:"closed_at"` + StateID struct { + Previous StateID `json:"previous"` + Current StateID `json:"current"` + } `json:"state_id"` + UpdatedAt struct { + Previous string `json:"previous"` + Current string `json:"current"` + } `json:"updated_at"` + UpdatedByID struct { + 
Previous int `json:"previous"` + Current int `json:"current"` + } `json:"updated_by_id"` + TotalTimeSpent struct { + Previous int `json:"previous"` + Current int `json:"current"` + } `json:"total_time_spent"` + } `json:"changes"` +} + +// JobEvent represents a job event. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#job-events +type JobEvent struct { + ObjectKind string `json:"object_kind"` + Ref string `json:"ref"` + Tag bool `json:"tag"` + BeforeSHA string `json:"before_sha"` + SHA string `json:"sha"` + BuildID int `json:"build_id"` + BuildName string `json:"build_name"` + BuildStage string `json:"build_stage"` + BuildStatus string `json:"build_status"` + BuildCreatedAt string `json:"build_created_at"` + BuildStartedAt string `json:"build_started_at"` + BuildFinishedAt string `json:"build_finished_at"` + BuildDuration float64 `json:"build_duration"` + BuildQueuedDuration float64 `json:"build_queued_duration"` + BuildAllowFailure bool `json:"build_allow_failure"` + BuildFailureReason string `json:"build_failure_reason"` + RetriesCount int `json:"retries_count"` + PipelineID int `json:"pipeline_id"` + ProjectID int `json:"project_id"` + ProjectName string `json:"project_name"` + User *EventUser `json:"user"` + Commit struct { + ID int `json:"id"` + Name string `json:"name"` + SHA string `json:"sha"` + Message string `json:"message"` + AuthorName string `json:"author_name"` + AuthorEmail string `json:"author_email"` + AuthorURL string `json:"author_url"` + Status string `json:"status"` + Duration int `json:"duration"` + StartedAt string `json:"started_at"` + FinishedAt string `json:"finished_at"` + } `json:"commit"` + Repository *Repository `json:"repository"` + Runner struct { + ID int `json:"id"` + Active bool `json:"active"` + RunnerType string `json:"runner_type"` + IsShared bool `json:"is_shared"` + Description string `json:"description"` + Tags []string `json:"tags"` + } `json:"runner"` + Environment struct { + Name string `json:"name"` + Action string `json:"action"` + DeploymentTier string `json:"deployment_tier"` + } `json:"environment"` +} + +// MemberEvent represents a member event. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#group-member-events +type MemberEvent struct { + CreatedAt *time.Time `json:"created_at"` + UpdatedAt *time.Time `json:"updated_at"` + GroupName string `json:"group_name"` + GroupPath string `json:"group_path"` + GroupID int `json:"group_id"` + UserUsername string `json:"user_username"` + UserName string `json:"user_name"` + UserEmail string `json:"user_email"` + UserID int `json:"user_id"` + GroupAccess string `json:"group_access"` + GroupPlan string `json:"group_plan"` + ExpiresAt *time.Time `json:"expires_at"` + EventName string `json:"event_name"` +} + +// MergeCommentEvent represents a comment on a merge event. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#comment-on-a-merge-request +type MergeCommentEvent struct { + ObjectKind string `json:"object_kind"` + EventType string `json:"event_type"` + User *EventUser `json:"user"` + ProjectID int `json:"project_id"` + Project struct { + ID int `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + AvatarURL string `json:"avatar_url"` + GitSSHURL string `json:"git_ssh_url"` + GitHTTPURL string `json:"git_http_url"` + Namespace string `json:"namespace"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + Homepage string `json:"homepage"` + URL string `json:"url"` + SSHURL string `json:"ssh_url"` + HTTPURL string `json:"http_url"` + WebURL string `json:"web_url"` + Visibility VisibilityValue `json:"visibility"` + } `json:"project"` + ObjectAttributes struct { + Attachment string `json:"attachment"` + AuthorID int `json:"author_id"` + ChangePosition *NotePosition `json:"change_position"` + CommitID string `json:"commit_id"` + CreatedAt string `json:"created_at"` + DiscussionID string `json:"discussion_id"` + ID int `json:"id"` + LineCode string `json:"line_code"` + Note string `json:"note"` + NoteableID int `json:"noteable_id"` + NoteableType string `json:"noteable_type"` + OriginalPosition *NotePosition `json:"original_position"` + Position *NotePosition `json:"position"` + ProjectID int `json:"project_id"` + ResolvedAt string `json:"resolved_at"` + ResolvedByID int `json:"resolved_by_id"` + ResolvedByPush bool `json:"resolved_by_push"` + StDiff *Diff `json:"st_diff"` + System bool `json:"system"` + Type string `json:"type"` + UpdatedAt string `json:"updated_at"` + UpdatedByID int `json:"updated_by_id"` + Description string `json:"description"` + Action CommentEventAction `json:"action"` + URL string `json:"url"` + } `json:"object_attributes"` + Repository *Repository `json:"repository"` + MergeRequest struct { + ID int `json:"id"` + TargetBranch string `json:"target_branch"` + SourceBranch string `json:"source_branch"` + SourceProjectID int `json:"source_project_id"` + AuthorID int `json:"author_id"` + AssigneeID int `json:"assignee_id"` + AssigneeIDs []int `json:"assignee_ids"` + Title string `json:"title"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + MilestoneID int `json:"milestone_id"` + State string `json:"state"` + MergeStatus string `json:"merge_status"` + TargetProjectID int `json:"target_project_id"` + IID int `json:"iid"` + Description string `json:"description"` + Position int `json:"position"` + Labels []*EventLabel `json:"labels"` + LockedAt string `json:"locked_at"` + UpdatedByID int `json:"updated_by_id"` + MergeError string `json:"merge_error"` + MergeParams *MergeParams `json:"merge_params"` + MergeWhenPipelineSucceeds bool `json:"merge_when_pipeline_succeeds"` + MergeUserID int `json:"merge_user_id"` + MergeCommitSHA string `json:"merge_commit_sha"` + DeletedAt string `json:"deleted_at"` + InProgressMergeCommitSHA string `json:"in_progress_merge_commit_sha"` + LockVersion int `json:"lock_version"` + ApprovalsBeforeMerge string `json:"approvals_before_merge"` + RebaseCommitSHA string `json:"rebase_commit_sha"` + TimeEstimate int `json:"time_estimate"` + Squash bool `json:"squash"` + LastEditedAt string `json:"last_edited_at"` + LastEditedByID int `json:"last_edited_by_id"` + Source *Repository `json:"source"` + Target *Repository `json:"target"` + LastCommit struct { 
+ ID string `json:"id"` + Title string `json:"title"` + Message string `json:"message"` + Timestamp *time.Time `json:"timestamp"` + URL string `json:"url"` + Author struct { + Name string `json:"name"` + Email string `json:"email"` + } `json:"author"` + } `json:"last_commit"` + WorkInProgress bool `json:"work_in_progress"` + TotalTimeSpent int `json:"total_time_spent"` + HeadPipelineID int `json:"head_pipeline_id"` + Assignee *EventUser `json:"assignee"` + DetailedMergeStatus string `json:"detailed_merge_status"` + } `json:"merge_request"` +} + +// MergeEvent represents a merge event. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#merge-request-events +type MergeEvent struct { + ObjectKind string `json:"object_kind"` + EventType string `json:"event_type"` + User *EventUser `json:"user"` + Project struct { + ID int `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + AvatarURL string `json:"avatar_url"` + GitSSHURL string `json:"git_ssh_url"` + GitHTTPURL string `json:"git_http_url"` + Namespace string `json:"namespace"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + CIConfigPath string `json:"ci_config_path"` + Homepage string `json:"homepage"` + URL string `json:"url"` + SSHURL string `json:"ssh_url"` + HTTPURL string `json:"http_url"` + WebURL string `json:"web_url"` + Visibility VisibilityValue `json:"visibility"` + } `json:"project"` + ObjectAttributes struct { + ID int `json:"id"` + TargetBranch string `json:"target_branch"` + SourceBranch string `json:"source_branch"` + SourceProjectID int `json:"source_project_id"` + AuthorID int `json:"author_id"` + AssigneeID int `json:"assignee_id"` + AssigneeIDs []int `json:"assignee_ids"` + ReviewerIDs []int `json:"reviewer_ids"` + Title string `json:"title"` + CreatedAt string `json:"created_at"` // Should be *time.Time (see Gitlab issue #21468) + UpdatedAt string `json:"updated_at"` // Should be *time.Time (see Gitlab issue #21468) + StCommits []*Commit `json:"st_commits"` + StDiffs []*Diff `json:"st_diffs"` + LastEditedAt string `json:"last_edited_at"` + LastEditedByID int `json:"last_edited_by_id"` + MilestoneID int `json:"milestone_id"` + StateID StateID `json:"state_id"` + State string `json:"state"` + MergeStatus string `json:"merge_status"` + TargetProjectID int `json:"target_project_id"` + IID int `json:"iid"` + Description string `json:"description"` + Position int `json:"position"` + LockedAt string `json:"locked_at"` + UpdatedByID int `json:"updated_by_id"` + MergeError string `json:"merge_error"` + MergeParams *MergeParams `json:"merge_params"` + MergeWhenBuildSucceeds bool `json:"merge_when_build_succeeds"` + MergeUserID int `json:"merge_user_id"` + MergeCommitSHA string `json:"merge_commit_sha"` + DeletedAt string `json:"deleted_at"` + ApprovalsBeforeMerge string `json:"approvals_before_merge"` + RebaseCommitSHA string `json:"rebase_commit_sha"` + InProgressMergeCommitSHA string `json:"in_progress_merge_commit_sha"` + LockVersion int `json:"lock_version"` + TimeEstimate int `json:"time_estimate"` + Source *Repository `json:"source"` + Target *Repository `json:"target"` + HeadPipelineID *int `json:"head_pipeline_id"` + LastCommit struct { + ID string `json:"id"` + Message string `json:"message"` + Title string `json:"title"` + Timestamp *time.Time `json:"timestamp"` + URL string `json:"url"` + Author struct { + Name string `json:"name"` + Email string `json:"email"` + } `json:"author"` + } 
`json:"last_commit"`
+		BlockingDiscussionsResolved bool          `json:"blocking_discussions_resolved"`
+		WorkInProgress              bool          `json:"work_in_progress"`
+		Draft                       bool          `json:"draft"`
+		TotalTimeSpent              int           `json:"total_time_spent"`
+		TimeChange                  int           `json:"time_change"`
+		HumanTotalTimeSpent         string        `json:"human_total_time_spent"`
+		HumanTimeChange             string        `json:"human_time_change"`
+		HumanTimeEstimate           string        `json:"human_time_estimate"`
+		FirstContribution           bool          `json:"first_contribution"`
+		URL                         string        `json:"url"`
+		Labels                      []*EventLabel `json:"labels"`
+		Action                      string        `json:"action"`
+		DetailedMergeStatus         string        `json:"detailed_merge_status"`
+		OldRev                      string        `json:"oldrev"`
+	} `json:"object_attributes"`
+	Repository *Repository   `json:"repository"`
+	Labels     []*EventLabel `json:"labels"`
+	Changes    struct {
+		Assignees struct {
+			Previous []*EventUser `json:"previous"`
+			Current  []*EventUser `json:"current"`
+		} `json:"assignees"`
+		Reviewers struct {
+			Previous []*EventUser `json:"previous"`
+			Current  []*EventUser `json:"current"`
+		} `json:"reviewers"`
+		Description struct {
+			Previous string `json:"previous"`
+			Current  string `json:"current"`
+		} `json:"description"`
+		Draft struct {
+			Previous bool `json:"previous"`
+			Current  bool `json:"current"`
+		} `json:"draft"`
+		Labels struct {
+			Previous []*EventLabel `json:"previous"`
+			Current  []*EventLabel `json:"current"`
+		} `json:"labels"`
+		LastEditedAt struct {
+			Previous string `json:"previous"`
+			Current  string `json:"current"`
+		} `json:"last_edited_at"`
+		LastEditedByID struct {
+			Previous int `json:"previous"`
+			Current  int `json:"current"`
+		} `json:"last_edited_by_id"`
+		MilestoneID struct {
+			Previous int `json:"previous"`
+			Current  int `json:"current"`
+		} `json:"milestone_id"`
+		SourceBranch struct {
+			Previous string `json:"previous"`
+			Current  string `json:"current"`
+		} `json:"source_branch"`
+		SourceProjectID struct {
+			Previous int `json:"previous"`
+			Current  int `json:"current"`
+		} `json:"source_project_id"`
+		StateID struct {
+			Previous StateID `json:"previous"`
+			Current  StateID `json:"current"`
+		} `json:"state_id"`
+		TargetBranch struct {
+			Previous string `json:"previous"`
+			Current  string `json:"current"`
+		} `json:"target_branch"`
+		TargetProjectID struct {
+			Previous int `json:"previous"`
+			Current  int `json:"current"`
+		} `json:"target_project_id"`
+		Title struct {
+			Previous string `json:"previous"`
+			Current  string `json:"current"`
+		} `json:"title"`
+		UpdatedAt struct {
+			Previous string `json:"previous"`
+			Current  string `json:"current"`
+		} `json:"updated_at"`
+		UpdatedByID struct {
+			Previous int `json:"previous"`
+			Current  int `json:"current"`
+		} `json:"updated_by_id"`
+	} `json:"changes"`
+	Assignees []*EventUser `json:"assignees"`
+	Reviewers []*EventUser `json:"reviewers"`
+}
+
+// EventUser represents a user record in an event and is used as an event
+// initiator or a merge assignee.
+type EventUser struct {
+	ID        int    `json:"id"`
+	Name      string `json:"name"`
+	Username  string `json:"username"`
+	AvatarURL string `json:"avatar_url"`
+	Email     string `json:"email"`
+}
+
+// MergeParams represents the merge params.
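+//
+// Older GitLab versions serialize force_remove_source_branch as a string
+// rather than a bool; the custom UnmarshalJSON below accepts both, so the
+// two payloads in this sketch decode to the same value:
+//
+//	var p gitlab.MergeParams
+//	_ = json.Unmarshal([]byte(`{"force_remove_source_branch": true}`), &p)   // bool form
+//	_ = json.Unmarshal([]byte(`{"force_remove_source_branch": "true"}`), &p) // string form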
+type MergeParams struct {
+	ForceRemoveSourceBranch bool `json:"force_remove_source_branch"`
+}
+
+// UnmarshalJSON decodes the merge parameters.
+//
+// This allows support of ForceRemoveSourceBranch for both types: bool
+// (GitLab 11.9 and later) and string (before GitLab 11.9).
+func (p *MergeParams) UnmarshalJSON(b []byte) error {
+	type Alias MergeParams
+	raw := struct {
+		*Alias
+		ForceRemoveSourceBranch interface{} `json:"force_remove_source_branch"`
+	}{
+		Alias: (*Alias)(p),
+	}
+
+	err := json.Unmarshal(b, &raw)
+	if err != nil {
+		return err
+	}
+
+	switch v := raw.ForceRemoveSourceBranch.(type) {
+	case nil:
+		// No action needed.
+	case bool:
+		p.ForceRemoveSourceBranch = v
+	case string:
+		p.ForceRemoveSourceBranch, err = strconv.ParseBool(v)
+		if err != nil {
+			return err
+		}
+	default:
+		return fmt.Errorf("failed to unmarshal ForceRemoveSourceBranch of type: %T", v)
+	}
+
+	return nil
+}
+
+// PipelineEvent represents a pipeline event.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#pipeline-events
+type PipelineEvent struct {
+	ObjectKind       string `json:"object_kind"`
+	ObjectAttributes struct {
+		ID             int      `json:"id"`
+		IID            int      `json:"iid"`
+		Name           string   `json:"name"`
+		Ref            string   `json:"ref"`
+		Tag            bool     `json:"tag"`
+		SHA            string   `json:"sha"`
+		BeforeSHA      string   `json:"before_sha"`
+		Source         string   `json:"source"`
+		Status         string   `json:"status"`
+		DetailedStatus string   `json:"detailed_status"`
+		Stages         []string `json:"stages"`
+		CreatedAt      string   `json:"created_at"`
+		FinishedAt     string   `json:"finished_at"`
+		Duration       int      `json:"duration"`
+		QueuedDuration int      `json:"queued_duration"`
+		URL            string   `json:"url"`
+		Variables      []struct {
+			Key   string `json:"key"`
+			Value string `json:"value"`
+		} `json:"variables"`
+	} `json:"object_attributes"`
+	MergeRequest struct {
+		ID                  int    `json:"id"`
+		IID                 int    `json:"iid"`
+		Title               string `json:"title"`
+		SourceBranch        string `json:"source_branch"`
+		SourceProjectID     int    `json:"source_project_id"`
+		TargetBranch        string `json:"target_branch"`
+		TargetProjectID     int    `json:"target_project_id"`
+		State               string `json:"state"`
+		MergeRequestStatus  string `json:"merge_status"`
+		DetailedMergeStatus string `json:"detailed_merge_status"`
+		URL                 string `json:"url"`
+	} `json:"merge_request"`
+	User    *EventUser `json:"user"`
+	Project struct {
+		ID                int             `json:"id"`
+		Name              string          `json:"name"`
+		Description       string          `json:"description"`
+		AvatarURL         string          `json:"avatar_url"`
+		GitSSHURL         string          `json:"git_ssh_url"`
+		GitHTTPURL        string          `json:"git_http_url"`
+		Namespace         string          `json:"namespace"`
+		PathWithNamespace string          `json:"path_with_namespace"`
+		DefaultBranch     string          `json:"default_branch"`
+		Homepage          string          `json:"homepage"`
+		URL               string          `json:"url"`
+		SSHURL            string          `json:"ssh_url"`
+		HTTPURL           string          `json:"http_url"`
+		WebURL            string          `json:"web_url"`
+		Visibility        VisibilityValue `json:"visibility"`
+	} `json:"project"`
+	Commit struct {
+		ID        string     `json:"id"`
+		Message   string     `json:"message"`
+		Title     string     `json:"title"`
+		Timestamp *time.Time `json:"timestamp"`
+		URL       string     `json:"url"`
+		Author    struct {
+			Name  string `json:"name"`
+			Email string `json:"email"`
+		} `json:"author"`
+	} `json:"commit"`
+	SourcePipline struct {
+		Project struct {
+			ID                int    `json:"id"`
+			WebURL            string `json:"web_url"`
+			PathWithNamespace string `json:"path_with_namespace"`
+		} `json:"project"`
+		PipelineID int `json:"pipeline_id"`
+		JobID      int `json:"job_id"`
+	} `json:"source_pipeline"`
+	Builds []struct {
+		ID    int    `json:"id"`
+		Stage string `json:"stage"`
Name string `json:"name"` + Status string `json:"status"` + CreatedAt string `json:"created_at"` + StartedAt string `json:"started_at"` + FinishedAt string `json:"finished_at"` + Duration float64 `json:"duration"` + QueuedDuration float64 `json:"queued_duration"` + FailureReason string `json:"failure_reason"` + When string `json:"when"` + Manual bool `json:"manual"` + AllowFailure bool `json:"allow_failure"` + User *EventUser `json:"user"` + Runner struct { + ID int `json:"id"` + Description string `json:"description"` + Active bool `json:"active"` + IsShared bool `json:"is_shared"` + RunnerType string `json:"runner_type"` + Tags []string `json:"tags"` + } `json:"runner"` + ArtifactsFile struct { + Filename string `json:"filename"` + Size int `json:"size"` + } `json:"artifacts_file"` + Environment struct { + Name string `json:"name"` + Action string `json:"action"` + DeploymentTier string `json:"deployment_tier"` + } `json:"environment"` + } `json:"builds"` +} + +// ProjectResourceAccessTokenEvent represents a resource access token event for +// a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#project-and-group-access-token-events +type ProjectResourceAccessTokenEvent struct { + EventName string `json:"event_name"` + ObjectKind string `json:"object_kind"` + Project struct { + ID int `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + WebURL string `json:"web_url"` + AvatarURL string `json:"avatar_url"` + GitSSHURL string `json:"git_ssh_url"` + GitHTTPURL string `json:"git_http_url"` + Namespace string `json:"namespace"` + VisibilityLevel int `json:"visibility_level"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + CIConfigPath string `json:"ci_config_path"` + Homepage string `json:"homepage"` + URL string `json:"url"` + SSHURL string `json:"ssh_url"` + HTTPURL string `json:"http_url"` + } `json:"project"` + ObjectAttributes struct { + ID int `json:"id"` + UserID int `json:"user_id"` + Name string `json:"name"` + CreatedAt string `json:"created_at"` + ExpiresAt *ISOTime `json:"expires_at"` + } `json:"object_attributes"` +} + +// PushEvent represents a push event. 
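+//
+// A minimal sketch of iterating the commits carried by a push event (error
+// handling elided):
+//
+//	event, err := gitlab.ParseWebhook(gitlab.EventTypePush, payload)
+//	if err != nil { ... }
+//	if push, ok := event.(*gitlab.PushEvent); ok {
+//		for _, c := range push.Commits {
+//			fmt.Printf("%s by %s\n", c.ID, c.Author.Name)
+//		}
+//	}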
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#push-events +type PushEvent struct { + ObjectKind string `json:"object_kind"` + EventName string `json:"event_name"` + Before string `json:"before"` + After string `json:"after"` + Ref string `json:"ref"` + CheckoutSHA string `json:"checkout_sha"` + UserID int `json:"user_id"` + UserName string `json:"user_name"` + UserUsername string `json:"user_username"` + UserEmail string `json:"user_email"` + UserAvatar string `json:"user_avatar"` + ProjectID int `json:"project_id"` + Project struct { + ID int `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + AvatarURL string `json:"avatar_url"` + GitSSHURL string `json:"git_ssh_url"` + GitHTTPURL string `json:"git_http_url"` + Namespace string `json:"namespace"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + Homepage string `json:"homepage"` + URL string `json:"url"` + SSHURL string `json:"ssh_url"` + HTTPURL string `json:"http_url"` + WebURL string `json:"web_url"` + Visibility VisibilityValue `json:"visibility"` + } `json:"project"` + Repository *Repository `json:"repository"` + Commits []*struct { + ID string `json:"id"` + Message string `json:"message"` + Title string `json:"title"` + Timestamp *time.Time `json:"timestamp"` + URL string `json:"url"` + Author struct { + Name string `json:"name"` + Email string `json:"email"` + } `json:"author"` + Added []string `json:"added"` + Modified []string `json:"modified"` + Removed []string `json:"removed"` + } `json:"commits"` + TotalCommitsCount int `json:"total_commits_count"` +} + +// ReleaseEvent represents a release event +// +// GitLab API docs: +// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#release-events +type ReleaseEvent struct { + ID int `json:"id"` + CreatedAt string `json:"created_at"` // Should be *time.Time (see Gitlab issue #21468) + Description string `json:"description"` + Name string `json:"name"` + Tag string `json:"tag"` + ReleasedAt string `json:"released_at"` // Should be *time.Time (see Gitlab issue #21468) + ObjectKind string `json:"object_kind"` + Project struct { + ID int `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + WebURL string `json:"web_url"` + AvatarURL *string `json:"avatar_url"` + GitSSHURL string `json:"git_ssh_url"` + GitHTTPURL string `json:"git_http_url"` + Namespace string `json:"namespace"` + VisibilityLevel int `json:"visibility_level"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + CIConfigPath string `json:"ci_config_path"` + Homepage string `json:"homepage"` + URL string `json:"url"` + SSHURL string `json:"ssh_url"` + HTTPURL string `json:"http_url"` + } `json:"project"` + URL string `json:"url"` + Action string `json:"action"` + Assets struct { + Count int `json:"count"` + Links []struct { + ID int `json:"id"` + External bool `json:"external"` + LinkType string `json:"link_type"` + Name string `json:"name"` + URL string `json:"url"` + } `json:"links"` + Sources []struct { + Format string `json:"format"` + URL string `json:"url"` + } `json:"sources"` + } `json:"assets"` + Commit struct { + ID string `json:"id"` + Message string `json:"message"` + Title string `json:"title"` + Timestamp string `json:"timestamp"` // Should be *time.Time (see Gitlab issue #21468) + URL string `json:"url"` + Author struct { + Name string `json:"name"` + Email string 
`json:"email"` + } `json:"author"` + } `json:"commit"` +} + +// SnippetCommentEvent represents a comment on a snippet event. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#comment-on-a-code-snippet +type SnippetCommentEvent struct { + ObjectKind string `json:"object_kind"` + EventType string `json:"event_type"` + User *EventUser `json:"user"` + ProjectID int `json:"project_id"` + Project struct { + Name string `json:"name"` + Description string `json:"description"` + AvatarURL string `json:"avatar_url"` + GitSSHURL string `json:"git_ssh_url"` + GitHTTPURL string `json:"git_http_url"` + Namespace string `json:"namespace"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + Homepage string `json:"homepage"` + URL string `json:"url"` + SSHURL string `json:"ssh_url"` + HTTPURL string `json:"http_url"` + WebURL string `json:"web_url"` + Visibility VisibilityValue `json:"visibility"` + } `json:"project"` + Repository *Repository `json:"repository"` + ObjectAttributes struct { + ID int `json:"id"` + Note string `json:"note"` + NoteableType string `json:"noteable_type"` + AuthorID int `json:"author_id"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + ProjectID int `json:"project_id"` + Attachment string `json:"attachment"` + LineCode string `json:"line_code"` + CommitID string `json:"commit_id"` + NoteableID int `json:"noteable_id"` + System bool `json:"system"` + StDiff *Diff `json:"st_diff"` + Description string `json:"description"` + Action CommentEventAction `json:"action"` + URL string `json:"url"` + } `json:"object_attributes"` + Snippet *struct { + ID int `json:"id"` + Title string `json:"title"` + Content string `json:"content"` + AuthorID int `json:"author_id"` + ProjectID int `json:"project_id"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + Filename string `json:"file_name"` + ExpiresAt string `json:"expires_at"` + Type string `json:"type"` + VisibilityLevel int `json:"visibility_level"` + Description string `json:"description"` + Secret bool `json:"secret"` + RepositoryReadOnly bool `json:"repository_read_only"` + } `json:"snippet"` +} + +// SubGroupEvent represents a subgroup event. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#subgroup-events +type SubGroupEvent struct { + CreatedAt *time.Time `json:"created_at"` + UpdatedAt *time.Time `json:"updated_at"` + EventName string `json:"event_name"` + Name string `json:"name"` + Path string `json:"path"` + FullPath string `json:"full_path"` + GroupID int `json:"group_id"` + ParentGroupID int `json:"parent_group_id"` + ParentName string `json:"parent_name"` + ParentPath string `json:"parent_path"` + ParentFullPath string `json:"parent_full_path"` +} + +// TagEvent represents a tag event. 
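+//
+// ParseWebhook returns this type both for EventTypeTagPush payloads and for
+// service hook payloads whose object_kind is tag_push. A minimal sketch
+// (error handling elided):
+//
+//	event, err := gitlab.ParseWebhook(gitlab.EventTypeTagPush, payload)
+//	if err != nil { ... }
+//	if tag, ok := event.(*gitlab.TagEvent); ok {
+//		fmt.Printf("tag %s pushed by %s\n", tag.Ref, tag.UserName)
+//	}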
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#tag-events +type TagEvent struct { + ObjectKind string `json:"object_kind"` + EventName string `json:"event_name"` + Before string `json:"before"` + After string `json:"after"` + Ref string `json:"ref"` + CheckoutSHA string `json:"checkout_sha"` + UserID int `json:"user_id"` + UserName string `json:"user_name"` + UserUsername string `json:"user_username"` + UserAvatar string `json:"user_avatar"` + UserEmail string `json:"user_email"` + ProjectID int `json:"project_id"` + Message string `json:"message"` + Project struct { + ID int `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + AvatarURL string `json:"avatar_url"` + GitSSHURL string `json:"git_ssh_url"` + GitHTTPURL string `json:"git_http_url"` + Namespace string `json:"namespace"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + Homepage string `json:"homepage"` + URL string `json:"url"` + SSHURL string `json:"ssh_url"` + HTTPURL string `json:"http_url"` + WebURL string `json:"web_url"` + Visibility VisibilityValue `json:"visibility"` + } `json:"project"` + Repository *Repository `json:"repository"` + Commits []*struct { + ID string `json:"id"` + Message string `json:"message"` + Title string `json:"title"` + Timestamp *time.Time `json:"timestamp"` + URL string `json:"url"` + Author struct { + Name string `json:"name"` + Email string `json:"email"` + } `json:"author"` + Added []string `json:"added"` + Modified []string `json:"modified"` + Removed []string `json:"removed"` + } `json:"commits"` + TotalCommitsCount int `json:"total_commits_count"` +} + +// WikiPageEvent represents a wiki page event. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#wiki-page-events +type WikiPageEvent struct { + ObjectKind string `json:"object_kind"` + User *EventUser `json:"user"` + Project struct { + Name string `json:"name"` + Description string `json:"description"` + AvatarURL string `json:"avatar_url"` + GitSSHURL string `json:"git_ssh_url"` + GitHTTPURL string `json:"git_http_url"` + Namespace string `json:"namespace"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + Homepage string `json:"homepage"` + URL string `json:"url"` + SSHURL string `json:"ssh_url"` + HTTPURL string `json:"http_url"` + WebURL string `json:"web_url"` + Visibility VisibilityValue `json:"visibility"` + } `json:"project"` + Wiki struct { + WebURL string `json:"web_url"` + GitSSHURL string `json:"git_ssh_url"` + GitHTTPURL string `json:"git_http_url"` + PathWithNamespace string `json:"path_with_namespace"` + DefaultBranch string `json:"default_branch"` + } `json:"wiki"` + ObjectAttributes struct { + Title string `json:"title"` + Content string `json:"content"` + Format string `json:"format"` + Message string `json:"message"` + Slug string `json:"slug"` + URL string `json:"url"` + Action string `json:"action"` + DiffURL string `json:"diff_url"` + } `json:"object_attributes"` +} + +// EventLabel represents a label inside a webhook event. 
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#issue-events
+type EventLabel struct {
+	ID          int    `json:"id"`
+	Title       string `json:"title"`
+	Color       string `json:"color"`
+	ProjectID   int    `json:"project_id"`
+	CreatedAt   string `json:"created_at"`
+	UpdatedAt   string `json:"updated_at"`
+	Template    bool   `json:"template"`
+	Description string `json:"description"`
+	Type        string `json:"type"`
+	GroupID     int    `json:"group_id"`
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/events.go b/vendor/github.com/xanzy/go-gitlab/events.go
new file mode 100644
index 0000000000..504db652f0
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/events.go
@@ -0,0 +1,231 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// EventsService handles communication with the event related methods of
+// the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/events.html
+type EventsService struct {
+	client *Client
+}
+
+// ContributionEvent represents a user's contribution.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/events.html#get-user-contribution-events
+type ContributionEvent struct {
+	ID          int        `json:"id"`
+	Title       string     `json:"title"`
+	ProjectID   int        `json:"project_id"`
+	ActionName  string     `json:"action_name"`
+	TargetID    int        `json:"target_id"`
+	TargetIID   int        `json:"target_iid"`
+	TargetType  string     `json:"target_type"`
+	AuthorID    int        `json:"author_id"`
+	TargetTitle string     `json:"target_title"`
+	CreatedAt   *time.Time `json:"created_at"`
+	PushData    struct {
+		CommitCount int    `json:"commit_count"`
+		Action      string `json:"action"`
+		RefType     string `json:"ref_type"`
+		CommitFrom  string `json:"commit_from"`
+		CommitTo    string `json:"commit_to"`
+		Ref         string `json:"ref"`
+		CommitTitle string `json:"commit_title"`
+	} `json:"push_data"`
+	Note   *Note `json:"note"`
+	Author struct {
+		Name      string `json:"name"`
+		Username  string `json:"username"`
+		ID        int    `json:"id"`
+		State     string `json:"state"`
+		AvatarURL string `json:"avatar_url"`
+		WebURL    string `json:"web_url"`
+	} `json:"author"`
+	AuthorUsername string `json:"author_username"`
+}
+
+// ListContributionEventsOptions represents the available
+// ListUserContributionEvents() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/events.html#get-user-contribution-events
+type ListContributionEventsOptions struct {
+	ListOptions
+	Action     *EventTypeValue       `url:"action,omitempty" json:"action,omitempty"`
+	TargetType *EventTargetTypeValue `url:"target_type,omitempty" json:"target_type,omitempty"`
+	Before     *ISOTime              `url:"before,omitempty" json:"before,omitempty"`
+	After      *ISOTime              `url:"after,omitempty" json:"after,omitempty"`
+	Sort       *string               `url:"sort,omitempty" json:"sort,omitempty"`
+}
+
+// ListUserContributionEvents retrieves user contribution events
+// for the specified user, sorted from newest to oldest.
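+//
+// Example usage (a minimal sketch; the user may be given as an ID or a
+// username, and client construction is elided):
+//
+//	events, _, err := git.Users.ListUserContributionEvents(
+//		"some-username",
+//		&gitlab.ListContributionEventsOptions{
+//			ListOptions: gitlab.ListOptions{PerPage: 20},
+//		},
+//	)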
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/events.html#get-user-contribution-events
+func (s *UsersService) ListUserContributionEvents(uid interface{}, opt *ListContributionEventsOptions, options ...RequestOptionFunc) ([]*ContributionEvent, *Response, error) {
+	user, err := parseID(uid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("users/%s/events", user)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var cs []*ContributionEvent
+	resp, err := s.client.Do(req, &cs)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return cs, resp, nil
+}
+
+// ListCurrentUserContributionEvents gets a list of the currently
+// authenticated user's events.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/events.html#list-currently-authenticated-users-events
+func (s *EventsService) ListCurrentUserContributionEvents(opt *ListContributionEventsOptions, options ...RequestOptionFunc) ([]*ContributionEvent, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodGet, "events", opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var cs []*ContributionEvent
+	resp, err := s.client.Do(req, &cs)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return cs, resp, nil
+}
+
+// ProjectEvent represents a GitLab project event.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/events.html#list-a-projects-visible-events
+type ProjectEvent struct {
+	ID          int    `json:"id"`
+	Title       string `json:"title"`
+	ProjectID   int    `json:"project_id"`
+	ActionName  string `json:"action_name"`
+	TargetID    int    `json:"target_id"`
+	TargetIID   int    `json:"target_iid"`
+	TargetType  string `json:"target_type"`
+	AuthorID    int    `json:"author_id"`
+	TargetTitle string `json:"target_title"`
+	CreatedAt   string `json:"created_at"`
+	Author      struct {
+		Name      string `json:"name"`
+		Username  string `json:"username"`
+		ID        int    `json:"id"`
+		State     string `json:"state"`
+		AvatarURL string `json:"avatar_url"`
+		WebURL    string `json:"web_url"`
+	} `json:"author"`
+	AuthorUsername string `json:"author_username"`
+	Data           struct {
+		Before            string      `json:"before"`
+		After             string      `json:"after"`
+		Ref               string      `json:"ref"`
+		UserID            int         `json:"user_id"`
+		UserName          string      `json:"user_name"`
+		Repository        *Repository `json:"repository"`
+		Commits           []*Commit   `json:"commits"`
+		TotalCommitsCount int         `json:"total_commits_count"`
+	} `json:"data"`
+	Note struct {
+		ID         int    `json:"id"`
+		Body       string `json:"body"`
+		Attachment string `json:"attachment"`
+		Author     struct {
+			ID        int    `json:"id"`
+			Username  string `json:"username"`
+			Email     string `json:"email"`
+			Name      string `json:"name"`
+			State     string `json:"state"`
+			AvatarURL string `json:"avatar_url"`
+			WebURL    string `json:"web_url"`
+		} `json:"author"`
+		CreatedAt    *time.Time `json:"created_at"`
+		System       bool       `json:"system"`
+		NoteableID   int        `json:"noteable_id"`
+		NoteableType string     `json:"noteable_type"`
+		NoteableIID  int        `json:"noteable_iid"`
+	} `json:"note"`
+	PushData struct {
+		CommitCount int    `json:"commit_count"`
+		Action      string `json:"action"`
+		RefType     string `json:"ref_type"`
+		CommitFrom  string `json:"commit_from"`
+		CommitTo    string `json:"commit_to"`
+		Ref         string `json:"ref"`
+		CommitTitle string `json:"commit_title"`
+	} `json:"push_data"`
+}
+
+func (s ProjectEvent) String() string {
+	return Stringify(s)
+}
+
+// ListProjectVisibleEventsOptions represents the available
+// ListProjectVisibleEvents() options.
+// ListProjectVisibleEventsOptions represents the available
+// ListProjectVisibleEvents() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/events.html#list-a-projects-visible-events
+type ListProjectVisibleEventsOptions struct {
+ ListOptions
+ Action *EventTypeValue `url:"action,omitempty" json:"action,omitempty"`
+ TargetType *EventTargetTypeValue `url:"target_type,omitempty" json:"target_type,omitempty"`
+ Before *ISOTime `url:"before,omitempty" json:"before,omitempty"`
+ After *ISOTime `url:"after,omitempty" json:"after,omitempty"`
+ Sort *string `url:"sort,omitempty" json:"sort,omitempty"`
+}
+
+// ListProjectVisibleEvents gets the events for the specified project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/events.html#list-a-projects-visible-events
+func (s *EventsService) ListProjectVisibleEvents(pid interface{}, opt *ListProjectVisibleEventsOptions, options ...RequestOptionFunc) ([]*ProjectEvent, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/events", PathEscape(project))
+
+ req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var p []*ProjectEvent
+ resp, err := s.client.Do(req, &p)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return p, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/external_status_checks.go b/vendor/github.com/xanzy/go-gitlab/external_status_checks.go
new file mode 100644
index 0000000000..c6a3f7b285
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/external_status_checks.go
@@ -0,0 +1,218 @@
+package gitlab
+
+import (
+ "fmt"
+ "net/http"
+ "time"
+)
+
+// ExternalStatusChecksService handles communication with the external
+// status check related methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/status_checks.html
+type ExternalStatusChecksService struct {
+ client *Client
+}
+
+type MergeStatusCheck struct {
+ ID int `json:"id"`
+ Name string `json:"name"`
+ ExternalURL string `json:"external_url"`
+ Status string `json:"status"`
+}
+
+type ProjectStatusCheck struct {
+ ID int `json:"id"`
+ Name string `json:"name"`
+ ProjectID int `json:"project_id"`
+ ExternalURL string `json:"external_url"`
+ ProtectedBranches []StatusCheckProtectedBranch `json:"protected_branches"`
+}
+
+type StatusCheckProtectedBranch struct {
+ ID int `json:"id"`
+ ProjectID int `json:"project_id"`
+ Name string `json:"name"`
+ CreatedAt *time.Time `json:"created_at"`
+ UpdatedAt *time.Time `json:"updated_at"`
+ CodeOwnerApprovalRequired bool `json:"code_owner_approval_required"`
+}
+
+// ListMergeStatusChecks lists the external status checks that apply to a
+// single merge request, together with their current status.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/status_checks.html#list-status-checks-for-a-merge-request
+func (s *ExternalStatusChecksService) ListMergeStatusChecks(pid interface{}, mr int, opt *ListOptions, options ...RequestOptionFunc) ([]*MergeStatusCheck, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/merge_requests/%d/status_checks", PathEscape(project), mr)
+
+ req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var mscs []*MergeStatusCheck
+ resp, err := s.client.Do(req, &mscs)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return mscs, resp, nil
+}
+
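A fragment for the merge request status-check listing above, again assuming the git client from the earlier sketch. The project path and the merge request IID 42 are placeholders:

    // List the external status checks reported on merge request !42.
    checks, _, err := git.ExternalStatusChecks.ListMergeStatusChecks(
        "my-group/my-project", 42, &gitlab.ListOptions{PerPage: 20},
    )
    if err != nil {
        log.Fatal(err)
    }
    for _, c := range checks {
        fmt.Println(c.Name, c.Status, c.ExternalURL)
    }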
+// SetExternalStatusCheckStatusOptions represents the available
+// SetExternalStatusCheckStatus() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/status_checks.html#set-status-of-an-external-status-check
+type SetExternalStatusCheckStatusOptions struct {
+ SHA *string `url:"sha,omitempty" json:"sha,omitempty"`
+ ExternalStatusCheckID *int `url:"external_status_check_id,omitempty" json:"external_status_check_id,omitempty"`
+ Status *string `url:"status,omitempty" json:"status,omitempty"`
+}
+
+// SetExternalStatusCheckStatus sets the status of an external status check.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/status_checks.html#set-status-of-an-external-status-check
+func (s *ExternalStatusChecksService) SetExternalStatusCheckStatus(pid interface{}, mergeRequest int, opt *SetExternalStatusCheckStatusOptions, options ...RequestOptionFunc) (*Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, err
+ }
+ u := fmt.Sprintf("projects/%s/merge_requests/%d/status_check_responses", PathEscape(project), mergeRequest)
+
+ req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
+
+// ListProjectStatusChecks lists the project external status checks.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/status_checks.html#get-project-external-status-checks
+func (s *ExternalStatusChecksService) ListProjectStatusChecks(pid interface{}, opt *ListOptions, options ...RequestOptionFunc) ([]*ProjectStatusCheck, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/external_status_checks", PathEscape(project))
+
+ req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var pscs []*ProjectStatusCheck
+ resp, err := s.client.Do(req, &pscs)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return pscs, resp, nil
+}
+
+// CreateExternalStatusCheckOptions represents the available
+// CreateExternalStatusCheck() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/status_checks.html#create-external-status-check
+type CreateExternalStatusCheckOptions struct {
+ Name *string `url:"name,omitempty" json:"name,omitempty"`
+ ExternalURL *string `url:"external_url,omitempty" json:"external_url,omitempty"`
+ ProtectedBranchIDs *[]int `url:"protected_branch_ids,omitempty" json:"protected_branch_ids,omitempty"`
+}
+
+// CreateExternalStatusCheck creates an external status check.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/status_checks.html#create-external-status-check
+func (s *ExternalStatusChecksService) CreateExternalStatusCheck(pid interface{}, opt *CreateExternalStatusCheckOptions, options ...RequestOptionFunc) (*Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, err
+ }
+ u := fmt.Sprintf("projects/%s/external_status_checks", PathEscape(project))
+
+ req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
+
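A sketch of registering a new check with CreateExternalStatusCheck, defined above. The check name and callback URL are placeholders; plain address-of is used instead of any pointer helper so the fragment depends only on what this file declares:

    // Register a new external status check on the project.
    name := "compliance-check"
    externalURL := "https://status.example.com/hook"
    _, err := git.ExternalStatusChecks.CreateExternalStatusCheck(
        "my-group/my-project",
        &gitlab.CreateExternalStatusCheckOptions{
            Name:        &name,
            ExternalURL: &externalURL,
        },
    )
    if err != nil {
        log.Fatal(err)
    }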
+// DeleteExternalStatusCheck deletes an external status check.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/status_checks.html#delete-external-status-check
+func (s *ExternalStatusChecksService) DeleteExternalStatusCheck(pid interface{}, check int, options ...RequestOptionFunc) (*Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, err
+ }
+ u := fmt.Sprintf("projects/%s/external_status_checks/%d", PathEscape(project), check)
+
+ req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
+
+// UpdateExternalStatusCheckOptions represents the available
+// UpdateExternalStatusCheck() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/status_checks.html#update-external-status-check
+type UpdateExternalStatusCheckOptions struct {
+ Name *string `url:"name,omitempty" json:"name,omitempty"`
+ ExternalURL *string `url:"external_url,omitempty" json:"external_url,omitempty"`
+ ProtectedBranchIDs *[]int `url:"protected_branch_ids,omitempty" json:"protected_branch_ids,omitempty"`
+}
+
+// UpdateExternalStatusCheck updates an external status check.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/status_checks.html#update-external-status-check
+func (s *ExternalStatusChecksService) UpdateExternalStatusCheck(pid interface{}, check int, opt *UpdateExternalStatusCheckOptions, options ...RequestOptionFunc) (*Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, err
+ }
+ u := fmt.Sprintf("projects/%s/external_status_checks/%d", PathEscape(project), check)
+
+ req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
+
+// RetryFailedStatusCheckForAMergeRequest retries a failed external status
+// check for a merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/status_checks.html#retry-failed-status-check-for-a-merge-request
+func (s *ExternalStatusChecksService) RetryFailedStatusCheckForAMergeRequest(pid interface{}, mergeRequest int, externalStatusCheck int, options ...RequestOptionFunc) (*Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, err
+ }
+ u := fmt.Sprintf("projects/%s/merge_requests/%d/status_checks/%d/retry", PathEscape(project), mergeRequest, externalStatusCheck)
+
+ req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/feature_flags.go b/vendor/github.com/xanzy/go-gitlab/feature_flags.go
new file mode 100644
index 0000000000..8bb847e151
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/feature_flags.go
@@ -0,0 +1,96 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+)
+
+// FeaturesService handles communication with the application features
+// related methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/features.html
+type FeaturesService struct {
+ client *Client
+}
+
+// Feature represents a GitLab feature flag.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/features.html
+type Feature struct {
+ Name string `json:"name"`
+ State string `json:"state"`
+ Gates []Gate
+}
+
+// Gate represents a gate of a GitLab feature flag.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/features.html
+type Gate struct {
+ Key string `json:"key"`
+ Value interface{} `json:"value"`
+}
+
+func (f Feature) String() string {
+ return Stringify(f)
+}
+
+// ListFeatures gets a list of feature flags.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/features.html#list-all-features
+func (s *FeaturesService) ListFeatures(options ...RequestOptionFunc) ([]*Feature, *Response, error) {
+ req, err := s.client.NewRequest(http.MethodGet, "features", nil, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var f []*Feature
+ resp, err := s.client.Do(req, &f)
+ if err != nil {
+ return nil, resp, err
+ }
+ return f, resp, nil
+}
+
+// SetFeatureFlag sets or creates a feature flag gate.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/features.html#set-or-create-a-feature
+func (s *FeaturesService) SetFeatureFlag(name string, value interface{}, options ...RequestOptionFunc) (*Feature, *Response, error) {
+ u := fmt.Sprintf("features/%s", url.PathEscape(name))
+
+ opt := struct {
+ Value interface{} `url:"value" json:"value"`
+ }{
+ value,
+ }
+
+ req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ f := &Feature{}
+ resp, err := s.client.Do(req, f)
+ if err != nil {
+ return nil, resp, err
+ }
+ return f, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/freeze_periods.go b/vendor/github.com/xanzy/go-gitlab/freeze_periods.go
new file mode 100644
index 0000000000..3cbfba81c4
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/freeze_periods.go
@@ -0,0 +1,194 @@
+//
+// Copyright 2021 Paul Cioanca
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+ "fmt"
+ "net/http"
+ "time"
+)
+
+// FreezePeriodsService handles the communication with the freeze periods
+// related methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/freeze_periods.html
+type FreezePeriodsService struct {
+ client *Client
+}
+
+// FreezePeriod represents a freeze period object.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/freeze_periods.html#list-freeze-periods
+type FreezePeriod struct {
+ ID int `json:"id"`
+ FreezeStart string `json:"freeze_start"`
+ FreezeEnd string `json:"freeze_end"`
+ CronTimezone string `json:"cron_timezone"`
+ CreatedAt *time.Time `json:"created_at"`
+ UpdatedAt *time.Time `json:"updated_at"`
+}
+
+// ListFreezePeriodsOptions represents the available ListFreezePeriods()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/freeze_periods.html#list-freeze-periods
+type ListFreezePeriodsOptions ListOptions
+
+// ListFreezePeriods gets a list of a project's freeze periods.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/freeze_periods.html#list-freeze-periods
+func (s *FreezePeriodsService) ListFreezePeriods(pid interface{}, opt *ListFreezePeriodsOptions, options ...RequestOptionFunc) ([]*FreezePeriod, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/freeze_periods", PathEscape(project))
+
+ req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var fp []*FreezePeriod
+ resp, err := s.client.Do(req, &fp)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return fp, resp, nil
+}
+
+// GetFreezePeriod gets a specific freeze period for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/freeze_periods.html#get-a-freeze-period-by-a-freeze_period_id
+func (s *FreezePeriodsService) GetFreezePeriod(pid interface{}, freezePeriod int, options ...RequestOptionFunc) (*FreezePeriod, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/freeze_periods/%d", PathEscape(project), freezePeriod)
+
+ req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ fp := new(FreezePeriod)
+ resp, err := s.client.Do(req, fp)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return fp, resp, nil
+}
+
+// CreateFreezePeriodOptions represents the available CreateFreezePeriodOptions()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/freeze_periods.html#create-a-freeze-period
+type CreateFreezePeriodOptions struct {
+ FreezeStart *string `url:"freeze_start,omitempty" json:"freeze_start,omitempty"`
+ FreezeEnd *string `url:"freeze_end,omitempty" json:"freeze_end,omitempty"`
+ CronTimezone *string `url:"cron_timezone,omitempty" json:"cron_timezone,omitempty"`
+}
+
+// CreateFreezePeriodOptions adds a freeze period to a specified project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/freeze_periods.html#create-a-freeze-period
+func (s *FreezePeriodsService) CreateFreezePeriodOptions(pid interface{}, opt *CreateFreezePeriodOptions, options ...RequestOptionFunc) (*FreezePeriod, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/freeze_periods", PathEscape(project))
+
+ req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ fp := new(FreezePeriod)
+ resp, err := s.client.Do(req, fp)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return fp, resp, nil
+}
+
+// UpdateFreezePeriodOptions represents the available UpdateFreezePeriodOptions()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/freeze_periods.html#update-a-freeze-period
+type UpdateFreezePeriodOptions struct {
+ FreezeStart *string `url:"freeze_start,omitempty" json:"freeze_start,omitempty"`
+ FreezeEnd *string `url:"freeze_end,omitempty" json:"freeze_end,omitempty"`
+ CronTimezone *string `url:"cron_timezone,omitempty" json:"cron_timezone,omitempty"`
+}
+
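Note that in this vendored version the create method carries the same name as its options type, CreateFreezePeriodOptions. A sketch of creating a weekend freeze, assuming the git client from the earlier sketch; the cron expressions and timezone are placeholders in the format the GitLab freeze-period docs describe:

    // Freeze deployments from Friday 23:00 to Monday 07:00 (cron syntax).
    start := "0 23 * * 5"
    end := "0 7 * * 1"
    tz := "Etc/UTC"
    fp, _, err := git.FreezePeriods.CreateFreezePeriodOptions(
        "my-group/my-project",
        &gitlab.CreateFreezePeriodOptions{
            FreezeStart:  &start,
            FreezeEnd:    &end,
            CronTimezone: &tz,
        },
    )
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("created freeze period", fp.ID)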
+// UpdateFreezePeriodOptions edits a freeze period for a specified project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/freeze_periods.html#update-a-freeze-period
+func (s *FreezePeriodsService) UpdateFreezePeriodOptions(pid interface{}, freezePeriod int, opt *UpdateFreezePeriodOptions, options ...RequestOptionFunc) (*FreezePeriod, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/freeze_periods/%d", PathEscape(project), freezePeriod)
+
+ req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ fp := new(FreezePeriod)
+ resp, err := s.client.Do(req, fp)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return fp, resp, nil
+}
+
+// DeleteFreezePeriod removes a freeze period from a project. This is an
+// idempotent method and can be called multiple times. Either the freeze
+// period exists or it does not.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/freeze_periods.html#delete-a-freeze-period
+func (s *FreezePeriodsService) DeleteFreezePeriod(pid interface{}, freezePeriod int, options ...RequestOptionFunc) (*Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, err
+ }
+ u := fmt.Sprintf("projects/%s/freeze_periods/%d", PathEscape(project), freezePeriod)
+
+ req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/generic_packages.go b/vendor/github.com/xanzy/go-gitlab/generic_packages.go
new file mode 100644
index 0000000000..4c32eed44a
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/generic_packages.go
@@ -0,0 +1,158 @@
+//
+// Copyright 2021, Sune Keller
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/http"
+ "time"
+)
+
+// GenericPackagesService handles communication with the packages related
+// methods of the GitLab API.
+//
+// GitLab docs:
+// https://docs.gitlab.com/ee/user/packages/generic_packages/index.html
+type GenericPackagesService struct {
+ client *Client
+}
+
+// GenericPackagesFile represents a GitLab generic package file.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/user/packages/generic_packages/index.html#publish-a-package-file
+type GenericPackagesFile struct {
+ ID int `json:"id"`
+ PackageID int `json:"package_id"`
+ CreatedAt *time.Time `json:"created_at"`
+ UpdatedAt *time.Time `json:"updated_at"`
+ Size int `json:"size"`
+ FileStore int `json:"file_store"`
+ FileMD5 string `json:"file_md5"`
+ FileSHA1 string `json:"file_sha1"`
+ FileName string `json:"file_name"`
+ File struct {
+ URL string `json:"url"`
+ } `json:"file"`
+ FileSHA256 string `json:"file_sha256"`
+ VerificationRetryAt *time.Time `json:"verification_retry_at"`
+ VerifiedAt *time.Time `json:"verified_at"`
+ VerificationFailure bool `json:"verification_failure"`
+ VerificationRetryCount int `json:"verification_retry_count"`
+ VerificationChecksum string `json:"verification_checksum"`
+ VerificationState int `json:"verification_state"`
+ VerificationStartedAt *time.Time `json:"verification_started_at"`
+ NewFilePath string `json:"new_file_path"`
+}
+
+// FormatPackageURL returns the GitLab Package Registry URL for the given
+// artifact metadata, without the BaseURL. It does not make a GitLab API
+// request; the URL is computed from the route format documented by GitLab.
+func (s *GenericPackagesService) FormatPackageURL(pid interface{}, packageName, packageVersion, fileName string) (string, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return "", err
+ }
+ u := fmt.Sprintf(
+ "projects/%s/packages/generic/%s/%s/%s",
+ PathEscape(project),
+ PathEscape(packageName),
+ PathEscape(packageVersion),
+ PathEscape(fileName),
+ )
+ return u, nil
+}
+
+// PublishPackageFileOptions represents the available PublishPackageFile()
+// options.
+//
+// GitLab docs:
+// https://docs.gitlab.com/ee/user/packages/generic_packages/index.html#publish-a-package-file
+type PublishPackageFileOptions struct {
+ Status *GenericPackageStatusValue `url:"status,omitempty" json:"status,omitempty"`
+ Select *GenericPackageSelectValue `url:"select,omitempty" json:"select,omitempty"`
+}
+
+// PublishPackageFile uploads a file to a project's package registry.
+//
+// GitLab docs:
+// https://docs.gitlab.com/ee/user/packages/generic_packages/index.html#publish-a-package-file
+func (s *GenericPackagesService) PublishPackageFile(pid interface{}, packageName, packageVersion, fileName string, content io.Reader, opt *PublishPackageFileOptions, options ...RequestOptionFunc) (*GenericPackagesFile, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf(
+ "projects/%s/packages/generic/%s/%s/%s",
+ PathEscape(project),
+ PathEscape(packageName),
+ PathEscape(packageVersion),
+ PathEscape(fileName),
+ )
+
+ // We need to create the request as a GET request to make sure the options
+ // are set correctly. After the request is created we will overwrite both
+ // the method and the body.
+ req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Overwrite the method and body.
+ req.Method = http.MethodPut
+ req.SetBody(content)
+
+ f := new(GenericPackagesFile)
+ resp, err := s.client.Do(req, f)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return f, resp, nil
+}
+
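A sketch of publishing an artifact with PublishPackageFile, defined above; it assumes the same git client as the earlier sketch plus a strings import, and the package name, version, and file name are placeholders:

    // Upload a small text artifact into the project's generic package registry.
    pkgFile, _, err := git.GenericPackages.PublishPackageFile(
        "my-group/my-project", "my-package", "0.1.0", "artifact.txt",
        strings.NewReader("hello artifact"),
        &gitlab.PublishPackageFileOptions{},
    )
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("published", pkgFile.FileName)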
+// DownloadPackageFile allows you to download the package file.
+//
+// GitLab docs:
+// https://docs.gitlab.com/ee/user/packages/generic_packages/index.html#download-package-file
+func (s *GenericPackagesService) DownloadPackageFile(pid interface{}, packageName, packageVersion, fileName string, options ...RequestOptionFunc) ([]byte, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf(
+ "projects/%s/packages/generic/%s/%s/%s",
+ PathEscape(project),
+ PathEscape(packageName),
+ PathEscape(packageVersion),
+ PathEscape(fileName),
+ )
+
+ req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var f bytes.Buffer
+ resp, err := s.client.Do(req, &f)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return f.Bytes(), resp, err
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/geo_nodes.go b/vendor/github.com/xanzy/go-gitlab/geo_nodes.go
new file mode 100644
index 0000000000..47ac583a56
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/geo_nodes.go
@@ -0,0 +1,433 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+ "fmt"
+ "net/http"
+)
+
+// GeoNode represents a GitLab Geo Node.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/geo_nodes.html
+type GeoNode struct {
+ ID int `json:"id"`
+ Name string `json:"name"`
+ URL string `json:"url"`
+ InternalURL string `json:"internal_url"`
+ Primary bool `json:"primary"`
+ Enabled bool `json:"enabled"`
+ Current bool `json:"current"`
+ FilesMaxCapacity int `json:"files_max_capacity"`
+ ReposMaxCapacity int `json:"repos_max_capacity"`
+ VerificationMaxCapacity int `json:"verification_max_capacity"`
+ SelectiveSyncType string `json:"selective_sync_type"`
+ SelectiveSyncShards []string `json:"selective_sync_shards"`
+ SelectiveSyncNamespaceIds []int `json:"selective_sync_namespace_ids"`
+ MinimumReverificationInterval int `json:"minimum_reverification_interval"`
+ ContainerRepositoriesMaxCapacity int `json:"container_repositories_max_capacity"`
+ SyncObjectStorage bool `json:"sync_object_storage"`
+ CloneProtocol string `json:"clone_protocol"`
+ WebEditURL string `json:"web_edit_url"`
+ WebGeoProjectsURL string `json:"web_geo_projects_url"`
+ Links GeoNodeLinks `json:"_links"`
+}
+
+// GeoNodeLinks represents links for GitLab GeoNode.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/geo_nodes.html
+type GeoNodeLinks struct {
+ Self string `json:"self"`
+ Status string `json:"status"`
+ Repair string `json:"repair"`
+}
+
+// GeoNodesService handles communication with the Geo Nodes related methods
+// of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/geo_nodes.html
+type GeoNodesService struct {
+ client *Client
+}
+
+// CreateGeoNodesOptions represents the available CreateGeoNode() options.
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/geo_nodes.html#create-a-new-geo-node +type CreateGeoNodesOptions struct { + Primary *bool `url:"primary,omitempty" json:"primary,omitempty"` + Enabled *bool `url:"enabled,omitempty" json:"enabled,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` + URL *string `url:"url,omitempty" json:"url,omitempty"` + InternalURL *string `url:"internal_url,omitempty" json:"internal_url,omitempty"` + FilesMaxCapacity *int `url:"files_max_capacity,omitempty" json:"files_max_capacity,omitempty"` + ReposMaxCapacity *int `url:"repos_max_capacity,omitempty" json:"repos_max_capacity,omitempty"` + VerificationMaxCapacity *int `url:"verification_max_capacity,omitempty" json:"verification_max_capacity,omitempty"` + ContainerRepositoriesMaxCapacity *int `url:"container_repositories_max_capacity,omitempty" json:"container_repositories_max_capacity,omitempty"` + SyncObjectStorage *bool `url:"sync_object_storage,omitempty" json:"sync_object_storage,omitempty"` + SelectiveSyncType *string `url:"selective_sync_type,omitempty" json:"selective_sync_type,omitempty"` + SelectiveSyncShards *[]string `url:"selective_sync_shards,omitempty" json:"selective_sync_shards,omitempty"` + SelectiveSyncNamespaceIds *[]int `url:"selective_sync_namespace_ids,omitempty" json:"selective_sync_namespace_ids,omitempty"` + MinimumReverificationInterval *int `url:"minimum_reverification_interval,omitempty" json:"minimum_reverification_interval,omitempty"` +} + +// CreateGeoNode creates a new Geo Node. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/geo_nodes.html#create-a-new-geo-node +func (s *GeoNodesService) CreateGeoNode(opt *CreateGeoNodesOptions, options ...RequestOptionFunc) (*GeoNode, *Response, error) { + req, err := s.client.NewRequest(http.MethodPost, "geo_nodes", opt, options) + if err != nil { + return nil, nil, err + } + + g := new(GeoNode) + resp, err := s.client.Do(req, g) + if err != nil { + return nil, resp, err + } + + return g, resp, nil +} + +// ListGeoNodesOptions represents the available ListGeoNodes() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/geo_nodes.html#retrieve-configuration-about-all-geo-nodes +type ListGeoNodesOptions ListOptions + +// ListGeoNodes gets a list of geo nodes. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/geo_nodes.html#retrieve-configuration-about-all-geo-nodes +func (s *GeoNodesService) ListGeoNodes(opt *ListGeoNodesOptions, options ...RequestOptionFunc) ([]*GeoNode, *Response, error) { + req, err := s.client.NewRequest(http.MethodGet, "geo_nodes", opt, options) + if err != nil { + return nil, nil, err + } + + var gs []*GeoNode + resp, err := s.client.Do(req, &gs) + if err != nil { + return nil, resp, err + } + + return gs, resp, nil +} + +// GetGeoNode gets a specific geo node. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/geo_nodes.html#retrieve-configuration-about-a-specific-geo-node +func (s *GeoNodesService) GetGeoNode(id int, options ...RequestOptionFunc) (*GeoNode, *Response, error) { + u := fmt.Sprintf("geo_nodes/%d", id) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + g := new(GeoNode) + resp, err := s.client.Do(req, g) + if err != nil { + return nil, resp, err + } + + return g, resp, nil +} + +// UpdateGeoNodesOptions represents the available EditGeoNode() options. 
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/geo_nodes.html#edit-a-geo-node
+type UpdateGeoNodesOptions struct {
+ ID *int `url:"primary,omitempty" json:"primary,omitempty"`
+ Enabled *bool `url:"enabled,omitempty" json:"enabled,omitempty"`
+ Name *string `url:"name,omitempty" json:"name,omitempty"`
+ URL *string `url:"url,omitempty" json:"url,omitempty"`
+ InternalURL *string `url:"internal_url,omitempty" json:"internal_url,omitempty"`
+ FilesMaxCapacity *int `url:"files_max_capacity,omitempty" json:"files_max_capacity,omitempty"`
+ ReposMaxCapacity *int `url:"repos_max_capacity,omitempty" json:"repos_max_capacity,omitempty"`
+ VerificationMaxCapacity *int `url:"verification_max_capacity,omitempty" json:"verification_max_capacity,omitempty"`
+ ContainerRepositoriesMaxCapacity *int `url:"container_repositories_max_capacity,omitempty" json:"container_repositories_max_capacity,omitempty"`
+ SyncObjectStorage *bool `url:"sync_object_storage,omitempty" json:"sync_object_storage,omitempty"`
+ SelectiveSyncType *string `url:"selective_sync_type,omitempty" json:"selective_sync_type,omitempty"`
+ SelectiveSyncShards *[]string `url:"selective_sync_shards,omitempty" json:"selective_sync_shards,omitempty"`
+ SelectiveSyncNamespaceIds *[]int `url:"selective_sync_namespace_ids,omitempty" json:"selective_sync_namespace_ids,omitempty"`
+ MinimumReverificationInterval *int `url:"minimum_reverification_interval,omitempty" json:"minimum_reverification_interval,omitempty"`
+}
+
+// EditGeoNode updates settings of an existing Geo node.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/geo_nodes.html#edit-a-geo-node
+func (s *GeoNodesService) EditGeoNode(id int, opt *UpdateGeoNodesOptions, options ...RequestOptionFunc) (*GeoNode, *Response, error) {
+ u := fmt.Sprintf("geo_nodes/%d", id)
+
+ req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ g := new(GeoNode)
+ resp, err := s.client.Do(req, g)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return g, resp, nil
+}
+
+// DeleteGeoNode removes the Geo node.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/geo_nodes.html#delete-a-geo-node
+func (s *GeoNodesService) DeleteGeoNode(id int, options ...RequestOptionFunc) (*Response, error) {
+ u := fmt.Sprintf("geo_nodes/%d", id)
+
+ req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
+
+// RepairGeoNode repairs the OAuth authentication of a Geo node.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/geo_nodes.html#repair-a-geo-node
+func (s *GeoNodesService) RepairGeoNode(id int, options ...RequestOptionFunc) (*GeoNode, *Response, error) {
+ u := fmt.Sprintf("geo_nodes/%d/repair", id)
+
+ req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ g := new(GeoNode)
+ resp, err := s.client.Do(req, g)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return g, resp, nil
+}
+
+// GeoNodeStatus represents the status of a Geo node.
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/geo_nodes.html#retrieve-status-about-all-geo-nodes +type GeoNodeStatus struct { + GeoNodeID int `json:"geo_node_id"` + Healthy bool `json:"healthy"` + Health string `json:"health"` + HealthStatus string `json:"health_status"` + MissingOauthApplication bool `json:"missing_oauth_application"` + AttachmentsCount int `json:"attachments_count"` + AttachmentsSyncedCount int `json:"attachments_synced_count"` + AttachmentsFailedCount int `json:"attachments_failed_count"` + AttachmentsSyncedMissingOnPrimaryCount int `json:"attachments_synced_missing_on_primary_count"` + AttachmentsSyncedInPercentage string `json:"attachments_synced_in_percentage"` + DbReplicationLagSeconds int `json:"db_replication_lag_seconds"` + LfsObjectsCount int `json:"lfs_objects_count"` + LfsObjectsSyncedCount int `json:"lfs_objects_synced_count"` + LfsObjectsFailedCount int `json:"lfs_objects_failed_count"` + LfsObjectsSyncedMissingOnPrimaryCount int `json:"lfs_objects_synced_missing_on_primary_count"` + LfsObjectsSyncedInPercentage string `json:"lfs_objects_synced_in_percentage"` + JobArtifactsCount int `json:"job_artifacts_count"` + JobArtifactsSyncedCount int `json:"job_artifacts_synced_count"` + JobArtifactsFailedCount int `json:"job_artifacts_failed_count"` + JobArtifactsSyncedMissingOnPrimaryCount int `json:"job_artifacts_synced_missing_on_primary_count"` + JobArtifactsSyncedInPercentage string `json:"job_artifacts_synced_in_percentage"` + ContainerRepositoriesCount int `json:"container_repositories_count"` + ContainerRepositoriesSyncedCount int `json:"container_repositories_synced_count"` + ContainerRepositoriesFailedCount int `json:"container_repositories_failed_count"` + ContainerRepositoriesSyncedInPercentage string `json:"container_repositories_synced_in_percentage"` + DesignRepositoriesCount int `json:"design_repositories_count"` + DesignRepositoriesSyncedCount int `json:"design_repositories_synced_count"` + DesignRepositoriesFailedCount int `json:"design_repositories_failed_count"` + DesignRepositoriesSyncedInPercentage string `json:"design_repositories_synced_in_percentage"` + ProjectsCount int `json:"projects_count"` + RepositoriesCount int `json:"repositories_count"` + RepositoriesFailedCount int `json:"repositories_failed_count"` + RepositoriesSyncedCount int `json:"repositories_synced_count"` + RepositoriesSyncedInPercentage string `json:"repositories_synced_in_percentage"` + WikisCount int `json:"wikis_count"` + WikisFailedCount int `json:"wikis_failed_count"` + WikisSyncedCount int `json:"wikis_synced_count"` + WikisSyncedInPercentage string `json:"wikis_synced_in_percentage"` + ReplicationSlotsCount int `json:"replication_slots_count"` + ReplicationSlotsUsedCount int `json:"replication_slots_used_count"` + ReplicationSlotsUsedInPercentage string `json:"replication_slots_used_in_percentage"` + ReplicationSlotsMaxRetainedWalBytes int `json:"replication_slots_max_retained_wal_bytes"` + RepositoriesCheckedCount int `json:"repositories_checked_count"` + RepositoriesCheckedFailedCount int `json:"repositories_checked_failed_count"` + RepositoriesCheckedInPercentage string `json:"repositories_checked_in_percentage"` + RepositoriesChecksummedCount int `json:"repositories_checksummed_count"` + RepositoriesChecksumFailedCount int `json:"repositories_checksum_failed_count"` + RepositoriesChecksummedInPercentage string `json:"repositories_checksummed_in_percentage"` + WikisChecksummedCount int `json:"wikis_checksummed_count"` + WikisChecksumFailedCount int 
`json:"wikis_checksum_failed_count"` + WikisChecksummedInPercentage string `json:"wikis_checksummed_in_percentage"` + RepositoriesVerifiedCount int `json:"repositories_verified_count"` + RepositoriesVerificationFailedCount int `json:"repositories_verification_failed_count"` + RepositoriesVerifiedInPercentage string `json:"repositories_verified_in_percentage"` + RepositoriesChecksumMismatchCount int `json:"repositories_checksum_mismatch_count"` + WikisVerifiedCount int `json:"wikis_verified_count"` + WikisVerificationFailedCount int `json:"wikis_verification_failed_count"` + WikisVerifiedInPercentage string `json:"wikis_verified_in_percentage"` + WikisChecksumMismatchCount int `json:"wikis_checksum_mismatch_count"` + RepositoriesRetryingVerificationCount int `json:"repositories_retrying_verification_count"` + WikisRetryingVerificationCount int `json:"wikis_retrying_verification_count"` + LastEventID int `json:"last_event_id"` + LastEventTimestamp int `json:"last_event_timestamp"` + CursorLastEventID int `json:"cursor_last_event_id"` + CursorLastEventTimestamp int `json:"cursor_last_event_timestamp"` + LastSuccessfulStatusCheckTimestamp int `json:"last_successful_status_check_timestamp"` + Version string `json:"version"` + Revision string `json:"revision"` + MergeRequestDiffsCount int `json:"merge_request_diffs_count"` + MergeRequestDiffsChecksumTotalCount int `json:"merge_request_diffs_checksum_total_count"` + MergeRequestDiffsChecksummedCount int `json:"merge_request_diffs_checksummed_count"` + MergeRequestDiffsChecksumFailedCount int `json:"merge_request_diffs_checksum_failed_count"` + MergeRequestDiffsSyncedCount int `json:"merge_request_diffs_synced_count"` + MergeRequestDiffsFailedCount int `json:"merge_request_diffs_failed_count"` + MergeRequestDiffsRegistryCount int `json:"merge_request_diffs_registry_count"` + MergeRequestDiffsVerificationTotalCount int `json:"merge_request_diffs_verification_total_count"` + MergeRequestDiffsVerifiedCount int `json:"merge_request_diffs_verified_count"` + MergeRequestDiffsVerificationFailedCount int `json:"merge_request_diffs_verification_failed_count"` + MergeRequestDiffsSyncedInPercentage string `json:"merge_request_diffs_synced_in_percentage"` + MergeRequestDiffsVerifiedInPercentage string `json:"merge_request_diffs_verified_in_percentage"` + PackageFilesCount int `json:"package_files_count"` + PackageFilesChecksumTotalCount int `json:"package_files_checksum_total_count"` + PackageFilesChecksummedCount int `json:"package_files_checksummed_count"` + PackageFilesChecksumFailedCount int `json:"package_files_checksum_failed_count"` + PackageFilesSyncedCount int `json:"package_files_synced_count"` + PackageFilesFailedCount int `json:"package_files_failed_count"` + PackageFilesRegistryCount int `json:"package_files_registry_count"` + PackageFilesVerificationTotalCount int `json:"package_files_verification_total_count"` + PackageFilesVerifiedCount int `json:"package_files_verified_count"` + PackageFilesVerificationFailedCount int `json:"package_files_verification_failed_count"` + PackageFilesSyncedInPercentage string `json:"package_files_synced_in_percentage"` + PackageFilesVerifiedInPercentage string `json:"package_files_verified_in_percentage"` + PagesDeploymentsCount int `json:"pages_deployments_count"` + PagesDeploymentsChecksumTotalCount int `json:"pages_deployments_checksum_total_count"` + PagesDeploymentsChecksummedCount int `json:"pages_deployments_checksummed_count"` + PagesDeploymentsChecksumFailedCount int 
`json:"pages_deployments_checksum_failed_count"` + PagesDeploymentsSyncedCount int `json:"pages_deployments_synced_count"` + PagesDeploymentsFailedCount int `json:"pages_deployments_failed_count"` + PagesDeploymentsRegistryCount int `json:"pages_deployments_registry_count"` + PagesDeploymentsVerificationTotalCount int `json:"pages_deployments_verification_total_count"` + PagesDeploymentsVerifiedCount int `json:"pages_deployments_verified_count"` + PagesDeploymentsVerificationFailedCount int `json:"pages_deployments_verification_failed_count"` + PagesDeploymentsSyncedInPercentage string `json:"pages_deployments_synced_in_percentage"` + PagesDeploymentsVerifiedInPercentage string `json:"pages_deployments_verified_in_percentage"` + TerraformStateVersionsCount int `json:"terraform_state_versions_count"` + TerraformStateVersionsChecksumTotalCount int `json:"terraform_state_versions_checksum_total_count"` + TerraformStateVersionsChecksummedCount int `json:"terraform_state_versions_checksummed_count"` + TerraformStateVersionsChecksumFailedCount int `json:"terraform_state_versions_checksum_failed_count"` + TerraformStateVersionsSyncedCount int `json:"terraform_state_versions_synced_count"` + TerraformStateVersionsFailedCount int `json:"terraform_state_versions_failed_count"` + TerraformStateVersionsRegistryCount int `json:"terraform_state_versions_registry_count"` + TerraformStateVersionsVerificationTotalCount int `json:"terraform_state_versions_verification_total_count"` + TerraformStateVersionsVerifiedCount int `json:"terraform_state_versions_verified_count"` + TerraformStateVersionsVerificationFailedCount int `json:"terraform_state_versions_verification_failed_count"` + TerraformStateVersionsSyncedInPercentage string `json:"terraform_state_versions_synced_in_percentage"` + TerraformStateVersionsVerifiedInPercentage string `json:"terraform_state_versions_verified_in_percentage"` + SnippetRepositoriesCount int `json:"snippet_repositories_count"` + SnippetRepositoriesChecksumTotalCount int `json:"snippet_repositories_checksum_total_count"` + SnippetRepositoriesChecksummedCount int `json:"snippet_repositories_checksummed_count"` + SnippetRepositoriesChecksumFailedCount int `json:"snippet_repositories_checksum_failed_count"` + SnippetRepositoriesSyncedCount int `json:"snippet_repositories_synced_count"` + SnippetRepositoriesFailedCount int `json:"snippet_repositories_failed_count"` + SnippetRepositoriesRegistryCount int `json:"snippet_repositories_registry_count"` + SnippetRepositoriesVerificationTotalCount int `json:"snippet_repositories_verification_total_count"` + SnippetRepositoriesVerifiedCount int `json:"snippet_repositories_verified_count"` + SnippetRepositoriesVerificationFailedCount int `json:"snippet_repositories_verification_failed_count"` + SnippetRepositoriesSyncedInPercentage string `json:"snippet_repositories_synced_in_percentage"` + SnippetRepositoriesVerifiedInPercentage string `json:"snippet_repositories_verified_in_percentage"` + GroupWikiRepositoriesCount int `json:"group_wiki_repositories_count"` + GroupWikiRepositoriesChecksumTotalCount int `json:"group_wiki_repositories_checksum_total_count"` + GroupWikiRepositoriesChecksummedCount int `json:"group_wiki_repositories_checksummed_count"` + GroupWikiRepositoriesChecksumFailedCount int `json:"group_wiki_repositories_checksum_failed_count"` + GroupWikiRepositoriesSyncedCount int `json:"group_wiki_repositories_synced_count"` + GroupWikiRepositoriesFailedCount int `json:"group_wiki_repositories_failed_count"` + 
GroupWikiRepositoriesRegistryCount int `json:"group_wiki_repositories_registry_count"`
+ GroupWikiRepositoriesVerificationTotalCount int `json:"group_wiki_repositories_verification_total_count"`
+ GroupWikiRepositoriesVerifiedCount int `json:"group_wiki_repositories_verified_count"`
+ GroupWikiRepositoriesVerificationFailedCount int `json:"group_wiki_repositories_verification_failed_count"`
+ GroupWikiRepositoriesSyncedInPercentage string `json:"group_wiki_repositories_synced_in_percentage"`
+ GroupWikiRepositoriesVerifiedInPercentage string `json:"group_wiki_repositories_verified_in_percentage"`
+ PipelineArtifactsCount int `json:"pipeline_artifacts_count"`
+ PipelineArtifactsChecksumTotalCount int `json:"pipeline_artifacts_checksum_total_count"`
+ PipelineArtifactsChecksummedCount int `json:"pipeline_artifacts_checksummed_count"`
+ PipelineArtifactsChecksumFailedCount int `json:"pipeline_artifacts_checksum_failed_count"`
+ PipelineArtifactsSyncedCount int `json:"pipeline_artifacts_synced_count"`
+ PipelineArtifactsFailedCount int `json:"pipeline_artifacts_failed_count"`
+ PipelineArtifactsRegistryCount int `json:"pipeline_artifacts_registry_count"`
+ PipelineArtifactsVerificationTotalCount int `json:"pipeline_artifacts_verification_total_count"`
+ PipelineArtifactsVerifiedCount int `json:"pipeline_artifacts_verified_count"`
+ PipelineArtifactsVerificationFailedCount int `json:"pipeline_artifacts_verification_failed_count"`
+ PipelineArtifactsSyncedInPercentage string `json:"pipeline_artifacts_synced_in_percentage"`
+ PipelineArtifactsVerifiedInPercentage string `json:"pipeline_artifacts_verified_in_percentage"`
+ UploadsCount int `json:"uploads_count"`
+ UploadsSyncedCount int `json:"uploads_synced_count"`
+ UploadsFailedCount int `json:"uploads_failed_count"`
+ UploadsRegistryCount int `json:"uploads_registry_count"`
+ UploadsSyncedInPercentage string `json:"uploads_synced_in_percentage"`
+}
+
+// RetrieveStatusOfAllGeoNodes gets the status of all Geo nodes.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/geo_nodes.html#retrieve-status-about-all-geo-nodes
+func (s *GeoNodesService) RetrieveStatusOfAllGeoNodes(options ...RequestOptionFunc) ([]*GeoNodeStatus, *Response, error) {
+ req, err := s.client.NewRequest(http.MethodGet, "geo_nodes/status", nil, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var gnss []*GeoNodeStatus
+ resp, err := s.client.Do(req, &gnss)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return gnss, resp, nil
+}
+
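A sketch of reading the replication health of every Geo node with RetrieveStatusOfAllGeoNodes, defined above. It assumes the same git client as the earlier sketch, authenticated with instance-admin rights, since the Geo endpoints are admin-only:

    // Print the replication health of every Geo node.
    statuses, _, err := git.GeoNodes.RetrieveStatusOfAllGeoNodes()
    if err != nil {
        log.Fatal(err)
    }
    for _, st := range statuses {
        fmt.Printf("node %d: healthy=%v (%s)\n", st.GeoNodeID, st.Healthy, st.HealthStatus)
    }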
+// RetrieveStatusOfGeoNode gets the status of a specific Geo node.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/geo_nodes.html#retrieve-status-about-a-specific-geo-node
+func (s *GeoNodesService) RetrieveStatusOfGeoNode(id int, options ...RequestOptionFunc) (*GeoNodeStatus, *Response, error) {
+ u := fmt.Sprintf("geo_nodes/%d/status", id)
+
+ req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ gns := new(GeoNodeStatus)
+ resp, err := s.client.Do(req, gns)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return gns, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/gitignore_templates.go b/vendor/github.com/xanzy/go-gitlab/gitignore_templates.go
new file mode 100644
index 0000000000..83d5872c14
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/gitignore_templates.go
@@ -0,0 +1,93 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+)
+
+// GitIgnoreTemplatesService handles communication with the gitignore
+// templates related methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/templates/gitignores.html
+type GitIgnoreTemplatesService struct {
+ client *Client
+}
+
+// GitIgnoreTemplate represents a GitLab gitignore template.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/templates/gitignores.html
+type GitIgnoreTemplate struct {
+ Name string `json:"name"`
+ Content string `json:"content"`
+}
+
+// GitIgnoreTemplateListItem represents a GitLab gitignore template from the list.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/templates/gitignores.html
+type GitIgnoreTemplateListItem struct {
+ Key string `json:"key"`
+ Name string `json:"name"`
+}
+
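A small usage sketch for the gitignore-template types above, using the GetTemplate method defined just below; it assumes the same git client as the earlier sketch, and "Go" is one of the template keys GitLab ships:

    // Fetch the "Go" gitignore template and print its contents.
    tpl, _, err := git.GitIgnoreTemplates.GetTemplate("Go")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(tpl.Content)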
+// ListTemplatesOptions represents the available ListTemplates() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/templates/gitignores.html#get-all-gitignore-templates
+type ListTemplatesOptions ListOptions
+
+// ListTemplates gets a list of available gitignore templates.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/templates/gitignores.html#get-all-gitignore-templates
+func (s *GitIgnoreTemplatesService) ListTemplates(opt *ListTemplatesOptions, options ...RequestOptionFunc) ([]*GitIgnoreTemplateListItem, *Response, error) {
+ req, err := s.client.NewRequest(http.MethodGet, "templates/gitignores", opt, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var gs []*GitIgnoreTemplateListItem
+ resp, err := s.client.Do(req, &gs)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return gs, resp, nil
+}
+
+// GetTemplate gets a single gitignore template.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/templates/gitignores.html#get-a-single-gitignore-template
+func (s *GitIgnoreTemplatesService) GetTemplate(key string, options ...RequestOptionFunc) (*GitIgnoreTemplate, *Response, error) {
+ u := fmt.Sprintf("templates/gitignores/%s", url.PathEscape(key))
+
+ req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ g := new(GitIgnoreTemplate)
+ resp, err := s.client.Do(req, g)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return g, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/gitlab.go b/vendor/github.com/xanzy/go-gitlab/gitlab.go
new file mode 100644
index 0000000000..19ed3eadb9
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/gitlab.go
@@ -0,0 +1,1049 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Package gitlab implements a GitLab API client.
+package gitlab
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "math/rand"
+ "mime/multipart"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/hashicorp/go-cleanhttp"
+
+ "github.com/google/go-querystring/query"
+ retryablehttp "github.com/hashicorp/go-retryablehttp"
+ "golang.org/x/oauth2"
+ "golang.org/x/time/rate"
+)
+
+const (
+ defaultBaseURL = "https://gitlab.com/"
+ apiVersionPath = "api/v4/"
+ userAgent = "go-gitlab"
+
+ headerRateLimit = "RateLimit-Limit"
+ headerRateReset = "RateLimit-Reset"
+)
+
+// AuthType represents an authentication type within GitLab.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/
+type AuthType int
+
+// List of available authentication types.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/
+const (
+ BasicAuth AuthType = iota
+ JobToken
+ OAuthToken
+ PrivateToken
+)
+
+var ErrNotFound = errors.New("404 Not Found")
+
+// A Client manages communication with the GitLab API.
+type Client struct {
+ // HTTP client used to communicate with the API.
+ client *retryablehttp.Client
+
+ // Base URL for API requests.
Defaults to the public GitLab API, but can be + // set to a domain endpoint to use with a self hosted GitLab server. baseURL + // should always be specified with a trailing slash. + baseURL *url.URL + + // disableRetries is used to disable the default retry logic. + disableRetries bool + + // configureLimiterOnce is used to make sure the limiter is configured exactly + // once and block all other calls until the initial (one) call is done. + configureLimiterOnce sync.Once + + // Limiter is used to limit API calls and prevent 429 responses. + limiter RateLimiter + + // Token type used to make authenticated API calls. + authType AuthType + + // Username and password used for basic authentication. + username, password string + + // Token used to make authenticated API calls. + token string + + // Protects the token field from concurrent read/write accesses. + tokenLock sync.RWMutex + + // Default request options applied to every request. + defaultRequestOptions []RequestOptionFunc + + // User agent used when communicating with the GitLab API. + UserAgent string + + // Services used for talking to different parts of the GitLab API. + AccessRequests *AccessRequestsService + Appearance *AppearanceService + Applications *ApplicationsService + AuditEvents *AuditEventsService + Avatar *AvatarRequestsService + AwardEmoji *AwardEmojiService + Boards *IssueBoardsService + Branches *BranchesService + BroadcastMessage *BroadcastMessagesService + CIYMLTemplate *CIYMLTemplatesService + ClusterAgents *ClusterAgentsService + Commits *CommitsService + ContainerRegistry *ContainerRegistryService + CustomAttribute *CustomAttributesService + DeployKeys *DeployKeysService + DeployTokens *DeployTokensService + DeploymentMergeRequests *DeploymentMergeRequestsService + Deployments *DeploymentsService + Discussions *DiscussionsService + DockerfileTemplate *DockerfileTemplatesService + DORAMetrics *DORAMetricsService + DraftNotes *DraftNotesService + Environments *EnvironmentsService + EpicIssues *EpicIssuesService + Epics *EpicsService + ErrorTracking *ErrorTrackingService + Events *EventsService + ExternalStatusChecks *ExternalStatusChecksService + Features *FeaturesService + FreezePeriods *FreezePeriodsService + GenericPackages *GenericPackagesService + GeoNodes *GeoNodesService + GitIgnoreTemplates *GitIgnoreTemplatesService + GroupAccessTokens *GroupAccessTokensService + GroupBadges *GroupBadgesService + GroupCluster *GroupClustersService + GroupEpicBoards *GroupEpicBoardsService + GroupImportExport *GroupImportExportService + GroupIssueBoards *GroupIssueBoardsService + GroupIterations *GroupIterationsService + GroupLabels *GroupLabelsService + GroupMembers *GroupMembersService + GroupMilestones *GroupMilestonesService + GroupProtectedEnvironments *GroupProtectedEnvironmentsService + GroupRepositoryStorageMove *GroupRepositoryStorageMoveService + GroupSSHCertificates *GroupSSHCertificatesService + GroupVariables *GroupVariablesService + GroupWikis *GroupWikisService + Groups *GroupsService + Import *ImportService + InstanceCluster *InstanceClustersService + InstanceVariables *InstanceVariablesService + Invites *InvitesService + IssueLinks *IssueLinksService + Issues *IssuesService + IssuesStatistics *IssuesStatisticsService + Jobs *JobsService + JobTokenScope *JobTokenScopeService + Keys *KeysService + Labels *LabelsService + License *LicenseService + LicenseTemplates *LicenseTemplatesService + ManagedLicenses *ManagedLicensesService + Markdown *MarkdownService + MemberRolesService *MemberRolesService + 
MergeRequestApprovals *MergeRequestApprovalsService + MergeRequests *MergeRequestsService + MergeTrains *MergeTrainsService + Metadata *MetadataService + Milestones *MilestonesService + Namespaces *NamespacesService + Notes *NotesService + NotificationSettings *NotificationSettingsService + Packages *PackagesService + Pages *PagesService + PagesDomains *PagesDomainsService + PersonalAccessTokens *PersonalAccessTokensService + PipelineSchedules *PipelineSchedulesService + PipelineTriggers *PipelineTriggersService + Pipelines *PipelinesService + PlanLimits *PlanLimitsService + ProjectAccessTokens *ProjectAccessTokensService + ProjectBadges *ProjectBadgesService + ProjectCluster *ProjectClustersService + ProjectFeatureFlags *ProjectFeatureFlagService + ProjectImportExport *ProjectImportExportService + ProjectIterations *ProjectIterationsService + ProjectMembers *ProjectMembersService + ProjectMirrors *ProjectMirrorService + ProjectRepositoryStorageMove *ProjectRepositoryStorageMoveService + ProjectSnippets *ProjectSnippetsService + ProjectTemplates *ProjectTemplatesService + ProjectVariables *ProjectVariablesService + ProjectVulnerabilities *ProjectVulnerabilitiesService + Projects *ProjectsService + ProtectedBranches *ProtectedBranchesService + ProtectedEnvironments *ProtectedEnvironmentsService + ProtectedTags *ProtectedTagsService + ReleaseLinks *ReleaseLinksService + Releases *ReleasesService + Repositories *RepositoriesService + RepositoryFiles *RepositoryFilesService + RepositorySubmodules *RepositorySubmodulesService + ResourceGroup *ResourceGroupService + ResourceIterationEvents *ResourceIterationEventsService + ResourceLabelEvents *ResourceLabelEventsService + ResourceMilestoneEvents *ResourceMilestoneEventsService + ResourceStateEvents *ResourceStateEventsService + ResourceWeightEvents *ResourceWeightEventsService + Runners *RunnersService + Search *SearchService + Services *ServicesService + Settings *SettingsService + Sidekiq *SidekiqService + SnippetRepositoryStorageMove *SnippetRepositoryStorageMoveService + Snippets *SnippetsService + SystemHooks *SystemHooksService + Tags *TagsService + Todos *TodosService + Topics *TopicsService + Users *UsersService + Validate *ValidateService + Version *VersionService + Wikis *WikisService +} + +// ListOptions specifies the optional parameters to various List methods that +// support pagination. +type ListOptions struct { + // For keyset-based paginated result sets, the value must be `"keyset"` + Pagination string `url:"pagination,omitempty" json:"pagination,omitempty"` + // For offset-based and keyset-based paginated result sets, the number of results to include per page. + PerPage int `url:"per_page,omitempty" json:"per_page,omitempty"` + // For offset-based paginated result sets, page of results to retrieve. + Page int `url:"page,omitempty" json:"page,omitempty"` + // For keyset-based paginated result sets, tree record ID at which to fetch the next page. + PageToken string `url:"page_token,omitempty" json:"page_token,omitempty"` + // For keyset-based paginated result sets, name of the column by which to order + OrderBy string `url:"order_by,omitempty" json:"order_by,omitempty"` + // For keyset-based paginated result sets, sort order (`"asc"`` or `"desc"`) + Sort string `url:"sort,omitempty" json:"sort,omitempty"` +} + +// RateLimiter describes the interface that all (custom) rate limiters must implement. +type RateLimiter interface { + Wait(context.Context) error +} + +// NewClient returns a new GitLab API client. 
To use API methods which require +// authentication, provide a valid private or personal token. +func NewClient(token string, options ...ClientOptionFunc) (*Client, error) { + client, err := newClient(options...) + if err != nil { + return nil, err + } + client.authType = PrivateToken + client.token = token + return client, nil +} + +// NewBasicAuthClient returns a new GitLab API client. To use API methods which +// require authentication, provide a valid username and password. +func NewBasicAuthClient(username, password string, options ...ClientOptionFunc) (*Client, error) { + client, err := newClient(options...) + if err != nil { + return nil, err + } + + client.authType = BasicAuth + client.username = username + client.password = password + + return client, nil +} + +// NewJobClient returns a new GitLab API client. To use API methods which require +// authentication, provide a valid job token. +func NewJobClient(token string, options ...ClientOptionFunc) (*Client, error) { + client, err := newClient(options...) + if err != nil { + return nil, err + } + client.authType = JobToken + client.token = token + return client, nil +} + +// NewOAuthClient returns a new GitLab API client. To use API methods which +// require authentication, provide a valid oauth token. +func NewOAuthClient(token string, options ...ClientOptionFunc) (*Client, error) { + client, err := newClient(options...) + if err != nil { + return nil, err + } + client.authType = OAuthToken + client.token = token + return client, nil +} + +func newClient(options ...ClientOptionFunc) (*Client, error) { + c := &Client{UserAgent: userAgent} + + // Configure the HTTP client. + c.client = &retryablehttp.Client{ + Backoff: c.retryHTTPBackoff, + CheckRetry: c.retryHTTPCheck, + ErrorHandler: retryablehttp.PassthroughErrorHandler, + HTTPClient: cleanhttp.DefaultPooledClient(), + RetryWaitMin: 100 * time.Millisecond, + RetryWaitMax: 400 * time.Millisecond, + RetryMax: 5, + } + + // Set the default base URL. + c.setBaseURL(defaultBaseURL) + + // Apply any given client options. + for _, fn := range options { + if fn == nil { + continue + } + if err := fn(c); err != nil { + return nil, err + } + } + + // If no custom limiter was set using a client option, configure + // the default rate limiter with values that implicitly disable + // rate limiting until an initial HTTP call is done and we can + // use the headers to try and properly configure the limiter. + if c.limiter == nil { + c.limiter = rate.NewLimiter(rate.Inf, 0) + } + + // Create the internal timeStats service. + timeStats := &timeStatsService{client: c} + + // Create all the public services. 
+ c.AccessRequests = &AccessRequestsService{client: c} + c.Appearance = &AppearanceService{client: c} + c.Applications = &ApplicationsService{client: c} + c.AuditEvents = &AuditEventsService{client: c} + c.Avatar = &AvatarRequestsService{client: c} + c.AwardEmoji = &AwardEmojiService{client: c} + c.Boards = &IssueBoardsService{client: c} + c.Branches = &BranchesService{client: c} + c.BroadcastMessage = &BroadcastMessagesService{client: c} + c.CIYMLTemplate = &CIYMLTemplatesService{client: c} + c.ClusterAgents = &ClusterAgentsService{client: c} + c.Commits = &CommitsService{client: c} + c.ContainerRegistry = &ContainerRegistryService{client: c} + c.CustomAttribute = &CustomAttributesService{client: c} + c.DeployKeys = &DeployKeysService{client: c} + c.DeployTokens = &DeployTokensService{client: c} + c.DeploymentMergeRequests = &DeploymentMergeRequestsService{client: c} + c.Deployments = &DeploymentsService{client: c} + c.Discussions = &DiscussionsService{client: c} + c.DockerfileTemplate = &DockerfileTemplatesService{client: c} + c.DORAMetrics = &DORAMetricsService{client: c} + c.DraftNotes = &DraftNotesService{client: c} + c.Environments = &EnvironmentsService{client: c} + c.EpicIssues = &EpicIssuesService{client: c} + c.Epics = &EpicsService{client: c} + c.ErrorTracking = &ErrorTrackingService{client: c} + c.Events = &EventsService{client: c} + c.ExternalStatusChecks = &ExternalStatusChecksService{client: c} + c.Features = &FeaturesService{client: c} + c.FreezePeriods = &FreezePeriodsService{client: c} + c.GenericPackages = &GenericPackagesService{client: c} + c.GeoNodes = &GeoNodesService{client: c} + c.GitIgnoreTemplates = &GitIgnoreTemplatesService{client: c} + c.GroupAccessTokens = &GroupAccessTokensService{client: c} + c.GroupBadges = &GroupBadgesService{client: c} + c.GroupCluster = &GroupClustersService{client: c} + c.GroupEpicBoards = &GroupEpicBoardsService{client: c} + c.GroupImportExport = &GroupImportExportService{client: c} + c.GroupIssueBoards = &GroupIssueBoardsService{client: c} + c.GroupIterations = &GroupIterationsService{client: c} + c.GroupLabels = &GroupLabelsService{client: c} + c.GroupMembers = &GroupMembersService{client: c} + c.GroupMilestones = &GroupMilestonesService{client: c} + c.GroupProtectedEnvironments = &GroupProtectedEnvironmentsService{client: c} + c.GroupRepositoryStorageMove = &GroupRepositoryStorageMoveService{client: c} + c.GroupSSHCertificates = &GroupSSHCertificatesService{client: c} + c.GroupVariables = &GroupVariablesService{client: c} + c.GroupWikis = &GroupWikisService{client: c} + c.Groups = &GroupsService{client: c} + c.Import = &ImportService{client: c} + c.InstanceCluster = &InstanceClustersService{client: c} + c.InstanceVariables = &InstanceVariablesService{client: c} + c.Invites = &InvitesService{client: c} + c.IssueLinks = &IssueLinksService{client: c} + c.Issues = &IssuesService{client: c, timeStats: timeStats} + c.IssuesStatistics = &IssuesStatisticsService{client: c} + c.Jobs = &JobsService{client: c} + c.JobTokenScope = &JobTokenScopeService{client: c} + c.Keys = &KeysService{client: c} + c.Labels = &LabelsService{client: c} + c.License = &LicenseService{client: c} + c.LicenseTemplates = &LicenseTemplatesService{client: c} + c.ManagedLicenses = &ManagedLicensesService{client: c} + c.Markdown = &MarkdownService{client: c} + c.MemberRolesService = &MemberRolesService{client: c} + c.MergeRequestApprovals = &MergeRequestApprovalsService{client: c} + c.MergeRequests = &MergeRequestsService{client: c, timeStats: timeStats} + c.MergeTrains 
= &MergeTrainsService{client: c} + c.Metadata = &MetadataService{client: c} + c.Milestones = &MilestonesService{client: c} + c.Namespaces = &NamespacesService{client: c} + c.Notes = &NotesService{client: c} + c.NotificationSettings = &NotificationSettingsService{client: c} + c.Packages = &PackagesService{client: c} + c.Pages = &PagesService{client: c} + c.PagesDomains = &PagesDomainsService{client: c} + c.PersonalAccessTokens = &PersonalAccessTokensService{client: c} + c.PipelineSchedules = &PipelineSchedulesService{client: c} + c.PipelineTriggers = &PipelineTriggersService{client: c} + c.Pipelines = &PipelinesService{client: c} + c.PlanLimits = &PlanLimitsService{client: c} + c.ProjectAccessTokens = &ProjectAccessTokensService{client: c} + c.ProjectBadges = &ProjectBadgesService{client: c} + c.ProjectCluster = &ProjectClustersService{client: c} + c.ProjectFeatureFlags = &ProjectFeatureFlagService{client: c} + c.ProjectImportExport = &ProjectImportExportService{client: c} + c.ProjectIterations = &ProjectIterationsService{client: c} + c.ProjectMembers = &ProjectMembersService{client: c} + c.ProjectMirrors = &ProjectMirrorService{client: c} + c.ProjectRepositoryStorageMove = &ProjectRepositoryStorageMoveService{client: c} + c.ProjectSnippets = &ProjectSnippetsService{client: c} + c.ProjectTemplates = &ProjectTemplatesService{client: c} + c.ProjectVariables = &ProjectVariablesService{client: c} + c.ProjectVulnerabilities = &ProjectVulnerabilitiesService{client: c} + c.Projects = &ProjectsService{client: c} + c.ProtectedBranches = &ProtectedBranchesService{client: c} + c.ProtectedEnvironments = &ProtectedEnvironmentsService{client: c} + c.ProtectedTags = &ProtectedTagsService{client: c} + c.ReleaseLinks = &ReleaseLinksService{client: c} + c.Releases = &ReleasesService{client: c} + c.Repositories = &RepositoriesService{client: c} + c.RepositoryFiles = &RepositoryFilesService{client: c} + c.RepositorySubmodules = &RepositorySubmodulesService{client: c} + c.ResourceGroup = &ResourceGroupService{client: c} + c.ResourceIterationEvents = &ResourceIterationEventsService{client: c} + c.ResourceLabelEvents = &ResourceLabelEventsService{client: c} + c.ResourceMilestoneEvents = &ResourceMilestoneEventsService{client: c} + c.ResourceStateEvents = &ResourceStateEventsService{client: c} + c.ResourceWeightEvents = &ResourceWeightEventsService{client: c} + c.Runners = &RunnersService{client: c} + c.Search = &SearchService{client: c} + c.Services = &ServicesService{client: c} + c.Settings = &SettingsService{client: c} + c.Sidekiq = &SidekiqService{client: c} + c.Snippets = &SnippetsService{client: c} + c.SnippetRepositoryStorageMove = &SnippetRepositoryStorageMoveService{client: c} + c.SystemHooks = &SystemHooksService{client: c} + c.Tags = &TagsService{client: c} + c.Todos = &TodosService{client: c} + c.Topics = &TopicsService{client: c} + c.Users = &UsersService{client: c} + c.Validate = &ValidateService{client: c} + c.Version = &VersionService{client: c} + c.Wikis = &WikisService{client: c} + + return c, nil +} + +// retryHTTPCheck provides a callback for Client.CheckRetry which +// will retry both rate limit (429) and server (>= 500) errors. 
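+//
+// Illustrative sketch (not part of the upstream file): with the defaults set
+// in newClient above, a request that keeps answering 429 or a 5xx status is
+// retried up to RetryMax (5) times before the final response is returned.
+// The token below is a placeholder:
+//
+//	client, err := gitlab.NewClient("glpat-example")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	// Transient 429/5xx responses here are retried transparently by the
+//	// embedded retryablehttp client before Do returns.
+//	version, _, err := client.Version.GetVersion()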
+func (c *Client) retryHTTPCheck(ctx context.Context, resp *http.Response, err error) (bool, error) {
+	if ctx.Err() != nil {
+		return false, ctx.Err()
+	}
+	if err != nil {
+		return false, err
+	}
+	if !c.disableRetries && (resp.StatusCode == 429 || resp.StatusCode >= 500) {
+		return true, nil
+	}
+	return false, nil
+}
+
+// retryHTTPBackoff provides a generic callback for Client.Backoff which
+// will pass through all calls based on the status code of the response.
+func (c *Client) retryHTTPBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration {
+	// Use the rate limit backoff function when we are rate limited.
+	if resp != nil && resp.StatusCode == 429 {
+		return rateLimitBackoff(min, max, attemptNum, resp)
+	}
+
+	// Set custom durations when we experience a service interruption.
+	min = 700 * time.Millisecond
+	max = 900 * time.Millisecond
+
+	return retryablehttp.LinearJitterBackoff(min, max, attemptNum, resp)
+}
+
+// rateLimitBackoff provides a callback for Client.Backoff which will use the
+// RateLimit-Reset header to determine the time to wait. We add some jitter
+// to prevent a thundering herd.
+//
+// min and max are mainly used for bounding the jitter that will be added to
+// the reset time retrieved from the headers. But if the final wait time is
+// less than min, min will be used instead.
+func rateLimitBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration {
+	// rnd is used to generate pseudo-random numbers.
+	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
+
+	// First create some jitter bounded by the min and max durations.
+	jitter := time.Duration(rnd.Float64() * float64(max-min))
+
+	if resp != nil {
+		if v := resp.Header.Get(headerRateReset); v != "" {
+			if reset, _ := strconv.ParseInt(v, 10, 64); reset > 0 {
+				// Only update min if the given time to wait is longer.
+				if wait := time.Until(time.Unix(reset, 0)); wait > min {
+					min = wait
+				}
+			}
+		} else {
+			// In case the RateLimit-Reset header is not set, back off an additional
+			// 100% exponentially. With the default milliseconds being set to 100 for
+			// `min`, this makes the 5th retry wait 3.2 seconds (3,200 ms) by default.
+			min = time.Duration(float64(min) * math.Pow(2, float64(attemptNum)))
+		}
+	}
+
+	return min + jitter
+}
+
+// configureLimiter configures the rate limiter.
+func (c *Client) configureLimiter(ctx context.Context, headers http.Header) {
+	if v := headers.Get(headerRateLimit); v != "" {
+		if rateLimit, _ := strconv.ParseFloat(v, 64); rateLimit > 0 {
+			// The rate limit is based on requests per minute, so for our limiter to
+			// work correctly we divide the limit by 60 to get the limit per second.
+			rateLimit /= 60
+
+			// Configure the limit and burst using a split of 2/3 for the limit and
+			// 1/3 for the burst. This enables clients to burst 1/3 of the allowed
+			// calls before the limiter kicks in. The remaining calls will then be
+			// spread out evenly using intervals of time.Second / limit which should
+			// prevent hitting the rate limit.
+			limit := rate.Limit(rateLimit * 0.66)
+			burst := int(rateLimit * 0.33)
+
+			// Need at least one allowed to burst or x/time will throw an error
+			if burst == 0 {
+				burst = 1
+			}
+
+			// Create a new limiter using the calculated values.
+			c.limiter = rate.NewLimiter(limit, burst)
+
+			// Call the limiter once as we have already made a request
+			// to get the headers and the limiter is not aware of this.
+			c.limiter.Wait(ctx)
+		}
+	}
+}
+
+// BaseURL returns a copy of the baseURL.
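+//
+// For example (illustrative):
+//
+//	u := client.BaseURL()
+//	fmt.Println(u.String()) // e.g. "https://gitlab.com/api/v4/"
+//	u.Host = "elsewhere"    // mutating the copy does not affect the client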
+func (c *Client) BaseURL() *url.URL {
+	u := *c.baseURL
+	return &u
+}
+
+// setBaseURL sets the base URL for API requests to a custom endpoint.
+func (c *Client) setBaseURL(urlStr string) error {
+	// Make sure the given URL ends with a slash
+	if !strings.HasSuffix(urlStr, "/") {
+		urlStr += "/"
+	}
+
+	baseURL, err := url.Parse(urlStr)
+	if err != nil {
+		return err
+	}
+
+	if !strings.HasSuffix(baseURL.Path, apiVersionPath) {
+		baseURL.Path += apiVersionPath
+	}
+
+	// Update the base URL of the client.
+	c.baseURL = baseURL
+
+	return nil
+}
+
+// NewRequest creates a new API request. The method expects a relative URL
+// path that will be resolved relative to the base URL of the Client.
+// Relative URL paths should always be specified without a preceding slash.
+// If specified, the value pointed to by body is JSON encoded and included
+// as the request body.
+func (c *Client) NewRequest(method, path string, opt interface{}, options []RequestOptionFunc) (*retryablehttp.Request, error) {
+	u := *c.baseURL
+	unescaped, err := url.PathUnescape(path)
+	if err != nil {
+		return nil, err
+	}
+
+	// Set the encoded path data
+	u.RawPath = c.baseURL.Path + path
+	u.Path = c.baseURL.Path + unescaped
+
+	// Create a request specific headers map.
+	reqHeaders := make(http.Header)
+	reqHeaders.Set("Accept", "application/json")
+
+	if c.UserAgent != "" {
+		reqHeaders.Set("User-Agent", c.UserAgent)
+	}
+
+	var body interface{}
+	switch {
+	case method == http.MethodPatch || method == http.MethodPost || method == http.MethodPut:
+		reqHeaders.Set("Content-Type", "application/json")
+
+		if opt != nil {
+			body, err = json.Marshal(opt)
+			if err != nil {
+				return nil, err
+			}
+		}
+	case opt != nil:
+		q, err := query.Values(opt)
+		if err != nil {
+			return nil, err
+		}
+		u.RawQuery = q.Encode()
+	}
+
+	req, err := retryablehttp.NewRequest(method, u.String(), body)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, fn := range append(c.defaultRequestOptions, options...) {
+		if fn == nil {
+			continue
+		}
+		if err := fn(req); err != nil {
+			return nil, err
+		}
+	}
+
+	// Set the request specific headers.
+	for k, v := range reqHeaders {
+		req.Header[k] = v
+	}
+
+	return req, nil
+}
+
+// UploadRequest creates an API request for uploading a file. The method
+// expects a relative URL path that will be resolved relative to the base
+// URL of the Client. Relative URL paths should always be specified without
+// a preceding slash. If specified, the value pointed to by body is JSON
+// encoded and included as the request body.
+func (c *Client) UploadRequest(method, path string, content io.Reader, filename string, uploadType UploadType, opt interface{}, options []RequestOptionFunc) (*retryablehttp.Request, error) {
+	u := *c.baseURL
+	unescaped, err := url.PathUnescape(path)
+	if err != nil {
+		return nil, err
+	}
+
+	// Set the encoded path data
+	u.RawPath = c.baseURL.Path + path
+	u.Path = c.baseURL.Path + unescaped
+
+	// Create a request specific headers map.
+ reqHeaders := make(http.Header) + reqHeaders.Set("Accept", "application/json") + + if c.UserAgent != "" { + reqHeaders.Set("User-Agent", c.UserAgent) + } + + b := new(bytes.Buffer) + w := multipart.NewWriter(b) + + fw, err := w.CreateFormFile(string(uploadType), filename) + if err != nil { + return nil, err + } + + if _, err := io.Copy(fw, content); err != nil { + return nil, err + } + + if opt != nil { + fields, err := query.Values(opt) + if err != nil { + return nil, err + } + for name := range fields { + if err = w.WriteField(name, fmt.Sprintf("%v", fields.Get(name))); err != nil { + return nil, err + } + } + } + + if err = w.Close(); err != nil { + return nil, err + } + + reqHeaders.Set("Content-Type", w.FormDataContentType()) + + req, err := retryablehttp.NewRequest(method, u.String(), b) + if err != nil { + return nil, err + } + + for _, fn := range append(c.defaultRequestOptions, options...) { + if fn == nil { + continue + } + if err := fn(req); err != nil { + return nil, err + } + } + + // Set the request specific headers. + for k, v := range reqHeaders { + req.Header[k] = v + } + + return req, nil +} + +// Response is a GitLab API response. This wraps the standard http.Response +// returned from GitLab and provides convenient access to things like +// pagination links. +type Response struct { + *http.Response + + // Fields used for offset-based pagination. + TotalItems int + TotalPages int + ItemsPerPage int + CurrentPage int + NextPage int + PreviousPage int + + // Fields used for keyset-based pagination. + PreviousLink string + NextLink string + FirstLink string + LastLink string +} + +// newResponse creates a new Response for the provided http.Response. +func newResponse(r *http.Response) *Response { + response := &Response{Response: r} + response.populatePageValues() + response.populateLinkValues() + return response +} + +const ( + // Headers used for offset-based pagination. + xTotal = "X-Total" + xTotalPages = "X-Total-Pages" + xPerPage = "X-Per-Page" + xPage = "X-Page" + xNextPage = "X-Next-Page" + xPrevPage = "X-Prev-Page" + + // Headers used for keyset-based pagination. + linkPrev = "prev" + linkNext = "next" + linkFirst = "first" + linkLast = "last" +) + +// populatePageValues parses the HTTP Link response headers and populates the +// various pagination link values in the Response. 
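+//
+// Illustrative sketch (not part of the upstream file): the values parsed here
+// end up on the *Response returned by every call, which enables the classic
+// pagination loop over any List method; 42 is a placeholder group ID:
+//
+//	opt := &gitlab.ListGroupBadgesOptions{ListOptions: gitlab.ListOptions{PerPage: 50}}
+//	for {
+//		badges, resp, err := client.GroupBadges.ListGroupBadges(42, opt)
+//		if err != nil {
+//			break
+//		}
+//		_ = badges // process the current page
+//		if resp.NextPage == 0 {
+//			break
+//		}
+//		opt.Page = resp.NextPage
+//	}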
+func (r *Response) populatePageValues() { + if totalItems := r.Header.Get(xTotal); totalItems != "" { + r.TotalItems, _ = strconv.Atoi(totalItems) + } + if totalPages := r.Header.Get(xTotalPages); totalPages != "" { + r.TotalPages, _ = strconv.Atoi(totalPages) + } + if itemsPerPage := r.Header.Get(xPerPage); itemsPerPage != "" { + r.ItemsPerPage, _ = strconv.Atoi(itemsPerPage) + } + if currentPage := r.Header.Get(xPage); currentPage != "" { + r.CurrentPage, _ = strconv.Atoi(currentPage) + } + if nextPage := r.Header.Get(xNextPage); nextPage != "" { + r.NextPage, _ = strconv.Atoi(nextPage) + } + if previousPage := r.Header.Get(xPrevPage); previousPage != "" { + r.PreviousPage, _ = strconv.Atoi(previousPage) + } +} + +func (r *Response) populateLinkValues() { + if link := r.Header.Get("Link"); link != "" { + for _, link := range strings.Split(link, ",") { + parts := strings.Split(link, ";") + if len(parts) < 2 { + continue + } + + linkType := strings.Trim(strings.Split(parts[1], "=")[1], "\"") + linkValue := strings.Trim(parts[0], "< >") + + switch linkType { + case linkPrev: + r.PreviousLink = linkValue + case linkNext: + r.NextLink = linkValue + case linkFirst: + r.FirstLink = linkValue + case linkLast: + r.LastLink = linkValue + } + } + } +} + +// Do sends an API request and returns the API response. The API response is +// JSON decoded and stored in the value pointed to by v, or returned as an +// error if an API error has occurred. If v implements the io.Writer +// interface, the raw response body will be written to v, without attempting to +// first decode it. +func (c *Client) Do(req *retryablehttp.Request, v interface{}) (*Response, error) { + // Wait will block until the limiter can obtain a new token. + err := c.limiter.Wait(req.Context()) + if err != nil { + return nil, err + } + + // Set the correct authentication header. If using basic auth, then check + // if we already have a token and if not first authenticate and get one. + var basicAuthToken string + switch c.authType { + case BasicAuth: + c.tokenLock.RLock() + basicAuthToken = c.token + c.tokenLock.RUnlock() + if basicAuthToken == "" { + // If we don't have a token yet, we first need to request one. + basicAuthToken, err = c.requestOAuthToken(req.Context(), basicAuthToken) + if err != nil { + return nil, err + } + } + req.Header.Set("Authorization", "Bearer "+basicAuthToken) + case JobToken: + if values := req.Header.Values("JOB-TOKEN"); len(values) == 0 { + req.Header.Set("JOB-TOKEN", c.token) + } + case OAuthToken: + if values := req.Header.Values("Authorization"); len(values) == 0 { + req.Header.Set("Authorization", "Bearer "+c.token) + } + case PrivateToken: + if values := req.Header.Values("PRIVATE-TOKEN"); len(values) == 0 { + req.Header.Set("PRIVATE-TOKEN", c.token) + } + } + + resp, err := c.client.Do(req) + if err != nil { + return nil, err + } + + if resp.StatusCode == http.StatusUnauthorized && c.authType == BasicAuth { + resp.Body.Close() + // The token most likely expired, so we need to request a new one and try again. + if _, err := c.requestOAuthToken(req.Context(), basicAuthToken); err != nil { + return nil, err + } + return c.Do(req, v) + } + defer resp.Body.Close() + defer io.Copy(io.Discard, resp.Body) + + // If not yet configured, try to configure the rate limiter + // using the response headers we just received. Fail silently + // so the limiter will remain disabled in case of an error. 
+	c.configureLimiterOnce.Do(func() { c.configureLimiter(req.Context(), resp.Header) })
+
+	response := newResponse(resp)
+
+	err = CheckResponse(resp)
+	if err != nil {
+		// Even though there was an error, we still return the response
+		// in case the caller wants to inspect it further.
+		return response, err
+	}
+
+	if v != nil {
+		if w, ok := v.(io.Writer); ok {
+			_, err = io.Copy(w, resp.Body)
+		} else {
+			err = json.NewDecoder(resp.Body).Decode(v)
+		}
+	}
+
+	return response, err
+}
+
+func (c *Client) requestOAuthToken(ctx context.Context, token string) (string, error) {
+	c.tokenLock.Lock()
+	defer c.tokenLock.Unlock()
+
+	// Return early if the token was updated while waiting for the lock.
+	if c.token != token {
+		return c.token, nil
+	}
+
+	config := &oauth2.Config{
+		Endpoint: oauth2.Endpoint{
+			AuthURL: strings.TrimSuffix(c.baseURL.String(), apiVersionPath) + "oauth/authorize",
+			TokenURL: strings.TrimSuffix(c.baseURL.String(), apiVersionPath) + "oauth/token",
+		},
+	}
+
+	ctx = context.WithValue(ctx, oauth2.HTTPClient, c.client.HTTPClient)
+	t, err := config.PasswordCredentialsToken(ctx, c.username, c.password)
+	if err != nil {
+		return "", err
+	}
+	c.token = t.AccessToken
+
+	return c.token, nil
+}
+
+// Helper function to accept and format either the project ID or the project
+// name as the project identifier for all API calls.
+func parseID(id interface{}) (string, error) {
+	switch v := id.(type) {
+	case int:
+		return strconv.Itoa(v), nil
+	case string:
+		return v, nil
+	default:
+		return "", fmt.Errorf("invalid ID type %#v, the ID must be an int or a string", id)
+	}
+}
+
+// Helper function to escape a project identifier.
+func PathEscape(s string) string {
+	return strings.ReplaceAll(url.PathEscape(s), ".", "%2E")
+}
+
+// An ErrorResponse reports one or more errors caused by an API request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/index.html#data-validation-and-error-reporting
+type ErrorResponse struct {
+	Body []byte
+	Response *http.Response
+	Message string
+}
+
+func (e *ErrorResponse) Error() string {
+	path, _ := url.QueryUnescape(e.Response.Request.URL.Path)
+	url := fmt.Sprintf("%s://%s%s", e.Response.Request.URL.Scheme, e.Response.Request.URL.Host, path)
+
+	if e.Message == "" {
+		return fmt.Sprintf("%s %s: %d", e.Response.Request.Method, url, e.Response.StatusCode)
+	} else {
+		return fmt.Sprintf("%s %s: %d %s", e.Response.Request.Method, url, e.Response.StatusCode, e.Message)
+	}
+}
+
+// CheckResponse checks the API response for errors, and returns them if present.
+func CheckResponse(r *http.Response) error {
+	switch r.StatusCode {
+	case 200, 201, 202, 204, 304:
+		return nil
+	case 404:
+		return ErrNotFound
+	}
+
+	errorResponse := &ErrorResponse{Response: r}
+
+	data, err := io.ReadAll(r.Body)
+	if err == nil && strings.TrimSpace(string(data)) != "" {
+		errorResponse.Body = data
+
+		var raw interface{}
+		if err := json.Unmarshal(data, &raw); err != nil {
+			errorResponse.Message = fmt.Sprintf("failed to parse unknown error format: %s", data)
+		} else {
+			errorResponse.Message = parseError(raw)
+		}
+	}
+
+	return errorResponse
+}
+
+// Format (angle-bracket names are placeholders):
+//
+//	{
+//	    "message": {
+//	        "<property-name>": [
+//	            "<error-message>",
+//	            "<error-message>",
+//	            ...
+//	        ],
+//	        "<embed-entity>": {
+//	            "<property-name>": [
+//	                "<error-message>",
+//	                "<error-message>",
+//	                ...
+//	            ],
+//	        }
+//	    },
+//	    "error": "<error-message>"
+//	}
+func parseError(raw interface{}) string {
+	switch raw := raw.(type) {
+	case string:
+		return raw
+
+	case []interface{}:
+		var errs []string
+		for _, v := range raw {
+			errs = append(errs, parseError(v))
+		}
+		return fmt.Sprintf("[%s]", strings.Join(errs, ", "))
+
+	case map[string]interface{}:
+		var errs []string
+		for k, v := range raw {
+			errs = append(errs, fmt.Sprintf("{%s: %s}", k, parseError(v)))
+		}
+		sort.Strings(errs)
+		return strings.Join(errs, ", ")
+
+	default:
+		return fmt.Sprintf("failed to parse unexpected error type: %T", raw)
+	}
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/group_access_tokens.go b/vendor/github.com/xanzy/go-gitlab/group_access_tokens.go
new file mode 100644
index 0000000000..ccbe47b838
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/group_access_tokens.go
@@ -0,0 +1,199 @@
+//
+// Copyright 2022, Masahiro Yoshida
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// GroupAccessTokensService handles communication with the
+// group access tokens related methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/group_access_tokens.html
+type GroupAccessTokensService struct {
+	client *Client
+}
+
+// GroupAccessToken represents a GitLab group access token.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/group_access_tokens.html
+type GroupAccessToken struct {
+	ID int `json:"id"`
+	UserID int `json:"user_id"`
+	Name string `json:"name"`
+	Scopes []string `json:"scopes"`
+	CreatedAt *time.Time `json:"created_at"`
+	ExpiresAt *ISOTime `json:"expires_at"`
+	LastUsedAt *time.Time `json:"last_used_at"`
+	Active bool `json:"active"`
+	Revoked bool `json:"revoked"`
+	Token string `json:"token"`
+	AccessLevel AccessLevelValue `json:"access_level"`
+}
+
+func (v GroupAccessToken) String() string {
+	return Stringify(v)
+}
+
+// ListGroupAccessTokensOptions represents the available options for
+// listing group access tokens in a group.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_access_tokens.html#list-group-access-tokens
+type ListGroupAccessTokensOptions ListOptions
+
+// ListGroupAccessTokens gets a list of all group access tokens in a group.
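+//
+// Illustrative sketch (not part of the upstream file; 42 is a placeholder
+// group ID and assumes a configured *gitlab.Client named client):
+//
+//	gats, _, err := client.GroupAccessTokens.ListGroupAccessTokens(42, &gitlab.ListGroupAccessTokensOptions{PerPage: 20})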
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_access_tokens.html#list-group-access-tokens
+func (s *GroupAccessTokensService) ListGroupAccessTokens(gid interface{}, opt *ListGroupAccessTokensOptions, options ...RequestOptionFunc) ([]*GroupAccessToken, *Response, error) {
+	groups, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/access_tokens", PathEscape(groups))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var gats []*GroupAccessToken
+	resp, err := s.client.Do(req, &gats)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gats, resp, nil
+}
+
+// GetGroupAccessToken gets a single group access token in a group.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_access_tokens.html#get-a-group-access-token
+func (s *GroupAccessTokensService) GetGroupAccessToken(gid interface{}, id int, options ...RequestOptionFunc) (*GroupAccessToken, *Response, error) {
+	groups, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/access_tokens/%d", PathEscape(groups), id)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	gat := new(GroupAccessToken)
+	resp, err := s.client.Do(req, &gat)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gat, resp, nil
+}
+
+// CreateGroupAccessTokenOptions represents the available
+// CreateGroupAccessToken() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_access_tokens.html#create-a-group-access-token
+type CreateGroupAccessTokenOptions struct {
+	Name *string `url:"name,omitempty" json:"name,omitempty"`
+	Scopes *[]string `url:"scopes,omitempty" json:"scopes,omitempty"`
+	AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"`
+	ExpiresAt *ISOTime `url:"expires_at,omitempty" json:"expires_at,omitempty"`
+}
+
+// CreateGroupAccessToken creates a new group access token.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_access_tokens.html#create-a-group-access-token
+func (s *GroupAccessTokensService) CreateGroupAccessToken(gid interface{}, opt *CreateGroupAccessTokenOptions, options ...RequestOptionFunc) (*GroupAccessToken, *Response, error) {
+	groups, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/access_tokens", PathEscape(groups))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pat := new(GroupAccessToken)
+	resp, err := s.client.Do(req, pat)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pat, resp, nil
+}
+
+// RotateGroupAccessTokenOptions represents the available RotateGroupAccessToken()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_access_tokens.html#rotate-a-group-access-token
+type RotateGroupAccessTokenOptions struct {
+	ExpiresAt *ISOTime `url:"expires_at,omitempty" json:"expires_at,omitempty"`
+}
+
+// RotateGroupAccessToken revokes a group access token and returns a new group
+// access token that expires in one week by default.
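+//
+// Illustrative sketch (not part of the upstream file; IDs are placeholders):
+//
+//	gat, _, err := client.GroupAccessTokens.RotateGroupAccessToken(42, 99, &gitlab.RotateGroupAccessTokenOptions{})
+//	if err == nil {
+//		fmt.Println(gat.Token) // the newly issued secret
+//	}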
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_access_tokens.html#rotate-a-group-access-token
+func (s *GroupAccessTokensService) RotateGroupAccessToken(gid interface{}, id int, opt *RotateGroupAccessTokenOptions, options ...RequestOptionFunc) (*GroupAccessToken, *Response, error) {
+	groups, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/access_tokens/%d/rotate", PathEscape(groups), id)
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	gat := new(GroupAccessToken)
+	resp, err := s.client.Do(req, gat)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gat, resp, nil
+}
+
+// RevokeGroupAccessToken revokes a group access token.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_access_tokens.html#revoke-a-group-access-token
+func (s *GroupAccessTokensService) RevokeGroupAccessToken(gid interface{}, id int, options ...RequestOptionFunc) (*Response, error) {
+	groups, err := parseID(gid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("groups/%s/access_tokens/%d", PathEscape(groups), id)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/group_badges.go b/vendor/github.com/xanzy/go-gitlab/group_badges.go
new file mode 100644
index 0000000000..c648a74432
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/group_badges.go
@@ -0,0 +1,237 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab

+import (
+	"fmt"
+	"net/http"
+)
+
+// GroupBadgesService handles communication with the group badges related
+// methods of the GitLab API.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_badges.html
+type GroupBadgesService struct {
+	client *Client
+}
+
+// BadgeKind represents a GitLab badge kind.
+type BadgeKind string
+
+// All possible badge kind values.
+const (
+	ProjectBadgeKind BadgeKind = "project"
+	GroupBadgeKind BadgeKind = "group"
+)
+
+// GroupBadge represents a group badge.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_badges.html
+type GroupBadge struct {
+	ID int `json:"id"`
+	Name string `json:"name"`
+	LinkURL string `json:"link_url"`
+	ImageURL string `json:"image_url"`
+	RenderedLinkURL string `json:"rendered_link_url"`
+	RenderedImageURL string `json:"rendered_image_url"`
+	Kind BadgeKind `json:"kind"`
+}
+
+// ListGroupBadgesOptions represents the available ListGroupBadges() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_badges.html#list-all-badges-of-a-group
+type ListGroupBadgesOptions struct {
+	ListOptions
+	Name *string `url:"name,omitempty" json:"name,omitempty"`
+}
+
+// ListGroupBadges gets a list of a group's badges.
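+//
+// Illustrative sketch (not part of the upstream file; gitlab.String is the
+// package's *string helper and 42 is a placeholder group ID):
+//
+//	badges, _, err := client.GroupBadges.ListGroupBadges(42, &gitlab.ListGroupBadgesOptions{Name: gitlab.String("coverage")})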
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_badges.html#list-all-badges-of-a-group +func (s *GroupBadgesService) ListGroupBadges(gid interface{}, opt *ListGroupBadgesOptions, options ...RequestOptionFunc) ([]*GroupBadge, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/badges", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var gb []*GroupBadge + resp, err := s.client.Do(req, &gb) + if err != nil { + return nil, resp, err + } + + return gb, resp, nil +} + +// GetGroupBadge gets a group badge. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_badges.html#get-a-badge-of-a-group +func (s *GroupBadgesService) GetGroupBadge(gid interface{}, badge int, options ...RequestOptionFunc) (*GroupBadge, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/badges/%d", PathEscape(group), badge) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + gb := new(GroupBadge) + resp, err := s.client.Do(req, gb) + if err != nil { + return nil, resp, err + } + + return gb, resp, nil +} + +// AddGroupBadgeOptions represents the available AddGroupBadge() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_badges.html#add-a-badge-to-a-group +type AddGroupBadgeOptions struct { + LinkURL *string `url:"link_url,omitempty" json:"link_url,omitempty"` + ImageURL *string `url:"image_url,omitempty" json:"image_url,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` +} + +// AddGroupBadge adds a badge to a group. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_badges.html#add-a-badge-to-a-group +func (s *GroupBadgesService) AddGroupBadge(gid interface{}, opt *AddGroupBadgeOptions, options ...RequestOptionFunc) (*GroupBadge, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/badges", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + gb := new(GroupBadge) + resp, err := s.client.Do(req, gb) + if err != nil { + return nil, resp, err + } + + return gb, resp, nil +} + +// EditGroupBadgeOptions represents the available EditGroupBadge() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_badges.html#edit-a-badge-of-a-group +type EditGroupBadgeOptions struct { + LinkURL *string `url:"link_url,omitempty" json:"link_url,omitempty"` + ImageURL *string `url:"image_url,omitempty" json:"image_url,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` +} + +// EditGroupBadge updates a badge of a group. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_badges.html#edit-a-badge-of-a-group +func (s *GroupBadgesService) EditGroupBadge(gid interface{}, badge int, opt *EditGroupBadgeOptions, options ...RequestOptionFunc) (*GroupBadge, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/badges/%d", PathEscape(group), badge) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, nil, err + } + + gb := new(GroupBadge) + resp, err := s.client.Do(req, gb) + if err != nil { + return nil, resp, err + } + + return gb, resp, nil +} + +// DeleteGroupBadge removes a badge from a group. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_badges.html#remove-a-badge-from-a-group +func (s *GroupBadgesService) DeleteGroupBadge(gid interface{}, badge int, options ...RequestOptionFunc) (*Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("groups/%s/badges/%d", PathEscape(group), badge) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// GroupBadgePreviewOptions represents the available PreviewGroupBadge() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_badges.html#preview-a-badge-from-a-group +type GroupBadgePreviewOptions struct { + LinkURL *string `url:"link_url,omitempty" json:"link_url,omitempty"` + ImageURL *string `url:"image_url,omitempty" json:"image_url,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` +} + +// PreviewGroupBadge returns how the link_url and image_url final URLs would be after +// resolving the placeholder interpolation. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_badges.html#preview-a-badge-from-a-group +func (s *GroupBadgesService) PreviewGroupBadge(gid interface{}, opt *GroupBadgePreviewOptions, options ...RequestOptionFunc) (*GroupBadge, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/badges/render", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + gb := new(GroupBadge) + resp, err := s.client.Do(req, &gb) + if err != nil { + return nil, resp, err + } + + return gb, resp, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/group_boards.go b/vendor/github.com/xanzy/go-gitlab/group_boards.go new file mode 100644 index 0000000000..ed9f8d5169 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/group_boards.go @@ -0,0 +1,353 @@ +// +// Copyright 2021, Patrick Webster +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" +) + +// GroupIssueBoardsService handles communication with the group issue board +// related methods of the GitLab API. 
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_boards.html
+type GroupIssueBoardsService struct {
+	client *Client
+}
+
+// GroupIssueBoard represents a GitLab group issue board.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_boards.html
+type GroupIssueBoard struct {
+	ID int `json:"id"`
+	Name string `json:"name"`
+	Group *Group `json:"group"`
+	Milestone *Milestone `json:"milestone"`
+	Labels []*GroupLabel `json:"labels"`
+	Lists []*BoardList `json:"lists"`
+}
+
+func (b GroupIssueBoard) String() string {
+	return Stringify(b)
+}
+
+// ListGroupIssueBoardsOptions represents the available
+// ListGroupIssueBoards() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_boards.html#list-all-group-issue-boards-in-a-group
+type ListGroupIssueBoardsOptions ListOptions
+
+// ListGroupIssueBoards gets a list of all issue boards in a group.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_boards.html#list-all-group-issue-boards-in-a-group
+func (s *GroupIssueBoardsService) ListGroupIssueBoards(gid interface{}, opt *ListGroupIssueBoardsOptions, options ...RequestOptionFunc) ([]*GroupIssueBoard, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/boards", PathEscape(group))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var gs []*GroupIssueBoard
+	resp, err := s.client.Do(req, &gs)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gs, resp, nil
+}
+
+// CreateGroupIssueBoardOptions represents the available
+// CreateGroupIssueBoard() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_boards.html#create-a-group-issue-board
+type CreateGroupIssueBoardOptions struct {
+	Name *string `url:"name" json:"name"`
+}
+
+// CreateGroupIssueBoard creates a new issue board.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_boards.html#create-a-group-issue-board
+func (s *GroupIssueBoardsService) CreateGroupIssueBoard(gid interface{}, opt *CreateGroupIssueBoardOptions, options ...RequestOptionFunc) (*GroupIssueBoard, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/boards", PathEscape(group))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	gib := new(GroupIssueBoard)
+	resp, err := s.client.Do(req, gib)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gib, resp, nil
+}
+
+// GetGroupIssueBoard gets a single issue board of a group.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_boards.html#single-group-issue-board
+func (s *GroupIssueBoardsService) GetGroupIssueBoard(gid interface{}, board int, options ...RequestOptionFunc) (*GroupIssueBoard, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/boards/%d", PathEscape(group), board)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	gib := new(GroupIssueBoard)
+	resp, err := s.client.Do(req, gib)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gib, resp, nil
+}
+
+// UpdateGroupIssueBoardOptions represents the available
+// UpdateIssueBoard() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_boards.html#update-a-group-issue-board
+type UpdateGroupIssueBoardOptions struct {
+	Name *string `url:"name,omitempty" json:"name,omitempty"`
+	AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"`
+	MilestoneID *int `url:"milestone_id,omitempty" json:"milestone_id,omitempty"`
+	Labels *LabelOptions `url:"labels,omitempty" json:"labels,omitempty"`
+	Weight *int `url:"weight,omitempty" json:"weight,omitempty"`
+}
+
+// UpdateIssueBoard updates a single issue board of a group.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_boards.html#update-a-group-issue-board
+func (s *GroupIssueBoardsService) UpdateIssueBoard(gid interface{}, board int, opt *UpdateGroupIssueBoardOptions, options ...RequestOptionFunc) (*GroupIssueBoard, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/boards/%d", PathEscape(group), board)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	gib := new(GroupIssueBoard)
+	resp, err := s.client.Do(req, gib)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gib, resp, nil
+}
+
+// DeleteIssueBoard deletes a single issue board of a group.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_boards.html#delete-a-group-issue-board
+func (s *GroupIssueBoardsService) DeleteIssueBoard(gid interface{}, board int, options ...RequestOptionFunc) (*Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("groups/%s/boards/%d", PathEscape(group), board)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// ListGroupIssueBoardListsOptions represents the available
+// ListGroupIssueBoardLists() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_boards.html#list-group-issue-board-lists
+type ListGroupIssueBoardListsOptions ListOptions
+
+// ListGroupIssueBoardLists gets a list of the issue board's lists. Does not include
+// backlog and closed lists.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/group_boards.html#list-group-issue-board-lists
+func (s *GroupIssueBoardsService) ListGroupIssueBoardLists(gid interface{}, board int, opt *ListGroupIssueBoardListsOptions, options ...RequestOptionFunc) ([]*BoardList, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/boards/%d/lists", PathEscape(group), board)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var gbl []*BoardList
+	resp, err := s.client.Do(req, &gbl)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gbl, resp, nil
+}
+
+// GetGroupIssueBoardList gets a single issue board list.
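+//
+// Illustrative sketch (not part of the upstream file; IDs are placeholders):
+//
+//	list, _, err := client.GroupIssueBoards.GetGroupIssueBoardList(42, 3, 7)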
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_boards.html#single-group-issue-board-list +func (s *GroupIssueBoardsService) GetGroupIssueBoardList(gid interface{}, board, list int, options ...RequestOptionFunc) (*BoardList, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/boards/%d/lists/%d", + PathEscape(group), + board, + list, + ) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + gbl := new(BoardList) + resp, err := s.client.Do(req, gbl) + if err != nil { + return nil, resp, err + } + + return gbl, resp, nil +} + +// CreateGroupIssueBoardListOptions represents the available +// CreateGroupIssueBoardList() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_boards.html#new-group-issue-board-list +type CreateGroupIssueBoardListOptions struct { + LabelID *int `url:"label_id" json:"label_id"` +} + +// CreateGroupIssueBoardList creates a new issue board list. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_boards.html#new-group-issue-board-list +func (s *GroupIssueBoardsService) CreateGroupIssueBoardList(gid interface{}, board int, opt *CreateGroupIssueBoardListOptions, options ...RequestOptionFunc) (*BoardList, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/boards/%d/lists", PathEscape(group), board) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + gbl := new(BoardList) + resp, err := s.client.Do(req, gbl) + if err != nil { + return nil, resp, err + } + + return gbl, resp, nil +} + +// UpdateGroupIssueBoardListOptions represents the available +// UpdateGroupIssueBoardList() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_boards.html#edit-group-issue-board-list +type UpdateGroupIssueBoardListOptions struct { + Position *int `url:"position" json:"position"` +} + +// UpdateIssueBoardList updates the position of an existing +// group issue board list. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_boards.html#edit-group-issue-board-list +func (s *GroupIssueBoardsService) UpdateIssueBoardList(gid interface{}, board, list int, opt *UpdateGroupIssueBoardListOptions, options ...RequestOptionFunc) ([]*BoardList, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/boards/%d/lists/%d", + PathEscape(group), + board, + list, + ) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, nil, err + } + + var gbl []*BoardList + resp, err := s.client.Do(req, &gbl) + if err != nil { + return nil, resp, err + } + + return gbl, resp, nil +} + +// DeleteGroupIssueBoardList soft deletes a group issue board list. +// Only for admins and group owners. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_boards.html#delete-a-group-issue-board-list +func (s *GroupIssueBoardsService) DeleteGroupIssueBoardList(gid interface{}, board, list int, options ...RequestOptionFunc) (*Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("groups/%s/boards/%d/lists/%d", + PathEscape(group), + board, + list, + ) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/group_clusters.go b/vendor/github.com/xanzy/go-gitlab/group_clusters.go new file mode 100644 index 0000000000..f459e1cc84 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/group_clusters.go @@ -0,0 +1,217 @@ +// +// Copyright 2021, Paul Shoemaker +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" + "time" +) + +// GroupClustersService handles communication with the +// group clusters related methods of the GitLab API. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_clusters.html +type GroupClustersService struct { + client *Client +} + +// GroupCluster represents a GitLab Group Cluster. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/group_clusters.html +type GroupCluster struct { + ID int `json:"id"` + Name string `json:"name"` + Domain string `json:"domain"` + CreatedAt *time.Time `json:"created_at"` + Managed bool `json:"managed"` + Enabled bool `json:"enabled"` + ProviderType string `json:"provider_type"` + PlatformType string `json:"platform_type"` + EnvironmentScope string `json:"environment_scope"` + ClusterType string `json:"cluster_type"` + User *User `json:"user"` + PlatformKubernetes *PlatformKubernetes `json:"platform_kubernetes"` + ManagementProject *ManagementProject `json:"management_project"` + Group *Group `json:"group"` +} + +func (v GroupCluster) String() string { + return Stringify(v) +} + +// ListClusters gets a list of all clusters in a group. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_clusters.html#list-group-clusters +func (s *GroupClustersService) ListClusters(pid interface{}, options ...RequestOptionFunc) ([]*GroupCluster, *Response, error) { + group, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/clusters", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + var pcs []*GroupCluster + resp, err := s.client.Do(req, &pcs) + if err != nil { + return nil, resp, err + } + + return pcs, resp, nil +} + +// GetCluster gets a cluster. 
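+//
+// Illustrative sketch (not part of the upstream file; IDs are placeholders):
+//
+//	cluster, _, err := client.GroupCluster.GetCluster(42, 18)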
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_clusters.html#get-a-single-group-cluster +func (s *GroupClustersService) GetCluster(pid interface{}, cluster int, options ...RequestOptionFunc) (*GroupCluster, *Response, error) { + group, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/clusters/%d", PathEscape(group), cluster) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + gc := new(GroupCluster) + resp, err := s.client.Do(req, &gc) + if err != nil { + return nil, resp, err + } + + return gc, resp, nil +} + +// AddGroupClusterOptions represents the available AddCluster() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_clusters.html#add-existing-cluster-to-group +type AddGroupClusterOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + Domain *string `url:"domain,omitempty" json:"domain,omitempty"` + ManagementProjectID *string `url:"management_project_id,omitempty" json:"management_project_id,omitempty"` + Enabled *bool `url:"enabled,omitempty" json:"enabled,omitempty"` + Managed *bool `url:"managed,omitempty" json:"managed,omitempty"` + EnvironmentScope *string `url:"environment_scope,omitempty" json:"environment_scope,omitempty"` + PlatformKubernetes *AddGroupPlatformKubernetesOptions `url:"platform_kubernetes_attributes,omitempty" json:"platform_kubernetes_attributes,omitempty"` +} + +// AddGroupPlatformKubernetesOptions represents the available PlatformKubernetes options for adding. +type AddGroupPlatformKubernetesOptions struct { + APIURL *string `url:"api_url,omitempty" json:"api_url,omitempty"` + Token *string `url:"token,omitempty" json:"token,omitempty"` + CaCert *string `url:"ca_cert,omitempty" json:"ca_cert,omitempty"` + Namespace *string `url:"namespace,omitempty" json:"namespace,omitempty"` + AuthorizationType *string `url:"authorization_type,omitempty" json:"authorization_type,omitempty"` +} + +// AddCluster adds an existing cluster to the group. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_clusters.html#add-existing-cluster-to-group +func (s *GroupClustersService) AddCluster(pid interface{}, opt *AddGroupClusterOptions, options ...RequestOptionFunc) (*GroupCluster, *Response, error) { + group, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/clusters/user", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + gc := new(GroupCluster) + resp, err := s.client.Do(req, gc) + if err != nil { + return nil, resp, err + } + + return gc, resp, nil +} + +// EditGroupClusterOptions represents the available EditCluster() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_clusters.html#edit-group-cluster +type EditGroupClusterOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + Domain *string `url:"domain,omitempty" json:"domain,omitempty"` + EnvironmentScope *string `url:"environment_scope,omitempty" json:"environment_scope,omitempty"` + PlatformKubernetes *EditGroupPlatformKubernetesOptions `url:"platform_kubernetes_attributes,omitempty" json:"platform_kubernetes_attributes,omitempty"` + ManagementProjectID *string `url:"management_project_id,omitempty" json:"management_project_id,omitempty"` +} + +// EditGroupPlatformKubernetesOptions represents the available PlatformKubernetes options for editing. 
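+//
+// Illustrative sketch (not part of the upstream file): rotating the cluster
+// token via EditCluster, using gitlab.String (the package's *string helper)
+// for the optional fields; IDs are placeholders:
+//
+//	opt := &gitlab.EditGroupClusterOptions{
+//		PlatformKubernetes: &gitlab.EditGroupPlatformKubernetesOptions{
+//			Token: gitlab.String("new-service-token"),
+//		},
+//	}
+//	cluster, _, err := client.GroupCluster.EditCluster(42, 18, opt)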
+type EditGroupPlatformKubernetesOptions struct { + APIURL *string `url:"api_url,omitempty" json:"api_url,omitempty"` + Token *string `url:"token,omitempty" json:"token,omitempty"` + CaCert *string `url:"ca_cert,omitempty" json:"ca_cert,omitempty"` +} + +// EditCluster updates an existing group cluster. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_clusters.html#edit-group-cluster +func (s *GroupClustersService) EditCluster(pid interface{}, cluster int, opt *EditGroupClusterOptions, options ...RequestOptionFunc) (*GroupCluster, *Response, error) { + group, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/clusters/%d", PathEscape(group), cluster) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, nil, err + } + + gc := new(GroupCluster) + resp, err := s.client.Do(req, gc) + if err != nil { + return nil, resp, err + } + + return gc, resp, nil +} + +// DeleteCluster deletes an existing group cluster. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_clusters.html#delete-group-cluster +func (s *GroupClustersService) DeleteCluster(pid interface{}, cluster int, options ...RequestOptionFunc) (*Response, error) { + group, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("groups/%s/clusters/%d", PathEscape(group), cluster) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/group_epic_boards.go b/vendor/github.com/xanzy/go-gitlab/group_epic_boards.go new file mode 100644 index 0000000000..fd8cfd86d2 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/group_epic_boards.go @@ -0,0 +1,104 @@ +// +// Copyright 2021, Patrick Webster +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" +) + +// GroupEpicBoardsService handles communication with the group epic board +// related methods of the GitLab API. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_epic_boards.html +type GroupEpicBoardsService struct { + client *Client +} + +// GroupEpicBoard represents a GitLab group epic board. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_epic_boards.html +type GroupEpicBoard struct { + ID int `json:"id"` + Name string `json:"name"` + Group *Group `json:"group"` + Labels []*LabelDetails `json:"labels"` + Lists []*BoardList `json:"lists"` +} + +func (b GroupEpicBoard) String() string { + return Stringify(b) +} + +// ListGroupEpicBoardsOptions represents the available +// ListGroupEpicBoards() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_epic_boards.html#list-all-epic-boards-in-a-group +type ListGroupEpicBoardsOptions ListOptions + +// ListGroupEpicBoards gets a list of all epic boards in a group. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_epic_boards.html#list-all-epic-boards-in-a-group +func (s *GroupEpicBoardsService) ListGroupEpicBoards(gid interface{}, opt *ListGroupEpicBoardsOptions, options ...RequestOptionFunc) ([]*GroupEpicBoard, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/epic_boards", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var gs []*GroupEpicBoard + resp, err := s.client.Do(req, &gs) + if err != nil { + return nil, resp, err + } + + return gs, resp, nil +} + +// GetGroupEpicBoard gets a single epic board of a group. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_epic_boards.html#single-group-epic-board +func (s *GroupEpicBoardsService) GetGroupEpicBoard(gid interface{}, board int, options ...RequestOptionFunc) (*GroupEpicBoard, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/epic_boards/%d", PathEscape(group), board) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + gib := new(GroupEpicBoard) + resp, err := s.client.Do(req, gib) + if err != nil { + return nil, resp, err + } + + return gib, resp, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/group_hooks.go b/vendor/github.com/xanzy/go-gitlab/group_hooks.go new file mode 100644 index 0000000000..414a8d0864 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/group_hooks.go @@ -0,0 +1,268 @@ +// +// Copyright 2021, Eric Stevens +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" + "time" +) + +// GroupHook represents a GitLab group hook. 
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#list-group-hooks +type GroupHook struct { + ID int `json:"id"` + URL string `json:"url"` + GroupID int `json:"group_id"` + PushEvents bool `json:"push_events"` + PushEventsBranchFilter string `json:"push_events_branch_filter"` + IssuesEvents bool `json:"issues_events"` + ConfidentialIssuesEvents bool `json:"confidential_issues_events"` + ConfidentialNoteEvents bool `json:"confidential_note_events"` + MergeRequestsEvents bool `json:"merge_requests_events"` + TagPushEvents bool `json:"tag_push_events"` + NoteEvents bool `json:"note_events"` + JobEvents bool `json:"job_events"` + PipelineEvents bool `json:"pipeline_events"` + WikiPageEvents bool `json:"wiki_page_events"` + DeploymentEvents bool `json:"deployment_events"` + ReleasesEvents bool `json:"releases_events"` + SubGroupEvents bool `json:"subgroup_events"` + MemberEvents bool `json:"member_events"` + EnableSSLVerification bool `json:"enable_ssl_verification"` + AlertStatus string `json:"alert_status"` + CreatedAt *time.Time `json:"created_at"` + CustomWebhookTemplate string `json:"custom_webhook_template"` + ResourceAccessTokenEvents bool `json:"resource_access_token_events"` + CustomHeaders []*HookCustomHeader `url:"custom_headers,omitempty" json:"custom_headers,omitempty"` +} + +// ListGroupHooksOptions represents the available ListGroupHooks() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#list-group-hooks +type ListGroupHooksOptions ListOptions + +// ListGroupHooks gets a list of group hooks. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#list-group-hooks +func (s *GroupsService) ListGroupHooks(gid interface{}, opt *ListGroupHooksOptions, options ...RequestOptionFunc) ([]*GroupHook, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/hooks", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + var gh []*GroupHook + resp, err := s.client.Do(req, &gh) + if err != nil { + return nil, resp, err + } + + return gh, resp, nil +} + +// GetGroupHook gets a specific hook for a group. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#get-group-hook +func (s *GroupsService) GetGroupHook(pid interface{}, hook int, options ...RequestOptionFunc) (*GroupHook, *Response, error) { + group, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/hooks/%d", PathEscape(group), hook) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + gh := new(GroupHook) + resp, err := s.client.Do(req, gh) + if err != nil { + return nil, resp, err + } + + return gh, resp, nil +} + +// AddGroupHookOptions represents the available AddGroupHook() options. 
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#add-group-hook
+type AddGroupHookOptions struct {
+	URL                       *string              `url:"url,omitempty" json:"url,omitempty"`
+	PushEvents                *bool                `url:"push_events,omitempty" json:"push_events,omitempty"`
+	PushEventsBranchFilter    *string              `url:"push_events_branch_filter,omitempty" json:"push_events_branch_filter,omitempty"`
+	IssuesEvents              *bool                `url:"issues_events,omitempty" json:"issues_events,omitempty"`
+	ConfidentialIssuesEvents  *bool                `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"`
+	ConfidentialNoteEvents    *bool                `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"`
+	MergeRequestsEvents       *bool                `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"`
+	TagPushEvents             *bool                `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"`
+	NoteEvents                *bool                `url:"note_events,omitempty" json:"note_events,omitempty"`
+	JobEvents                 *bool                `url:"job_events,omitempty" json:"job_events,omitempty"`
+	PipelineEvents            *bool                `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"`
+	WikiPageEvents            *bool                `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"`
+	DeploymentEvents          *bool                `url:"deployment_events,omitempty" json:"deployment_events,omitempty"`
+	ReleasesEvents            *bool                `url:"releases_events,omitempty" json:"releases_events,omitempty"`
+	SubGroupEvents            *bool                `url:"subgroup_events,omitempty" json:"subgroup_events,omitempty"`
+	MemberEvents              *bool                `url:"member_events,omitempty" json:"member_events,omitempty"`
+	EnableSSLVerification     *bool                `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"`
+	Token                     *string              `url:"token,omitempty" json:"token,omitempty"`
+	ResourceAccessTokenEvents *bool                `url:"resource_access_token_events,omitempty" json:"resource_access_token_events,omitempty"`
+	CustomWebhookTemplate     *string              `url:"custom_webhook_template,omitempty" json:"custom_webhook_template,omitempty"`
+	CustomHeaders             *[]*HookCustomHeader `url:"custom_headers,omitempty" json:"custom_headers,omitempty"`
+}
+
+// AddGroupHook creates a new group-scoped webhook.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#add-group-hook
+func (s *GroupsService) AddGroupHook(gid interface{}, opt *AddGroupHookOptions, options ...RequestOptionFunc) (*GroupHook, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/hooks", PathEscape(group))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	gh := new(GroupHook)
+	resp, err := s.client.Do(req, gh)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gh, resp, nil
+}
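+
+// Example (editorial sketch, not upstream API documentation): registering a
+// group-scoped webhook. The token, group ID, and URL are placeholders, and
+// the snippet assumes the usual gitlab.NewClient wiring plus the
+// gitlab.String/gitlab.Bool pointer helpers.
+//
+//	git, err := gitlab.NewClient("yourtokengoeshere")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	hook, _, err := git.Groups.AddGroupHook(42, &gitlab.AddGroupHookOptions{
+//		URL:        gitlab.String("https://example.com/hook"),
+//		PushEvents: gitlab.Bool(true),
+//	})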
+
+// EditGroupHookOptions represents the available EditGroupHook() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/groups.html#edit-group-hook
+type EditGroupHookOptions struct {
+	URL                       *string              `url:"url,omitempty" json:"url,omitempty"`
+	PushEvents                *bool                `url:"push_events,omitempty" json:"push_events,omitempty"`
+	PushEventsBranchFilter    *string              `url:"push_events_branch_filter,omitempty" json:"push_events_branch_filter,omitempty"`
+	IssuesEvents              *bool                `url:"issues_events,omitempty" json:"issues_events,omitempty"`
+	ConfidentialIssuesEvents  *bool                `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"`
+	ConfidentialNoteEvents    *bool                `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"`
+	MergeRequestsEvents       *bool                `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"`
+	TagPushEvents             *bool                `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"`
+	NoteEvents                *bool                `url:"note_events,omitempty" json:"note_events,omitempty"`
+	JobEvents                 *bool                `url:"job_events,omitempty" json:"job_events,omitempty"`
+	PipelineEvents            *bool                `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"`
+	WikiPageEvents            *bool                `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"`
+	DeploymentEvents          *bool                `url:"deployment_events,omitempty" json:"deployment_events,omitempty"`
+	ReleasesEvents            *bool                `url:"releases_events,omitempty" json:"releases_events,omitempty"`
+	SubGroupEvents            *bool                `url:"subgroup_events,omitempty" json:"subgroup_events,omitempty"`
+	MemberEvents              *bool                `url:"member_events,omitempty" json:"member_events,omitempty"`
+	EnableSSLVerification     *bool                `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"`
+	Token                     *string              `url:"token,omitempty" json:"token,omitempty"`
+	ResourceAccessTokenEvents *bool                `url:"resource_access_token_events,omitempty" json:"resource_access_token_events,omitempty"`
+	CustomWebhookTemplate     *string              `url:"custom_webhook_template,omitempty" json:"custom_webhook_template,omitempty"`
+	CustomHeaders             *[]*HookCustomHeader `url:"custom_headers,omitempty" json:"custom_headers,omitempty"`
+}
+
+// EditGroupHook edits a hook for a specified group.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/groups.html#edit-group-hook
+func (s *GroupsService) EditGroupHook(pid interface{}, hook int, opt *EditGroupHookOptions, options ...RequestOptionFunc) (*GroupHook, *Response, error) {
+	group, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/hooks/%d", PathEscape(group), hook)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	gh := new(GroupHook)
+	resp, err := s.client.Do(req, gh)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gh, resp, nil
+}
+
+// DeleteGroupHook removes a hook from a group. This is an idempotent
+// method and can be called multiple times.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/groups.html#delete-group-hook
+func (s *GroupsService) DeleteGroupHook(pid interface{}, hook int, options ...RequestOptionFunc) (*Response, error) {
+	group, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("groups/%s/hooks/%d", PathEscape(group), hook)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// SetGroupCustomHeader creates or updates a group custom webhook header.
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#set-a-custom-header +func (s *GroupsService) SetGroupCustomHeader(gid interface{}, hook int, key string, opt *SetHookCustomHeaderOptions, options ...RequestOptionFunc) (*Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("groups/%s/hooks/%d/custom_headers/%s", PathEscape(group), hook, key) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// DeleteGroupCustomHeader deletes a group custom webhook header. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#delete-a-custom-header +func (s *GroupsService) DeleteGroupCustomHeader(gid interface{}, hook int, key string, options ...RequestOptionFunc) (*Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("groups/%s/hooks/%d/custom_headers/%s", PathEscape(group), hook, key) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/group_import_export.go b/vendor/github.com/xanzy/go-gitlab/group_import_export.go new file mode 100644 index 0000000000..b35245ed49 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/group_import_export.go @@ -0,0 +1,180 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "bytes" + "fmt" + "io" + "mime/multipart" + "net/http" + "os" + "path/filepath" + "strconv" +) + +// GroupImportExportService handles communication with the group import export +// related methods of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/group_import_export.html +type GroupImportExportService struct { + client *Client +} + +// ScheduleExport starts a new group export. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_import_export.html#schedule-new-export +func (s *GroupImportExportService) ScheduleExport(gid interface{}, options ...RequestOptionFunc) (*Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("groups/%s/export", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodPost, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// ExportDownload downloads the finished export. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_import_export.html#export-download +func (s *GroupImportExportService) ExportDownload(gid interface{}, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/export/download", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + exportDownload := new(bytes.Buffer) + resp, err := s.client.Do(req, exportDownload) + if err != nil { + return nil, resp, err + } + + return bytes.NewReader(exportDownload.Bytes()), resp, err +} + +// GroupImportFileOptions represents the available ImportFile() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_import_export.html#import-a-file +type GroupImportFileOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + Path *string `url:"path,omitempty" json:"path,omitempty"` + File *string `url:"file,omitempty" json:"file,omitempty"` + ParentID *int `url:"parent_id,omitempty" json:"parent_id,omitempty"` +} + +// ImportFile imports a file. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_import_export.html#import-a-file +func (s *GroupImportExportService) ImportFile(opt *GroupImportFileOptions, options ...RequestOptionFunc) (*Response, error) { + // First check if we got all required options. + if opt.Name == nil || *opt.Name == "" { + return nil, fmt.Errorf("Missing required option: Name") + } + if opt.Path == nil || *opt.Path == "" { + return nil, fmt.Errorf("Missing required option: Path") + } + if opt.File == nil || *opt.File == "" { + return nil, fmt.Errorf("Missing required option: File") + } + + f, err := os.Open(*opt.File) + if err != nil { + return nil, err + } + defer f.Close() + + b := &bytes.Buffer{} + w := multipart.NewWriter(b) + + _, filename := filepath.Split(*opt.File) + fw, err := w.CreateFormFile("file", filename) + if err != nil { + return nil, err + } + + _, err = io.Copy(fw, f) + if err != nil { + return nil, err + } + + // Populate the additional fields. + fw, err = w.CreateFormField("name") + if err != nil { + return nil, err + } + + _, err = fw.Write([]byte(*opt.Name)) + if err != nil { + return nil, err + } + + fw, err = w.CreateFormField("path") + if err != nil { + return nil, err + } + + _, err = fw.Write([]byte(*opt.Path)) + if err != nil { + return nil, err + } + + if opt.ParentID != nil { + fw, err = w.CreateFormField("parent_id") + if err != nil { + return nil, err + } + + _, err = fw.Write([]byte(strconv.Itoa(*opt.ParentID))) + if err != nil { + return nil, err + } + } + + if err = w.Close(); err != nil { + return nil, err + } + + req, err := s.client.NewRequest(http.MethodPost, "groups/import", nil, options) + if err != nil { + return nil, err + } + + // Set the buffer as the request body. + if err = req.SetBody(b); err != nil { + return nil, err + } + + // Overwrite the default content type. 
+	req.Header.Set("Content-Type", w.FormDataContentType())
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/group_iterations.go b/vendor/github.com/xanzy/go-gitlab/group_iterations.go
new file mode 100644
index 0000000000..c77d633f6a
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/group_iterations.go
@@ -0,0 +1,90 @@
+//
+// Copyright 2022, Daniel Steinke
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// GroupIterationsService handles communication with the iterations related
+// methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/group_iterations.html
+type GroupIterationsService struct {
+	client *Client
+}
+
+// GroupIteration represents a GitLab iteration.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/group_iterations.html
+type GroupIteration struct {
+	ID          int        `json:"id"`
+	IID         int        `json:"iid"`
+	Sequence    int        `json:"sequence"`
+	GroupID     int        `json:"group_id"`
+	Title       string     `json:"title"`
+	Description string     `json:"description"`
+	State       int        `json:"state"`
+	CreatedAt   *time.Time `json:"created_at"`
+	UpdatedAt   *time.Time `json:"updated_at"`
+	DueDate     *ISOTime   `json:"due_date"`
+	StartDate   *ISOTime   `json:"start_date"`
+	WebURL      string     `json:"web_url"`
+}
+
+func (i GroupIteration) String() string {
+	return Stringify(i)
+}
+
+// ListGroupIterationsOptions contains the available ListGroupIterations()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_iterations.html#list-group-iterations
+type ListGroupIterationsOptions struct {
+	ListOptions
+	State            *string `url:"state,omitempty" json:"state,omitempty"`
+	Search           *string `url:"search,omitempty" json:"search,omitempty"`
+	IncludeAncestors *bool   `url:"include_ancestors,omitempty" json:"include_ancestors,omitempty"`
+}
+
+// ListGroupIterations returns a list of group iterations.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_iterations.html#list-group-iterations
func (s *GroupIterationsService) ListGroupIterations(gid interface{}, opt *ListGroupIterationsOptions, options ...RequestOptionFunc) ([]*GroupIteration, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/iterations", PathEscape(group))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var gis []*GroupIteration
+	resp, err := s.client.Do(req, &gis)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gis, resp, nil
+}
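+
+// Example (editorial sketch, not upstream API documentation): listing the
+// iterations of a group. The token and group ID are placeholders, and the
+// snippet assumes the service is exposed as GroupIterations on the client.
+//
+//	git, err := gitlab.NewClient("yourtokengoeshere")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	iterations, _, err := git.GroupIterations.ListGroupIterations(
+//		5,
+//		&gitlab.ListGroupIterationsOptions{State: gitlab.String("opened")},
+//	)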
diff --git a/vendor/github.com/xanzy/go-gitlab/group_labels.go b/vendor/github.com/xanzy/go-gitlab/group_labels.go
new file mode 100644
index 0000000000..5a390269b1
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/group_labels.go
@@ -0,0 +1,258 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+)
+
+// GroupLabelsService handles communication with the label related methods of the
+// GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/group_labels.html
+type GroupLabelsService struct {
+	client *Client
+}
+
+// GroupLabel represents a GitLab group label.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/group_labels.html
+type GroupLabel Label
+
+func (l GroupLabel) String() string {
+	return Stringify(l)
+}
+
+// ListGroupLabelsOptions represents the available ListGroupLabels() options.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/group_labels.html#list-group-labels
+type ListGroupLabelsOptions struct {
+	ListOptions
+	WithCounts               *bool   `url:"with_counts,omitempty" json:"with_counts,omitempty"`
+	IncludeAncestorGroups    *bool   `url:"include_ancestor_groups,omitempty" json:"include_ancestor_groups,omitempty"`
+	IncludeDescendantGrouops *bool   `url:"include_descendant_groups,omitempty" json:"include_descendant_groups,omitempty"`
+	OnlyGroupLabels          *bool   `url:"only_group_labels,omitempty" json:"only_group_labels,omitempty"`
+	Search                   *string `url:"search,omitempty" json:"search,omitempty"`
+}
+
+// ListGroupLabels gets all labels for a given group.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_labels.html#list-group-labels
+func (s *GroupLabelsService) ListGroupLabels(gid interface{}, opt *ListGroupLabelsOptions, options ...RequestOptionFunc) ([]*GroupLabel, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/labels", PathEscape(group))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var l []*GroupLabel
+	resp, err := s.client.Do(req, &l)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return l, resp, nil
+}
+
+// GetGroupLabel gets a single label for a given group.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_labels.html#get-a-single-group-label
+func (s *GroupLabelsService) GetGroupLabel(gid interface{}, labelID interface{}, options ...RequestOptionFunc) (*GroupLabel, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	label, err := parseID(labelID)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/labels/%s", PathEscape(group), PathEscape(label))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var l *GroupLabel
+	resp, err := s.client.Do(req, &l)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return l, resp, nil
+}
+
+// CreateGroupLabelOptions represents the available CreateGroupLabel() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_labels.html#create-a-new-group-label
+type CreateGroupLabelOptions CreateLabelOptions
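+
+// Example (editorial sketch, not upstream API documentation): creating a
+// group label. The group ID, label name, and color are placeholders.
+//
+//	label, _, err := git.GroupLabels.CreateGroupLabel(42, &gitlab.CreateGroupLabelOptions{
+//		Name:  gitlab.String("bug"),
+//		Color: gitlab.String("#d9534f"),
+//	})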
+
+// CreateGroupLabel creates a new label for a given group with a given name
+// and color.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_labels.html#create-a-new-group-label
+func (s *GroupLabelsService) CreateGroupLabel(gid interface{}, opt *CreateGroupLabelOptions, options ...RequestOptionFunc) (*GroupLabel, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/labels", PathEscape(group))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	l := new(GroupLabel)
+	resp, err := s.client.Do(req, l)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return l, resp, nil
+}
+
+// DeleteGroupLabelOptions represents the available DeleteGroupLabel() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_labels.html#delete-a-group-label
+type DeleteGroupLabelOptions DeleteLabelOptions
+
+// DeleteGroupLabel deletes a group label given by its name or ID.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_labels.html#delete-a-group-label
+func (s *GroupLabelsService) DeleteGroupLabel(gid interface{}, lid interface{}, opt *DeleteGroupLabelOptions, options ...RequestOptionFunc) (*Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("groups/%s/labels", PathEscape(group))
+
+	if lid != nil {
+		label, err := parseID(lid)
+		if err != nil {
+			return nil, err
+		}
+		u = fmt.Sprintf("groups/%s/labels/%s", PathEscape(group), PathEscape(label))
+	}
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, opt, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// UpdateGroupLabelOptions represents the available UpdateGroupLabel() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_labels.html#update-a-group-label
+type UpdateGroupLabelOptions UpdateLabelOptions
+
+// UpdateGroupLabel updates an existing label with a new name or color. At
+// least one parameter is required to update the label.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_labels.html#update-a-group-label
+func (s *GroupLabelsService) UpdateGroupLabel(gid interface{}, opt *UpdateGroupLabelOptions, options ...RequestOptionFunc) (*GroupLabel, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/labels", PathEscape(group))
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	l := new(GroupLabel)
+	resp, err := s.client.Do(req, l)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return l, resp, nil
+}
+
+// SubscribeToGroupLabel subscribes the authenticated user to a label to receive
+// notifications. If the user is already subscribed to the label, the status
+// code 304 is returned.
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_labels.html#subscribe-to-a-group-label +func (s *GroupLabelsService) SubscribeToGroupLabel(gid interface{}, labelID interface{}, options ...RequestOptionFunc) (*GroupLabel, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + label, err := parseID(labelID) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/labels/%s/subscribe", PathEscape(group), PathEscape(label)) + + req, err := s.client.NewRequest(http.MethodPost, u, nil, options) + if err != nil { + return nil, nil, err + } + + l := new(GroupLabel) + resp, err := s.client.Do(req, l) + if err != nil { + return nil, resp, err + } + + return l, resp, nil +} + +// UnsubscribeFromGroupLabel unsubscribes the authenticated user from a label to not +// receive notifications from it. If the user is not subscribed to the label, the +// status code 304 is returned. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_labels.html#unsubscribe-from-a-group-label +func (s *GroupLabelsService) UnsubscribeFromGroupLabel(gid interface{}, labelID interface{}, options ...RequestOptionFunc) (*Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, err + } + label, err := parseID(labelID) + if err != nil { + return nil, err + } + u := fmt.Sprintf("groups/%s/labels/%s/unsubscribe", PathEscape(group), PathEscape(label)) + + req, err := s.client.NewRequest(http.MethodPost, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/group_members.go b/vendor/github.com/xanzy/go-gitlab/group_members.go new file mode 100644 index 0000000000..cdf225c3d8 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/group_members.go @@ -0,0 +1,391 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" + "time" +) + +// GroupMembersService handles communication with the group members +// related methods of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/members.html +type GroupMembersService struct { + client *Client +} + +// GroupMemberSAMLIdentity represents the SAML Identity link for the group member. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/members.html#list-all-members-of-a-group-or-project +// Gitlab MR for API change: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/20357 +// Gitlab MR for API Doc change: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/25652 +type GroupMemberSAMLIdentity struct { + ExternUID string `json:"extern_uid"` + Provider string `json:"provider"` + SAMLProviderID int `json:"saml_provider_id"` +} + +// GroupMember represents a GitLab group member. 
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/members.html
+type GroupMember struct {
+	ID                int                      `json:"id"`
+	Username          string                   `json:"username"`
+	Name              string                   `json:"name"`
+	State             string                   `json:"state"`
+	AvatarURL         string                   `json:"avatar_url"`
+	WebURL            string                   `json:"web_url"`
+	CreatedAt         *time.Time               `json:"created_at"`
+	ExpiresAt         *ISOTime                 `json:"expires_at"`
+	AccessLevel       AccessLevelValue         `json:"access_level"`
+	Email             string                   `json:"email,omitempty"`
+	GroupSAMLIdentity *GroupMemberSAMLIdentity `json:"group_saml_identity"`
+	MemberRole        *MemberRole              `json:"member_role"`
+}
+
+// ListGroupMembersOptions represents the available ListGroupMembers() and
+// ListAllGroupMembers() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/members.html#list-all-members-of-a-group-or-project
+type ListGroupMembersOptions struct {
+	ListOptions
+	Query   *string `url:"query,omitempty" json:"query,omitempty"`
+	UserIDs *[]int  `url:"user_ids[],omitempty" json:"user_ids,omitempty"`
+}
+
+// ListGroupMembers gets a list of group members viewable by the authenticated
+// user. Inherited members through ancestor groups are not included.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/members.html#list-all-members-of-a-group-or-project
+func (s *GroupsService) ListGroupMembers(gid interface{}, opt *ListGroupMembersOptions, options ...RequestOptionFunc) ([]*GroupMember, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/members", PathEscape(group))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var gm []*GroupMember
+	resp, err := s.client.Do(req, &gm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gm, resp, nil
+}
+
+// ListAllGroupMembers gets a list of group members viewable by the authenticated
+// user. Returns a list including inherited members through ancestor groups.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/members.html#list-all-members-of-a-group-or-project-including-inherited-and-invited-members
+func (s *GroupsService) ListAllGroupMembers(gid interface{}, opt *ListGroupMembersOptions, options ...RequestOptionFunc) ([]*GroupMember, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/members/all", PathEscape(group))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var gm []*GroupMember
+	resp, err := s.client.Do(req, &gm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gm, resp, nil
+}
+
+// AddGroupMemberOptions represents the available AddGroupMember() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/members.html#add-a-member-to-a-group-or-project
+type AddGroupMemberOptions struct {
+	UserID       *int              `url:"user_id,omitempty" json:"user_id,omitempty"`
+	AccessLevel  *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"`
+	ExpiresAt    *string           `url:"expires_at,omitempty" json:"expires_at"`
+	MemberRoleID *int              `url:"member_role_id,omitempty" json:"member_role_id,omitempty"`
+}
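+
+// Example (editorial sketch, not upstream API documentation): listing direct
+// group members filtered by a query string; the group ID is a placeholder.
+//
+//	members, _, err := git.Groups.ListGroupMembers(42, &gitlab.ListGroupMembersOptions{
+//		Query: gitlab.String("jane"),
+//	})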
+
+// GetGroupMember gets a member of a group.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/members.html#get-a-member-of-a-group-or-project
+func (s *GroupMembersService) GetGroupMember(gid interface{}, user int, options ...RequestOptionFunc) (*GroupMember, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/members/%d", PathEscape(group), user)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	gm := new(GroupMember)
+	resp, err := s.client.Do(req, gm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gm, resp, nil
+}
+
+// GetInheritedGroupMember gets a member of a group or project, including
+// inherited and invited members.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/members.html#get-a-member-of-a-group-or-project-including-inherited-and-invited-members
+func (s *GroupMembersService) GetInheritedGroupMember(gid interface{}, user int, options ...RequestOptionFunc) (*GroupMember, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/members/all/%d", PathEscape(group), user)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	gm := new(GroupMember)
+	resp, err := s.client.Do(req, gm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gm, resp, nil
+}
+
+// BillableGroupMember represents a GitLab billable group member.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/members.html#list-all-billable-members-of-a-group
+type BillableGroupMember struct {
+	ID             int        `json:"id"`
+	Username       string     `json:"username"`
+	Name           string     `json:"name"`
+	State          string     `json:"state"`
+	AvatarURL      string     `json:"avatar_url"`
+	WebURL         string     `json:"web_url"`
+	Email          string     `json:"email"`
+	LastActivityOn *ISOTime   `json:"last_activity_on"`
+	MembershipType string     `json:"membership_type"`
+	Removable      bool       `json:"removable"`
+	CreatedAt      *time.Time `json:"created_at"`
+	IsLastOwner    bool       `json:"is_last_owner"`
+	LastLoginAt    *time.Time `json:"last_login_at"`
+}
+
+// ListBillableGroupMembersOptions represents the available ListBillableGroupMembers() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/members.html#list-all-billable-members-of-a-group
+type ListBillableGroupMembersOptions struct {
+	ListOptions
+	Search *string `url:"search,omitempty" json:"search,omitempty"`
+	Sort   *string `url:"sort,omitempty" json:"sort,omitempty"`
+}
+
+// ListBillableGroupMembers gets a list of group members that count as billable.
+// The list includes members in the subgroup or subproject.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/members.html#list-all-billable-members-of-a-group
+func (s *GroupsService) ListBillableGroupMembers(gid interface{}, opt *ListBillableGroupMembersOptions, options ...RequestOptionFunc) ([]*BillableGroupMember, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/billable_members", PathEscape(group))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var bgm []*BillableGroupMember
+	resp, err := s.client.Do(req, &bgm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return bgm, resp, nil
+}
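+
+// Example (editorial sketch, not upstream API documentation): listing the
+// billable members of a top-level group; the group ID is a placeholder.
+//
+//	billable, _, err := git.Groups.ListBillableGroupMembers(42, &gitlab.ListBillableGroupMembersOptions{
+//		Search: gitlab.String("jane"),
+//	})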
+
+// RemoveBillableGroupMember removes a given billable member from a group.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/members.html#remove-a-billable-member-from-a-group
+func (s *GroupsService) RemoveBillableGroupMember(gid interface{}, user int, options ...RequestOptionFunc) (*Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("groups/%s/billable_members/%d", PathEscape(group), user)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// AddGroupMember adds a user to the list of group members.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/members.html#add-a-member-to-a-group-or-project
+func (s *GroupMembersService) AddGroupMember(gid interface{}, opt *AddGroupMemberOptions, options ...RequestOptionFunc) (*GroupMember, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/members", PathEscape(group))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	gm := new(GroupMember)
+	resp, err := s.client.Do(req, gm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gm, resp, nil
+}
+
+// ShareWithGroup shares a group with another group.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/groups.html#share-groups-with-groups
+func (s *GroupMembersService) ShareWithGroup(gid interface{}, opt *ShareWithGroupOptions, options ...RequestOptionFunc) (*Group, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/share", PathEscape(group))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	g := new(Group)
+	resp, err := s.client.Do(req, g)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return g, resp, nil
+}
+
+// DeleteShareWithGroup unshares a group from another group.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/groups.html#delete-link-sharing-group-with-another-group
+func (s *GroupMembersService) DeleteShareWithGroup(gid interface{}, groupID int, options ...RequestOptionFunc) (*Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("groups/%s/share/%d", PathEscape(group), groupID)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// EditGroupMemberOptions represents the available EditGroupMember()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/members.html#edit-a-member-of-a-group-or-project
+type EditGroupMemberOptions struct {
+	AccessLevel  *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"`
+	ExpiresAt    *string           `url:"expires_at,omitempty" json:"expires_at,omitempty"`
+	MemberRoleID *int              `url:"member_role_id,omitempty" json:"member_role_id,omitempty"`
+}
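+
+// Example (editorial sketch, not upstream API documentation): adding a user
+// as a developer and later raising the membership to maintainer. The user and
+// group IDs are placeholders; gitlab.AccessLevel is the pointer helper for
+// AccessLevelValue.
+//
+//	member, _, err := git.GroupMembers.AddGroupMember(42, &gitlab.AddGroupMemberOptions{
+//		UserID:      gitlab.Int(7),
+//		AccessLevel: gitlab.AccessLevel(gitlab.DeveloperPermissions),
+//	})
+//	// later:
+//	member, _, err = git.GroupMembers.EditGroupMember(42, 7, &gitlab.EditGroupMemberOptions{
+//		AccessLevel: gitlab.AccessLevel(gitlab.MaintainerPermissions),
+//	})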
+
+// EditGroupMember updates a member of a group.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/members.html#edit-a-member-of-a-group-or-project
+func (s *GroupMembersService) EditGroupMember(gid interface{}, user int, opt *EditGroupMemberOptions, options ...RequestOptionFunc) (*GroupMember, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/members/%d", PathEscape(group), user)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	gm := new(GroupMember)
+	resp, err := s.client.Do(req, gm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gm, resp, nil
+}
+
+// RemoveGroupMemberOptions represents the available options to remove a group member.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/members.html#remove-a-member-from-a-group-or-project
+type RemoveGroupMemberOptions struct {
+	SkipSubresources  *bool `url:"skip_subresources,omitempty" json:"skip_subresources,omitempty"`
+	UnassignIssuables *bool `url:"unassign_issuables,omitempty" json:"unassign_issuables,omitempty"`
+}
+
+// RemoveGroupMember removes a user from the group.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/members.html#remove-a-member-from-a-group-or-project
+func (s *GroupMembersService) RemoveGroupMember(gid interface{}, user int, opt *RemoveGroupMemberOptions, options ...RequestOptionFunc) (*Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("groups/%s/members/%d", PathEscape(group), user)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, opt, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/group_milestones.go b/vendor/github.com/xanzy/go-gitlab/group_milestones.go
new file mode 100644
index 0000000000..f3089b2152
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/group_milestones.go
@@ -0,0 +1,322 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// GroupMilestonesService handles communication with the milestone related
+// methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/group_milestones.html
+type GroupMilestonesService struct {
+	client *Client
+}
+
+// GroupMilestone represents a GitLab milestone.
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/group_milestones.html +type GroupMilestone struct { + ID int `json:"id"` + IID int `json:"iid"` + GroupID int `json:"group_id"` + Title string `json:"title"` + Description string `json:"description"` + StartDate *ISOTime `json:"start_date"` + DueDate *ISOTime `json:"due_date"` + State string `json:"state"` + UpdatedAt *time.Time `json:"updated_at"` + CreatedAt *time.Time `json:"created_at"` + Expired *bool `json:"expired"` +} + +func (m GroupMilestone) String() string { + return Stringify(m) +} + +// ListGroupMilestonesOptions represents the available +// ListGroupMilestones() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_milestones.html#list-group-milestones +type ListGroupMilestonesOptions struct { + ListOptions + IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` + State *string `url:"state,omitempty" json:"state,omitempty"` + Title *string `url:"title,omitempty" json:"title,omitempty"` + Search *string `url:"search,omitempty" json:"search,omitempty"` + SearchTitle *string `url:"search_title,omitempty" json:"search_title,omitempty"` + IncludeParentMilestones *bool `url:"include_parent_milestones,omitempty" json:"include_parent_milestones,omitempty"` + IncludeAncestors *bool `url:"include_ancestors,omitempty" json:"include_ancestors,omitempty"` + IncludeDescendents *bool `url:"include_descendents,omitempty" json:"include_descendents,omitempty"` + UpdatedBefore *ISOTime `url:"updated_before,omitempty" json:"updated_before,omitempty"` + UpdatedAfter *ISOTime `url:"updated_after,omitempty" json:"updated_after,omitempty"` + ContainingDate *ISOTime `url:"containing_date,omitempty" json:"containing_date,omitempty"` + StartDate *ISOTime `url:"start_date,omitempty" json:"start_date,omitempty"` + EndDate *ISOTime `url:"end_date,omitempty" json:"end_date,omitempty"` +} + +// ListGroupMilestones returns a list of group milestones. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_milestones.html#list-group-milestones +func (s *GroupMilestonesService) ListGroupMilestones(gid interface{}, opt *ListGroupMilestonesOptions, options ...RequestOptionFunc) ([]*GroupMilestone, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/milestones", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var m []*GroupMilestone + resp, err := s.client.Do(req, &m) + if err != nil { + return nil, resp, err + } + + return m, resp, nil +} + +// GetGroupMilestone gets a single group milestone. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_milestones.html#get-single-milestone +func (s *GroupMilestonesService) GetGroupMilestone(gid interface{}, milestone int, options ...RequestOptionFunc) (*GroupMilestone, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/milestones/%d", PathEscape(group), milestone) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + m := new(GroupMilestone) + resp, err := s.client.Do(req, m) + if err != nil { + return nil, resp, err + } + + return m, resp, nil +} + +// CreateGroupMilestoneOptions represents the available CreateGroupMilestone() options. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_milestones.html#create-new-milestone +type CreateGroupMilestoneOptions struct { + Title *string `url:"title,omitempty" json:"title,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + StartDate *ISOTime `url:"start_date,omitempty" json:"start_date,omitempty"` + DueDate *ISOTime `url:"due_date,omitempty" json:"due_date,omitempty"` +} + +// CreateGroupMilestone creates a new group milestone. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_milestones.html#create-new-milestone +func (s *GroupMilestonesService) CreateGroupMilestone(gid interface{}, opt *CreateGroupMilestoneOptions, options ...RequestOptionFunc) (*GroupMilestone, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/milestones", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + m := new(GroupMilestone) + resp, err := s.client.Do(req, m) + if err != nil { + return nil, resp, err + } + + return m, resp, nil +} + +// UpdateGroupMilestoneOptions represents the available UpdateGroupMilestone() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_milestones.html#edit-milestone +type UpdateGroupMilestoneOptions struct { + Title *string `url:"title,omitempty" json:"title,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + StartDate *ISOTime `url:"start_date,omitempty" json:"start_date,omitempty"` + DueDate *ISOTime `url:"due_date,omitempty" json:"due_date,omitempty"` + StateEvent *string `url:"state_event,omitempty" json:"state_event,omitempty"` +} + +// UpdateGroupMilestone updates an existing group milestone. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_milestones.html#edit-milestone +func (s *GroupMilestonesService) UpdateGroupMilestone(gid interface{}, milestone int, opt *UpdateGroupMilestoneOptions, options ...RequestOptionFunc) (*GroupMilestone, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/milestones/%d", PathEscape(group), milestone) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, nil, err + } + + m := new(GroupMilestone) + resp, err := s.client.Do(req, m) + if err != nil { + return nil, resp, err + } + + return m, resp, nil +} + +// DeleteGroupMilestone deletes a specified group milestone. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_milestones.html#delete-group-milestone +func (s *GroupMilestonesService) DeleteGroupMilestone(pid interface{}, milestone int, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("groups/%s/milestones/%d", PathEscape(project), milestone) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + return s.client.Do(req, nil) +} + +// GetGroupMilestoneIssuesOptions represents the available GetGroupMilestoneIssues() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_milestones.html#get-all-issues-assigned-to-a-single-milestone +type GetGroupMilestoneIssuesOptions ListOptions + +// GetGroupMilestoneIssues gets all issues assigned to a single group milestone. 
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_milestones.html#get-all-issues-assigned-to-a-single-milestone
+func (s *GroupMilestonesService) GetGroupMilestoneIssues(gid interface{}, milestone int, opt *GetGroupMilestoneIssuesOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/milestones/%d/issues", PathEscape(group), milestone)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var i []*Issue
+	resp, err := s.client.Do(req, &i)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return i, resp, nil
+}
+
+// GetGroupMilestoneMergeRequestsOptions represents the available
+// GetGroupMilestoneMergeRequests() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_milestones.html#get-all-merge-requests-assigned-to-a-single-milestone
+type GetGroupMilestoneMergeRequestsOptions ListOptions
+
+// GetGroupMilestoneMergeRequests gets all merge requests assigned to a
+// single group milestone.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_milestones.html#get-all-merge-requests-assigned-to-a-single-milestone
+func (s *GroupMilestonesService) GetGroupMilestoneMergeRequests(gid interface{}, milestone int, opt *GetGroupMilestoneMergeRequestsOptions, options ...RequestOptionFunc) ([]*MergeRequest, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/milestones/%d/merge_requests", PathEscape(group), milestone)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var mr []*MergeRequest
+	resp, err := s.client.Do(req, &mr)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return mr, resp, nil
+}
+
+// BurndownChartEvent represents a burndown chart event.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_milestones.html#get-all-burndown-chart-events-for-a-single-milestone
+type BurndownChartEvent struct {
+	CreatedAt *time.Time `json:"created_at"`
+	Weight    *int       `json:"weight"`
+	Action    *string    `json:"action"`
+}
+
+// GetGroupMilestoneBurndownChartEventsOptions represents the available
+// GetGroupMilestoneBurndownChartEvents() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_milestones.html#get-all-burndown-chart-events-for-a-single-milestone
+type GetGroupMilestoneBurndownChartEventsOptions ListOptions
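+
+// Example (editorial sketch, not upstream API documentation): paging through
+// the issues assigned to a milestone; the group and milestone IDs are
+// placeholders.
+//
+//	opt := &gitlab.GetGroupMilestoneIssuesOptions{PerPage: 20}
+//	for {
+//		issues, resp, err := git.GroupMilestones.GetGroupMilestoneIssues(42, 3, opt)
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		for _, issue := range issues {
+//			fmt.Println(issue.Title)
+//		}
+//		if resp.NextPage == 0 {
+//			break
+//		}
+//		opt.Page = resp.NextPage
+//	}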
+
+// GetGroupMilestoneBurndownChartEvents gets all burndown chart events for a
+// single group milestone.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_milestones.html#get-all-burndown-chart-events-for-a-single-milestone
+func (s *GroupMilestonesService) GetGroupMilestoneBurndownChartEvents(gid interface{}, milestone int, opt *GetGroupMilestoneBurndownChartEventsOptions, options ...RequestOptionFunc) ([]*BurndownChartEvent, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/milestones/%d/burndown_events", PathEscape(group), milestone)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var be []*BurndownChartEvent
+	resp, err := s.client.Do(req, &be)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return be, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/group_protected_environments.go b/vendor/github.com/xanzy/go-gitlab/group_protected_environments.go
new file mode 100644
index 0000000000..addc383fb1
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/group_protected_environments.go
@@ -0,0 +1,281 @@
+//
+// Copyright 2023, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+)
+
+// GroupProtectedEnvironmentsService handles communication with the group-level
+// protected environment methods of the GitLab API.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_protected_environments.html
+type GroupProtectedEnvironmentsService struct {
+	client *Client
+}
+
+// GroupProtectedEnvironment represents a group-level protected environment.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_protected_environments.html
+type GroupProtectedEnvironment struct {
+	Name                  string                               `json:"name"`
+	DeployAccessLevels    []*GroupEnvironmentAccessDescription `json:"deploy_access_levels"`
+	RequiredApprovalCount int                                  `json:"required_approval_count"`
+	ApprovalRules         []*GroupEnvironmentApprovalRule      `json:"approval_rules"`
+}
+
+// GroupEnvironmentAccessDescription represents the access description for a
+// group-level protected environment.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_protected_environments.html
+type GroupEnvironmentAccessDescription struct {
+	ID                     int              `json:"id"`
+	AccessLevel            AccessLevelValue `json:"access_level"`
+	AccessLevelDescription string           `json:"access_level_description"`
+	UserID                 int              `json:"user_id"`
+	GroupID                int              `json:"group_id"`
+	GroupInheritanceType   int              `json:"group_inheritance_type"`
+}
+
+// GroupEnvironmentApprovalRule represents the approval rules for a group-level
+// protected environment.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_protected_environments.html#protect-a-single-environment
+type GroupEnvironmentApprovalRule struct {
+	ID                     int              `json:"id"`
+	UserID                 int              `json:"user_id"`
+	GroupID                int              `json:"group_id"`
+	AccessLevel            AccessLevelValue `json:"access_level"`
+	AccessLevelDescription string           `json:"access_level_description"`
+	RequiredApprovalCount  int              `json:"required_approvals"`
+	GroupInheritanceType   int              `json:"group_inheritance_type"`
+}
+
+// ListGroupProtectedEnvironmentsOptions represents the available
+// ListGroupProtectedEnvironments() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_protected_environments.html#list-group-level-protected-environments
+type ListGroupProtectedEnvironmentsOptions ListOptions
+
+// ListGroupProtectedEnvironments returns a list of protected environments from
+// a group.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_protected_environments.html#list-group-level-protected-environments
+func (s *GroupProtectedEnvironmentsService) ListGroupProtectedEnvironments(gid interface{}, opt *ListGroupProtectedEnvironmentsOptions, options ...RequestOptionFunc) ([]*GroupProtectedEnvironment, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/protected_environments", PathEscape(group))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var pes []*GroupProtectedEnvironment
+	resp, err := s.client.Do(req, &pes)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pes, resp, nil
+}
+
+// GetGroupProtectedEnvironment returns a single group-level protected
+// environment.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_protected_environments.html#get-a-single-protected-environment
+func (s *GroupProtectedEnvironmentsService) GetGroupProtectedEnvironment(gid interface{}, environment string, options ...RequestOptionFunc) (*GroupProtectedEnvironment, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/protected_environments/%s", PathEscape(group), environment)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pe := new(GroupProtectedEnvironment)
+	resp, err := s.client.Do(req, pe)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pe, resp, nil
+}
+
+// ProtectGroupEnvironmentOptions represents the available
+// ProtectGroupEnvironment() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_protected_environments.html#protect-a-single-environment
+type ProtectGroupEnvironmentOptions struct {
+	Name                  *string                                 `url:"name,omitempty" json:"name,omitempty"`
+	DeployAccessLevels    *[]*GroupEnvironmentAccessOptions       `url:"deploy_access_levels,omitempty" json:"deploy_access_levels,omitempty"`
+	RequiredApprovalCount *int                                    `url:"required_approval_count,omitempty" json:"required_approval_count,omitempty"`
+	ApprovalRules         *[]*GroupEnvironmentApprovalRuleOptions `url:"approval_rules,omitempty" json:"approval_rules,omitempty"`
+}
+
+// GroupEnvironmentAccessOptions represents the options for an access
+// description for a group-level protected environment.
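+//
+// A minimal sketch of protecting an environment, with a hypothetical group
+// path and values; it assumes a configured *gitlab.Client named git and uses
+// the gitlab.Ptr helper to build pointers.
+//
+//	pe, _, err := git.GroupProtectedEnvironments.ProtectGroupEnvironment("my-group",
+//		&gitlab.ProtectGroupEnvironmentOptions{
+//			Name: gitlab.Ptr("production"),
+//			DeployAccessLevels: &[]*gitlab.GroupEnvironmentAccessOptions{
+//				{AccessLevel: gitlab.Ptr(gitlab.MaintainerPermissions)},
+//			},
+//			RequiredApprovalCount: gitlab.Ptr(1),
+//		})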
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_protected_environments.html#protect-a-single-environment
+type GroupEnvironmentAccessOptions struct {
+	AccessLevel          *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"`
+	UserID               *int              `url:"user_id,omitempty" json:"user_id,omitempty"`
+	GroupID              *int              `url:"group_id,omitempty" json:"group_id,omitempty"`
+	GroupInheritanceType *int              `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"`
+}
+
+// GroupEnvironmentApprovalRuleOptions represents the approval rules for a
+// group-level protected environment.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_protected_environments.html#protect-a-single-environment
+type GroupEnvironmentApprovalRuleOptions struct {
+	UserID                 *int              `url:"user_id,omitempty" json:"user_id,omitempty"`
+	GroupID                *int              `url:"group_id,omitempty" json:"group_id,omitempty"`
+	AccessLevel            *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"`
+	AccessLevelDescription *string           `url:"access_level_description,omitempty" json:"access_level_description,omitempty"`
+	RequiredApprovalCount  *int              `url:"required_approvals,omitempty" json:"required_approvals,omitempty"`
+	GroupInheritanceType   *int              `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"`
+}
+
+// ProtectGroupEnvironment protects a single group-level environment.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_protected_environments.html#protect-a-single-environment
+func (s *GroupProtectedEnvironmentsService) ProtectGroupEnvironment(gid interface{}, opt *ProtectGroupEnvironmentOptions, options ...RequestOptionFunc) (*GroupProtectedEnvironment, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/protected_environments", PathEscape(group))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pe := new(GroupProtectedEnvironment)
+	resp, err := s.client.Do(req, pe)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pe, resp, nil
+}
+
+// UpdateGroupProtectedEnvironmentOptions represents the available
+// UpdateGroupProtectedEnvironment() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_protected_environments.html#update-a-protected-environment
+type UpdateGroupProtectedEnvironmentOptions struct {
+	Name                  *string                                       `url:"name,omitempty" json:"name,omitempty"`
+	DeployAccessLevels    *[]*UpdateGroupEnvironmentAccessOptions       `url:"deploy_access_levels,omitempty" json:"deploy_access_levels,omitempty"`
+	RequiredApprovalCount *int                                          `url:"required_approval_count,omitempty" json:"required_approval_count,omitempty"`
+	ApprovalRules         *[]*UpdateGroupEnvironmentApprovalRuleOptions `url:"approval_rules,omitempty" json:"approval_rules,omitempty"`
+}
+
+// UpdateGroupEnvironmentAccessOptions represents the options for updates to the
+// access description for a group-level protected environment.
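+//
+// A minimal sketch of editing deploy access levels in place, with
+// hypothetical IDs; it assumes a configured *gitlab.Client named git.
+// Sending the ID of an existing entry together with Destroy asks the API to
+// remove that entry:
+//
+//	pe, _, err := git.GroupProtectedEnvironments.UpdateGroupProtectedEnvironment("my-group", "production",
+//		&gitlab.UpdateGroupProtectedEnvironmentOptions{
+//			DeployAccessLevels: &[]*gitlab.UpdateGroupEnvironmentAccessOptions{
+//				{ID: gitlab.Ptr(42), Destroy: gitlab.Ptr(true)},
+//			},
+//		})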
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_protected_environments.html#update-a-protected-environment +type UpdateGroupEnvironmentAccessOptions struct { + AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` + ID *int `url:"id,omitempty" json:"id,omitempty"` + UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` + GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` + GroupInheritanceType *int `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"` + Destroy *bool `url:"_destroy,omitempty" json:"_destroy,omitempty"` +} + +// UpdateGroupEnvironmentApprovalRuleOptions represents the updates to the +// approval rules for a group-level protected environment. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_protected_environments.html#update-a-protected-environment +type UpdateGroupEnvironmentApprovalRuleOptions struct { + ID *int `url:"id,omitempty" json:"id,omitempty"` + UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` + GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` + AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` + AccessLevelDescription *string `url:"access_level_description,omitempty" json:"access_level_description,omitempty"` + RequiredApprovalCount *int `url:"required_approvals,omitempty" json:"required_approvals,omitempty"` + GroupInheritanceType *int `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"` + Destroy *bool `url:"_destroy,omitempty" json:"_destroy,omitempty"` +} + +// UpdateGroupProtectedEnvironment updates a single group-level protected +// environment. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_protected_environments.html#update-a-protected-environment +func (s *GroupProtectedEnvironmentsService) UpdateGroupProtectedEnvironment(gid interface{}, environment string, opt *UpdateGroupProtectedEnvironmentOptions, options ...RequestOptionFunc) (*GroupProtectedEnvironment, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/protected_environments/%s", PathEscape(group), environment) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, nil, err + } + + pe := new(GroupProtectedEnvironment) + resp, err := s.client.Do(req, pe) + if err != nil { + return nil, resp, err + } + + return pe, resp, nil +} + +// UnprotectGroupEnvironment unprotects the given protected group-level +// environment. 
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_protected_environments.html#unprotect-a-single-environment
+func (s *GroupProtectedEnvironmentsService) UnprotectGroupEnvironment(gid interface{}, environment string, options ...RequestOptionFunc) (*Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("groups/%s/protected_environments/%s", PathEscape(group), environment)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/group_repository_storage_move.go b/vendor/github.com/xanzy/go-gitlab/group_repository_storage_move.go
new file mode 100644
index 0000000000..18951a1661
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/group_repository_storage_move.go
@@ -0,0 +1,195 @@
+//
+// Copyright 2023, Nick Westbury
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// GroupRepositoryStorageMoveService handles communication with the group
+// repository storage move related methods of the GitLab API.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html
+type GroupRepositoryStorageMoveService struct {
+	client *Client
+}
+
+// GroupRepositoryStorageMove represents the status of a repository move.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html
+type GroupRepositoryStorageMove struct {
+	ID                     int              `json:"id"`
+	CreatedAt              *time.Time       `json:"created_at"`
+	State                  string           `json:"state"`
+	SourceStorageName      string           `json:"source_storage_name"`
+	DestinationStorageName string           `json:"destination_storage_name"`
+	Group                  *RepositoryGroup `json:"group"`
+}
+
+// RepositoryGroup represents the group whose repository is being moved.
+type RepositoryGroup struct {
+	ID     int    `json:"id"`
+	Name   string `json:"name"`
+	WebURL string `json:"web_url"`
+}
+
+// RetrieveAllGroupStorageMovesOptions represents the available
+// RetrieveAllStorageMoves() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html#retrieve-all-group-repository-storage-moves
+type RetrieveAllGroupStorageMovesOptions ListOptions
+
+// RetrieveAllStorageMoves retrieves all group repository storage moves
+// accessible by the authenticated user.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html#retrieve-all-group-repository-storage-moves
+func (g GroupRepositoryStorageMoveService) RetrieveAllStorageMoves(opts RetrieveAllGroupStorageMovesOptions, options ...RequestOptionFunc) ([]*GroupRepositoryStorageMove, *Response, error) {
+	req, err := g.client.NewRequest(http.MethodGet, "group_repository_storage_moves", opts, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var gsms []*GroupRepositoryStorageMove
+	resp, err := g.client.Do(req, &gsms)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gsms, resp, err
+}
+
+// RetrieveAllStorageMovesForGroup retrieves all repository storage moves for
+// a single group accessible by the authenticated user.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html#retrieve-all-repository-storage-moves-for-a-single-group
+func (g GroupRepositoryStorageMoveService) RetrieveAllStorageMovesForGroup(group int, opts RetrieveAllGroupStorageMovesOptions, options ...RequestOptionFunc) ([]*GroupRepositoryStorageMove, *Response, error) {
+	u := fmt.Sprintf("groups/%d/repository_storage_moves", group)
+
+	req, err := g.client.NewRequest(http.MethodGet, u, opts, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var gsms []*GroupRepositoryStorageMove
+	resp, err := g.client.Do(req, &gsms)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gsms, resp, err
+}
+
+// GetStorageMove gets a single group repository storage move.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html#get-a-single-group-repository-storage-move
+func (g GroupRepositoryStorageMoveService) GetStorageMove(repositoryStorage int, options ...RequestOptionFunc) (*GroupRepositoryStorageMove, *Response, error) {
+	u := fmt.Sprintf("group_repository_storage_moves/%d", repositoryStorage)
+
+	req, err := g.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	gsm := new(GroupRepositoryStorageMove)
+	resp, err := g.client.Do(req, gsm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gsm, resp, err
+}
+
+// GetStorageMoveForGroup gets a single repository storage move for a group.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html#get-a-single-repository-storage-move-for-a-group
+func (g GroupRepositoryStorageMoveService) GetStorageMoveForGroup(group int, repositoryStorage int, options ...RequestOptionFunc) (*GroupRepositoryStorageMove, *Response, error) {
+	u := fmt.Sprintf("groups/%d/repository_storage_moves/%d", group, repositoryStorage)
+
+	req, err := g.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	gsm := new(GroupRepositoryStorageMove)
+	resp, err := g.client.Do(req, gsm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gsm, resp, err
+}
+
+// ScheduleStorageMoveForGroupOptions represents the available
+// ScheduleStorageMoveForGroup() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html#schedule-a-repository-storage-move-for-a-group
+type ScheduleStorageMoveForGroupOptions struct {
+	DestinationStorageName *string `url:"destination_storage_name,omitempty" json:"destination_storage_name,omitempty"`
+}
+
+// ScheduleStorageMoveForGroup schedules a repository storage move for a group.
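+//
+// A minimal sketch with a hypothetical group ID and shard name; it assumes a
+// configured *gitlab.Client named git that exposes this service as
+// git.GroupRepositoryStorageMove.
+//
+//	move, _, err := git.GroupRepositoryStorageMove.ScheduleStorageMoveForGroup(123,
+//		gitlab.ScheduleStorageMoveForGroupOptions{
+//			DestinationStorageName: gitlab.Ptr("storage2"),
+//		})
+//	if err == nil {
+//		fmt.Println("move state:", move.State)
+//	}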
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html#schedule-a-repository-storage-move-for-a-group
+func (g GroupRepositoryStorageMoveService) ScheduleStorageMoveForGroup(group int, opts ScheduleStorageMoveForGroupOptions, options ...RequestOptionFunc) (*GroupRepositoryStorageMove, *Response, error) {
+	u := fmt.Sprintf("groups/%d/repository_storage_moves", group)
+
+	req, err := g.client.NewRequest(http.MethodPost, u, opts, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	gsm := new(GroupRepositoryStorageMove)
+	resp, err := g.client.Do(req, gsm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gsm, resp, err
+}
+
+// ScheduleAllGroupStorageMovesOptions represents the available
+// ScheduleAllStorageMoves() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html#schedule-repository-storage-moves-for-all-groups-on-a-storage-shard
+type ScheduleAllGroupStorageMovesOptions struct {
+	SourceStorageName      *string `url:"source_storage_name,omitempty" json:"source_storage_name,omitempty"`
+	DestinationStorageName *string `url:"destination_storage_name,omitempty" json:"destination_storage_name,omitempty"`
+}
+
+// ScheduleAllStorageMoves schedules all group repositories to be moved.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html#schedule-repository-storage-moves-for-all-groups-on-a-storage-shard
+func (g GroupRepositoryStorageMoveService) ScheduleAllStorageMoves(opts ScheduleAllGroupStorageMovesOptions, options ...RequestOptionFunc) (*Response, error) {
+	req, err := g.client.NewRequest(http.MethodPost, "group_repository_storage_moves", opts, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return g.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/group_serviceaccounts.go b/vendor/github.com/xanzy/go-gitlab/group_serviceaccounts.go
new file mode 100644
index 0000000000..1360057a25
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/group_serviceaccounts.go
@@ -0,0 +1,181 @@
+//
+// Copyright 2023, James Hong
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+)
+
+// GroupServiceAccount represents a GitLab service account user.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/groups.html#create-service-account-user
+type GroupServiceAccount struct {
+	ID       int    `json:"id"`
+	Name     string `json:"name"`
+	UserName string `json:"username"`
+}
+
+// ListServiceAccountsOptions represents the available ListServiceAccounts() options.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/group_service_accounts.html#list-service-account-users
+type ListServiceAccountsOptions struct {
+	ListOptions
+	OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"`
+	Sort    *string `url:"sort,omitempty" json:"sort,omitempty"`
+}
+
+// ListServiceAccounts gets a list of service accounts.
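+//
+// A minimal sketch with a hypothetical group path; it assumes a configured
+// *gitlab.Client named git.
+//
+//	accounts, _, err := git.Groups.ListServiceAccounts("my-group",
+//		&gitlab.ListServiceAccountsOptions{
+//			ListOptions: gitlab.ListOptions{PerPage: 100},
+//		})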
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/group_service_accounts.html#list-service-account-users
+func (s *GroupsService) ListServiceAccounts(gid interface{}, opt *ListServiceAccountsOptions, options ...RequestOptionFunc) ([]*GroupServiceAccount, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/service_accounts", PathEscape(group))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var sa []*GroupServiceAccount
+	resp, err := s.client.Do(req, &sa)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return sa, resp, nil
+}
+
+// CreateServiceAccountOptions represents the available CreateServiceAccount() options.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/group_service_accounts.html#create-a-service-account-user
+type CreateServiceAccountOptions struct {
+	Name     *string `url:"name,omitempty" json:"name,omitempty"`
+	Username *string `url:"username,omitempty" json:"username,omitempty"`
+}
+
+// CreateServiceAccount creates a service account user.
+//
+// This API endpoint works on top-level groups only. It does not work on subgroups.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#create-service-account-user
+func (s *GroupsService) CreateServiceAccount(gid interface{}, opt *CreateServiceAccountOptions, options ...RequestOptionFunc) (*GroupServiceAccount, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/service_accounts", PathEscape(group))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	sa := new(GroupServiceAccount)
+	resp, err := s.client.Do(req, sa)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return sa, resp, nil
+}
+
+// CreateServiceAccountPersonalAccessTokenOptions represents the available
+// CreateServiceAccountPersonalAccessToken() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_service_accounts.html#create-a-personal-access-token-for-a-service-account-user
+type CreateServiceAccountPersonalAccessTokenOptions struct {
+	Scopes    *[]string `url:"scopes,omitempty" json:"scopes,omitempty"`
+	Name      *string   `url:"name,omitempty" json:"name,omitempty"`
+	ExpiresAt *ISOTime  `url:"expires_at,omitempty" json:"expires_at,omitempty"`
+}
+
+// CreateServiceAccountPersonalAccessToken adds a new Personal Access Token for
+// a service account user for a group.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_service_accounts.html#create-a-personal-access-token-for-a-service-account-user
+func (s *GroupsService) CreateServiceAccountPersonalAccessToken(gid interface{}, serviceAccount int, opt *CreateServiceAccountPersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/service_accounts/%d/personal_access_tokens", PathEscape(group), serviceAccount)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pat := new(PersonalAccessToken)
+	resp, err := s.client.Do(req, pat)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pat, resp, nil
+}
+
+// RotateServiceAccountPersonalAccessToken rotates a Personal Access Token for a
+// service account user for a group.
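+//
+// A minimal rotation sketch with hypothetical service account and token IDs;
+// it assumes a configured *gitlab.Client named git. The response carries the
+// new token value:
+//
+//	pat, _, err := git.Groups.RotateServiceAccountPersonalAccessToken("my-group", 57, 1234)
+//	if err == nil {
+//		fmt.Println("rotated, new token:", pat.Token)
+//	}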
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#create-personal-access-token-for-service-account-user
+func (s *GroupsService) RotateServiceAccountPersonalAccessToken(gid interface{}, serviceAccount, token int, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/service_accounts/%d/personal_access_tokens/%d/rotate", PathEscape(group), serviceAccount, token)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pat := new(PersonalAccessToken)
+	resp, err := s.client.Do(req, pat)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pat, resp, nil
+}
+
+// DeleteServiceAccount deletes a service account user.
+//
+// This API endpoint works on top-level groups only. It does not work on subgroups.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/group_service_accounts.html#delete-a-service-account-user
+func (s *GroupsService) DeleteServiceAccount(gid interface{}, serviceAccount int, options ...RequestOptionFunc) (*Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("groups/%s/service_accounts/%d", PathEscape(group), serviceAccount)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/group_ssh_certificates.go b/vendor/github.com/xanzy/go-gitlab/group_ssh_certificates.go
new file mode 100644
index 0000000000..c29039039d
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/group_ssh_certificates.go
@@ -0,0 +1,105 @@
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// GroupSSHCertificatesService handles communication with the group
+// SSH certificate related methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/group_ssh_certificates.html
+type GroupSSHCertificatesService struct {
+	client *Client
+}
+
+// GroupSSHCertificate represents a GitLab Group SSH certificate.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/group_ssh_certificates.html
+type GroupSSHCertificate struct {
+	ID        int        `json:"id"`
+	Title     string     `json:"title"`
+	Key       string     `json:"key"`
+	CreatedAt *time.Time `json:"created_at"`
+}
+
+// ListGroupSSHCertificates gets a list of SSH certificates for a specified
+// group.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_ssh_certificates.html#get-all-ssh-certificates-for-a-particular-group
+func (s *GroupSSHCertificatesService) ListGroupSSHCertificates(gid interface{}, options ...RequestOptionFunc) ([]*GroupSSHCertificate, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/ssh_certificates", PathEscape(group))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var certs []*GroupSSHCertificate
+	resp, err := s.client.Do(req, &certs)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return certs, resp, nil
+}
+
+// CreateGroupSSHCertificateOptions represents the available
+// CreateGroupSSHCertificate() options.
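+//
+// A minimal sketch with hypothetical values; it assumes a configured
+// *gitlab.Client named git that exposes this service as
+// git.GroupSSHCertificates.
+//
+//	cert, _, err := git.GroupSSHCertificates.CreateGroupSSHCertificate("my-group",
+//		&gitlab.CreateGroupSSHCertificateOptions{
+//			Title: gitlab.Ptr("deploy CA"),
+//			Key:   gitlab.Ptr("ssh-rsa AAAA... ca@example.com"),
+//		})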
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_ssh_certificates.html#create-ssh-certificate
+type CreateGroupSSHCertificateOptions struct {
+	Key   *string `url:"key,omitempty" json:"key,omitempty"`
+	Title *string `url:"title,omitempty" json:"title,omitempty"`
+}
+
+// CreateGroupSSHCertificate creates a new SSH certificate for a specified
+// group.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_ssh_certificates.html#create-ssh-certificate
+func (s *GroupSSHCertificatesService) CreateGroupSSHCertificate(gid interface{}, opt *CreateGroupSSHCertificateOptions, options ...RequestOptionFunc) (*GroupSSHCertificate, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/ssh_certificates", PathEscape(group))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	cert := new(GroupSSHCertificate)
+	resp, err := s.client.Do(req, cert)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return cert, resp, nil
+}
+
+// DeleteGroupSSHCertificate deletes an SSH certificate from a specified group.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_ssh_certificates.html#delete-group-ssh-certificate
+func (s *GroupSSHCertificatesService) DeleteGroupSSHCertificate(gid interface{}, cert int, options ...RequestOptionFunc) (*Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("groups/%s/ssh_certificates/%d", PathEscape(group), cert)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/group_variables.go b/vendor/github.com/xanzy/go-gitlab/group_variables.go
new file mode 100644
index 0000000000..69fe44592d
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/group_variables.go
@@ -0,0 +1,218 @@
+//
+// Copyright 2021, Patrick Webster
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+)
+
+// GroupVariablesService handles communication with the
+// group variables related methods of the GitLab API.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_level_variables.html
+type GroupVariablesService struct {
+	client *Client
+}
+
+// GroupVariable represents a GitLab group Variable.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_level_variables.html
+type GroupVariable struct {
+	Key              string            `json:"key"`
+	Value            string            `json:"value"`
+	VariableType     VariableTypeValue `json:"variable_type"`
+	Protected        bool              `json:"protected"`
+	Masked           bool              `json:"masked"`
+	Raw              bool              `json:"raw"`
+	EnvironmentScope string            `json:"environment_scope"`
+	Description      string            `json:"description"`
+}
+
+func (v GroupVariable) String() string {
+	return Stringify(v)
+}
+
+// ListGroupVariablesOptions represents the available options for listing variables
+// for a group.
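+//
+// A minimal sketch with a hypothetical group path; it assumes a configured
+// *gitlab.Client named git.
+//
+//	vars, _, err := git.GroupVariables.ListVariables("my-group",
+//		&gitlab.ListGroupVariablesOptions{PerPage: 100})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, v := range vars {
+//		fmt.Println(v.Key, v.EnvironmentScope)
+//	}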
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_level_variables.html#list-group-variables +type ListGroupVariablesOptions ListOptions + +// ListVariables gets a list of all variables for a group. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_level_variables.html#list-group-variables +func (s *GroupVariablesService) ListVariables(gid interface{}, opt *ListGroupVariablesOptions, options ...RequestOptionFunc) ([]*GroupVariable, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/variables", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var vs []*GroupVariable + resp, err := s.client.Do(req, &vs) + if err != nil { + return nil, resp, err + } + + return vs, resp, nil +} + +// GetGroupVariableOptions represents the available GetVariable() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_level_variables.html#show-variable-details +type GetGroupVariableOptions struct { + Filter *VariableFilter `url:"filter,omitempty" json:"filter,omitempty"` +} + +// GetVariable gets a variable. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_level_variables.html#show-variable-details +func (s *GroupVariablesService) GetVariable(gid interface{}, key string, opt *GetGroupVariableOptions, options ...RequestOptionFunc) (*GroupVariable, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/variables/%s", PathEscape(group), url.PathEscape(key)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + v := new(GroupVariable) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, nil +} + +// CreateGroupVariableOptions represents the available CreateVariable() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_level_variables.html#create-variable +type CreateGroupVariableOptions struct { + Key *string `url:"key,omitempty" json:"key,omitempty"` + Value *string `url:"value,omitempty" json:"value,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + EnvironmentScope *string `url:"environment_scope,omitempty" json:"environment_scope,omitempty"` + Masked *bool `url:"masked,omitempty" json:"masked,omitempty"` + Protected *bool `url:"protected,omitempty" json:"protected,omitempty"` + Raw *bool `url:"raw,omitempty" json:"raw,omitempty"` + VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` +} + +// CreateVariable creates a new group variable. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/group_level_variables.html#create-variable +func (s *GroupVariablesService) CreateVariable(gid interface{}, opt *CreateGroupVariableOptions, options ...RequestOptionFunc) (*GroupVariable, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/variables", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + v := new(GroupVariable) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, nil +} + +// UpdateGroupVariableOptions represents the available UpdateVariable() +// options. 
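+//
+// A minimal sketch of updating a scoped variable with hypothetical values;
+// it assumes a configured *gitlab.Client named git.
+//
+//	v, _, err := git.GroupVariables.UpdateVariable("my-group", "DEPLOY_TOKEN",
+//		&gitlab.UpdateGroupVariableOptions{
+//			Value:            gitlab.Ptr("s3cr3t"),
+//			EnvironmentScope: gitlab.Ptr("production"),
+//		})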
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_level_variables.html#update-variable
+type UpdateGroupVariableOptions struct {
+	Value            *string            `url:"value,omitempty" json:"value,omitempty"`
+	Description      *string            `url:"description,omitempty" json:"description,omitempty"`
+	EnvironmentScope *string            `url:"environment_scope,omitempty" json:"environment_scope,omitempty"`
+	Masked           *bool              `url:"masked,omitempty" json:"masked,omitempty"`
+	Protected        *bool              `url:"protected,omitempty" json:"protected,omitempty"`
+	Raw              *bool              `url:"raw,omitempty" json:"raw,omitempty"`
+	VariableType     *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"`
+}
+
+// UpdateVariable updates an existing group variable.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_level_variables.html#update-variable
+func (s *GroupVariablesService) UpdateVariable(gid interface{}, key string, opt *UpdateGroupVariableOptions, options ...RequestOptionFunc) (*GroupVariable, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/variables/%s", PathEscape(group), url.PathEscape(key))
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(GroupVariable)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, nil
+}
+
+// RemoveVariable removes a group's variable.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_level_variables.html#remove-variable
+func (s *GroupVariablesService) RemoveVariable(gid interface{}, key string, options ...RequestOptionFunc) (*Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("groups/%s/variables/%s", PathEscape(group), url.PathEscape(key))
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/group_wikis.go b/vendor/github.com/xanzy/go-gitlab/group_wikis.go
new file mode 100644
index 0000000000..4693965fe5
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/group_wikis.go
@@ -0,0 +1,204 @@
+//
+// Copyright 2021, Markus Lackner
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+)
+
+// GroupWikisService handles communication with the group wikis related methods of
+// the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/group_wikis.html
+type GroupWikisService struct {
+	client *Client
+}
+
+// GroupWiki represents a GitLab group's wiki.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/group_wikis.html
+type GroupWiki struct {
+	Content  string          `json:"content"`
+	Encoding string          `json:"encoding"`
+	Format   WikiFormatValue `json:"format"`
+	Slug     string          `json:"slug"`
+	Title    string          `json:"title"`
+}
+
+func (w GroupWiki) String() string {
+	return Stringify(w)
+}
+
+// ListGroupWikisOptions represents the available ListGroupWikis options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_wikis.html#list-wiki-pages
+type ListGroupWikisOptions struct {
+	WithContent *bool `url:"with_content,omitempty" json:"with_content,omitempty"`
+}
+
+// ListGroupWikis lists all pages of the wiki of the given group ID.
+// When with_content is set, it also returns the content of the pages.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_wikis.html#list-wiki-pages
+func (s *GroupWikisService) ListGroupWikis(gid interface{}, opt *ListGroupWikisOptions, options ...RequestOptionFunc) ([]*GroupWiki, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/wikis", PathEscape(group))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var gws []*GroupWiki
+	resp, err := s.client.Do(req, &gws)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gws, resp, nil
+}
+
+// GetGroupWikiPageOptions represents options to GetGroupWikiPage.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_wikis.html#get-a-wiki-page
+type GetGroupWikiPageOptions struct {
+	RenderHTML *bool   `url:"render_html,omitempty" json:"render_html,omitempty"`
+	Version    *string `url:"version,omitempty" json:"version,omitempty"`
+}
+
+// GetGroupWikiPage gets a wiki page for a given group.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_wikis.html#get-a-wiki-page
+func (s *GroupWikisService) GetGroupWikiPage(gid interface{}, slug string, opt *GetGroupWikiPageOptions, options ...RequestOptionFunc) (*GroupWiki, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/wikis/%s", PathEscape(group), url.PathEscape(slug))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	gw := new(GroupWiki)
+	resp, err := s.client.Do(req, gw)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gw, resp, nil
+}
+
+// CreateGroupWikiPageOptions represents options to CreateGroupWikiPage.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_wikis.html#create-a-new-wiki-page
+type CreateGroupWikiPageOptions struct {
+	Content *string          `url:"content,omitempty" json:"content,omitempty"`
+	Title   *string          `url:"title,omitempty" json:"title,omitempty"`
+	Format  *WikiFormatValue `url:"format,omitempty" json:"format,omitempty"`
+}
+
+// CreateGroupWikiPage creates a new wiki page for the given group with
+// the given title, slug, and content.
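+//
+// A minimal sketch with hypothetical values; it assumes a configured
+// *gitlab.Client named git.
+//
+//	wiki, _, err := git.GroupWikis.CreateGroupWikiPage("my-group",
+//		&gitlab.CreateGroupWikiPageOptions{
+//			Title:   gitlab.Ptr("Runbook"),
+//			Content: gitlab.Ptr("# On-call steps"),
+//			Format:  gitlab.Ptr(gitlab.WikiFormatMarkdown),
+//		})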
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_wikis.html#create-a-new-wiki-page
+func (s *GroupWikisService) CreateGroupWikiPage(gid interface{}, opt *CreateGroupWikiPageOptions, options ...RequestOptionFunc) (*GroupWiki, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/wikis", PathEscape(group))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	w := new(GroupWiki)
+	resp, err := s.client.Do(req, w)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return w, resp, nil
+}
+
+// EditGroupWikiPageOptions represents options to EditGroupWikiPage.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_wikis.html#edit-an-existing-wiki-page
+type EditGroupWikiPageOptions struct {
+	Content *string          `url:"content,omitempty" json:"content,omitempty"`
+	Title   *string          `url:"title,omitempty" json:"title,omitempty"`
+	Format  *WikiFormatValue `url:"format,omitempty" json:"format,omitempty"`
+}
+
+// EditGroupWikiPage updates an existing wiki page. At least one parameter is
+// required to update the wiki page.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_wikis.html#edit-an-existing-wiki-page
+func (s *GroupWikisService) EditGroupWikiPage(gid interface{}, slug string, opt *EditGroupWikiPageOptions, options ...RequestOptionFunc) (*GroupWiki, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/wikis/%s", PathEscape(group), url.PathEscape(slug))
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	w := new(GroupWiki)
+	resp, err := s.client.Do(req, w)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return w, resp, nil
+}
+
+// DeleteGroupWikiPage deletes a wiki page with a given slug.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/group_wikis.html#delete-a-wiki-page
+func (s *GroupWikisService) DeleteGroupWikiPage(gid interface{}, slug string, options ...RequestOptionFunc) (*Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("groups/%s/wikis/%s", PathEscape(group), url.PathEscape(slug))
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/groups.go b/vendor/github.com/xanzy/go-gitlab/groups.go
new file mode 100644
index 0000000000..34f0cab662
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/groups.go
@@ -0,0 +1,1177 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"time"
+
+	retryablehttp "github.com/hashicorp/go-retryablehttp"
+)
+
+// GroupsService handles communication with the group related methods of
+// the GitLab API.
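+//
+// A minimal sketch of wiring up a client before using this service; the
+// token and base URL are hypothetical.
+//
+//	git, err := gitlab.NewClient("glpat-...",
+//		gitlab.WithBaseURL("https://gitlab.example.com/api/v4"))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	groups, _, err := git.Groups.ListGroups(&gitlab.ListGroupsOptions{
+//		Owned: gitlab.Ptr(true),
+//	})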
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html +type GroupsService struct { + client *Client +} + +// Group represents a GitLab group. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html +type Group struct { + ID int `json:"id"` + Name string `json:"name"` + Path string `json:"path"` + Description string `json:"description"` + MembershipLock bool `json:"membership_lock"` + Visibility VisibilityValue `json:"visibility"` + LFSEnabled bool `json:"lfs_enabled"` + DefaultBranchProtectionDefaults struct { + AllowedToPush []*GroupAccessLevel `json:"allowed_to_push"` + AllowForcePush bool `json:"allow_force_push"` + AllowedToMerge []*GroupAccessLevel `json:"allowed_to_merge"` + DeveloperCanInitialPush bool `json:"developer_can_initial_push"` + } `json:"default_branch_protection_defaults"` + AvatarURL string `json:"avatar_url"` + WebURL string `json:"web_url"` + RequestAccessEnabled bool `json:"request_access_enabled"` + RepositoryStorage string `json:"repository_storage"` + FullName string `json:"full_name"` + FullPath string `json:"full_path"` + FileTemplateProjectID int `json:"file_template_project_id"` + ParentID int `json:"parent_id"` + Projects []*Project `json:"projects"` + Statistics *Statistics `json:"statistics"` + CustomAttributes []*CustomAttribute `json:"custom_attributes"` + ShareWithGroupLock bool `json:"share_with_group_lock"` + RequireTwoFactorAuth bool `json:"require_two_factor_authentication"` + TwoFactorGracePeriod int `json:"two_factor_grace_period"` + ProjectCreationLevel ProjectCreationLevelValue `json:"project_creation_level"` + AutoDevopsEnabled bool `json:"auto_devops_enabled"` + SubGroupCreationLevel SubGroupCreationLevelValue `json:"subgroup_creation_level"` + EmailsEnabled bool `json:"emails_enabled"` + MentionsDisabled bool `json:"mentions_disabled"` + RunnersToken string `json:"runners_token"` + SharedProjects []*Project `json:"shared_projects"` + SharedRunnersSetting SharedRunnersSettingValue `json:"shared_runners_setting"` + SharedWithGroups []struct { + GroupID int `json:"group_id"` + GroupName string `json:"group_name"` + GroupFullPath string `json:"group_full_path"` + GroupAccessLevel int `json:"group_access_level"` + ExpiresAt *ISOTime `json:"expires_at"` + } `json:"shared_with_groups"` + LDAPCN string `json:"ldap_cn"` + LDAPAccess AccessLevelValue `json:"ldap_access"` + LDAPGroupLinks []*LDAPGroupLink `json:"ldap_group_links"` + SAMLGroupLinks []*SAMLGroupLink `json:"saml_group_links"` + SharedRunnersMinutesLimit int `json:"shared_runners_minutes_limit"` + ExtraSharedRunnersMinutesLimit int `json:"extra_shared_runners_minutes_limit"` + PreventForkingOutsideGroup bool `json:"prevent_forking_outside_group"` + MarkedForDeletionOn *ISOTime `json:"marked_for_deletion_on"` + CreatedAt *time.Time `json:"created_at"` + IPRestrictionRanges string `json:"ip_restriction_ranges"` + WikiAccessLevel AccessControlValue `json:"wiki_access_level"` + + // Deprecated: Use EmailsEnabled instead + EmailsDisabled bool `json:"emails_disabled"` + + // Deprecated: Use DefaultBranchProtectionDefaults instead + DefaultBranchProtection int `json:"default_branch_protection"` +} + +// GroupAccessLevel represents default branch protection defaults access levels. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#options-for-default_branch_protection_defaults +type GroupAccessLevel struct { + AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` +} + +// GroupAvatar represents a GitLab group avatar. 
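+//
+// A minimal sketch of attaching an avatar when creating a group; the file
+// name is hypothetical and a configured *gitlab.Client named git is assumed.
+//
+//	f, err := os.Open("logo.png")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer f.Close()
+//	g, _, err := git.Groups.CreateGroup(&gitlab.CreateGroupOptions{
+//		Name:   gitlab.Ptr("Example"),
+//		Path:   gitlab.Ptr("example"),
+//		Avatar: &gitlab.GroupAvatar{Filename: "logo.png", Image: f},
+//	})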
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html +type GroupAvatar struct { + Filename string + Image io.Reader +} + +// MarshalJSON implements the json.Marshaler interface. +func (a *GroupAvatar) MarshalJSON() ([]byte, error) { + if a.Filename == "" && a.Image == nil { + return []byte(`""`), nil + } + type alias GroupAvatar + return json.Marshal((*alias)(a)) +} + +// LDAPGroupLink represents a GitLab LDAP group link. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#ldap-group-links +type LDAPGroupLink struct { + CN string `json:"cn"` + Filter string `json:"filter"` + GroupAccess AccessLevelValue `json:"group_access"` + Provider string `json:"provider"` +} + +// SAMLGroupLink represents a GitLab SAML group link. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#saml-group-links +type SAMLGroupLink struct { + Name string `json:"name"` + AccessLevel AccessLevelValue `json:"access_level"` + MemberRoleID int `json:"member_role_id,omitempty"` +} + +// ListGroupsOptions represents the available ListGroups() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#list-groups +type ListGroupsOptions struct { + ListOptions + SkipGroups *[]int `url:"skip_groups,omitempty" del:"," json:"skip_groups,omitempty"` + AllAvailable *bool `url:"all_available,omitempty" json:"all_available,omitempty"` + Search *string `url:"search,omitempty" json:"search,omitempty"` + OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` + Sort *string `url:"sort,omitempty" json:"sort,omitempty"` + Statistics *bool `url:"statistics,omitempty" json:"statistics,omitempty"` + WithCustomAttributes *bool `url:"with_custom_attributes,omitempty" json:"with_custom_attributes,omitempty"` + Owned *bool `url:"owned,omitempty" json:"owned,omitempty"` + MinAccessLevel *AccessLevelValue `url:"min_access_level,omitempty" json:"min_access_level,omitempty"` + TopLevelOnly *bool `url:"top_level_only,omitempty" json:"top_level_only,omitempty"` + RepositoryStorage *string `url:"repository_storage,omitempty" json:"repository_storage,omitempty"` +} + +// ListGroups gets a list of groups (as user: my groups, as admin: all groups). +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#list-groups +func (s *GroupsService) ListGroups(opt *ListGroupsOptions, options ...RequestOptionFunc) ([]*Group, *Response, error) { + req, err := s.client.NewRequest(http.MethodGet, "groups", opt, options) + if err != nil { + return nil, nil, err + } + + var gs []*Group + resp, err := s.client.Do(req, &gs) + if err != nil { + return nil, resp, err + } + + return gs, resp, nil +} + +// ListSubGroupsOptions represents the available ListSubGroups() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#list-a-groups-subgroups +type ListSubGroupsOptions ListGroupsOptions + +// ListSubGroups gets a list of subgroups for a given group. 
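+//
+// A minimal sketch with a hypothetical parent group ID; it assumes a
+// configured *gitlab.Client named git.
+//
+//	subs, _, err := git.Groups.ListSubGroups(42, &gitlab.ListSubGroupsOptions{
+//		AllAvailable: gitlab.Ptr(true),
+//	})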
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/groups.html#list-a-groups-subgroups
+func (s *GroupsService) ListSubGroups(gid interface{}, opt *ListSubGroupsOptions, options ...RequestOptionFunc) ([]*Group, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/subgroups", PathEscape(group))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var gs []*Group
+	resp, err := s.client.Do(req, &gs)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gs, resp, nil
+}
+
+// ListDescendantGroupsOptions represents the available ListDescendantGroups()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/groups.html#list-a-groups-descendant-groups
+type ListDescendantGroupsOptions ListGroupsOptions
+
+// ListDescendantGroups gets a list of descendant groups for a given group.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/groups.html#list-a-groups-descendant-groups
+func (s *GroupsService) ListDescendantGroups(gid interface{}, opt *ListDescendantGroupsOptions, options ...RequestOptionFunc) ([]*Group, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/descendant_groups", PathEscape(group))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var gs []*Group
+	resp, err := s.client.Do(req, &gs)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gs, resp, nil
+}
+
+// ListGroupProjectsOptions represents the available ListGroupProjects()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/groups.html#list-a-groups-projects
+type ListGroupProjectsOptions struct {
+	ListOptions
+	Archived                 *bool             `url:"archived,omitempty" json:"archived,omitempty"`
+	IncludeSubGroups         *bool             `url:"include_subgroups,omitempty" json:"include_subgroups,omitempty"`
+	MinAccessLevel           *AccessLevelValue `url:"min_access_level,omitempty" json:"min_access_level,omitempty"`
+	OrderBy                  *string           `url:"order_by,omitempty" json:"order_by,omitempty"`
+	Owned                    *bool             `url:"owned,omitempty" json:"owned,omitempty"`
+	Search                   *string           `url:"search,omitempty" json:"search,omitempty"`
+	Simple                   *bool             `url:"simple,omitempty" json:"simple,omitempty"`
+	Sort                     *string           `url:"sort,omitempty" json:"sort,omitempty"`
+	Starred                  *bool             `url:"starred,omitempty" json:"starred,omitempty"`
+	Topic                    *string           `url:"topic,omitempty" json:"topic,omitempty"`
+	Visibility               *VisibilityValue  `url:"visibility,omitempty" json:"visibility,omitempty"`
+	WithCustomAttributes     *bool             `url:"with_custom_attributes,omitempty" json:"with_custom_attributes,omitempty"`
+	WithIssuesEnabled        *bool             `url:"with_issues_enabled,omitempty" json:"with_issues_enabled,omitempty"`
+	WithMergeRequestsEnabled *bool             `url:"with_merge_requests_enabled,omitempty" json:"with_merge_requests_enabled,omitempty"`
+	WithSecurityReports      *bool             `url:"with_security_reports,omitempty" json:"with_security_reports,omitempty"`
+	WithShared               *bool             `url:"with_shared,omitempty" json:"with_shared,omitempty"`
+}
+
+// ListGroupProjects gets a list of group projects.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/groups.html#list-a-groups-projects
+func (s *GroupsService) ListGroupProjects(gid interface{}, opt *ListGroupProjectsOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/projects", PathEscape(group))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var ps []*Project
+	resp, err := s.client.Do(req, &ps)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ps, resp, nil
+}
+
+// GetGroupOptions represents the available GetGroup() options.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#details-of-a-group
+type GetGroupOptions struct {
+	ListOptions
+	WithCustomAttributes *bool `url:"with_custom_attributes,omitempty" json:"with_custom_attributes,omitempty"`
+	WithProjects         *bool `url:"with_projects,omitempty" json:"with_projects,omitempty"`
+}
+
+// GetGroup gets all details of a group.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#details-of-a-group
+func (s *GroupsService) GetGroup(gid interface{}, opt *GetGroupOptions, options ...RequestOptionFunc) (*Group, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s", PathEscape(group))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	g := new(Group)
+	resp, err := s.client.Do(req, g)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return g, resp, nil
+}
+
+// DownloadAvatar downloads a group avatar.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/groups.html#download-a-group-avatar
+func (s *GroupsService) DownloadAvatar(gid interface{}, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/avatar", PathEscape(group))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	avatar := new(bytes.Buffer)
+	resp, err := s.client.Do(req, avatar)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return bytes.NewReader(avatar.Bytes()), resp, err
+}
+
+// CreateGroupOptions represents the available CreateGroup() options.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#new-group
+type CreateGroupOptions struct {
+	Name                            *string                                 `url:"name,omitempty" json:"name,omitempty"`
+	Path                            *string                                 `url:"path,omitempty" json:"path,omitempty"`
+	Avatar                          *GroupAvatar                            `url:"-" json:"-"`
+	Description                     *string                                 `url:"description,omitempty" json:"description,omitempty"`
+	MembershipLock                  *bool                                   `url:"membership_lock,omitempty" json:"membership_lock,omitempty"`
+	Visibility                      *VisibilityValue                        `url:"visibility,omitempty" json:"visibility,omitempty"`
+	ShareWithGroupLock              *bool                                   `url:"share_with_group_lock,omitempty" json:"share_with_group_lock,omitempty"`
+	RequireTwoFactorAuth            *bool                                   `url:"require_two_factor_authentication,omitempty" json:"require_two_factor_authentication,omitempty"`
+	TwoFactorGracePeriod            *int                                    `url:"two_factor_grace_period,omitempty" json:"two_factor_grace_period,omitempty"`
+	ProjectCreationLevel            *ProjectCreationLevelValue              `url:"project_creation_level,omitempty" json:"project_creation_level,omitempty"`
+	AutoDevopsEnabled               *bool                                   `url:"auto_devops_enabled,omitempty" json:"auto_devops_enabled,omitempty"`
+	SubGroupCreationLevel           *SubGroupCreationLevelValue             `url:"subgroup_creation_level,omitempty" json:"subgroup_creation_level,omitempty"`
+	EmailsEnabled                   *bool                                   `url:"emails_enabled,omitempty" json:"emails_enabled,omitempty"`
+	MentionsDisabled                *bool                                   `url:"mentions_disabled,omitempty" json:"mentions_disabled,omitempty"`
+	LFSEnabled                      *bool                                   `url:"lfs_enabled,omitempty" json:"lfs_enabled,omitempty"`
+	DefaultBranchProtectionDefaults *DefaultBranchProtectionDefaultsOptions `url:"default_branch_protection_defaults,omitempty" json:"default_branch_protection_defaults,omitempty"`
+	RequestAccessEnabled            *bool                                   `url:"request_access_enabled,omitempty" json:"request_access_enabled,omitempty"`
+	ParentID                        *int                                    `url:"parent_id,omitempty" json:"parent_id,omitempty"`
+	SharedRunnersMinutesLimit       *int                                    `url:"shared_runners_minutes_limit,omitempty" json:"shared_runners_minutes_limit,omitempty"`
+	ExtraSharedRunnersMinutesLimit  *int                                    `url:"extra_shared_runners_minutes_limit,omitempty" json:"extra_shared_runners_minutes_limit,omitempty"`
+	IPRestrictionRanges             *string                                 `url:"ip_restriction_ranges,omitempty" json:"ip_restriction_ranges,omitempty"`
+	WikiAccessLevel                 *AccessControlValue                     `url:"wiki_access_level,omitempty" json:"wiki_access_level,omitempty"`
+
+	// Deprecated: Use EmailsEnabled instead
+	EmailsDisabled *bool `url:"emails_disabled,omitempty" json:"emails_disabled,omitempty"`
+
+	// Deprecated: Use DefaultBranchProtectionDefaults instead
+	DefaultBranchProtection *int `url:"default_branch_protection,omitempty" json:"default_branch_protection,omitempty"`
+}
+
+// DefaultBranchProtectionDefaultsOptions represents the available options for
+// using default_branch_protection_defaults in CreateGroup() or UpdateGroup().
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/groups.html#options-for-default_branch_protection_defaults
+type DefaultBranchProtectionDefaultsOptions struct {
+	AllowedToPush           *[]*GroupAccessLevel `url:"allowed_to_push,omitempty" json:"allowed_to_push,omitempty"`
+	AllowForcePush          *bool                `url:"allow_force_push,omitempty" json:"allow_force_push,omitempty"`
+	AllowedToMerge          *[]*GroupAccessLevel `url:"allowed_to_merge,omitempty" json:"allowed_to_merge,omitempty"`
+	DeveloperCanInitialPush *bool                `url:"developer_can_initial_push,omitempty" json:"developer_can_initial_push,omitempty"`
+}
+
+// CreateGroup creates a new project group. Available only for users who can
+// create groups.
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#new-group +func (s *GroupsService) CreateGroup(opt *CreateGroupOptions, options ...RequestOptionFunc) (*Group, *Response, error) { + var err error + var req *retryablehttp.Request + + if opt.Avatar == nil { + req, err = s.client.NewRequest(http.MethodPost, "groups", opt, options) + } else { + req, err = s.client.UploadRequest( + http.MethodPost, + "groups", + opt.Avatar.Image, + opt.Avatar.Filename, + UploadAvatar, + opt, + options, + ) + } + if err != nil { + return nil, nil, err + } + + g := new(Group) + resp, err := s.client.Do(req, g) + if err != nil { + return nil, resp, err + } + + return g, resp, nil +} + +// TransferGroup transfers a project to the Group namespace. Available only +// for administrators. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#transfer-project-to-group +func (s *GroupsService) TransferGroup(gid interface{}, pid interface{}, options ...RequestOptionFunc) (*Group, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/projects/%s", PathEscape(group), PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPost, u, nil, options) + if err != nil { + return nil, nil, err + } + + g := new(Group) + resp, err := s.client.Do(req, g) + if err != nil { + return nil, resp, err + } + + return g, resp, nil +} + +// TransferSubGroupOptions represents the available TransferSubGroup() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#transfer-a-group-to-a-new-parent-group--turn-a-subgroup-to-a-top-level-group +type TransferSubGroupOptions struct { + GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` +} + +// TransferSubGroup transfers a group to a new parent group or turns a +// subgroup into a top-level group. Available to administrators and users. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#transfer-a-group-to-a-new-parent-group--turn-a-subgroup-to-a-top-level-group +func (s *GroupsService) TransferSubGroup(gid interface{}, opt *TransferSubGroupOptions, options ...RequestOptionFunc) (*Group, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/transfer", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + g := new(Group) + resp, err := s.client.Do(req, g) + if err != nil { + return nil, resp, err + } + + return g, resp, nil +} + +// UpdateGroupOptions represents the available UpdateGroup() options.
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#update-group +type UpdateGroupOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + Path *string `url:"path,omitempty" json:"path,omitempty"` + Avatar *GroupAvatar `url:"-" json:"avatar,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + MembershipLock *bool `url:"membership_lock,omitempty" json:"membership_lock,omitempty"` + Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"` + ShareWithGroupLock *bool `url:"share_with_group_lock,omitempty" json:"share_with_group_lock,omitempty"` + RequireTwoFactorAuth *bool `url:"require_two_factor_authentication,omitempty" json:"require_two_factor_authentication,omitempty"` + TwoFactorGracePeriod *int `url:"two_factor_grace_period,omitempty" json:"two_factor_grace_period,omitempty"` + ProjectCreationLevel *ProjectCreationLevelValue `url:"project_creation_level,omitempty" json:"project_creation_level,omitempty"` + AutoDevopsEnabled *bool `url:"auto_devops_enabled,omitempty" json:"auto_devops_enabled,omitempty"` + SubGroupCreationLevel *SubGroupCreationLevelValue `url:"subgroup_creation_level,omitempty" json:"subgroup_creation_level,omitempty"` + EmailsEnabled *bool `url:"emails_enabled,omitempty" json:"emails_enabled,omitempty"` + MentionsDisabled *bool `url:"mentions_disabled,omitempty" json:"mentions_disabled,omitempty"` + LFSEnabled *bool `url:"lfs_enabled,omitempty" json:"lfs_enabled,omitempty"` + RequestAccessEnabled *bool `url:"request_access_enabled,omitempty" json:"request_access_enabled,omitempty"` + DefaultBranchProtectionDefaults *DefaultBranchProtectionDefaultsOptions `url:"default_branch_protection_defaults,omitempty" json:"default_branch_protection_defaults,omitempty"` + FileTemplateProjectID *int `url:"file_template_project_id,omitempty" json:"file_template_project_id,omitempty"` + SharedRunnersMinutesLimit *int `url:"shared_runners_minutes_limit,omitempty" json:"shared_runners_minutes_limit,omitempty"` + ExtraSharedRunnersMinutesLimit *int `url:"extra_shared_runners_minutes_limit,omitempty" json:"extra_shared_runners_minutes_limit,omitempty"` + PreventForkingOutsideGroup *bool `url:"prevent_forking_outside_group,omitempty" json:"prevent_forking_outside_group,omitempty"` + SharedRunnersSetting *SharedRunnersSettingValue `url:"shared_runners_setting,omitempty" json:"shared_runners_setting,omitempty"` + PreventSharingGroupsOutsideHierarchy *bool `url:"prevent_sharing_groups_outside_hierarchy,omitempty" json:"prevent_sharing_groups_outside_hierarchy,omitempty"` + IPRestrictionRanges *string `url:"ip_restriction_ranges,omitempty" json:"ip_restriction_ranges,omitempty"` + WikiAccessLevel *AccessControlValue `url:"wiki_access_level,omitempty" json:"wiki_access_level,omitempty"` + + // Deprecated: Use EmailsEnabled instead + EmailsDisabled *bool `url:"emails_disabled,omitempty" json:"emails_disabled,omitempty"` + + // Deprecated: Use DefaultBranchProtectionDefaults instead + DefaultBranchProtection *int `url:"default_branch_protection,omitempty" json:"default_branch_protection,omitempty"` +} + +// UpdateGroup updates an existing group; only available to group owners and +// administrators. 
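+//
+// Usage sketch (editor's illustration; git is assumed to be a
+// *gitlab.Client, and the group ID and values are assumptions):
+//
+//	group, _, err := git.Groups.UpdateGroup(42, &gitlab.UpdateGroupOptions{
+//		Description: gitlab.Ptr("Build-related repositories"),
+//		Visibility:  gitlab.Ptr(gitlab.InternalVisibility),
+//	})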
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#update-group +func (s *GroupsService) UpdateGroup(gid interface{}, opt *UpdateGroupOptions, options ...RequestOptionFunc) (*Group, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s", PathEscape(group)) + + var req *retryablehttp.Request + + if opt.Avatar == nil || (opt.Avatar.Filename == "" && opt.Avatar.Image == nil) { + req, err = s.client.NewRequest(http.MethodPut, u, opt, options) + } else { + req, err = s.client.UploadRequest( + http.MethodPut, + u, + opt.Avatar.Image, + opt.Avatar.Filename, + UploadAvatar, + opt, + options, + ) + } + if err != nil { + return nil, nil, err + } + + g := new(Group) + resp, err := s.client.Do(req, g) + if err != nil { + return nil, resp, err + } + + return g, resp, nil +} + +// UploadAvatar uploads a group avatar. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#upload-a-group-avatar +func (s *GroupsService) UploadAvatar(gid interface{}, avatar io.Reader, filename string, options ...RequestOptionFunc) (*Group, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s", PathEscape(group)) + + req, err := s.client.UploadRequest( + http.MethodPut, + u, + avatar, + filename, + UploadAvatar, + nil, + options, + ) + if err != nil { + return nil, nil, err + } + + g := new(Group) + resp, err := s.client.Do(req, g) + if err != nil { + return nil, resp, err + } + + return g, resp, nil +} + +// DeleteGroupOptions represents the available DeleteGroup() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#remove-group +type DeleteGroupOptions struct { + PermanentlyRemove *bool `url:"permanently_remove,omitempty" json:"permanently_remove,omitempty"` + FullPath *string `url:"full_path,omitempty" json:"full_path,omitempty"` +} + +// DeleteGroup removes a group with all projects inside. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#remove-group +func (s *GroupsService) DeleteGroup(gid interface{}, opt *DeleteGroupOptions, options ...RequestOptionFunc) (*Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("groups/%s", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodDelete, u, opt, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// RestoreGroup restores a previously deleted group. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#restore-group-marked-for-deletion +func (s *GroupsService) RestoreGroup(gid interface{}, options ...RequestOptionFunc) (*Group, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/restore", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodPost, u, nil, options) + if err != nil { + return nil, nil, err + } + + g := new(Group) + resp, err := s.client.Do(req, g) + if err != nil { + return nil, resp, err + } + + return g, resp, nil +} + +// SearchGroup gets all groups that match the search string in their name or path.
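+//
+// Usage sketch (editor's illustration; git is assumed to be a
+// *gitlab.Client):
+//
+//	groups, _, err := git.Groups.SearchGroup("chains")
+//	if err == nil {
+//		for _, g := range groups {
+//			fmt.Println(g.ID, g.FullPath)
+//		}
+//	}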
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#search-for-group +func (s *GroupsService) SearchGroup(query string, options ...RequestOptionFunc) ([]*Group, *Response, error) { + var q struct { + Search string `url:"search,omitempty" json:"search,omitempty"` + } + q.Search = query + + req, err := s.client.NewRequest(http.MethodGet, "groups", &q, options) + if err != nil { + return nil, nil, err + } + + var gs []*Group + resp, err := s.client.Do(req, &gs) + if err != nil { + return nil, resp, err + } + + return gs, resp, nil +} + +// ListProvisionedUsersOptions represents the available ListProvisionedUsers() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#list-provisioned-users +type ListProvisionedUsersOptions struct { + ListOptions + Username *string `url:"username,omitempty" json:"username,omitempty"` + Search *string `url:"search,omitempty" json:"search,omitempty"` + Active *bool `url:"active,omitempty" json:"active,omitempty"` + Blocked *bool `url:"blocked,omitempty" json:"blocked,omitempty"` + CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"` + CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` +} + +// ListProvisionedUsers gets a list of users provisioned by the given group. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#list-provisioned-users +func (s *GroupsService) ListProvisionedUsers(gid interface{}, opt *ListProvisionedUsersOptions, options ...RequestOptionFunc) ([]*User, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/provisioned_users", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var us []*User + resp, err := s.client.Do(req, &us) + if err != nil { + return nil, resp, err + } + + return us, resp, nil +} + +// ListGroupLDAPLinks lists the group's LDAP links. Available only for users who +// can edit groups. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#list-ldap-group-links +func (s *GroupsService) ListGroupLDAPLinks(gid interface{}, options ...RequestOptionFunc) ([]*LDAPGroupLink, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/ldap_group_links", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + var gls []*LDAPGroupLink + resp, err := s.client.Do(req, &gls) + if err != nil { + return nil, resp, err + } + + return gls, resp, nil +} + +// AddGroupLDAPLinkOptions represents the available AddGroupLDAPLink() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#add-ldap-group-link-with-cn-or-filter +type AddGroupLDAPLinkOptions struct { + CN *string `url:"cn,omitempty" json:"cn,omitempty"` + Filter *string `url:"filter,omitempty" json:"filter,omitempty"` + GroupAccess *AccessLevelValue `url:"group_access,omitempty" json:"group_access,omitempty"` + Provider *string `url:"provider,omitempty" json:"provider,omitempty"` +} + +// DeleteGroupLDAPLinkWithCNOrFilterOptions represents the available DeleteGroupLDAPLinkWithCNOrFilter() options. 
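+//
+// Per the GitLab docs, CN and Filter are alternatives, so typically exactly
+// one of them is set (editor's sketch; the values are assumptions):
+//
+//	opt := &gitlab.DeleteGroupLDAPLinkWithCNOrFilterOptions{
+//		CN:       gitlab.Ptr("engineering"),
+//		Provider: gitlab.Ptr("ldapmain"),
+//	}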
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#delete-ldap-group-link-with-cn-or-filter +type DeleteGroupLDAPLinkWithCNOrFilterOptions struct { + CN *string `url:"cn,omitempty" json:"cn,omitempty"` + Filter *string `url:"filter,omitempty" json:"filter,omitempty"` + Provider *string `url:"provider,omitempty" json:"provider,omitempty"` +} + +// AddGroupLDAPLink creates a new group LDAP link. Available only for users who +// can edit groups. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#add-ldap-group-link-with-cn-or-filter +func (s *GroupsService) AddGroupLDAPLink(gid interface{}, opt *AddGroupLDAPLinkOptions, options ...RequestOptionFunc) (*LDAPGroupLink, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/ldap_group_links", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + gl := new(LDAPGroupLink) + resp, err := s.client.Do(req, gl) + if err != nil { + return nil, resp, err + } + + return gl, resp, nil +} + +// DeleteGroupLDAPLink deletes a group LDAP link. Available only for users who +// can edit groups. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#delete-ldap-group-link +func (s *GroupsService) DeleteGroupLDAPLink(gid interface{}, cn string, options ...RequestOptionFunc) (*Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("groups/%s/ldap_group_links/%s", PathEscape(group), PathEscape(cn)) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// DeleteGroupLDAPLinkWithCNOrFilter deletes a group LDAP link. Available only for users who +// can edit groups. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#delete-ldap-group-link-with-cn-or-filter +func (s *GroupsService) DeleteGroupLDAPLinkWithCNOrFilter(gid interface{}, opts *DeleteGroupLDAPLinkWithCNOrFilterOptions, options ...RequestOptionFunc) (*Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("groups/%s/ldap_group_links", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodDelete, u, opts, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// DeleteGroupLDAPLinkForProvider deletes a group LDAP link from a specific +// provider. Available only for users who can edit groups. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#delete-ldap-group-link +func (s *GroupsService) DeleteGroupLDAPLinkForProvider(gid interface{}, provider, cn string, options ...RequestOptionFunc) (*Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, err + } + u := fmt.Sprintf( + "groups/%s/ldap_group_links/%s/%s", + PathEscape(group), + PathEscape(provider), + PathEscape(cn), + ) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// ListGroupSAMLLinks lists the group's SAML links. Available only for users who +// can edit groups. 
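+//
+// Usage sketch (editor's illustration; git is assumed to be a
+// *gitlab.Client, and the fields come from the SAMLGroupLink type declared
+// elsewhere in this package):
+//
+//	links, _, err := git.Groups.ListGroupSAMLLinks("my-group")
+//	if err == nil {
+//		for _, l := range links {
+//			fmt.Println(l.Name, l.AccessLevel)
+//		}
+//	}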
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#list-saml-group-links +func (s *GroupsService) ListGroupSAMLLinks(gid interface{}, options ...RequestOptionFunc) ([]*SAMLGroupLink, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/saml_group_links", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + var gl []*SAMLGroupLink + resp, err := s.client.Do(req, &gl) + if err != nil { + return nil, resp, err + } + + return gl, resp, nil +} + +// GetGroupSAMLLink gets a specific group SAML link. Available only for users who +// can edit groups. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#get-saml-group-link +func (s *GroupsService) GetGroupSAMLLink(gid interface{}, samlGroupName string, options ...RequestOptionFunc) (*SAMLGroupLink, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/saml_group_links/%s", PathEscape(group), PathEscape(samlGroupName)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + gl := new(SAMLGroupLink) + resp, err := s.client.Do(req, gl) + if err != nil { + return nil, resp, err + } + + return gl, resp, nil +} + +// AddGroupSAMLLinkOptions represents the available AddGroupSAMLLink() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#add-saml-group-link +type AddGroupSAMLLinkOptions struct { + SAMLGroupName *string `url:"saml_group_name,omitempty" json:"saml_group_name,omitempty"` + AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` + MemberRoleID *int `url:"member_role_id,omitempty" json:"member_role_id,omitempty"` +} + +// AddGroupSAMLLink creates a new group SAML link. Available only for users who +// can edit groups. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#add-saml-group-link +func (s *GroupsService) AddGroupSAMLLink(gid interface{}, opt *AddGroupSAMLLinkOptions, options ...RequestOptionFunc) (*SAMLGroupLink, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/saml_group_links", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + gl := new(SAMLGroupLink) + resp, err := s.client.Do(req, gl) + if err != nil { + return nil, resp, err + } + + return gl, resp, nil +} + +// DeleteGroupSAMLLink deletes a group SAML link. Available only for users who +// can edit groups. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#delete-saml-group-link +func (s *GroupsService) DeleteGroupSAMLLink(gid interface{}, samlGroupName string, options ...RequestOptionFunc) (*Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("groups/%s/saml_group_links/%s", PathEscape(group), PathEscape(samlGroupName)) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// ShareGroupWithGroupOptions represents the available ShareGroupWithGroup() options.
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#share-groups-with-groups +type ShareGroupWithGroupOptions struct { + GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` + GroupAccess *AccessLevelValue `url:"group_access,omitempty" json:"group_access,omitempty"` + ExpiresAt *ISOTime `url:"expires_at,omitempty" json:"expires_at,omitempty"` +} + +// ShareGroupWithGroup shares a group with another group. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#create-a-link-to-share-a-group-with-another-group +func (s *GroupsService) ShareGroupWithGroup(gid interface{}, opt *ShareGroupWithGroupOptions, options ...RequestOptionFunc) (*Group, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/share", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + g := new(Group) + resp, err := s.client.Do(req, g) + if err != nil { + return nil, resp, err + } + + return g, resp, nil +} + +// UnshareGroupFromGroup unshares a group from another group. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#delete-link-sharing-group-with-another-group +func (s *GroupsService) UnshareGroupFromGroup(gid interface{}, groupID int, options ...RequestOptionFunc) (*Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("groups/%s/share/%d", PathEscape(group), groupID) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// GroupPushRules represents a group push rule. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#get-group-push-rules +type GroupPushRules struct { + ID int `json:"id"` + CreatedAt *time.Time `json:"created_at"` + CommitMessageRegex string `json:"commit_message_regex"` + CommitMessageNegativeRegex string `json:"commit_message_negative_regex"` + BranchNameRegex string `json:"branch_name_regex"` + DenyDeleteTag bool `json:"deny_delete_tag"` + MemberCheck bool `json:"member_check"` + PreventSecrets bool `json:"prevent_secrets"` + AuthorEmailRegex string `json:"author_email_regex"` + FileNameRegex string `json:"file_name_regex"` + MaxFileSize int `json:"max_file_size"` + CommitCommitterCheck bool `json:"commit_committer_check"` + CommitCommitterNameCheck bool `json:"commit_committer_name_check"` + RejectUnsignedCommits bool `json:"reject_unsigned_commits"` + RejectNonDCOCommits bool `json:"reject_non_dco_commits"` +} + +// GetGroupPushRules gets the push rules of a group. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#get-group-push-rules +func (s *GroupsService) GetGroupPushRules(gid interface{}, options ...RequestOptionFunc) (*GroupPushRules, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/push_rule", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + gpr := new(GroupPushRules) + resp, err := s.client.Do(req, gpr) + if err != nil { + return nil, resp, err + } + + return gpr, resp, nil +} + +// AddGroupPushRuleOptions represents the available AddGroupPushRule() +// options. 
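+//
+// Usage sketch (editor's illustration; git is assumed to be a
+// *gitlab.Client, and the rule values are assumptions):
+//
+//	rules, _, err := git.Groups.AddGroupPushRule("my-group",
+//		&gitlab.AddGroupPushRuleOptions{
+//			CommitMessageRegex:    gitlab.Ptr("^JIRA-\\d+"),
+//			RejectUnsignedCommits: gitlab.Ptr(true),
+//		})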
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#add-group-push-rule +type AddGroupPushRuleOptions struct { + AuthorEmailRegex *string `url:"author_email_regex,omitempty" json:"author_email_regex,omitempty"` + BranchNameRegex *string `url:"branch_name_regex,omitempty" json:"branch_name_regex,omitempty"` + CommitCommitterCheck *bool `url:"commit_committer_check,omitempty" json:"commit_committer_check,omitempty"` + CommitCommitterNameCheck *bool `url:"commit_committer_name_check,omitempty" json:"commit_committer_name_check,omitempty"` + CommitMessageNegativeRegex *string `url:"commit_message_negative_regex,omitempty" json:"commit_message_negative_regex,omitempty"` + CommitMessageRegex *string `url:"commit_message_regex,omitempty" json:"commit_message_regex,omitempty"` + DenyDeleteTag *bool `url:"deny_delete_tag,omitempty" json:"deny_delete_tag,omitempty"` + FileNameRegex *string `url:"file_name_regex,omitempty" json:"file_name_regex,omitempty"` + MaxFileSize *int `url:"max_file_size,omitempty" json:"max_file_size,omitempty"` + MemberCheck *bool `url:"member_check,omitempty" json:"member_check,omitempty"` + PreventSecrets *bool `url:"prevent_secrets,omitempty" json:"prevent_secrets,omitempty"` + RejectUnsignedCommits *bool `url:"reject_unsigned_commits,omitempty" json:"reject_unsigned_commits,omitempty"` + RejectNonDCOCommits *bool `url:"reject_non_dco_commits,omitempty" json:"reject_non_dco_commits,omitempty"` +} + +// AddGroupPushRule adds push rules to the specified group. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#add-group-push-rule +func (s *GroupsService) AddGroupPushRule(gid interface{}, opt *AddGroupPushRuleOptions, options ...RequestOptionFunc) (*GroupPushRules, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/push_rule", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + gpr := new(GroupPushRules) + resp, err := s.client.Do(req, gpr) + if err != nil { + return nil, resp, err + } + + return gpr, resp, nil +} + +// EditGroupPushRuleOptions represents the available EditGroupPushRule() +// options. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#edit-group-push-rule +type EditGroupPushRuleOptions struct { + AuthorEmailRegex *string `url:"author_email_regex,omitempty" json:"author_email_regex,omitempty"` + BranchNameRegex *string `url:"branch_name_regex,omitempty" json:"branch_name_regex,omitempty"` + CommitCommitterCheck *bool `url:"commit_committer_check,omitempty" json:"commit_committer_check,omitempty"` + CommitCommitterNameCheck *bool `url:"commit_committer_name_check,omitempty" json:"commit_committer_name_check,omitempty"` + CommitMessageNegativeRegex *string `url:"commit_message_negative_regex,omitempty" json:"commit_message_negative_regex,omitempty"` + CommitMessageRegex *string `url:"commit_message_regex,omitempty" json:"commit_message_regex,omitempty"` + DenyDeleteTag *bool `url:"deny_delete_tag,omitempty" json:"deny_delete_tag,omitempty"` + FileNameRegex *string `url:"file_name_regex,omitempty" json:"file_name_regex,omitempty"` + MaxFileSize *int `url:"max_file_size,omitempty" json:"max_file_size,omitempty"` + MemberCheck *bool `url:"member_check,omitempty" json:"member_check,omitempty"` + PreventSecrets *bool `url:"prevent_secrets,omitempty" json:"prevent_secrets,omitempty"` + RejectUnsignedCommits *bool `url:"reject_unsigned_commits,omitempty" json:"reject_unsigned_commits,omitempty"` + RejectNonDCOCommits *bool `url:"reject_non_dco_commits,omitempty" json:"reject_non_dco_commits,omitempty"` +} + +// EditGroupPushRule edits a push rule for a specified group. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#edit-group-push-rule +func (s *GroupsService) EditGroupPushRule(gid interface{}, opt *EditGroupPushRuleOptions, options ...RequestOptionFunc) (*GroupPushRules, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/push_rule", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, nil, err + } + + gpr := new(GroupPushRules) + resp, err := s.client.Do(req, gpr) + if err != nil { + return nil, resp, err + } + + return gpr, resp, nil +} + +// DeleteGroupPushRule deletes the push rules of a group. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#delete-group-push-rule +func (s *GroupsService) DeleteGroupPushRule(gid interface{}, options ...RequestOptionFunc) (*Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("groups/%s/push_rule", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/import.go b/vendor/github.com/xanzy/go-gitlab/import.go new file mode 100644 index 0000000000..a8164a70c6 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/import.go @@ -0,0 +1,266 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "net/http" +) + +// ImportService handles communication with the import +// related methods of the GitLab API. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html +type ImportService struct { + client *Client +} + +// GitHubImport represents the response from an import from GitHub. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-repository-from-github +type GitHubImport struct { + ID int `json:"id"` + Name string `json:"name"` + FullPath string `json:"full_path"` + FullName string `json:"full_name"` + RefsUrl string `json:"refs_url"` + ImportSource string `json:"import_source"` + ImportStatus string `json:"import_status"` + HumanImportStatusName string `json:"human_import_status_name"` + ProviderLink string `json:"provider_link"` + RelationType string `json:"relation_type"` + ImportWarning string `json:"import_warning"` +} + +func (s GitHubImport) String() string { + return Stringify(s) +} + +// ImportRepositoryFromGitHubOptions represents the available +// ImportRepositoryFromGitHub() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-repository-from-github +type ImportRepositoryFromGitHubOptions struct { + PersonalAccessToken *string `url:"personal_access_token,omitempty" json:"personal_access_token,omitempty"` + RepoID *int `url:"repo_id,omitempty" json:"repo_id,omitempty"` + NewName *string `url:"new_name,omitempty" json:"new_name,omitempty"` + TargetNamespace *string `url:"target_namespace,omitempty" json:"target_namespace,omitempty"` + GitHubHostname *string `url:"github_hostname,omitempty" json:"github_hostname,omitempty"` + OptionalStages struct { + SingleEndpointNotesImport *bool `url:"single_endpoint_notes_import,omitempty" json:"single_endpoint_notes_import,omitempty"` + AttachmentsImport *bool `url:"attachments_import,omitempty" json:"attachments_import,omitempty"` + CollaboratorsImport *bool `url:"collaborators_import,omitempty" json:"collaborators_import,omitempty"` + } `url:"optional_stages,omitempty" json:"optional_stages,omitempty"` + TimeoutStrategy *string `url:"timeout_strategy,omitempty" json:"timeout_strategy,omitempty"` +} + +// Import a repository from GitHub. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-repository-from-github +func (s *ImportService) ImportRepositoryFromGitHub(opt *ImportRepositoryFromGitHubOptions, options ...RequestOptionFunc) (*GitHubImport, *Response, error) { + req, err := s.client.NewRequest(http.MethodPost, "import/github", opt, options) + if err != nil { + return nil, nil, err + } + + gi := new(GitHubImport) + resp, err := s.client.Do(req, gi) + if err != nil { + return nil, resp, err + } + + return gi, resp, nil +} + +// CancelledGitHubImport represents the response when canceling +// an import from GitHub. 
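+//
+// This is the payload returned by CancelGitHubProjectImport below; a usage
+// sketch (editor's illustration; the Import accessor on the client and the
+// project ID are assumptions):
+//
+//	cancelled, _, err := git.Import.CancelGitHubProjectImport(
+//		&gitlab.CancelGitHubProjectImportOptions{
+//			ProjectID: gitlab.Ptr(1234),
+//		})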
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#cancel-github-project-import +type CancelledGitHubImport struct { + ID int `json:"id"` + Name string `json:"name"` + FullPath string `json:"full_path"` + FullName string `json:"full_name"` + ImportSource string `json:"import_source"` + ImportStatus string `json:"import_status"` + HumanImportStatusName string `json:"human_import_status_name"` + ProviderLink string `json:"provider_link"` +} + +func (s CancelledGitHubImport) String() string { + return Stringify(s) +} + +// CancelGitHubProjectImportOptions represents the available +// CancelGitHubProjectImport() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#cancel-github-project-import +type CancelGitHubProjectImportOptions struct { + ProjectID *int `url:"project_id,omitempty" json:"project_id,omitempty"` +} + +// Cancel an import of a repository from GitHub. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#cancel-github-project-import +func (s *ImportService) CancelGitHubProjectImport(opt *CancelGitHubProjectImportOptions, options ...RequestOptionFunc) (*CancelledGitHubImport, *Response, error) { + req, err := s.client.NewRequest(http.MethodPost, "import/github/cancel", opt, options) + if err != nil { + return nil, nil, err + } + + cgi := new(CancelledGitHubImport) + resp, err := s.client.Do(req, cgi) + if err != nil { + return nil, resp, err + } + + return cgi, resp, nil +} + +// ImportGitHubGistsIntoGitLabSnippetsOptions represents the available +// ImportGitHubGistsIntoGitLabSnippets() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-github-gists-into-gitlab-snippets +type ImportGitHubGistsIntoGitLabSnippetsOptions struct { + PersonalAccessToken *string `url:"personal_access_token,omitempty" json:"personal_access_token,omitempty"` +} + +// Import personal GitHub Gists into personal GitLab Snippets. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-github-gists-into-gitlab-snippets +func (s *ImportService) ImportGitHubGistsIntoGitLabSnippets(opt *ImportGitHubGistsIntoGitLabSnippetsOptions, options ...RequestOptionFunc) (*Response, error) { + req, err := s.client.NewRequest(http.MethodPost, "import/github/gists", opt, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// BitbucketServerImport represents the response from an import from Bitbucket +// Server. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-repository-from-bitbucket-server +type BitbucketServerImport struct { + ID int `json:"id"` + Name string `json:"name"` + FullPath string `json:"full_path"` + FullName string `json:"full_name"` + RefsUrl string `json:"refs_url"` +} + +func (s BitbucketServerImport) String() string { + return Stringify(s) +} + +// ImportRepositoryFromBitbucketServerOptions represents the available ImportRepositoryFromBitbucketServer() options. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-repository-from-bitbucket-server +type ImportRepositoryFromBitbucketServerOptions struct { + BitbucketServerUrl *string `url:"bitbucket_server_url,omitempty" json:"bitbucket_server_url,omitempty"` + BitbucketServerUsername *string `url:"bitbucket_server_username,omitempty" json:"bitbucket_server_username,omitempty"` + PersonalAccessToken *string `url:"personal_access_token,omitempty" json:"personal_access_token,omitempty"` + BitbucketServerProject *string `url:"bitbucket_server_project,omitempty" json:"bitbucket_server_project,omitempty"` + BitbucketServerRepo *string `url:"bitbucket_server_repo,omitempty" json:"bitbucket_server_repo,omitempty"` + NewName *string `url:"new_name,omitempty" json:"new_name,omitempty"` + NewNamespace *string `url:"new_namespace,omitempty" json:"new_namespace,omitempty"` + TimeoutStrategy *string `url:"timeout_strategy,omitempty" json:"timeout_strategy,omitempty"` +} + +// Import a repository from Bitbucket Server. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-repository-from-bitbucket-server +func (s *ImportService) ImportRepositoryFromBitbucketServer(opt *ImportRepositoryFromBitbucketServerOptions, options ...RequestOptionFunc) (*BitbucketServerImport, *Response, error) { + req, err := s.client.NewRequest(http.MethodPost, "import/bitbucket_server", opt, options) + if err != nil { + return nil, nil, err + } + + bsi := new(BitbucketServerImport) + resp, err := s.client.Do(req, bsi) + if err != nil { + return nil, resp, err + } + + return bsi, resp, nil +} + +// BitbucketCloudImport represents the response from an import from Bitbucket +// Cloud. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-repository-from-bitbucket-cloud +type BitbucketCloudImport struct { + ID int `json:"id"` + Name string `json:"name"` + FullPath string `json:"full_path"` + FullName string `json:"full_name"` + RefsUrl string `json:"refs_url"` + ImportSource string `json:"import_source"` + ImportStatus string `json:"import_status"` + HumanImportStatusName string `json:"human_import_status_name"` + ProviderLink string `json:"provider_link"` + RelationType string `json:"relation_type"` + ImportWarning string `json:"import_warning"` +} + +func (s BitbucketCloudImport) String() string { + return Stringify(s) +} + +// ImportRepositoryFromBitbucketCloudOptions represents the available +// ImportRepositoryFromBitbucketCloud() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-repository-from-bitbucket-cloud +type ImportRepositoryFromBitbucketCloudOptions struct { + BitbucketUsername *string `url:"bitbucket_username,omitempty" json:"bitbucket_username,omitempty"` + BitbucketAppPassword *string `url:"bitbucket_app_password,omitempty" json:"bitbucket_app_password,omitempty"` + RepoPath *string `url:"repo_path,omitempty" json:"repo_path,omitempty"` + TargetNamespace *string `url:"target_namespace,omitempty" json:"target_namespace,omitempty"` + NewName *string `url:"new_name,omitempty" json:"new_name,omitempty"` +} + +// Import a repository from Bitbucket Cloud. 
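+//
+// Usage sketch (editor's illustration; the Import accessor on the client is
+// an assumption and the credential values are placeholders):
+//
+//	bci, _, err := git.Import.ImportRepositoryFromBitbucketCloud(
+//		&gitlab.ImportRepositoryFromBitbucketCloudOptions{
+//			BitbucketUsername:    gitlab.Ptr("user"),
+//			BitbucketAppPassword: gitlab.Ptr("app-password"),
+//			RepoPath:             gitlab.Ptr("workspace/repo"),
+//		})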
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/import.html#import-repository-from-bitbucket-cloud +func (s *ImportService) ImportRepositoryFromBitbucketCloud(opt *ImportRepositoryFromBitbucketCloudOptions, options ...RequestOptionFunc) (*BitbucketCloudImport, *Response, error) { + req, err := s.client.NewRequest(http.MethodPost, "import/bitbucket", opt, options) + if err != nil { + return nil, nil, err + } + + bci := new(BitbucketCloudImport) + resp, err := s.client.Do(req, bci) + if err != nil { + return nil, resp, err + } + + return bci, resp, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/instance_clusters.go b/vendor/github.com/xanzy/go-gitlab/instance_clusters.go new file mode 100644 index 0000000000..0760d11579 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/instance_clusters.go @@ -0,0 +1,153 @@ +// +// Copyright 2021, Serena Fang +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" + "time" +) + +// InstanceClustersService handles communication with the +// instance clusters related methods of the GitLab API. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/instance_clusters.html +type InstanceClustersService struct { + client *Client +} + +// InstanceCluster represents a GitLab Instance Cluster. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/instance_clusters.html +type InstanceCluster struct { + ID int `json:"id"` + Name string `json:"name"` + Domain string `json:"domain"` + Managed bool `json:"managed"` + CreatedAt *time.Time `json:"created_at"` + ProviderType string `json:"provider_type"` + PlatformType string `json:"platform_type"` + EnvironmentScope string `json:"environment_scope"` + ClusterType string `json:"cluster_type"` + User *User `json:"user"` + PlatformKubernetes *PlatformKubernetes `json:"platform_kubernetes"` + ManagementProject *ManagementProject `json:"management_project"` +} + +func (v InstanceCluster) String() string { + return Stringify(v) +} + +// ListClusters gets a list of all instance clusters. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/instance_clusters.html#list-instance-clusters +func (s *InstanceClustersService) ListClusters(options ...RequestOptionFunc) ([]*InstanceCluster, *Response, error) { + u := "admin/clusters" + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + var ics []*InstanceCluster + resp, err := s.client.Do(req, &ics) + if err != nil { + return nil, resp, err + } + + return ics, resp, nil +} + +// GetCluster gets an instance cluster. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/instance_clusters.html#get-a-single-instance-cluster +func (s *InstanceClustersService) GetCluster(cluster int, options ...RequestOptionFunc) (*InstanceCluster, *Response, error) { + u := fmt.Sprintf("admin/clusters/%d", cluster) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + ic := new(InstanceCluster) + resp, err := s.client.Do(req, ic) + if err != nil { + return nil, resp, err + } + + return ic, resp, nil +} + +// AddCluster adds an existing cluster to the instance. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/instance_clusters.html#add-existing-instance-cluster +func (s *InstanceClustersService) AddCluster(opt *AddClusterOptions, options ...RequestOptionFunc) (*InstanceCluster, *Response, error) { + u := "admin/clusters/add" + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + ic := new(InstanceCluster) + resp, err := s.client.Do(req, ic) + if err != nil { + return nil, resp, err + } + + return ic, resp, nil +} + +// EditCluster updates an existing instance cluster. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/instance_clusters.html#edit-instance-cluster +func (s *InstanceClustersService) EditCluster(cluster int, opt *EditClusterOptions, options ...RequestOptionFunc) (*InstanceCluster, *Response, error) { + u := fmt.Sprintf("admin/clusters/%d", cluster) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, nil, err + } + + ic := new(InstanceCluster) + resp, err := s.client.Do(req, ic) + if err != nil { + return nil, resp, err + } + + return ic, resp, nil +} + +// DeleteCluster deletes an existing instance cluster. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/instance_clusters.html#delete-instance-cluster +func (s *InstanceClustersService) DeleteCluster(cluster int, options ...RequestOptionFunc) (*Response, error) { + u := fmt.Sprintf("admin/clusters/%d", cluster) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/instance_variables.go b/vendor/github.com/xanzy/go-gitlab/instance_variables.go new file mode 100644 index 0000000000..58eef2b272 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/instance_variables.go @@ -0,0 +1,186 @@ +// +// Copyright 2021, Patrick Webster +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" + "net/url" +) + +// InstanceVariablesService handles communication with the +// instance level CI variables related methods of the GitLab API. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/instance_level_ci_variables.html +type InstanceVariablesService struct { + client *Client +} + +// InstanceVariable represents a GitLab instance level CI Variable.
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/instance_level_ci_variables.html +type InstanceVariable struct { + Key string `json:"key"` + Value string `json:"value"` + VariableType VariableTypeValue `json:"variable_type"` + Protected bool `json:"protected"` + Masked bool `json:"masked"` + Raw bool `json:"raw"` + Description string `json:"description"` +} + +func (v InstanceVariable) String() string { + return Stringify(v) +} + +// ListInstanceVariablesOptions represents the available options for listing variables +// for an instance. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/instance_level_ci_variables.html#list-all-instance-variables +type ListInstanceVariablesOptions ListOptions + +// ListVariables gets a list of all variables for an instance. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/instance_level_ci_variables.html#list-all-instance-variables +func (s *InstanceVariablesService) ListVariables(opt *ListInstanceVariablesOptions, options ...RequestOptionFunc) ([]*InstanceVariable, *Response, error) { + u := "admin/ci/variables" + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var vs []*InstanceVariable + resp, err := s.client.Do(req, &vs) + if err != nil { + return nil, resp, err + } + + return vs, resp, nil +} + +// GetVariable gets a variable. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/instance_level_ci_variables.html#show-instance-variable-details +func (s *InstanceVariablesService) GetVariable(key string, options ...RequestOptionFunc) (*InstanceVariable, *Response, error) { + u := fmt.Sprintf("admin/ci/variables/%s", url.PathEscape(key)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + v := new(InstanceVariable) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, nil +} + +// CreateInstanceVariableOptions represents the available CreateVariable() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/instance_level_ci_variables.html#create-instance-variable +type CreateInstanceVariableOptions struct { + Key *string `url:"key,omitempty" json:"key,omitempty"` + Value *string `url:"value,omitempty" json:"value,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + Masked *bool `url:"masked,omitempty" json:"masked,omitempty"` + Protected *bool `url:"protected,omitempty" json:"protected,omitempty"` + Raw *bool `url:"raw,omitempty" json:"raw,omitempty"` + VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` +} + +// CreateVariable creates a new instance level CI variable. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/instance_level_ci_variables.html#create-instance-variable +func (s *InstanceVariablesService) CreateVariable(opt *CreateInstanceVariableOptions, options ...RequestOptionFunc) (*InstanceVariable, *Response, error) { + u := "admin/ci/variables" + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + v := new(InstanceVariable) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, nil +} + +// UpdateInstanceVariableOptions represents the available UpdateVariable() +// options. 
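+//
+// Usage sketch (editor's illustration; git is assumed to be a
+// *gitlab.Client, and the key and values are assumptions):
+//
+//	v, _, err := git.InstanceVariables.UpdateVariable("DEPLOY_TOKEN",
+//		&gitlab.UpdateInstanceVariableOptions{
+//			Value:  gitlab.Ptr("new-value"),
+//			Masked: gitlab.Ptr(true),
+//		})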
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/instance_level_ci_variables.html#update-instance-variable +type UpdateInstanceVariableOptions struct { + Value *string `url:"value,omitempty" json:"value,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + Masked *bool `url:"masked,omitempty" json:"masked,omitempty"` + Protected *bool `url:"protected,omitempty" json:"protected,omitempty"` + Raw *bool `url:"raw,omitempty" json:"raw,omitempty"` + VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` +} + +// UpdateVariable updates an existing instance level CI variable. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/instance_level_ci_variables.html#update-instance-variable +func (s *InstanceVariablesService) UpdateVariable(key string, opt *UpdateInstanceVariableOptions, options ...RequestOptionFunc) (*InstanceVariable, *Response, error) { + u := fmt.Sprintf("admin/ci/variables/%s", url.PathEscape(key)) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, nil, err + } + + v := new(InstanceVariable) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, nil +} + +// RemoveVariable removes an instance level CI variable. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/instance_level_ci_variables.html#remove-instance-variable +func (s *InstanceVariablesService) RemoveVariable(key string, options ...RequestOptionFunc) (*Response, error) { + u := fmt.Sprintf("admin/ci/variables/%s", url.PathEscape(key)) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/invites.go b/vendor/github.com/xanzy/go-gitlab/invites.go new file mode 100644 index 0000000000..62bad26d22 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/invites.go @@ -0,0 +1,176 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" + "time" +) + +// InvitesService handles communication with the invitation related +// methods of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/invitations.html +type InvitesService struct { + client *Client +} + +// PendingInvite represents a pending invite. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/invitations.html +type PendingInvite struct { + ID int `json:"id"` + InviteEmail string `json:"invite_email"` + CreatedAt *time.Time `json:"created_at"` + AccessLevel AccessLevelValue `json:"access_level"` + ExpiresAt *time.Time `json:"expires_at"` + UserName string `json:"user_name"` + CreatedByName string `json:"created_by_name"` +} + +// ListPendingInvitationsOptions represents the available +// ListPendingInvitations() options.
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/invitations.html#list-all-invitations-pending-for-a-group-or-project +type ListPendingInvitationsOptions struct { + ListOptions + Query *string `url:"query,omitempty" json:"query,omitempty"` +} + +// ListPendingGroupInvitations gets a list of invited group members. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/invitations.html#list-all-invitations-pending-for-a-group-or-project +func (s *InvitesService) ListPendingGroupInvitations(gid interface{}, opt *ListPendingInvitationsOptions, options ...RequestOptionFunc) ([]*PendingInvite, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/invitations", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var pis []*PendingInvite + resp, err := s.client.Do(req, &pis) + if err != nil { + return nil, resp, err + } + + return pis, resp, nil +} + +// ListPendingProjectInvitations gets a list of invited project members. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/invitations.html#list-all-invitations-pending-for-a-group-or-project +func (s *InvitesService) ListPendingProjectInvitations(pid interface{}, opt *ListPendingInvitationsOptions, options ...RequestOptionFunc) ([]*PendingInvite, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/invitations", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var pis []*PendingInvite + resp, err := s.client.Do(req, &pis) + if err != nil { + return nil, resp, err + } + + return pis, resp, nil +} + +// InvitesOptions represents the available GroupInvites() and ProjectInvites() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/invitations.html#add-a-member-to-a-group-or-project +type InvitesOptions struct { + ID interface{} `url:"id,omitempty" json:"id,omitempty"` + Email *string `url:"email,omitempty" json:"email,omitempty"` + UserID interface{} `url:"user_id,omitempty" json:"user_id,omitempty"` + AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` + ExpiresAt *ISOTime `url:"expires_at,omitempty" json:"expires_at,omitempty"` +} + +// InvitesResult represents an invitations result. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/invitations.html#add-a-member-to-a-group-or-project +type InvitesResult struct { + Status string `json:"status"` + Message map[string]string `json:"message,omitempty"` +} + +// GroupInvites invites new users by email to join a group. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/invitations.html#add-a-member-to-a-group-or-project +func (s *InvitesService) GroupInvites(gid interface{}, opt *InvitesOptions, options ...RequestOptionFunc) (*InvitesResult, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/invitations", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + ir := new(InvitesResult) + resp, err := s.client.Do(req, ir) + if err != nil { + return nil, resp, err + } + + return ir, resp, nil +} + +// ProjectInvites invites new users by email to join a project. 
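+//
+// Usage sketch (editor's illustration; git is assumed to be a
+// *gitlab.Client, and the project path and email are assumptions):
+//
+//	res, _, err := git.Invites.ProjectInvites("group/project",
+//		&gitlab.InvitesOptions{
+//			Email:       gitlab.Ptr("dev@example.com"),
+//			AccessLevel: gitlab.Ptr(gitlab.DeveloperPermissions),
+//		})
+//	// On failure res.Status is typically "error" and res.Message maps each
+//	// failing email to a reason.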
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/invitations.html#add-a-member-to-a-group-or-project +func (s *InvitesService) ProjectInvites(pid interface{}, opt *InvitesOptions, options ...RequestOptionFunc) (*InvitesResult, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/invitations", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + ir := new(InvitesResult) + resp, err := s.client.Do(req, ir) + if err != nil { + return nil, resp, err + } + + return ir, resp, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/issue_links.go b/vendor/github.com/xanzy/go-gitlab/issue_links.go new file mode 100644 index 0000000000..d5fcae0ddd --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/issue_links.go @@ -0,0 +1,186 @@ +// +// Copyright 2021, Arkbriar +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" + "time" +) + +// IssueLinksService handles communication with the issue link related methods +// of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/issue_links.html +type IssueLinksService struct { + client *Client +} + +// IssueLink represents a two-way relation between two issues. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/issue_links.html +type IssueLink struct { + SourceIssue *Issue `json:"source_issue"` + TargetIssue *Issue `json:"target_issue"` + LinkType string `json:"link_type"` +} + +// IssueRelation represents a relation between two issues. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/issue_links.html#list-issue-relations +type IssueRelation struct { + ID int `json:"id"` + IID int `json:"iid"` + State string `json:"state"` + Description string `json:"description"` + Confidential bool `json:"confidential"` + Author *IssueAuthor `json:"author"` + Milestone *Milestone `json:"milestone"` + ProjectID int `json:"project_id"` + Assignees []*IssueAssignee `json:"assignees"` + Assignee *IssueAssignee `json:"assignee"` + UpdatedAt *time.Time `json:"updated_at"` + Title string `json:"title"` + CreatedAt *time.Time `json:"created_at"` + Labels Labels `json:"labels"` + DueDate *ISOTime `json:"due_date"` + WebURL string `json:"web_url"` + References *IssueReferences `json:"references"` + Weight int `json:"weight"` + UserNotesCount int `json:"user_notes_count"` + IssueLinkID int `json:"issue_link_id"` + LinkType string `json:"link_type"` + LinkCreatedAt *time.Time `json:"link_created_at"` + LinkUpdatedAt *time.Time `json:"link_updated_at"` +} + +// ListIssueRelations gets a list of related issues of a given issue, +// sorted by the relationship creation datetime (ascending). +// +// Issues will be filtered according to the user authorizations.
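+//
+// Usage sketch (editor's illustration; git is assumed to be a
+// *gitlab.Client, and the project path and issue IID are assumptions):
+//
+//	relations, _, err := git.IssueLinks.ListIssueRelations("group/project", 7)
+//	if err == nil {
+//		for _, r := range relations {
+//			fmt.Println(r.IID, r.LinkType, r.Title)
+//		}
+//	}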
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/issue_links.html#list-issue-relations
+func (s *IssueLinksService) ListIssueRelations(pid interface{}, issue int, options ...RequestOptionFunc) ([]*IssueRelation, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/issues/%d/links", PathEscape(project), issue)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var is []*IssueRelation
+	resp, err := s.client.Do(req, &is)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return is, resp, nil
+}
+
+// GetIssueLink gets a specific issue link.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/issue_links.html#get-an-issue-link
+func (s *IssueLinksService) GetIssueLink(pid interface{}, issue, issueLink int, options ...RequestOptionFunc) (*IssueLink, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/issues/%d/links/%d", PathEscape(project), issue, issueLink)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	il := new(IssueLink)
+	resp, err := s.client.Do(req, il)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return il, resp, nil
+}
+
+// CreateIssueLinkOptions represents the available CreateIssueLink() options.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/issue_links.html#create-an-issue-link
+type CreateIssueLinkOptions struct {
+	TargetProjectID *string `json:"target_project_id"`
+	TargetIssueIID *string `json:"target_issue_iid"`
+	LinkType *string `json:"link_type"`
+}
+
+// CreateIssueLink creates a two-way relation between two issues.
+// The user must be allowed to update both issues in order to succeed.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/issue_links.html#create-an-issue-link
+func (s *IssueLinksService) CreateIssueLink(pid interface{}, issue int, opt *CreateIssueLinkOptions, options ...RequestOptionFunc) (*IssueLink, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/issues/%d/links", PathEscape(project), issue)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	i := new(IssueLink)
+	resp, err := s.client.Do(req, &i)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return i, resp, nil
+}
+
+// DeleteIssueLink deletes an issue link, thus removing the two-way relationship.
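+//
+// A usage sketch, not part of the upstream docs: "git" is an assumed
+// authenticated *gitlab.Client, and the issue IID and link ID are
+// illustrative:
+//
+//	link, _, err := git.IssueLinks.DeleteIssueLink("group/project", 4, 1)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(link.LinkType) // e.g. "relates_to"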
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/issue_links.html#delete-an-issue-link
+func (s *IssueLinksService) DeleteIssueLink(pid interface{}, issue, issueLink int, options ...RequestOptionFunc) (*IssueLink, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/issues/%d/links/%d",
+		PathEscape(project),
+		issue,
+		issueLink)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	il := new(IssueLink)
+	resp, err := s.client.Do(req, &il)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return il, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/issues.go b/vendor/github.com/xanzy/go-gitlab/issues.go
new file mode 100644
index 0000000000..eecccc475e
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/issues.go
@@ -0,0 +1,791 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"reflect"
+	"time"
+)
+
+// IssuesService handles communication with the issue related methods
+// of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html
+type IssuesService struct {
+	client *Client
+	timeStats *timeStatsService
+}
+
+// IssueAuthor represents an author of the issue.
+type IssueAuthor struct {
+	ID int `json:"id"`
+	State string `json:"state"`
+	WebURL string `json:"web_url"`
+	Name string `json:"name"`
+	AvatarURL string `json:"avatar_url"`
+	Username string `json:"username"`
+}
+
+// IssueAssignee represents an assignee of the issue.
+type IssueAssignee struct {
+	ID int `json:"id"`
+	State string `json:"state"`
+	WebURL string `json:"web_url"`
+	Name string `json:"name"`
+	AvatarURL string `json:"avatar_url"`
+	Username string `json:"username"`
+}
+
+// IssueReferences represents references of the issue.
+type IssueReferences struct {
+	Short string `json:"short"`
+	Relative string `json:"relative"`
+	Full string `json:"full"`
+}
+
+// IssueCloser represents a closer of the issue.
+type IssueCloser struct {
+	ID int `json:"id"`
+	State string `json:"state"`
+	WebURL string `json:"web_url"`
+	Name string `json:"name"`
+	AvatarURL string `json:"avatar_url"`
+	Username string `json:"username"`
+}
+
+// IssueLinks represents links of the issue.
+type IssueLinks struct {
+	Self string `json:"self"`
+	Notes string `json:"notes"`
+	AwardEmoji string `json:"award_emoji"`
+	Project string `json:"project"`
+}
+
+// Issue represents a GitLab issue.
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html +type Issue struct { + ID int `json:"id"` + IID int `json:"iid"` + ExternalID string `json:"external_id"` + State string `json:"state"` + Description string `json:"description"` + HealthStatus string `json:"health_status"` + Author *IssueAuthor `json:"author"` + Milestone *Milestone `json:"milestone"` + ProjectID int `json:"project_id"` + Assignees []*IssueAssignee `json:"assignees"` + Assignee *IssueAssignee `json:"assignee"` + UpdatedAt *time.Time `json:"updated_at"` + ClosedAt *time.Time `json:"closed_at"` + ClosedBy *IssueCloser `json:"closed_by"` + Title string `json:"title"` + CreatedAt *time.Time `json:"created_at"` + MovedToID int `json:"moved_to_id"` + Labels Labels `json:"labels"` + LabelDetails []*LabelDetails `json:"label_details"` + Upvotes int `json:"upvotes"` + Downvotes int `json:"downvotes"` + DueDate *ISOTime `json:"due_date"` + WebURL string `json:"web_url"` + References *IssueReferences `json:"references"` + TimeStats *TimeStats `json:"time_stats"` + Confidential bool `json:"confidential"` + Weight int `json:"weight"` + DiscussionLocked bool `json:"discussion_locked"` + IssueType *string `json:"issue_type,omitempty"` + Subscribed bool `json:"subscribed"` + UserNotesCount int `json:"user_notes_count"` + Links *IssueLinks `json:"_links"` + IssueLinkID int `json:"issue_link_id"` + MergeRequestCount int `json:"merge_requests_count"` + EpicIssueID int `json:"epic_issue_id"` + Epic *Epic `json:"epic"` + Iteration *GroupIteration `json:"iteration"` + TaskCompletionStatus *TasksCompletionStatus `json:"task_completion_status"` +} + +func (i Issue) String() string { + return Stringify(i) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (i *Issue) UnmarshalJSON(data []byte) error { + type alias Issue + + raw := make(map[string]interface{}) + err := json.Unmarshal(data, &raw) + if err != nil { + return err + } + + if reflect.TypeOf(raw["id"]).Kind() == reflect.String { + raw["external_id"] = raw["id"] + delete(raw, "id") + } + + labelDetails, ok := raw["labels"].([]interface{}) + if ok && len(labelDetails) > 0 { + // We only want to change anything if we got label details. + if _, ok := labelDetails[0].(map[string]interface{}); ok { + labels := make([]interface{}, len(labelDetails)) + for i, details := range labelDetails { + labels[i] = details.(map[string]interface{})["name"] + } + + // Set the correct values + raw["labels"] = labels + raw["label_details"] = labelDetails + } + } + + data, err = json.Marshal(raw) + if err != nil { + return err + } + + return json.Unmarshal(data, (*alias)(i)) +} + +// LabelDetails represents detailed label information. +type LabelDetails struct { + ID int `json:"id"` + Name string `json:"name"` + Color string `json:"color"` + Description string `json:"description"` + DescriptionHTML string `json:"description_html"` + TextColor string `json:"text_color"` +} + +// ListIssuesOptions represents the available ListIssues() options. 
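+//
+// Most filter fields are pointers so that unset fields are omitted from the
+// request; options are therefore usually built from local variables. A
+// sketch, not part of the upstream docs (the values and the "git" client
+// are illustrative):
+//
+//	state := "opened"
+//	opts := &gitlab.ListIssuesOptions{
+//		ListOptions: gitlab.ListOptions{PerPage: 50},
+//		State:       &state,
+//	}
+//	issues, _, err := git.Issues.ListIssues(opts)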
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#list-issues
+type ListIssuesOptions struct {
+	ListOptions
+	State *string `url:"state,omitempty" json:"state,omitempty"`
+	Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"`
+	NotLabels *LabelOptions `url:"not[labels],comma,omitempty" json:"not[labels],omitempty"`
+	WithLabelDetails *bool `url:"with_labels_details,omitempty" json:"with_labels_details,omitempty"`
+	Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"`
+	NotMilestone *string `url:"not[milestone],omitempty" json:"not[milestone],omitempty"`
+	Scope *string `url:"scope,omitempty" json:"scope,omitempty"`
+	AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"`
+	AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"`
+	NotAuthorUsername *string `url:"not[author_username],omitempty" json:"not[author_username],omitempty"`
+	NotAuthorID *[]int `url:"not[author_id],omitempty" json:"not[author_id],omitempty"`
+	AssigneeID *AssigneeIDValue `url:"assignee_id,omitempty" json:"assignee_id,omitempty"`
+	NotAssigneeID *[]int `url:"not[assignee_id],omitempty" json:"not[assignee_id],omitempty"`
+	AssigneeUsername *string `url:"assignee_username,omitempty" json:"assignee_username,omitempty"`
+	NotAssigneeUsername *string `url:"not[assignee_username],omitempty" json:"not[assignee_username],omitempty"`
+	MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"`
+	NotMyReactionEmoji *[]string `url:"not[my_reaction_emoji],omitempty" json:"not[my_reaction_emoji],omitempty"`
+	IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"`
+	In *string `url:"in,omitempty" json:"in,omitempty"`
+	NotIn *string `url:"not[in],omitempty" json:"not[in],omitempty"`
+	OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"`
+	Sort *string `url:"sort,omitempty" json:"sort,omitempty"`
+	Search *string `url:"search,omitempty" json:"search,omitempty"`
+	NotSearch *string `url:"not[search],omitempty" json:"not[search],omitempty"`
+	CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"`
+	CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"`
+	DueDate *string `url:"due_date,omitempty" json:"due_date,omitempty"`
+	UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"`
+	UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"`
+	Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"`
+	IssueType *string `url:"issue_type,omitempty" json:"issue_type,omitempty"`
+	IterationID *int `url:"iteration_id,omitempty" json:"iteration_id,omitempty"`
+}
+
+// ListIssues gets all issues created by the authenticated user. This function
+// takes pagination parameters page and per_page to restrict the list of issues.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#list-issues
+func (s *IssuesService) ListIssues(opt *ListIssuesOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodGet, "issues", opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var i []*Issue
+	resp, err := s.client.Do(req, &i)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return i, resp, nil
+}
+
+// ListGroupIssuesOptions represents the available ListGroupIssues() options.
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#list-group-issues +type ListGroupIssuesOptions struct { + ListOptions + State *string `url:"state,omitempty" json:"state,omitempty"` + Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` + NotLabels *LabelOptions `url:"not[labels],comma,omitempty" json:"not[labels],omitempty"` + WithLabelDetails *bool `url:"with_labels_details,omitempty" json:"with_labels_details,omitempty"` + IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` + Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"` + NotMilestone *string `url:"not[milestone],omitempty" json:"not[milestone],omitempty"` + Scope *string `url:"scope,omitempty" json:"scope,omitempty"` + AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` + NotAuthorID *[]int `url:"not[author_id],omitempty" json:"not[author_id],omitempty"` + AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"` + NotAuthorUsername *string `url:"not[author_username],omitempty" json:"not[author_username],omitempty"` + + AssigneeID *AssigneeIDValue `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` + NotAssigneeID *[]int `url:"not[assignee_id],omitempty" json:"not[assignee_id],omitempty"` + AssigneeUsername *string `url:"assignee_username,omitempty" json:"assignee_username,omitempty"` + NotAssigneeUsername *string `url:"not[assignee_username],omitempty" json:"not[assignee_username],omitempty"` + MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"` + NotMyReactionEmoji *[]string `url:"not[my_reaction_emoji],omitempty" json:"not[my_reaction_emoji],omitempty"` + OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` + Sort *string `url:"sort,omitempty" json:"sort,omitempty"` + Search *string `url:"search,omitempty" json:"search,omitempty"` + NotSearch *string `url:"not[search],omitempty" json:"not[search],omitempty"` + In *string `url:"in,omitempty" json:"in,omitempty"` + NotIn *string `url:"not[in],omitempty" json:"not[in],omitempty"` + CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"` + CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` + DueDate *string `url:"due_date,omitempty" json:"due_date,omitempty"` + UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"` + UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"` + IssueType *string `url:"issue_type,omitempty" json:"issue_type,omitempty"` + IterationID *int `url:"iteration_id,omitempty" json:"iteration_id,omitempty"` +} + +// ListGroupIssues gets a list of group issues. This function accepts +// pagination parameters page and per_page to return the list of group issues. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#list-group-issues +func (s *IssuesService) ListGroupIssues(pid interface{}, opt *ListGroupIssuesOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { + group, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/issues", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var i []*Issue + resp, err := s.client.Do(req, &i) + if err != nil { + return nil, resp, err + } + + return i, resp, nil +} + +// ListProjectIssuesOptions represents the available ListProjectIssues() options. 
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#list-project-issues +type ListProjectIssuesOptions struct { + ListOptions + IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` + State *string `url:"state,omitempty" json:"state,omitempty"` + Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` + NotLabels *LabelOptions `url:"not[labels],comma,omitempty" json:"not[labels],omitempty"` + WithLabelDetails *bool `url:"with_labels_details,omitempty" json:"with_labels_details,omitempty"` + Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"` + NotMilestone *string `url:"not[milestone],omitempty" json:"not[milestone],omitempty"` + Scope *string `url:"scope,omitempty" json:"scope,omitempty"` + AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` + AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"` + NotAuthorUsername *string `url:"not[author_username],omitempty" json:"not[author_username],omitempty"` + NotAuthorID *[]int `url:"not[author_id],omitempty" json:"not[author_id],omitempty"` + AssigneeID *AssigneeIDValue `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` + NotAssigneeID *[]int `url:"not[assignee_id],omitempty" json:"not[assignee_id],omitempty"` + AssigneeUsername *string `url:"assignee_username,omitempty" json:"assignee_username,omitempty"` + NotAssigneeUsername *string `url:"not[assignee_username],omitempty" json:"not[assignee_username],omitempty"` + MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"` + NotMyReactionEmoji *[]string `url:"not[my_reaction_emoji],omitempty" json:"not[my_reaction_emoji],omitempty"` + OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` + Sort *string `url:"sort,omitempty" json:"sort,omitempty"` + Search *string `url:"search,omitempty" json:"search,omitempty"` + In *string `url:"in,omitempty" json:"in,omitempty"` + NotIn *string `url:"not[in],omitempty" json:"not[in],omitempty"` + CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"` + CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` + DueDate *string `url:"due_date,omitempty" json:"due_date,omitempty"` + UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"` + UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"` + Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` + IssueType *string `url:"issue_type,omitempty" json:"issue_type,omitempty"` + IterationID *int `url:"iteration_id,omitempty" json:"iteration_id,omitempty"` +} + +// ListProjectIssues gets a list of project issues. This function accepts +// pagination parameters page and per_page to return the list of project issues. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#list-project-issues +func (s *IssuesService) ListProjectIssues(pid interface{}, opt *ListProjectIssuesOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/issues", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var i []*Issue + resp, err := s.client.Do(req, &i) + if err != nil { + return nil, resp, err + } + + return i, resp, nil +} + +// GetIssueByID gets a single issue. 
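+//
+// Unlike GetIssue below, this call takes the issue's globally unique ID
+// rather than its project-scoped IID. A sketch, not part of the upstream
+// docs (the ID and the "git" client are illustrative):
+//
+//	issue, _, err := git.Issues.GetIssueByID(14)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(issue.Title)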
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#single-issue +func (s *IssuesService) GetIssueByID(issue int, options ...RequestOptionFunc) (*Issue, *Response, error) { + u := fmt.Sprintf("issues/%d", issue) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + i := new(Issue) + resp, err := s.client.Do(req, i) + if err != nil { + return nil, resp, err + } + + return i, resp, nil +} + +// GetIssue gets a single project issue. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#single-project-issue +func (s *IssuesService) GetIssue(pid interface{}, issue int, options ...RequestOptionFunc) (*Issue, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/issues/%d", PathEscape(project), issue) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + i := new(Issue) + resp, err := s.client.Do(req, i) + if err != nil { + return nil, resp, err + } + + return i, resp, nil +} + +// CreateIssueOptions represents the available CreateIssue() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#new-issue +type CreateIssueOptions struct { + IID *int `url:"iid,omitempty" json:"iid,omitempty"` + Title *string `url:"title,omitempty" json:"title,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` + AssigneeIDs *[]int `url:"assignee_ids,omitempty" json:"assignee_ids,omitempty"` + MilestoneID *int `url:"milestone_id,omitempty" json:"milestone_id,omitempty"` + Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` + CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"` + DueDate *ISOTime `url:"due_date,omitempty" json:"due_date,omitempty"` + EpicID *int `url:"epic_id,omitempty" json:"epic_id,omitempty"` + MergeRequestToResolveDiscussionsOf *int `url:"merge_request_to_resolve_discussions_of,omitempty" json:"merge_request_to_resolve_discussions_of,omitempty"` + DiscussionToResolve *string `url:"discussion_to_resolve,omitempty" json:"discussion_to_resolve,omitempty"` + Weight *int `url:"weight,omitempty" json:"weight,omitempty"` + IssueType *string `url:"issue_type,omitempty" json:"issue_type,omitempty"` +} + +// CreateIssue creates a new project issue. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#new-issue +func (s *IssuesService) CreateIssue(pid interface{}, opt *CreateIssueOptions, options ...RequestOptionFunc) (*Issue, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/issues", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + i := new(Issue) + resp, err := s.client.Do(req, i) + if err != nil { + return nil, resp, err + } + + return i, resp, nil +} + +// UpdateIssueOptions represents the available UpdateIssue() options. 
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#edit-issue
+type UpdateIssueOptions struct {
+	Title *string `url:"title,omitempty" json:"title,omitempty"`
+	Description *string `url:"description,omitempty" json:"description,omitempty"`
+	Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"`
+	AssigneeIDs *[]int `url:"assignee_ids,omitempty" json:"assignee_ids,omitempty"`
+	MilestoneID *int `url:"milestone_id,omitempty" json:"milestone_id,omitempty"`
+	Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"`
+	AddLabels *LabelOptions `url:"add_labels,comma,omitempty" json:"add_labels,omitempty"`
+	RemoveLabels *LabelOptions `url:"remove_labels,comma,omitempty" json:"remove_labels,omitempty"`
+	StateEvent *string `url:"state_event,omitempty" json:"state_event,omitempty"`
+	UpdatedAt *time.Time `url:"updated_at,omitempty" json:"updated_at,omitempty"`
+	DueDate *ISOTime `url:"due_date,omitempty" json:"due_date,omitempty"`
+	EpicID *int `url:"epic_id,omitempty" json:"epic_id,omitempty"`
+	Weight *int `url:"weight,omitempty" json:"weight,omitempty"`
+	DiscussionLocked *bool `url:"discussion_locked,omitempty" json:"discussion_locked,omitempty"`
+	IssueType *string `url:"issue_type,omitempty" json:"issue_type,omitempty"`
+}
+
+// UpdateIssue updates an existing project issue. This function is also used
+// to mark an issue as closed.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#edit-issue
+func (s *IssuesService) UpdateIssue(pid interface{}, issue int, opt *UpdateIssueOptions, options ...RequestOptionFunc) (*Issue, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/issues/%d", PathEscape(project), issue)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	i := new(Issue)
+	resp, err := s.client.Do(req, i)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return i, resp, nil
+}
+
+// DeleteIssue deletes a single project issue.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#delete-an-issue
+func (s *IssuesService) DeleteIssue(pid interface{}, issue int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/issues/%d", PathEscape(project), issue)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// ReorderIssueOptions represents the available ReorderIssue() options.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#reorder-an-issue
+type ReorderIssueOptions struct {
+	MoveAfterID *int `url:"move_after_id,omitempty" json:"move_after_id,omitempty"`
+	MoveBeforeID *int `url:"move_before_id,omitempty" json:"move_before_id,omitempty"`
+}
+
+// ReorderIssue reorders an issue.
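+//
+// A sketch, not part of the upstream docs: "git" is an assumed authenticated
+// *gitlab.Client, the issue argument is the project-scoped IID, and per the
+// GitLab API docs MoveAfterID/MoveBeforeID identify the neighbouring issues
+// by their global IDs (all values here are illustrative):
+//
+//	after := 100
+//	issue, _, err := git.Issues.ReorderIssue("group/project", 3, &gitlab.ReorderIssueOptions{
+//		MoveAfterID: &after,
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	_ = issue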
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#reorder-an-issue
+func (s *IssuesService) ReorderIssue(pid interface{}, issue int, opt *ReorderIssueOptions, options ...RequestOptionFunc) (*Issue, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/issues/%d/reorder", PathEscape(project), issue)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	i := new(Issue)
+	resp, err := s.client.Do(req, i)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return i, resp, nil
+}
+
+// MoveIssueOptions represents the available MoveIssue() options.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#move-an-issue
+type MoveIssueOptions struct {
+	ToProjectID *int `url:"to_project_id,omitempty" json:"to_project_id,omitempty"`
+}
+
+// MoveIssue moves an existing project issue to a different project.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#move-an-issue
+func (s *IssuesService) MoveIssue(pid interface{}, issue int, opt *MoveIssueOptions, options ...RequestOptionFunc) (*Issue, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/issues/%d/move", PathEscape(project), issue)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	i := new(Issue)
+	resp, err := s.client.Do(req, i)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return i, resp, nil
+}
+
+// SubscribeToIssue subscribes the authenticated user to the given issue to
+// receive notifications. If the user is already subscribed to the issue, the
+// status code 304 is returned.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/issues.html#subscribe-to-an-issue
+func (s *IssuesService) SubscribeToIssue(pid interface{}, issue int, options ...RequestOptionFunc) (*Issue, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/issues/%d/subscribe", PathEscape(project), issue)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	i := new(Issue)
+	resp, err := s.client.Do(req, i)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return i, resp, nil
+}
+
+// UnsubscribeFromIssue unsubscribes the authenticated user from the given
+// issue so that they no longer receive notifications for it. If the user
+// is not subscribed to the issue, status code 304 is returned.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/issues.html#unsubscribe-from-an-issue
+func (s *IssuesService) UnsubscribeFromIssue(pid interface{}, issue int, options ...RequestOptionFunc) (*Issue, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/issues/%d/unsubscribe", PathEscape(project), issue)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	i := new(Issue)
+	resp, err := s.client.Do(req, i)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return i, resp, nil
+}
+
+// CreateTodo creates a todo for the current user for an issue.
+// If there already exists a todo for the user on that issue, status code
+// 304 is returned.
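+//
+// A sketch of detecting the already-exists case via the HTTP status, not
+// part of the upstream docs ("git" is an assumed authenticated
+// *gitlab.Client):
+//
+//	todo, resp, err := git.Issues.CreateTodo("group/project", 3)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if resp.StatusCode == http.StatusNotModified {
+//		fmt.Println("a todo already exists for this issue")
+//	}
+//	_ = todo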
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/issues.html#create-a-to-do-item
+func (s *IssuesService) CreateTodo(pid interface{}, issue int, options ...RequestOptionFunc) (*Todo, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/issues/%d/todo", PathEscape(project), issue)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	t := new(Todo)
+	resp, err := s.client.Do(req, t)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return t, resp, nil
+}
+
+// ListMergeRequestsClosingIssueOptions represents the available
+// ListMergeRequestsClosingIssue() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/issues.html#list-merge-requests-that-close-a-particular-issue-on-merge
+type ListMergeRequestsClosingIssueOptions ListOptions
+
+// ListMergeRequestsClosingIssue gets all the merge requests that will close
+// the issue when merged.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/issues.html#list-merge-requests-that-close-a-particular-issue-on-merge
+func (s *IssuesService) ListMergeRequestsClosingIssue(pid interface{}, issue int, opt *ListMergeRequestsClosingIssueOptions, options ...RequestOptionFunc) ([]*MergeRequest, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/issues/%d/closed_by", PathEscape(project), issue)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var m []*MergeRequest
+	resp, err := s.client.Do(req, &m)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return m, resp, nil
+}
+
+// ListMergeRequestsRelatedToIssueOptions represents the available
+// ListMergeRequestsRelatedToIssue() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/issues.html#list-merge-requests-related-to-issue
+type ListMergeRequestsRelatedToIssueOptions ListOptions
+
+// ListMergeRequestsRelatedToIssue gets all the merge requests that are
+// related to the issue.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/issues.html#list-merge-requests-related-to-issue
+func (s *IssuesService) ListMergeRequestsRelatedToIssue(pid interface{}, issue int, opt *ListMergeRequestsRelatedToIssueOptions, options ...RequestOptionFunc) ([]*MergeRequest, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/issues/%d/related_merge_requests",
+		PathEscape(project),
+		issue,
+	)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var m []*MergeRequest
+	resp, err := s.client.Do(req, &m)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return m, resp, nil
+}
+
+// SetTimeEstimate sets the time estimate for a single project issue.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/issues.html#set-a-time-estimate-for-an-issue
+func (s *IssuesService) SetTimeEstimate(pid interface{}, issue int, opt *SetTimeEstimateOptions, options ...RequestOptionFunc) (*TimeStats, *Response, error) {
+	return s.timeStats.setTimeEstimate(pid, "issues", issue, opt, options...)
+}
+
+// ResetTimeEstimate resets the time estimate for a single project issue.
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/issues.html#reset-the-time-estimate-for-an-issue +func (s *IssuesService) ResetTimeEstimate(pid interface{}, issue int, options ...RequestOptionFunc) (*TimeStats, *Response, error) { + return s.timeStats.resetTimeEstimate(pid, "issues", issue, options...) +} + +// AddSpentTime adds spent time for a single project issue. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/issues.html#add-spent-time-for-an-issue +func (s *IssuesService) AddSpentTime(pid interface{}, issue int, opt *AddSpentTimeOptions, options ...RequestOptionFunc) (*TimeStats, *Response, error) { + return s.timeStats.addSpentTime(pid, "issues", issue, opt, options...) +} + +// ResetSpentTime resets the spent time for a single project issue. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/issues.html#reset-spent-time-for-an-issue +func (s *IssuesService) ResetSpentTime(pid interface{}, issue int, options ...RequestOptionFunc) (*TimeStats, *Response, error) { + return s.timeStats.resetSpentTime(pid, "issues", issue, options...) +} + +// GetTimeSpent gets the spent time for a single project issue. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/issues.html#get-time-tracking-stats +func (s *IssuesService) GetTimeSpent(pid interface{}, issue int, options ...RequestOptionFunc) (*TimeStats, *Response, error) { + return s.timeStats.getTimeSpent(pid, "issues", issue, options...) +} + +// GetParticipants gets a list of issue participants. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/issues.html#participants-on-issues +func (s *IssuesService) GetParticipants(pid interface{}, issue int, options ...RequestOptionFunc) ([]*BasicUser, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/issues/%d/participants", PathEscape(project), issue) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + var bu []*BasicUser + resp, err := s.client.Do(req, &bu) + if err != nil { + return nil, resp, err + } + + return bu, resp, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/issues_statistics.go b/vendor/github.com/xanzy/go-gitlab/issues_statistics.go new file mode 100644 index 0000000000..53555781e9 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/issues_statistics.go @@ -0,0 +1,187 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" + "time" +) + +// IssuesStatisticsService handles communication with the issues statistics +// related methods of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/issues_statistics.html +type IssuesStatisticsService struct { + client *Client +} + +// IssuesStatistics represents a GitLab issues statistic. 
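+//
+// The nested struct mirrors the JSON payload. A reading sketch, not part of
+// the upstream docs ("git" is an assumed authenticated *gitlab.Client, and
+// the IssuesStatistics service field name is an assumption of this example):
+//
+//	stats, _, err := git.IssuesStatistics.GetIssuesStatistics(&gitlab.GetIssuesStatisticsOptions{})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("all=%d opened=%d closed=%d\n",
+//		stats.Statistics.Counts.All,
+//		stats.Statistics.Counts.Opened,
+//		stats.Statistics.Counts.Closed)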
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/issues_statistics.html +type IssuesStatistics struct { + Statistics struct { + Counts struct { + All int `json:"all"` + Closed int `json:"closed"` + Opened int `json:"opened"` + } `json:"counts"` + } `json:"statistics"` +} + +func (n IssuesStatistics) String() string { + return Stringify(n) +} + +// GetIssuesStatisticsOptions represents the available GetIssuesStatistics() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/issues_statistics.html#get-issues-statistics +type GetIssuesStatisticsOptions struct { + Labels *LabelOptions `url:"labels,omitempty" json:"labels,omitempty"` + Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"` + Scope *string `url:"scope,omitempty" json:"scope,omitempty"` + AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` + AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"` + AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` + AssigneeUsername *[]string `url:"assignee_username,omitempty" json:"assignee_username,omitempty"` + MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"` + IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` + Search *string `url:"search,omitempty" json:"search,omitempty"` + In *string `url:"in,omitempty" json:"in,omitempty"` + CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"` + CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` + UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"` + UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"` + Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` +} + +// GetIssuesStatistics gets issues statistics on all issues the authenticated +// user has access to. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/issues_statistics.html#get-issues-statistics +func (s *IssuesStatisticsService) GetIssuesStatistics(opt *GetIssuesStatisticsOptions, options ...RequestOptionFunc) (*IssuesStatistics, *Response, error) { + req, err := s.client.NewRequest(http.MethodGet, "issues_statistics", opt, options) + if err != nil { + return nil, nil, err + } + + is := new(IssuesStatistics) + resp, err := s.client.Do(req, is) + if err != nil { + return nil, resp, err + } + + return is, resp, nil +} + +// GetGroupIssuesStatisticsOptions represents the available GetGroupIssuesStatistics() +// options. 
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/issues_statistics.html#get-group-issues-statistics
+type GetGroupIssuesStatisticsOptions struct {
+	Labels *LabelOptions `url:"labels,omitempty" json:"labels,omitempty"`
+	IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"`
+	Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"`
+	Scope *string `url:"scope,omitempty" json:"scope,omitempty"`
+	AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"`
+	AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"`
+	AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"`
+	AssigneeUsername *[]string `url:"assignee_username,omitempty" json:"assignee_username,omitempty"`
+	MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"`
+	Search *string `url:"search,omitempty" json:"search,omitempty"`
+	CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"`
+	CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"`
+	UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"`
+	UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"`
+	Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"`
+}
+
+// GetGroupIssuesStatistics gets issues count statistics for a given group.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/issues_statistics.html#get-group-issues-statistics
+func (s *IssuesStatisticsService) GetGroupIssuesStatistics(gid interface{}, opt *GetGroupIssuesStatisticsOptions, options ...RequestOptionFunc) (*IssuesStatistics, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/issues_statistics", PathEscape(group))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	is := new(IssuesStatistics)
+	resp, err := s.client.Do(req, is)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return is, resp, nil
+}
+
+// GetProjectIssuesStatisticsOptions represents the available
+// GetProjectIssuesStatistics() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/issues_statistics.html#get-project-issues-statistics
+type GetProjectIssuesStatisticsOptions struct {
+	IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"`
+	Labels *LabelOptions `url:"labels,omitempty" json:"labels,omitempty"`
+	Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"`
+	Scope *string `url:"scope,omitempty" json:"scope,omitempty"`
+	AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"`
+	AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"`
+	AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"`
+	AssigneeUsername *[]string `url:"assignee_username,omitempty" json:"assignee_username,omitempty"`
+	MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"`
+	Search *string `url:"search,omitempty" json:"search,omitempty"`
+	CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"`
+	CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"`
+	UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"`
+	UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"`
+	Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"`
+}
+
+// GetProjectIssuesStatistics gets issues count statistics for a given project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/issues_statistics.html#get-project-issues-statistics
+func (s *IssuesStatisticsService) GetProjectIssuesStatistics(pid interface{}, opt *GetProjectIssuesStatisticsOptions, options ...RequestOptionFunc) (*IssuesStatistics, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/issues_statistics", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	is := new(IssuesStatistics)
+	resp, err := s.client.Do(req, is)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return is, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/job_token_scope.go b/vendor/github.com/xanzy/go-gitlab/job_token_scope.go
new file mode 100644
index 0000000000..35525b76d8
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/job_token_scope.go
@@ -0,0 +1,284 @@
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+)
+
+// JobTokenScopeService handles communication with project CI settings
+// such as token permissions.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/project_job_token_scopes.html
+type JobTokenScopeService struct {
+	client *Client
+}
+
+// JobTokenAccessSettings represents job token access attributes for this project.
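+//
+// A reading sketch, not part of the upstream docs ("git" is an assumed
+// authenticated *gitlab.Client, and the JobTokenScope service field name is
+// an assumption of this example):
+//
+//	settings, _, err := git.JobTokenScope.GetProjectJobTokenAccessSettings("group/project")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("inbound enabled:", settings.InboundEnabled)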
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/project_job_token_scopes.html
+type JobTokenAccessSettings struct {
+	InboundEnabled bool `json:"inbound_enabled"`
+	OutboundEnabled bool `json:"outbound_enabled"`
+}
+
+// GetProjectJobTokenAccessSettings fetches the CI/CD job token access settings
+// (job token scope) of a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#get-a-projects-cicd-job-token-access-settings
+func (j *JobTokenScopeService) GetProjectJobTokenAccessSettings(pid interface{}, options ...RequestOptionFunc) (*JobTokenAccessSettings, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf(`projects/%s/job_token_scope`, PathEscape(project))
+
+	req, err := j.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	jt := new(JobTokenAccessSettings)
+	resp, err := j.client.Do(req, jt)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return jt, resp, err
+}
+
+// PatchProjectJobTokenAccessSettingsOptions represents the available
+// PatchProjectJobTokenAccessSettings() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#patch-a-projects-cicd-job-token-access-settings
+type PatchProjectJobTokenAccessSettingsOptions struct {
+	Enabled bool `json:"enabled"`
+}
+
+// PatchProjectJobTokenAccessSettings patches the "Limit access to this project"
+// setting (job token scope) of a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#patch-a-projects-cicd-job-token-access-settings
+func (j *JobTokenScopeService) PatchProjectJobTokenAccessSettings(pid interface{}, opt *PatchProjectJobTokenAccessSettingsOptions, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf(`projects/%s/job_token_scope`, PathEscape(project))
+
+	req, err := j.client.NewRequest(http.MethodPatch, u, opt, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return j.client.Do(req, nil)
+}
+
+// JobTokenInboundAllowItem represents a single job token inbound allowlist item.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/project_job_token_scopes.html
+type JobTokenInboundAllowItem struct {
+	SourceProjectID int `json:"source_project_id"`
+	TargetProjectID int `json:"target_project_id"`
+}
+
+// GetJobTokenInboundAllowListOptions represents the available
+// GetJobTokenInboundAllowList() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#get-a-projects-cicd-job-token-inbound-allowlist
+type GetJobTokenInboundAllowListOptions struct {
+	ListOptions
+}
+
+// GetProjectJobTokenInboundAllowList fetches the CI/CD job token inbound
+// allowlist (job token scope) of a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#get-a-projects-cicd-job-token-inbound-allowlist
+func (j *JobTokenScopeService) GetProjectJobTokenInboundAllowList(pid interface{}, opt *GetJobTokenInboundAllowListOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf(`projects/%s/job_token_scope/allowlist`, PathEscape(project))
+
+	req, err := j.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var ps []*Project
+	resp, err := j.client.Do(req, &ps)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ps, resp, nil
+}
+
+// JobTokenInboundAllowOptions represents the available
+// AddProjectToJobScopeAllowList() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#create-a-new-project-to-a-projects-cicd-job-token-inbound-allowlist
+type JobTokenInboundAllowOptions struct {
+	TargetProjectID *int `url:"target_project_id,omitempty" json:"target_project_id,omitempty"`
+}
+
+// AddProjectToJobScopeAllowList adds a new project to a project's job token
+// inbound allow list.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#create-a-new-project-to-a-projects-cicd-job-token-inbound-allowlist
+func (j *JobTokenScopeService) AddProjectToJobScopeAllowList(pid interface{}, opt *JobTokenInboundAllowOptions, options ...RequestOptionFunc) (*JobTokenInboundAllowItem, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf(`projects/%s/job_token_scope/allowlist`, PathEscape(project))
+
+	req, err := j.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	jt := new(JobTokenInboundAllowItem)
+	resp, err := j.client.Do(req, jt)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return jt, resp, nil
+}
+
+// RemoveProjectFromJobScopeAllowList removes a project from a project's job
+// token inbound allow list.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#remove-a-project-from-a-projects-cicd-job-token-inbound-allowlist
+func (j *JobTokenScopeService) RemoveProjectFromJobScopeAllowList(pid interface{}, targetProject int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf(`projects/%s/job_token_scope/allowlist/%d`, PathEscape(project), targetProject)
+
+	req, err := j.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return j.client.Do(req, nil)
+}
+
+// JobTokenAllowlistItem represents a single job token allowlist item.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/project_job_token_scopes.html
+type JobTokenAllowlistItem struct {
+	SourceProjectID int `json:"source_project_id"`
+	TargetGroupID int `json:"target_group_id"`
+}
+
+// GetJobTokenAllowlistGroupsOptions represents the available
+// GetJobTokenAllowlistGroups() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#get-a-projects-cicd-job-token-allowlist-of-groups
+type GetJobTokenAllowlistGroupsOptions struct {
+	ListOptions
+}
+
+// GetJobTokenAllowlistGroups fetches the CI/CD job token allowlist groups
+// (job token scopes) of a project.
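+//
+// A paging sketch, not part of the upstream docs ("git" is an assumed
+// authenticated *gitlab.Client):
+//
+//	opt := &gitlab.GetJobTokenAllowlistGroupsOptions{
+//		ListOptions: gitlab.ListOptions{PerPage: 100},
+//	}
+//	groups, _, err := git.JobTokenScope.GetJobTokenAllowlistGroups("group/project", opt)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, g := range groups {
+//		fmt.Println(g.FullPath)
+//	}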
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#get-a-projects-cicd-job-token-allowlist-of-groups
+func (j *JobTokenScopeService) GetJobTokenAllowlistGroups(pid interface{}, opt *GetJobTokenAllowlistGroupsOptions, options ...RequestOptionFunc) ([]*Group, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf(`projects/%s/job_token_scope/groups_allowlist`, PathEscape(project))
+
+	req, err := j.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var ps []*Group
+	resp, err := j.client.Do(req, &ps)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ps, resp, nil
+}
+
+// AddGroupToJobTokenAllowlistOptions represents the available
+// AddGroupToJobTokenAllowlist() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#add-a-group-to-a-cicd-job-token-allowlist
+type AddGroupToJobTokenAllowlistOptions struct {
+	TargetGroupID *int `url:"target_group_id,omitempty" json:"target_group_id,omitempty"`
+}
+
+// AddGroupToJobTokenAllowlist adds a new group to a project's job token
+// inbound groups allow list.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#add-a-group-to-a-cicd-job-token-allowlist
+func (j *JobTokenScopeService) AddGroupToJobTokenAllowlist(pid interface{}, opt *AddGroupToJobTokenAllowlistOptions, options ...RequestOptionFunc) (*JobTokenAllowlistItem, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf(`projects/%s/job_token_scope/groups_allowlist`, PathEscape(project))
+
+	req, err := j.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	jt := new(JobTokenAllowlistItem)
+	resp, err := j.client.Do(req, jt)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return jt, resp, nil
+}
+
+// RemoveGroupFromJobTokenAllowlist removes a group from a project's job
+// token inbound groups allow list.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#remove-a-group-from-a-cicd-job-token-allowlist
+func (j *JobTokenScopeService) RemoveGroupFromJobTokenAllowlist(pid interface{}, targetGroup int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf(`projects/%s/job_token_scope/groups_allowlist/%d`, PathEscape(project), targetGroup)
+
+	req, err := j.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return j.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/jobs.go b/vendor/github.com/xanzy/go-gitlab/jobs.go
new file mode 100644
index 0000000000..f25c020f12
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/jobs.go
@@ -0,0 +1,585 @@
+//
+// Copyright 2021, Arkbriar
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// + +package gitlab + +import ( + "bytes" + "fmt" + "net/http" + "time" +) + +// JobsService handles communication with the ci builds related methods +// of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/jobs.html +type JobsService struct { + client *Client +} + +// Job represents a ci build. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/jobs.html +type Job struct { + Commit *Commit `json:"commit"` + Coverage float64 `json:"coverage"` + AllowFailure bool `json:"allow_failure"` + CreatedAt *time.Time `json:"created_at"` + StartedAt *time.Time `json:"started_at"` + FinishedAt *time.Time `json:"finished_at"` + ErasedAt *time.Time `json:"erased_at"` + Duration float64 `json:"duration"` + QueuedDuration float64 `json:"queued_duration"` + ArtifactsExpireAt *time.Time `json:"artifacts_expire_at"` + TagList []string `json:"tag_list"` + ID int `json:"id"` + Name string `json:"name"` + Pipeline struct { + ID int `json:"id"` + ProjectID int `json:"project_id"` + Ref string `json:"ref"` + Sha string `json:"sha"` + Status string `json:"status"` + } `json:"pipeline"` + Ref string `json:"ref"` + Artifacts []struct { + FileType string `json:"file_type"` + Filename string `json:"filename"` + Size int `json:"size"` + FileFormat string `json:"file_format"` + } `json:"artifacts"` + ArtifactsFile struct { + Filename string `json:"filename"` + Size int `json:"size"` + } `json:"artifacts_file"` + Runner struct { + ID int `json:"id"` + Description string `json:"description"` + Active bool `json:"active"` + IsShared bool `json:"is_shared"` + Name string `json:"name"` + } `json:"runner"` + Stage string `json:"stage"` + Status string `json:"status"` + FailureReason string `json:"failure_reason"` + Tag bool `json:"tag"` + WebURL string `json:"web_url"` + Project *Project `json:"project"` + User *User `json:"user"` +} + +// Bridge represents a pipeline bridge. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/jobs.html#list-pipeline-bridges +type Bridge struct { + Commit *Commit `json:"commit"` + Coverage float64 `json:"coverage"` + AllowFailure bool `json:"allow_failure"` + CreatedAt *time.Time `json:"created_at"` + StartedAt *time.Time `json:"started_at"` + FinishedAt *time.Time `json:"finished_at"` + ErasedAt *time.Time `json:"erased_at"` + Duration float64 `json:"duration"` + QueuedDuration float64 `json:"queued_duration"` + ID int `json:"id"` + Name string `json:"name"` + Pipeline PipelineInfo `json:"pipeline"` + Ref string `json:"ref"` + Stage string `json:"stage"` + Status string `json:"status"` + FailureReason string `json:"failure_reason"` + Tag bool `json:"tag"` + WebURL string `json:"web_url"` + User *User `json:"user"` + DownstreamPipeline *PipelineInfo `json:"downstream_pipeline"` +} + +// ListJobsOptions represents the available ListProjectJobs() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/jobs.html#list-project-jobs +type ListJobsOptions struct { + ListOptions + Scope *[]BuildStateValue `url:"scope[],omitempty" json:"scope,omitempty"` + IncludeRetried *bool `url:"include_retried,omitempty" json:"include_retried,omitempty"` +} + +// ListProjectJobs gets a list of jobs in a project. 
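+//
+// A filtering sketch, not part of the upstream docs (the scope values are
+// explained below; "git" is an assumed authenticated *gitlab.Client):
+//
+//	scope := []gitlab.BuildStateValue{"failed", "canceled"}
+//	jobs, _, err := git.Jobs.ListProjectJobs("group/project", &gitlab.ListJobsOptions{
+//		Scope: &scope,
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, j := range jobs {
+//		fmt.Println(j.Name, j.Status)
+//	}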
+//
+// The scope of jobs to show. Accepts one or an array of: created, pending,
+// running, failed, success, canceled, and skipped. All jobs are shown if no
+// scope is provided.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/jobs.html#list-project-jobs
+func (s *JobsService) ListProjectJobs(pid interface{}, opts *ListJobsOptions, options ...RequestOptionFunc) ([]*Job, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/jobs", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opts, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var jobs []*Job
+	resp, err := s.client.Do(req, &jobs)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return jobs, resp, nil
+}
+
+// ListPipelineJobs gets a list of jobs for a specific pipeline in a
+// project. If the pipeline ID is not found, the API responds with 404.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/jobs.html#list-pipeline-jobs
+func (s *JobsService) ListPipelineJobs(pid interface{}, pipelineID int, opts *ListJobsOptions, options ...RequestOptionFunc) ([]*Job, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/pipelines/%d/jobs", PathEscape(project), pipelineID)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opts, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var jobs []*Job
+	resp, err := s.client.Do(req, &jobs)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return jobs, resp, nil
+}
+
+// ListPipelineBridges gets a list of bridges for a specific pipeline in a
+// project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/jobs.html#list-pipeline-jobs
+func (s *JobsService) ListPipelineBridges(pid interface{}, pipelineID int, opts *ListJobsOptions, options ...RequestOptionFunc) ([]*Bridge, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/pipelines/%d/bridges", PathEscape(project), pipelineID)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opts, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var bridges []*Bridge
+	resp, err := s.client.Do(req, &bridges)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return bridges, resp, nil
+}
+
+// GetJobTokensJobOptions represents the available GetJobTokensJob() options.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/jobs.html#get-job-tokens-job
+type GetJobTokensJobOptions struct {
+	JobToken *string `url:"job_token,omitempty" json:"job_token,omitempty"`
+}
+
+// GetJobTokensJob retrieves the job that generated a job token.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/jobs.html#get-job-tokens-job
+func (s *JobsService) GetJobTokensJob(opts *GetJobTokensJobOptions, options ...RequestOptionFunc) (*Job, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodGet, "job", opts, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	job := new(Job)
+	resp, err := s.client.Do(req, job)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return job, resp, nil
+}
+
+// GetJob gets a single job of a project.
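+//
+// A minimal usage sketch (illustrative only; both IDs are placeholders):
+//
+//	job, _, err := git.Jobs.GetJob(1234, 5678)
+//	if err == nil {
+//		fmt.Printf("job %d: %s\n", job.ID, job.Status)
+//	}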
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/jobs.html#get-a-single-job
+func (s *JobsService) GetJob(pid interface{}, jobID int, options ...RequestOptionFunc) (*Job, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/jobs/%d", PathEscape(project), jobID)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	job := new(Job)
+	resp, err := s.client.Do(req, job)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return job, resp, nil
+}
+
+// GetJobArtifacts gets the artifacts of a job in a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/job_artifacts.html#get-job-artifacts
+func (s *JobsService) GetJobArtifacts(pid interface{}, jobID int, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/jobs/%d/artifacts", PathEscape(project), jobID)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	artifactsBuf := new(bytes.Buffer)
+	resp, err := s.client.Do(req, artifactsBuf)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return bytes.NewReader(artifactsBuf.Bytes()), resp, err
+}
+
+// DownloadArtifactsFileOptions represents the available DownloadArtifactsFile()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/job_artifacts.html#download-the-artifacts-archive
+type DownloadArtifactsFileOptions struct {
+	Job *string `url:"job" json:"job"`
+}
+
+// DownloadArtifactsFile downloads the artifacts archive for the given
+// reference name and job, provided the job finished successfully.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/job_artifacts.html#download-the-artifacts-archive
+func (s *JobsService) DownloadArtifactsFile(pid interface{}, refName string, opt *DownloadArtifactsFileOptions, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/jobs/artifacts/%s/download", PathEscape(project), refName)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	artifactsBuf := new(bytes.Buffer)
+	resp, err := s.client.Do(req, artifactsBuf)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return bytes.NewReader(artifactsBuf.Bytes()), resp, err
+}
+
+// DownloadSingleArtifactsFile downloads a single file from the artifacts
+// archive for the given reference name and job, provided the job finished
+// successfully. Only the requested file is extracted from the archive and
+// streamed to the client.
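+//
+// A minimal sketch for saving one artifact to disk (illustrative only; the
+// IDs, artifact path, and file name are placeholders, and error handling is
+// elided for brevity):
+//
+//	r, _, err := git.Jobs.DownloadSingleArtifactsFile(1234, 5678, "binaries/app")
+//	if err == nil {
+//		f, _ := os.Create("app")
+//		defer f.Close()
+//		io.Copy(f, r)
+//	}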
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/job_artifacts.html#download-a-single-artifact-file-by-job-id
+func (s *JobsService) DownloadSingleArtifactsFile(pid interface{}, jobID int, artifactPath string, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	u := fmt.Sprintf(
+		"projects/%s/jobs/%d/artifacts/%s",
+		PathEscape(project),
+		jobID,
+		artifactPath,
+	)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	artifactBuf := new(bytes.Buffer)
+	resp, err := s.client.Do(req, artifactBuf)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return bytes.NewReader(artifactBuf.Bytes()), resp, err
+}
+
+// DownloadSingleArtifactsFileByTagOrBranch downloads a single artifact file
+// for a specific job of the latest successful pipeline for the given
+// reference name from inside the job's artifacts archive. The file is
+// extracted from the archive and streamed to the client.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/job_artifacts.html#download-a-single-artifact-file-from-specific-tag-or-branch
+func (s *JobsService) DownloadSingleArtifactsFileByTagOrBranch(pid interface{}, refName string, artifactPath string, opt *DownloadArtifactsFileOptions, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	u := fmt.Sprintf(
+		"projects/%s/jobs/artifacts/%s/raw/%s",
+		PathEscape(project),
+		PathEscape(refName),
+		artifactPath,
+	)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	artifactBuf := new(bytes.Buffer)
+	resp, err := s.client.Do(req, artifactBuf)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return bytes.NewReader(artifactBuf.Bytes()), resp, err
+}
+
+// GetTraceFile gets a trace of a specific job of a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/jobs.html#get-a-log-file
+func (s *JobsService) GetTraceFile(pid interface{}, jobID int, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/jobs/%d/trace", PathEscape(project), jobID)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	traceBuf := new(bytes.Buffer)
+	resp, err := s.client.Do(req, traceBuf)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return bytes.NewReader(traceBuf.Bytes()), resp, err
+}
+
+// CancelJob cancels a single job of a project.
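+//
+// A minimal usage sketch (illustrative only; placeholder project and job IDs):
+//
+//	job, _, err := git.Jobs.CancelJob(1234, 5678)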
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/jobs.html#cancel-a-job
+func (s *JobsService) CancelJob(pid interface{}, jobID int, options ...RequestOptionFunc) (*Job, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/jobs/%d/cancel", PathEscape(project), jobID)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	job := new(Job)
+	resp, err := s.client.Do(req, job)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return job, resp, nil
+}
+
+// RetryJob retries a single job of a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/jobs.html#retry-a-job
+func (s *JobsService) RetryJob(pid interface{}, jobID int, options ...RequestOptionFunc) (*Job, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/jobs/%d/retry", PathEscape(project), jobID)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	job := new(Job)
+	resp, err := s.client.Do(req, job)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return job, resp, nil
+}
+
+// EraseJob erases a single job of a project, removing its artifacts and
+// trace.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/jobs.html#erase-a-job
+func (s *JobsService) EraseJob(pid interface{}, jobID int, options ...RequestOptionFunc) (*Job, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/jobs/%d/erase", PathEscape(project), jobID)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	job := new(Job)
+	resp, err := s.client.Do(req, job)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return job, resp, nil
+}
+
+// KeepArtifacts prevents artifacts from being deleted when
+// expiration is set.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/job_artifacts.html#keep-artifacts
+func (s *JobsService) KeepArtifacts(pid interface{}, jobID int, options ...RequestOptionFunc) (*Job, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/jobs/%d/artifacts/keep", PathEscape(project), jobID)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	job := new(Job)
+	resp, err := s.client.Do(req, job)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return job, resp, nil
+}
+
+// PlayJobOptions represents the available PlayJob() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/jobs.html#run-a-job
+type PlayJobOptions struct {
+	JobVariablesAttributes *[]*JobVariableOptions `url:"job_variables_attributes,omitempty" json:"job_variables_attributes,omitempty"`
+}
+
+// JobVariableOptions represents a single job variable.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/jobs.html#run-a-job
+type JobVariableOptions struct {
+	Key          *string            `url:"key,omitempty" json:"key,omitempty"`
+	Value        *string            `url:"value,omitempty" json:"value,omitempty"`
+	VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"`
+}
+
+// PlayJob triggers a manual action to start a job.
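+//
+// A minimal sketch for starting a manual job with one variable (illustrative
+// only; the IDs, key, and value are placeholders):
+//
+//	job, _, err := git.Jobs.PlayJob(1234, 5678, &gitlab.PlayJobOptions{
+//		JobVariablesAttributes: &[]*gitlab.JobVariableOptions{
+//			{Key: gitlab.String("DEPLOY_ENV"), Value: gitlab.String("staging")},
+//		},
+//	})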
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/jobs.html#run-a-job
+func (s *JobsService) PlayJob(pid interface{}, jobID int, opt *PlayJobOptions, options ...RequestOptionFunc) (*Job, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/jobs/%d/play", PathEscape(project), jobID)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	job := new(Job)
+	resp, err := s.client.Do(req, job)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return job, resp, nil
+}
+
+// DeleteArtifacts deletes the artifacts of a job.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/job_artifacts.html#delete-job-artifacts
+func (s *JobsService) DeleteArtifacts(pid interface{}, jobID int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/jobs/%d/artifacts", PathEscape(project), jobID)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// DeleteProjectArtifacts deletes all artifacts eligible for deletion in a
+// project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/job_artifacts.html#delete-project-artifacts
+func (s *JobsService) DeleteProjectArtifacts(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/artifacts", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/keys.go b/vendor/github.com/xanzy/go-gitlab/keys.go
new file mode 100644
index 0000000000..d9cf598333
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/keys.go
@@ -0,0 +1,97 @@
+//
+// Copyright 2021, Patrick Webster
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// KeysService handles communication with the
+// keys related methods of the GitLab API.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/keys.html
+type KeysService struct {
+	client *Client
+}
+
+// Key represents a GitLab user's SSH key.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/keys.html
+type Key struct {
+	ID        int        `json:"id"`
+	Title     string     `json:"title"`
+	Key       string     `json:"key"`
+	CreatedAt *time.Time `json:"created_at"`
+	User      User       `json:"user"`
+}
+
+// GetKeyWithUser gets a single key by ID along with the associated
+// user information.
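+//
+// A minimal usage sketch (illustrative only; 1 is a placeholder key ID):
+//
+//	key, _, err := git.Keys.GetKeyWithUser(1)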
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/keys.html#get-ssh-key-with-user-by-id-of-an-ssh-key +func (s *KeysService) GetKeyWithUser(key int, options ...RequestOptionFunc) (*Key, *Response, error) { + u := fmt.Sprintf("keys/%d", key) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + k := new(Key) + resp, err := s.client.Do(req, k) + if err != nil { + return nil, resp, err + } + + return k, resp, nil +} + +// GetKeyByFingerprintOptions represents the available GetKeyByFingerprint() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/keys.html#get-user-by-fingerprint-of-ssh-key +// https://docs.gitlab.com/ee/api/keys.html#get-user-by-deploy-key-fingerprint +type GetKeyByFingerprintOptions struct { + Fingerprint string `url:"fingerprint" json:"fingerprint"` +} + +// GetKeyByFingerprint gets a specific SSH key or deploy key by fingerprint +// along with the associated user information. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/keys.html#get-user-by-fingerprint-of-ssh-key +// https://docs.gitlab.com/ee/api/keys.html#get-user-by-deploy-key-fingerprint +func (s *KeysService) GetKeyByFingerprint(opt *GetKeyByFingerprintOptions, options ...RequestOptionFunc) (*Key, *Response, error) { + req, err := s.client.NewRequest(http.MethodGet, "keys", opt, options) + if err != nil { + return nil, nil, err + } + + k := new(Key) + resp, err := s.client.Do(req, k) + if err != nil { + return nil, resp, err + } + + return k, resp, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/labels.go b/vendor/github.com/xanzy/go-gitlab/labels.go new file mode 100644 index 0000000000..d36e85b086 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/labels.go @@ -0,0 +1,317 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "encoding/json" + "fmt" + "net/http" +) + +// LabelsService handles communication with the label related methods of the +// GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/labels.html +type LabelsService struct { + client *Client +} + +// Label represents a GitLab label. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/labels.html +type Label struct { + ID int `json:"id"` + Name string `json:"name"` + Color string `json:"color"` + TextColor string `json:"text_color"` + Description string `json:"description"` + OpenIssuesCount int `json:"open_issues_count"` + ClosedIssuesCount int `json:"closed_issues_count"` + OpenMergeRequestsCount int `json:"open_merge_requests_count"` + Subscribed bool `json:"subscribed"` + Priority int `json:"priority"` + IsProjectLabel bool `json:"is_project_label"` +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
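+// Some endpoints return the label name under a "title" key instead of
+// "name"; the fallback below copies "title" into Name so callers can rely
+// on Name being set either way.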
+func (l *Label) UnmarshalJSON(data []byte) error {
+	type alias Label
+	if err := json.Unmarshal(data, (*alias)(l)); err != nil {
+		return err
+	}
+
+	if l.Name == "" {
+		// Fall back to the legacy "title" key when "name" is absent.
+		var raw map[string]interface{}
+		if err := json.Unmarshal(data, &raw); err != nil {
+			return err
+		}
+		if title, ok := raw["title"].(string); ok {
+			l.Name = title
+		}
+	}
+
+	return nil
+}
+
+func (l Label) String() string {
+	return Stringify(l)
+}
+
+// ListLabelsOptions represents the available ListLabels() options.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/labels.html#list-labels
+type ListLabelsOptions struct {
+	ListOptions
+	WithCounts            *bool   `url:"with_counts,omitempty" json:"with_counts,omitempty"`
+	IncludeAncestorGroups *bool   `url:"include_ancestor_groups,omitempty" json:"include_ancestor_groups,omitempty"`
+	Search                *string `url:"search,omitempty" json:"search,omitempty"`
+}
+
+// ListLabels gets all labels for a given project.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/labels.html#list-labels
+func (s *LabelsService) ListLabels(pid interface{}, opt *ListLabelsOptions, options ...RequestOptionFunc) ([]*Label, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/labels", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var l []*Label
+	resp, err := s.client.Do(req, &l)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return l, resp, nil
+}
+
+// GetLabel gets a single label for a given project.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/labels.html#get-a-single-project-label
+func (s *LabelsService) GetLabel(pid interface{}, labelID interface{}, options ...RequestOptionFunc) (*Label, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	label, err := parseID(labelID)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/labels/%s", PathEscape(project), PathEscape(label))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var l *Label
+	resp, err := s.client.Do(req, &l)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return l, resp, nil
+}
+
+// CreateLabelOptions represents the available CreateLabel() options.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/labels.html#create-a-new-label
+type CreateLabelOptions struct {
+	Name        *string `url:"name,omitempty" json:"name,omitempty"`
+	Color       *string `url:"color,omitempty" json:"color,omitempty"`
+	Description *string `url:"description,omitempty" json:"description,omitempty"`
+	Priority    *int    `url:"priority,omitempty" json:"priority,omitempty"`
+}
+
+// CreateLabel creates a new label for a given repository with the given
+// name and color.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/labels.html#create-a-new-label
+func (s *LabelsService) CreateLabel(pid interface{}, opt *CreateLabelOptions, options ...RequestOptionFunc) (*Label, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/labels", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	l := new(Label)
+	resp, err := s.client.Do(req, l)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return l, resp, nil
+}
+
+// DeleteLabelOptions represents the available DeleteLabel() options.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/labels.html#delete-a-label
+type DeleteLabelOptions struct {
+	Name *string `url:"name,omitempty" json:"name,omitempty"`
+}
+
+// DeleteLabel deletes a label given by its name or ID.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/labels.html#delete-a-label
+func (s *LabelsService) DeleteLabel(pid interface{}, lid interface{}, opt *DeleteLabelOptions, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/labels", PathEscape(project))
+
+	if lid != nil {
+		label, err := parseID(lid)
+		if err != nil {
+			return nil, err
+		}
+		u = fmt.Sprintf("projects/%s/labels/%s", PathEscape(project), PathEscape(label))
+	}
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, opt, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// UpdateLabelOptions represents the available UpdateLabel() options.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/labels.html#edit-an-existing-label
+type UpdateLabelOptions struct {
+	Name        *string `url:"name,omitempty" json:"name,omitempty"`
+	NewName     *string `url:"new_name,omitempty" json:"new_name,omitempty"`
+	Color       *string `url:"color,omitempty" json:"color,omitempty"`
+	Description *string `url:"description,omitempty" json:"description,omitempty"`
+	Priority    *int    `url:"priority,omitempty" json:"priority,omitempty"`
+}
+
+// UpdateLabel updates an existing label with a new name or new color. At
+// least one parameter is required to update the label.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/labels.html#edit-an-existing-label
+func (s *LabelsService) UpdateLabel(pid interface{}, opt *UpdateLabelOptions, options ...RequestOptionFunc) (*Label, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/labels", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	l := new(Label)
+	resp, err := s.client.Do(req, l)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return l, resp, nil
+}
+
+// SubscribeToLabel subscribes the authenticated user to a label to receive
+// notifications. If the user is already subscribed to the label, the status
+// code 304 is returned.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/labels.html#subscribe-to-a-label
+func (s *LabelsService) SubscribeToLabel(pid interface{}, labelID interface{}, options ...RequestOptionFunc) (*Label, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	label, err := parseID(labelID)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/labels/%s/subscribe", PathEscape(project), PathEscape(label))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	l := new(Label)
+	resp, err := s.client.Do(req, l)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return l, resp, nil
+}
+
+// UnsubscribeFromLabel unsubscribes the authenticated user from a label so
+// that notifications are no longer received from it. If the user is not
+// subscribed to the label, the status code 304 is returned.
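+//
+// A minimal usage sketch (illustrative only; placeholder project and label IDs):
+//
+//	resp, err := git.Labels.UnsubscribeFromLabel(1234, 42)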
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/labels.html#unsubscribe-from-a-label
+func (s *LabelsService) UnsubscribeFromLabel(pid interface{}, labelID interface{}, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	label, err := parseID(labelID)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/labels/%s/unsubscribe", PathEscape(project), PathEscape(label))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// PromoteLabel promotes a project label to a group label.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/labels.html#promote-a-project-label-to-a-group-label
+func (s *LabelsService) PromoteLabel(pid interface{}, labelID interface{}, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	label, err := parseID(labelID)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/labels/%s/promote", PathEscape(project), PathEscape(label))
+
+	req, err := s.client.NewRequest(http.MethodPut, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/license.go b/vendor/github.com/xanzy/go-gitlab/license.go
new file mode 100644
index 0000000000..4882f90a9d
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/license.go
@@ -0,0 +1,128 @@
+//
+// Copyright 2021, Patrick Webster
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// LicenseService handles communication with the license
+// related methods of the GitLab API.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/license.html
+type LicenseService struct {
+	client *Client
+}
+
+// License represents a GitLab license.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/license.html
+type License struct {
+	ID               int        `json:"id"`
+	Plan             string     `json:"plan"`
+	CreatedAt        *time.Time `json:"created_at"`
+	StartsAt         *ISOTime   `json:"starts_at"`
+	ExpiresAt        *ISOTime   `json:"expires_at"`
+	HistoricalMax    int        `json:"historical_max"`
+	MaximumUserCount int        `json:"maximum_user_count"`
+	Expired          bool       `json:"expired"`
+	Overage          int        `json:"overage"`
+	UserLimit        int        `json:"user_limit"`
+	ActiveUsers      int        `json:"active_users"`
+	Licensee         struct {
+		Name    string `json:"Name"`
+		Company string `json:"Company"`
+		Email   string `json:"Email"`
+	} `json:"licensee"`
+	// Add-on codes that may occur in legacy licenses that don't have a plan yet.
+ // https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/models/license.rb + AddOns struct { + GitLabAuditorUser int `json:"GitLab_Auditor_User"` + GitLabDeployBoard int `json:"GitLab_DeployBoard"` + GitLabFileLocks int `json:"GitLab_FileLocks"` + GitLabGeo int `json:"GitLab_Geo"` + GitLabServiceDesk int `json:"GitLab_ServiceDesk"` + } `json:"add_ons"` +} + +func (l License) String() string { + return Stringify(l) +} + +// GetLicense retrieves information about the current license. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/license.html#retrieve-information-about-the-current-license +func (s *LicenseService) GetLicense(options ...RequestOptionFunc) (*License, *Response, error) { + req, err := s.client.NewRequest(http.MethodGet, "license", nil, options) + if err != nil { + return nil, nil, err + } + + l := new(License) + resp, err := s.client.Do(req, l) + if err != nil { + return nil, resp, err + } + + return l, resp, nil +} + +// AddLicenseOptions represents the available AddLicense() options. +// +// https://docs.gitlab.com/ee/api/license.html#add-a-new-license +type AddLicenseOptions struct { + License *string `url:"license" json:"license"` +} + +// AddLicense adds a new license. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/license.html#add-a-new-license +func (s *LicenseService) AddLicense(opt *AddLicenseOptions, options ...RequestOptionFunc) (*License, *Response, error) { + req, err := s.client.NewRequest(http.MethodPost, "license", opt, options) + if err != nil { + return nil, nil, err + } + + l := new(License) + resp, err := s.client.Do(req, l) + if err != nil { + return nil, resp, err + } + + return l, resp, nil +} + +// DeleteLicense deletes an existing license. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/license.html#delete-a-license +func (s *LicenseService) DeleteLicense(licenseID int, options ...RequestOptionFunc) (*Response, error) { + u := fmt.Sprintf("license/%d", licenseID) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/license_templates.go b/vendor/github.com/xanzy/go-gitlab/license_templates.go new file mode 100644 index 0000000000..7eea05fd79 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/license_templates.go @@ -0,0 +1,109 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" +) + +// LicenseTemplate represents a license template. 
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/templates/licenses.html
+type LicenseTemplate struct {
+	Key         string   `json:"key"`
+	Name        string   `json:"name"`
+	Nickname    string   `json:"nickname"`
+	Featured    bool     `json:"featured"`
+	HTMLURL     string   `json:"html_url"`
+	SourceURL   string   `json:"source_url"`
+	Description string   `json:"description"`
+	Conditions  []string `json:"conditions"`
+	Permissions []string `json:"permissions"`
+	Limitations []string `json:"limitations"`
+	Content     string   `json:"content"`
+}
+
+// LicenseTemplatesService handles communication with the license templates
+// related methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/templates/licenses.html
+type LicenseTemplatesService struct {
+	client *Client
+}
+
+// ListLicenseTemplatesOptions represents the available
+// ListLicenseTemplates() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/templates/licenses.html#list-license-templates
+type ListLicenseTemplatesOptions struct {
+	ListOptions
+	Popular *bool `url:"popular,omitempty" json:"popular,omitempty"`
+}
+
+// ListLicenseTemplates gets all license templates.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/templates/licenses.html#list-license-templates
+func (s *LicenseTemplatesService) ListLicenseTemplates(opt *ListLicenseTemplatesOptions, options ...RequestOptionFunc) ([]*LicenseTemplate, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodGet, "templates/licenses", opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var lts []*LicenseTemplate
+	resp, err := s.client.Do(req, &lts)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return lts, resp, nil
+}
+
+// GetLicenseTemplateOptions represents the available
+// GetLicenseTemplate() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/templates/licenses.html#single-license-template
+type GetLicenseTemplateOptions struct {
+	Project  *string `url:"project,omitempty" json:"project,omitempty"`
+	Fullname *string `url:"fullname,omitempty" json:"fullname,omitempty"`
+}
+
+// GetLicenseTemplate gets a single license template. You can pass parameters
+// to replace the license placeholder.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/templates/licenses.html#single-license-template
+func (s *LicenseTemplatesService) GetLicenseTemplate(template string, opt *GetLicenseTemplateOptions, options ...RequestOptionFunc) (*LicenseTemplate, *Response, error) {
+	u := fmt.Sprintf("templates/licenses/%s", template)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	lt := new(LicenseTemplate)
+	resp, err := s.client.Do(req, lt)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return lt, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/markdown.go b/vendor/github.com/xanzy/go-gitlab/markdown.go
new file mode 100644
index 0000000000..8c20749fe8
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/markdown.go
@@ -0,0 +1,47 @@
+package gitlab
+
+import "net/http"
+
+// MarkdownService handles communication with the markdown related methods of
+// the GitLab API.
+//
+// Gitlab API docs: https://docs.gitlab.com/ee/api/markdown.html
+type MarkdownService struct {
+	client *Client
+}
+
+// Markdown represents a markdown document.
+//
+// Gitlab API docs: https://docs.gitlab.com/ee/api/markdown.html
+type Markdown struct {
+	HTML string `json:"html"`
+}
+
+// RenderOptions represents the available Render() options.
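+//
+// A minimal sketch of rendering markdown with these options (illustrative
+// only; the text is a placeholder):
+//
+//	md, _, err := git.Markdown.Render(&gitlab.RenderOptions{
+//		Text:                    gitlab.String("# Hello"),
+//		GitlabFlavouredMarkdown: gitlab.Bool(true),
+//	})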
+// +// Gitlab API docs: +// https://docs.gitlab.com/ee/api/markdown.html#render-an-arbitrary-markdown-document +type RenderOptions struct { + Text *string `url:"text,omitempty" json:"text,omitempty"` + GitlabFlavouredMarkdown *bool `url:"gfm,omitempty" json:"gfm,omitempty"` + Project *string `url:"project,omitempty" json:"project,omitempty"` +} + +// Render an arbitrary markdown document. +// +// Gitlab API docs: +// https://docs.gitlab.com/ee/api/markdown.html#render-an-arbitrary-markdown-document +func (s *MarkdownService) Render(opt *RenderOptions, options ...RequestOptionFunc) (*Markdown, *Response, error) { + req, err := s.client.NewRequest(http.MethodPost, "markdown", opt, options) + if err != nil { + return nil, nil, err + } + + md := new(Markdown) + response, err := s.client.Do(req, md) + if err != nil { + return nil, response, err + } + + return md, response, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/member_roles.go b/vendor/github.com/xanzy/go-gitlab/member_roles.go new file mode 100644 index 0000000000..4d791a9137 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/member_roles.go @@ -0,0 +1,144 @@ +package gitlab + +import ( + "fmt" + "net/http" +) + +// MemberRolesService handles communication with the member roles related +// methods of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/member_roles.html +type MemberRolesService struct { + client *Client +} + +// MemberRole represents a GitLab member role. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/member_roles.html +type MemberRole struct { + ID int `json:"id"` + Name string `json:"name"` + Description string `json:"description,omitempty"` + GroupID int `json:"group_id"` + BaseAccessLevel AccessLevelValue `json:"base_access_level"` + AdminCICDVariables bool `json:"admin_cicd_variables,omitempty"` + AdminComplianceFramework bool `json:"admin_compliance_framework,omitempty"` + AdminGroupMembers bool `json:"admin_group_member,omitempty"` + AdminMergeRequests bool `json:"admin_merge_request,omitempty"` + AdminPushRules bool `json:"admin_push_rules,omitempty"` + AdminTerraformState bool `json:"admin_terraform_state,omitempty"` + AdminVulnerability bool `json:"admin_vulnerability,omitempty"` + AdminWebHook bool `json:"admin_web_hook,omitempty"` + ArchiveProject bool `json:"archive_project,omitempty"` + ManageDeployTokens bool `json:"manage_deploy_tokens,omitempty"` + ManageGroupAccesToken bool `json:"manage_group_access_tokens,omitempty"` + ManageMergeRequestSettings bool `json:"manage_merge_request_settings,omitempty"` + ManageProjectAccessToken bool `json:"manage_project_access_tokens,omitempty"` + ManageSecurityPolicyLink bool `json:"manage_security_policy_link,omitempty"` + ReadCode bool `json:"read_code,omitempty"` + ReadRunners bool `json:"read_runners,omitempty"` + ReadDependency bool `json:"read_dependency,omitempty"` + ReadVulnerability bool `json:"read_vulnerability,omitempty"` + RemoveGroup bool `json:"remove_group,omitempty"` + RemoveProject bool `json:"remove_project,omitempty"` +} + +// ListMemberRoles gets a list of member roles for a specified group. 
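+//
+// A minimal usage sketch (illustrative only; assumes the service is exposed
+// on the client as git.MemberRoles, and "my-group" is a placeholder group
+// path):
+//
+//	roles, _, err := git.MemberRoles.ListMemberRoles("my-group")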
+// +// Gitlab API docs: +// https://docs.gitlab.com/ee/api/member_roles.html#list-all-member-roles-of-a-group +func (s *MemberRolesService) ListMemberRoles(gid interface{}, options ...RequestOptionFunc) ([]*MemberRole, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/member_roles", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + var mrs []*MemberRole + resp, err := s.client.Do(req, &mrs) + if err != nil { + return nil, resp, err + } + + return mrs, resp, nil +} + +// CreateMemberRoleOptions represents the available CreateMemberRole() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/member_roles.html#add-a-member-role-to-a-group +type CreateMemberRoleOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + BaseAccessLevel *AccessLevelValue `url:"base_access_level,omitempty" json:"base_access_level,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + AdminCICDVariables *bool `url:"admin_cicd_variables" json:"admin_cicd_variables,omitempty"` + AdminComplianceFramework *bool `url:"admin_compliance_framework" json:"admin_compliance_framework,omitempty"` + AdminGroupMembers *bool `url:"admin_group_member" json:"admin_group_member,omitempty"` + AdminMergeRequest *bool `url:"admin_merge_request,omitempty" json:"admin_merge_request,omitempty"` + AdminPushRules *bool `url:"admin_push_rules" json:"admin_push_rules,omitempty"` + AdminTerraformState *bool `url:"admin_terraform_state" json:"admin_terraform_state,omitempty"` + AdminVulnerability *bool `url:"admin_vulnerability,omitempty" json:"admin_vulnerability,omitempty"` + AdminWebHook *bool `url:"admin_web_hook" json:"admin_web_hook,omitempty"` + ArchiveProject *bool `url:"archive_project" json:"archive_project,omitempty"` + ManageDeployTokens *bool `url:"manage_deploy_tokens" json:"manage_deploy_tokens,omitempty"` + ManageGroupAccesToken *bool `url:"manage_group_access_tokens" json:"manage_group_access_tokens,omitempty"` + ManageMergeRequestSettings *bool `url:"manage_merge_request_settings" json:"manage_merge_request_settings,omitempty"` + ManageProjectAccessToken *bool `url:"manage_project_access_tokens" json:"manage_project_access_tokens,omitempty"` + ManageSecurityPolicyLink *bool `url:"manage_security_policy_link" json:"manage_security_policy_link,omitempty"` + ReadCode *bool `url:"read_code,omitempty" json:"read_code,omitempty"` + ReadRunners *bool `url:"read_runners" json:"read_runners,omitempty"` + ReadDependency *bool `url:"read_dependency,omitempty" json:"read_dependency,omitempty"` + ReadVulnerability *bool `url:"read_vulnerability,omitempty" json:"read_vulnerability,omitempty"` + RemoveGroup *bool `url:"remove_group" json:"remove_group,omitempty"` + RemoveProject *bool `url:"remove_project" json:"remove_project,omitempty"` +} + +// CreateMemberRole creates a new member role for a specified group. 
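+//
+// A minimal sketch (illustrative only; the group path and role attributes
+// are placeholders):
+//
+//	role, _, err := git.MemberRoles.CreateMemberRole("my-group", &gitlab.CreateMemberRoleOptions{
+//		Name:            gitlab.String("Custom guest"),
+//		BaseAccessLevel: gitlab.AccessLevel(gitlab.GuestPermissions),
+//		ReadCode:        gitlab.Bool(true),
+//	})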
+//
+// Gitlab API docs:
+// https://docs.gitlab.com/ee/api/member_roles.html#add-a-member-role-to-a-group
+func (s *MemberRolesService) CreateMemberRole(gid interface{}, opt *CreateMemberRoleOptions, options ...RequestOptionFunc) (*MemberRole, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/member_roles", PathEscape(group))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	mr := new(MemberRole)
+	resp, err := s.client.Do(req, mr)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return mr, resp, nil
+}
+
+// DeleteMemberRole deletes a member role from a specified group.
+//
+// Gitlab API docs:
+// https://docs.gitlab.com/ee/api/member_roles.html#remove-member-role-of-a-group
+func (s *MemberRolesService) DeleteMemberRole(gid interface{}, memberRole int, options ...RequestOptionFunc) (*Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("groups/%s/member_roles/%d", PathEscape(group), memberRole)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/merge_request_approvals.go b/vendor/github.com/xanzy/go-gitlab/merge_request_approvals.go
new file mode 100644
index 0000000000..d2f1f81ff3
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/merge_request_approvals.go
@@ -0,0 +1,440 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// MergeRequestApprovalsService handles communication with the merge request
+// approvals related methods of the GitLab API. This includes reading and
+// updating approval settings and approving or unapproving merge requests.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/merge_request_approvals.html
+type MergeRequestApprovalsService struct {
+	client *Client
+}
+
+// MergeRequestApprovals represents GitLab merge request approvals.
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_request_approvals.html#merge-request-level-mr-approvals +type MergeRequestApprovals struct { + ID int `json:"id"` + IID int `json:"iid"` + ProjectID int `json:"project_id"` + Title string `json:"title"` + Description string `json:"description"` + State string `json:"state"` + CreatedAt *time.Time `json:"created_at"` + UpdatedAt *time.Time `json:"updated_at"` + MergeStatus string `json:"merge_status"` + Approved bool `json:"approved"` + ApprovalsBeforeMerge int `json:"approvals_before_merge"` + ApprovalsRequired int `json:"approvals_required"` + ApprovalsLeft int `json:"approvals_left"` + RequirePasswordToApprove bool `json:"require_password_to_approve"` + ApprovedBy []*MergeRequestApproverUser `json:"approved_by"` + SuggestedApprovers []*BasicUser `json:"suggested_approvers"` + Approvers []*MergeRequestApproverUser `json:"approvers"` + ApproverGroups []*MergeRequestApproverGroup `json:"approver_groups"` + UserHasApproved bool `json:"user_has_approved"` + UserCanApprove bool `json:"user_can_approve"` + ApprovalRulesLeft []*MergeRequestApprovalRule `json:"approval_rules_left"` + HasApprovalRules bool `json:"has_approval_rules"` + MergeRequestApproversAvailable bool `json:"merge_request_approvers_available"` + MultipleApprovalRulesAvailable bool `json:"multiple_approval_rules_available"` +} + +func (m MergeRequestApprovals) String() string { + return Stringify(m) +} + +// MergeRequestApproverGroup represents GitLab project level merge request approver group. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_request_approvals.html#project-level-mr-approvals +type MergeRequestApproverGroup struct { + Group struct { + ID int `json:"id"` + Name string `json:"name"` + Path string `json:"path"` + Description string `json:"description"` + Visibility string `json:"visibility"` + AvatarURL string `json:"avatar_url"` + WebURL string `json:"web_url"` + FullName string `json:"full_name"` + FullPath string `json:"full_path"` + LFSEnabled bool `json:"lfs_enabled"` + RequestAccessEnabled bool `json:"request_access_enabled"` + } +} + +// MergeRequestApprovalRule represents a GitLab merge request approval rule. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_request_approvals.html#get-merge-request-level-rules +type MergeRequestApprovalRule struct { + ID int `json:"id"` + Name string `json:"name"` + RuleType string `json:"rule_type"` + ReportType string `json:"report_type"` + EligibleApprovers []*BasicUser `json:"eligible_approvers"` + ApprovalsRequired int `json:"approvals_required"` + SourceRule *ProjectApprovalRule `json:"source_rule"` + Users []*BasicUser `json:"users"` + Groups []*Group `json:"groups"` + ContainsHiddenGroups bool `json:"contains_hidden_groups"` + Section string `json:"section"` + ApprovedBy []*BasicUser `json:"approved_by"` + Approved bool `json:"approved"` +} + +// MergeRequestApprovalState represents a GitLab merge request approval state. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_request_approvals.html#get-the-approval-state-of-merge-requests +type MergeRequestApprovalState struct { + ApprovalRulesOverwritten bool `json:"approval_rules_overwritten"` + Rules []*MergeRequestApprovalRule `json:"rules"` +} + +// String is a stringify for MergeRequestApprovalRule +func (s MergeRequestApprovalRule) String() string { + return Stringify(s) +} + +// MergeRequestApproverUser represents GitLab project level merge request approver user. 
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_request_approvals.html#project-level-mr-approvals
+type MergeRequestApproverUser struct {
+	User *BasicUser
+}
+
+// ApproveMergeRequestOptions represents the available ApproveMergeRequest() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_request_approvals.html#approve-merge-request
+type ApproveMergeRequestOptions struct {
+	SHA *string `url:"sha,omitempty" json:"sha,omitempty"`
+}
+
+// ApproveMergeRequest approves a merge request on GitLab. If a non-empty SHA
+// is provided, it must match the SHA at the HEAD of the MR.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_request_approvals.html#approve-merge-request
+func (s *MergeRequestApprovalsService) ApproveMergeRequest(pid interface{}, mr int, opt *ApproveMergeRequestOptions, options ...RequestOptionFunc) (*MergeRequestApprovals, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_requests/%d/approve", PathEscape(project), mr)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	m := new(MergeRequestApprovals)
+	resp, err := s.client.Do(req, m)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return m, resp, nil
+}
+
+// UnapproveMergeRequest unapproves a previously approved merge request on GitLab.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_request_approvals.html#unapprove-merge-request
+func (s *MergeRequestApprovalsService) UnapproveMergeRequest(pid interface{}, mr int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_requests/%d/unapprove", PathEscape(project), mr)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// ResetApprovalsOfMergeRequest clears all approvals of a merge request on
+// GitLab. Available only for bot users based on project or group tokens.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_request_approvals.html#reset-approvals-of-a-merge-request
+func (s *MergeRequestApprovalsService) ResetApprovalsOfMergeRequest(pid interface{}, mr int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_requests/%d/reset_approvals", PathEscape(project), mr)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// ChangeMergeRequestApprovalConfigurationOptions represents the available
+// ChangeMergeRequestApprovalConfiguration() options.
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_request_approvals.html#change-approval-configuration-deprecated +type ChangeMergeRequestApprovalConfigurationOptions struct { + ApprovalsRequired *int `url:"approvals_required,omitempty" json:"approvals_required,omitempty"` +} + +// GetConfiguration shows information about single merge request approvals +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_request_approvals.html#get-configuration-1 +func (s *MergeRequestApprovalsService) GetConfiguration(pid interface{}, mr int, options ...RequestOptionFunc) (*MergeRequestApprovals, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/approvals", PathEscape(project), mr) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + m := new(MergeRequestApprovals) + resp, err := s.client.Do(req, m) + if err != nil { + return nil, resp, err + } + + return m, resp, nil +} + +// ChangeApprovalConfiguration updates the approval configuration of a merge request. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_request_approvals.html#change-approval-configuration-deprecated +func (s *MergeRequestApprovalsService) ChangeApprovalConfiguration(pid interface{}, mergeRequest int, opt *ChangeMergeRequestApprovalConfigurationOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/approvals", PathEscape(project), mergeRequest) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + m := new(MergeRequest) + resp, err := s.client.Do(req, m) + if err != nil { + return nil, resp, err + } + + return m, resp, nil +} + +// ChangeMergeRequestAllowedApproversOptions represents the available +// ChangeMergeRequestAllowedApprovers() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_request_approvals.html#change-allowed-approvers-for-merge-request +type ChangeMergeRequestAllowedApproversOptions struct { + ApproverIDs []int `url:"approver_ids" json:"approver_ids"` + ApproverGroupIDs []int `url:"approver_group_ids" json:"approver_group_ids"` +} + +// ChangeAllowedApprovers updates the approvers for a merge request. 
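+//
+// A minimal usage sketch (illustrative only; project, MR, and user IDs are
+// placeholders):
+//
+//	mr, _, err := git.MergeRequestApprovals.ChangeAllowedApprovers(1234, 5, &gitlab.ChangeMergeRequestAllowedApproversOptions{
+//		ApproverIDs:      []int{42},
+//		ApproverGroupIDs: []int{},
+//	})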
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_request_approvals.html#change-allowed-approvers-for-merge-request +func (s *MergeRequestApprovalsService) ChangeAllowedApprovers(pid interface{}, mergeRequest int, opt *ChangeMergeRequestAllowedApproversOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/approvers", PathEscape(project), mergeRequest) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, nil, err + } + + m := new(MergeRequest) + resp, err := s.client.Do(req, m) + if err != nil { + return nil, resp, err + } + + return m, resp, nil +} + +// GetApprovalRules requests information about a merge request’s approval rules +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_request_approvals.html#get-merge-request-level-rules +func (s *MergeRequestApprovalsService) GetApprovalRules(pid interface{}, mergeRequest int, options ...RequestOptionFunc) ([]*MergeRequestApprovalRule, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/approval_rules", PathEscape(project), mergeRequest) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + var par []*MergeRequestApprovalRule + resp, err := s.client.Do(req, &par) + if err != nil { + return nil, resp, err + } + + return par, resp, nil +} + +// GetApprovalState requests information about a merge request’s approval state +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_request_approvals.html#get-the-approval-state-of-merge-requests +func (s *MergeRequestApprovalsService) GetApprovalState(pid interface{}, mergeRequest int, options ...RequestOptionFunc) (*MergeRequestApprovalState, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/approval_state", PathEscape(project), mergeRequest) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + var pas *MergeRequestApprovalState + resp, err := s.client.Do(req, &pas) + if err != nil { + return nil, resp, err + } + + return pas, resp, nil +} + +// CreateMergeRequestApprovalRuleOptions represents the available CreateApprovalRule() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_request_approvals.html#create-merge-request-level-rule +type CreateMergeRequestApprovalRuleOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + ApprovalsRequired *int `url:"approvals_required,omitempty" json:"approvals_required,omitempty"` + ApprovalProjectRuleID *int `url:"approval_project_rule_id,omitempty" json:"approval_project_rule_id,omitempty"` + UserIDs *[]int `url:"user_ids,omitempty" json:"user_ids,omitempty"` + GroupIDs *[]int `url:"group_ids,omitempty" json:"group_ids,omitempty"` +} + +// CreateApprovalRule creates a new MR level approval rule. 
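+//
+// A minimal usage sketch (illustrative only; the IDs and rule name are
+// placeholders):
+//
+//	rule, _, err := git.MergeRequestApprovals.CreateApprovalRule(1234, 5, &gitlab.CreateMergeRequestApprovalRuleOptions{
+//		Name:              gitlab.String("security"),
+//		ApprovalsRequired: gitlab.Int(2),
+//		UserIDs:           &[]int{42, 43},
+//	})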
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_request_approvals.html#create-merge-request-level-rule
+func (s *MergeRequestApprovalsService) CreateApprovalRule(pid interface{}, mergeRequest int, opt *CreateMergeRequestApprovalRuleOptions, options ...RequestOptionFunc) (*MergeRequestApprovalRule, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_requests/%d/approval_rules", PathEscape(project), mergeRequest)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	par := new(MergeRequestApprovalRule)
+	resp, err := s.client.Do(req, &par)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return par, resp, nil
+}
+
+// UpdateMergeRequestApprovalRuleOptions represents the available UpdateApprovalRule()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_request_approvals.html#update-merge-request-level-rule
+type UpdateMergeRequestApprovalRuleOptions struct {
+	Name              *string `url:"name,omitempty" json:"name,omitempty"`
+	ApprovalsRequired *int    `url:"approvals_required,omitempty" json:"approvals_required,omitempty"`
+	UserIDs           *[]int  `url:"user_ids,omitempty" json:"user_ids,omitempty"`
+	GroupIDs          *[]int  `url:"group_ids,omitempty" json:"group_ids,omitempty"`
+}
+
+// UpdateApprovalRule updates an existing approval rule with new options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_request_approvals.html#update-merge-request-level-rule
+func (s *MergeRequestApprovalsService) UpdateApprovalRule(pid interface{}, mergeRequest int, approvalRule int, opt *UpdateMergeRequestApprovalRuleOptions, options ...RequestOptionFunc) (*MergeRequestApprovalRule, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_requests/%d/approval_rules/%d", PathEscape(project), mergeRequest, approvalRule)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	par := new(MergeRequestApprovalRule)
+	resp, err := s.client.Do(req, &par)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return par, resp, nil
+}
+
+// DeleteApprovalRule deletes an MR level approval rule.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_request_approvals.html#delete-merge-request-level-rule
+func (s *MergeRequestApprovalsService) DeleteApprovalRule(pid interface{}, mergeRequest int, approvalRule int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_requests/%d/approval_rules/%d", PathEscape(project), mergeRequest, approvalRule)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/merge_requests.go b/vendor/github.com/xanzy/go-gitlab/merge_requests.go
new file mode 100644
index 0000000000..a9e8d2e5c6
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/merge_requests.go
@@ -0,0 +1,1080 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "encoding/json" + "fmt" + "net/http" + "time" +) + +// MergeRequestsService handles communication with the merge requests related +// methods of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/merge_requests.html +type MergeRequestsService struct { + client *Client + timeStats *timeStatsService +} + +// MergeRequest represents a GitLab merge request. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/merge_requests.html +type MergeRequest struct { + ID int `json:"id"` + IID int `json:"iid"` + TargetBranch string `json:"target_branch"` + SourceBranch string `json:"source_branch"` + ProjectID int `json:"project_id"` + Title string `json:"title"` + State string `json:"state"` + CreatedAt *time.Time `json:"created_at"` + UpdatedAt *time.Time `json:"updated_at"` + Upvotes int `json:"upvotes"` + Downvotes int `json:"downvotes"` + Author *BasicUser `json:"author"` + Assignee *BasicUser `json:"assignee"` + Assignees []*BasicUser `json:"assignees"` + Reviewers []*BasicUser `json:"reviewers"` + SourceProjectID int `json:"source_project_id"` + TargetProjectID int `json:"target_project_id"` + Labels Labels `json:"labels"` + LabelDetails []*LabelDetails `json:"label_details"` + Description string `json:"description"` + Draft bool `json:"draft"` + WorkInProgress bool `json:"work_in_progress"` + Milestone *Milestone `json:"milestone"` + MergeWhenPipelineSucceeds bool `json:"merge_when_pipeline_succeeds"` + DetailedMergeStatus string `json:"detailed_merge_status"` + MergeError string `json:"merge_error"` + MergedBy *BasicUser `json:"merged_by"` + MergedAt *time.Time `json:"merged_at"` + ClosedBy *BasicUser `json:"closed_by"` + ClosedAt *time.Time `json:"closed_at"` + Subscribed bool `json:"subscribed"` + SHA string `json:"sha"` + MergeCommitSHA string `json:"merge_commit_sha"` + SquashCommitSHA string `json:"squash_commit_sha"` + UserNotesCount int `json:"user_notes_count"` + ChangesCount string `json:"changes_count"` + ShouldRemoveSourceBranch bool `json:"should_remove_source_branch"` + ForceRemoveSourceBranch bool `json:"force_remove_source_branch"` + AllowCollaboration bool `json:"allow_collaboration"` + WebURL string `json:"web_url"` + References *IssueReferences `json:"references"` + DiscussionLocked bool `json:"discussion_locked"` + Changes []*MergeRequestDiff `json:"changes"` + User struct { + CanMerge bool `json:"can_merge"` + } `json:"user"` + TimeStats *TimeStats `json:"time_stats"` + Squash bool `json:"squash"` + Pipeline *PipelineInfo `json:"pipeline"` + HeadPipeline *Pipeline `json:"head_pipeline"` + DiffRefs struct { + BaseSha string `json:"base_sha"` + HeadSha string `json:"head_sha"` + StartSha string `json:"start_sha"` + } `json:"diff_refs"` + DivergedCommitsCount int `json:"diverged_commits_count"` + RebaseInProgress bool `json:"rebase_in_progress"` + ApprovalsBeforeMerge int `json:"approvals_before_merge"` + Reference string `json:"reference"` + FirstContribution bool `json:"first_contribution"` + TaskCompletionStatus *TasksCompletionStatus `json:"task_completion_status"` + HasConflicts bool 
`json:"has_conflicts"`
+	BlockingDiscussionsResolved bool `json:"blocking_discussions_resolved"`
+	Overflow                    bool `json:"overflow"`
+
+	// Deprecated: This parameter is replaced by DetailedMergeStatus in GitLab 15.6.
+	MergeStatus string `json:"merge_status"`
+}
+
+func (m MergeRequest) String() string {
+	return Stringify(m)
+}
+
+func (m *MergeRequest) UnmarshalJSON(data []byte) error {
+	type alias MergeRequest
+
+	raw := make(map[string]interface{})
+	err := json.Unmarshal(data, &raw)
+	if err != nil {
+		return err
+	}
+
+	labelDetails, ok := raw["labels"].([]interface{})
+	if ok && len(labelDetails) > 0 {
+		// We only want to change anything if we got label details.
+		if _, ok := labelDetails[0].(map[string]interface{}); !ok {
+			return json.Unmarshal(data, (*alias)(m))
+		}
+
+		labels := make([]interface{}, len(labelDetails))
+		for i, details := range labelDetails {
+			labels[i] = details.(map[string]interface{})["name"]
+		}
+
+		// Set the correct values
+		raw["labels"] = labels
+		raw["label_details"] = labelDetails
+
+		data, err = json.Marshal(raw)
+		if err != nil {
+			return err
+		}
+	}
+
+	return json.Unmarshal(data, (*alias)(m))
+}
+
+// MergeRequestDiff represents a GitLab merge request diff.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#list-merge-request-diffs
+type MergeRequestDiff struct {
+	OldPath     string `json:"old_path"`
+	NewPath     string `json:"new_path"`
+	AMode       string `json:"a_mode"`
+	BMode       string `json:"b_mode"`
+	Diff        string `json:"diff"`
+	NewFile     bool   `json:"new_file"`
+	RenamedFile bool   `json:"renamed_file"`
+	DeletedFile bool   `json:"deleted_file"`
+}
+
+// MergeRequestDiffVersion represents a GitLab merge request diff version.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#get-merge-request-diff-versions
+type MergeRequestDiffVersion struct {
+	ID             int        `json:"id"`
+	HeadCommitSHA  string     `json:"head_commit_sha,omitempty"`
+	BaseCommitSHA  string     `json:"base_commit_sha,omitempty"`
+	StartCommitSHA string     `json:"start_commit_sha,omitempty"`
+	CreatedAt      *time.Time `json:"created_at,omitempty"`
+	MergeRequestID int        `json:"merge_request_id,omitempty"`
+	State          string     `json:"state,omitempty"`
+	RealSize       string     `json:"real_size,omitempty"`
+	Commits        []*Commit  `json:"commits,omitempty"`
+	Diffs          []*Diff    `json:"diffs,omitempty"`
+}
+
+func (m MergeRequestDiffVersion) String() string {
+	return Stringify(m)
+}
+
+// ListMergeRequestsOptions represents the available ListMergeRequests()
+// options.
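+//
+// Because every field is optional, callers set only what they filter on,
+// using the pointer helpers. A sketch (illustrative; assumes git is an
+// initialized *gitlab.Client):
+//
+//	opt := &gitlab.ListMergeRequestsOptions{
+//		State:   gitlab.String("opened"),
+//		OrderBy: gitlab.String("updated_at"),
+//		Scope:   gitlab.String("assigned_to_me"),
+//	}
+//	mrs, _, err := git.MergeRequests.ListMergeRequests(opt)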
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#list-merge-requests
+type ListMergeRequestsOptions struct {
+	ListOptions
+	Approved               *string           `url:"approved,omitempty" json:"approved,omitempty"`
+	State                  *string           `url:"state,omitempty" json:"state,omitempty"`
+	OrderBy                *string           `url:"order_by,omitempty" json:"order_by,omitempty"`
+	Sort                   *string           `url:"sort,omitempty" json:"sort,omitempty"`
+	Milestone              *string           `url:"milestone,omitempty" json:"milestone,omitempty"`
+	View                   *string           `url:"view,omitempty" json:"view,omitempty"`
+	Labels                 *LabelOptions     `url:"labels,comma,omitempty" json:"labels,omitempty"`
+	NotLabels              *LabelOptions     `url:"not[labels],comma,omitempty" json:"not[labels],omitempty"`
+	WithLabelsDetails      *bool             `url:"with_labels_details,omitempty" json:"with_labels_details,omitempty"`
+	WithMergeStatusRecheck *bool             `url:"with_merge_status_recheck,omitempty" json:"with_merge_status_recheck,omitempty"`
+	CreatedAfter           *time.Time        `url:"created_after,omitempty" json:"created_after,omitempty"`
+	CreatedBefore          *time.Time        `url:"created_before,omitempty" json:"created_before,omitempty"`
+	UpdatedAfter           *time.Time        `url:"updated_after,omitempty" json:"updated_after,omitempty"`
+	UpdatedBefore          *time.Time        `url:"updated_before,omitempty" json:"updated_before,omitempty"`
+	Scope                  *string           `url:"scope,omitempty" json:"scope,omitempty"`
+	AuthorID               *int              `url:"author_id,omitempty" json:"author_id,omitempty"`
+	AuthorUsername         *string           `url:"author_username,omitempty" json:"author_username,omitempty"`
+	NotAuthorUsername      *string           `url:"not[author_username],omitempty" json:"not[author_username],omitempty"`
+	AssigneeID             *AssigneeIDValue  `url:"assignee_id,omitempty" json:"assignee_id,omitempty"`
+	ApproverIDs            *ApproverIDsValue `url:"approver_ids,omitempty" json:"approver_ids,omitempty"`
+	ApprovedByIDs          *ApproverIDsValue `url:"approved_by_ids,omitempty" json:"approved_by_ids,omitempty"`
+	ReviewerID             *ReviewerIDValue  `url:"reviewer_id,omitempty" json:"reviewer_id,omitempty"`
+	ReviewerUsername       *string           `url:"reviewer_username,omitempty" json:"reviewer_username,omitempty"`
+	MyReactionEmoji        *string           `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"`
+	SourceBranch           *string           `url:"source_branch,omitempty" json:"source_branch,omitempty"`
+	TargetBranch           *string           `url:"target_branch,omitempty" json:"target_branch,omitempty"`
+	Search                 *string           `url:"search,omitempty" json:"search,omitempty"`
+	In                     *string           `url:"in,omitempty" json:"in,omitempty"`
+	Draft                  *bool             `url:"draft,omitempty" json:"draft,omitempty"`
+	WIP                    *string           `url:"wip,omitempty" json:"wip,omitempty"`
+}
+
+// ListMergeRequests gets all merge requests. The state parameter can be used
+// to get only merge requests with a given state (opened, closed, or merged)
+// or all of them (all). The pagination parameters page and per_page can be
+// used to restrict the list of merge requests.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#list-merge-requests
+func (s *MergeRequestsService) ListMergeRequests(opt *ListMergeRequestsOptions, options ...RequestOptionFunc) ([]*MergeRequest, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodGet, "merge_requests", opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var m []*MergeRequest
+	resp, err := s.client.Do(req, &m)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return m, resp, nil
+}
+
+// ListProjectMergeRequestsOptions represents the available ListProjectMergeRequests()
+// options.
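+//
+// Results are paginated, so callers typically loop until Response.NextPage
+// is zero. A sketch (illustrative; assumes git is an initialized
+// *gitlab.Client and "group/project" is an example project ID):
+//
+//	opt := &gitlab.ListProjectMergeRequestsOptions{
+//		ListOptions: gitlab.ListOptions{PerPage: 100},
+//		State:       gitlab.String("merged"),
+//	}
+//	for {
+//		mrs, resp, err := git.MergeRequests.ListProjectMergeRequests("group/project", opt)
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		_ = mrs // handle this page
+//		if resp.NextPage == 0 {
+//			break
+//		}
+//		opt.Page = resp.NextPage
+//	}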
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_requests.html#list-project-merge-requests +type ListProjectMergeRequestsOptions struct { + ListOptions + IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` + State *string `url:"state,omitempty" json:"state,omitempty"` + OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` + Sort *string `url:"sort,omitempty" json:"sort,omitempty"` + Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"` + View *string `url:"view,omitempty" json:"view,omitempty"` + Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` + NotLabels *LabelOptions `url:"not[labels],comma,omitempty" json:"not[labels],omitempty"` + WithLabelsDetails *bool `url:"with_labels_details,omitempty" json:"with_labels_details,omitempty"` + WithMergeStatusRecheck *bool `url:"with_merge_status_recheck,omitempty" json:"with_merge_status_recheck,omitempty"` + CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"` + CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` + UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"` + UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"` + Scope *string `url:"scope,omitempty" json:"scope,omitempty"` + AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` + AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"` + NotAuthorUsername *string `url:"not[author_username],omitempty" json:"not[author_username],omitempty"` + AssigneeID *AssigneeIDValue `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` + ApproverIDs *ApproverIDsValue `url:"approver_ids,omitempty" json:"approver_ids,omitempty"` + ApprovedByIDs *ApproverIDsValue `url:"approved_by_ids,omitempty" json:"approved_by_ids,omitempty"` + ReviewerID *ReviewerIDValue `url:"reviewer_id,omitempty" json:"reviewer_id,omitempty"` + ReviewerUsername *string `url:"reviewer_username,omitempty" json:"reviewer_username,omitempty"` + MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"` + SourceBranch *string `url:"source_branch,omitempty" json:"source_branch,omitempty"` + TargetBranch *string `url:"target_branch,omitempty" json:"target_branch,omitempty"` + Search *string `url:"search,omitempty" json:"search,omitempty"` + Draft *bool `url:"draft,omitempty" json:"draft,omitempty"` + WIP *string `url:"wip,omitempty" json:"wip,omitempty"` +} + +// ListProjectMergeRequests gets all merge requests for this project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_requests.html#list-project-merge-requests +func (s *MergeRequestsService) ListProjectMergeRequests(pid interface{}, opt *ListProjectMergeRequestsOptions, options ...RequestOptionFunc) ([]*MergeRequest, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var m []*MergeRequest + resp, err := s.client.Do(req, &m) + if err != nil { + return nil, resp, err + } + + return m, resp, nil +} + +// ListGroupMergeRequestsOptions represents the available ListGroupMergeRequests() +// options. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_requests.html#list-group-merge-requests +type ListGroupMergeRequestsOptions struct { + ListOptions + State *string `url:"state,omitempty" json:"state,omitempty"` + OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` + Sort *string `url:"sort,omitempty" json:"sort,omitempty"` + Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"` + View *string `url:"view,omitempty" json:"view,omitempty"` + Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` + NotLabels *LabelOptions `url:"not[labels],comma,omitempty" json:"not[labels],omitempty"` + WithLabelsDetails *bool `url:"with_labels_details,omitempty" json:"with_labels_details,omitempty"` + WithMergeStatusRecheck *bool `url:"with_merge_status_recheck,omitempty" json:"with_merge_status_recheck,omitempty"` + CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"` + CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` + UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"` + UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"` + Scope *string `url:"scope,omitempty" json:"scope,omitempty"` + AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` + AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"` + NotAuthorUsername *string `url:"not[author_username],omitempty" json:"not[author_username],omitempty"` + AssigneeID *AssigneeIDValue `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` + ApproverIDs *ApproverIDsValue `url:"approver_ids,omitempty" json:"approver_ids,omitempty"` + ApprovedByIDs *ApproverIDsValue `url:"approved_by_ids,omitempty" json:"approved_by_ids,omitempty"` + ReviewerID *ReviewerIDValue `url:"reviewer_id,omitempty" json:"reviewer_id,omitempty"` + ReviewerUsername *string `url:"reviewer_username,omitempty" json:"reviewer_username,omitempty"` + MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"` + SourceBranch *string `url:"source_branch,omitempty" json:"source_branch,omitempty"` + TargetBranch *string `url:"target_branch,omitempty" json:"target_branch,omitempty"` + Search *string `url:"search,omitempty" json:"search,omitempty"` + In *string `url:"in,omitempty" json:"in,omitempty"` + Draft *bool `url:"draft,omitempty" json:"draft,omitempty"` + WIP *string `url:"wip,omitempty" json:"wip,omitempty"` +} + +// ListGroupMergeRequests gets all merge requests for this group. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_requests.html#list-group-merge-requests +func (s *MergeRequestsService) ListGroupMergeRequests(gid interface{}, opt *ListGroupMergeRequestsOptions, options ...RequestOptionFunc) ([]*MergeRequest, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/merge_requests", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var m []*MergeRequest + resp, err := s.client.Do(req, &m) + if err != nil { + return nil, resp, err + } + + return m, resp, nil +} + +// GetMergeRequestsOptions represents the available GetMergeRequests() +// options. 
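+//
+// A sketch of fetching a single MR with these options (illustrative; assumes
+// git is an initialized *gitlab.Client):
+//
+//	mr, _, err := git.MergeRequests.GetMergeRequest("group/project", 42,
+//		&gitlab.GetMergeRequestsOptions{
+//			IncludeDivergedCommitsCount: gitlab.Bool(true),
+//			IncludeRebaseInProgress:     gitlab.Bool(true),
+//		})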
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#get-single-mr
+type GetMergeRequestsOptions struct {
+	RenderHTML                  *bool `url:"render_html,omitempty" json:"render_html,omitempty"`
+	IncludeDivergedCommitsCount *bool `url:"include_diverged_commits_count,omitempty" json:"include_diverged_commits_count,omitempty"`
+	IncludeRebaseInProgress     *bool `url:"include_rebase_in_progress,omitempty" json:"include_rebase_in_progress,omitempty"`
+}
+
+// GetMergeRequest shows information about a single merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#get-single-mr
+func (s *MergeRequestsService) GetMergeRequest(pid interface{}, mergeRequest int, opt *GetMergeRequestsOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_requests/%d", PathEscape(project), mergeRequest)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	m := new(MergeRequest)
+	resp, err := s.client.Do(req, m)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return m, resp, nil
+}
+
+// GetMergeRequestApprovals gets information about a merge request's approvals.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_request_approvals.html#merge-request-level-mr-approvals
+func (s *MergeRequestsService) GetMergeRequestApprovals(pid interface{}, mergeRequest int, options ...RequestOptionFunc) (*MergeRequestApprovals, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_requests/%d/approvals", PathEscape(project), mergeRequest)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	a := new(MergeRequestApprovals)
+	resp, err := s.client.Do(req, a)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return a, resp, nil
+}
+
+// GetMergeRequestCommitsOptions represents the available GetMergeRequestCommits()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#get-single-merge-request-commits
+type GetMergeRequestCommitsOptions ListOptions
+
+// GetMergeRequestCommits gets a list of merge request commits.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#get-single-merge-request-commits
+func (s *MergeRequestsService) GetMergeRequestCommits(pid interface{}, mergeRequest int, opt *GetMergeRequestCommitsOptions, options ...RequestOptionFunc) ([]*Commit, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_requests/%d/commits", PathEscape(project), mergeRequest)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var c []*Commit
+	resp, err := s.client.Do(req, &c)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return c, resp, nil
+}
+
+// GetMergeRequestChangesOptions represents the available GetMergeRequestChanges()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#get-single-merge-request-changes
+type GetMergeRequestChangesOptions struct {
+	AccessRawDiffs *bool `url:"access_raw_diffs,omitempty" json:"access_raw_diffs,omitempty"`
+	Unidiff        *bool `url:"unidiff,omitempty" json:"unidiff,omitempty"`
+}
+
+// GetMergeRequestChanges shows information about the merge request including
+// its files and changes.
+//
+// Deprecated: This endpoint has been replaced by
+// MergeRequestsService.ListMergeRequestDiffs()
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#get-single-merge-request-changes
+func (s *MergeRequestsService) GetMergeRequestChanges(pid interface{}, mergeRequest int, opt *GetMergeRequestChangesOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_requests/%d/changes", PathEscape(project), mergeRequest)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	m := new(MergeRequest)
+	resp, err := s.client.Do(req, m)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return m, resp, nil
+}
+
+// ListMergeRequestDiffsOptions represents the available ListMergeRequestDiffs()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#list-merge-request-diffs
+type ListMergeRequestDiffsOptions struct {
+	ListOptions
+	Unidiff *bool `url:"unidiff,omitempty" json:"unidiff,omitempty"`
+}
+
+// ListMergeRequestDiffs lists the diffs of the files changed in a merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#list-merge-request-diffs
+func (s *MergeRequestsService) ListMergeRequestDiffs(pid interface{}, mergeRequest int, opt *ListMergeRequestDiffsOptions, options ...RequestOptionFunc) ([]*MergeRequestDiff, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_requests/%d/diffs", PathEscape(project), mergeRequest)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var m []*MergeRequestDiff
+	resp, err := s.client.Do(req, &m)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return m, resp, nil
+}
+
+// GetMergeRequestParticipants gets a list of merge request participants.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#get-single-merge-request-participants
+func (s *MergeRequestsService) GetMergeRequestParticipants(pid interface{}, mergeRequest int, options ...RequestOptionFunc) ([]*BasicUser, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_requests/%d/participants", PathEscape(project), mergeRequest)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var bu []*BasicUser
+	resp, err := s.client.Do(req, &bu)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return bu, resp, nil
+}
+
+// MergeRequestReviewer represents a GitLab merge request reviewer.
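+//
+// A sketch of inspecting reviewer states via GetMergeRequestReviewers()
+// (illustrative; assumes git is an initialized *gitlab.Client):
+//
+//	reviewers, _, err := git.MergeRequests.GetMergeRequestReviewers("group/project", 42)
+//	if err == nil {
+//		for _, r := range reviewers {
+//			fmt.Println(r.User.Username, r.State)
+//		}
+//	}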
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_requests.html#get-single-merge-request-reviewers +type MergeRequestReviewer struct { + User *BasicUser `json:"user"` + State string `json:"state"` + CreatedAt *time.Time `json:"created_at"` +} + +// GetMergeRequestReviewers gets a list of merge request reviewers. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_requests.html#get-single-merge-request-reviewers +func (s *MergeRequestsService) GetMergeRequestReviewers(pid interface{}, mergeRequest int, options ...RequestOptionFunc) ([]*MergeRequestReviewer, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/reviewers", PathEscape(project), mergeRequest) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + var mrr []*MergeRequestReviewer + resp, err := s.client.Do(req, &mrr) + if err != nil { + return nil, resp, err + } + + return mrr, resp, nil +} + +// ListMergeRequestPipelines gets all pipelines for the provided merge request. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_requests.html#list-merge-request-pipelines +func (s *MergeRequestsService) ListMergeRequestPipelines(pid interface{}, mergeRequest int, options ...RequestOptionFunc) ([]*PipelineInfo, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/pipelines", PathEscape(project), mergeRequest) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + var p []*PipelineInfo + resp, err := s.client.Do(req, &p) + if err != nil { + return nil, resp, err + } + + return p, resp, nil +} + +// CreateMergeRequestPipeline creates a new pipeline for a merge request. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_requests.html#create-merge-request-pipeline +func (s *MergeRequestsService) CreateMergeRequestPipeline(pid interface{}, mergeRequest int, options ...RequestOptionFunc) (*PipelineInfo, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/pipelines", PathEscape(project), mergeRequest) + + req, err := s.client.NewRequest(http.MethodPost, u, nil, options) + if err != nil { + return nil, nil, err + } + + p := new(PipelineInfo) + resp, err := s.client.Do(req, p) + if err != nil { + return nil, resp, err + } + + return p, resp, nil +} + +// GetIssuesClosedOnMergeOptions represents the available GetIssuesClosedOnMerge() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_requests.html#list-issues-that-close-on-merge +type GetIssuesClosedOnMergeOptions ListOptions + +// GetIssuesClosedOnMerge gets all the issues that would be closed by merging the +// provided merge request. 
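+//
+// A sketch (illustrative; assumes git is an initialized *gitlab.Client):
+//
+//	issues, _, err := git.MergeRequests.GetIssuesClosedOnMerge("group/project", 42,
+//		&gitlab.GetIssuesClosedOnMergeOptions{PerPage: 50})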
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_requests.html#list-issues-that-close-on-merge +func (s *MergeRequestsService) GetIssuesClosedOnMerge(pid interface{}, mergeRequest int, opt *GetIssuesClosedOnMergeOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/closes_issues", PathEscape(project), mergeRequest) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var i []*Issue + resp, err := s.client.Do(req, &i) + if err != nil { + return nil, resp, err + } + + return i, resp, nil +} + +// CreateMergeRequestOptions represents the available CreateMergeRequest() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_requests.html#create-mr +type CreateMergeRequestOptions struct { + Title *string `url:"title,omitempty" json:"title,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + SourceBranch *string `url:"source_branch,omitempty" json:"source_branch,omitempty"` + TargetBranch *string `url:"target_branch,omitempty" json:"target_branch,omitempty"` + Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` + AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` + AssigneeIDs *[]int `url:"assignee_ids,omitempty" json:"assignee_ids,omitempty"` + ReviewerIDs *[]int `url:"reviewer_ids,omitempty" json:"reviewer_ids,omitempty"` + TargetProjectID *int `url:"target_project_id,omitempty" json:"target_project_id,omitempty"` + MilestoneID *int `url:"milestone_id,omitempty" json:"milestone_id,omitempty"` + RemoveSourceBranch *bool `url:"remove_source_branch,omitempty" json:"remove_source_branch,omitempty"` + Squash *bool `url:"squash,omitempty" json:"squash,omitempty"` + AllowCollaboration *bool `url:"allow_collaboration,omitempty" json:"allow_collaboration,omitempty"` + ApprovalsBeforeMerge *int `url:"approvals_before_merge,omitempty" json:"approvals_before_merge,omitempty"` +} + +// CreateMergeRequest creates a new merge request. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_requests.html#create-mr +func (s *MergeRequestsService) CreateMergeRequest(pid interface{}, opt *CreateMergeRequestOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + m := new(MergeRequest) + resp, err := s.client.Do(req, m) + if err != nil { + return nil, resp, err + } + + return m, resp, nil +} + +// UpdateMergeRequestOptions represents the available UpdateMergeRequest() +// options. 
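+//
+// A sketch of a typical update that adds a label and closes the MR
+// (illustrative; assumes git is an initialized *gitlab.Client):
+//
+//	mr, _, err := git.MergeRequests.UpdateMergeRequest("group/project", 42,
+//		&gitlab.UpdateMergeRequestOptions{
+//			AddLabels:  &gitlab.LabelOptions{"wontfix"},
+//			StateEvent: gitlab.String("close"),
+//		})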
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#update-mr
+type UpdateMergeRequestOptions struct {
+	Title              *string       `url:"title,omitempty" json:"title,omitempty"`
+	Description        *string       `url:"description,omitempty" json:"description,omitempty"`
+	TargetBranch       *string       `url:"target_branch,omitempty" json:"target_branch,omitempty"`
+	AssigneeID         *int          `url:"assignee_id,omitempty" json:"assignee_id,omitempty"`
+	AssigneeIDs        *[]int        `url:"assignee_ids,omitempty" json:"assignee_ids,omitempty"`
+	ReviewerIDs        *[]int        `url:"reviewer_ids,omitempty" json:"reviewer_ids,omitempty"`
+	Labels             *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"`
+	AddLabels          *LabelOptions `url:"add_labels,comma,omitempty" json:"add_labels,omitempty"`
+	RemoveLabels       *LabelOptions `url:"remove_labels,comma,omitempty" json:"remove_labels,omitempty"`
+	MilestoneID        *int          `url:"milestone_id,omitempty" json:"milestone_id,omitempty"`
+	StateEvent         *string       `url:"state_event,omitempty" json:"state_event,omitempty"`
+	RemoveSourceBranch *bool         `url:"remove_source_branch,omitempty" json:"remove_source_branch,omitempty"`
+	Squash             *bool         `url:"squash,omitempty" json:"squash,omitempty"`
+	DiscussionLocked   *bool         `url:"discussion_locked,omitempty" json:"discussion_locked,omitempty"`
+	AllowCollaboration *bool         `url:"allow_collaboration,omitempty" json:"allow_collaboration,omitempty"`
+}
+
+// UpdateMergeRequest updates an existing merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#update-mr
+func (s *MergeRequestsService) UpdateMergeRequest(pid interface{}, mergeRequest int, opt *UpdateMergeRequestOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_requests/%d", PathEscape(project), mergeRequest)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	m := new(MergeRequest)
+	resp, err := s.client.Do(req, m)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return m, resp, nil
+}
+
+// DeleteMergeRequest deletes a merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#delete-a-merge-request
+func (s *MergeRequestsService) DeleteMergeRequest(pid interface{}, mergeRequest int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_requests/%d", PathEscape(project), mergeRequest)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// AcceptMergeRequestOptions represents the available AcceptMergeRequest()
+// options.
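+//
+// A sketch of merging once the pipeline succeeds, squashing the commits
+// (illustrative; assumes git is an initialized *gitlab.Client):
+//
+//	mr, _, err := git.MergeRequests.AcceptMergeRequest("group/project", 42,
+//		&gitlab.AcceptMergeRequestOptions{
+//			MergeWhenPipelineSucceeds: gitlab.Bool(true),
+//			Squash:                    gitlab.Bool(true),
+//		})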
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#merge-a-merge-request
+type AcceptMergeRequestOptions struct {
+	MergeCommitMessage        *string `url:"merge_commit_message,omitempty" json:"merge_commit_message,omitempty"`
+	SquashCommitMessage       *string `url:"squash_commit_message,omitempty" json:"squash_commit_message,omitempty"`
+	Squash                    *bool   `url:"squash,omitempty" json:"squash,omitempty"`
+	ShouldRemoveSourceBranch  *bool   `url:"should_remove_source_branch,omitempty" json:"should_remove_source_branch,omitempty"`
+	MergeWhenPipelineSucceeds *bool   `url:"merge_when_pipeline_succeeds,omitempty" json:"merge_when_pipeline_succeeds,omitempty"`
+	SHA                       *string `url:"sha,omitempty" json:"sha,omitempty"`
+}
+
+// AcceptMergeRequest merges changes submitted with a merge request using this
+// API. If the merge succeeds, you get a 200 OK response. If the merge request
+// has conflicts and cannot be merged, you get a 405 response with the error
+// message 'Branch cannot be merged'. If the merge request is already merged
+// or closed, you get a 405 response with the error message 'Method Not
+// Allowed'.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#merge-a-merge-request
+func (s *MergeRequestsService) AcceptMergeRequest(pid interface{}, mergeRequest int, opt *AcceptMergeRequestOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_requests/%d/merge", PathEscape(project), mergeRequest)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	m := new(MergeRequest)
+	resp, err := s.client.Do(req, m)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return m, resp, nil
+}
+
+// CancelMergeWhenPipelineSucceeds cancels a merge when the pipeline succeeds.
+// If you don't have permission to accept this merge request, you'll get a 401
+// response. If the merge request is already merged or closed, you get a 405
+// response with the error message 'Method Not Allowed'. If the merge request
+// is not set to be merged when the pipeline succeeds, you'll get a 406 error.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#cancel-merge-when-pipeline-succeeds
+func (s *MergeRequestsService) CancelMergeWhenPipelineSucceeds(pid interface{}, mergeRequest int, options ...RequestOptionFunc) (*MergeRequest, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_requests/%d/cancel_merge_when_pipeline_succeeds", PathEscape(project), mergeRequest)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	m := new(MergeRequest)
+	resp, err := s.client.Do(req, m)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return m, resp, nil
+}
+
+// RebaseMergeRequestOptions represents the available RebaseMergeRequest()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#rebase-a-merge-request
+type RebaseMergeRequestOptions struct {
+	SkipCI *bool `url:"skip_ci,omitempty" json:"skip_ci,omitempty"`
+}
+
+// RebaseMergeRequest automatically rebases the source_branch of the merge
+// request against its target_branch. If you don’t have permission to push
+// to the merge request’s source branch, you’ll get a 403 Forbidden response.
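+//
+// The endpoint only enqueues the rebase; callers can poll GetMergeRequest()
+// with IncludeRebaseInProgress until RebaseInProgress is false. A sketch
+// (illustrative; assumes git is an initialized *gitlab.Client):
+//
+//	_, err := git.MergeRequests.RebaseMergeRequest("group/project", 42,
+//		&gitlab.RebaseMergeRequestOptions{SkipCI: gitlab.Bool(true)})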
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#rebase-a-merge-request
+func (s *MergeRequestsService) RebaseMergeRequest(pid interface{}, mergeRequest int, opt *RebaseMergeRequestOptions, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_requests/%d/rebase", PathEscape(project), mergeRequest)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// GetMergeRequestDiffVersionsOptions represents the available
+// GetMergeRequestDiffVersions() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#get-merge-request-diff-versions
+type GetMergeRequestDiffVersionsOptions ListOptions
+
+// GetMergeRequestDiffVersions gets a list of merge request diff versions.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#get-merge-request-diff-versions
+func (s *MergeRequestsService) GetMergeRequestDiffVersions(pid interface{}, mergeRequest int, opt *GetMergeRequestDiffVersionsOptions, options ...RequestOptionFunc) ([]*MergeRequestDiffVersion, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_requests/%d/versions", PathEscape(project), mergeRequest)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var v []*MergeRequestDiffVersion
+	resp, err := s.client.Do(req, &v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, nil
+}
+
+// GetSingleMergeRequestDiffVersionOptions represents the available
+// GetSingleMergeRequestDiffVersion() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#get-a-single-merge-request-diff-version
+type GetSingleMergeRequestDiffVersionOptions struct {
+	Unidiff *bool `url:"unidiff,omitempty" json:"unidiff,omitempty"`
+}
+
+// GetSingleMergeRequestDiffVersion gets a single merge request diff version.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#get-a-single-merge-request-diff-version
+func (s *MergeRequestsService) GetSingleMergeRequestDiffVersion(pid interface{}, mergeRequest, version int, opt *GetSingleMergeRequestDiffVersionOptions, options ...RequestOptionFunc) (*MergeRequestDiffVersion, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_requests/%d/versions/%d", PathEscape(project), mergeRequest, version)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(MergeRequestDiffVersion)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, nil
+}
+
+// SubscribeToMergeRequest subscribes the authenticated user to the given merge
+// request to receive notifications. If the user is already subscribed to the
+// merge request, the status code 304 is returned.
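+//
+// A sketch that treats an existing subscription (304) as a no-op
+// (illustrative; assumes git is an initialized *gitlab.Client):
+//
+//	mr, resp, err := git.MergeRequests.SubscribeToMergeRequest("group/project", 42)
+//	if resp != nil && resp.StatusCode == http.StatusNotModified {
+//		// Already subscribed; nothing to do.
+//	} else if err != nil {
+//		log.Fatal(err)
+//	}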
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_requests.html#subscribe-to-a-merge-request +func (s *MergeRequestsService) SubscribeToMergeRequest(pid interface{}, mergeRequest int, options ...RequestOptionFunc) (*MergeRequest, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/subscribe", PathEscape(project), mergeRequest) + + req, err := s.client.NewRequest(http.MethodPost, u, nil, options) + if err != nil { + return nil, nil, err + } + + m := new(MergeRequest) + resp, err := s.client.Do(req, m) + if err != nil { + return nil, resp, err + } + + return m, resp, nil +} + +// UnsubscribeFromMergeRequest unsubscribes the authenticated user from the +// given merge request to not receive notifications from that merge request. +// If the user is not subscribed to the merge request, status code 304 is +// returned. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_requests.html#unsubscribe-from-a-merge-request +func (s *MergeRequestsService) UnsubscribeFromMergeRequest(pid interface{}, mergeRequest int, options ...RequestOptionFunc) (*MergeRequest, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/unsubscribe", PathEscape(project), mergeRequest) + + req, err := s.client.NewRequest(http.MethodPost, u, nil, options) + if err != nil { + return nil, nil, err + } + + m := new(MergeRequest) + resp, err := s.client.Do(req, m) + if err != nil { + return nil, resp, err + } + + return m, resp, nil +} + +// CreateTodo manually creates a todo for the current user on a merge request. +// If there already exists a todo for the user on that merge request, +// status code 304 is returned. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_requests.html#create-a-to-do-item +func (s *MergeRequestsService) CreateTodo(pid interface{}, mergeRequest int, options ...RequestOptionFunc) (*Todo, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/todo", PathEscape(project), mergeRequest) + + req, err := s.client.NewRequest(http.MethodPost, u, nil, options) + if err != nil { + return nil, nil, err + } + + t := new(Todo) + resp, err := s.client.Do(req, t) + if err != nil { + return nil, resp, err + } + + return t, resp, nil +} + +// SetTimeEstimate sets the time estimate for a single project merge request. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_requests.html#set-a-time-estimate-for-a-merge-request +func (s *MergeRequestsService) SetTimeEstimate(pid interface{}, mergeRequest int, opt *SetTimeEstimateOptions, options ...RequestOptionFunc) (*TimeStats, *Response, error) { + return s.timeStats.setTimeEstimate(pid, "merge_requests", mergeRequest, opt, options...) +} + +// ResetTimeEstimate resets the time estimate for a single project merge request. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_requests.html#reset-the-time-estimate-for-a-merge-request +func (s *MergeRequestsService) ResetTimeEstimate(pid interface{}, mergeRequest int, options ...RequestOptionFunc) (*TimeStats, *Response, error) { + return s.timeStats.resetTimeEstimate(pid, "merge_requests", mergeRequest, options...) +} + +// AddSpentTime adds spent time for a single project merge request. 
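+//
+// A sketch using GitLab's human-readable duration format (illustrative;
+// assumes git is an initialized *gitlab.Client):
+//
+//	stats, _, err := git.MergeRequests.AddSpentTime("group/project", 42,
+//		&gitlab.AddSpentTimeOptions{Duration: gitlab.String("1h30m")})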
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#add-spent-time-for-a-merge-request
+func (s *MergeRequestsService) AddSpentTime(pid interface{}, mergeRequest int, opt *AddSpentTimeOptions, options ...RequestOptionFunc) (*TimeStats, *Response, error) {
+	return s.timeStats.addSpentTime(pid, "merge_requests", mergeRequest, opt, options...)
+}
+
+// ResetSpentTime resets the spent time for a single project merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#reset-spent-time-for-a-merge-request
+func (s *MergeRequestsService) ResetSpentTime(pid interface{}, mergeRequest int, options ...RequestOptionFunc) (*TimeStats, *Response, error) {
+	return s.timeStats.resetSpentTime(pid, "merge_requests", mergeRequest, options...)
+}
+
+// GetTimeSpent gets the spent time for a single project merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#get-time-tracking-stats
+func (s *MergeRequestsService) GetTimeSpent(pid interface{}, mergeRequest int, options ...RequestOptionFunc) (*TimeStats, *Response, error) {
+	return s.timeStats.getTimeSpent(pid, "merge_requests", mergeRequest, options...)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/merge_trains.go b/vendor/github.com/xanzy/go-gitlab/merge_trains.go
new file mode 100644
index 0000000000..e55917fa0c
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/merge_trains.go
@@ -0,0 +1,170 @@
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// MergeTrainsService handles communication with the merge trains related
+// methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/merge_trains.html
+type MergeTrainsService struct {
+	client *Client
+}
+
+// MergeTrain represents a GitLab merge train.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/merge_trains.html
+type MergeTrain struct {
+	ID           int                     `json:"id"`
+	MergeRequest *MergeTrainMergeRequest `json:"merge_request"`
+	User         *BasicUser              `json:"user"`
+	Pipeline     *Pipeline               `json:"pipeline"`
+	CreatedAt    *time.Time              `json:"created_at"`
+	UpdatedAt    *time.Time              `json:"updated_at"`
+	TargetBranch string                  `json:"target_branch"`
+	Status       string                  `json:"status"`
+	MergedAt     *time.Time              `json:"merged_at"`
+	Duration     int                     `json:"duration"`
+}
+
+// MergeTrainMergeRequest represents a GitLab merge request inside a merge train.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/merge_trains.html
+type MergeTrainMergeRequest struct {
+	ID          int        `json:"id"`
+	IID         int        `json:"iid"`
+	ProjectID   int        `json:"project_id"`
+	Title       string     `json:"title"`
+	Description string     `json:"description"`
+	State       string     `json:"state"`
+	CreatedAt   *time.Time `json:"created_at"`
+	UpdatedAt   *time.Time `json:"updated_at"`
+	WebURL      string     `json:"web_url"`
+}
+
+// ListMergeTrainsOptions represents the available ListProjectMergeTrains() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_trains.html#list-merge-trains-for-a-project
+type ListMergeTrainsOptions struct {
+	ListOptions
+	Scope *string `url:"scope,omitempty" json:"scope,omitempty"`
+	Sort  *string `url:"sort,omitempty" json:"sort,omitempty"`
+}
+
+// ListProjectMergeTrains gets a list of merge trains in a project.
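+//
+// A sketch (illustrative; assumes git is an initialized *gitlab.Client):
+//
+//	trains, _, err := git.MergeTrains.ListProjectMergeTrains("group/project",
+//		&gitlab.ListMergeTrainsOptions{Scope: gitlab.String("active")})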
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_trains.html#list-merge-trains-for-a-project
+func (s *MergeTrainsService) ListProjectMergeTrains(pid interface{}, opt *ListMergeTrainsOptions, options ...RequestOptionFunc) ([]*MergeTrain, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_trains", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var mts []*MergeTrain
+	resp, err := s.client.Do(req, &mts)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return mts, resp, nil
+}
+
+// ListMergeRequestInMergeTrain gets a list of merge requests added to a merge
+// train for the requested target branch.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_trains.html#list-merge-requests-in-a-merge-train
+func (s *MergeTrainsService) ListMergeRequestInMergeTrain(pid interface{}, targetBranch string, opts *ListMergeTrainsOptions, options ...RequestOptionFunc) ([]*MergeTrain, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_trains/%s", PathEscape(project), targetBranch)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opts, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var mts []*MergeTrain
+	resp, err := s.client.Do(req, &mts)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return mts, resp, nil
+}
+
+// GetMergeRequestOnAMergeTrain gets merge train information for the requested
+// merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_trains.html#get-the-status-of-a-merge-request-on-a-merge-train
+func (s *MergeTrainsService) GetMergeRequestOnAMergeTrain(pid interface{}, mergeRequest int, options ...RequestOptionFunc) (*MergeTrain, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_trains/merge_requests/%d", PathEscape(project), mergeRequest)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	mt := new(MergeTrain)
+	resp, err := s.client.Do(req, mt)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return mt, resp, nil
+}
+
+// AddMergeRequestToMergeTrainOptions represents the available
+// AddMergeRequestToMergeTrain() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_trains.html#add-a-merge-request-to-a-merge-train
+type AddMergeRequestToMergeTrainOptions struct {
+	WhenPipelineSucceeds *bool   `url:"when_pipeline_succeeds,omitempty" json:"when_pipeline_succeeds,omitempty"`
+	SHA                  *string `url:"sha,omitempty" json:"sha,omitempty"`
+	Squash               *bool   `url:"squash,omitempty" json:"squash,omitempty"`
+}
+
+// AddMergeRequestToMergeTrain adds a merge request to the merge train targeting
+// the merge request’s target branch.
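+//
+// A sketch (illustrative; assumes git is an initialized *gitlab.Client):
+//
+//	trains, _, err := git.MergeTrains.AddMergeRequestToMergeTrain("group/project", 42,
+//		&gitlab.AddMergeRequestToMergeTrainOptions{
+//			WhenPipelineSucceeds: gitlab.Bool(true),
+//		})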
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_trains.html#add-a-merge-request-to-a-merge-train
+func (s *MergeTrainsService) AddMergeRequestToMergeTrain(pid interface{}, mergeRequest int, opts *AddMergeRequestToMergeTrainOptions, options ...RequestOptionFunc) ([]*MergeTrain, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_trains/merge_requests/%d", PathEscape(project), mergeRequest)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opts, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var mts []*MergeTrain
+	resp, err := s.client.Do(req, &mts)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return mts, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/metadata.go b/vendor/github.com/xanzy/go-gitlab/metadata.go
new file mode 100644
index 0000000000..db23a81e46
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/metadata.go
@@ -0,0 +1,63 @@
+//
+// Copyright 2022, Timo Furrer
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import "net/http"
+
+// MetadataService handles communication with the GitLab server instance to
+// retrieve its metadata information via the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/metadata.html
+type MetadataService struct {
+	client *Client
+}
+
+// Metadata represents a GitLab instance version.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/metadata.html
+type Metadata struct {
+	Version  string `json:"version"`
+	Revision string `json:"revision"`
+	KAS      struct {
+		Enabled     bool   `json:"enabled"`
+		ExternalURL string `json:"externalUrl"`
+		Version     string `json:"version"`
+	} `json:"kas"`
+	Enterprise bool `json:"enterprise"`
+}
+
+func (s Metadata) String() string {
+	return Stringify(s)
+}
+
+// GetMetadata gets the metadata of the GitLab server instance.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/metadata.html
+func (s *MetadataService) GetMetadata(options ...RequestOptionFunc) (*Metadata, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodGet, "metadata", nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(Metadata)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/milestones.go b/vendor/github.com/xanzy/go-gitlab/milestones.go
new file mode 100644
index 0000000000..17c97e031a
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/milestones.go
@@ -0,0 +1,272 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" + "time" +) + +// MilestonesService handles communication with the milestone related methods +// of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/milestones.html +type MilestonesService struct { + client *Client +} + +// Milestone represents a GitLab milestone. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/milestones.html +type Milestone struct { + ID int `json:"id"` + IID int `json:"iid"` + GroupID int `json:"group_id"` + ProjectID int `json:"project_id"` + Title string `json:"title"` + Description string `json:"description"` + StartDate *ISOTime `json:"start_date"` + DueDate *ISOTime `json:"due_date"` + State string `json:"state"` + WebURL string `json:"web_url"` + UpdatedAt *time.Time `json:"updated_at"` + CreatedAt *time.Time `json:"created_at"` + Expired *bool `json:"expired"` +} + +func (m Milestone) String() string { + return Stringify(m) +} + +// ListMilestonesOptions represents the available ListMilestones() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/milestones.html#list-project-milestones +type ListMilestonesOptions struct { + ListOptions + IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` + Title *string `url:"title,omitempty" json:"title,omitempty"` + State *string `url:"state,omitempty" json:"state,omitempty"` + Search *string `url:"search,omitempty" json:"search,omitempty"` + IncludeParentMilestones *bool `url:"include_parent_milestones,omitempty" json:"include_parent_milestones,omitempty"` +} + +// ListMilestones returns a list of project milestones. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/milestones.html#list-project-milestones +func (s *MilestonesService) ListMilestones(pid interface{}, opt *ListMilestonesOptions, options ...RequestOptionFunc) ([]*Milestone, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/milestones", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var m []*Milestone + resp, err := s.client.Do(req, &m) + if err != nil { + return nil, resp, err + } + + return m, resp, nil +} + +// GetMilestone gets a single project milestone. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/milestones.html#get-single-milestone +func (s *MilestonesService) GetMilestone(pid interface{}, milestone int, options ...RequestOptionFunc) (*Milestone, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/milestones/%d", PathEscape(project), milestone) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + m := new(Milestone) + resp, err := s.client.Do(req, m) + if err != nil { + return nil, resp, err + } + + return m, resp, nil +} + +// CreateMilestoneOptions represents the available CreateMilestone() options. 
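+//
+// StartDate and DueDate take the date-only ISOTime type, which wraps
+// time.Time. A sketch (illustrative; assumes git is an initialized
+// *gitlab.Client):
+//
+//	due := gitlab.ISOTime(time.Date(2025, time.June, 30, 0, 0, 0, 0, time.UTC))
+//	m, _, err := git.Milestones.CreateMilestone("group/project",
+//		&gitlab.CreateMilestoneOptions{
+//			Title:   gitlab.String("v1.0"),
+//			DueDate: &due,
+//		})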
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/milestones.html#create-new-milestone +type CreateMilestoneOptions struct { + Title *string `url:"title,omitempty" json:"title,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + StartDate *ISOTime `url:"start_date,omitempty" json:"start_date,omitempty"` + DueDate *ISOTime `url:"due_date,omitempty" json:"due_date,omitempty"` +} + +// CreateMilestone creates a new project milestone. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/milestones.html#create-new-milestone +func (s *MilestonesService) CreateMilestone(pid interface{}, opt *CreateMilestoneOptions, options ...RequestOptionFunc) (*Milestone, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/milestones", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + m := new(Milestone) + resp, err := s.client.Do(req, m) + if err != nil { + return nil, resp, err + } + + return m, resp, nil +} + +// UpdateMilestoneOptions represents the available UpdateMilestone() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/milestones.html#edit-milestone +type UpdateMilestoneOptions struct { + Title *string `url:"title,omitempty" json:"title,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + StartDate *ISOTime `url:"start_date,omitempty" json:"start_date,omitempty"` + DueDate *ISOTime `url:"due_date,omitempty" json:"due_date,omitempty"` + StateEvent *string `url:"state_event,omitempty" json:"state_event,omitempty"` +} + +// UpdateMilestone updates an existing project milestone. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/milestones.html#edit-milestone +func (s *MilestonesService) UpdateMilestone(pid interface{}, milestone int, opt *UpdateMilestoneOptions, options ...RequestOptionFunc) (*Milestone, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/milestones/%d", PathEscape(project), milestone) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, nil, err + } + + m := new(Milestone) + resp, err := s.client.Do(req, m) + if err != nil { + return nil, resp, err + } + + return m, resp, nil +} + +// DeleteMilestone deletes a specified project milestone. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/milestones.html#delete-project-milestone +func (s *MilestonesService) DeleteMilestone(pid interface{}, milestone int, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/milestones/%d", PathEscape(project), milestone) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + return s.client.Do(req, nil) +} + +// GetMilestoneIssuesOptions represents the available GetMilestoneIssues() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/milestones.html#get-all-issues-assigned-to-a-single-milestone +type GetMilestoneIssuesOptions ListOptions + +// GetMilestoneIssues gets all issues assigned to a single project milestone. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/milestones.html#get-all-issues-assigned-to-a-single-milestone +func (s *MilestonesService) GetMilestoneIssues(pid interface{}, milestone int, opt *GetMilestoneIssuesOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/milestones/%d/issues", PathEscape(project), milestone) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var i []*Issue + resp, err := s.client.Do(req, &i) + if err != nil { + return nil, resp, err + } + + return i, resp, nil +} + +// GetMilestoneMergeRequestsOptions represents the available +// GetMilestoneMergeRequests() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/milestones.html#get-all-merge-requests-assigned-to-a-single-milestone +type GetMilestoneMergeRequestsOptions ListOptions + +// GetMilestoneMergeRequests gets all merge requests assigned to a single +// project milestone. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/milestones.html#get-all-merge-requests-assigned-to-a-single-milestone +func (s *MilestonesService) GetMilestoneMergeRequests(pid interface{}, milestone int, opt *GetMilestoneMergeRequestsOptions, options ...RequestOptionFunc) ([]*MergeRequest, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/milestones/%d/merge_requests", PathEscape(project), milestone) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var mr []*MergeRequest + resp, err := s.client.Do(req, &mr) + if err != nil { + return nil, resp, err + } + + return mr, resp, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/namespaces.go b/vendor/github.com/xanzy/go-gitlab/namespaces.go new file mode 100644 index 0000000000..da82a0c588 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/namespaces.go @@ -0,0 +1,174 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" +) + +// NamespacesService handles communication with the namespace related methods +// of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/namespaces.html +type NamespacesService struct { + client *Client +} + +// Namespace represents a GitLab namespace. 
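+//
+// A minimal usage sketch (an editorial illustration, not upstream code):
+// fetching a namespace by path, assuming a configured *gitlab.Client named
+// git and a placeholder group path:
+//
+//	ns, _, err := git.Namespaces.GetNamespace("my-group")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(ns.FullPath, ns.Kind)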
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/namespaces.html
+type Namespace struct {
+	ID                          int      `json:"id"`
+	Name                        string   `json:"name"`
+	Path                        string   `json:"path"`
+	Kind                        string   `json:"kind"`
+	FullPath                    string   `json:"full_path"`
+	ParentID                    int      `json:"parent_id"`
+	AvatarURL                   *string  `json:"avatar_url"`
+	WebURL                      string   `json:"web_url"`
+	MembersCountWithDescendants int      `json:"members_count_with_descendants"`
+	BillableMembersCount        int      `json:"billable_members_count"`
+	Plan                        string   `json:"plan"`
+	TrialEndsOn                 *ISOTime `json:"trial_ends_on"`
+	Trial                       bool     `json:"trial"`
+	MaxSeatsUsed                *int     `json:"max_seats_used"`
+	SeatsInUse                  *int     `json:"seats_in_use"`
+}
+
+func (n Namespace) String() string {
+	return Stringify(n)
+}
+
+// ListNamespacesOptions represents the available ListNamespaces() options.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/namespaces.html#list-namespaces
+type ListNamespacesOptions struct {
+	ListOptions
+	Search    *string `url:"search,omitempty" json:"search,omitempty"`
+	OwnedOnly *bool   `url:"owned_only,omitempty" json:"owned_only,omitempty"`
+}
+
+// ListNamespaces gets a list of namespaces accessible by the authenticated user.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/namespaces.html#list-namespaces
+func (s *NamespacesService) ListNamespaces(opt *ListNamespacesOptions, options ...RequestOptionFunc) ([]*Namespace, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodGet, "namespaces", opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var n []*Namespace
+	resp, err := s.client.Do(req, &n)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return n, resp, nil
+}
+
+// SearchNamespace gets all namespaces that match your string in their name
+// or path.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/namespaces.html#list-namespaces
+func (s *NamespacesService) SearchNamespace(query string, options ...RequestOptionFunc) ([]*Namespace, *Response, error) {
+	var q struct {
+		Search string `url:"search,omitempty" json:"search,omitempty"`
+	}
+	q.Search = query
+
+	req, err := s.client.NewRequest(http.MethodGet, "namespaces", &q, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var n []*Namespace
+	resp, err := s.client.Do(req, &n)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return n, resp, nil
+}
+
+// GetNamespace gets a namespace by ID.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/namespaces.html#get-namespace-by-id
+func (s *NamespacesService) GetNamespace(id interface{}, options ...RequestOptionFunc) (*Namespace, *Response, error) {
+	namespace, err := parseID(id)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("namespaces/%s", PathEscape(namespace))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	n := new(Namespace)
+	resp, err := s.client.Do(req, n)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return n, resp, nil
+}
+
+// NamespaceExistance represents the result of a namespace existence check.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/namespaces.html#get-existence-of-a-namespace
+type NamespaceExistance struct {
+	Exists   bool     `json:"exists"`
+	Suggests []string `json:"suggests"`
+}
+
+// NamespaceExistsOptions represents the available NamespaceExists() options.
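+//
+// A minimal usage sketch (an editorial illustration, not upstream code):
+// checking whether a namespace path is taken, with "my-group" as a
+// placeholder:
+//
+//	exists, _, err := git.Namespaces.NamespaceExists("my-group", &gitlab.NamespaceExistsOptions{})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if exists.Exists {
+//		fmt.Println("taken; suggestions:", exists.Suggests)
+//	}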
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/namespaces.html#get-existence-of-a-namespace
+type NamespaceExistsOptions struct {
+	ParentID *int `url:"parent_id,omitempty" json:"parent_id,omitempty"`
+}
+
+// NamespaceExists checks the existence of a namespace.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/namespaces.html#get-existence-of-a-namespace
+func (s *NamespacesService) NamespaceExists(id interface{}, opt *NamespaceExistsOptions, options ...RequestOptionFunc) (*NamespaceExistance, *Response, error) {
+	namespace, err := parseID(id)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("namespaces/%s/exists", PathEscape(namespace))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	n := new(NamespaceExistance)
+	resp, err := s.client.Do(req, n)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return n, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/notes.go b/vendor/github.com/xanzy/go-gitlab/notes.go
new file mode 100644
index 0000000000..0c57ae2294
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/notes.go
@@ -0,0 +1,696 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// NotesService handles communication with the notes related methods
+// of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/notes.html
+type NotesService struct {
+	client *Client
+}
+
+// Note represents a GitLab note.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/notes.html
+type Note struct {
+	ID         int           `json:"id"`
+	Type       NoteTypeValue `json:"type"`
+	Body       string        `json:"body"`
+	Attachment string        `json:"attachment"`
+	Title      string        `json:"title"`
+	FileName   string        `json:"file_name"`
+	Author     struct {
+		ID        int    `json:"id"`
+		Username  string `json:"username"`
+		Email     string `json:"email"`
+		Name      string `json:"name"`
+		State     string `json:"state"`
+		AvatarURL string `json:"avatar_url"`
+		WebURL    string `json:"web_url"`
+	} `json:"author"`
+	System       bool          `json:"system"`
+	CreatedAt    *time.Time    `json:"created_at"`
+	UpdatedAt    *time.Time    `json:"updated_at"`
+	ExpiresAt    *time.Time    `json:"expires_at"`
+	CommitID     string        `json:"commit_id"`
+	Position     *NotePosition `json:"position"`
+	NoteableID   int           `json:"noteable_id"`
+	NoteableType string        `json:"noteable_type"`
+	ProjectID    int           `json:"project_id"`
+	NoteableIID  int           `json:"noteable_iid"`
+	Resolvable   bool          `json:"resolvable"`
+	Resolved     bool          `json:"resolved"`
+	ResolvedAt   *time.Time    `json:"resolved_at"`
+	ResolvedBy   struct {
+		ID        int    `json:"id"`
+		Username  string `json:"username"`
+		Email     string `json:"email"`
+		Name      string `json:"name"`
+		State     string `json:"state"`
+		AvatarURL string `json:"avatar_url"`
+		WebURL    string `json:"web_url"`
+	} `json:"resolved_by"`
+	Confidential bool `json:"confidential"`
+	Internal     bool `json:"internal"`
+}
+
+// NotePosition represents the position attributes of a note.
+type NotePosition struct {
+	BaseSHA      string     `json:"base_sha"`
+	StartSHA     string     `json:"start_sha"`
+	HeadSHA      string     `json:"head_sha"`
+	PositionType string     `json:"position_type"`
+	NewPath      string     `json:"new_path,omitempty"`
+	NewLine      int        `json:"new_line,omitempty"`
+	OldPath      string     `json:"old_path,omitempty"`
+	OldLine      int        `json:"old_line,omitempty"`
+	LineRange    *LineRange `json:"line_range,omitempty"`
+}
+
+// LineRange represents the range of a note.
+type LineRange struct {
+	StartRange *LinePosition `json:"start"`
+	EndRange   *LinePosition `json:"end"`
+}
+
+// LinePosition represents a position in a line range.
+type LinePosition struct {
+	LineCode string `json:"line_code"`
+	Type     string `json:"type"`
+	OldLine  int    `json:"old_line"`
+	NewLine  int    `json:"new_line"`
+}
+
+func (n Note) String() string {
+	return Stringify(n)
+}
+
+// ListIssueNotesOptions represents the available ListIssueNotes() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/notes.html#list-project-issue-notes
+type ListIssueNotesOptions struct {
+	ListOptions
+	OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"`
+	Sort    *string `url:"sort,omitempty" json:"sort,omitempty"`
+}
+
+// ListIssueNotes gets a list of all notes for a single issue.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/notes.html#list-project-issue-notes
+func (s *NotesService) ListIssueNotes(pid interface{}, issue int, opt *ListIssueNotesOptions, options ...RequestOptionFunc) ([]*Note, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/issues/%d/notes", PathEscape(project), issue)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var n []*Note
+	resp, err := s.client.Do(req, &n)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return n, resp, nil
+}
+
+// GetIssueNote returns a single note for a specific project issue.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/notes.html#get-single-issue-note
+func (s *NotesService) GetIssueNote(pid interface{}, issue, note int, options ...RequestOptionFunc) (*Note, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/issues/%d/notes/%d", PathEscape(project), issue, note)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	n := new(Note)
+	resp, err := s.client.Do(req, n)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return n, resp, nil
+}
+
+// CreateIssueNoteOptions represents the available CreateIssueNote()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/notes.html#create-new-issue-note
+type CreateIssueNoteOptions struct {
+	Body      *string    `url:"body,omitempty" json:"body,omitempty"`
+	CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"`
+}
+
+// CreateIssueNote creates a new note for a single project issue.
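+//
+// A minimal usage sketch (an editorial illustration, not upstream code):
+// commenting on issue 7 of a placeholder project "my-group/my-project":
+//
+//	note, _, err := git.Notes.CreateIssueNote("my-group/my-project", 7, &gitlab.CreateIssueNoteOptions{
+//		Body: gitlab.String("Reproduced on v1.2.3."),
+//	})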
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/notes.html#create-new-issue-note
+func (s *NotesService) CreateIssueNote(pid interface{}, issue int, opt *CreateIssueNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/issues/%d/notes", PathEscape(project), issue)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	n := new(Note)
+	resp, err := s.client.Do(req, n)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return n, resp, nil
+}
+
+// UpdateIssueNoteOptions represents the available UpdateIssueNote()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/notes.html#modify-existing-issue-note
+type UpdateIssueNoteOptions struct {
+	Body *string `url:"body,omitempty" json:"body,omitempty"`
+}
+
+// UpdateIssueNote modifies an existing note of an issue.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/notes.html#modify-existing-issue-note
+func (s *NotesService) UpdateIssueNote(pid interface{}, issue, note int, opt *UpdateIssueNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/issues/%d/notes/%d", PathEscape(project), issue, note)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	n := new(Note)
+	resp, err := s.client.Do(req, n)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return n, resp, nil
+}
+
+// DeleteIssueNote deletes an existing note of an issue.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/notes.html#delete-an-issue-note
+func (s *NotesService) DeleteIssueNote(pid interface{}, issue, note int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/issues/%d/notes/%d", PathEscape(project), issue, note)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// ListSnippetNotesOptions represents the available ListSnippetNotes() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/notes.html#list-all-snippet-notes
+type ListSnippetNotesOptions struct {
+	ListOptions
+	OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"`
+	Sort    *string `url:"sort,omitempty" json:"sort,omitempty"`
+}
+
+// ListSnippetNotes gets a list of all notes for a single snippet. Snippet
+// notes are comments users can post to a snippet.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/notes.html#list-all-snippet-notes
+func (s *NotesService) ListSnippetNotes(pid interface{}, snippet int, opt *ListSnippetNotesOptions, options ...RequestOptionFunc) ([]*Note, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/snippets/%d/notes", PathEscape(project), snippet)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var n []*Note
+	resp, err := s.client.Do(req, &n)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return n, resp, nil
+}
+
+// GetSnippetNote returns a single note for a given snippet.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/notes.html#get-single-snippet-note
+func (s *NotesService) GetSnippetNote(pid interface{}, snippet, note int, options ...RequestOptionFunc) (*Note, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/snippets/%d/notes/%d", PathEscape(project), snippet, note)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	n := new(Note)
+	resp, err := s.client.Do(req, n)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return n, resp, nil
+}
+
+// CreateSnippetNoteOptions represents the available CreateSnippetNote()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/notes.html#create-new-snippet-note
+type CreateSnippetNoteOptions struct {
+	Body *string `url:"body,omitempty" json:"body,omitempty"`
+}
+
+// CreateSnippetNote creates a new note for a single snippet. Snippet notes are
+// comments users can post to a snippet.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/notes.html#create-new-snippet-note
+func (s *NotesService) CreateSnippetNote(pid interface{}, snippet int, opt *CreateSnippetNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/snippets/%d/notes", PathEscape(project), snippet)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	n := new(Note)
+	resp, err := s.client.Do(req, n)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return n, resp, nil
+}
+
+// UpdateSnippetNoteOptions represents the available UpdateSnippetNote()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/notes.html#modify-existing-snippet-note
+type UpdateSnippetNoteOptions struct {
+	Body *string `url:"body,omitempty" json:"body,omitempty"`
+}
+
+// UpdateSnippetNote modifies an existing note of a snippet.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/notes.html#modify-existing-snippet-note
+func (s *NotesService) UpdateSnippetNote(pid interface{}, snippet, note int, opt *UpdateSnippetNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/snippets/%d/notes/%d", PathEscape(project), snippet, note)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	n := new(Note)
+	resp, err := s.client.Do(req, n)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return n, resp, nil
+}
+
+// DeleteSnippetNote deletes an existing note of a snippet.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/notes.html#delete-a-snippet-note
+func (s *NotesService) DeleteSnippetNote(pid interface{}, snippet, note int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/snippets/%d/notes/%d", PathEscape(project), snippet, note)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// ListMergeRequestNotesOptions represents the available ListMergeRequestNotes()
+// options.
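+//
+// A minimal usage sketch (an editorial illustration, not upstream code):
+// listing the notes of merge request 5 in a placeholder project ID 42,
+// newest first:
+//
+//	notes, _, err := git.Notes.ListMergeRequestNotes(42, 5, &gitlab.ListMergeRequestNotesOptions{
+//		OrderBy: gitlab.String("created_at"),
+//		Sort:    gitlab.String("desc"),
+//	})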
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/notes.html#list-all-merge-request-notes
+type ListMergeRequestNotesOptions struct {
+	ListOptions
+	OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"`
+	Sort    *string `url:"sort,omitempty" json:"sort,omitempty"`
+}
+
+// ListMergeRequestNotes gets a list of all notes for a single merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/notes.html#list-all-merge-request-notes
+func (s *NotesService) ListMergeRequestNotes(pid interface{}, mergeRequest int, opt *ListMergeRequestNotesOptions, options ...RequestOptionFunc) ([]*Note, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_requests/%d/notes", PathEscape(project), mergeRequest)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var n []*Note
+	resp, err := s.client.Do(req, &n)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return n, resp, nil
+}
+
+// GetMergeRequestNote returns a single note for a given merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/notes.html#get-single-merge-request-note
+func (s *NotesService) GetMergeRequestNote(pid interface{}, mergeRequest, note int, options ...RequestOptionFunc) (*Note, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_requests/%d/notes/%d", PathEscape(project), mergeRequest, note)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	n := new(Note)
+	resp, err := s.client.Do(req, n)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return n, resp, nil
+}
+
+// CreateMergeRequestNoteOptions represents the available
+// CreateMergeRequestNote() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/notes.html#create-new-merge-request-note
+type CreateMergeRequestNoteOptions struct {
+	Body *string `url:"body,omitempty" json:"body,omitempty"`
+}
+
+// CreateMergeRequestNote creates a new note for a single merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/notes.html#create-new-merge-request-note
+func (s *NotesService) CreateMergeRequestNote(pid interface{}, mergeRequest int, opt *CreateMergeRequestNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_requests/%d/notes", PathEscape(project), mergeRequest)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	n := new(Note)
+	resp, err := s.client.Do(req, n)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return n, resp, nil
+}
+
+// UpdateMergeRequestNoteOptions represents the available
+// UpdateMergeRequestNote() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/notes.html#modify-existing-merge-request-note
+type UpdateMergeRequestNoteOptions struct {
+	Body *string `url:"body,omitempty" json:"body,omitempty"`
+}
+
+// UpdateMergeRequestNote modifies an existing note of a merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/notes.html#modify-existing-merge-request-note
+func (s *NotesService) UpdateMergeRequestNote(pid interface{}, mergeRequest, note int, opt *UpdateMergeRequestNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf(
+		"projects/%s/merge_requests/%d/notes/%d", PathEscape(project), mergeRequest, note)
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	n := new(Note)
+	resp, err := s.client.Do(req, n)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return n, resp, nil
+}
+
+// DeleteMergeRequestNote deletes an existing note of a merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/notes.html#delete-a-merge-request-note
+func (s *NotesService) DeleteMergeRequestNote(pid interface{}, mergeRequest, note int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf(
+		"projects/%s/merge_requests/%d/notes/%d", PathEscape(project), mergeRequest, note)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// ListEpicNotesOptions represents the available ListEpicNotes() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/notes.html#list-all-epic-notes
+type ListEpicNotesOptions struct {
+	ListOptions
+	OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"`
+	Sort    *string `url:"sort,omitempty" json:"sort,omitempty"`
+}
+
+// ListEpicNotes gets a list of all notes for a single epic.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/notes.html#list-all-epic-notes
+func (s *NotesService) ListEpicNotes(gid interface{}, epic int, opt *ListEpicNotesOptions, options ...RequestOptionFunc) ([]*Note, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/epics/%d/notes", PathEscape(group), epic)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var n []*Note
+	resp, err := s.client.Do(req, &n)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return n, resp, nil
+}
+
+// GetEpicNote returns a single note for an epic.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/notes.html#get-single-epic-note
+func (s *NotesService) GetEpicNote(gid interface{}, epic, note int, options ...RequestOptionFunc) (*Note, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/epics/%d/notes/%d", PathEscape(group), epic, note)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	n := new(Note)
+	resp, err := s.client.Do(req, n)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return n, resp, nil
+}
+
+// CreateEpicNoteOptions represents the available CreateEpicNote() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/notes.html#create-new-epic-note
+type CreateEpicNoteOptions struct {
+	Body *string `url:"body,omitempty" json:"body,omitempty"`
+}
+
+// CreateEpicNote creates a new note for a single epic.
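+//
+// A minimal usage sketch (an editorial illustration, not upstream code):
+// adding a note to epic 3 of a placeholder group "my-group":
+//
+//	note, _, err := git.Notes.CreateEpicNote("my-group", 3, &gitlab.CreateEpicNoteOptions{
+//		Body: gitlab.String("Scheduled for next quarter."),
+//	})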
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/notes.html#create-new-epic-note
+func (s *NotesService) CreateEpicNote(gid interface{}, epic int, opt *CreateEpicNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/epics/%d/notes", PathEscape(group), epic)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	n := new(Note)
+	resp, err := s.client.Do(req, n)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return n, resp, nil
+}
+
+// UpdateEpicNoteOptions represents the available UpdateEpicNote() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/notes.html#modify-existing-epic-note
+type UpdateEpicNoteOptions struct {
+	Body *string `url:"body,omitempty" json:"body,omitempty"`
+}
+
+// UpdateEpicNote modifies an existing note of an epic.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/notes.html#modify-existing-epic-note
+func (s *NotesService) UpdateEpicNote(gid interface{}, epic, note int, opt *UpdateEpicNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/epics/%d/notes/%d", PathEscape(group), epic, note)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	n := new(Note)
+	resp, err := s.client.Do(req, n)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return n, resp, nil
+}
+
+// DeleteEpicNote deletes an existing note of an epic.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/notes.html#delete-an-epic-note
+func (s *NotesService) DeleteEpicNote(gid interface{}, epic, note int, options ...RequestOptionFunc) (*Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("groups/%s/epics/%d/notes/%d", PathEscape(group), epic, note)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/notifications.go b/vendor/github.com/xanzy/go-gitlab/notifications.go
new file mode 100644
index 0000000000..92c2cb189a
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/notifications.go
@@ -0,0 +1,242 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+)
+
+// NotificationSettingsService handles communication with the notification settings
+// related methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/notification_settings.html
+type NotificationSettingsService struct {
+	client *Client
+}
+
+// NotificationSettings represents the GitLab notification setting.
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/notification_settings.html#valid-notification-levels +type NotificationSettings struct { + Level NotificationLevelValue `json:"level"` + NotificationEmail string `json:"notification_email"` + Events *NotificationEvents `json:"events"` +} + +// NotificationEvents represents the available notification setting events. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/notification_settings.html#valid-notification-levels +type NotificationEvents struct { + CloseIssue bool `json:"close_issue"` + CloseMergeRequest bool `json:"close_merge_request"` + FailedPipeline bool `json:"failed_pipeline"` + FixedPipeline bool `json:"fixed_pipeline"` + IssueDue bool `json:"issue_due"` + MergeWhenPipelineSucceeds bool `json:"merge_when_pipeline_succeeds"` + MergeMergeRequest bool `json:"merge_merge_request"` + MovedProject bool `json:"moved_project"` + NewIssue bool `json:"new_issue"` + NewMergeRequest bool `json:"new_merge_request"` + NewEpic bool `json:"new_epic"` + NewNote bool `json:"new_note"` + PushToMergeRequest bool `json:"push_to_merge_request"` + ReassignIssue bool `json:"reassign_issue"` + ReassignMergeRequest bool `json:"reassign_merge_request"` + ReopenIssue bool `json:"reopen_issue"` + ReopenMergeRequest bool `json:"reopen_merge_request"` + SuccessPipeline bool `json:"success_pipeline"` +} + +func (ns NotificationSettings) String() string { + return Stringify(ns) +} + +// GetGlobalSettings returns current notification settings and email address. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/notification_settings.html#global-notification-settings +func (s *NotificationSettingsService) GetGlobalSettings(options ...RequestOptionFunc) (*NotificationSettings, *Response, error) { + u := "notification_settings" + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + ns := new(NotificationSettings) + resp, err := s.client.Do(req, ns) + if err != nil { + return nil, resp, err + } + + return ns, resp, nil +} + +// NotificationSettingsOptions represents the available options that can be passed +// to the API when updating the notification settings. 
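+//
+// A minimal usage sketch (an editorial illustration, not upstream code):
+// switching to custom notifications and enabling failed-pipeline emails,
+// assuming the package's CustomNotificationLevel constant:
+//
+//	level := gitlab.CustomNotificationLevel
+//	settings, _, err := git.NotificationSettings.UpdateGlobalSettings(&gitlab.NotificationSettingsOptions{
+//		Level:          &level,
+//		FailedPipeline: gitlab.Bool(true),
+//	})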
+type NotificationSettingsOptions struct { + Level *NotificationLevelValue `url:"level,omitempty" json:"level,omitempty"` + NotificationEmail *string `url:"notification_email,omitempty" json:"notification_email,omitempty"` + CloseIssue *bool `url:"close_issue,omitempty" json:"close_issue,omitempty"` + CloseMergeRequest *bool `url:"close_merge_request,omitempty" json:"close_merge_request,omitempty"` + FailedPipeline *bool `url:"failed_pipeline,omitempty" json:"failed_pipeline,omitempty"` + FixedPipeline *bool `url:"fixed_pipeline,omitempty" json:"fixed_pipeline,omitempty"` + IssueDue *bool `url:"issue_due,omitempty" json:"issue_due,omitempty"` + MergeMergeRequest *bool `url:"merge_merge_request,omitempty" json:"merge_merge_request,omitempty"` + MergeWhenPipelineSucceeds *bool `url:"merge_when_pipeline_succeeds,omitempty" json:"merge_when_pipeline_succeeds,omitempty"` + MovedProject *bool `url:"moved_project,omitempty" json:"moved_project,omitempty"` + NewEpic *bool `url:"new_epic,omitempty" json:"new_epic,omitempty"` + NewIssue *bool `url:"new_issue,omitempty" json:"new_issue,omitempty"` + NewMergeRequest *bool `url:"new_merge_request,omitempty" json:"new_merge_request,omitempty"` + NewNote *bool `url:"new_note,omitempty" json:"new_note,omitempty"` + PushToMergeRequest *bool `url:"push_to_merge_request,omitempty" json:"push_to_merge_request,omitempty"` + ReassignIssue *bool `url:"reassign_issue,omitempty" json:"reassign_issue,omitempty"` + ReassignMergeRequest *bool `url:"reassign_merge_request,omitempty" json:"reassign_merge_request,omitempty"` + ReopenIssue *bool `url:"reopen_issue,omitempty" json:"reopen_issue,omitempty"` + ReopenMergeRequest *bool `url:"reopen_merge_request,omitempty" json:"reopen_merge_request,omitempty"` + SuccessPipeline *bool `url:"success_pipeline,omitempty" json:"success_pipeline,omitempty"` +} + +// UpdateGlobalSettings updates current notification settings and email address. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/notification_settings.html#update-global-notification-settings +func (s *NotificationSettingsService) UpdateGlobalSettings(opt *NotificationSettingsOptions, options ...RequestOptionFunc) (*NotificationSettings, *Response, error) { + if opt.Level != nil && *opt.Level == GlobalNotificationLevel { + return nil, nil, errors.New( + "notification level 'global' is not valid for global notification settings") + } + + u := "notification_settings" + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, nil, err + } + + ns := new(NotificationSettings) + resp, err := s.client.Do(req, ns) + if err != nil { + return nil, resp, err + } + + return ns, resp, nil +} + +// GetSettingsForGroup returns current group notification settings. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/notification_settings.html#group--project-level-notification-settings +func (s *NotificationSettingsService) GetSettingsForGroup(gid interface{}, options ...RequestOptionFunc) (*NotificationSettings, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/notification_settings", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + ns := new(NotificationSettings) + resp, err := s.client.Do(req, ns) + if err != nil { + return nil, resp, err + } + + return ns, resp, nil +} + +// GetSettingsForProject returns current project notification settings. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/notification_settings.html#group--project-level-notification-settings +func (s *NotificationSettingsService) GetSettingsForProject(pid interface{}, options ...RequestOptionFunc) (*NotificationSettings, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/notification_settings", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + ns := new(NotificationSettings) + resp, err := s.client.Do(req, ns) + if err != nil { + return nil, resp, err + } + + return ns, resp, nil +} + +// UpdateSettingsForGroup updates current group notification settings. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/notification_settings.html#update-groupproject-level-notification-settings +func (s *NotificationSettingsService) UpdateSettingsForGroup(gid interface{}, opt *NotificationSettingsOptions, options ...RequestOptionFunc) (*NotificationSettings, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/notification_settings", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, nil, err + } + + ns := new(NotificationSettings) + resp, err := s.client.Do(req, ns) + if err != nil { + return nil, resp, err + } + + return ns, resp, nil +} + +// UpdateSettingsForProject updates current project notification settings. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/notification_settings.html#update-groupproject-level-notification-settings +func (s *NotificationSettingsService) UpdateSettingsForProject(pid interface{}, opt *NotificationSettingsOptions, options ...RequestOptionFunc) (*NotificationSettings, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/notification_settings", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, nil, err + } + + ns := new(NotificationSettings) + resp, err := s.client.Do(req, ns) + if err != nil { + return nil, resp, err + } + + return ns, resp, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/packages.go b/vendor/github.com/xanzy/go-gitlab/packages.go new file mode 100644 index 0000000000..a6b252ed2e --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/packages.go @@ -0,0 +1,261 @@ +// +// Copyright 2021, Kordian Bruck +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" + "time" +) + +// PackagesService handles communication with the packages related methods +// of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/packages.html +type PackagesService struct { + client *Client +} + +// Package represents a GitLab package. 
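+//
+// A minimal usage sketch (an editorial illustration, not upstream code):
+// listing the npm packages of a placeholder project ID 42:
+//
+//	packages, _, err := git.Packages.ListProjectPackages(42, &gitlab.ListProjectPackagesOptions{
+//		PackageType: gitlab.String("npm"),
+//	})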
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/packages.html +type Package struct { + ID int `json:"id"` + Name string `json:"name"` + Version string `json:"version"` + PackageType string `json:"package_type"` + Status string `json:"status"` + Links *PackageLinks `json:"_links"` + CreatedAt *time.Time `json:"created_at"` + LastDownloadedAt *time.Time `json:"last_downloaded_at"` + Tags []PackageTag `json:"tags"` +} + +func (s Package) String() string { + return Stringify(s) +} + +// GroupPackage represents a GitLab group package. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/packages.html +type GroupPackage struct { + Package + ProjectID int `json:"project_id"` + ProjectPath string `json:"project_path"` +} + +func (s GroupPackage) String() string { + return Stringify(s) +} + +// PackageLinks holds links for itself and deleting. +type PackageLinks struct { + WebPath string `json:"web_path"` + DeleteAPIPath string `json:"delete_api_path"` +} + +func (s PackageLinks) String() string { + return Stringify(s) +} + +// PackageTag holds label information about the package +type PackageTag struct { + ID int `json:"id"` + PackageID int `json:"package_id"` + Name string `json:"name"` + CreatedAt *time.Time `json:"created_at"` + UpdatedAt *time.Time `json:"updated_at"` +} + +func (s PackageTag) String() string { + return Stringify(s) +} + +// PackageFile represents one file contained within a package. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/packages.html +type PackageFile struct { + ID int `json:"id"` + PackageID int `json:"package_id"` + CreatedAt *time.Time `json:"created_at"` + FileName string `json:"file_name"` + Size int `json:"size"` + FileMD5 string `json:"file_md5"` + FileSHA1 string `json:"file_sha1"` + FileSHA256 string `json:"file_sha256"` + Pipeline *[]Pipeline `json:"pipelines"` +} + +func (s PackageFile) String() string { + return Stringify(s) +} + +// ListProjectPackagesOptions represents the available ListProjectPackages() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/packages.html#within-a-project +type ListProjectPackagesOptions struct { + ListOptions + OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` + Sort *string `url:"sort,omitempty" json:"sort,omitempty"` + PackageType *string `url:"package_type,omitempty" json:"package_type,omitempty"` + PackageName *string `url:"package_name,omitempty" json:"package_name,omitempty"` + PackageVersion *string `url:"package_version,omitempty" json:"package_version,omitempty"` + IncludeVersionless *bool `url:"include_versionless,omitempty" json:"include_versionless,omitempty"` + Status *string `url:"status,omitempty" json:"status,omitempty"` +} + +// ListProjectPackages gets a list of packages in a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/packages.html#within-a-project +func (s *PackagesService) ListProjectPackages(pid interface{}, opt *ListProjectPackagesOptions, options ...RequestOptionFunc) ([]*Package, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/packages", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var ps []*Package + resp, err := s.client.Do(req, &ps) + if err != nil { + return nil, resp, err + } + + return ps, resp, nil +} + +// ListGroupPackagesOptions represents the available ListGroupPackages() +// options. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/packages.html#within-a-group +type ListGroupPackagesOptions struct { + ListOptions + ExcludeSubGroups *bool `url:"exclude_subgroups,omitempty" json:"exclude_subgroups,omitempty"` + OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` + Sort *string `url:"sort,omitempty" json:"sort,omitempty"` + PackageType *string `url:"package_type,omitempty" json:"package_type,omitempty"` + PackageName *string `url:"package_name,omitempty" json:"package_name,omitempty"` + IncludeVersionless *bool `url:"include_versionless,omitempty" json:"include_versionless,omitempty"` + Status *string `url:"status,omitempty" json:"status,omitempty"` +} + +// ListGroupPackages gets a list of packages in a group. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/packages.html#within-a-group +func (s *PackagesService) ListGroupPackages(gid interface{}, opt *ListGroupPackagesOptions, options ...RequestOptionFunc) ([]*GroupPackage, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/packages", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var ps []*GroupPackage + resp, err := s.client.Do(req, &ps) + if err != nil { + return nil, resp, err + } + + return ps, resp, nil +} + +// ListPackageFilesOptions represents the available ListPackageFiles() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/packages.html#list-package-files +type ListPackageFilesOptions ListOptions + +// ListPackageFiles gets a list of files that are within a package +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/packages.html#list-package-files +func (s *PackagesService) ListPackageFiles(pid interface{}, pkg int, opt *ListPackageFilesOptions, options ...RequestOptionFunc) ([]*PackageFile, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf( + "projects/%s/packages/%d/package_files", + PathEscape(project), + pkg, + ) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var pfs []*PackageFile + resp, err := s.client.Do(req, &pfs) + if err != nil { + return nil, resp, err + } + + return pfs, resp, nil +} + +// DeleteProjectPackage deletes a package in a project. 
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/packages.html#delete-a-project-package
+func (s *PackagesService) DeleteProjectPackage(pid interface{}, pkg int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/packages/%d", PathEscape(project), pkg)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// DeletePackageFile deletes a file in a project package.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/packages.html#delete-a-package-file
+func (s *PackagesService) DeletePackageFile(pid interface{}, pkg, file int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/packages/%d/package_files/%d", PathEscape(project), pkg, file)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/pages.go b/vendor/github.com/xanzy/go-gitlab/pages.go
new file mode 100644
index 0000000000..617b0ba4b9
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/pages.go
@@ -0,0 +1,45 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+)
+
+// PagesService handles communication with the pages related methods
+// of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/pages.html
+type PagesService struct {
+	client *Client
+}
+
+// UnpublishPages unpublishes the pages of a project. The user must have
+// admin privileges.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pages.html#unpublish-pages
+func (s *PagesService) UnpublishPages(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/pages", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/pages_domains.go b/vendor/github.com/xanzy/go-gitlab/pages_domains.go
new file mode 100644
index 0000000000..07d985da76
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/pages_domains.go
@@ -0,0 +1,216 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// PagesDomainsService handles communication with the pages domains
+// related methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/pages_domains.html
+type PagesDomainsService struct {
+	client *Client
+}
+
+// PagesDomain represents a pages domain.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/pages_domains.html
+type PagesDomain struct {
+	Domain           string     `json:"domain"`
+	AutoSslEnabled   bool       `json:"auto_ssl_enabled"`
+	URL              string     `json:"url"`
+	ProjectID        int        `json:"project_id"`
+	Verified         bool       `json:"verified"`
+	VerificationCode string     `json:"verification_code"`
+	EnabledUntil     *time.Time `json:"enabled_until"`
+	Certificate      struct {
+		Subject         string     `json:"subject"`
+		Expired         bool       `json:"expired"`
+		Expiration      *time.Time `json:"expiration"`
+		Certificate     string     `json:"certificate"`
+		CertificateText string     `json:"certificate_text"`
+	} `json:"certificate"`
+}
+
+// ListPagesDomainsOptions represents the available ListPagesDomains() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pages_domains.html#list-pages-domains
+type ListPagesDomainsOptions ListOptions
+
+// ListPagesDomains gets a list of project pages domains.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pages_domains.html#list-pages-domains
+func (s *PagesDomainsService) ListPagesDomains(pid interface{}, opt *ListPagesDomainsOptions, options ...RequestOptionFunc) ([]*PagesDomain, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/pages/domains", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var pd []*PagesDomain
+	resp, err := s.client.Do(req, &pd)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pd, resp, nil
+}
+
+// ListAllPagesDomains gets a list of all pages domains.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pages_domains.html#list-all-pages-domains
+func (s *PagesDomainsService) ListAllPagesDomains(options ...RequestOptionFunc) ([]*PagesDomain, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodGet, "pages/domains", nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var pd []*PagesDomain
+	resp, err := s.client.Do(req, &pd)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pd, resp, nil
+}
+
+// GetPagesDomain gets a specific pages domain for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pages_domains.html#single-pages-domain
+func (s *PagesDomainsService) GetPagesDomain(pid interface{}, domain string, options ...RequestOptionFunc) (*PagesDomain, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/pages/domains/%s", PathEscape(project), domain)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pd := new(PagesDomain)
+	resp, err := s.client.Do(req, pd)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pd, resp, nil
+}
+
+// CreatePagesDomainOptions represents the available CreatePagesDomain() options.
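+//
+// A minimal usage sketch (an editorial illustration, not upstream code):
+// attaching a custom domain with automatic SSL to a placeholder project
+// ID 42:
+//
+//	pd, _, err := git.PagesDomains.CreatePagesDomain(42, &gitlab.CreatePagesDomainOptions{
+//		Domain:         gitlab.String("docs.example.com"),
+//		AutoSslEnabled: gitlab.Bool(true),
+//	})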
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pages_domains.html#create-new-pages-domain
+type CreatePagesDomainOptions struct {
+	Domain         *string `url:"domain,omitempty" json:"domain,omitempty"`
+	AutoSslEnabled *bool   `url:"auto_ssl_enabled,omitempty" json:"auto_ssl_enabled,omitempty"`
+	Certificate    *string `url:"certificate,omitempty" json:"certificate,omitempty"`
+	Key            *string `url:"key,omitempty" json:"key,omitempty"`
+}
+
+// CreatePagesDomain creates a new project pages domain.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pages_domains.html#create-new-pages-domain
+func (s *PagesDomainsService) CreatePagesDomain(pid interface{}, opt *CreatePagesDomainOptions, options ...RequestOptionFunc) (*PagesDomain, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/pages/domains", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pd := new(PagesDomain)
+	resp, err := s.client.Do(req, pd)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pd, resp, nil
+}
+
+// UpdatePagesDomainOptions represents the available UpdatePagesDomain() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pages_domains.html#update-pages-domain
+type UpdatePagesDomainOptions struct {
+	AutoSslEnabled *bool   `url:"auto_ssl_enabled,omitempty" json:"auto_ssl_enabled,omitempty"`
+	Certificate    *string `url:"certificate,omitempty" json:"certificate,omitempty"`
+	Key            *string `url:"key,omitempty" json:"key,omitempty"`
+}
+
+// UpdatePagesDomain updates an existing project pages domain.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pages_domains.html#update-pages-domain
+func (s *PagesDomainsService) UpdatePagesDomain(pid interface{}, domain string, opt *UpdatePagesDomainOptions, options ...RequestOptionFunc) (*PagesDomain, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/pages/domains/%s", PathEscape(project), domain)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pd := new(PagesDomain)
+	resp, err := s.client.Do(req, pd)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pd, resp, nil
+}
+
+// DeletePagesDomain deletes an existing project pages domain.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pages_domains.html#delete-pages-domain
+func (s *PagesDomainsService) DeletePagesDomain(pid interface{}, domain string, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/pages/domains/%s", PathEscape(project), domain)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/personal_access_tokens.go b/vendor/github.com/xanzy/go-gitlab/personal_access_tokens.go
new file mode 100644
index 0000000000..14aee9ee05
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/personal_access_tokens.go
@@ -0,0 +1,222 @@
+//
+// Copyright 2022, Ryan Glab
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// PersonalAccessTokensService handles communication with the personal access
+// tokens related methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/personal_access_tokens.html
+type PersonalAccessTokensService struct {
+	client *Client
+}
+
+// PersonalAccessToken represents a personal access token.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/personal_access_tokens.html
+type PersonalAccessToken struct {
+	ID         int        `json:"id"`
+	Name       string     `json:"name"`
+	Revoked    bool       `json:"revoked"`
+	CreatedAt  *time.Time `json:"created_at"`
+	Scopes     []string   `json:"scopes"`
+	UserID     int        `json:"user_id"`
+	LastUsedAt *time.Time `json:"last_used_at,omitempty"`
+	Active     bool       `json:"active"`
+	ExpiresAt  *ISOTime   `json:"expires_at"`
+	Token      string     `json:"token,omitempty"`
+}
+
+func (p PersonalAccessToken) String() string {
+	return Stringify(p)
+}
+
+// ListPersonalAccessTokensOptions represents the available
+// ListPersonalAccessTokens() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/personal_access_tokens.html#list-personal-access-tokens
+type ListPersonalAccessTokensOptions struct {
+	ListOptions
+	CreatedAfter   *ISOTime `url:"created_after,omitempty" json:"created_after,omitempty"`
+	CreatedBefore  *ISOTime `url:"created_before,omitempty" json:"created_before,omitempty"`
+	LastUsedAfter  *ISOTime `url:"last_used_after,omitempty" json:"last_used_after,omitempty"`
+	LastUsedBefore *ISOTime `url:"last_used_before,omitempty" json:"last_used_before,omitempty"`
+	Revoked        *bool    `url:"revoked,omitempty" json:"revoked,omitempty"`
+	Search         *string  `url:"search,omitempty" json:"search,omitempty"`
+	State          *string  `url:"state,omitempty" json:"state,omitempty"`
+	UserID         *int     `url:"user_id,omitempty" json:"user_id,omitempty"`
+}
+
+// ListPersonalAccessTokens gets a list of all personal access tokens.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/personal_access_tokens.html#list-personal-access-tokens
+func (s *PersonalAccessTokensService) ListPersonalAccessTokens(opt *ListPersonalAccessTokensOptions, options ...RequestOptionFunc) ([]*PersonalAccessToken, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodGet, "personal_access_tokens", opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var pats []*PersonalAccessToken
+	resp, err := s.client.Do(req, &pats)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pats, resp, nil
+}
+
+// GetSinglePersonalAccessTokenByID gets a single personal access token by its ID.
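+//
+// A minimal usage sketch (an editorial illustration, not upstream code):
+// inspecting a token by a placeholder ID:
+//
+//	pat, _, err := git.PersonalAccessTokens.GetSinglePersonalAccessTokenByID(123)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(pat.Name, pat.ExpiresAt)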
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/personal_access_tokens.html#using-a-personal-access-token-id
+func (s *PersonalAccessTokensService) GetSinglePersonalAccessTokenByID(token int, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) {
+	u := fmt.Sprintf("personal_access_tokens/%d", token)
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pat := new(PersonalAccessToken)
+	resp, err := s.client.Do(req, pat)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pat, resp, nil
+}
+
+// GetSinglePersonalAccessToken gets a single personal access token by passing
+// the token in a request header.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/personal_access_tokens.html#using-a-request-header
+func (s *PersonalAccessTokensService) GetSinglePersonalAccessToken(options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) {
+	u := "personal_access_tokens/self"
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pat := new(PersonalAccessToken)
+	resp, err := s.client.Do(req, pat)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pat, resp, nil
+}
+
+// RotatePersonalAccessTokenOptions represents the available RotatePersonalAccessToken()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/personal_access_tokens.html#rotate-a-personal-access-token
+type RotatePersonalAccessTokenOptions struct {
+	ExpiresAt *ISOTime `url:"expires_at,omitempty" json:"expires_at,omitempty"`
+}
+
+// RotatePersonalAccessToken is a backwards-compat shim for RotatePersonalAccessTokenByID.
+func (s *PersonalAccessTokensService) RotatePersonalAccessToken(token int, opt *RotatePersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) {
+	return s.RotatePersonalAccessTokenByID(token, opt, options...)
+}
+
+// RotatePersonalAccessTokenByID revokes a token and returns a new token that
+// expires in one week by default.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/personal_access_tokens.html#use-a-personal-access-token-id
+func (s *PersonalAccessTokensService) RotatePersonalAccessTokenByID(token int, opt *RotatePersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) {
+	u := fmt.Sprintf("personal_access_tokens/%d/rotate", token)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pat := new(PersonalAccessToken)
+	resp, err := s.client.Do(req, pat)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pat, resp, nil
+}
+
+// RotatePersonalAccessTokenSelf revokes the currently authenticated token
+// and returns a new token that expires in one week by default.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/personal_access_tokens.html#use-a-request-header
+func (s *PersonalAccessTokensService) RotatePersonalAccessTokenSelf(opt *RotatePersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) {
+	u := "personal_access_tokens/self/rotate"
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pat := new(PersonalAccessToken)
+	resp, err := s.client.Do(req, pat)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pat, resp, nil
+}
+
+// RevokePersonalAccessToken is a backwards-compat shim for RevokePersonalAccessTokenByID.
+func (s *PersonalAccessTokensService) RevokePersonalAccessToken(token int, options ...RequestOptionFunc) (*Response, error) {
+	return s.RevokePersonalAccessTokenByID(token, options...)
+}
+
+// RevokePersonalAccessTokenByID revokes a personal access token by its ID.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/personal_access_tokens.html#using-a-personal-access-token-id-1
+func (s *PersonalAccessTokensService) RevokePersonalAccessTokenByID(token int, options ...RequestOptionFunc) (*Response, error) {
+	u := fmt.Sprintf("personal_access_tokens/%d", token)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// RevokePersonalAccessTokenSelf revokes the currently authenticated
+// personal access token.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/personal_access_tokens.html#using-a-request-header-1
+func (s *PersonalAccessTokensService) RevokePersonalAccessTokenSelf(options ...RequestOptionFunc) (*Response, error) {
+	u := "personal_access_tokens/self"
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/pipeline_schedules.go b/vendor/github.com/xanzy/go-gitlab/pipeline_schedules.go
new file mode 100644
index 0000000000..51477f21bd
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/pipeline_schedules.go
@@ -0,0 +1,385 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// PipelineSchedulesService handles communication with the pipeline
+// schedules related methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/pipeline_schedules.html
+type PipelineSchedulesService struct {
+	client *Client
+}
+
+// PipelineSchedule represents a pipeline schedule.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pipeline_schedules.html
+type PipelineSchedule struct {
+	ID           int                 `json:"id"`
+	Description  string              `json:"description"`
+	Ref          string              `json:"ref"`
+	Cron         string              `json:"cron"`
+	CronTimezone string              `json:"cron_timezone"`
+	NextRunAt    *time.Time          `json:"next_run_at"`
+	Active       bool                `json:"active"`
+	CreatedAt    *time.Time          `json:"created_at"`
+	UpdatedAt    *time.Time          `json:"updated_at"`
+	Owner        *User               `json:"owner"`
+	LastPipeline *LastPipeline       `json:"last_pipeline"`
+	Variables    []*PipelineVariable `json:"variables"`
+}
+
+// LastPipeline represents the last pipeline run by a schedule. It is only
+// returned when getting a single pipeline schedule.
+type LastPipeline struct {
+	ID     int    `json:"id"`
+	SHA    string `json:"sha"`
+	Ref    string `json:"ref"`
+	Status string `json:"status"`
+	WebURL string `json:"web_url"`
+}
+
+// ListPipelineSchedulesOptions represents the available ListPipelineSchedules() options.
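+//
+// A minimal usage sketch (illustrative only; the client value git and the
+// project ID 1234 are placeholders):
+//
+//	opt := &gitlab.ListPipelineSchedulesOptions{PerPage: 20}
+//	schedules, _, err := git.PipelineSchedules.ListPipelineSchedules(1234, opt)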
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pipeline_schedules.html#get-all-pipeline-schedules
+type ListPipelineSchedulesOptions ListOptions
+
+// ListPipelineSchedules gets a list of project pipeline schedules.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pipeline_schedules.html#get-all-pipeline-schedules
+func (s *PipelineSchedulesService) ListPipelineSchedules(pid interface{}, opt *ListPipelineSchedulesOptions, options ...RequestOptionFunc) ([]*PipelineSchedule, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/pipeline_schedules", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var ps []*PipelineSchedule
+	resp, err := s.client.Do(req, &ps)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ps, resp, nil
+}
+
+// GetPipelineSchedule gets a pipeline schedule.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pipeline_schedules.html#get-a-single-pipeline-schedule
+func (s *PipelineSchedulesService) GetPipelineSchedule(pid interface{}, schedule int, options ...RequestOptionFunc) (*PipelineSchedule, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/pipeline_schedules/%d", PathEscape(project), schedule)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	p := new(PipelineSchedule)
+	resp, err := s.client.Do(req, p)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return p, resp, nil
+}
+
+// ListPipelinesTriggeredByScheduleOptions represents the available
+// ListPipelinesTriggeredBySchedule() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pipeline_schedules.html#get-all-pipelines-triggered-by-a-pipeline-schedule
+type ListPipelinesTriggeredByScheduleOptions ListOptions
+
+// ListPipelinesTriggeredBySchedule gets all pipelines triggered by a pipeline
+// schedule.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pipeline_schedules.html#get-all-pipelines-triggered-by-a-pipeline-schedule
+func (s *PipelineSchedulesService) ListPipelinesTriggeredBySchedule(pid interface{}, schedule int, opt *ListPipelinesTriggeredByScheduleOptions, options ...RequestOptionFunc) ([]*Pipeline, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/pipeline_schedules/%d/pipelines", PathEscape(project), schedule)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var p []*Pipeline
+	resp, err := s.client.Do(req, &p)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return p, resp, nil
+}
+
+// CreatePipelineScheduleOptions represents the available
+// CreatePipelineSchedule() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pipeline_schedules.html#create-a-new-pipeline-schedule
+type CreatePipelineScheduleOptions struct {
+	Description  *string `url:"description" json:"description"`
+	Ref          *string `url:"ref" json:"ref"`
+	Cron         *string `url:"cron" json:"cron"`
+	CronTimezone *string `url:"cron_timezone,omitempty" json:"cron_timezone,omitempty"`
+	Active       *bool   `url:"active,omitempty" json:"active,omitempty"`
+}
+
+// CreatePipelineSchedule creates a pipeline schedule.
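+//
+// Illustrative sketch (the client value git, the project ID, and the cron
+// expression are placeholders, not part of the upstream docs):
+//
+//	sched, _, err := git.PipelineSchedules.CreatePipelineSchedule(1234, &gitlab.CreatePipelineScheduleOptions{
+//		Description: gitlab.String("Nightly build"),
+//		Ref:         gitlab.String("main"),
+//		Cron:        gitlab.String("0 2 * * *"),
+//	})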
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/pipeline_schedules.html#create-a-new-pipeline-schedule +func (s *PipelineSchedulesService) CreatePipelineSchedule(pid interface{}, opt *CreatePipelineScheduleOptions, options ...RequestOptionFunc) (*PipelineSchedule, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/pipeline_schedules", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + p := new(PipelineSchedule) + resp, err := s.client.Do(req, p) + if err != nil { + return nil, resp, err + } + + return p, resp, nil +} + +// EditPipelineScheduleOptions represents the available +// EditPipelineSchedule() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/pipeline_schedules.html#edit-a-pipeline-schedule +type EditPipelineScheduleOptions struct { + Description *string `url:"description,omitempty" json:"description,omitempty"` + Ref *string `url:"ref,omitempty" json:"ref,omitempty"` + Cron *string `url:"cron,omitempty" json:"cron,omitempty"` + CronTimezone *string `url:"cron_timezone,omitempty" json:"cron_timezone,omitempty"` + Active *bool `url:"active,omitempty" json:"active,omitempty"` +} + +// EditPipelineSchedule edits a pipeline schedule. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/pipeline_schedules.html#edit-a-pipeline-schedule +func (s *PipelineSchedulesService) EditPipelineSchedule(pid interface{}, schedule int, opt *EditPipelineScheduleOptions, options ...RequestOptionFunc) (*PipelineSchedule, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/pipeline_schedules/%d", PathEscape(project), schedule) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, nil, err + } + + p := new(PipelineSchedule) + resp, err := s.client.Do(req, p) + if err != nil { + return nil, resp, err + } + + return p, resp, nil +} + +// TakeOwnershipOfPipelineSchedule sets the owner of the specified +// pipeline schedule to the user issuing the request. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/pipeline_schedules.html#take-ownership-of-a-pipeline-schedule +func (s *PipelineSchedulesService) TakeOwnershipOfPipelineSchedule(pid interface{}, schedule int, options ...RequestOptionFunc) (*PipelineSchedule, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/pipeline_schedules/%d/take_ownership", PathEscape(project), schedule) + + req, err := s.client.NewRequest(http.MethodPost, u, nil, options) + if err != nil { + return nil, nil, err + } + + p := new(PipelineSchedule) + resp, err := s.client.Do(req, p) + if err != nil { + return nil, resp, err + } + + return p, resp, nil +} + +// DeletePipelineSchedule deletes a pipeline schedule. 
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pipeline_schedules.html#delete-a-pipeline-schedule
+func (s *PipelineSchedulesService) DeletePipelineSchedule(pid interface{}, schedule int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/pipeline_schedules/%d", PathEscape(project), schedule)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// RunPipelineSchedule triggers a new scheduled pipeline to run immediately.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pipeline_schedules.html#run-a-scheduled-pipeline-immediately
+func (s *PipelineSchedulesService) RunPipelineSchedule(pid interface{}, schedule int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/pipeline_schedules/%d/play", PathEscape(project), schedule)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// CreatePipelineScheduleVariableOptions represents the available
+// CreatePipelineScheduleVariable() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pipeline_schedules.html#create-a-new-pipeline-schedule-variable
+type CreatePipelineScheduleVariableOptions struct {
+	Key          *string            `url:"key" json:"key"`
+	Value        *string            `url:"value" json:"value"`
+	VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"`
+}
+
+// CreatePipelineScheduleVariable creates a pipeline schedule variable.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pipeline_schedules.html#create-a-new-pipeline-schedule-variable
+func (s *PipelineSchedulesService) CreatePipelineScheduleVariable(pid interface{}, schedule int, opt *CreatePipelineScheduleVariableOptions, options ...RequestOptionFunc) (*PipelineVariable, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/pipeline_schedules/%d/variables", PathEscape(project), schedule)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	p := new(PipelineVariable)
+	resp, err := s.client.Do(req, p)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return p, resp, nil
+}
+
+// EditPipelineScheduleVariableOptions represents the available
+// EditPipelineScheduleVariable() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pipeline_schedules.html#edit-a-pipeline-schedule-variable
+type EditPipelineScheduleVariableOptions struct {
+	Value        *string            `url:"value" json:"value"`
+	VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"`
+}
+
+// EditPipelineScheduleVariable edits a pipeline schedule variable.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pipeline_schedules.html#edit-a-pipeline-schedule-variable
+func (s *PipelineSchedulesService) EditPipelineScheduleVariable(pid interface{}, schedule int, key string, opt *EditPipelineScheduleVariableOptions, options ...RequestOptionFunc) (*PipelineVariable, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/pipeline_schedules/%d/variables/%s", PathEscape(project), schedule, key)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	p := new(PipelineVariable)
+	resp, err := s.client.Do(req, p)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return p, resp, nil
+}
+
+// DeletePipelineScheduleVariable deletes a pipeline schedule variable.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pipeline_schedules.html#delete-a-pipeline-schedule-variable
+func (s *PipelineSchedulesService) DeletePipelineScheduleVariable(pid interface{}, schedule int, key string, options ...RequestOptionFunc) (*PipelineVariable, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/pipeline_schedules/%d/variables/%s", PathEscape(project), schedule, key)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	p := new(PipelineVariable)
+	resp, err := s.client.Do(req, p)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return p, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/pipeline_triggers.go b/vendor/github.com/xanzy/go-gitlab/pipeline_triggers.go
new file mode 100644
index 0000000000..39269a3dbc
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/pipeline_triggers.go
@@ -0,0 +1,248 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// PipelineTriggersService handles Project pipeline triggers.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pipeline_triggers.html
+type PipelineTriggersService struct {
+	client *Client
+}
+
+// PipelineTrigger represents a project pipeline trigger.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pipeline_triggers.html
+type PipelineTrigger struct {
+	ID          int        `json:"id"`
+	Description string     `json:"description"`
+	CreatedAt   *time.Time `json:"created_at"`
+	DeletedAt   *time.Time `json:"deleted_at"`
+	LastUsed    *time.Time `json:"last_used"`
+	Token       string     `json:"token"`
+	UpdatedAt   *time.Time `json:"updated_at"`
+	Owner       *User      `json:"owner"`
+}
+
+// ListPipelineTriggersOptions represents the available ListPipelineTriggers() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pipeline_triggers.html#list-project-trigger-tokens
+type ListPipelineTriggersOptions ListOptions
+
+// ListPipelineTriggers gets a list of project triggers.
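+//
+// For illustration only (the client value git and the project ID 1234 are
+// placeholders):
+//
+//	triggers, _, err := git.PipelineTriggers.ListPipelineTriggers(1234, &gitlab.ListPipelineTriggersOptions{PerPage: 10})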
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/pipeline_triggers.html#list-project-trigger-tokens +func (s *PipelineTriggersService) ListPipelineTriggers(pid interface{}, opt *ListPipelineTriggersOptions, options ...RequestOptionFunc) ([]*PipelineTrigger, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/triggers", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var pt []*PipelineTrigger + resp, err := s.client.Do(req, &pt) + if err != nil { + return nil, resp, err + } + + return pt, resp, nil +} + +// GetPipelineTrigger gets a specific pipeline trigger for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/pipeline_triggers.html#get-trigger-token-details +func (s *PipelineTriggersService) GetPipelineTrigger(pid interface{}, trigger int, options ...RequestOptionFunc) (*PipelineTrigger, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/triggers/%d", PathEscape(project), trigger) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + pt := new(PipelineTrigger) + resp, err := s.client.Do(req, pt) + if err != nil { + return nil, resp, err + } + + return pt, resp, nil +} + +// AddPipelineTriggerOptions represents the available AddPipelineTrigger() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/pipeline_triggers.html#create-a-trigger-token +type AddPipelineTriggerOptions struct { + Description *string `url:"description,omitempty" json:"description,omitempty"` +} + +// AddPipelineTrigger adds a pipeline trigger to a specified project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/pipeline_triggers.html#create-a-trigger-token +func (s *PipelineTriggersService) AddPipelineTrigger(pid interface{}, opt *AddPipelineTriggerOptions, options ...RequestOptionFunc) (*PipelineTrigger, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/triggers", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + pt := new(PipelineTrigger) + resp, err := s.client.Do(req, pt) + if err != nil { + return nil, resp, err + } + + return pt, resp, nil +} + +// EditPipelineTriggerOptions represents the available EditPipelineTrigger() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/pipeline_triggers.html#update-a-project-trigger-token +type EditPipelineTriggerOptions struct { + Description *string `url:"description,omitempty" json:"description,omitempty"` +} + +// EditPipelineTrigger edits a trigger for a specified project. 
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pipeline_triggers.html#update-a-project-trigger-token
+func (s *PipelineTriggersService) EditPipelineTrigger(pid interface{}, trigger int, opt *EditPipelineTriggerOptions, options ...RequestOptionFunc) (*PipelineTrigger, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/triggers/%d", PathEscape(project), trigger)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pt := new(PipelineTrigger)
+	resp, err := s.client.Do(req, pt)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pt, resp, nil
+}
+
+// TakeOwnershipOfPipelineTrigger sets the owner of the specified
+// pipeline trigger to the user issuing the request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pipeline_triggers.html#take-ownership-of-a-project-trigger
+func (s *PipelineTriggersService) TakeOwnershipOfPipelineTrigger(pid interface{}, trigger int, options ...RequestOptionFunc) (*PipelineTrigger, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/triggers/%d/take_ownership", PathEscape(project), trigger)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pt := new(PipelineTrigger)
+	resp, err := s.client.Do(req, pt)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pt, resp, nil
+}
+
+// DeletePipelineTrigger removes a trigger from a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pipeline_triggers.html#remove-a-project-trigger-token
+func (s *PipelineTriggersService) DeletePipelineTrigger(pid interface{}, trigger int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/triggers/%d", PathEscape(project), trigger)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// RunPipelineTriggerOptions represents the available RunPipelineTrigger() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pipeline_triggers.html#trigger-a-pipeline-with-a-token
+type RunPipelineTriggerOptions struct {
+	Ref       *string           `url:"ref" json:"ref"`
+	Token     *string           `url:"token" json:"token"`
+	Variables map[string]string `url:"variables,omitempty" json:"variables,omitempty"`
+}
+
+// RunPipelineTrigger starts a pipeline for a project using a trigger token.
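+//
+// A hedged example (the trigger token, ref, variables, and project ID below
+// are placeholders):
+//
+//	pipeline, _, err := git.PipelineTriggers.RunPipelineTrigger(1234, &gitlab.RunPipelineTriggerOptions{
+//		Ref:       gitlab.String("main"),
+//		Token:     gitlab.String("glptt-0123456789abcdef"),
+//		Variables: map[string]string{"DEPLOY_ENV": "staging"},
+//	})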
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pipeline_triggers.html#trigger-a-pipeline-with-a-token
+func (s *PipelineTriggersService) RunPipelineTrigger(pid interface{}, opt *RunPipelineTriggerOptions, options ...RequestOptionFunc) (*Pipeline, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/trigger/pipeline", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pt := new(Pipeline)
+	resp, err := s.client.Do(req, pt)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pt, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/pipelines.go b/vendor/github.com/xanzy/go-gitlab/pipelines.go
new file mode 100644
index 0000000000..3f2448447e
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/pipelines.go
@@ -0,0 +1,408 @@
+//
+// Copyright 2021, Igor Varavko
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// PipelinesService handles communication with the pipelines related
+// methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html
+type PipelinesService struct {
+	client *Client
+}
+
+// PipelineVariable represents a pipeline variable.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html
+type PipelineVariable struct {
+	Key          string            `json:"key"`
+	Value        string            `json:"value"`
+	VariableType VariableTypeValue `json:"variable_type"`
+}
+
+// Pipeline represents a GitLab pipeline.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html
+type Pipeline struct {
+	ID             int             `json:"id"`
+	IID            int             `json:"iid"`
+	ProjectID      int             `json:"project_id"`
+	Status         string          `json:"status"`
+	Source         string          `json:"source"`
+	Ref            string          `json:"ref"`
+	Name           string          `json:"name"`
+	SHA            string          `json:"sha"`
+	BeforeSHA      string          `json:"before_sha"`
+	Tag            bool            `json:"tag"`
+	YamlErrors     string          `json:"yaml_errors"`
+	User           *BasicUser      `json:"user"`
+	UpdatedAt      *time.Time      `json:"updated_at"`
+	CreatedAt      *time.Time      `json:"created_at"`
+	StartedAt      *time.Time      `json:"started_at"`
+	FinishedAt     *time.Time      `json:"finished_at"`
+	CommittedAt    *time.Time      `json:"committed_at"`
+	Duration       int             `json:"duration"`
+	QueuedDuration int             `json:"queued_duration"`
+	Coverage       string          `json:"coverage"`
+	WebURL         string          `json:"web_url"`
+	DetailedStatus *DetailedStatus `json:"detailed_status"`
+}
+
+// DetailedStatus contains detailed information about the status of a pipeline.
+type DetailedStatus struct {
+	Icon         string `json:"icon"`
+	Text         string `json:"text"`
+	Label        string `json:"label"`
+	Group        string `json:"group"`
+	Tooltip      string `json:"tooltip"`
+	HasDetails   bool   `json:"has_details"`
+	DetailsPath  string `json:"details_path"`
+	Illustration struct {
+		Image string `json:"image"`
+	} `json:"illustration"`
+	Favicon string `json:"favicon"`
+}
+
+func (p Pipeline) String() string {
+	return Stringify(p)
+}
+
+// PipelineTestReport contains a detailed report of a test run.
+type PipelineTestReport struct {
+	TotalTime    float64               `json:"total_time"`
+	TotalCount   int                   `json:"total_count"`
+	SuccessCount int                   `json:"success_count"`
+	FailedCount  int                   `json:"failed_count"`
+	SkippedCount int                   `json:"skipped_count"`
+	ErrorCount   int                   `json:"error_count"`
+	TestSuites   []*PipelineTestSuites `json:"test_suites"`
+}
+
+// PipelineTestSuites contains test suite results.
+type PipelineTestSuites struct {
+	Name         string               `json:"name"`
+	TotalTime    float64              `json:"total_time"`
+	TotalCount   int                  `json:"total_count"`
+	SuccessCount int                  `json:"success_count"`
+	FailedCount  int                  `json:"failed_count"`
+	SkippedCount int                  `json:"skipped_count"`
+	ErrorCount   int                  `json:"error_count"`
+	TestCases    []*PipelineTestCases `json:"test_cases"`
+}
+
+// PipelineTestCases contains test case details.
+type PipelineTestCases struct {
+	Status         string          `json:"status"`
+	Name           string          `json:"name"`
+	Classname      string          `json:"classname"`
+	File           string          `json:"file"`
+	ExecutionTime  float64         `json:"execution_time"`
+	SystemOutput   interface{}     `json:"system_output"`
+	StackTrace     string          `json:"stack_trace"`
+	AttachmentURL  string          `json:"attachment_url"`
+	RecentFailures *RecentFailures `json:"recent_failures"`
+}
+
+// RecentFailures contains the failure count for the project's default branch.
+type RecentFailures struct {
+	Count      int    `json:"count"`
+	BaseBranch string `json:"base_branch"`
+}
+
+func (p PipelineTestReport) String() string {
+	return Stringify(p)
+}
+
+// PipelineInfo shows the basic entities of a pipeline, mostly used as fields
+// on other assets, like Commit.
+type PipelineInfo struct {
+	ID        int        `json:"id"`
+	IID       int        `json:"iid"`
+	ProjectID int        `json:"project_id"`
+	Status    string     `json:"status"`
+	Source    string     `json:"source"`
+	Ref       string     `json:"ref"`
+	SHA       string     `json:"sha"`
+	WebURL    string     `json:"web_url"`
+	UpdatedAt *time.Time `json:"updated_at"`
+	CreatedAt *time.Time `json:"created_at"`
+}
+
+func (p PipelineInfo) String() string {
+	return Stringify(p)
+}
+
+// ListProjectPipelinesOptions represents the available ListProjectPipelines() options.
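+//
+// A usage sketch, assuming a configured *gitlab.Client named git and this
+// package's BuildState helper (the project ID and filter values are
+// placeholders):
+//
+//	pipelines, _, err := git.Pipelines.ListProjectPipelines(1234, &gitlab.ListProjectPipelinesOptions{
+//		Status: gitlab.BuildState(gitlab.Failed),
+//		Ref:    gitlab.String("main"),
+//	})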
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html#list-project-pipelines
+type ListProjectPipelinesOptions struct {
+	ListOptions
+	Scope         *string          `url:"scope,omitempty" json:"scope,omitempty"`
+	Status        *BuildStateValue `url:"status,omitempty" json:"status,omitempty"`
+	Source        *string          `url:"source,omitempty" json:"source,omitempty"`
+	Ref           *string          `url:"ref,omitempty" json:"ref,omitempty"`
+	SHA           *string          `url:"sha,omitempty" json:"sha,omitempty"`
+	YamlErrors    *bool            `url:"yaml_errors,omitempty" json:"yaml_errors,omitempty"`
+	Name          *string          `url:"name,omitempty" json:"name,omitempty"`
+	Username      *string          `url:"username,omitempty" json:"username,omitempty"`
+	UpdatedAfter  *time.Time       `url:"updated_after,omitempty" json:"updated_after,omitempty"`
+	UpdatedBefore *time.Time       `url:"updated_before,omitempty" json:"updated_before,omitempty"`
+	OrderBy       *string          `url:"order_by,omitempty" json:"order_by,omitempty"`
+	Sort          *string          `url:"sort,omitempty" json:"sort,omitempty"`
+}
+
+// ListProjectPipelines gets a list of project pipelines.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html#list-project-pipelines
+func (s *PipelinesService) ListProjectPipelines(pid interface{}, opt *ListProjectPipelinesOptions, options ...RequestOptionFunc) ([]*PipelineInfo, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/pipelines", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var p []*PipelineInfo
+	resp, err := s.client.Do(req, &p)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return p, resp, nil
+}
+
+// GetPipeline gets a single project pipeline.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html#get-a-single-pipeline
+func (s *PipelinesService) GetPipeline(pid interface{}, pipeline int, options ...RequestOptionFunc) (*Pipeline, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/pipelines/%d", PathEscape(project), pipeline)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	p := new(Pipeline)
+	resp, err := s.client.Do(req, p)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return p, resp, nil
+}
+
+// GetPipelineVariables gets the variables of a single project pipeline.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html#get-variables-of-a-pipeline
+func (s *PipelinesService) GetPipelineVariables(pid interface{}, pipeline int, options ...RequestOptionFunc) ([]*PipelineVariable, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/pipelines/%d/variables", PathEscape(project), pipeline)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var p []*PipelineVariable
+	resp, err := s.client.Do(req, &p)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return p, resp, nil
+}
+
+// GetPipelineTestReport gets the test report of a single project pipeline.
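+//
+// For example (the client value git and both IDs are placeholders):
+//
+//	report, _, err := git.Pipelines.GetPipelineTestReport(1234, 5678)
+//	if err == nil {
+//		fmt.Printf("%d total, %d failed\n", report.TotalCount, report.FailedCount)
+//	}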
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html#get-a-pipelines-test-report
+func (s *PipelinesService) GetPipelineTestReport(pid interface{}, pipeline int, options ...RequestOptionFunc) (*PipelineTestReport, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/pipelines/%d/test_report", PathEscape(project), pipeline)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	p := new(PipelineTestReport)
+	resp, err := s.client.Do(req, p)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return p, resp, nil
+}
+
+// GetLatestPipelineOptions represents the available GetLatestPipeline() options.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html#get-the-latest-pipeline
+type GetLatestPipelineOptions struct {
+	Ref *string `url:"ref,omitempty" json:"ref,omitempty"`
+}
+
+// GetLatestPipeline gets the latest pipeline for a specific ref in a project.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html#get-the-latest-pipeline
+func (s *PipelinesService) GetLatestPipeline(pid interface{}, opt *GetLatestPipelineOptions, options ...RequestOptionFunc) (*Pipeline, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/pipelines/latest", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	p := new(Pipeline)
+	resp, err := s.client.Do(req, p)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return p, resp, nil
+}
+
+// CreatePipelineOptions represents the available CreatePipeline() options.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html#create-a-new-pipeline
+type CreatePipelineOptions struct {
+	Ref       *string                     `url:"ref" json:"ref"`
+	Variables *[]*PipelineVariableOptions `url:"variables,omitempty" json:"variables,omitempty"`
+}
+
+// PipelineVariableOptions represents a pipeline variable option.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html#create-a-new-pipeline
+type PipelineVariableOptions struct {
+	Key          *string            `url:"key,omitempty" json:"key,omitempty"`
+	Value        *string            `url:"value,omitempty" json:"value,omitempty"`
+	VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"`
+}
+
+// CreatePipeline creates a new project pipeline.
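+//
+// Illustrative sketch (ref, variable values, and project ID are placeholders):
+//
+//	pipeline, _, err := git.Pipelines.CreatePipeline(1234, &gitlab.CreatePipelineOptions{
+//		Ref: gitlab.String("main"),
+//		Variables: &[]*gitlab.PipelineVariableOptions{
+//			{Key: gitlab.String("DEPLOY_ENV"), Value: gitlab.String("staging")},
+//		},
+//	})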
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html#create-a-new-pipeline
+func (s *PipelinesService) CreatePipeline(pid interface{}, opt *CreatePipelineOptions, options ...RequestOptionFunc) (*Pipeline, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/pipeline", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	p := new(Pipeline)
+	resp, err := s.client.Do(req, p)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return p, resp, nil
+}
+
+// RetryPipelineBuild retries failed builds in a pipeline.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pipelines.html#retry-jobs-in-a-pipeline
+func (s *PipelinesService) RetryPipelineBuild(pid interface{}, pipeline int, options ...RequestOptionFunc) (*Pipeline, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/pipelines/%d/retry", PathEscape(project), pipeline)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	p := new(Pipeline)
+	resp, err := s.client.Do(req, p)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return p, resp, nil
+}
+
+// CancelPipelineBuild cancels a pipeline's builds.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pipelines.html#cancel-a-pipelines-jobs
+func (s *PipelinesService) CancelPipelineBuild(pid interface{}, pipeline int, options ...RequestOptionFunc) (*Pipeline, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/pipelines/%d/cancel", PathEscape(project), pipeline)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	p := new(Pipeline)
+	resp, err := s.client.Do(req, p)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return p, resp, nil
+}
+
+// DeletePipeline deletes an existing pipeline.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/pipelines.html#delete-a-pipeline
+func (s *PipelinesService) DeletePipeline(pid interface{}, pipeline int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/pipelines/%d", PathEscape(project), pipeline)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/plan_limits.go b/vendor/github.com/xanzy/go-gitlab/plan_limits.go
new file mode 100644
index 0000000000..893ae756eb
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/plan_limits.go
@@ -0,0 +1,104 @@
+//
+// Copyright 2021, Igor Varavko
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import "net/http"
+
+// PlanLimitsService handles communication with the plan limits related
+// methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/plan_limits.html
+type PlanLimitsService struct {
+	client *Client
+}
+
+// PlanLimit represents a GitLab plan limit.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/plan_limits.html
+type PlanLimit struct {
+	ConanMaxFileSize           int `json:"conan_max_file_size,omitempty"`
+	GenericPackagesMaxFileSize int `json:"generic_packages_max_file_size,omitempty"`
+	HelmMaxFileSize            int `json:"helm_max_file_size,omitempty"`
+	MavenMaxFileSize           int `json:"maven_max_file_size,omitempty"`
+	NPMMaxFileSize             int `json:"npm_max_file_size,omitempty"`
+	NugetMaxFileSize           int `json:"nuget_max_file_size,omitempty"`
+	PyPiMaxFileSize            int `json:"pypi_max_file_size,omitempty"`
+	TerraformModuleMaxFileSize int `json:"terraform_module_max_file_size,omitempty"`
+}
+
+// GetCurrentPlanLimitsOptions represents the available GetCurrentPlanLimits()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/plan_limits.html#get-current-plan-limits
+type GetCurrentPlanLimitsOptions struct {
+	PlanName *string `url:"plan_name,omitempty" json:"plan_name,omitempty"`
+}
+
+// GetCurrentPlanLimits lists the current limits of a plan on the GitLab instance.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/plan_limits.html#get-current-plan-limits
+func (s *PlanLimitsService) GetCurrentPlanLimits(opt *GetCurrentPlanLimitsOptions, options ...RequestOptionFunc) (*PlanLimit, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodGet, "application/plan_limits", opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pl := new(PlanLimit)
+	resp, err := s.client.Do(req, pl)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pl, resp, nil
+}
+
+// ChangePlanLimitOptions represents the available ChangePlanLimits() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/plan_limits.html#change-plan-limits
+type ChangePlanLimitOptions struct {
+	PlanName                   *string `url:"plan_name,omitempty" json:"plan_name,omitempty"`
+	ConanMaxFileSize           *int    `url:"conan_max_file_size,omitempty" json:"conan_max_file_size,omitempty"`
+	GenericPackagesMaxFileSize *int    `url:"generic_packages_max_file_size,omitempty" json:"generic_packages_max_file_size,omitempty"`
+	HelmMaxFileSize            *int    `url:"helm_max_file_size,omitempty" json:"helm_max_file_size,omitempty"`
+	MavenMaxFileSize           *int    `url:"maven_max_file_size,omitempty" json:"maven_max_file_size,omitempty"`
+	NPMMaxFileSize             *int    `url:"npm_max_file_size,omitempty" json:"npm_max_file_size,omitempty"`
+	NugetMaxFileSize           *int    `url:"nuget_max_file_size,omitempty" json:"nuget_max_file_size,omitempty"`
+	PyPiMaxFileSize            *int    `url:"pypi_max_file_size,omitempty" json:"pypi_max_file_size,omitempty"`
+	TerraformModuleMaxFileSize *int    `url:"terraform_module_max_file_size,omitempty" json:"terraform_module_max_file_size,omitempty"`
+}
+
+// ChangePlanLimits modifies the limits of a plan on the GitLab instance.
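+//
+// A sketch of raising a single limit (the plan name and size below are
+// placeholders):
+//
+//	limits, _, err := git.PlanLimits.ChangePlanLimits(&gitlab.ChangePlanLimitOptions{
+//		PlanName:         gitlab.String("default"),
+//		MavenMaxFileSize: gitlab.Int(50 * 1024 * 1024),
+//	})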
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/plan_limits.html#change-plan-limits
+func (s *PlanLimitsService) ChangePlanLimits(opt *ChangePlanLimitOptions, options ...RequestOptionFunc) (*PlanLimit, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodPut, "application/plan_limits", opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pl := new(PlanLimit)
+	resp, err := s.client.Do(req, pl)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pl, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/project_access_tokens.go b/vendor/github.com/xanzy/go-gitlab/project_access_tokens.go
new file mode 100644
index 0000000000..2d6057e053
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/project_access_tokens.go
@@ -0,0 +1,200 @@
+//
+// Copyright 2021, Patrick Webster
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// ProjectAccessTokensService handles communication with the
+// project access tokens related methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/project_access_tokens.html
+type ProjectAccessTokensService struct {
+	client *Client
+}
+
+// ProjectAccessToken represents a GitLab project access token.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/project_access_tokens.html
+type ProjectAccessToken struct {
+	ID          int              `json:"id"`
+	UserID      int              `json:"user_id"`
+	Name        string           `json:"name"`
+	Scopes      []string         `json:"scopes"`
+	CreatedAt   *time.Time       `json:"created_at"`
+	LastUsedAt  *time.Time       `json:"last_used_at"`
+	ExpiresAt   *ISOTime         `json:"expires_at"`
+	Active      bool             `json:"active"`
+	Revoked     bool             `json:"revoked"`
+	Token       string           `json:"token"`
+	AccessLevel AccessLevelValue `json:"access_level"`
+}
+
+func (v ProjectAccessToken) String() string {
+	return Stringify(v)
+}
+
+// ListProjectAccessTokensOptions represents the available
+// ListProjectAccessTokens() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_access_tokens.html#list-project-access-tokens
+type ListProjectAccessTokensOptions ListOptions
+
+// ListProjectAccessTokens gets a list of all project access tokens in a
+// project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_access_tokens.html#list-project-access-tokens
+func (s *ProjectAccessTokensService) ListProjectAccessTokens(pid interface{}, opt *ListProjectAccessTokensOptions, options ...RequestOptionFunc) ([]*ProjectAccessToken, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/access_tokens", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var pats []*ProjectAccessToken
+	resp, err := s.client.Do(req, &pats)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pats, resp, nil
+}
+
+// GetProjectAccessToken gets a single project access token in a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_access_tokens.html#get-a-project-access-token
+func (s *ProjectAccessTokensService) GetProjectAccessToken(pid interface{}, id int, options ...RequestOptionFunc) (*ProjectAccessToken, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/access_tokens/%d", PathEscape(project), id)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pat := new(ProjectAccessToken)
+	resp, err := s.client.Do(req, &pat)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pat, resp, nil
+}
+
+// CreateProjectAccessTokenOptions represents the available
+// CreateProjectAccessToken() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_access_tokens.html#create-a-project-access-token
+type CreateProjectAccessTokenOptions struct {
+	Name        *string           `url:"name,omitempty" json:"name,omitempty"`
+	Scopes      *[]string         `url:"scopes,omitempty" json:"scopes,omitempty"`
+	AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"`
+	ExpiresAt   *ISOTime          `url:"expires_at,omitempty" json:"expires_at,omitempty"`
+}
+
+// CreateProjectAccessToken creates a new project access token.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_access_tokens.html#create-a-project-access-token
+func (s *ProjectAccessTokensService) CreateProjectAccessToken(pid interface{}, opt *CreateProjectAccessTokenOptions, options ...RequestOptionFunc) (*ProjectAccessToken, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/access_tokens", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pat := new(ProjectAccessToken)
+	resp, err := s.client.Do(req, pat)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pat, resp, nil
+}
+
+// RotateProjectAccessTokenOptions represents the available RotateProjectAccessToken()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_access_tokens.html#rotate-a-project-access-token
+type RotateProjectAccessTokenOptions struct {
+	ExpiresAt *ISOTime `url:"expires_at,omitempty" json:"expires_at,omitempty"`
+}
+
+// RotateProjectAccessToken revokes a project access token and returns a new
+// project access token that expires in one week by default.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_access_tokens.html#rotate-a-project-access-token
+func (s *ProjectAccessTokensService) RotateProjectAccessToken(pid interface{}, id int, opt *RotateProjectAccessTokenOptions, options ...RequestOptionFunc) (*ProjectAccessToken, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/access_tokens/%d/rotate", PathEscape(project), id)
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pat := new(ProjectAccessToken)
+	resp, err := s.client.Do(req, pat)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pat, resp, nil
+}
+
+// RevokeProjectAccessToken revokes a project access token.
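+//
+// For illustration (the project ID 1234 and token ID 42 are placeholders):
+//
+//	_, err := git.ProjectAccessTokens.RevokeProjectAccessToken(1234, 42)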
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_access_tokens.html#revoke-a-project-access-token
+func (s *ProjectAccessTokensService) RevokeProjectAccessToken(pid interface{}, id int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/access_tokens/%d", PathEscape(project), id)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/project_badges.go b/vendor/github.com/xanzy/go-gitlab/project_badges.go
new file mode 100644
index 0000000000..d5afe1e039
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/project_badges.go
@@ -0,0 +1,230 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+)
+
+// ProjectBadge represents a project badge.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_badges.html#list-all-badges-of-a-project
+type ProjectBadge struct {
+	ID               int    `json:"id"`
+	Name             string `json:"name"`
+	LinkURL          string `json:"link_url"`
+	ImageURL         string `json:"image_url"`
+	RenderedLinkURL  string `json:"rendered_link_url"`
+	RenderedImageURL string `json:"rendered_image_url"`
+	// Kind represents a project badge kind. It can be empty when the badge
+	// is returned by PreviewProjectBadge().
+	Kind string `json:"kind"`
+}
+
+// ProjectBadgesService handles communication with the project badges
+// related methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/project_badges.html
+type ProjectBadgesService struct {
+	client *Client
+}
+
+// ListProjectBadgesOptions represents the available ListProjectBadges()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_badges.html#list-all-badges-of-a-project
+type ListProjectBadgesOptions struct {
+	ListOptions
+	Name *string `url:"name,omitempty" json:"name,omitempty"`
+}
+
+// ListProjectBadges gets a list of a project's badges and its group badges.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_badges.html#list-all-badges-of-a-project
+func (s *ProjectBadgesService) ListProjectBadges(pid interface{}, opt *ListProjectBadgesOptions, options ...RequestOptionFunc) ([]*ProjectBadge, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/badges", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var pb []*ProjectBadge
+	resp, err := s.client.Do(req, &pb)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pb, resp, nil
+}
+
+// GetProjectBadge gets a project badge.
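+//
+// Usage sketch (the project ID 1234 and badge ID 7 are placeholders):
+//
+//	badge, _, err := git.ProjectBadges.GetProjectBadge(1234, 7)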
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_badges.html#get-a-badge-of-a-project
+func (s *ProjectBadgesService) GetProjectBadge(pid interface{}, badge int, options ...RequestOptionFunc) (*ProjectBadge, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/badges/%d", PathEscape(project), badge)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pb := new(ProjectBadge)
+	resp, err := s.client.Do(req, pb)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pb, resp, nil
+}
+
+// AddProjectBadgeOptions represents the available AddProjectBadge() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_badges.html#add-a-badge-to-a-project
+type AddProjectBadgeOptions struct {
+	LinkURL  *string `url:"link_url,omitempty" json:"link_url,omitempty"`
+	ImageURL *string `url:"image_url,omitempty" json:"image_url,omitempty"`
+	Name     *string `url:"name,omitempty" json:"name,omitempty"`
+}
+
+// AddProjectBadge adds a badge to a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_badges.html#add-a-badge-to-a-project
+func (s *ProjectBadgesService) AddProjectBadge(pid interface{}, opt *AddProjectBadgeOptions, options ...RequestOptionFunc) (*ProjectBadge, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/badges", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pb := new(ProjectBadge)
+	resp, err := s.client.Do(req, pb)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pb, resp, nil
+}
+
+// EditProjectBadgeOptions represents the available EditProjectBadge() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_badges.html#edit-a-badge-of-a-project
+type EditProjectBadgeOptions struct {
+	LinkURL  *string `url:"link_url,omitempty" json:"link_url,omitempty"`
+	ImageURL *string `url:"image_url,omitempty" json:"image_url,omitempty"`
+	Name     *string `url:"name,omitempty" json:"name,omitempty"`
+}
+
+// EditProjectBadge updates a badge of a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_badges.html#edit-a-badge-of-a-project
+func (s *ProjectBadgesService) EditProjectBadge(pid interface{}, badge int, opt *EditProjectBadgeOptions, options ...RequestOptionFunc) (*ProjectBadge, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/badges/%d", PathEscape(project), badge)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pb := new(ProjectBadge)
+	resp, err := s.client.Do(req, pb)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pb, resp, nil
+}
+
+// DeleteProjectBadge removes a badge from a project. Only a project's own
+// badges can be removed with this endpoint; group badges are not affected.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_badges.html#remove-a-badge-from-a-project
+func (s *ProjectBadgesService) DeleteProjectBadge(pid interface{}, badge int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/badges/%d", PathEscape(project), badge)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// ProjectBadgePreviewOptions represents the available PreviewProjectBadge() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_badges.html#preview-a-badge-from-a-project
+type ProjectBadgePreviewOptions struct {
+	LinkURL  *string `url:"link_url,omitempty" json:"link_url,omitempty"`
+	ImageURL *string `url:"image_url,omitempty" json:"image_url,omitempty"`
+}
+
+// PreviewProjectBadge returns the final link_url and image_url of a badge
+// after its placeholders have been resolved.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_badges.html#preview-a-badge-from-a-project
+func (s *ProjectBadgesService) PreviewProjectBadge(pid interface{}, opt *ProjectBadgePreviewOptions, options ...RequestOptionFunc) (*ProjectBadge, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/badges/render", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pb := new(ProjectBadge)
+	resp, err := s.client.Do(req, &pb)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pb, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/project_clusters.go b/vendor/github.com/xanzy/go-gitlab/project_clusters.go
new file mode 100644
index 0000000000..792a69b613
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/project_clusters.go
@@ -0,0 +1,236 @@
+//
+// Copyright 2021, Matej Velikonja
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// ProjectClustersService handles communication with the
+// project clusters related methods of the GitLab API.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_clusters.html
+type ProjectClustersService struct {
+	client *Client
+}
+
+// ProjectCluster represents a GitLab Project Cluster.
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/project_clusters.html +type ProjectCluster struct { + ID int `json:"id"` + Name string `json:"name"` + Domain string `json:"domain"` + CreatedAt *time.Time `json:"created_at"` + ProviderType string `json:"provider_type"` + PlatformType string `json:"platform_type"` + EnvironmentScope string `json:"environment_scope"` + ClusterType string `json:"cluster_type"` + User *User `json:"user"` + PlatformKubernetes *PlatformKubernetes `json:"platform_kubernetes"` + ManagementProject *ManagementProject `json:"management_project"` + Project *Project `json:"project"` +} + +func (v ProjectCluster) String() string { + return Stringify(v) +} + +// PlatformKubernetes represents a GitLab Project Cluster PlatformKubernetes. +type PlatformKubernetes struct { + APIURL string `json:"api_url"` + Token string `json:"token"` + CaCert string `json:"ca_cert"` + Namespace string `json:"namespace"` + AuthorizationType string `json:"authorization_type"` +} + +// ManagementProject represents a GitLab Project Cluster management_project. +type ManagementProject struct { + ID int `json:"id"` + Description string `json:"description"` + Name string `json:"name"` + NameWithNamespace string `json:"name_with_namespace"` + Path string `json:"path"` + PathWithNamespace string `json:"path_with_namespace"` + CreatedAt *time.Time `json:"created_at"` +} + +// ListClusters gets a list of all clusters in a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/project_clusters.html#list-project-clusters +func (s *ProjectClustersService) ListClusters(pid interface{}, options ...RequestOptionFunc) ([]*ProjectCluster, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/clusters", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + var pcs []*ProjectCluster + resp, err := s.client.Do(req, &pcs) + if err != nil { + return nil, resp, err + } + + return pcs, resp, nil +} + +// GetCluster gets a cluster. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/project_clusters.html#get-a-single-project-cluster +func (s *ProjectClustersService) GetCluster(pid interface{}, cluster int, options ...RequestOptionFunc) (*ProjectCluster, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/clusters/%d", PathEscape(project), cluster) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + pc := new(ProjectCluster) + resp, err := s.client.Do(req, &pc) + if err != nil { + return nil, resp, err + } + + return pc, resp, nil +} + +// AddClusterOptions represents the available AddCluster() options. 
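+//
+// A hedged construction sketch (all values are placeholders; the service is
+// assumed to be reachable as client.ProjectClusters):
+//
+//	name, apiURL, token := "cluster-5", "https://35.111.51.20", "secret"
+//	opt := &AddClusterOptions{
+//		Name: &name,
+//		PlatformKubernetes: &AddPlatformKubernetesOptions{
+//			APIURL: &apiURL,
+//			Token:  &token,
+//		},
+//	}
+//	pc, _, err := client.ProjectClusters.AddCluster(1, opt)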
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/project_clusters.html#add-existing-cluster-to-project +type AddClusterOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + Domain *string `url:"domain,omitempty" json:"domain,omitempty"` + Enabled *bool `url:"enabled,omitempty" json:"enabled,omitempty"` + Managed *bool `url:"managed,omitempty" json:"managed,omitempty"` + EnvironmentScope *string `url:"environment_scope,omitempty" json:"environment_scope,omitempty"` + PlatformKubernetes *AddPlatformKubernetesOptions `url:"platform_kubernetes_attributes,omitempty" json:"platform_kubernetes_attributes,omitempty"` + ManagementProjectID *string `url:"management_project_id,omitempty" json:"management_project_id,omitempty"` +} + +// AddPlatformKubernetesOptions represents the available PlatformKubernetes options for adding. +type AddPlatformKubernetesOptions struct { + APIURL *string `url:"api_url,omitempty" json:"api_url,omitempty"` + Token *string `url:"token,omitempty" json:"token,omitempty"` + CaCert *string `url:"ca_cert,omitempty" json:"ca_cert,omitempty"` + Namespace *string `url:"namespace,omitempty" json:"namespace,omitempty"` + AuthorizationType *string `url:"authorization_type,omitempty" json:"authorization_type,omitempty"` +} + +// AddCluster adds an existing cluster to the project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/project_clusters.html#add-existing-cluster-to-project +func (s *ProjectClustersService) AddCluster(pid interface{}, opt *AddClusterOptions, options ...RequestOptionFunc) (*ProjectCluster, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/clusters/user", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + pc := new(ProjectCluster) + resp, err := s.client.Do(req, pc) + if err != nil { + return nil, resp, err + } + + return pc, resp, nil +} + +// EditClusterOptions represents the available EditCluster() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/project_clusters.html#edit-project-cluster +type EditClusterOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + Domain *string `url:"domain,omitempty" json:"domain,omitempty"` + EnvironmentScope *string `url:"environment_scope,omitempty" json:"environment_scope,omitempty"` + ManagementProjectID *string `url:"management_project_id,omitempty" json:"management_project_id,omitempty"` + PlatformKubernetes *EditPlatformKubernetesOptions `url:"platform_kubernetes_attributes,omitempty" json:"platform_kubernetes_attributes,omitempty"` +} + +// EditPlatformKubernetesOptions represents the available PlatformKubernetes options for editing. +type EditPlatformKubernetesOptions struct { + APIURL *string `url:"api_url,omitempty" json:"api_url,omitempty"` + Token *string `url:"token,omitempty" json:"token,omitempty"` + CaCert *string `url:"ca_cert,omitempty" json:"ca_cert,omitempty"` + Namespace *string `url:"namespace,omitempty" json:"namespace,omitempty"` +} + +// EditCluster updates an existing project cluster. 
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_clusters.html#edit-project-cluster
+func (s *ProjectClustersService) EditCluster(pid interface{}, cluster int, opt *EditClusterOptions, options ...RequestOptionFunc) (*ProjectCluster, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/clusters/%d", PathEscape(project), cluster)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pc := new(ProjectCluster)
+	resp, err := s.client.Do(req, pc)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pc, resp, nil
+}
+
+// DeleteCluster deletes an existing project cluster.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_clusters.html#delete-project-cluster
+func (s *ProjectClustersService) DeleteCluster(pid interface{}, cluster int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/clusters/%d", PathEscape(project), cluster)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/project_feature_flags.go b/vendor/github.com/xanzy/go-gitlab/project_feature_flags.go
new file mode 100644
index 0000000000..98c358868c
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/project_feature_flags.go
@@ -0,0 +1,246 @@
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// ProjectFeatureFlagService handles operations on GitLab project feature
+// flags using the following API:
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/feature_flags.html
+type ProjectFeatureFlagService struct {
+	client *Client
+}
+
+// ProjectFeatureFlag represents a GitLab project feature flag.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/feature_flags.html
+type ProjectFeatureFlag struct {
+	Name        string                        `json:"name"`
+	Description string                        `json:"description"`
+	Active      bool                          `json:"active"`
+	Version     string                        `json:"version"`
+	CreatedAt   *time.Time                    `json:"created_at"`
+	UpdatedAt   *time.Time                    `json:"updated_at"`
+	Scopes      []*ProjectFeatureFlagScope    `json:"scopes"`
+	Strategies  []*ProjectFeatureFlagStrategy `json:"strategies"`
+}
+
+// ProjectFeatureFlagScope defines the scopes of a feature flag.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/feature_flags.html
+type ProjectFeatureFlagScope struct {
+	ID               int    `json:"id"`
+	EnvironmentScope string `json:"environment_scope"`
+}
+
+// ProjectFeatureFlagStrategy defines the strategy used for a feature flag.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/feature_flags.html
+type ProjectFeatureFlagStrategy struct {
+	ID         int                                  `json:"id"`
+	Name       string                               `json:"name"`
+	Parameters *ProjectFeatureFlagStrategyParameter `json:"parameters"`
+	Scopes     []*ProjectFeatureFlagScope           `json:"scopes"`
+}
+
+// ProjectFeatureFlagStrategyParameter is used in updating and creating feature flags.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/feature_flags.html
+type ProjectFeatureFlagStrategyParameter struct {
+	GroupID    string `json:"groupId,omitempty"`
+	UserIDs    string `json:"userIds,omitempty"`
+	Percentage string `json:"percentage,omitempty"`
+
+	// The following fields aren't documented in the GitLab API docs,
+	// but have been present in the GitLab API since 13.5.
+	// Docs: https://docs.getunleash.io/reference/activation-strategies#gradual-rollout
+	Rollout    string `json:"rollout,omitempty"`
+	Stickiness string `json:"stickiness,omitempty"`
+}
+
+func (i ProjectFeatureFlag) String() string {
+	return Stringify(i)
+}
+
+// ListProjectFeatureFlagOptions contains the options for ListProjectFeatureFlags().
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/feature_flags.html#list-feature-flags-for-a-project
+type ListProjectFeatureFlagOptions struct {
+	ListOptions
+	Scope *string `url:"scope,omitempty" json:"scope,omitempty"`
+}
+
+// ListProjectFeatureFlags returns a list with the feature flags of a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/feature_flags.html#list-feature-flags-for-a-project
+func (s *ProjectFeatureFlagService) ListProjectFeatureFlags(pid interface{}, opt *ListProjectFeatureFlagOptions, options ...RequestOptionFunc) ([]*ProjectFeatureFlag, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/feature_flags", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var pffs []*ProjectFeatureFlag
+	resp, err := s.client.Do(req, &pffs)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pffs, resp, nil
+}
+
+// GetProjectFeatureFlag gets a single feature flag for the specified project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/feature_flags.html#get-a-single-feature-flag
+func (s *ProjectFeatureFlagService) GetProjectFeatureFlag(pid interface{}, name string, options ...RequestOptionFunc) (*ProjectFeatureFlag, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/feature_flags/%s", PathEscape(project), name)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	flag := new(ProjectFeatureFlag)
+	resp, err := s.client.Do(req, flag)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return flag, resp, nil
+}
+
+// CreateProjectFeatureFlagOptions represents the available
+// CreateProjectFeatureFlag() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/feature_flags.html#create-a-feature-flag
+type CreateProjectFeatureFlagOptions struct {
+	Name        *string                        `url:"name,omitempty" json:"name,omitempty"`
+	Description *string                        `url:"description,omitempty" json:"description,omitempty"`
+	Version     *string                        `url:"version,omitempty" json:"version,omitempty"`
+	Active      *bool                          `url:"active,omitempty" json:"active,omitempty"`
+	Strategies  *[]*FeatureFlagStrategyOptions `url:"strategies,omitempty" json:"strategies,omitempty"`
+}
+
+// FeatureFlagStrategyOptions represents the available feature flag strategy
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/feature_flags.html#create-a-feature-flag
+type FeatureFlagStrategyOptions struct {
+	ID         *int                                 `url:"id,omitempty" json:"id,omitempty"`
+	Name       *string                              `url:"name,omitempty" json:"name,omitempty"`
+	Parameters *ProjectFeatureFlagStrategyParameter `url:"parameters,omitempty" json:"parameters,omitempty"`
+	Scopes     *[]*ProjectFeatureFlagScope          `url:"scopes,omitempty" json:"scopes,omitempty"`
+}
+
+// ProjectFeatureFlagScopeOptions represents the available feature flag scope
+// options.
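+//
+// A hypothetical sketch of creating a flag with a gradual-rollout strategy
+// (names, IDs and parameter values are placeholders; the ProjectFeatureFlags
+// field on the Client is assumed):
+//
+//	name, version, strategy := "my_flag", "new_version_flag", "gradualRolloutUserId"
+//	flag, _, err := client.ProjectFeatureFlags.CreateProjectFeatureFlag(1, &CreateProjectFeatureFlagOptions{
+//		Name:    &name,
+//		Version: &version,
+//		Strategies: &[]*FeatureFlagStrategyOptions{{
+//			Name:       &strategy,
+//			Parameters: &ProjectFeatureFlagStrategyParameter{Percentage: "25", GroupID: "default"},
+//		}},
+//	})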
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/feature_flags.html#create-a-feature-flag
+type ProjectFeatureFlagScopeOptions struct {
+	ID               *int    `url:"id,omitempty" json:"id,omitempty"`
+	EnvironmentScope *string `url:"environment_scope,omitempty" json:"environment_scope,omitempty"`
+}
+
+// CreateProjectFeatureFlag creates a feature flag.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/feature_flags.html#create-a-feature-flag
+func (s *ProjectFeatureFlagService) CreateProjectFeatureFlag(pid interface{}, opt *CreateProjectFeatureFlagOptions, options ...RequestOptionFunc) (*ProjectFeatureFlag, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/feature_flags",
+		PathEscape(project),
+	)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	flag := new(ProjectFeatureFlag)
+	resp, err := s.client.Do(req, flag)
+	if err != nil {
+		return flag, resp, err
+	}
+
+	return flag, resp, nil
+}
+
+// UpdateProjectFeatureFlagOptions represents the available
+// UpdateProjectFeatureFlag() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/feature_flags.html#update-a-feature-flag
+type UpdateProjectFeatureFlagOptions struct {
+	Name        *string                        `url:"name,omitempty" json:"name,omitempty"`
+	Description *string                        `url:"description,omitempty" json:"description,omitempty"`
+	Active      *bool                          `url:"active,omitempty" json:"active,omitempty"`
+	Strategies  *[]*FeatureFlagStrategyOptions `url:"strategies,omitempty" json:"strategies,omitempty"`
+}
+
+// UpdateProjectFeatureFlag updates a feature flag.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/feature_flags.html#update-a-feature-flag
+func (s *ProjectFeatureFlagService) UpdateProjectFeatureFlag(pid interface{}, name string, opt *UpdateProjectFeatureFlagOptions, options ...RequestOptionFunc) (*ProjectFeatureFlag, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/feature_flags/%s",
+		PathEscape(project),
+		name,
+	)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	flag := new(ProjectFeatureFlag)
+	resp, err := s.client.Do(req, flag)
+	if err != nil {
+		return flag, resp, err
+	}
+
+	return flag, resp, nil
+}
+
+// DeleteProjectFeatureFlag deletes a feature flag.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/feature_flags.html#delete-a-feature-flag
+func (s *ProjectFeatureFlagService) DeleteProjectFeatureFlag(pid interface{}, name string, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/feature_flags/%s", PathEscape(project), name)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/project_import_export.go b/vendor/github.com/xanzy/go-gitlab/project_import_export.go
new file mode 100644
index 0000000000..266be839a6
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/project_import_export.go
@@ -0,0 +1,225 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net/http"
+	"time"
+)
+
+// ProjectImportExportService handles communication with the project
+// import/export related methods of the GitLab API.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_import_export.html
+type ProjectImportExportService struct {
+	client *Client
+}
+
+// ImportStatus represents a project import status.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_import_export.html#import-status
+type ImportStatus struct {
+	ID                int        `json:"id"`
+	Description       string     `json:"description"`
+	Name              string     `json:"name"`
+	NameWithNamespace string     `json:"name_with_namespace"`
+	Path              string     `json:"path"`
+	PathWithNamespace string     `json:"path_with_namespace"`
+	CreatedAt         *time.Time `json:"created_at"`
+	ImportStatus      string     `json:"import_status"`
+	ImportType        string     `json:"import_type"`
+	CorrelationID     string     `json:"correlation_id"`
+	ImportError       string     `json:"import_error"`
+}
+
+func (s ImportStatus) String() string {
+	return Stringify(s)
+}
+
+// ExportStatus represents a project export status.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_import_export.html#export-status
+type ExportStatus struct {
+	ID                int        `json:"id"`
+	Description       string     `json:"description"`
+	Name              string     `json:"name"`
+	NameWithNamespace string     `json:"name_with_namespace"`
+	Path              string     `json:"path"`
+	PathWithNamespace string     `json:"path_with_namespace"`
+	CreatedAt         *time.Time `json:"created_at"`
+	ExportStatus      string     `json:"export_status"`
+	Message           string     `json:"message"`
+	Links             struct {
+		APIURL string `json:"api_url"`
+		WebURL string `json:"web_url"`
+	} `json:"_links"`
+}
+
+func (s ExportStatus) String() string {
+	return Stringify(s)
+}
+
+// ScheduleExportOptions represents the available ScheduleExport() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_import_export.html#schedule-an-export
+type ScheduleExportOptions struct {
+	Description *string `url:"description,omitempty" json:"description,omitempty"`
+	Upload      struct {
+		URL        *string `url:"url,omitempty" json:"url,omitempty"`
+		HTTPMethod *string `url:"http_method,omitempty" json:"http_method,omitempty"`
+	} `url:"upload,omitempty" json:"upload,omitempty"`
+}
+
+// ScheduleExport schedules a project export.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_import_export.html#schedule-an-export
+func (s *ProjectImportExportService) ScheduleExport(pid interface{}, opt *ScheduleExportOptions, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/export", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// ExportStatus gets the status of a project export.
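+//
+// A rough schedule-then-poll sketch (illustrative; client is a placeholder
+// *Client and the "finished" state string follows the GitLab export docs):
+//
+//	_, err := client.ProjectImportExport.ScheduleExport(1, nil)
+//	// ... later, poll until the export is ready ...
+//	es, _, err := client.ProjectImportExport.ExportStatus(1)
+//	if err == nil && es.ExportStatus == "finished" {
+//		data, _, err := client.ProjectImportExport.ExportDownload(1)
+//		_ = data // write the archive somewhere
+//		_ = err
+//	}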
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_import_export.html#export-status
+func (s *ProjectImportExportService) ExportStatus(pid interface{}, options ...RequestOptionFunc) (*ExportStatus, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/export", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	es := new(ExportStatus)
+	resp, err := s.client.Do(req, es)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return es, resp, nil
+}
+
+// ExportDownload downloads the finished export.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_import_export.html#export-download
+func (s *ProjectImportExportService) ExportDownload(pid interface{}, options ...RequestOptionFunc) ([]byte, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/export/download", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var b bytes.Buffer
+	resp, err := s.client.Do(req, &b)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return b.Bytes(), resp, err
+}
+
+// ImportFileOptions represents the available ImportFromFile() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_import_export.html#import-a-file
+type ImportFileOptions struct {
+	Namespace      *string               `url:"namespace,omitempty" json:"namespace,omitempty"`
+	Name           *string               `url:"name,omitempty" json:"name,omitempty"`
+	Path           *string               `url:"path,omitempty" json:"path,omitempty"`
+	Overwrite      *bool                 `url:"overwrite,omitempty" json:"overwrite,omitempty"`
+	OverrideParams *CreateProjectOptions `url:"override_params,omitempty" json:"override_params,omitempty"`
+}
+
+// ImportFromFile imports a project from an archive file.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_import_export.html#import-a-file
+func (s *ProjectImportExportService) ImportFromFile(archive io.Reader, opt *ImportFileOptions, options ...RequestOptionFunc) (*ImportStatus, *Response, error) {
+	req, err := s.client.UploadRequest(
+		http.MethodPost,
+		"projects/import",
+		archive,
+		"archive.tar.gz",
+		UploadFile,
+		opt,
+		options,
+	)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	is := new(ImportStatus)
+	resp, err := s.client.Do(req, is)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return is, resp, nil
+}
+
+// ImportStatus gets the status of an import.
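+//
+// A hedged sketch of importing a local archive (file name and path are
+// placeholders; assumes a *Client named client):
+//
+//	f, err := os.Open("export.tar.gz")
+//	if err != nil {
+//		// handle the error
+//	}
+//	defer f.Close()
+//	path := "new-project"
+//	is, _, err := client.ProjectImportExport.ImportFromFile(f, &ImportFileOptions{Path: &path})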
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_import_export.html#import-status
+func (s *ProjectImportExportService) ImportStatus(pid interface{}, options ...RequestOptionFunc) (*ImportStatus, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/import", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	is := new(ImportStatus)
+	resp, err := s.client.Do(req, is)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return is, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/project_iterations.go b/vendor/github.com/xanzy/go-gitlab/project_iterations.go
new file mode 100644
index 0000000000..1fe0ddf811
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/project_iterations.go
@@ -0,0 +1,90 @@
+//
+// Copyright 2022, Daniel Steinke
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// ProjectIterationsService handles communication with the project iterations
+// related methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/iterations.html
+type ProjectIterationsService struct {
+	client *Client
+}
+
+// ProjectIteration represents a GitLab project iteration.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/iterations.html
+type ProjectIteration struct {
+	ID          int        `json:"id"`
+	IID         int        `json:"iid"`
+	Sequence    int        `json:"sequence"`
+	GroupID     int        `json:"group_id"`
+	Title       string     `json:"title"`
+	Description string     `json:"description"`
+	State       int        `json:"state"`
+	CreatedAt   *time.Time `json:"created_at"`
+	UpdatedAt   *time.Time `json:"updated_at"`
+	DueDate     *ISOTime   `json:"due_date"`
+	StartDate   *ISOTime   `json:"start_date"`
+	WebURL      string     `json:"web_url"`
+}
+
+func (i ProjectIteration) String() string {
+	return Stringify(i)
+}
+
+// ListProjectIterationsOptions contains the available ListProjectIterations()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/iterations.html#list-project-iterations
+type ListProjectIterationsOptions struct {
+	ListOptions
+	State            *string `url:"state,omitempty" json:"state,omitempty"`
+	Search           *string `url:"search,omitempty" json:"search,omitempty"`
+	IncludeAncestors *bool   `url:"include_ancestors,omitempty" json:"include_ancestors,omitempty"`
+}
+
+// ListProjectIterations returns a list of project iterations.
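+//
+// A small illustrative call (search term and project ID are placeholders,
+// assuming a *Client named client):
+//
+//	search := "sprint"
+//	iters, _, err := client.ProjectIterations.ListProjectIterations(1, &ListProjectIterationsOptions{Search: &search})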
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/iterations.html#list-project-iterations +func (i *ProjectIterationsService) ListProjectIterations(pid interface{}, opt *ListProjectIterationsOptions, options ...RequestOptionFunc) ([]*ProjectIteration, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/iterations", PathEscape(project)) + + req, err := i.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var pis []*ProjectIteration + resp, err := i.client.Do(req, &pis) + if err != nil { + return nil, resp, err + } + + return pis, resp, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/project_managed_licenses.go b/vendor/github.com/xanzy/go-gitlab/project_managed_licenses.go new file mode 100644 index 0000000000..d6f23f2d9a --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/project_managed_licenses.go @@ -0,0 +1,188 @@ +// +// Copyright 2021, Andrea Perizzato +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" +) + +// ManagedLicensesService handles communication with the managed licenses +// methods of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/managed_licenses.html +type ManagedLicensesService struct { + client *Client +} + +// ManagedLicense represents a managed license. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/managed_licenses.html +type ManagedLicense struct { + ID int `json:"id"` + Name string `json:"name"` + ApprovalStatus LicenseApprovalStatusValue `json:"approval_status"` +} + +// ListManagedLicenses returns a list of managed licenses from a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/managed_licenses.html#list-managed-licenses +func (s *ManagedLicensesService) ListManagedLicenses(pid interface{}, options ...RequestOptionFunc) ([]*ManagedLicense, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/managed_licenses", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + var mls []*ManagedLicense + resp, err := s.client.Do(req, &mls) + if err != nil { + return nil, resp, err + } + + return mls, resp, nil +} + +// GetManagedLicense returns an existing managed license. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/managed_licenses.html#show-an-existing-managed-license +func (s *ManagedLicensesService) GetManagedLicense(pid, mlid interface{}, options ...RequestOptionFunc) (*ManagedLicense, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + license, err := parseID(mlid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/managed_licenses/%s", PathEscape(project), PathEscape(license)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + ml := new(ManagedLicense) + resp, err := s.client.Do(req, ml) + if err != nil { + return nil, resp, err + } + + return ml, resp, nil +} + +// AddManagedLicenseOptions represents the available AddManagedLicense() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/managed_licenses.html#create-a-new-managed-license +type AddManagedLicenseOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + ApprovalStatus *LicenseApprovalStatusValue `url:"approval_status,omitempty" json:"approval_status,omitempty"` +} + +// AddManagedLicense adds a managed license to a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/managed_licenses.html#create-a-new-managed-license +func (s *ManagedLicensesService) AddManagedLicense(pid interface{}, opt *AddManagedLicenseOptions, options ...RequestOptionFunc) (*ManagedLicense, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/managed_licenses", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + ml := new(ManagedLicense) + resp, err := s.client.Do(req, ml) + if err != nil { + return nil, resp, err + } + + return ml, resp, nil +} + +// DeleteManagedLicense deletes a managed license with a given ID. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/managed_licenses.html#delete-a-managed-license +func (s *ManagedLicensesService) DeleteManagedLicense(pid, mlid interface{}, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + license, err := parseID(mlid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/managed_licenses/%s", PathEscape(project), PathEscape(license)) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// EditManagedLicenceOptions represents the available EditManagedLicense() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/managed_licenses.html#edit-an-existing-managed-license +type EditManagedLicenceOptions struct { + ApprovalStatus *LicenseApprovalStatusValue `url:"approval_status,omitempty" json:"approval_status,omitempty"` +} + +// EditManagedLicense updates an existing managed license with a new approval +// status. 
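+//
+// A hypothetical sketch (assumes this package's LicenseApproved constant and
+// a *Client named client; IDs are placeholders):
+//
+//	status := LicenseApproved
+//	ml, _, err := client.ManagedLicenses.EditManagedLicense(1, 5, &EditManagedLicenceOptions{
+//		ApprovalStatus: &status,
+//	})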
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/managed_licenses.html#edit-an-existing-managed-license
+func (s *ManagedLicensesService) EditManagedLicense(pid, mlid interface{}, opt *EditManagedLicenceOptions, options ...RequestOptionFunc) (*ManagedLicense, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	license, err := parseID(mlid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/managed_licenses/%s", PathEscape(project), PathEscape(license))
+
+	req, err := s.client.NewRequest(http.MethodPatch, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	ml := new(ManagedLicense)
+	resp, err := s.client.Do(req, ml)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ml, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/project_members.go b/vendor/github.com/xanzy/go-gitlab/project_members.go
new file mode 100644
index 0000000000..37d4b8a2e6
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/project_members.go
@@ -0,0 +1,238 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+)
+
+// ProjectMembersService handles communication with the project members
+// related methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/members.html
+type ProjectMembersService struct {
+	client *Client
+}
+
+// ListProjectMembersOptions represents the available ListProjectMembers() and
+// ListAllProjectMembers() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/members.html#list-all-members-of-a-group-or-project
+type ListProjectMembersOptions struct {
+	ListOptions
+	Query   *string `url:"query,omitempty" json:"query,omitempty"`
+	UserIDs *[]int  `url:"user_ids[],omitempty" json:"user_ids,omitempty"`
+}
+
+// ListProjectMembers gets a list of a project's team members viewable by the
+// authenticated user. Returns only direct members and not inherited members
+// through ancestor groups.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/members.html#list-all-members-of-a-group-or-project
+func (s *ProjectMembersService) ListProjectMembers(pid interface{}, opt *ListProjectMembersOptions, options ...RequestOptionFunc) ([]*ProjectMember, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/members", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var pm []*ProjectMember
+	resp, err := s.client.Do(req, &pm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pm, resp, nil
+}
+
+// ListAllProjectMembers gets a list of a project's team members viewable by the
+// authenticated user. Returns a list including inherited members through
+// ancestor groups.
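+//
+// A rough pagination sketch (client is a placeholder *Client; real code
+// should handle errors rather than just breaking):
+//
+//	opt := &ListProjectMembersOptions{ListOptions: ListOptions{PerPage: 100}}
+//	for {
+//		members, resp, err := client.ProjectMembers.ListAllProjectMembers(1, opt)
+//		if err != nil {
+//			break // handle the error in real code
+//		}
+//		_ = members // process this page
+//		if resp.NextPage == 0 {
+//			break
+//		}
+//		opt.Page = resp.NextPage
+//	}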
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/members.html#list-all-members-of-a-group-or-project-including-inherited-and-invited-members
+func (s *ProjectMembersService) ListAllProjectMembers(pid interface{}, opt *ListProjectMembersOptions, options ...RequestOptionFunc) ([]*ProjectMember, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/members/all", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var pm []*ProjectMember
+	resp, err := s.client.Do(req, &pm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pm, resp, nil
+}
+
+// GetProjectMember gets a project team member.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/members.html#get-a-member-of-a-group-or-project
+func (s *ProjectMembersService) GetProjectMember(pid interface{}, user int, options ...RequestOptionFunc) (*ProjectMember, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/members/%d", PathEscape(project), user)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pm := new(ProjectMember)
+	resp, err := s.client.Do(req, pm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pm, resp, nil
+}
+
+// GetInheritedProjectMember gets a project team member, including inherited
+// and invited members.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/members.html#get-a-member-of-a-group-or-project-including-inherited-and-invited-members
+func (s *ProjectMembersService) GetInheritedProjectMember(pid interface{}, user int, options ...RequestOptionFunc) (*ProjectMember, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/members/all/%d", PathEscape(project), user)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pm := new(ProjectMember)
+	resp, err := s.client.Do(req, pm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pm, resp, nil
+}
+
+// AddProjectMemberOptions represents the available AddProjectMember() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/members.html#add-a-member-to-a-group-or-project
+type AddProjectMemberOptions struct {
+	UserID       interface{}       `url:"user_id,omitempty" json:"user_id,omitempty"`
+	AccessLevel  *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"`
+	ExpiresAt    *string           `url:"expires_at,omitempty" json:"expires_at,omitempty"`
+	MemberRoleID *int              `url:"member_role_id,omitempty" json:"member_role_id,omitempty"`
+}
+
+// AddProjectMember adds a user to a project team. This is an idempotent
+// method and can be called multiple times with the same parameters. Adding
+// team membership to a user that is already a member does not affect the
+// existing membership.
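+//
+// A hedged sketch (DeveloperPermissions is this package's AccessLevelValue
+// constant; the user ID and client are placeholders):
+//
+//	level := DeveloperPermissions
+//	pm, _, err := client.ProjectMembers.AddProjectMember(1, &AddProjectMemberOptions{
+//		UserID:      37,
+//		AccessLevel: &level,
+//	})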
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/members.html#add-a-member-to-a-group-or-project
+func (s *ProjectMembersService) AddProjectMember(pid interface{}, opt *AddProjectMemberOptions, options ...RequestOptionFunc) (*ProjectMember, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/members", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pm := new(ProjectMember)
+	resp, err := s.client.Do(req, pm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pm, resp, nil
+}
+
+// EditProjectMemberOptions represents the available EditProjectMember() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/members.html#edit-a-member-of-a-group-or-project
+type EditProjectMemberOptions struct {
+	AccessLevel  *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"`
+	ExpiresAt    *string           `url:"expires_at,omitempty" json:"expires_at,omitempty"`
+	MemberRoleID *int              `url:"member_role_id,omitempty" json:"member_role_id,omitempty"`
+}
+
+// EditProjectMember updates a project team member to a specified access level.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/members.html#edit-a-member-of-a-group-or-project
+func (s *ProjectMembersService) EditProjectMember(pid interface{}, user int, opt *EditProjectMemberOptions, options ...RequestOptionFunc) (*ProjectMember, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/members/%d", PathEscape(project), user)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pm := new(ProjectMember)
+	resp, err := s.client.Do(req, pm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pm, resp, nil
+}
+
+// DeleteProjectMember removes a user from a project team.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/members.html#remove-a-member-from-a-group-or-project
+func (s *ProjectMembersService) DeleteProjectMember(pid interface{}, user int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/members/%d", PathEscape(project), user)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/project_mirror.go b/vendor/github.com/xanzy/go-gitlab/project_mirror.go
new file mode 100644
index 0000000000..16f030d39c
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/project_mirror.go
@@ -0,0 +1,195 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// ProjectMirrorService handles communication with the project mirror
+// related methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/remote_mirrors.html
+type ProjectMirrorService struct {
+	client *Client
+}
+
+// ProjectMirror represents a project mirror configuration.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/remote_mirrors.html
+type ProjectMirror struct {
+	Enabled                bool       `json:"enabled"`
+	ID                     int        `json:"id"`
+	LastError              string     `json:"last_error"`
+	LastSuccessfulUpdateAt *time.Time `json:"last_successful_update_at"`
+	LastUpdateAt           *time.Time `json:"last_update_at"`
+	LastUpdateStartedAt    *time.Time `json:"last_update_started_at"`
+	MirrorBranchRegex      string     `json:"mirror_branch_regex"`
+	OnlyProtectedBranches  bool       `json:"only_protected_branches"`
+	KeepDivergentRefs      bool       `json:"keep_divergent_refs"`
+	UpdateStatus           string     `json:"update_status"`
+	URL                    string     `json:"url"`
+}
+
+// ListProjectMirrorOptions represents the available ListProjectMirror() options.
type ListProjectMirrorOptions ListOptions
+
+// ListProjectMirror gets a list of mirrors configured on the project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/remote_mirrors.html#list-a-projects-remote-mirrors
+func (s *ProjectMirrorService) ListProjectMirror(pid interface{}, opt *ListProjectMirrorOptions, options ...RequestOptionFunc) ([]*ProjectMirror, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/remote_mirrors", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var pm []*ProjectMirror
+	resp, err := s.client.Do(req, &pm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pm, resp, nil
+}
+
+// GetProjectMirror gets a single mirror configured on the project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/remote_mirrors.html#get-a-single-projects-remote-mirror
+func (s *ProjectMirrorService) GetProjectMirror(pid interface{}, mirror int, options ...RequestOptionFunc) (*ProjectMirror, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/remote_mirrors/%d", PathEscape(project), mirror)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pm := new(ProjectMirror)
+	resp, err := s.client.Do(req, pm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pm, resp, nil
+}
+
+// AddProjectMirrorOptions contains the properties required to create
+// a new project mirror.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/remote_mirrors.html#create-a-push-mirror
+type AddProjectMirrorOptions struct {
+	URL                   *string `url:"url,omitempty" json:"url,omitempty"`
+	Enabled               *bool   `url:"enabled,omitempty" json:"enabled,omitempty"`
+	KeepDivergentRefs     *bool   `url:"keep_divergent_refs,omitempty" json:"keep_divergent_refs,omitempty"`
+	OnlyProtectedBranches *bool   `url:"only_protected_branches,omitempty" json:"only_protected_branches,omitempty"`
+	MirrorBranchRegex     *string `url:"mirror_branch_regex,omitempty" json:"mirror_branch_regex,omitempty"`
+}
+
+// AddProjectMirror creates a new mirror on the project.
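+//
+// A minimal illustrative call (the mirror URL and flags are placeholders;
+// the service is assumed to hang off the Client as ProjectMirrors):
+//
+//	mirrorURL := "https://user:token@gitlab.example.com/group/mirror.git"
+//	enabled := true
+//	pm, _, err := client.ProjectMirrors.AddProjectMirror(1, &AddProjectMirrorOptions{
+//		URL:     &mirrorURL,
+//		Enabled: &enabled,
+//	})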
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/remote_mirrors.html#create-a-push-mirror
+func (s *ProjectMirrorService) AddProjectMirror(pid interface{}, opt *AddProjectMirrorOptions, options ...RequestOptionFunc) (*ProjectMirror, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/remote_mirrors", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pm := new(ProjectMirror)
+	resp, err := s.client.Do(req, pm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pm, resp, nil
+}
+
+// EditProjectMirrorOptions contains the properties required to edit
+// an existing project mirror.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/remote_mirrors.html#update-a-remote-mirrors-attributes
+type EditProjectMirrorOptions struct {
+	Enabled               *bool   `url:"enabled,omitempty" json:"enabled,omitempty"`
+	KeepDivergentRefs     *bool   `url:"keep_divergent_refs,omitempty" json:"keep_divergent_refs,omitempty"`
+	OnlyProtectedBranches *bool   `url:"only_protected_branches,omitempty" json:"only_protected_branches,omitempty"`
+	MirrorBranchRegex     *string `url:"mirror_branch_regex,omitempty" json:"mirror_branch_regex,omitempty"`
+}
+
+// EditProjectMirror updates an existing project mirror.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/remote_mirrors.html#update-a-remote-mirrors-attributes
+func (s *ProjectMirrorService) EditProjectMirror(pid interface{}, mirror int, opt *EditProjectMirrorOptions, options ...RequestOptionFunc) (*ProjectMirror, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/remote_mirrors/%d", PathEscape(project), mirror)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pm := new(ProjectMirror)
+	resp, err := s.client.Do(req, pm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pm, resp, nil
+}
+
+// DeleteProjectMirror deletes a project mirror.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/remote_mirrors.html#delete-a-remote-mirror
+func (s *ProjectMirrorService) DeleteProjectMirror(pid interface{}, mirror int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/remote_mirrors/%d", PathEscape(project), mirror)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/project_repository_storage_move.go b/vendor/github.com/xanzy/go-gitlab/project_repository_storage_move.go
new file mode 100644
index 0000000000..3beecb1f75
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/project_repository_storage_move.go
@@ -0,0 +1,199 @@
+//
+// Copyright 2023, Nick Westbury
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// ProjectRepositoryStorageMoveService handles communication with the
+// project repository storage move related methods of the GitLab API.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html
+type ProjectRepositoryStorageMoveService struct {
+	client *Client
+}
+
+// ProjectRepositoryStorageMove represents the status of a repository move.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html
+type ProjectRepositoryStorageMove struct {
+	ID                     int                `json:"id"`
+	CreatedAt              *time.Time         `json:"created_at"`
+	State                  string             `json:"state"`
+	SourceStorageName      string             `json:"source_storage_name"`
+	DestinationStorageName string             `json:"destination_storage_name"`
+	Project                *RepositoryProject `json:"project"`
+}
+
+// RepositoryProject represents the project affected by a repository move.
+type RepositoryProject struct {
+	ID                int        `json:"id"`
+	Description       string     `json:"description"`
+	Name              string     `json:"name"`
+	NameWithNamespace string     `json:"name_with_namespace"`
+	Path              string     `json:"path"`
+	PathWithNamespace string     `json:"path_with_namespace"`
+	CreatedAt         *time.Time `json:"created_at"`
+}
+
+// RetrieveAllProjectStorageMovesOptions represents the available
+// RetrieveAllStorageMoves() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html#retrieve-all-project-repository-storage-moves
+type RetrieveAllProjectStorageMovesOptions ListOptions
+
+// RetrieveAllStorageMoves retrieves all project repository storage moves
+// accessible by the authenticated user.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html#retrieve-all-project-repository-storage-moves
+func (p ProjectRepositoryStorageMoveService) RetrieveAllStorageMoves(opts RetrieveAllProjectStorageMovesOptions, options ...RequestOptionFunc) ([]*ProjectRepositoryStorageMove, *Response, error) {
+	req, err := p.client.NewRequest(http.MethodGet, "project_repository_storage_moves", opts, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var psms []*ProjectRepositoryStorageMove
+	resp, err := p.client.Do(req, &psms)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return psms, resp, err
+}
+
+// RetrieveAllStorageMovesForProject retrieves all repository storage moves for
+// a single project accessible by the authenticated user.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html#retrieve-all-repository-storage-moves-for-a-project
+func (p ProjectRepositoryStorageMoveService) RetrieveAllStorageMovesForProject(project int, opts RetrieveAllProjectStorageMovesOptions, options ...RequestOptionFunc) ([]*ProjectRepositoryStorageMove, *Response, error) {
+	u := fmt.Sprintf("projects/%d/repository_storage_moves", project)
+
+	req, err := p.client.NewRequest(http.MethodGet, u, opts, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var psms []*ProjectRepositoryStorageMove
+	resp, err := p.client.Do(req, &psms)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return psms, resp, err
+}
+
+// GetStorageMove gets a single project repository storage move.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html#get-a-single-project-repository-storage-move
+func (p ProjectRepositoryStorageMoveService) GetStorageMove(repositoryStorage int, options ...RequestOptionFunc) (*ProjectRepositoryStorageMove, *Response, error) {
+	u := fmt.Sprintf("project_repository_storage_moves/%d", repositoryStorage)
+
+	req, err := p.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	psm := new(ProjectRepositoryStorageMove)
+	resp, err := p.client.Do(req, psm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return psm, resp, err
+}
+
+// GetStorageMoveForProject gets a single repository storage move for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html#get-a-single-repository-storage-move-for-a-project
+func (p ProjectRepositoryStorageMoveService) GetStorageMoveForProject(project int, repositoryStorage int, options ...RequestOptionFunc) (*ProjectRepositoryStorageMove, *Response, error) {
+	u := fmt.Sprintf("projects/%d/repository_storage_moves/%d", project, repositoryStorage)
+
+	req, err := p.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	psm := new(ProjectRepositoryStorageMove)
+	resp, err := p.client.Do(req, psm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return psm, resp, err
+}
+
+// ScheduleStorageMoveForProjectOptions represents the available
+// ScheduleStorageMoveForProject() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html#schedule-a-repository-storage-move-for-a-project
+type ScheduleStorageMoveForProjectOptions struct {
+	DestinationStorageName *string `url:"destination_storage_name,omitempty" json:"destination_storage_name,omitempty"`
+}
+
+// ScheduleStorageMoveForProject schedules a repository move for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html#schedule-a-repository-storage-move-for-a-project
+func (p ProjectRepositoryStorageMoveService) ScheduleStorageMoveForProject(project int, opts ScheduleStorageMoveForProjectOptions, options ...RequestOptionFunc) (*ProjectRepositoryStorageMove, *Response, error) {
+	u := fmt.Sprintf("projects/%d/repository_storage_moves", project)
+
+	req, err := p.client.NewRequest(http.MethodPost, u, opts, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	psm := new(ProjectRepositoryStorageMove)
+	resp, err := p.client.Do(req, psm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return psm, resp, err
+}
+
+// ScheduleAllProjectStorageMovesOptions represents the available
+// ScheduleAllStorageMoves() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html#schedule-repository-storage-moves-for-all-projects-on-a-storage-shard
+type ScheduleAllProjectStorageMovesOptions struct {
+	SourceStorageName      *string `url:"source_storage_name,omitempty" json:"source_storage_name,omitempty"`
+	DestinationStorageName *string `url:"destination_storage_name,omitempty" json:"destination_storage_name,omitempty"`
+}
+
+// ScheduleAllStorageMoves schedules all repositories to be moved.
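+//
+// A hypothetical bulk-move sketch (storage shard names are placeholders and
+// the ProjectRepositoryStorageMove field name on the Client is assumed):
+//
+//	src, dst := "default", "storage2"
+//	_, err := client.ProjectRepositoryStorageMove.ScheduleAllStorageMoves(
+//		ScheduleAllProjectStorageMovesOptions{
+//			SourceStorageName:      &src,
+//			DestinationStorageName: &dst,
+//		},
+//	)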
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html#schedule-repository-storage-moves-for-all-projects-on-a-storage-shard +func (p ProjectRepositoryStorageMoveService) ScheduleAllStorageMoves(opts ScheduleAllProjectStorageMovesOptions, options ...RequestOptionFunc) (*Response, error) { + req, err := p.client.NewRequest(http.MethodPost, "project_repository_storage_moves", opts, options) + if err != nil { + return nil, err + } + + return p.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/project_snippets.go b/vendor/github.com/xanzy/go-gitlab/project_snippets.go new file mode 100644 index 0000000000..fe8e46aaf2 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/project_snippets.go @@ -0,0 +1,209 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "bytes" + "fmt" + "net/http" +) + +// ProjectSnippetsService handles communication with the project snippets +// related methods of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/project_snippets.html +type ProjectSnippetsService struct { + client *Client +} + +// ListProjectSnippetsOptions represents the available ListSnippets() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/project_snippets.html#list-snippets +type ListProjectSnippetsOptions ListOptions + +// ListSnippets gets a list of project snippets. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/project_snippets.html#list-snippets +func (s *ProjectSnippetsService) ListSnippets(pid interface{}, opt *ListProjectSnippetsOptions, options ...RequestOptionFunc) ([]*Snippet, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/snippets", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var ps []*Snippet + resp, err := s.client.Do(req, &ps) + if err != nil { + return nil, resp, err + } + + return ps, resp, nil +} + +// GetSnippet gets a single project snippet +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/project_snippets.html#single-snippet +func (s *ProjectSnippetsService) GetSnippet(pid interface{}, snippet int, options ...RequestOptionFunc) (*Snippet, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/snippets/%d", PathEscape(project), snippet) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + ps := new(Snippet) + resp, err := s.client.Do(req, ps) + if err != nil { + return nil, resp, err + } + + return ps, resp, nil +} + +// CreateProjectSnippetOptions represents the available CreateSnippet() options. 
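+//
+// An illustrative sketch (values are placeholders; InternalVisibility is this
+// package's VisibilityValue constant and client is a *Client):
+//
+//	title, fileName, content := "demo", "main.go", "package main"
+//	visibility := InternalVisibility
+//	snip, _, err := client.ProjectSnippets.CreateSnippet(1, &CreateProjectSnippetOptions{
+//		Title:      &title,
+//		FileName:   &fileName,
+//		Content:    &content,
+//		Visibility: &visibility,
+//	})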
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/project_snippets.html#create-new-snippet +type CreateProjectSnippetOptions struct { + Title *string `url:"title,omitempty" json:"title,omitempty"` + FileName *string `url:"file_name,omitempty" json:"file_name,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + Content *string `url:"content,omitempty" json:"content,omitempty"` + Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"` + Files *[]*CreateSnippetFileOptions `url:"files,omitempty" json:"files,omitempty"` +} + +// CreateSnippet creates a new project snippet. The user must have permission +// to create new snippets. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/project_snippets.html#create-new-snippet +func (s *ProjectSnippetsService) CreateSnippet(pid interface{}, opt *CreateProjectSnippetOptions, options ...RequestOptionFunc) (*Snippet, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/snippets", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + ps := new(Snippet) + resp, err := s.client.Do(req, ps) + if err != nil { + return nil, resp, err + } + + return ps, resp, nil +} + +// UpdateProjectSnippetOptions represents the available UpdateSnippet() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/project_snippets.html#update-snippet +type UpdateProjectSnippetOptions struct { + Title *string `url:"title,omitempty" json:"title,omitempty"` + FileName *string `url:"file_name,omitempty" json:"file_name,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + Content *string `url:"content,omitempty" json:"content,omitempty"` + Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"` + Files *[]*UpdateSnippetFileOptions `url:"files,omitempty" json:"files,omitempty"` +} + +// UpdateSnippet updates an existing project snippet. The user must have +// permission to change an existing snippet. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/project_snippets.html#update-snippet +func (s *ProjectSnippetsService) UpdateSnippet(pid interface{}, snippet int, opt *UpdateProjectSnippetOptions, options ...RequestOptionFunc) (*Snippet, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/snippets/%d", PathEscape(project), snippet) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, nil, err + } + + ps := new(Snippet) + resp, err := s.client.Do(req, ps) + if err != nil { + return nil, resp, err + } + + return ps, resp, nil +} + +// DeleteSnippet deletes an existing project snippet. This is an idempotent +// function and deleting a non-existent snippet still returns a 200 OK status +// code. 
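+//
+// Usage sketch (same assumed client git; the IDs are placeholders):
+//
+//	_, err := git.ProjectSnippets.DeleteSnippet(1, 42)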
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_snippets.html#delete-snippet
+func (s *ProjectSnippetsService) DeleteSnippet(pid interface{}, snippet int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/snippets/%d", PathEscape(project), snippet)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// SnippetContent returns the raw project snippet as plain text.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_snippets.html#snippet-content
+func (s *ProjectSnippetsService) SnippetContent(pid interface{}, snippet int, options ...RequestOptionFunc) ([]byte, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/snippets/%d/raw", PathEscape(project), snippet)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var b bytes.Buffer
+	resp, err := s.client.Do(req, &b)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return b.Bytes(), resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/project_templates.go b/vendor/github.com/xanzy/go-gitlab/project_templates.go
new file mode 100644
index 0000000000..86010bd1b3
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/project_templates.go
@@ -0,0 +1,110 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+)
+
+// ProjectTemplatesService handles communication with the project templates
+// related methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/project_templates.html
+type ProjectTemplatesService struct {
+	client *Client
+}
+
+// ProjectTemplate represents a GitLab ProjectTemplate.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/project_templates.html
+type ProjectTemplate struct {
+	Key         string   `json:"key"`
+	Name        string   `json:"name"`
+	Nickname    string   `json:"nickname"`
+	Popular     bool     `json:"popular"`
+	HTMLURL     string   `json:"html_url"`
+	SourceURL   string   `json:"source_url"`
+	Description string   `json:"description"`
+	Conditions  []string `json:"conditions"`
+	Permissions []string `json:"permissions"`
+	Limitations []string `json:"limitations"`
+	Content     string   `json:"content"`
+}
+
+func (s ProjectTemplate) String() string {
+	return Stringify(s)
+}
+
+// ListProjectTemplatesOptions represents the available ListTemplates() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_templates.html#get-all-templates-of-a-particular-type
+type ListProjectTemplatesOptions struct {
+	ListOptions
+	ID   *int    `url:"id,omitempty" json:"id,omitempty"`
+	Type *string `url:"type,omitempty" json:"type,omitempty"`
+}
+
+// ListTemplates gets a list of project templates.
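+//
+// Usage sketch (assumed client git; "dockerfiles" is one of the template
+// types the endpoint accepts):
+//
+//	templates, _, err := git.ProjectTemplates.ListTemplates(1, "dockerfiles", nil)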
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/project_templates.html#get-all-templates-of-a-particular-type +func (s *ProjectTemplatesService) ListTemplates(pid interface{}, templateType string, opt *ListProjectTemplatesOptions, options ...RequestOptionFunc) ([]*ProjectTemplate, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/templates/%s", PathEscape(project), templateType) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var pt []*ProjectTemplate + resp, err := s.client.Do(req, &pt) + if err != nil { + return nil, resp, err + } + + return pt, resp, nil +} + +// GetProjectTemplate gets a single project template. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/project_templates.html#get-one-template-of-a-particular-type +func (s *ProjectTemplatesService) GetProjectTemplate(pid interface{}, templateType string, templateName string, options ...RequestOptionFunc) (*ProjectTemplate, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/templates/%s/%s", PathEscape(project), templateType, templateName) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + ptd := new(ProjectTemplate) + resp, err := s.client.Do(req, ptd) + if err != nil { + return nil, resp, err + } + + return ptd, resp, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/project_variables.go b/vendor/github.com/xanzy/go-gitlab/project_variables.go new file mode 100644 index 0000000000..e75c74634a --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/project_variables.go @@ -0,0 +1,232 @@ +// +// Copyright 2021, Patrick Webster +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" + "net/url" +) + +// ProjectVariablesService handles communication with the +// project variables related methods of the GitLab API. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/project_level_variables.html +type ProjectVariablesService struct { + client *Client +} + +// ProjectVariable represents a GitLab Project Variable. 
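+//
+// Values of this type are returned by the service methods below; a hedged
+// sketch (assumed client git, placeholder project ID):
+//
+//	vars, _, err := git.ProjectVariables.ListVariables(1, nil)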
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_level_variables.html
+type ProjectVariable struct {
+	Key              string            `json:"key"`
+	Value            string            `json:"value"`
+	VariableType     VariableTypeValue `json:"variable_type"`
+	Protected        bool              `json:"protected"`
+	Masked           bool              `json:"masked"`
+	Raw              bool              `json:"raw"`
+	EnvironmentScope string            `json:"environment_scope"`
+	Description      string            `json:"description"`
+}
+
+func (v ProjectVariable) String() string {
+	return Stringify(v)
+}
+
+// VariableFilter represents the filters available for project variable
+// related functions.
type VariableFilter struct {
+	EnvironmentScope string `url:"environment_scope,omitempty" json:"environment_scope,omitempty"`
+}
+
+// ListProjectVariablesOptions represents the available options for listing variables
+// in a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_level_variables.html#list-project-variables
+type ListProjectVariablesOptions ListOptions
+
+// ListVariables gets a list of all variables in a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_level_variables.html#list-project-variables
+func (s *ProjectVariablesService) ListVariables(pid interface{}, opt *ListProjectVariablesOptions, options ...RequestOptionFunc) ([]*ProjectVariable, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/variables", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var vs []*ProjectVariable
+	resp, err := s.client.Do(req, &vs)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return vs, resp, nil
+}
+
+// GetProjectVariableOptions represents the available GetVariable()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_level_variables.html#get-a-single-variable
+type GetProjectVariableOptions struct {
+	Filter *VariableFilter `url:"filter,omitempty" json:"filter,omitempty"`
+}
+
+// GetVariable gets a variable.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_level_variables.html#get-a-single-variable
+func (s *ProjectVariablesService) GetVariable(pid interface{}, key string, opt *GetProjectVariableOptions, options ...RequestOptionFunc) (*ProjectVariable, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/variables/%s", PathEscape(project), url.PathEscape(key))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	v := new(ProjectVariable)
+	resp, err := s.client.Do(req, v)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return v, resp, nil
+}
+
+// CreateProjectVariableOptions represents the available CreateVariable()
+// options.
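+//
+// A hedged sketch of creating a masked, environment-scoped variable
+// (assumed client git):
+//
+//	v, _, err := git.ProjectVariables.CreateVariable(1, &gitlab.CreateProjectVariableOptions{
+//		Key:              gitlab.Ptr("API_TOKEN"),
+//		Value:            gitlab.Ptr("secret"),
+//		Masked:           gitlab.Ptr(true),
+//		EnvironmentScope: gitlab.Ptr("production"),
+//	})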
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/project_level_variables.html#create-a-variable +type CreateProjectVariableOptions struct { + Key *string `url:"key,omitempty" json:"key,omitempty"` + Value *string `url:"value,omitempty" json:"value,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + EnvironmentScope *string `url:"environment_scope,omitempty" json:"environment_scope,omitempty"` + Masked *bool `url:"masked,omitempty" json:"masked,omitempty"` + Protected *bool `url:"protected,omitempty" json:"protected,omitempty"` + Raw *bool `url:"raw,omitempty" json:"raw,omitempty"` + VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` +} + +// CreateVariable creates a new project variable. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/project_level_variables.html#create-a-variable +func (s *ProjectVariablesService) CreateVariable(pid interface{}, opt *CreateProjectVariableOptions, options ...RequestOptionFunc) (*ProjectVariable, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/variables", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + v := new(ProjectVariable) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, nil +} + +// UpdateProjectVariableOptions represents the available UpdateVariable() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/project_level_variables.html#update-a-variable +type UpdateProjectVariableOptions struct { + Value *string `url:"value,omitempty" json:"value,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + EnvironmentScope *string `url:"environment_scope,omitempty" json:"environment_scope,omitempty"` + Filter *VariableFilter `url:"filter,omitempty" json:"filter,omitempty"` + Masked *bool `url:"masked,omitempty" json:"masked,omitempty"` + Protected *bool `url:"protected,omitempty" json:"protected,omitempty"` + Raw *bool `url:"raw,omitempty" json:"raw,omitempty"` + VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` +} + +// UpdateVariable updates a project's variable. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/project_level_variables.html#update-a-variable +func (s *ProjectVariablesService) UpdateVariable(pid interface{}, key string, opt *UpdateProjectVariableOptions, options ...RequestOptionFunc) (*ProjectVariable, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/variables/%s", PathEscape(project), url.PathEscape(key)) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, nil, err + } + + v := new(ProjectVariable) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, nil +} + +// RemoveProjectVariableOptions represents the available RemoveVariable() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/project_level_variables.html#delete-a-variable +type RemoveProjectVariableOptions struct { + Filter *VariableFilter `url:"filter,omitempty" json:"filter,omitempty"` +} + +// RemoveVariable removes a project's variable. 
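+//
+// Sketch (assumed client git; the filter selects one of several variables
+// that share a key across environment scopes):
+//
+//	_, err := git.ProjectVariables.RemoveVariable(1, "API_TOKEN",
+//		&gitlab.RemoveProjectVariableOptions{
+//			Filter: &gitlab.VariableFilter{EnvironmentScope: "production"},
+//		})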
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/project_level_variables.html#delete-a-variable
+func (s *ProjectVariablesService) RemoveVariable(pid interface{}, key string, opt *RemoveProjectVariableOptions, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/variables/%s", PathEscape(project), url.PathEscape(key))
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, opt, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/project_vulnerabilities.go b/vendor/github.com/xanzy/go-gitlab/project_vulnerabilities.go
new file mode 100644
index 0000000000..f46f36fa7e
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/project_vulnerabilities.go
@@ -0,0 +1,150 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// ProjectVulnerabilitiesService handles communication with the project
+// vulnerabilities related methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/project_vulnerabilities.html
+type ProjectVulnerabilitiesService struct {
+	client *Client
+}
+
+// ProjectVulnerability represents a GitLab project vulnerability.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/project_vulnerabilities.html
+type ProjectVulnerability struct {
+	AuthorID                int        `json:"author_id"`
+	Confidence              string     `json:"confidence"`
+	CreatedAt               *time.Time `json:"created_at"`
+	Description             string     `json:"description"`
+	DismissedAt             *time.Time `json:"dismissed_at"`
+	DismissedByID           int        `json:"dismissed_by_id"`
+	DueDate                 *time.Time `json:"due_date"`
+	Finding                 *Finding   `json:"finding"`
+	ID                      int        `json:"id"`
+	LastEditedAt            *time.Time `json:"last_edited_at"`
+	LastEditedByID          int        `json:"last_edited_by_id"`
+	Project                 *Project   `json:"project"`
+	ProjectDefaultBranch    string     `json:"project_default_branch"`
+	ReportType              string     `json:"report_type"`
+	ResolvedAt              *time.Time `json:"resolved_at"`
+	ResolvedByID            int        `json:"resolved_by_id"`
+	ResolvedOnDefaultBranch bool       `json:"resolved_on_default_branch"`
+	Severity                string     `json:"severity"`
+	StartDate               *time.Time `json:"start_date"`
+	State                   string     `json:"state"`
+	Title                   string     `json:"title"`
+	UpdatedAt               *time.Time `json:"updated_at"`
+	UpdatedByID             int        `json:"updated_by_id"`
+}
+
+// Finding represents a GitLab project vulnerability finding.
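+//
+// A finding can be promoted to a standalone vulnerability; a hedged sketch
+// (assumed client git, placeholder IDs):
+//
+//	v, _, err := git.ProjectVulnerabilities.CreateVulnerability(1,
+//		&gitlab.CreateVulnerabilityOptions{FindingID: gitlab.Ptr(42)},
+//	)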
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/project_vulnerabilities.html +type Finding struct { + Confidence string `json:"confidence"` + CreatedAt *time.Time `json:"created_at"` + ID int `json:"id"` + LocationFingerprint string `json:"location_fingerprint"` + MetadataVersion string `json:"metadata_version"` + Name string `json:"name"` + PrimaryIdentifierID int `json:"primary_identifier_id"` + ProjectFingerprint string `json:"project_fingerprint"` + ProjectID int `json:"project_id"` + RawMetadata string `json:"raw_metadata"` + ReportType string `json:"report_type"` + ScannerID int `json:"scanner_id"` + Severity string `json:"severity"` + UpdatedAt *time.Time `json:"updated_at"` + UUID string `json:"uuid"` + VulnerabilityID int `json:"vulnerability_id"` +} + +// ListProjectVulnerabilitiesOptions represents the available +// ListProjectVulnerabilities() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/project_vulnerabilities.html#list-project-vulnerabilities +type ListProjectVulnerabilitiesOptions struct { + ListOptions +} + +// ListProjectVulnerabilities gets a list of all project vulnerabilities. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/project_vulnerabilities.html#list-project-vulnerabilities +func (s *ProjectVulnerabilitiesService) ListProjectVulnerabilities(pid interface{}, opt *ListProjectVulnerabilitiesOptions, options ...RequestOptionFunc) ([]*ProjectVulnerability, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/vulnerabilities", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var p []*ProjectVulnerability + resp, err := s.client.Do(req, &p) + if err != nil { + return nil, resp, err + } + + return p, resp, nil +} + +// CreateVulnerabilityOptions represents the available CreateVulnerability() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/project_vulnerabilities.html#new-vulnerability +type CreateVulnerabilityOptions struct { + FindingID *int `url:"finding_id,omitempty" json:"finding_id,omitempty"` +} + +// CreateVulnerability creates a new vulnerability on the selected project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/project_vulnerabilities.html#new-vulnerability +func (s *ProjectVulnerabilitiesService) CreateVulnerability(pid interface{}, opt *CreateVulnerabilityOptions, options ...RequestOptionFunc) (*ProjectVulnerability, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/vulnerabilities", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + p := new(ProjectVulnerability) + resp, err := s.client.Do(req, p) + if err != nil { + return nil, resp, err + } + + return p, resp, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/projects.go b/vendor/github.com/xanzy/go-gitlab/projects.go new file mode 100644 index 0000000000..cc23f265d0 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/projects.go @@ -0,0 +1,2263 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "time" + + "github.com/hashicorp/go-retryablehttp" +) + +// ProjectsService handles communication with the repositories related methods +// of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html +type ProjectsService struct { + client *Client +} + +// Project represents a GitLab project. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html +type Project struct { + ID int `json:"id"` + Description string `json:"description"` + DefaultBranch string `json:"default_branch"` + Visibility VisibilityValue `json:"visibility"` + SSHURLToRepo string `json:"ssh_url_to_repo"` + HTTPURLToRepo string `json:"http_url_to_repo"` + WebURL string `json:"web_url"` + ReadmeURL string `json:"readme_url"` + TagList []string `json:"tag_list"` + Topics []string `json:"topics"` + Owner *User `json:"owner"` + Name string `json:"name"` + NameWithNamespace string `json:"name_with_namespace"` + Path string `json:"path"` + PathWithNamespace string `json:"path_with_namespace"` + IssuesEnabled bool `json:"issues_enabled"` + OpenIssuesCount int `json:"open_issues_count"` + MergeRequestsEnabled bool `json:"merge_requests_enabled"` + ApprovalsBeforeMerge int `json:"approvals_before_merge"` + JobsEnabled bool `json:"jobs_enabled"` + WikiEnabled bool `json:"wiki_enabled"` + SnippetsEnabled bool `json:"snippets_enabled"` + ResolveOutdatedDiffDiscussions bool `json:"resolve_outdated_diff_discussions"` + ContainerExpirationPolicy *ContainerExpirationPolicy `json:"container_expiration_policy,omitempty"` + ContainerRegistryEnabled bool `json:"container_registry_enabled"` + ContainerRegistryAccessLevel AccessControlValue `json:"container_registry_access_level"` + ContainerRegistryImagePrefix string `json:"container_registry_image_prefix,omitempty"` + CreatedAt *time.Time `json:"created_at,omitempty"` + LastActivityAt *time.Time `json:"last_activity_at,omitempty"` + CreatorID int `json:"creator_id"` + Namespace *ProjectNamespace `json:"namespace"` + Permissions *Permissions `json:"permissions"` + MarkedForDeletionAt *ISOTime `json:"marked_for_deletion_at"` + EmptyRepo bool `json:"empty_repo"` + Archived bool `json:"archived"` + AvatarURL string `json:"avatar_url"` + LicenseURL string `json:"license_url"` + License *ProjectLicense `json:"license"` + SharedRunnersEnabled bool `json:"shared_runners_enabled"` + GroupRunnersEnabled bool `json:"group_runners_enabled"` + RunnerTokenExpirationInterval int `json:"runner_token_expiration_interval"` + ForksCount int `json:"forks_count"` + StarCount int `json:"star_count"` + RunnersToken string `json:"runners_token"` + AllowMergeOnSkippedPipeline bool `json:"allow_merge_on_skipped_pipeline"` + OnlyAllowMergeIfPipelineSucceeds bool `json:"only_allow_merge_if_pipeline_succeeds"` + OnlyAllowMergeIfAllDiscussionsAreResolved bool `json:"only_allow_merge_if_all_discussions_are_resolved"` + RemoveSourceBranchAfterMerge bool `json:"remove_source_branch_after_merge"` + PreventMergeWithoutJiraIssue bool `json:"prevent_merge_without_jira_issue"` + 
PrintingMergeRequestLinkEnabled bool `json:"printing_merge_request_link_enabled"` + LFSEnabled bool `json:"lfs_enabled"` + RepositoryStorage string `json:"repository_storage"` + RequestAccessEnabled bool `json:"request_access_enabled"` + MergeMethod MergeMethodValue `json:"merge_method"` + CanCreateMergeRequestIn bool `json:"can_create_merge_request_in"` + ForkedFromProject *ForkParent `json:"forked_from_project"` + Mirror bool `json:"mirror"` + MirrorUserID int `json:"mirror_user_id"` + MirrorTriggerBuilds bool `json:"mirror_trigger_builds"` + OnlyMirrorProtectedBranches bool `json:"only_mirror_protected_branches"` + MirrorOverwritesDivergedBranches bool `json:"mirror_overwrites_diverged_branches"` + PackagesEnabled bool `json:"packages_enabled"` + ServiceDeskEnabled bool `json:"service_desk_enabled"` + ServiceDeskAddress string `json:"service_desk_address"` + IssuesAccessLevel AccessControlValue `json:"issues_access_level"` + ReleasesAccessLevel AccessControlValue `json:"releases_access_level,omitempty"` + RepositoryAccessLevel AccessControlValue `json:"repository_access_level"` + MergeRequestsAccessLevel AccessControlValue `json:"merge_requests_access_level"` + ForkingAccessLevel AccessControlValue `json:"forking_access_level"` + WikiAccessLevel AccessControlValue `json:"wiki_access_level"` + BuildsAccessLevel AccessControlValue `json:"builds_access_level"` + SnippetsAccessLevel AccessControlValue `json:"snippets_access_level"` + PagesAccessLevel AccessControlValue `json:"pages_access_level"` + OperationsAccessLevel AccessControlValue `json:"operations_access_level"` + AnalyticsAccessLevel AccessControlValue `json:"analytics_access_level"` + EnvironmentsAccessLevel AccessControlValue `json:"environments_access_level"` + FeatureFlagsAccessLevel AccessControlValue `json:"feature_flags_access_level"` + InfrastructureAccessLevel AccessControlValue `json:"infrastructure_access_level"` + MonitorAccessLevel AccessControlValue `json:"monitor_access_level"` + AutocloseReferencedIssues bool `json:"autoclose_referenced_issues"` + SuggestionCommitMessage string `json:"suggestion_commit_message"` + SquashOption SquashOptionValue `json:"squash_option"` + EnforceAuthChecksOnUploads bool `json:"enforce_auth_checks_on_uploads,omitempty"` + SharedWithGroups []struct { + GroupID int `json:"group_id"` + GroupName string `json:"group_name"` + GroupFullPath string `json:"group_full_path"` + GroupAccessLevel int `json:"group_access_level"` + } `json:"shared_with_groups"` + Statistics *Statistics `json:"statistics"` + Links *Links `json:"_links,omitempty"` + ImportURL string `json:"import_url"` + ImportType string `json:"import_type"` + ImportStatus string `json:"import_status"` + ImportError string `json:"import_error"` + CIDefaultGitDepth int `json:"ci_default_git_depth"` + CIForwardDeploymentEnabled bool `json:"ci_forward_deployment_enabled"` + CIForwardDeploymentRollbackAllowed bool `json:"ci_forward_deployment_rollback_allowed"` + CISeperateCache bool `json:"ci_separated_caches"` + CIJobTokenScopeEnabled bool `json:"ci_job_token_scope_enabled"` + CIOptInJWT bool `json:"ci_opt_in_jwt"` + CIAllowForkPipelinesToRunInParentProject bool `json:"ci_allow_fork_pipelines_to_run_in_parent_project"` + CIRestrictPipelineCancellationRole AccessControlValue `json:"ci_restrict_pipeline_cancellation_role"` + PublicJobs bool `json:"public_jobs"` + BuildTimeout int `json:"build_timeout"` + AutoCancelPendingPipelines string `json:"auto_cancel_pending_pipelines"` + CIConfigPath string `json:"ci_config_path"` + 
	CustomAttributes                         []*CustomAttribute `json:"custom_attributes"`
+	ComplianceFrameworks                     []string           `json:"compliance_frameworks"`
+	BuildCoverageRegex                       string             `json:"build_coverage_regex"`
+	IssuesTemplate                           string             `json:"issues_template"`
+	MergeRequestsTemplate                    string             `json:"merge_requests_template"`
+	IssueBranchTemplate                      string             `json:"issue_branch_template"`
+	KeepLatestArtifact                       bool               `json:"keep_latest_artifact"`
+	MergePipelinesEnabled                    bool               `json:"merge_pipelines_enabled"`
+	MergeTrainsEnabled                       bool               `json:"merge_trains_enabled"`
+	RestrictUserDefinedVariables             bool               `json:"restrict_user_defined_variables"`
+	MergeCommitTemplate                      string             `json:"merge_commit_template"`
+	SquashCommitTemplate                     string             `json:"squash_commit_template"`
+	AutoDevopsDeployStrategy                 string             `json:"auto_devops_deploy_strategy"`
+	AutoDevopsEnabled                        bool               `json:"auto_devops_enabled"`
+	BuildGitStrategy                         string             `json:"build_git_strategy"`
+	EmailsEnabled                            bool               `json:"emails_enabled"`
+	ExternalAuthorizationClassificationLabel string             `json:"external_authorization_classification_label"`
+	RequirementsEnabled                      bool               `json:"requirements_enabled"`
+	RequirementsAccessLevel                  AccessControlValue `json:"requirements_access_level"`
+	SecurityAndComplianceEnabled             bool               `json:"security_and_compliance_enabled"`
+	SecurityAndComplianceAccessLevel         AccessControlValue `json:"security_and_compliance_access_level"`
+	MergeRequestDefaultTargetSelf            bool               `json:"mr_default_target_self"`
+	ModelExperimentsAccessLevel              AccessControlValue `json:"model_experiments_access_level"`
+	ModelRegistryAccessLevel                 AccessControlValue `json:"model_registry_access_level"`
+	PreReceiveSecretDetectionEnabled         bool               `json:"pre_receive_secret_detection_enabled"`
+
+	// Deprecated: Use EmailsEnabled instead
+	EmailsDisabled bool `json:"emails_disabled"`
+	// Deprecated: This parameter has been renamed to PublicJobs in GitLab 9.0.
+	PublicBuilds bool `json:"public_builds"`
+}
+
+// BasicProject is included in other service responses (such as todos).
+type BasicProject struct {
+	ID                int        `json:"id"`
+	Description       string     `json:"description"`
+	Name              string     `json:"name"`
+	NameWithNamespace string     `json:"name_with_namespace"`
+	Path              string     `json:"path"`
+	PathWithNamespace string     `json:"path_with_namespace"`
+	CreatedAt         *time.Time `json:"created_at"`
+}
+
+// ContainerExpirationPolicy represents the container expiration policy.
+type ContainerExpirationPolicy struct {
+	Cadence         string     `json:"cadence"`
+	KeepN           int        `json:"keep_n"`
+	OlderThan       string     `json:"older_than"`
+	NameRegex       string     `json:"name_regex"`
+	NameRegexDelete string     `json:"name_regex_delete"`
+	NameRegexKeep   string     `json:"name_regex_keep"`
+	Enabled         bool       `json:"enabled"`
+	NextRunAt       *time.Time `json:"next_run_at"`
+}
+
+// ForkParent represents the parent project when this is a fork.
+type ForkParent struct {
+	ID                int    `json:"id"`
+	Name              string `json:"name"`
+	NameWithNamespace string `json:"name_with_namespace"`
+	Path              string `json:"path"`
+	PathWithNamespace string `json:"path_with_namespace"`
+	HTTPURLToRepo     string `json:"http_url_to_repo"`
+	WebURL            string `json:"web_url"`
+	RepositoryStorage string `json:"repository_storage"`
+}
+
+// GroupAccess represents group access.
+type GroupAccess struct {
+	AccessLevel       AccessLevelValue       `json:"access_level"`
+	NotificationLevel NotificationLevelValue `json:"notification_level"`
+}
+
+// Links represents a project's web links for self, issues, merge_requests,
+// repo_branches, labels, events, members.
+type Links struct {
+	Self          string `json:"self"`
+	Issues        string `json:"issues"`
+	MergeRequests string `json:"merge_requests"`
+	RepoBranches  string `json:"repo_branches"`
+	Labels        string `json:"labels"`
+	Events        string `json:"events"`
+	Members       string `json:"members"`
+	ClusterAgents string `json:"cluster_agents"`
+}
+
+// Permissions represents permissions.
+type Permissions struct {
+	ProjectAccess *ProjectAccess `json:"project_access"`
+	GroupAccess   *GroupAccess   `json:"group_access"`
+}
+
+// ProjectAccess represents project access.
+type ProjectAccess struct {
+	AccessLevel       AccessLevelValue       `json:"access_level"`
+	NotificationLevel NotificationLevelValue `json:"notification_level"`
+}
+
+// ProjectLicense represents the license for a project.
+type ProjectLicense struct {
+	Key       string `json:"key"`
+	Name      string `json:"name"`
+	Nickname  string `json:"nickname"`
+	HTMLURL   string `json:"html_url"`
+	SourceURL string `json:"source_url"`
+}
+
+// ProjectNamespace represents a project namespace.
+type ProjectNamespace struct {
+	ID        int    `json:"id"`
+	Name      string `json:"name"`
+	Path      string `json:"path"`
+	Kind      string `json:"kind"`
+	FullPath  string `json:"full_path"`
+	ParentID  int    `json:"parent_id"`
+	AvatarURL string `json:"avatar_url"`
+	WebURL    string `json:"web_url"`
+}
+
+// Repository represents a repository.
+type Repository struct {
+	Name              string          `json:"name"`
+	Description       string          `json:"description"`
+	WebURL            string          `json:"web_url"`
+	AvatarURL         string          `json:"avatar_url"`
+	GitSSHURL         string          `json:"git_ssh_url"`
+	GitHTTPURL        string          `json:"git_http_url"`
+	Namespace         string          `json:"namespace"`
+	Visibility        VisibilityValue `json:"visibility"`
+	PathWithNamespace string          `json:"path_with_namespace"`
+	DefaultBranch     string          `json:"default_branch"`
+	Homepage          string          `json:"homepage"`
+	URL               string          `json:"url"`
+	SSHURL            string          `json:"ssh_url"`
+	HTTPURL           string          `json:"http_url"`
+}
+
+// Statistics represents a statistics record for a group or project.
+type Statistics struct {
+	CommitCount           int64 `json:"commit_count"`
+	StorageSize           int64 `json:"storage_size"`
+	RepositorySize        int64 `json:"repository_size"`
+	WikiSize              int64 `json:"wiki_size"`
+	LFSObjectsSize        int64 `json:"lfs_objects_size"`
+	JobArtifactsSize      int64 `json:"job_artifacts_size"`
+	PipelineArtifactsSize int64 `json:"pipeline_artifacts_size"`
+	PackagesSize          int64 `json:"packages_size"`
+	SnippetsSize          int64 `json:"snippets_size"`
+	UploadsSize           int64 `json:"uploads_size"`
+	ContainerRegistrySize int64 `json:"container_registry_size"`
+}
+
+func (s Project) String() string {
+	return Stringify(s)
+}
+
+// ProjectApprovalRule represents a GitLab project approval rule.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_request_approvals.html#get-project-level-rules
+type ProjectApprovalRule struct {
+	ID                            int                `json:"id"`
+	Name                          string             `json:"name"`
+	RuleType                      string             `json:"rule_type"`
+	ReportType                    string             `json:"report_type"`
+	EligibleApprovers             []*BasicUser       `json:"eligible_approvers"`
+	ApprovalsRequired             int                `json:"approvals_required"`
+	Users                         []*BasicUser       `json:"users"`
+	Groups                        []*Group           `json:"groups"`
+	ContainsHiddenGroups          bool               `json:"contains_hidden_groups"`
+	ProtectedBranches             []*ProtectedBranch `json:"protected_branches"`
+	AppliesToAllProtectedBranches bool               `json:"applies_to_all_protected_branches"`
+}
+
+func (s ProjectApprovalRule) String() string {
+	return Stringify(s)
+}
+
+// ListProjectsOptions represents the available ListProjects() options.
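+//
+// A hedged sketch of paging through projects the token's user is a member
+// of (assumed client git):
+//
+//	projects, _, err := git.Projects.ListProjects(&gitlab.ListProjectsOptions{
+//		ListOptions: gitlab.ListOptions{Page: 1, PerPage: 20},
+//		Membership:  gitlab.Ptr(true),
+//		OrderBy:     gitlab.Ptr("last_activity_at"),
+//	})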
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#list-all-projects +type ListProjectsOptions struct { + ListOptions + Archived *bool `url:"archived,omitempty" json:"archived,omitempty"` + IDAfter *int `url:"id_after,omitempty" json:"id_after,omitempty"` + IDBefore *int `url:"id_before,omitempty" json:"id_before,omitempty"` + Imported *bool `url:"imported,omitempty" json:"imported,omitempty"` + IncludeHidden *bool `url:"include_hidden,omitempty" json:"include_hidden,omitempty"` + IncludePendingDelete *bool `url:"include_pending_delete,omitempty" json:"include_pending_delete,omitempty"` + LastActivityAfter *time.Time `url:"last_activity_after,omitempty" json:"last_activity_after,omitempty"` + LastActivityBefore *time.Time `url:"last_activity_before,omitempty" json:"last_activity_before,omitempty"` + Membership *bool `url:"membership,omitempty" json:"membership,omitempty"` + MinAccessLevel *AccessLevelValue `url:"min_access_level,omitempty" json:"min_access_level,omitempty"` + OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` + Owned *bool `url:"owned,omitempty" json:"owned,omitempty"` + RepositoryChecksumFailed *bool `url:"repository_checksum_failed,omitempty" json:"repository_checksum_failed,omitempty"` + RepositoryStorage *string `url:"repository_storage,omitempty" json:"repository_storage,omitempty"` + Search *string `url:"search,omitempty" json:"search,omitempty"` + SearchNamespaces *bool `url:"search_namespaces,omitempty" json:"search_namespaces,omitempty"` + Simple *bool `url:"simple,omitempty" json:"simple,omitempty"` + Sort *string `url:"sort,omitempty" json:"sort,omitempty"` + Starred *bool `url:"starred,omitempty" json:"starred,omitempty"` + Statistics *bool `url:"statistics,omitempty" json:"statistics,omitempty"` + Topic *string `url:"topic,omitempty" json:"topic,omitempty"` + Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"` + WikiChecksumFailed *bool `url:"wiki_checksum_failed,omitempty" json:"wiki_checksum_failed,omitempty"` + WithCustomAttributes *bool `url:"with_custom_attributes,omitempty" json:"with_custom_attributes,omitempty"` + WithIssuesEnabled *bool `url:"with_issues_enabled,omitempty" json:"with_issues_enabled,omitempty"` + WithMergeRequestsEnabled *bool `url:"with_merge_requests_enabled,omitempty" json:"with_merge_requests_enabled,omitempty"` + WithProgrammingLanguage *string `url:"with_programming_language,omitempty" json:"with_programming_language,omitempty"` +} + +// ListProjects gets a list of projects accessible by the authenticated user. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#list-all-projects +func (s *ProjectsService) ListProjects(opt *ListProjectsOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) { + req, err := s.client.NewRequest(http.MethodGet, "projects", opt, options) + if err != nil { + return nil, nil, err + } + + var p []*Project + resp, err := s.client.Do(req, &p) + if err != nil { + return nil, resp, err + } + + return p, resp, nil +} + +// ListUserProjects gets a list of projects for the given user. 
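+//
+// Sketch (uid accepts a numeric user ID or a username string; client
+// assumed as above):
+//
+//	projects, _, err := git.Projects.ListUserProjects("some-user", nil)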
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#list-user-projects +func (s *ProjectsService) ListUserProjects(uid interface{}, opt *ListProjectsOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) { + user, err := parseID(uid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("users/%s/projects", user) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var p []*Project + resp, err := s.client.Do(req, &p) + if err != nil { + return nil, resp, err + } + + return p, resp, nil +} + +// ListUserContributedProjects gets a list of visible projects a given user has contributed to. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#list-projects-a-user-has-contributed-to +func (s *ProjectsService) ListUserContributedProjects(uid interface{}, opt *ListProjectsOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) { + user, err := parseID(uid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("users/%s/contributed_projects", user) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var p []*Project + resp, err := s.client.Do(req, &p) + if err != nil { + return nil, resp, err + } + + return p, resp, nil +} + +// ListUserStarredProjects gets a list of projects starred by the given user. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#list-projects-starred-by-a-user +func (s *ProjectsService) ListUserStarredProjects(uid interface{}, opt *ListProjectsOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) { + user, err := parseID(uid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("users/%s/starred_projects", user) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var p []*Project + resp, err := s.client.Do(req, &p) + if err != nil { + return nil, resp, err + } + + return p, resp, nil +} + +// ProjectUser represents a GitLab project user. +type ProjectUser struct { + ID int `json:"id"` + Name string `json:"name"` + Username string `json:"username"` + State string `json:"state"` + AvatarURL string `json:"avatar_url"` + WebURL string `json:"web_url"` +} + +// ListProjectUserOptions represents the available ListProjectsUsers() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#get-project-users +type ListProjectUserOptions struct { + ListOptions + Search *string `url:"search,omitempty" json:"search,omitempty"` +} + +// ListProjectsUsers gets a list of users for the given project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#get-project-users +func (s *ProjectsService) ListProjectsUsers(pid interface{}, opt *ListProjectUserOptions, options ...RequestOptionFunc) ([]*ProjectUser, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/users", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var p []*ProjectUser + resp, err := s.client.Do(req, &p) + if err != nil { + return nil, resp, err + } + + return p, resp, nil +} + +// ProjectGroup represents a GitLab project group. 
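+//
+// Returned by ListProjectsGroups below; a hedged sketch (assumed client
+// git, placeholder project ID):
+//
+//	groups, _, err := git.Projects.ListProjectsGroups(1, nil)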
+type ProjectGroup struct { + ID int `json:"id"` + Name string `json:"name"` + AvatarURL string `json:"avatar_url"` + WebURL string `json:"web_url"` + FullName string `json:"full_name"` + FullPath string `json:"full_path"` +} + +// ListProjectGroupOptions represents the available ListProjectsGroups() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#list-a-projects-groups +type ListProjectGroupOptions struct { + ListOptions + Search *string `url:"search,omitempty" json:"search,omitempty"` + SharedMinAccessLevel *AccessLevelValue `url:"shared_min_access_level,omitempty" json:"shared_min_access_level,omitempty"` + SharedVisiableOnly *bool `url:"shared_visible_only,omitempty" json:"shared_visible_only,omitempty"` + SkipGroups *[]int `url:"skip_groups,omitempty" json:"skip_groups,omitempty"` + WithShared *bool `url:"with_shared,omitempty" json:"with_shared,omitempty"` +} + +// ListProjectsGroups gets a list of groups for the given project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#list-a-projects-groups +func (s *ProjectsService) ListProjectsGroups(pid interface{}, opt *ListProjectGroupOptions, options ...RequestOptionFunc) ([]*ProjectGroup, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/groups", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var p []*ProjectGroup + resp, err := s.client.Do(req, &p) + if err != nil { + return nil, resp, err + } + + return p, resp, nil +} + +// ProjectLanguages is a map of strings because the response is arbitrary +// +// Gitlab API docs: https://docs.gitlab.com/ee/api/projects.html#languages +type ProjectLanguages map[string]float32 + +// GetProjectLanguages gets a list of languages used by the project +// +// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#languages +func (s *ProjectsService) GetProjectLanguages(pid interface{}, options ...RequestOptionFunc) (*ProjectLanguages, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/languages", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + p := new(ProjectLanguages) + resp, err := s.client.Do(req, p) + if err != nil { + return nil, resp, err + } + + return p, resp, nil +} + +// GetProjectOptions represents the available GetProject() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#get-single-project +type GetProjectOptions struct { + License *bool `url:"license,omitempty" json:"license,omitempty"` + Statistics *bool `url:"statistics,omitempty" json:"statistics,omitempty"` + WithCustomAttributes *bool `url:"with_custom_attributes,omitempty" json:"with_custom_attributes,omitempty"` +} + +// GetProject gets a specific project, identified by project ID or +// NAMESPACE/PROJECT_NAME, which is owned by the authenticated user. 
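+//
+// A hedged sketch showing both identifier forms (assumed client git):
+//
+//	p, _, err := git.Projects.GetProject(42, nil)
+//	p, _, err = git.Projects.GetProject("group/project",
+//		&gitlab.GetProjectOptions{Statistics: gitlab.Ptr(true)})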
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#get-single-project +func (s *ProjectsService) GetProject(pid interface{}, opt *GetProjectOptions, options ...RequestOptionFunc) (*Project, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + p := new(Project) + resp, err := s.client.Do(req, p) + if err != nil { + return nil, resp, err + } + + return p, resp, nil +} + +// CreateProjectOptions represents the available CreateProject() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#create-project +type CreateProjectOptions struct { + AllowMergeOnSkippedPipeline *bool `url:"allow_merge_on_skipped_pipeline,omitempty" json:"allow_merge_on_skipped_pipeline,omitempty"` + OnlyAllowMergeIfAllStatusChecksPassed *bool `url:"only_allow_merge_if_all_status_checks_passed,omitempty" json:"only_allow_merge_if_all_status_checks_passed,omitempty"` + AnalyticsAccessLevel *AccessControlValue `url:"analytics_access_level,omitempty" json:"analytics_access_level,omitempty"` + ApprovalsBeforeMerge *int `url:"approvals_before_merge,omitempty" json:"approvals_before_merge,omitempty"` + AutoCancelPendingPipelines *string `url:"auto_cancel_pending_pipelines,omitempty" json:"auto_cancel_pending_pipelines,omitempty"` + AutoDevopsDeployStrategy *string `url:"auto_devops_deploy_strategy,omitempty" json:"auto_devops_deploy_strategy,omitempty"` + AutoDevopsEnabled *bool `url:"auto_devops_enabled,omitempty" json:"auto_devops_enabled,omitempty"` + AutocloseReferencedIssues *bool `url:"autoclose_referenced_issues,omitempty" json:"autoclose_referenced_issues,omitempty"` + Avatar *ProjectAvatar `url:"-" json:"-"` + BuildCoverageRegex *string `url:"build_coverage_regex,omitempty" json:"build_coverage_regex,omitempty"` + BuildGitStrategy *string `url:"build_git_strategy,omitempty" json:"build_git_strategy,omitempty"` + BuildTimeout *int `url:"build_timeout,omitempty" json:"build_timeout,omitempty"` + BuildsAccessLevel *AccessControlValue `url:"builds_access_level,omitempty" json:"builds_access_level,omitempty"` + CIConfigPath *string `url:"ci_config_path,omitempty" json:"ci_config_path,omitempty"` + ContainerExpirationPolicyAttributes *ContainerExpirationPolicyAttributes `url:"container_expiration_policy_attributes,omitempty" json:"container_expiration_policy_attributes,omitempty"` + ContainerRegistryAccessLevel *AccessControlValue `url:"container_registry_access_level,omitempty" json:"container_registry_access_level,omitempty"` + DefaultBranch *string `url:"default_branch,omitempty" json:"default_branch,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + EmailsEnabled *bool `url:"emails_enabled,omitempty" json:"emails_enabled,omitempty"` + EnforceAuthChecksOnUploads *bool `url:"enforce_auth_checks_on_uploads,omitempty" json:"enforce_auth_checks_on_uploads,omitempty"` + ExternalAuthorizationClassificationLabel *string `url:"external_authorization_classification_label,omitempty" json:"external_authorization_classification_label,omitempty"` + ForkingAccessLevel *AccessControlValue `url:"forking_access_level,omitempty" json:"forking_access_level,omitempty"` + GroupWithProjectTemplatesID *int `url:"group_with_project_templates_id,omitempty" json:"group_with_project_templates_id,omitempty"` + ImportURL *string 
`url:"import_url,omitempty" json:"import_url,omitempty"` + InitializeWithReadme *bool `url:"initialize_with_readme,omitempty" json:"initialize_with_readme,omitempty"` + IssuesAccessLevel *AccessControlValue `url:"issues_access_level,omitempty" json:"issues_access_level,omitempty"` + IssueBranchTemplate *string `url:"issue_branch_template,omitempty" json:"issue_branch_template,omitempty"` + LFSEnabled *bool `url:"lfs_enabled,omitempty" json:"lfs_enabled,omitempty"` + MergeCommitTemplate *string `url:"merge_commit_template,omitempty" json:"merge_commit_template,omitempty"` + MergeMethod *MergeMethodValue `url:"merge_method,omitempty" json:"merge_method,omitempty"` + MergePipelinesEnabled *bool `url:"merge_pipelines_enabled,omitempty" json:"merge_pipelines_enabled,omitempty"` + MergeRequestsAccessLevel *AccessControlValue `url:"merge_requests_access_level,omitempty" json:"merge_requests_access_level,omitempty"` + MergeTrainsEnabled *bool `url:"merge_trains_enabled,omitempty" json:"merge_trains_enabled,omitempty"` + Mirror *bool `url:"mirror,omitempty" json:"mirror,omitempty"` + MirrorTriggerBuilds *bool `url:"mirror_trigger_builds,omitempty" json:"mirror_trigger_builds,omitempty"` + ModelExperimentsAccessLevel *AccessControlValue `url:"model_experiments_access_level,omitempty" json:"model_experiments_access_level,omitempty"` + ModelRegistryAccessLevel *AccessControlValue `url:"model_registry_access_level,omitempty" json:"model_registry_access_level,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` + NamespaceID *int `url:"namespace_id,omitempty" json:"namespace_id,omitempty"` + OnlyAllowMergeIfAllDiscussionsAreResolved *bool `url:"only_allow_merge_if_all_discussions_are_resolved,omitempty" json:"only_allow_merge_if_all_discussions_are_resolved,omitempty"` + OnlyAllowMergeIfPipelineSucceeds *bool `url:"only_allow_merge_if_pipeline_succeeds,omitempty" json:"only_allow_merge_if_pipeline_succeeds,omitempty"` + OperationsAccessLevel *AccessControlValue `url:"operations_access_level,omitempty" json:"operations_access_level,omitempty"` + PackagesEnabled *bool `url:"packages_enabled,omitempty" json:"packages_enabled,omitempty"` + PagesAccessLevel *AccessControlValue `url:"pages_access_level,omitempty" json:"pages_access_level,omitempty"` + Path *string `url:"path,omitempty" json:"path,omitempty"` + PublicBuilds *bool `url:"public_builds,omitempty" json:"public_builds,omitempty"` + ReleasesAccessLevel *AccessControlValue `url:"releases_access_level,omitempty" json:"releases_access_level,omitempty"` + EnvironmentsAccessLevel *AccessControlValue `url:"environments_access_level,omitempty" json:"environments_access_level,omitempty"` + FeatureFlagsAccessLevel *AccessControlValue `url:"feature_flags_access_level,omitempty" json:"feature_flags_access_level,omitempty"` + InfrastructureAccessLevel *AccessControlValue `url:"infrastructure_access_level,omitempty" json:"infrastructure_access_level,omitempty"` + MonitorAccessLevel *AccessControlValue `url:"monitor_access_level,omitempty" json:"monitor_access_level,omitempty"` + RemoveSourceBranchAfterMerge *bool `url:"remove_source_branch_after_merge,omitempty" json:"remove_source_branch_after_merge,omitempty"` + PrintingMergeRequestLinkEnabled *bool `url:"printing_merge_request_link_enabled,omitempty" json:"printing_merge_request_link_enabled,omitempty"` + RepositoryAccessLevel *AccessControlValue `url:"repository_access_level,omitempty" json:"repository_access_level,omitempty"` + RepositoryStorage *string `url:"repository_storage,omitempty" 
json:"repository_storage,omitempty"` + RequestAccessEnabled *bool `url:"request_access_enabled,omitempty" json:"request_access_enabled,omitempty"` + RequirementsAccessLevel *AccessControlValue `url:"requirements_access_level,omitempty" json:"requirements_access_level,omitempty"` + ResolveOutdatedDiffDiscussions *bool `url:"resolve_outdated_diff_discussions,omitempty" json:"resolve_outdated_diff_discussions,omitempty"` + SecurityAndComplianceAccessLevel *AccessControlValue `url:"security_and_compliance_access_level,omitempty" json:"security_and_compliance_access_level,omitempty"` + SharedRunnersEnabled *bool `url:"shared_runners_enabled,omitempty" json:"shared_runners_enabled,omitempty"` + GroupRunnersEnabled *bool `url:"group_runners_enabled,omitempty" json:"group_runners_enabled,omitempty"` + ShowDefaultAwardEmojis *bool `url:"show_default_award_emojis,omitempty" json:"show_default_award_emojis,omitempty"` + SnippetsAccessLevel *AccessControlValue `url:"snippets_access_level,omitempty" json:"snippets_access_level,omitempty"` + SquashCommitTemplate *string `url:"squash_commit_template,omitempty" json:"squash_commit_template,omitempty"` + SquashOption *SquashOptionValue `url:"squash_option,omitempty" json:"squash_option,omitempty"` + SuggestionCommitMessage *string `url:"suggestion_commit_message,omitempty" json:"suggestion_commit_message,omitempty"` + TemplateName *string `url:"template_name,omitempty" json:"template_name,omitempty"` + TemplateProjectID *int `url:"template_project_id,omitempty" json:"template_project_id,omitempty"` + Topics *[]string `url:"topics,omitempty" json:"topics,omitempty"` + UseCustomTemplate *bool `url:"use_custom_template,omitempty" json:"use_custom_template,omitempty"` + Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"` + WikiAccessLevel *AccessControlValue `url:"wiki_access_level,omitempty" json:"wiki_access_level,omitempty"` + + // Deprecated: No longer supported in recent versions. + CIForwardDeploymentEnabled *bool `url:"ci_forward_deployment_enabled,omitempty" json:"ci_forward_deployment_enabled,omitempty"` + // Deprecated: Use ContainerRegistryAccessLevel instead. + ContainerRegistryEnabled *bool `url:"container_registry_enabled,omitempty" json:"container_registry_enabled,omitempty"` + // Deprecated: Use EmailsEnabled instead + EmailsDisabled *bool `url:"emails_disabled,omitempty" json:"emails_disabled,omitempty"` + // Deprecated: Use IssuesAccessLevel instead. + IssuesEnabled *bool `url:"issues_enabled,omitempty" json:"issues_enabled,omitempty"` + // Deprecated: No longer supported in recent versions. + IssuesTemplate *string `url:"issues_template,omitempty" json:"issues_template,omitempty"` + // Deprecated: Use BuildsAccessLevel instead. + JobsEnabled *bool `url:"jobs_enabled,omitempty" json:"jobs_enabled,omitempty"` + // Deprecated: Use MergeRequestsAccessLevel instead. + MergeRequestsEnabled *bool `url:"merge_requests_enabled,omitempty" json:"merge_requests_enabled,omitempty"` + // Deprecated: No longer supported in recent versions. + MergeRequestsTemplate *string `url:"merge_requests_template,omitempty" json:"merge_requests_template,omitempty"` + // Deprecated: No longer supported in recent versions. + ServiceDeskEnabled *bool `url:"service_desk_enabled,omitempty" json:"service_desk_enabled,omitempty"` + // Deprecated: Use SnippetsAccessLevel instead. + SnippetsEnabled *bool `url:"snippets_enabled,omitempty" json:"snippets_enabled,omitempty"` + // Deprecated: Use Topics instead. 
(Deprecated in GitLab 14.0) + TagList *[]string `url:"tag_list,omitempty" json:"tag_list,omitempty"` + // Deprecated: Use WikiAccessLevel instead. + WikiEnabled *bool `url:"wiki_enabled,omitempty" json:"wiki_enabled,omitempty"` +} + +// ContainerExpirationPolicyAttributes represents the available container +// expiration policy attributes. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#create-project +type ContainerExpirationPolicyAttributes struct { + Cadence *string `url:"cadence,omitempty" json:"cadence,omitempty"` + KeepN *int `url:"keep_n,omitempty" json:"keep_n,omitempty"` + OlderThan *string `url:"older_than,omitempty" json:"older_than,omitempty"` + NameRegexDelete *string `url:"name_regex_delete,omitempty" json:"name_regex_delete,omitempty"` + NameRegexKeep *string `url:"name_regex_keep,omitempty" json:"name_regex_keep,omitempty"` + Enabled *bool `url:"enabled,omitempty" json:"enabled,omitempty"` + + // Deprecated: Is replaced by NameRegexDelete and is internally hardwired to its value. + NameRegex *string `url:"name_regex,omitempty" json:"name_regex,omitempty"` +} + +// ProjectAvatar represents a GitLab project avatar. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#create-project +type ProjectAvatar struct { + Filename string + Image io.Reader +} + +// MarshalJSON implements the json.Marshaler interface. +func (a *ProjectAvatar) MarshalJSON() ([]byte, error) { + if a.Filename == "" && a.Image == nil { + return []byte(`""`), nil + } + type alias ProjectAvatar + return json.Marshal((*alias)(a)) +} + +// CreateProject creates a new project owned by the authenticated user. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#create-project +func (s *ProjectsService) CreateProject(opt *CreateProjectOptions, options ...RequestOptionFunc) (*Project, *Response, error) { + if opt.ContainerExpirationPolicyAttributes != nil { + // This is needed to satisfy the API. Should be deleted + // when NameRegex is removed (it's now deprecated). + opt.ContainerExpirationPolicyAttributes.NameRegex = opt.ContainerExpirationPolicyAttributes.NameRegexDelete + } + + var err error + var req *retryablehttp.Request + + if opt.Avatar == nil { + req, err = s.client.NewRequest(http.MethodPost, "projects", opt, options) + } else { + req, err = s.client.UploadRequest( + http.MethodPost, + "projects", + opt.Avatar.Image, + opt.Avatar.Filename, + UploadAvatar, + opt, + options, + ) + } + if err != nil { + return nil, nil, err + } + + p := new(Project) + resp, err := s.client.Do(req, p) + if err != nil { + return nil, resp, err + } + + return p, resp, nil +} + +// CreateProjectForUserOptions represents the available CreateProjectForUser() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#create-project-for-user +type CreateProjectForUserOptions CreateProjectOptions + +// CreateProjectForUser creates a new project owned by the specified user. +// Available only for admins. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#create-project-for-user +func (s *ProjectsService) CreateProjectForUser(user int, opt *CreateProjectForUserOptions, options ...RequestOptionFunc) (*Project, *Response, error) { + if opt.ContainerExpirationPolicyAttributes != nil { + // This is needed to satisfy the API. Should be deleted + // when NameRegex is removed (it's now deprecated). 
+ opt.ContainerExpirationPolicyAttributes.NameRegex = opt.ContainerExpirationPolicyAttributes.NameRegexDelete + } + + var err error + var req *retryablehttp.Request + u := fmt.Sprintf("projects/user/%d", user) + + if opt.Avatar == nil { + req, err = s.client.NewRequest(http.MethodPost, u, opt, options) + } else { + req, err = s.client.UploadRequest( + http.MethodPost, + u, + opt.Avatar.Image, + opt.Avatar.Filename, + UploadAvatar, + opt, + options, + ) + } + if err != nil { + return nil, nil, err + } + + p := new(Project) + resp, err := s.client.Do(req, p) + if err != nil { + return nil, resp, err + } + + return p, resp, nil +} + +// EditProjectOptions represents the available EditProject() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#edit-project +type EditProjectOptions struct { + AllowMergeOnSkippedPipeline *bool `url:"allow_merge_on_skipped_pipeline,omitempty" json:"allow_merge_on_skipped_pipeline,omitempty"` + AllowPipelineTriggerApproveDeployment *bool `url:"allow_pipeline_trigger_approve_deployment,omitempty" json:"allow_pipeline_trigger_approve_deployment,omitempty"` + OnlyAllowMergeIfAllStatusChecksPassed *bool `url:"only_allow_merge_if_all_status_checks_passed,omitempty" json:"only_allow_merge_if_all_status_checks_passed,omitempty"` + AnalyticsAccessLevel *AccessControlValue `url:"analytics_access_level,omitempty" json:"analytics_access_level,omitempty"` + ApprovalsBeforeMerge *int `url:"approvals_before_merge,omitempty" json:"approvals_before_merge,omitempty"` + AutoCancelPendingPipelines *string `url:"auto_cancel_pending_pipelines,omitempty" json:"auto_cancel_pending_pipelines,omitempty"` + AutoDevopsDeployStrategy *string `url:"auto_devops_deploy_strategy,omitempty" json:"auto_devops_deploy_strategy,omitempty"` + AutoDevopsEnabled *bool `url:"auto_devops_enabled,omitempty" json:"auto_devops_enabled,omitempty"` + AutocloseReferencedIssues *bool `url:"autoclose_referenced_issues,omitempty" json:"autoclose_referenced_issues,omitempty"` + Avatar *ProjectAvatar `url:"-" json:"avatar,omitempty"` + BuildCoverageRegex *string `url:"build_coverage_regex,omitempty" json:"build_coverage_regex,omitempty"` + BuildGitStrategy *string `url:"build_git_strategy,omitempty" json:"build_git_strategy,omitempty"` + BuildTimeout *int `url:"build_timeout,omitempty" json:"build_timeout,omitempty"` + BuildsAccessLevel *AccessControlValue `url:"builds_access_level,omitempty" json:"builds_access_level,omitempty"` + CIConfigPath *string `url:"ci_config_path,omitempty" json:"ci_config_path,omitempty"` + CIDefaultGitDepth *int `url:"ci_default_git_depth,omitempty" json:"ci_default_git_depth,omitempty"` + CIForwardDeploymentEnabled *bool `url:"ci_forward_deployment_enabled,omitempty" json:"ci_forward_deployment_enabled,omitempty"` + CIForwardDeploymentRollbackAllowed *bool `url:"ci_forward_deployment_rollback_allowed,omitempty" json:"ci_forward_deployment_rollback_allowed,omitempty"` + CISeperateCache *bool `url:"ci_separated_caches,omitempty" json:"ci_separated_caches,omitempty"` + CIRestrictPipelineCancellationRole *AccessControlValue `url:"ci_restrict_pipeline_cancellation_role,omitempty" json:"ci_restrict_pipeline_cancellation_role,omitempty"` + ContainerExpirationPolicyAttributes *ContainerExpirationPolicyAttributes `url:"container_expiration_policy_attributes,omitempty" json:"container_expiration_policy_attributes,omitempty"` + ContainerRegistryAccessLevel *AccessControlValue `url:"container_registry_access_level,omitempty" 
json:"container_registry_access_level,omitempty"` + DefaultBranch *string `url:"default_branch,omitempty" json:"default_branch,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + EmailsEnabled *bool `url:"emails_enabled,omitempty" json:"emails_enabled,omitempty"` + EnforceAuthChecksOnUploads *bool `url:"enforce_auth_checks_on_uploads,omitempty" json:"enforce_auth_checks_on_uploads,omitempty"` + ExternalAuthorizationClassificationLabel *string `url:"external_authorization_classification_label,omitempty" json:"external_authorization_classification_label,omitempty"` + ForkingAccessLevel *AccessControlValue `url:"forking_access_level,omitempty" json:"forking_access_level,omitempty"` + ImportURL *string `url:"import_url,omitempty" json:"import_url,omitempty"` + IssuesAccessLevel *AccessControlValue `url:"issues_access_level,omitempty" json:"issues_access_level,omitempty"` + IssueBranchTemplate *string `url:"issue_branch_template,omitempty" json:"issue_branch_template,omitempty"` + IssuesTemplate *string `url:"issues_template,omitempty" json:"issues_template,omitempty"` + KeepLatestArtifact *bool `url:"keep_latest_artifact,omitempty" json:"keep_latest_artifact,omitempty"` + LFSEnabled *bool `url:"lfs_enabled,omitempty" json:"lfs_enabled,omitempty"` + MergeCommitTemplate *string `url:"merge_commit_template,omitempty" json:"merge_commit_template,omitempty"` + MergeRequestDefaultTargetSelf *bool `url:"mr_default_target_self,omitempty" json:"mr_default_target_self,omitempty"` + MergeMethod *MergeMethodValue `url:"merge_method,omitempty" json:"merge_method,omitempty"` + MergePipelinesEnabled *bool `url:"merge_pipelines_enabled,omitempty" json:"merge_pipelines_enabled,omitempty"` + MergeRequestsAccessLevel *AccessControlValue `url:"merge_requests_access_level,omitempty" json:"merge_requests_access_level,omitempty"` + MergeRequestsTemplate *string `url:"merge_requests_template,omitempty" json:"merge_requests_template,omitempty"` + MergeTrainsEnabled *bool `url:"merge_trains_enabled,omitempty" json:"merge_trains_enabled,omitempty"` + Mirror *bool `url:"mirror,omitempty" json:"mirror,omitempty"` + MirrorBranchRegex *string `url:"mirror_branch_regex,omitempty" json:"mirror_branch_regex,omitempty"` + MirrorOverwritesDivergedBranches *bool `url:"mirror_overwrites_diverged_branches,omitempty" json:"mirror_overwrites_diverged_branches,omitempty"` + MirrorTriggerBuilds *bool `url:"mirror_trigger_builds,omitempty" json:"mirror_trigger_builds,omitempty"` + MirrorUserID *int `url:"mirror_user_id,omitempty" json:"mirror_user_id,omitempty"` + ModelExperimentsAccessLevel *AccessControlValue `url:"model_experiments_access_level,omitempty" json:"model_experiments_access_level,omitempty"` + ModelRegistryAccessLevel *AccessControlValue `url:"model_registry_access_level,omitempty" json:"model_registry_access_level,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` + OnlyAllowMergeIfAllDiscussionsAreResolved *bool `url:"only_allow_merge_if_all_discussions_are_resolved,omitempty" json:"only_allow_merge_if_all_discussions_are_resolved,omitempty"` + OnlyAllowMergeIfPipelineSucceeds *bool `url:"only_allow_merge_if_pipeline_succeeds,omitempty" json:"only_allow_merge_if_pipeline_succeeds,omitempty"` + OnlyMirrorProtectedBranches *bool `url:"only_mirror_protected_branches,omitempty" json:"only_mirror_protected_branches,omitempty"` + OperationsAccessLevel *AccessControlValue `url:"operations_access_level,omitempty" json:"operations_access_level,omitempty"` + 
PackagesEnabled *bool `url:"packages_enabled,omitempty" json:"packages_enabled,omitempty"` + PagesAccessLevel *AccessControlValue `url:"pages_access_level,omitempty" json:"pages_access_level,omitempty"` + Path *string `url:"path,omitempty" json:"path,omitempty"` + PublicBuilds *bool `url:"public_builds,omitempty" json:"public_builds,omitempty"` + ReleasesAccessLevel *AccessControlValue `url:"releases_access_level,omitempty" json:"releases_access_level,omitempty"` + EnvironmentsAccessLevel *AccessControlValue `url:"environments_access_level,omitempty" json:"environments_access_level,omitempty"` + FeatureFlagsAccessLevel *AccessControlValue `url:"feature_flags_access_level,omitempty" json:"feature_flags_access_level,omitempty"` + InfrastructureAccessLevel *AccessControlValue `url:"infrastructure_access_level,omitempty" json:"infrastructure_access_level,omitempty"` + MonitorAccessLevel *AccessControlValue `url:"monitor_access_level,omitempty" json:"monitor_access_level,omitempty"` + RemoveSourceBranchAfterMerge *bool `url:"remove_source_branch_after_merge,omitempty" json:"remove_source_branch_after_merge,omitempty"` + PreventMergeWithoutJiraIssue *bool `url:"prevent_merge_without_jira_issue,omitempty" json:"prevent_merge_without_jira_issue,omitempty"` + PrintingMergeRequestLinkEnabled *bool `url:"printing_merge_request_link_enabled,omitempty" json:"printing_merge_request_link_enabled,omitempty"` + RepositoryAccessLevel *AccessControlValue `url:"repository_access_level,omitempty" json:"repository_access_level,omitempty"` + RepositoryStorage *string `url:"repository_storage,omitempty" json:"repository_storage,omitempty"` + RequestAccessEnabled *bool `url:"request_access_enabled,omitempty" json:"request_access_enabled,omitempty"` + RequirementsAccessLevel *AccessControlValue `url:"requirements_access_level,omitempty" json:"requirements_access_level,omitempty"` + ResolveOutdatedDiffDiscussions *bool `url:"resolve_outdated_diff_discussions,omitempty" json:"resolve_outdated_diff_discussions,omitempty"` + RestrictUserDefinedVariables *bool `url:"restrict_user_defined_variables,omitempty" json:"restrict_user_defined_variables,omitempty"` + SecurityAndComplianceAccessLevel *AccessControlValue `url:"security_and_compliance_access_level,omitempty" json:"security_and_compliance_access_level,omitempty"` + ServiceDeskEnabled *bool `url:"service_desk_enabled,omitempty" json:"service_desk_enabled,omitempty"` + SharedRunnersEnabled *bool `url:"shared_runners_enabled,omitempty" json:"shared_runners_enabled,omitempty"` + GroupRunnersEnabled *bool `url:"group_runners_enabled,omitempty" json:"group_runners_enabled,omitempty"` + ShowDefaultAwardEmojis *bool `url:"show_default_award_emojis,omitempty" json:"show_default_award_emojis,omitempty"` + SnippetsAccessLevel *AccessControlValue `url:"snippets_access_level,omitempty" json:"snippets_access_level,omitempty"` + SquashCommitTemplate *string `url:"squash_commit_template,omitempty" json:"squash_commit_template,omitempty"` + SquashOption *SquashOptionValue `url:"squash_option,omitempty" json:"squash_option,omitempty"` + SuggestionCommitMessage *string `url:"suggestion_commit_message,omitempty" json:"suggestion_commit_message,omitempty"` + Topics *[]string `url:"topics,omitempty" json:"topics,omitempty"` + Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"` + WikiAccessLevel *AccessControlValue `url:"wiki_access_level,omitempty" json:"wiki_access_level,omitempty"` + + // Deprecated: Use ContainerRegistryAccessLevel instead. 
+ ContainerRegistryEnabled *bool `url:"container_registry_enabled,omitempty" json:"container_registry_enabled,omitempty"` + // Deprecated: Use EmailsEnabled instead + EmailsDisabled *bool `url:"emails_disabled,omitempty" json:"emails_disabled,omitempty"` + // Deprecated: Use IssuesAccessLevel instead. + IssuesEnabled *bool `url:"issues_enabled,omitempty" json:"issues_enabled,omitempty"` + // Deprecated: Use BuildsAccessLevel instead. + JobsEnabled *bool `url:"jobs_enabled,omitempty" json:"jobs_enabled,omitempty"` + // Deprecated: Use MergeRequestsAccessLevel instead. + MergeRequestsEnabled *bool `url:"merge_requests_enabled,omitempty" json:"merge_requests_enabled,omitempty"` + // Deprecated: Use SnippetsAccessLevel instead. + SnippetsEnabled *bool `url:"snippets_enabled,omitempty" json:"snippets_enabled,omitempty"` + // Deprecated: Use Topics instead. (Deprecated in GitLab 14.0) + TagList *[]string `url:"tag_list,omitempty" json:"tag_list,omitempty"` + // Deprecated: Use WikiAccessLevel instead. + WikiEnabled *bool `url:"wiki_enabled,omitempty" json:"wiki_enabled,omitempty"` +} + +// EditProject updates an existing project. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#edit-project +func (s *ProjectsService) EditProject(pid interface{}, opt *EditProjectOptions, options ...RequestOptionFunc) (*Project, *Response, error) { + if opt.ContainerExpirationPolicyAttributes != nil { + // This is needed to satisfy the API. Should be deleted + // when NameRegex is removed (it's now deprecated). + opt.ContainerExpirationPolicyAttributes.NameRegex = opt.ContainerExpirationPolicyAttributes.NameRegexDelete + } + + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s", PathEscape(project)) + + var req *retryablehttp.Request + + if opt.Avatar == nil || (opt.Avatar.Filename == "" && opt.Avatar.Image == nil) { + req, err = s.client.NewRequest(http.MethodPut, u, opt, options) + } else { + req, err = s.client.UploadRequest( + http.MethodPut, + u, + opt.Avatar.Image, + opt.Avatar.Filename, + UploadAvatar, + opt, + options, + ) + } + if err != nil { + return nil, nil, err + } + + p := new(Project) + resp, err := s.client.Do(req, p) + if err != nil { + return nil, resp, err + } + + return p, resp, nil +} + +// ForkProjectOptions represents the available ForkProject() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#fork-project +type ForkProjectOptions struct { + Description *string `url:"description,omitempty" json:"description,omitempty"` + MergeRequestDefaultTargetSelf *bool `url:"mr_default_target_self,omitempty" json:"mr_default_target_self,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` + NamespaceID *int `url:"namespace_id,omitempty" json:"namespace_id,omitempty"` + NamespacePath *string `url:"namespace_path,omitempty" json:"namespace_path,omitempty"` + Path *string `url:"path,omitempty" json:"path,omitempty"` + Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"` + + // Deprecated: This parameter has been split into NamespaceID and NamespacePath. + Namespace *string `url:"namespace,omitempty" json:"namespace,omitempty"` +} + +// ForkProject forks a project into the user namespace of the authenticated +// user. 
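+//
+// Example (illustrative sketch written from a consumer's perspective; it is
+// not part of the upstream file, and the client value, project ID, and
+// namespace ID are assumed):
+//
+//	fork, _, err := client.Projects.ForkProject(123, &gitlab.ForkProjectOptions{
+//		NamespaceID: gitlab.Int(42), // assumed target namespace
+//	})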
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#fork-project
+func (s *ProjectsService) ForkProject(pid interface{}, opt *ForkProjectOptions, options ...RequestOptionFunc) (*Project, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/fork", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	p := new(Project)
+	resp, err := s.client.Do(req, p)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return p, resp, nil
+}
+
+// StarProject stars a given project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/projects.html#star-a-project
+func (s *ProjectsService) StarProject(pid interface{}, options ...RequestOptionFunc) (*Project, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/star", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	p := new(Project)
+	resp, err := s.client.Do(req, p)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return p, resp, nil
+}
+
+// ListProjectInvidedGroupOptions represents the available
+// ListProjectsInvitedGroups() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/projects.html#list-a-projects-invited-groups
+type ListProjectInvidedGroupOptions struct {
+	ListOptions
+	Search               *string           `url:"search,omitempty" json:"search,omitempty"`
+	MinAccessLevel       *AccessLevelValue `url:"min_access_level,omitempty" json:"min_access_level,omitempty"`
+	Relation             *[]string         `url:"relation,omitempty" json:"relation,omitempty"`
+	WithCustomAttributes *bool             `url:"with_custom_attributes,omitempty" json:"with_custom_attributes,omitempty"`
+}
+
+// ListProjectsInvitedGroups lists the invited groups of a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/projects.html#list-a-projects-invited-groups
+func (s *ProjectsService) ListProjectsInvitedGroups(pid interface{}, opt *ListProjectInvidedGroupOptions, options ...RequestOptionFunc) ([]*ProjectGroup, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/invited_groups", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var pg []*ProjectGroup
+	resp, err := s.client.Do(req, &pg)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pg, resp, nil
+}
+
+// UnstarProject unstars a given project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/projects.html#unstar-a-project
+func (s *ProjectsService) UnstarProject(pid interface{}, options ...RequestOptionFunc) (*Project, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/unstar", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	p := new(Project)
+	resp, err := s.client.Do(req, p)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return p, resp, nil
+}
+
+// ArchiveProject archives the project if the user is either admin or the
+// project owner of this project.
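+//
+// Example (illustrative; the client value is assumed, and pid may be a
+// numeric ID or a "group/project" path):
+//
+//	p, _, err := client.Projects.ArchiveProject("group/project")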
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/projects.html#archive-a-project
+func (s *ProjectsService) ArchiveProject(pid interface{}, options ...RequestOptionFunc) (*Project, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/archive", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	p := new(Project)
+	resp, err := s.client.Do(req, p)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return p, resp, nil
+}
+
+// UnarchiveProject unarchives the project if the user is either admin or
+// the project owner of this project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/projects.html#unarchive-a-project
+func (s *ProjectsService) UnarchiveProject(pid interface{}, options ...RequestOptionFunc) (*Project, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/unarchive", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	p := new(Project)
+	resp, err := s.client.Do(req, p)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return p, resp, nil
+}
+
+// DeleteProject removes a project including all associated resources
+// (issues, merge requests, etc.).
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#delete-project
+func (s *ProjectsService) DeleteProject(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// ShareWithGroupOptions represents the options to share a project with a
+// group.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#share-project-with-group
+type ShareWithGroupOptions struct {
+	ExpiresAt   *string           `url:"expires_at" json:"expires_at"`
+	GroupAccess *AccessLevelValue `url:"group_access" json:"group_access"`
+	GroupID     *int              `url:"group_id" json:"group_id"`
+}
+
+// ShareProjectWithGroup shares a project with a group.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#share-project-with-group
+func (s *ProjectsService) ShareProjectWithGroup(pid interface{}, opt *ShareWithGroupOptions, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/share", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// DeleteSharedProjectFromGroup unshares a project from a group.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#delete-a-shared-project-link-within-a-group
+func (s *ProjectsService) DeleteSharedProjectFromGroup(pid interface{}, groupID int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/share/%d", PathEscape(project), groupID)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// ProjectMember represents a project member.
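+//
+// Example (illustrative; assumes this library's separate ProjectMembers
+// service, which is not defined in this file, and an initialized client):
+//
+//	members, _, err := client.ProjectMembers.ListProjectMembers(123, nil)
+//	if err == nil {
+//		for _, m := range members {
+//			fmt.Printf("%s: access level %d\n", m.Username, m.AccessLevel)
+//		}
+//	}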
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/members.html#list-all-members-of-a-group-or-project +type ProjectMember struct { + ID int `json:"id"` + Username string `json:"username"` + Email string `json:"email"` + Name string `json:"name"` + State string `json:"state"` + CreatedAt *time.Time `json:"created_at"` + ExpiresAt *ISOTime `json:"expires_at"` + AccessLevel AccessLevelValue `json:"access_level"` + WebURL string `json:"web_url"` + AvatarURL string `json:"avatar_url"` +} + +// HookCustomHeader represents a project or group hook custom header +// Note: "Key" is returned from the Get operation, but "Value" is not +// The List operation doesn't return any headers at all for Projects, +// but does return headers for Groups +type HookCustomHeader struct { + Key string `json:"key"` + Value string `json:"value"` +} + +// ProjectHook represents a project hook. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#list-project-hooks +type ProjectHook struct { + ID int `json:"id"` + URL string `json:"url"` + ConfidentialNoteEvents bool `json:"confidential_note_events"` + ProjectID int `json:"project_id"` + PushEvents bool `json:"push_events"` + PushEventsBranchFilter string `json:"push_events_branch_filter"` + IssuesEvents bool `json:"issues_events"` + ConfidentialIssuesEvents bool `json:"confidential_issues_events"` + MergeRequestsEvents bool `json:"merge_requests_events"` + TagPushEvents bool `json:"tag_push_events"` + NoteEvents bool `json:"note_events"` + JobEvents bool `json:"job_events"` + PipelineEvents bool `json:"pipeline_events"` + WikiPageEvents bool `json:"wiki_page_events"` + DeploymentEvents bool `json:"deployment_events"` + ReleasesEvents bool `json:"releases_events"` + EnableSSLVerification bool `json:"enable_ssl_verification"` + CreatedAt *time.Time `json:"created_at"` + ResourceAccessTokenEvents bool `json:"resource_access_token_events"` + CustomWebhookTemplate string `json:"custom_webhook_template"` + CustomHeaders []*HookCustomHeader `json:"custom_headers"` +} + +// ListProjectHooksOptions represents the available ListProjectHooks() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#list-project-hooks +type ListProjectHooksOptions ListOptions + +// ListProjectHooks gets a list of project hooks. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#list-project-hooks +func (s *ProjectsService) ListProjectHooks(pid interface{}, opt *ListProjectHooksOptions, options ...RequestOptionFunc) ([]*ProjectHook, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/hooks", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var ph []*ProjectHook + resp, err := s.client.Do(req, &ph) + if err != nil { + return nil, resp, err + } + + return ph, resp, nil +} + +// GetProjectHook gets a specific hook for a project. 
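+//
+// Example (illustrative; the project and hook IDs are assumed):
+//
+//	hook, _, err := client.Projects.GetProjectHook(123, 27)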
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#get-project-hook +func (s *ProjectsService) GetProjectHook(pid interface{}, hook int, options ...RequestOptionFunc) (*ProjectHook, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/hooks/%d", PathEscape(project), hook) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + ph := new(ProjectHook) + resp, err := s.client.Do(req, ph) + if err != nil { + return nil, resp, err + } + + return ph, resp, nil +} + +// AddProjectHookOptions represents the available AddProjectHook() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#add-project-hook +type AddProjectHookOptions struct { + ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` + ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` + DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"` + EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"` + IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` + JobEvents *bool `url:"job_events,omitempty" json:"job_events,omitempty"` + MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` + NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` + PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` + PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` + PushEventsBranchFilter *string `url:"push_events_branch_filter,omitempty" json:"push_events_branch_filter,omitempty"` + ReleasesEvents *bool `url:"releases_events,omitempty" json:"releases_events,omitempty"` + TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` + Token *string `url:"token,omitempty" json:"token,omitempty"` + URL *string `url:"url,omitempty" json:"url,omitempty"` + WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` + ResourceAccessTokenEvents *bool `url:"resource_access_token_events,omitempty" json:"resource_access_token_events,omitempty"` + CustomWebhookTemplate *string `url:"custom_webhook_template,omitempty" json:"custom_webhook_template,omitempty"` + CustomHeaders *[]*HookCustomHeader `url:"custom_headers,omitempty" json:"custom_headers,omitempty"` +} + +// AddProjectHook adds a hook to a specified project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#add-project-hook +func (s *ProjectsService) AddProjectHook(pid interface{}, opt *AddProjectHookOptions, options ...RequestOptionFunc) (*ProjectHook, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/hooks", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + ph := new(ProjectHook) + resp, err := s.client.Do(req, ph) + if err != nil { + return nil, resp, err + } + + return ph, resp, nil +} + +// EditProjectHookOptions represents the available EditProjectHook() options. 
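+//
+// Example (illustrative; the URL and flag values are assumed, and only the
+// fields that are set are sent):
+//
+//	opt := &gitlab.EditProjectHookOptions{
+//		URL:                   gitlab.String("https://example.com/hook"),
+//		PushEvents:            gitlab.Bool(true),
+//		EnableSSLVerification: gitlab.Bool(true),
+//	}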
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/projects.html#edit-project-hook
+type EditProjectHookOptions struct {
+	ConfidentialIssuesEvents  *bool                `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"`
+	ConfidentialNoteEvents    *bool                `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"`
+	DeploymentEvents          *bool                `url:"deployment_events,omitempty" json:"deployment_events,omitempty"`
+	EnableSSLVerification     *bool                `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"`
+	IssuesEvents              *bool                `url:"issues_events,omitempty" json:"issues_events,omitempty"`
+	JobEvents                 *bool                `url:"job_events,omitempty" json:"job_events,omitempty"`
+	MergeRequestsEvents       *bool                `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"`
+	NoteEvents                *bool                `url:"note_events,omitempty" json:"note_events,omitempty"`
+	PipelineEvents            *bool                `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"`
+	PushEvents                *bool                `url:"push_events,omitempty" json:"push_events,omitempty"`
+	PushEventsBranchFilter    *string              `url:"push_events_branch_filter,omitempty" json:"push_events_branch_filter,omitempty"`
+	ReleasesEvents            *bool                `url:"releases_events,omitempty" json:"releases_events,omitempty"`
+	TagPushEvents             *bool                `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"`
+	Token                     *string              `url:"token,omitempty" json:"token,omitempty"`
+	URL                       *string              `url:"url,omitempty" json:"url,omitempty"`
+	WikiPageEvents            *bool                `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"`
+	ResourceAccessTokenEvents *bool                `url:"resource_access_token_events,omitempty" json:"resource_access_token_events,omitempty"`
+	CustomWebhookTemplate     *string              `url:"custom_webhook_template,omitempty" json:"custom_webhook_template,omitempty"`
+	CustomHeaders             *[]*HookCustomHeader `url:"custom_headers,omitempty" json:"custom_headers,omitempty"`
+}
+
+// EditProjectHook edits a hook for a specified project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/projects.html#edit-project-hook
+func (s *ProjectsService) EditProjectHook(pid interface{}, hook int, opt *EditProjectHookOptions, options ...RequestOptionFunc) (*ProjectHook, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/hooks/%d", PathEscape(project), hook)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	ph := new(ProjectHook)
+	resp, err := s.client.Do(req, ph)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ph, resp, nil
+}
+
+// DeleteProjectHook removes a hook from a project. This is an idempotent
+// method and can be called multiple times, whether the hook exists or not.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/projects.html#delete-project-hook
+func (s *ProjectsService) DeleteProjectHook(pid interface{}, hook int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/hooks/%d", PathEscape(project), hook)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// TriggerTestProjectHook triggers a test hook for a specified project.
+//
+// In GitLab 17.0 and later, this endpoint has a special rate limit.
+// In GitLab 17.0 the rate was three requests per minute for each project hook.
+// In GitLab 17.1 this was changed to five requests per minute for each project +// and authenticated user. +// +// To disable this limit on self-managed GitLab and GitLab Dedicated, +// an administrator can disable the feature flag named web_hook_test_api_endpoint_rate_limit. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#trigger-a-test-project-hook +func (s *ProjectsService) TriggerTestProjectHook(pid interface{}, hook int, event ProjectHookEvent, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/hooks/%d/test/%s", PathEscape(project), hook, string(event)) + + req, err := s.client.NewRequest(http.MethodPost, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// SetHookCustomHeaderOptions represents a project or group hook custom header. +// If the header isn't present, it will be created. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#set-a-custom-header +type SetHookCustomHeaderOptions struct { + Value *string `json:"value,omitempty"` +} + +// SetProjectCustomHeader creates or updates a project custom webhook header. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#set-a-custom-header +func (s *ProjectsService) SetProjectCustomHeader(pid interface{}, hook int, key string, opt *SetHookCustomHeaderOptions, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/hooks/%d/custom_headers/%s", PathEscape(project), hook, key) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// DeleteProjectCustomHeader deletes a project custom webhook header. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#delete-a-custom-header +func (s *ProjectsService) DeleteProjectCustomHeader(pid interface{}, hook int, key string, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/hooks/%d/custom_headers/%s", PathEscape(project), hook, key) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// ProjectForkRelation represents a project fork relationship. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#admin-fork-relation +type ProjectForkRelation struct { + ID int `json:"id"` + ForkedToProjectID int `json:"forked_to_project_id"` + ForkedFromProjectID int `json:"forked_from_project_id"` + CreatedAt *time.Time `json:"created_at"` + UpdatedAt *time.Time `json:"updated_at"` +} + +// CreateProjectForkRelation creates a forked from/to relation between +// existing projects. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#create-a-forked-fromto-relation-between-existing-projects. 
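+//
+// Example (illustrative; per the endpoint above, this marks project 123 as a
+// fork of project 456, with both IDs assumed):
+//
+//	rel, _, err := client.Projects.CreateProjectForkRelation(123, 456)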
+func (s *ProjectsService) CreateProjectForkRelation(pid interface{}, fork int, options ...RequestOptionFunc) (*ProjectForkRelation, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/fork/%d", PathEscape(project), fork) + + req, err := s.client.NewRequest(http.MethodPost, u, nil, options) + if err != nil { + return nil, nil, err + } + + pfr := new(ProjectForkRelation) + resp, err := s.client.Do(req, pfr) + if err != nil { + return nil, resp, err + } + + return pfr, resp, nil +} + +// DeleteProjectForkRelation deletes an existing forked from relationship. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#delete-an-existing-forked-from-relationship +func (s *ProjectsService) DeleteProjectForkRelation(pid interface{}, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/fork", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// ProjectFile represents an uploaded project file. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#upload-a-file +type ProjectFile struct { + Alt string `json:"alt"` + URL string `json:"url"` + FullPath string `json:"full_path"` + Markdown string `json:"markdown"` +} + +// UploadFile uploads a file. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#upload-a-file +func (s *ProjectsService) UploadFile(pid interface{}, content io.Reader, filename string, options ...RequestOptionFunc) (*ProjectFile, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/uploads", PathEscape(project)) + + req, err := s.client.UploadRequest( + http.MethodPost, + u, + content, + filename, + UploadFile, + nil, + options, + ) + if err != nil { + return nil, nil, err + } + + pf := new(ProjectFile) + resp, err := s.client.Do(req, pf) + if err != nil { + return nil, resp, err + } + + return pf, resp, nil +} + +// UploadAvatar uploads an avatar. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#upload-a-project-avatar +func (s *ProjectsService) UploadAvatar(pid interface{}, avatar io.Reader, filename string, options ...RequestOptionFunc) (*Project, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s", PathEscape(project)) + + req, err := s.client.UploadRequest( + http.MethodPut, + u, + avatar, + filename, + UploadAvatar, + nil, + options, + ) + if err != nil { + return nil, nil, err + } + + p := new(Project) + resp, err := s.client.Do(req, p) + if err != nil { + return nil, resp, err + } + + return p, resp, nil +} + +// ListProjectForks gets a list of project forks. 
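+//
+// Example (illustrative; reuses ListProjectsOptions for paging, with the
+// project ID and page size assumed):
+//
+//	forks, _, err := client.Projects.ListProjectForks(123, &gitlab.ListProjectsOptions{
+//		ListOptions: gitlab.ListOptions{Page: 1, PerPage: 20},
+//	})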
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#list-forks-of-a-project +func (s *ProjectsService) ListProjectForks(pid interface{}, opt *ListProjectsOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/forks", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var forks []*Project + resp, err := s.client.Do(req, &forks) + if err != nil { + return nil, resp, err + } + + return forks, resp, nil +} + +// ProjectPushRules represents a project push rule. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#push-rules +type ProjectPushRules struct { + ID int `json:"id"` + ProjectID int `json:"project_id"` + CommitMessageRegex string `json:"commit_message_regex"` + CommitMessageNegativeRegex string `json:"commit_message_negative_regex"` + BranchNameRegex string `json:"branch_name_regex"` + DenyDeleteTag bool `json:"deny_delete_tag"` + CreatedAt *time.Time `json:"created_at"` + MemberCheck bool `json:"member_check"` + PreventSecrets bool `json:"prevent_secrets"` + AuthorEmailRegex string `json:"author_email_regex"` + FileNameRegex string `json:"file_name_regex"` + MaxFileSize int `json:"max_file_size"` + CommitCommitterCheck bool `json:"commit_committer_check"` + CommitCommitterNameCheck bool `json:"commit_committer_name_check"` + RejectUnsignedCommits bool `json:"reject_unsigned_commits"` + RejectNonDCOCommits bool `json:"reject_non_dco_commits"` +} + +// GetProjectPushRules gets the push rules of a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#get-project-push-rules +func (s *ProjectsService) GetProjectPushRules(pid interface{}, options ...RequestOptionFunc) (*ProjectPushRules, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/push_rule", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + ppr := new(ProjectPushRules) + resp, err := s.client.Do(req, ppr) + if err != nil { + return nil, resp, err + } + + return ppr, resp, nil +} + +// AddProjectPushRuleOptions represents the available AddProjectPushRule() +// options. 
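+//
+// Example (illustrative; the regex and project ID are assumed):
+//
+//	rules, _, err := client.Projects.AddProjectPushRule(123, &gitlab.AddProjectPushRuleOptions{
+//		CommitMessageRegex: gitlab.String(`^JIRA-\d+:`),
+//		DenyDeleteTag:      gitlab.Bool(true),
+//	})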
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#add-project-push-rule +type AddProjectPushRuleOptions struct { + AuthorEmailRegex *string `url:"author_email_regex,omitempty" json:"author_email_regex,omitempty"` + BranchNameRegex *string `url:"branch_name_regex,omitempty" json:"branch_name_regex,omitempty"` + CommitCommitterCheck *bool `url:"commit_committer_check,omitempty" json:"commit_committer_check,omitempty"` + CommitCommitterNameCheck *bool `url:"commit_committer_name_check,omitempty" json:"commit_committer_name_check,omitempty"` + CommitMessageNegativeRegex *string `url:"commit_message_negative_regex,omitempty" json:"commit_message_negative_regex,omitempty"` + CommitMessageRegex *string `url:"commit_message_regex,omitempty" json:"commit_message_regex,omitempty"` + DenyDeleteTag *bool `url:"deny_delete_tag,omitempty" json:"deny_delete_tag,omitempty"` + FileNameRegex *string `url:"file_name_regex,omitempty" json:"file_name_regex,omitempty"` + MaxFileSize *int `url:"max_file_size,omitempty" json:"max_file_size,omitempty"` + MemberCheck *bool `url:"member_check,omitempty" json:"member_check,omitempty"` + PreventSecrets *bool `url:"prevent_secrets,omitempty" json:"prevent_secrets,omitempty"` + RejectUnsignedCommits *bool `url:"reject_unsigned_commits,omitempty" json:"reject_unsigned_commits,omitempty"` + RejectNonDCOCommits *bool `url:"reject_non_dco_commits,omitempty" json:"reject_non_dco_commits,omitempty"` +} + +// AddProjectPushRule adds a push rule to a specified project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#add-project-push-rule +func (s *ProjectsService) AddProjectPushRule(pid interface{}, opt *AddProjectPushRuleOptions, options ...RequestOptionFunc) (*ProjectPushRules, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/push_rule", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + ppr := new(ProjectPushRules) + resp, err := s.client.Do(req, ppr) + if err != nil { + return nil, resp, err + } + + return ppr, resp, nil +} + +// EditProjectPushRuleOptions represents the available EditProjectPushRule() +// options. 
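+//
+// Example (illustrative; only the fields that are set are updated, and the
+// size value is assumed):
+//
+//	_, _, err := client.Projects.EditProjectPushRule(123, &gitlab.EditProjectPushRuleOptions{
+//		MaxFileSize: gitlab.Int(50),
+//	})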
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#edit-project-push-rule +type EditProjectPushRuleOptions struct { + AuthorEmailRegex *string `url:"author_email_regex,omitempty" json:"author_email_regex,omitempty"` + BranchNameRegex *string `url:"branch_name_regex,omitempty" json:"branch_name_regex,omitempty"` + CommitCommitterCheck *bool `url:"commit_committer_check,omitempty" json:"commit_committer_check,omitempty"` + CommitCommitterNameCheck *bool `url:"commit_committer_name_check,omitempty" json:"commit_committer_name_check,omitempty"` + CommitMessageNegativeRegex *string `url:"commit_message_negative_regex,omitempty" json:"commit_message_negative_regex,omitempty"` + CommitMessageRegex *string `url:"commit_message_regex,omitempty" json:"commit_message_regex,omitempty"` + DenyDeleteTag *bool `url:"deny_delete_tag,omitempty" json:"deny_delete_tag,omitempty"` + FileNameRegex *string `url:"file_name_regex,omitempty" json:"file_name_regex,omitempty"` + MaxFileSize *int `url:"max_file_size,omitempty" json:"max_file_size,omitempty"` + MemberCheck *bool `url:"member_check,omitempty" json:"member_check,omitempty"` + PreventSecrets *bool `url:"prevent_secrets,omitempty" json:"prevent_secrets,omitempty"` + RejectUnsignedCommits *bool `url:"reject_unsigned_commits,omitempty" json:"reject_unsigned_commits,omitempty"` + RejectNonDCOCommits *bool `url:"reject_non_dco_commits,omitempty" json:"reject_non_dco_commits,omitempty"` +} + +// EditProjectPushRule edits a push rule for a specified project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#edit-project-push-rule +func (s *ProjectsService) EditProjectPushRule(pid interface{}, opt *EditProjectPushRuleOptions, options ...RequestOptionFunc) (*ProjectPushRules, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/push_rule", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, nil, err + } + + ppr := new(ProjectPushRules) + resp, err := s.client.Do(req, ppr) + if err != nil { + return nil, resp, err + } + + return ppr, resp, nil +} + +// DeleteProjectPushRule removes a push rule from a project. This is an +// idempotent method and can be called multiple times. Either the push rule is +// available or not. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/projects.html#delete-project-push-rule +func (s *ProjectsService) DeleteProjectPushRule(pid interface{}, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/push_rule", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// ProjectApprovals represents GitLab project level merge request approvals. 
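+//
+// Example (illustrative; fetches the configuration this struct describes,
+// with the project ID assumed):
+//
+//	approvals, _, err := client.Projects.GetApprovalConfiguration(123)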
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_request_approvals.html#project-level-mr-approvals +type ProjectApprovals struct { + Approvers []*MergeRequestApproverUser `json:"approvers"` + ApproverGroups []*MergeRequestApproverGroup `json:"approver_groups"` + ApprovalsBeforeMerge int `json:"approvals_before_merge"` + ResetApprovalsOnPush bool `json:"reset_approvals_on_push"` + DisableOverridingApproversPerMergeRequest bool `json:"disable_overriding_approvers_per_merge_request"` + MergeRequestsAuthorApproval bool `json:"merge_requests_author_approval"` + MergeRequestsDisableCommittersApproval bool `json:"merge_requests_disable_committers_approval"` + RequirePasswordToApprove bool `json:"require_password_to_approve"` + SelectiveCodeOwnerRemovals bool `json:"selective_code_owner_removals,omitempty"` +} + +// GetApprovalConfiguration get the approval configuration for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_request_approvals.html#get-configuration +func (s *ProjectsService) GetApprovalConfiguration(pid interface{}, options ...RequestOptionFunc) (*ProjectApprovals, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/approvals", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + pa := new(ProjectApprovals) + resp, err := s.client.Do(req, pa) + if err != nil { + return nil, resp, err + } + + return pa, resp, nil +} + +// ChangeApprovalConfigurationOptions represents the available +// ApprovalConfiguration() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_request_approvals.html#change-configuration +type ChangeApprovalConfigurationOptions struct { + ApprovalsBeforeMerge *int `url:"approvals_before_merge,omitempty" json:"approvals_before_merge,omitempty"` + DisableOverridingApproversPerMergeRequest *bool `url:"disable_overriding_approvers_per_merge_request,omitempty" json:"disable_overriding_approvers_per_merge_request,omitempty"` + MergeRequestsAuthorApproval *bool `url:"merge_requests_author_approval,omitempty" json:"merge_requests_author_approval,omitempty"` + MergeRequestsDisableCommittersApproval *bool `url:"merge_requests_disable_committers_approval,omitempty" json:"merge_requests_disable_committers_approval,omitempty"` + RequirePasswordToApprove *bool `url:"require_password_to_approve,omitempty" json:"require_password_to_approve,omitempty"` + ResetApprovalsOnPush *bool `url:"reset_approvals_on_push,omitempty" json:"reset_approvals_on_push,omitempty"` + SelectiveCodeOwnerRemovals *bool `url:"selective_code_owner_removals,omitempty" json:"selective_code_owner_removals,omitempty"` +} + +// ChangeApprovalConfiguration updates the approval configuration for a project. 
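+//
+// Example (illustrative; only the provided fields are changed, and the
+// project ID is assumed):
+//
+//	_, _, err := client.Projects.ChangeApprovalConfiguration(123,
+//		&gitlab.ChangeApprovalConfigurationOptions{
+//			ResetApprovalsOnPush: gitlab.Bool(true),
+//		})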
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_request_approvals.html#change-configuration +func (s *ProjectsService) ChangeApprovalConfiguration(pid interface{}, opt *ChangeApprovalConfigurationOptions, options ...RequestOptionFunc) (*ProjectApprovals, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/approvals", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + pa := new(ProjectApprovals) + resp, err := s.client.Do(req, pa) + if err != nil { + return nil, resp, err + } + + return pa, resp, nil +} + +// GetProjectApprovalRulesListsOptions represents the available GetProjectApprovalRules() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/merge_request_approvals.html#get-project-level-rules +type GetProjectApprovalRulesListsOptions ListOptions + +// GetProjectApprovalRules looks up the list of project level approver rules. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_request_approvals.html#get-project-level-rules +func (s *ProjectsService) GetProjectApprovalRules(pid interface{}, opt *GetProjectApprovalRulesListsOptions, options ...RequestOptionFunc) ([]*ProjectApprovalRule, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/approval_rules", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var par []*ProjectApprovalRule + resp, err := s.client.Do(req, &par) + if err != nil { + return nil, resp, err + } + + return par, resp, nil +} + +// GetProjectApprovalRule gets the project level approvers. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_request_approvals.html#get-a-single-project-level-rule +func (s *ProjectsService) GetProjectApprovalRule(pid interface{}, ruleID int, options ...RequestOptionFunc) (*ProjectApprovalRule, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/approval_rules/%d", PathEscape(project), ruleID) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + par := new(ProjectApprovalRule) + resp, err := s.client.Do(req, &par) + if err != nil { + return nil, resp, err + } + + return par, resp, nil +} + +// CreateProjectLevelRuleOptions represents the available CreateProjectApprovalRule() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_request_approvals.html#create-project-level-rule +type CreateProjectLevelRuleOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + ApprovalsRequired *int `url:"approvals_required,omitempty" json:"approvals_required,omitempty"` + ReportType *string `url:"report_type,omitempty" json:"report_type,omitempty"` + RuleType *string `url:"rule_type,omitempty" json:"rule_type,omitempty"` + UserIDs *[]int `url:"user_ids,omitempty" json:"user_ids,omitempty"` + GroupIDs *[]int `url:"group_ids,omitempty" json:"group_ids,omitempty"` + ProtectedBranchIDs *[]int `url:"protected_branch_ids,omitempty" json:"protected_branch_ids,omitempty"` + AppliesToAllProtectedBranches *bool `url:"applies_to_all_protected_branches,omitempty" json:"applies_to_all_protected_branches,omitempty"` +} + +// CreateProjectApprovalRule creates a new project-level approval rule. 
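+//
+// Example (illustrative; the rule name, approver count, and user IDs are
+// assumed):
+//
+//	rule, _, err := client.Projects.CreateProjectApprovalRule(123,
+//		&gitlab.CreateProjectLevelRuleOptions{
+//			Name:              gitlab.String("security-review"),
+//			ApprovalsRequired: gitlab.Int(2),
+//			UserIDs:           &[]int{10, 11},
+//		})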
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_request_approvals.html#create-project-level-rule +func (s *ProjectsService) CreateProjectApprovalRule(pid interface{}, opt *CreateProjectLevelRuleOptions, options ...RequestOptionFunc) (*ProjectApprovalRule, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/approval_rules", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + par := new(ProjectApprovalRule) + resp, err := s.client.Do(req, &par) + if err != nil { + return nil, resp, err + } + + return par, resp, nil +} + +// UpdateProjectLevelRuleOptions represents the available UpdateProjectApprovalRule() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_request_approvals.html#update-project-level-rule +type UpdateProjectLevelRuleOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + ApprovalsRequired *int `url:"approvals_required,omitempty" json:"approvals_required,omitempty"` + UserIDs *[]int `url:"user_ids,omitempty" json:"user_ids,omitempty"` + GroupIDs *[]int `url:"group_ids,omitempty" json:"group_ids,omitempty"` + ProtectedBranchIDs *[]int `url:"protected_branch_ids,omitempty" json:"protected_branch_ids,omitempty"` + AppliesToAllProtectedBranches *bool `url:"applies_to_all_protected_branches,omitempty" json:"applies_to_all_protected_branches,omitempty"` +} + +// UpdateProjectApprovalRule updates an existing approval rule with new options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_request_approvals.html#update-project-level-rule +func (s *ProjectsService) UpdateProjectApprovalRule(pid interface{}, approvalRule int, opt *UpdateProjectLevelRuleOptions, options ...RequestOptionFunc) (*ProjectApprovalRule, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/approval_rules/%d", PathEscape(project), approvalRule) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, nil, err + } + + par := new(ProjectApprovalRule) + resp, err := s.client.Do(req, &par) + if err != nil { + return nil, resp, err + } + + return par, resp, nil +} + +// DeleteProjectApprovalRule deletes a project-level approval rule. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_request_approvals.html#delete-project-level-rule +func (s *ProjectsService) DeleteProjectApprovalRule(pid interface{}, approvalRule int, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/approval_rules/%d", PathEscape(project), approvalRule) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// ChangeAllowedApproversOptions represents the available ChangeAllowedApprovers() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/merge_request_approvals.html#change-allowed-approvers +type ChangeAllowedApproversOptions struct { + ApproverGroupIDs *[]int `url:"approver_group_ids,omitempty" json:"approver_group_ids,omitempty"` + ApproverIDs *[]int `url:"approver_ids,omitempty" json:"approver_ids,omitempty"` +} + +// ChangeAllowedApprovers updates the list of approvers and approver groups. 
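+//
+// Example (illustrative; the approver and group IDs are assumed):
+//
+//	_, _, err := client.Projects.ChangeAllowedApprovers(123,
+//		&gitlab.ChangeAllowedApproversOptions{
+//			ApproverIDs:      &[]int{10},
+//			ApproverGroupIDs: &[]int{20},
+//		})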
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_request_approvals.html#change-allowed-approvers
+func (s *ProjectsService) ChangeAllowedApprovers(pid interface{}, opt *ChangeAllowedApproversOptions, options ...RequestOptionFunc) (*ProjectApprovals, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/approvers", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pa := new(ProjectApprovals)
+	resp, err := s.client.Do(req, pa)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pa, resp, nil
+}
+
+// ProjectPullMirrorDetails represents the details of the configured pull
+// mirror and its update status.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/projects.html#get-a-projects-pull-mirror-details
+type ProjectPullMirrorDetails struct {
+	ID                     int        `json:"id"`
+	LastError              string     `json:"last_error"`
+	LastSuccessfulUpdateAt *time.Time `json:"last_successful_update_at"`
+	LastUpdateAt           *time.Time `json:"last_update_at"`
+	LastUpdateStartedAt    *time.Time `json:"last_update_started_at"`
+	UpdateStatus           string     `json:"update_status"`
+	URL                    string     `json:"url"`
+}
+
+// GetProjectPullMirrorDetails returns the pull mirror details.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/projects.html#get-a-projects-pull-mirror-details
+func (s *ProjectsService) GetProjectPullMirrorDetails(pid interface{}, options ...RequestOptionFunc) (*ProjectPullMirrorDetails, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/mirror/pull", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pmd := new(ProjectPullMirrorDetails)
+	resp, err := s.client.Do(req, pmd)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pmd, resp, nil
+}
+
+// StartMirroringProject starts the pull mirroring process for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/projects.html#start-the-pull-mirroring-process-for-a-project
+func (s *ProjectsService) StartMirroringProject(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/mirror/pull", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := s.client.Do(req, nil)
+	if err != nil {
+		return resp, err
+	}
+
+	return resp, nil
+}
+
+// TransferProjectOptions represents the available TransferProject() options.
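+//
+// Example (illustrative; Namespace accepts either a namespace ID or a path
+// string, and both values here are assumed):
+//
+//	p, _, err := client.Projects.TransferProject(123,
+//		&gitlab.TransferProjectOptions{Namespace: "new-group"})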
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#transfer-a-project-to-a-new-namespace
+type TransferProjectOptions struct {
+	Namespace interface{} `url:"namespace,omitempty" json:"namespace,omitempty"`
+}
+
+// TransferProject transfers a project to the specified namespace.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#transfer-a-project-to-a-new-namespace
+func (s *ProjectsService) TransferProject(pid interface{}, opt *TransferProjectOptions, options ...RequestOptionFunc) (*Project, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/transfer", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	p := new(Project)
+	resp, err := s.client.Do(req, p)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return p, resp, nil
+}
+
+// StartHousekeepingProject starts the housekeeping task for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/projects.html#start-the-housekeeping-task-for-a-project
+func (s *ProjectsService) StartHousekeepingProject(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/housekeeping", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// ProjectReposityStorage represents the repository storage information of a
+// project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/projects.html#get-the-path-to-repository-storage
+type ProjectReposityStorage struct {
+	ProjectID         int        `json:"project_id"`
+	DiskPath          string     `json:"disk_path"`
+	CreatedAt         *time.Time `json:"created_at"`
+	RepositoryStorage string     `json:"repository_storage"`
+}
+
+// GetRepositoryStorage gets the path to repository storage for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/projects.html#get-the-path-to-repository-storage
+func (s *ProjectsService) GetRepositoryStorage(pid interface{}, options ...RequestOptionFunc) (*ProjectReposityStorage, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/storage", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	prs := new(ProjectReposityStorage)
+	resp, err := s.client.Do(req, prs)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return prs, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/protected_branches.go b/vendor/github.com/xanzy/go-gitlab/protected_branches.go
new file mode 100644
index 0000000000..d13f57a608
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/protected_branches.go
@@ -0,0 +1,257 @@
+//
+// Copyright 2021, Sander van Harmelen, Michael Lihs
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+)
+
+// ProtectedBranchesService handles communication with the protected branch
+// related methods of the GitLab API.
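+//
+// A hypothetical usage sketch (editor's illustration): assuming an
+// authenticated *gitlab.Client named git, protected branches of project 1234
+// matching a wildcard can be listed like so:
+//
+//	search := "release-*"
+//	branches, _, err := git.ProtectedBranches.ListProtectedBranches(1234, &gitlab.ListProtectedBranchesOptions{
+//		Search: &search,
+//	})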
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/protected_branches.html +type ProtectedBranchesService struct { + client *Client +} + +// ProtectedBranch represents a protected branch. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/protected_branches.html#list-protected-branches +type ProtectedBranch struct { + ID int `json:"id"` + Name string `json:"name"` + PushAccessLevels []*BranchAccessDescription `json:"push_access_levels"` + MergeAccessLevels []*BranchAccessDescription `json:"merge_access_levels"` + UnprotectAccessLevels []*BranchAccessDescription `json:"unprotect_access_levels"` + AllowForcePush bool `json:"allow_force_push"` + CodeOwnerApprovalRequired bool `json:"code_owner_approval_required"` +} + +// BranchAccessDescription represents the access description for a protected +// branch. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/protected_branches.html#list-protected-branches +type BranchAccessDescription struct { + ID int `json:"id"` + AccessLevel AccessLevelValue `json:"access_level"` + AccessLevelDescription string `json:"access_level_description"` + UserID int `json:"user_id"` + GroupID int `json:"group_id"` +} + +// ListProtectedBranchesOptions represents the available ListProtectedBranches() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/protected_branches.html#list-protected-branches +type ListProtectedBranchesOptions struct { + ListOptions + Search *string `url:"search,omitempty" json:"search,omitempty"` +} + +// ListProtectedBranches gets a list of protected branches from a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/protected_branches.html#list-protected-branches +func (s *ProtectedBranchesService) ListProtectedBranches(pid interface{}, opt *ListProtectedBranchesOptions, options ...RequestOptionFunc) ([]*ProtectedBranch, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/protected_branches", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var p []*ProtectedBranch + resp, err := s.client.Do(req, &p) + if err != nil { + return nil, resp, err + } + + return p, resp, nil +} + +// GetProtectedBranch gets a single protected branch or wildcard protected branch. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/protected_branches.html#get-a-single-protected-branch-or-wildcard-protected-branch +func (s *ProtectedBranchesService) GetProtectedBranch(pid interface{}, branch string, options ...RequestOptionFunc) (*ProtectedBranch, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/protected_branches/%s", PathEscape(project), url.PathEscape(branch)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + p := new(ProtectedBranch) + resp, err := s.client.Do(req, p) + if err != nil { + return nil, resp, err + } + + return p, resp, nil +} + +// ProtectRepositoryBranchesOptions represents the available +// ProtectRepositoryBranches() options. 
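+//
+// Editor's sketch of typical use (assuming the AccessLevel helper and the
+// permission constants defined elsewhere in this package, and a client git):
+//
+//	name := "release-*"
+//	pb, _, err := git.ProtectedBranches.ProtectRepositoryBranches(1234, &gitlab.ProtectRepositoryBranchesOptions{
+//		Name:             &name,
+//		PushAccessLevel:  gitlab.AccessLevel(gitlab.MaintainerPermissions),
+//		MergeAccessLevel: gitlab.AccessLevel(gitlab.DeveloperPermissions),
+//	})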
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/protected_branches.html#protect-repository-branches +type ProtectRepositoryBranchesOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + PushAccessLevel *AccessLevelValue `url:"push_access_level,omitempty" json:"push_access_level,omitempty"` + MergeAccessLevel *AccessLevelValue `url:"merge_access_level,omitempty" json:"merge_access_level,omitempty"` + UnprotectAccessLevel *AccessLevelValue `url:"unprotect_access_level,omitempty" json:"unprotect_access_level,omitempty"` + AllowForcePush *bool `url:"allow_force_push,omitempty" json:"allow_force_push,omitempty"` + AllowedToPush *[]*BranchPermissionOptions `url:"allowed_to_push,omitempty" json:"allowed_to_push,omitempty"` + AllowedToMerge *[]*BranchPermissionOptions `url:"allowed_to_merge,omitempty" json:"allowed_to_merge,omitempty"` + AllowedToUnprotect *[]*BranchPermissionOptions `url:"allowed_to_unprotect,omitempty" json:"allowed_to_unprotect,omitempty"` + CodeOwnerApprovalRequired *bool `url:"code_owner_approval_required,omitempty" json:"code_owner_approval_required,omitempty"` +} + +// BranchPermissionOptions represents a branch permission option. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/protected_branches.html#protect-repository-branches +type BranchPermissionOptions struct { + ID *int `url:"id,omitempty" json:"id,omitempty"` + UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` + GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` + DeployKeyID *int `url:"deploy_key_id,omitempty" json:"deploy_key_id,omitempty"` + AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` + Destroy *bool `url:"_destroy,omitempty" json:"_destroy,omitempty"` +} + +// ProtectRepositoryBranches protects a single repository branch or several +// project repository branches using a wildcard protected branch. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/protected_branches.html#protect-repository-branches +func (s *ProtectedBranchesService) ProtectRepositoryBranches(pid interface{}, opt *ProtectRepositoryBranchesOptions, options ...RequestOptionFunc) (*ProtectedBranch, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/protected_branches", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + p := new(ProtectedBranch) + resp, err := s.client.Do(req, p) + if err != nil { + return nil, resp, err + } + + return p, resp, nil +} + +// UnprotectRepositoryBranches unprotects the given protected branch or wildcard +// protected branch. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/protected_branches.html#unprotect-repository-branches +func (s *ProtectedBranchesService) UnprotectRepositoryBranches(pid interface{}, branch string, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/protected_branches/%s", PathEscape(project), url.PathEscape(branch)) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// UpdateProtectedBranchOptions represents the available +// UpdateProtectedBranch() options. 
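+//
+// Editor's sketch (not upstream documentation), assuming a client git:
+//
+//	allow := false
+//	pb, _, err := git.ProtectedBranches.UpdateProtectedBranch(1234, "main", &gitlab.UpdateProtectedBranchOptions{
+//		AllowForcePush: &allow,
+//	})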
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/protected_branches.html#update-a-protected-branch
+type UpdateProtectedBranchOptions struct {
+	Name                      *string                     `url:"name,omitempty" json:"name,omitempty"`
+	AllowForcePush            *bool                       `url:"allow_force_push,omitempty" json:"allow_force_push,omitempty"`
+	CodeOwnerApprovalRequired *bool                       `url:"code_owner_approval_required,omitempty" json:"code_owner_approval_required,omitempty"`
+	AllowedToPush             *[]*BranchPermissionOptions `url:"allowed_to_push,omitempty" json:"allowed_to_push,omitempty"`
+	AllowedToMerge            *[]*BranchPermissionOptions `url:"allowed_to_merge,omitempty" json:"allowed_to_merge,omitempty"`
+	AllowedToUnprotect        *[]*BranchPermissionOptions `url:"allowed_to_unprotect,omitempty" json:"allowed_to_unprotect,omitempty"`
+}
+
+// UpdateProtectedBranch updates a protected branch.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/protected_branches.html#update-a-protected-branch
+func (s *ProtectedBranchesService) UpdateProtectedBranch(pid interface{}, branch string, opt *UpdateProtectedBranchOptions, options ...RequestOptionFunc) (*ProtectedBranch, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/protected_branches/%s", PathEscape(project), url.PathEscape(branch))
+
+	req, err := s.client.NewRequest(http.MethodPatch, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	p := new(ProtectedBranch)
+	resp, err := s.client.Do(req, p)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return p, resp, nil
+}
+
+// RequireCodeOwnerApprovalsOptions represents the available
+// RequireCodeOwnerApprovals() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/protected_branches.html#update-a-protected-branch
+type RequireCodeOwnerApprovalsOptions struct {
+	CodeOwnerApprovalRequired *bool `url:"code_owner_approval_required,omitempty" json:"code_owner_approval_required,omitempty"`
+}
+
+// RequireCodeOwnerApprovals updates the code owner approval option.
+//
+// Deprecated: Use UpdateProtectedBranch() instead.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/protected_branches.html#update-a-protected-branch
+func (s *ProtectedBranchesService) RequireCodeOwnerApprovals(pid interface{}, branch string, opt *RequireCodeOwnerApprovalsOptions, options ...RequestOptionFunc) (*Response, error) {
+	updateOptions := &UpdateProtectedBranchOptions{
+		CodeOwnerApprovalRequired: opt.CodeOwnerApprovalRequired,
+	}
+	_, resp, err := s.UpdateProtectedBranch(pid, branch, updateOptions, options...)
+	return resp, err
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/protected_environments.go b/vendor/github.com/xanzy/go-gitlab/protected_environments.go
new file mode 100644
index 0000000000..50ee31173a
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/protected_environments.go
@@ -0,0 +1,282 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+)
+
+// ProtectedEnvironmentsService handles communication with the protected
+// environment methods of the GitLab API.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/protected_environments.html
+type ProtectedEnvironmentsService struct {
+	client *Client
+}
+
+// ProtectedEnvironment represents a protected environment.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/protected_environments.html
+type ProtectedEnvironment struct {
+	Name                  string                          `json:"name"`
+	DeployAccessLevels    []*EnvironmentAccessDescription `json:"deploy_access_levels"`
+	RequiredApprovalCount int                             `json:"required_approval_count"`
+	ApprovalRules         []*EnvironmentApprovalRule      `json:"approval_rules"`
+}
+
+// EnvironmentAccessDescription represents the access description for a
+// protected environment.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/protected_environments.html
+type EnvironmentAccessDescription struct {
+	ID                     int              `json:"id"`
+	AccessLevel            AccessLevelValue `json:"access_level"`
+	AccessLevelDescription string           `json:"access_level_description"`
+	UserID                 int              `json:"user_id"`
+	GroupID                int              `json:"group_id"`
+	GroupInheritanceType   int              `json:"group_inheritance_type"`
+}
+
+// EnvironmentApprovalRule represents the approval rules for a protected
+// environment.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/protected_environments.html#protect-a-single-environment
+type EnvironmentApprovalRule struct {
+	ID                     int              `json:"id"`
+	UserID                 int              `json:"user_id"`
+	GroupID                int              `json:"group_id"`
+	AccessLevel            AccessLevelValue `json:"access_level"`
+	AccessLevelDescription string           `json:"access_level_description"`
+	RequiredApprovalCount  int              `json:"required_approvals"`
+	GroupInheritanceType   int              `json:"group_inheritance_type"`
+}
+
+// ListProtectedEnvironmentsOptions represents the available
+// ListProtectedEnvironments() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/protected_environments.html#list-protected-environments
+type ListProtectedEnvironmentsOptions ListOptions
+
+// ListProtectedEnvironments returns a list of protected environments from a
+// project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/protected_environments.html#list-protected-environments
+func (s *ProtectedEnvironmentsService) ListProtectedEnvironments(pid interface{}, opt *ListProtectedEnvironmentsOptions, options ...RequestOptionFunc) ([]*ProtectedEnvironment, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/protected_environments", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var pes []*ProtectedEnvironment
+	resp, err := s.client.Do(req, &pes)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pes, resp, nil
+}
+
+// GetProtectedEnvironment returns a single protected environment or wildcard
+// protected environment.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/protected_environments.html#get-a-single-protected-environment
+func (s *ProtectedEnvironmentsService) GetProtectedEnvironment(pid interface{}, environment string, options ...RequestOptionFunc) (*ProtectedEnvironment, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/protected_environments/%s", PathEscape(project), PathEscape(environment))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pe := new(ProtectedEnvironment)
+	resp, err := s.client.Do(req, pe)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pe, resp, nil
+}
+
+// ProtectRepositoryEnvironmentsOptions represents the available
+// ProtectRepositoryEnvironments() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/protected_environments.html#protect-a-single-environment
+type ProtectRepositoryEnvironmentsOptions struct {
+	Name                  *string                            `url:"name,omitempty" json:"name,omitempty"`
+	DeployAccessLevels    *[]*EnvironmentAccessOptions       `url:"deploy_access_levels,omitempty" json:"deploy_access_levels,omitempty"`
+	RequiredApprovalCount *int                               `url:"required_approval_count,omitempty" json:"required_approval_count,omitempty"`
+	ApprovalRules         *[]*EnvironmentApprovalRuleOptions `url:"approval_rules,omitempty" json:"approval_rules,omitempty"`
+}
+
+// EnvironmentAccessOptions represents the options for an access description
+// for a protected environment.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/protected_environments.html#protect-a-single-environment
+type EnvironmentAccessOptions struct {
+	AccessLevel          *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"`
+	UserID               *int              `url:"user_id,omitempty" json:"user_id,omitempty"`
+	GroupID              *int              `url:"group_id,omitempty" json:"group_id,omitempty"`
+	GroupInheritanceType *int              `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"`
+}
+
+// EnvironmentApprovalRuleOptions represents the approval rules for a protected
+// environment.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/protected_environments.html#protect-a-single-environment
+type EnvironmentApprovalRuleOptions struct {
+	UserID                 *int              `url:"user_id,omitempty" json:"user_id,omitempty"`
+	GroupID                *int              `url:"group_id,omitempty" json:"group_id,omitempty"`
+	AccessLevel            *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"`
+	AccessLevelDescription *string           `url:"access_level_description,omitempty" json:"access_level_description,omitempty"`
+	RequiredApprovalCount  *int              `url:"required_approvals,omitempty" json:"required_approvals,omitempty"`
+	GroupInheritanceType   *int              `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"`
+}
+
+// ProtectRepositoryEnvironments protects a single repository environment or
+// several project repository environments using a wildcard protected
+// environment.
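+//
+// Editor's sketch, assuming a client git and the AccessLevel helper defined
+// elsewhere in this package:
+//
+//	name, approvals := "production", 1
+//	pe, _, err := git.ProtectedEnvironments.ProtectRepositoryEnvironments(1234, &gitlab.ProtectRepositoryEnvironmentsOptions{
+//		Name:                  &name,
+//		RequiredApprovalCount: &approvals,
+//		DeployAccessLevels: &[]*gitlab.EnvironmentAccessOptions{
+//			{AccessLevel: gitlab.AccessLevel(gitlab.MaintainerPermissions)},
+//		},
+//	})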
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/protected_environments.html#protect-a-single-environment
+func (s *ProtectedEnvironmentsService) ProtectRepositoryEnvironments(pid interface{}, opt *ProtectRepositoryEnvironmentsOptions, options ...RequestOptionFunc) (*ProtectedEnvironment, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/protected_environments", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pe := new(ProtectedEnvironment)
+	resp, err := s.client.Do(req, pe)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pe, resp, nil
+}
+
+// UpdateProtectedEnvironmentsOptions represents the available
+// UpdateProtectedEnvironments() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/protected_environments.html#update-a-protected-environment
+type UpdateProtectedEnvironmentsOptions struct {
+	Name                  *string                                  `url:"name,omitempty" json:"name,omitempty"`
+	DeployAccessLevels    *[]*UpdateEnvironmentAccessOptions       `url:"deploy_access_levels,omitempty" json:"deploy_access_levels,omitempty"`
+	RequiredApprovalCount *int                                     `url:"required_approval_count,omitempty" json:"required_approval_count,omitempty"`
+	ApprovalRules         *[]*UpdateEnvironmentApprovalRuleOptions `url:"approval_rules,omitempty" json:"approval_rules,omitempty"`
+}
+
+// UpdateEnvironmentAccessOptions represents the options for updates to an
+// access description for a protected environment.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/protected_environments.html#update-a-protected-environment
+type UpdateEnvironmentAccessOptions struct {
+	AccessLevel          *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"`
+	ID                   *int              `url:"id,omitempty" json:"id,omitempty"`
+	UserID               *int              `url:"user_id,omitempty" json:"user_id,omitempty"`
+	GroupID              *int              `url:"group_id,omitempty" json:"group_id,omitempty"`
+	GroupInheritanceType *int              `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"`
+	Destroy              *bool             `url:"_destroy,omitempty" json:"_destroy,omitempty"`
+}
+
+// UpdateEnvironmentApprovalRuleOptions represents the updates to the approval
+// rules for a protected environment.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/protected_environments.html#update-a-protected-environment
+type UpdateEnvironmentApprovalRuleOptions struct {
+	ID                     *int              `url:"id,omitempty" json:"id,omitempty"`
+	UserID                 *int              `url:"user_id,omitempty" json:"user_id,omitempty"`
+	GroupID                *int              `url:"group_id,omitempty" json:"group_id,omitempty"`
+	AccessLevel            *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"`
+	AccessLevelDescription *string           `url:"access_level_description,omitempty" json:"access_level_description,omitempty"`
+	RequiredApprovalCount  *int              `url:"required_approvals,omitempty" json:"required_approvals,omitempty"`
+	GroupInheritanceType   *int              `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"`
+	Destroy                *bool             `url:"_destroy,omitempty" json:"_destroy,omitempty"`
+}
+
+// UpdateProtectedEnvironments updates a single repository environment or
+// several project repository environments using a wildcard protected
+// environment.
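+//
+// Editor's sketch, assuming a client git; the ID of an existing deploy access
+// level (7 below is a placeholder) identifies the entry to update in place:
+//
+//	id := 7
+//	pe, _, err := git.ProtectedEnvironments.UpdateProtectedEnvironments(1234, "production", &gitlab.UpdateProtectedEnvironmentsOptions{
+//		DeployAccessLevels: &[]*gitlab.UpdateEnvironmentAccessOptions{
+//			{ID: &id, AccessLevel: gitlab.AccessLevel(gitlab.MaintainerPermissions)},
+//		},
+//	})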
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/protected_environments.html#update-a-protected-environment
+func (s *ProtectedEnvironmentsService) UpdateProtectedEnvironments(pid interface{}, environment string, opt *UpdateProtectedEnvironmentsOptions, options ...RequestOptionFunc) (*ProtectedEnvironment, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/protected_environments/%s", PathEscape(project), PathEscape(environment))
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pe := new(ProtectedEnvironment)
+	resp, err := s.client.Do(req, pe)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return pe, resp, nil
+}
+
+// UnprotectEnvironment unprotects the given protected environment or wildcard
+// protected environment.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/protected_environments.html#unprotect-a-single-environment
+func (s *ProtectedEnvironmentsService) UnprotectEnvironment(pid interface{}, environment string, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/protected_environments/%s", PathEscape(project), PathEscape(environment))
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/protected_tags.go b/vendor/github.com/xanzy/go-gitlab/protected_tags.go
new file mode 100644
index 0000000000..76e8ff4d2a
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/protected_tags.go
@@ -0,0 +1,176 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+)
+
+// ProtectedTagsService handles communication with the protected tag methods
+// of the GitLab API.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/protected_tags.html
+type ProtectedTagsService struct {
+	client *Client
+}
+
+// ProtectedTag represents a protected tag.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/protected_tags.html
+type ProtectedTag struct {
+	Name               string                  `json:"name"`
+	CreateAccessLevels []*TagAccessDescription `json:"create_access_levels"`
+}
+
+// TagAccessDescription represents the access description for a protected tag.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/protected_tags.html
+type TagAccessDescription struct {
+	ID                     int              `json:"id"`
+	UserID                 int              `json:"user_id"`
+	GroupID                int              `json:"group_id"`
+	AccessLevel            AccessLevelValue `json:"access_level"`
+	AccessLevelDescription string           `json:"access_level_description"`
+}
+
+// ListProtectedTagsOptions represents the available ListProtectedTags()
+// options.
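+//
+// Editor's sketch, assuming a client git; since the type is defined as
+// ListOptions, pagination fields are set directly:
+//
+//	tags, _, err := git.ProtectedTags.ListProtectedTags(1234, &gitlab.ListProtectedTagsOptions{
+//		PerPage: 50,
+//	})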
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/protected_tags.html#list-protected-tags +type ListProtectedTagsOptions ListOptions + +// ListProtectedTags returns a list of protected tags from a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/protected_tags.html#list-protected-tags +func (s *ProtectedTagsService) ListProtectedTags(pid interface{}, opt *ListProtectedTagsOptions, options ...RequestOptionFunc) ([]*ProtectedTag, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/protected_tags", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var pts []*ProtectedTag + resp, err := s.client.Do(req, &pts) + if err != nil { + return nil, resp, err + } + + return pts, resp, nil +} + +// GetProtectedTag returns a single protected tag or wildcard protected tag. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/protected_tags.html#get-a-single-protected-tag-or-wildcard-protected-tag +func (s *ProtectedTagsService) GetProtectedTag(pid interface{}, tag string, options ...RequestOptionFunc) (*ProtectedTag, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/protected_tags/%s", PathEscape(project), PathEscape(tag)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + pt := new(ProtectedTag) + resp, err := s.client.Do(req, pt) + if err != nil { + return nil, resp, err + } + + return pt, resp, nil +} + +// ProtectRepositoryTagsOptions represents the available ProtectRepositoryTags() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/protected_tags.html#protect-repository-tags +type ProtectRepositoryTagsOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + CreateAccessLevel *AccessLevelValue `url:"create_access_level,omitempty" json:"create_access_level,omitempty"` + AllowedToCreate *[]*TagsPermissionOptions `url:"allowed_to_create,omitempty" json:"allowed_to_create,omitempty"` +} + +// TagsPermissionOptions represents a protected tag permission option. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/protected_tags.html#protect-repository-tags +type TagsPermissionOptions struct { + UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` + GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` + AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` +} + +// ProtectRepositoryTags protects a single repository tag or several project +// repository tags using a wildcard protected tag. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/protected_tags.html#protect-repository-tags +func (s *ProtectedTagsService) ProtectRepositoryTags(pid interface{}, opt *ProtectRepositoryTagsOptions, options ...RequestOptionFunc) (*ProtectedTag, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/protected_tags", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + pt := new(ProtectedTag) + resp, err := s.client.Do(req, pt) + if err != nil { + return nil, resp, err + } + + return pt, resp, nil +} + +// UnprotectRepositoryTags unprotects the given protected tag or wildcard +// protected tag. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/protected_tags.html#unprotect-repository-tags +func (s *ProtectedTagsService) UnprotectRepositoryTags(pid interface{}, tag string, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/protected_tags/%s", PathEscape(project), PathEscape(tag)) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/releaselinks.go b/vendor/github.com/xanzy/go-gitlab/releaselinks.go new file mode 100644 index 0000000000..8cde15f512 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/releaselinks.go @@ -0,0 +1,201 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" +) + +// ReleaseLinksService handles communication with the release link methods +// of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/releases/links.html +type ReleaseLinksService struct { + client *Client +} + +// ReleaseLink represents a release link. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/releases/links.html +type ReleaseLink struct { + ID int `json:"id"` + Name string `json:"name"` + URL string `json:"url"` + DirectAssetURL string `json:"direct_asset_url"` + External bool `json:"external"` + LinkType LinkTypeValue `json:"link_type"` +} + +// ListReleaseLinksOptions represents ListReleaseLinks() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/releases/links.html#list-links-of-a-release +type ListReleaseLinksOptions ListOptions + +// ListReleaseLinks gets assets as links from a Release. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/releases/links.html#list-links-of-a-release +func (s *ReleaseLinksService) ListReleaseLinks(pid interface{}, tagName string, opt *ListReleaseLinksOptions, options ...RequestOptionFunc) ([]*ReleaseLink, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/releases/%s/assets/links", PathEscape(project), PathEscape(tagName)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var rls []*ReleaseLink + resp, err := s.client.Do(req, &rls) + if err != nil { + return nil, resp, err + } + + return rls, resp, nil +} + +// GetReleaseLink returns a link from release assets. 
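+//
+// Editor's sketch, assuming a client git; 7 is a placeholder link ID returned
+// from a previous Create or List call:
+//
+//	rl, _, err := git.ReleaseLinks.GetReleaseLink(1234, "v1.2.0", 7)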
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/releases/links.html#get-a-release-link
+func (s *ReleaseLinksService) GetReleaseLink(pid interface{}, tagName string, link int, options ...RequestOptionFunc) (*ReleaseLink, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/releases/%s/assets/links/%d",
+		PathEscape(project),
+		PathEscape(tagName),
+		link)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	rl := new(ReleaseLink)
+	resp, err := s.client.Do(req, rl)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return rl, resp, nil
+}
+
+// CreateReleaseLinkOptions represents CreateReleaseLink() options.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/releases/links.html#create-a-release-link
+type CreateReleaseLinkOptions struct {
+	Name            *string        `url:"name,omitempty" json:"name,omitempty"`
+	URL             *string        `url:"url,omitempty" json:"url,omitempty"`
+	FilePath        *string        `url:"filepath,omitempty" json:"filepath,omitempty"`
+	DirectAssetPath *string        `url:"direct_asset_path,omitempty" json:"direct_asset_path,omitempty"`
+	LinkType        *LinkTypeValue `url:"link_type,omitempty" json:"link_type,omitempty"`
+}
+
+// CreateReleaseLink creates a link.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/releases/links.html#create-a-release-link
+func (s *ReleaseLinksService) CreateReleaseLink(pid interface{}, tagName string, opt *CreateReleaseLinkOptions, options ...RequestOptionFunc) (*ReleaseLink, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/releases/%s/assets/links", PathEscape(project), PathEscape(tagName))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	rl := new(ReleaseLink)
+	resp, err := s.client.Do(req, rl)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return rl, resp, nil
+}
+
+// UpdateReleaseLinkOptions represents UpdateReleaseLink() options.
+//
+// You have to specify at least one of Name or URL.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/releases/links.html#update-a-release-link
+type UpdateReleaseLinkOptions struct {
+	Name            *string        `url:"name,omitempty" json:"name,omitempty"`
+	URL             *string        `url:"url,omitempty" json:"url,omitempty"`
+	FilePath        *string        `url:"filepath,omitempty" json:"filepath,omitempty"`
+	DirectAssetPath *string        `url:"direct_asset_path,omitempty" json:"direct_asset_path,omitempty"`
+	LinkType        *LinkTypeValue `url:"link_type,omitempty" json:"link_type,omitempty"`
+}
+
+// UpdateReleaseLink updates an asset link.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/releases/links.html#update-a-release-link
+func (s *ReleaseLinksService) UpdateReleaseLink(pid interface{}, tagName string, link int, opt *UpdateReleaseLinkOptions, options ...RequestOptionFunc) (*ReleaseLink, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/releases/%s/assets/links/%d",
+		PathEscape(project),
+		PathEscape(tagName),
+		link)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	rl := new(ReleaseLink)
+	resp, err := s.client.Do(req, rl)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return rl, resp, nil
+}
+
+// DeleteReleaseLink deletes a link from a release.
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/releases/links.html#delete-a-release-link +func (s *ReleaseLinksService) DeleteReleaseLink(pid interface{}, tagName string, link int, options ...RequestOptionFunc) (*ReleaseLink, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/releases/%s/assets/links/%d", + PathEscape(project), + PathEscape(tagName), + link, + ) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, nil, err + } + + rl := new(ReleaseLink) + resp, err := s.client.Do(req, rl) + if err != nil { + return nil, resp, err + } + + return rl, resp, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/releases.go b/vendor/github.com/xanzy/go-gitlab/releases.go new file mode 100644 index 0000000000..97cbff7bb9 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/releases.go @@ -0,0 +1,281 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" + "time" +) + +// ReleasesService handles communication with the releases methods +// of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/releases/index.html +type ReleasesService struct { + client *Client +} + +// Release represents a project release. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/releases/index.html#list-releases +type Release struct { + TagName string `json:"tag_name"` + Name string `json:"name"` + Description string `json:"description"` + DescriptionHTML string `json:"description_html"` + CreatedAt *time.Time `json:"created_at"` + ReleasedAt *time.Time `json:"released_at"` + Author struct { + ID int `json:"id"` + Name string `json:"name"` + Username string `json:"username"` + State string `json:"state"` + AvatarURL string `json:"avatar_url"` + WebURL string `json:"web_url"` + } `json:"author"` + Commit Commit `json:"commit"` + UpcomingRelease bool `json:"upcoming_release"` + CommitPath string `json:"commit_path"` + TagPath string `json:"tag_path"` + Assets struct { + Count int `json:"count"` + Sources []struct { + Format string `json:"format"` + URL string `json:"url"` + } `json:"sources"` + Links []*ReleaseLink `json:"links"` + } `json:"assets"` + Links struct { + ClosedIssueURL string `json:"closed_issues_url"` + ClosedMergeRequest string `json:"closed_merge_requests_url"` + EditURL string `json:"edit_url"` + MergedMergeRequest string `json:"merged_merge_requests_url"` + OpenedIssues string `json:"opened_issues_url"` + OpenedMergeRequest string `json:"opened_merge_requests_url"` + Self string `json:"self"` + } `json:"_links"` +} + +// ListReleasesOptions represents ListReleases() options. 
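+//
+// Editor's sketch, assuming a client git:
+//
+//	orderBy, sort := "released_at", "desc"
+//	releases, _, err := git.Releases.ListReleases(1234, &gitlab.ListReleasesOptions{
+//		OrderBy: &orderBy,
+//		Sort:    &sort,
+//	})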
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/releases/index.html#list-releases
+type ListReleasesOptions struct {
+	ListOptions
+	OrderBy                *string `url:"order_by,omitempty" json:"order_by,omitempty"`
+	Sort                   *string `url:"sort,omitempty" json:"sort,omitempty"`
+	IncludeHTMLDescription *bool   `url:"include_html_description,omitempty" json:"include_html_description,omitempty"`
+}
+
+// ListReleases gets a paginated list of releases accessible by the
+// authenticated user.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/releases/index.html#list-releases
+func (s *ReleasesService) ListReleases(pid interface{}, opt *ListReleasesOptions, options ...RequestOptionFunc) ([]*Release, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/releases", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var rs []*Release
+	resp, err := s.client.Do(req, &rs)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return rs, resp, nil
+}
+
+// GetRelease returns a single release, identified by a tag name.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/releases/index.html#get-a-release-by-a-tag-name
+func (s *ReleasesService) GetRelease(pid interface{}, tagName string, options ...RequestOptionFunc) (*Release, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/releases/%s", PathEscape(project), PathEscape(tagName))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	r := new(Release)
+	resp, err := s.client.Do(req, r)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return r, resp, nil
+}
+
+// GetLatestRelease returns the latest release for the project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/releases/#get-the-latest-release
+func (s *ReleasesService) GetLatestRelease(pid interface{}, options ...RequestOptionFunc) (*Release, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/releases/permalink/latest", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	r := new(Release)
+	resp, err := s.client.Do(req, r)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return r, resp, err
+}
+
+// CreateReleaseOptions represents CreateRelease() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/releases/index.html#create-a-release
+type CreateReleaseOptions struct {
+	Name        *string               `url:"name,omitempty" json:"name,omitempty"`
+	TagName     *string               `url:"tag_name,omitempty" json:"tag_name,omitempty"`
+	TagMessage  *string               `url:"tag_message,omitempty" json:"tag_message,omitempty"`
+	Description *string               `url:"description,omitempty" json:"description,omitempty"`
+	Ref         *string               `url:"ref,omitempty" json:"ref,omitempty"`
+	Milestones  *[]string             `url:"milestones,omitempty" json:"milestones,omitempty"`
+	Assets      *ReleaseAssetsOptions `url:"assets,omitempty" json:"assets,omitempty"`
+	ReleasedAt  *time.Time            `url:"released_at,omitempty" json:"released_at,omitempty"`
+}
+
+// ReleaseAssetsOptions represents release assets in CreateRelease() options.
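+//
+// Editor's sketch of creating a release with one asset link, assuming a
+// client git; all values are placeholders:
+//
+//	tag, desc := "v1.2.0", "Bug fix release"
+//	linkName, linkURL := "linux-amd64", "https://example.com/artifacts/v1.2.0/app"
+//	rel, _, err := git.Releases.CreateRelease(1234, &gitlab.CreateReleaseOptions{
+//		TagName:     &tag,
+//		Description: &desc,
+//		Assets: &gitlab.ReleaseAssetsOptions{
+//			Links: []*gitlab.ReleaseAssetLinkOptions{{Name: &linkName, URL: &linkURL}},
+//		},
+//	})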
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/releases/index.html#create-a-release +type ReleaseAssetsOptions struct { + Links []*ReleaseAssetLinkOptions `url:"links,omitempty" json:"links,omitempty"` +} + +// ReleaseAssetLinkOptions represents release asset link in CreateRelease() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/releases/index.html#create-a-release +type ReleaseAssetLinkOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + URL *string `url:"url,omitempty" json:"url,omitempty"` + FilePath *string `url:"filepath,omitempty" json:"filepath,omitempty"` + DirectAssetPath *string `url:"direct_asset_path,omitempty" json:"direct_asset_path,omitempty"` + LinkType *LinkTypeValue `url:"link_type,omitempty" json:"link_type,omitempty"` +} + +// CreateRelease creates a release. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/releases/index.html#create-a-release +func (s *ReleasesService) CreateRelease(pid interface{}, opts *CreateReleaseOptions, options ...RequestOptionFunc) (*Release, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/releases", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPost, u, opts, options) + if err != nil { + return nil, nil, err + } + + r := new(Release) + resp, err := s.client.Do(req, r) + if err != nil { + return nil, resp, err + } + + return r, resp, nil +} + +// UpdateReleaseOptions represents UpdateRelease() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/releases/index.html#update-a-release +type UpdateReleaseOptions struct { + Name *string `url:"name" json:"name"` + Description *string `url:"description" json:"description"` + Milestones *[]string `url:"milestones,omitempty" json:"milestones,omitempty"` + ReleasedAt *time.Time `url:"released_at,omitempty" json:"released_at,omitempty"` +} + +// UpdateRelease updates a release. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/releases/index.html#update-a-release +func (s *ReleasesService) UpdateRelease(pid interface{}, tagName string, opts *UpdateReleaseOptions, options ...RequestOptionFunc) (*Release, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/releases/%s", PathEscape(project), PathEscape(tagName)) + + req, err := s.client.NewRequest(http.MethodPut, u, opts, options) + if err != nil { + return nil, nil, err + } + + r := new(Release) + resp, err := s.client.Do(req, &r) + if err != nil { + return nil, resp, err + } + + return r, resp, nil +} + +// DeleteRelease deletes a release. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/releases/index.html#delete-a-release +func (s *ReleasesService) DeleteRelease(pid interface{}, tagName string, options ...RequestOptionFunc) (*Release, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/releases/%s", PathEscape(project), PathEscape(tagName)) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, nil, err + } + + r := new(Release) + resp, err := s.client.Do(req, r) + if err != nil { + return nil, resp, err + } + + return r, resp, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/repositories.go b/vendor/github.com/xanzy/go-gitlab/repositories.go new file mode 100644 index 0000000000..dde8761749 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/repositories.go @@ -0,0 +1,421 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "bytes" + "fmt" + "io" + "net/http" + "net/url" +) + +// RepositoriesService handles communication with the repositories related +// methods of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/repositories.html +type RepositoriesService struct { + client *Client +} + +// TreeNode represents a GitLab repository file or directory. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/repositories.html +type TreeNode struct { + ID string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + Path string `json:"path"` + Mode string `json:"mode"` +} + +func (t TreeNode) String() string { + return Stringify(t) +} + +// ListTreeOptions represents the available ListTree() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/repositories.html#list-repository-tree +type ListTreeOptions struct { + ListOptions + Path *string `url:"path,omitempty" json:"path,omitempty"` + Ref *string `url:"ref,omitempty" json:"ref,omitempty"` + Recursive *bool `url:"recursive,omitempty" json:"recursive,omitempty"` +} + +// ListTree gets a list of repository files and directories in a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/repositories.html#list-repository-tree +func (s *RepositoriesService) ListTree(pid interface{}, opt *ListTreeOptions, options ...RequestOptionFunc) ([]*TreeNode, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/repository/tree", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var t []*TreeNode + resp, err := s.client.Do(req, &t) + if err != nil { + return nil, resp, err + } + + return t, resp, nil +} + +// Blob gets information about blob in repository like size and content. Note +// that blob content is Base64 encoded. 
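+//
+// Editor's sketch, assuming a client git and a full blob SHA in sha; the
+// returned bytes are the raw JSON response body, so RawBlobContent (below) is
+// the more convenient call when only the decoded file contents are needed:
+//
+//	body, _, err := git.Repositories.Blob(1234, sha)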
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/repositories.html#get-a-blob-from-repository +func (s *RepositoriesService) Blob(pid interface{}, sha string, options ...RequestOptionFunc) ([]byte, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/repository/blobs/%s", PathEscape(project), url.PathEscape(sha)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + var b bytes.Buffer + resp, err := s.client.Do(req, &b) + if err != nil { + return nil, resp, err + } + + return b.Bytes(), resp, err +} + +// RawBlobContent gets the raw file contents for a blob by blob SHA. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/repositories.html#raw-blob-content +func (s *RepositoriesService) RawBlobContent(pid interface{}, sha string, options ...RequestOptionFunc) ([]byte, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/repository/blobs/%s/raw", PathEscape(project), url.PathEscape(sha)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + var b bytes.Buffer + resp, err := s.client.Do(req, &b) + if err != nil { + return nil, resp, err + } + + return b.Bytes(), resp, err +} + +// ArchiveOptions represents the available Archive() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/repositories.html#get-file-archive +type ArchiveOptions struct { + Format *string `url:"-" json:"-"` + Path *string `url:"path,omitempty" json:"path,omitempty"` + SHA *string `url:"sha,omitempty" json:"sha,omitempty"` +} + +// Archive gets an archive of the repository. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/repositories.html#get-file-archive +func (s *RepositoriesService) Archive(pid interface{}, opt *ArchiveOptions, options ...RequestOptionFunc) ([]byte, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/repository/archive", PathEscape(project)) + + // Set an optional format for the archive. + if opt != nil && opt.Format != nil { + u = fmt.Sprintf("%s.%s", u, *opt.Format) + } + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var b bytes.Buffer + resp, err := s.client.Do(req, &b) + if err != nil { + return nil, resp, err + } + + return b.Bytes(), resp, err +} + +// StreamArchive streams an archive of the repository to the provided +// io.Writer. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/repositories.html#get-file-archive +func (s *RepositoriesService) StreamArchive(pid interface{}, w io.Writer, opt *ArchiveOptions, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/repository/archive", PathEscape(project)) + + // Set an optional format for the archive. + if opt != nil && opt.Format != nil { + u = fmt.Sprintf("%s.%s", u, *opt.Format) + } + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, w) +} + +// Compare represents the result of a comparison of branches, tags or commits. 
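+//
+// Editor's sketch, assuming a client git:
+//
+//	from, to := "main", "feature/login"
+//	cmp, _, err := git.Repositories.Compare(1234, &gitlab.CompareOptions{From: &from, To: &to})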
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/repositories.html#compare-branches-tags-or-commits
+type Compare struct {
+	Commit         *Commit   `json:"commit"`
+	Commits        []*Commit `json:"commits"`
+	Diffs          []*Diff   `json:"diffs"`
+	CompareTimeout bool      `json:"compare_timeout"`
+	CompareSameRef bool      `json:"compare_same_ref"`
+}
+
+func (c Compare) String() string {
+	return Stringify(c)
+}
+
+// CompareOptions represents the available Compare() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/repositories.html#compare-branches-tags-or-commits
+type CompareOptions struct {
+	From     *string `url:"from,omitempty" json:"from,omitempty"`
+	To       *string `url:"to,omitempty" json:"to,omitempty"`
+	Straight *bool   `url:"straight,omitempty" json:"straight,omitempty"`
+	Unidiff  *bool   `url:"unidiff,omitempty" json:"unidiff,omitempty"`
+}
+
+// Compare compares branches, tags or commits.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/repositories.html#compare-branches-tags-or-commits
+func (s *RepositoriesService) Compare(pid interface{}, opt *CompareOptions, options ...RequestOptionFunc) (*Compare, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/repository/compare", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	c := new(Compare)
+	resp, err := s.client.Do(req, c)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return c, resp, nil
+}
+
+// Contributor represents a GitLab contributor.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/repositories.html#contributors
+type Contributor struct {
+	Name      string `json:"name"`
+	Email     string `json:"email"`
+	Commits   int    `json:"commits"`
+	Additions int    `json:"additions"`
+	Deletions int    `json:"deletions"`
+}
+
+func (c Contributor) String() string {
+	return Stringify(c)
+}
+
+// ListContributorsOptions represents the available ListContributors() options.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/repositories.html#contributors
+type ListContributorsOptions struct {
+	ListOptions
+	OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"`
+	Sort    *string `url:"sort,omitempty" json:"sort,omitempty"`
+}
+
+// Contributors gets the repository contributors list.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/repositories.html#contributors
+func (s *RepositoriesService) Contributors(pid interface{}, opt *ListContributorsOptions, options ...RequestOptionFunc) ([]*Contributor, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/repository/contributors", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var c []*Contributor
+	resp, err := s.client.Do(req, &c)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return c, resp, nil
+}
+
+// MergeBaseOptions represents the available MergeBase() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/repositories.html#merge-base
+type MergeBaseOptions struct {
+	Ref *[]string `url:"refs[],omitempty" json:"refs,omitempty"`
+}
+
+// MergeBase gets the common ancestor for two refs (commit SHAs, branch
+// names or tags).
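+//
+// Editor's sketch, assuming a client git; note that the refs are passed as a
+// pointer to a slice:
+//
+//	refs := []string{"main", "feature/login"}
+//	base, _, err := git.Repositories.MergeBase(1234, &gitlab.MergeBaseOptions{Ref: &refs})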
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/repositories.html#merge-base +func (s *RepositoriesService) MergeBase(pid interface{}, opt *MergeBaseOptions, options ...RequestOptionFunc) (*Commit, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/repository/merge_base", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + c := new(Commit) + resp, err := s.client.Do(req, c) + if err != nil { + return nil, resp, err + } + + return c, resp, nil +} + +// AddChangelogOptions represents the available AddChangelog() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/repositories.html#add-changelog-data-to-a-changelog-file +type AddChangelogOptions struct { + Version *string `url:"version,omitempty" json:"version,omitempty"` + Branch *string `url:"branch,omitempty" json:"branch,omitempty"` + ConfigFile *string `url:"config_file,omitempty" json:"config_file,omitempty"` + Date *ISOTime `url:"date,omitempty" json:"date,omitempty"` + File *string `url:"file,omitempty" json:"file,omitempty"` + From *string `url:"from,omitempty" json:"from,omitempty"` + Message *string `url:"message,omitempty" json:"message,omitempty"` + To *string `url:"to,omitempty" json:"to,omitempty"` + Trailer *string `url:"trailer,omitempty" json:"trailer,omitempty"` +} + +// AddChangelog generates changelog data based on commits in a repository. +// +// Gitlab API docs: +// https://docs.gitlab.com/ee/api/repositories.html#add-changelog-data-to-a-changelog-file +func (s *RepositoriesService) AddChangelog(pid interface{}, opt *AddChangelogOptions, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/repository/changelog", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// ChangelogData represents the generated changelog data. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/repositories.html#generate-changelog-data +type ChangelogData struct { + Notes string `json:"notes"` +} + +func (c ChangelogData) String() string { + return Stringify(c) +} + +// GenerateChangelogDataOptions represents the available GenerateChangelogData() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/repositories.html#generate-changelog-data +type GenerateChangelogDataOptions struct { + Version *string `url:"version,omitempty" json:"version,omitempty"` + ConfigFile *string `url:"config_file,omitempty" json:"config_file,omitempty"` + Date *ISOTime `url:"date,omitempty" json:"date,omitempty"` + From *string `url:"from,omitempty" json:"from,omitempty"` + To *string `url:"to,omitempty" json:"to,omitempty"` + Trailer *string `url:"trailer,omitempty" json:"trailer,omitempty"` +} + +// GenerateChangelogData generates changelog data based on commits in a +// repository, without committing them to a changelog file. 
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/repositories.html#generate-changelog-data
+func (s *RepositoriesService) GenerateChangelogData(pid interface{}, opt GenerateChangelogDataOptions, options ...RequestOptionFunc) (*ChangelogData, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/repository/changelog", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	cd := new(ChangelogData)
+	resp, err := s.client.Do(req, cd)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return cd, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/repository_files.go b/vendor/github.com/xanzy/go-gitlab/repository_files.go
new file mode 100644
index 0000000000..7ffaa93b56
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/repository_files.go
@@ -0,0 +1,385 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+	"strconv"
+	"time"
+)
+
+// RepositoryFilesService handles communication with the repository files
+// related methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/repository_files.html
+type RepositoryFilesService struct {
+	client *Client
+}
+
+// File represents a GitLab repository file.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/repository_files.html
+type File struct {
+	FileName        string `json:"file_name"`
+	FilePath        string `json:"file_path"`
+	Size            int    `json:"size"`
+	Encoding        string `json:"encoding"`
+	Content         string `json:"content"`
+	ExecuteFilemode bool   `json:"execute_filemode"`
+	Ref             string `json:"ref"`
+	BlobID          string `json:"blob_id"`
+	CommitID        string `json:"commit_id"`
+	SHA256          string `json:"content_sha256"`
+	LastCommitID    string `json:"last_commit_id"`
+}
+
+func (r File) String() string {
+	return Stringify(r)
+}
+
+// GetFileOptions represents the available GetFile() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/repository_files.html#get-file-from-repository
+type GetFileOptions struct {
+	Ref *string `url:"ref,omitempty" json:"ref,omitempty"`
+}
+
+// GetFile allows you to receive information about a file in a repository, such
+// as its name, size, and content. Note that file content is Base64 encoded.
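+//
+// A minimal usage sketch (illustrative; the project path, file path and ref
+// are examples, and decoding assumes the default base64 encoding):
+//
+//	f, _, err := client.RepositoryFiles.GetFile("group/project", "README.md",
+//		&gitlab.GetFileOptions{Ref: gitlab.String("main")})
+//	if err == nil {
+//		raw, _ := base64.StdEncoding.DecodeString(f.Content)
+//		fmt.Println(string(raw))
+//	}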
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/repository_files.html#get-file-from-repository
+func (s *RepositoryFilesService) GetFile(pid interface{}, fileName string, opt *GetFileOptions, options ...RequestOptionFunc) (*File, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf(
+		"projects/%s/repository/files/%s",
+		PathEscape(project),
+		PathEscape(fileName),
+	)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	f := new(File)
+	resp, err := s.client.Do(req, f)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return f, resp, nil
+}
+
+// GetFileMetaDataOptions represents the available GetFileMetaData() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/repository_files.html#get-file-from-repository
+type GetFileMetaDataOptions struct {
+	Ref *string `url:"ref,omitempty" json:"ref,omitempty"`
+}
+
+// GetFileMetaData allows you to receive meta information about a file in a
+// repository, such as its name and size.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/repository_files.html#get-file-from-repository
+func (s *RepositoryFilesService) GetFileMetaData(pid interface{}, fileName string, opt *GetFileMetaDataOptions, options ...RequestOptionFunc) (*File, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf(
+		"projects/%s/repository/files/%s",
+		PathEscape(project),
+		PathEscape(fileName),
+	)
+
+	req, err := s.client.NewRequest(http.MethodHead, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	resp, err := s.client.Do(req, nil)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	f := &File{
+		BlobID:          resp.Header.Get("X-Gitlab-Blob-Id"),
+		CommitID:        resp.Header.Get("X-Gitlab-Commit-Id"),
+		Encoding:        resp.Header.Get("X-Gitlab-Encoding"),
+		FileName:        resp.Header.Get("X-Gitlab-File-Name"),
+		FilePath:        resp.Header.Get("X-Gitlab-File-Path"),
+		ExecuteFilemode: resp.Header.Get("X-Gitlab-Execute-Filemode") == "true",
+		Ref:             resp.Header.Get("X-Gitlab-Ref"),
+		SHA256:          resp.Header.Get("X-Gitlab-Content-Sha256"),
+		LastCommitID:    resp.Header.Get("X-Gitlab-Last-Commit-Id"),
+	}
+
+	if sizeString := resp.Header.Get("X-Gitlab-Size"); sizeString != "" {
+		f.Size, err = strconv.Atoi(sizeString)
+		if err != nil {
+			return nil, resp, err
+		}
+	}
+
+	return f, resp, nil
+}
+
+// FileBlameRange represents one item of blame information.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/repository_files.html
+type FileBlameRange struct {
+	Commit struct {
+		ID             string     `json:"id"`
+		ParentIDs      []string   `json:"parent_ids"`
+		Message        string     `json:"message"`
+		AuthoredDate   *time.Time `json:"authored_date"`
+		AuthorName     string     `json:"author_name"`
+		AuthorEmail    string     `json:"author_email"`
+		CommittedDate  *time.Time `json:"committed_date"`
+		CommitterName  string     `json:"committer_name"`
+		CommitterEmail string     `json:"committer_email"`
+	} `json:"commit"`
+	Lines []string `json:"lines"`
+}
+
+func (b FileBlameRange) String() string {
+	return Stringify(b)
+}
+
+// GetFileBlameOptions represents the available GetFileBlame() options.
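+//
+// A minimal usage sketch (illustrative; the ref and line range are examples):
+//
+//	opt := &gitlab.GetFileBlameOptions{
+//		Ref:        gitlab.String("main"),
+//		RangeStart: gitlab.Int(1),
+//		RangeEnd:   gitlab.Int(20),
+//	}
+//	ranges, _, err := client.RepositoryFiles.GetFileBlame("group/project", "main.go", opt)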
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/repository_files.html#get-file-blame-from-repository
+type GetFileBlameOptions struct {
+	Ref        *string `url:"ref,omitempty" json:"ref,omitempty"`
+	RangeStart *int    `url:"range[start],omitempty" json:"range[start],omitempty"`
+	RangeEnd   *int    `url:"range[end],omitempty" json:"range[end],omitempty"`
+}
+
+// GetFileBlame allows you to receive blame information. Each blame range
+// contains lines and corresponding commit info.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/repository_files.html#get-file-blame-from-repository
+func (s *RepositoryFilesService) GetFileBlame(pid interface{}, file string, opt *GetFileBlameOptions, options ...RequestOptionFunc) ([]*FileBlameRange, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf(
+		"projects/%s/repository/files/%s/blame",
+		PathEscape(project),
+		PathEscape(file),
+	)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var br []*FileBlameRange
+	resp, err := s.client.Do(req, &br)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return br, resp, nil
+}
+
+// GetRawFileOptions represents the available GetRawFile() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/repository_files.html#get-raw-file-from-repository
type GetRawFileOptions struct {
+	Ref *string `url:"ref,omitempty" json:"ref,omitempty"`
+	LFS *bool   `url:"lfs,omitempty" json:"lfs,omitempty"`
+}
+
+// GetRawFile allows you to receive the raw file from the repository.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/repository_files.html#get-raw-file-from-repository
+func (s *RepositoryFilesService) GetRawFile(pid interface{}, fileName string, opt *GetRawFileOptions, options ...RequestOptionFunc) ([]byte, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf(
+		"projects/%s/repository/files/%s/raw",
+		PathEscape(project),
+		PathEscape(fileName),
+	)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var f bytes.Buffer
+	resp, err := s.client.Do(req, &f)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return f.Bytes(), resp, err
+}
+
+// FileInfo represents file details of a GitLab repository file.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/repository_files.html
+type FileInfo struct {
+	FilePath string `json:"file_path"`
+	Branch   string `json:"branch"`
+}
+
+func (r FileInfo) String() string {
+	return Stringify(r)
+}
+
+// CreateFileOptions represents the available CreateFile() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/repository_files.html#create-new-file-in-repository
+type CreateFileOptions struct {
+	Branch          *string `url:"branch,omitempty" json:"branch,omitempty"`
+	StartBranch     *string `url:"start_branch,omitempty" json:"start_branch,omitempty"`
+	Encoding        *string `url:"encoding,omitempty" json:"encoding,omitempty"`
+	AuthorEmail     *string `url:"author_email,omitempty" json:"author_email,omitempty"`
+	AuthorName      *string `url:"author_name,omitempty" json:"author_name,omitempty"`
+	Content         *string `url:"content,omitempty" json:"content,omitempty"`
+	CommitMessage   *string `url:"commit_message,omitempty" json:"commit_message,omitempty"`
+	ExecuteFilemode *bool   `url:"execute_filemode,omitempty" json:"execute_filemode,omitempty"`
+}
+
+// CreateFile creates a new file in a repository.
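+//
+// A minimal usage sketch (illustrative; the branch, content and commit
+// message are examples):
+//
+//	opt := &gitlab.CreateFileOptions{
+//		Branch:        gitlab.String("main"),
+//		Content:       gitlab.String("package main\n"),
+//		CommitMessage: gitlab.String("Add main.go"),
+//	}
+//	info, _, err := client.RepositoryFiles.CreateFile("group/project", "main.go", opt)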
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/repository_files.html#create-new-file-in-repository
+func (s *RepositoryFilesService) CreateFile(pid interface{}, fileName string, opt *CreateFileOptions, options ...RequestOptionFunc) (*FileInfo, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf(
+		"projects/%s/repository/files/%s",
+		PathEscape(project),
+		PathEscape(fileName),
+	)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	f := new(FileInfo)
+	resp, err := s.client.Do(req, f)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return f, resp, nil
+}
+
+// UpdateFileOptions represents the available UpdateFile() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/repository_files.html#update-existing-file-in-repository
+type UpdateFileOptions struct {
+	Branch          *string `url:"branch,omitempty" json:"branch,omitempty"`
+	StartBranch     *string `url:"start_branch,omitempty" json:"start_branch,omitempty"`
+	Encoding        *string `url:"encoding,omitempty" json:"encoding,omitempty"`
+	AuthorEmail     *string `url:"author_email,omitempty" json:"author_email,omitempty"`
+	AuthorName      *string `url:"author_name,omitempty" json:"author_name,omitempty"`
+	Content         *string `url:"content,omitempty" json:"content,omitempty"`
+	CommitMessage   *string `url:"commit_message,omitempty" json:"commit_message,omitempty"`
+	LastCommitID    *string `url:"last_commit_id,omitempty" json:"last_commit_id,omitempty"`
+	ExecuteFilemode *bool   `url:"execute_filemode,omitempty" json:"execute_filemode,omitempty"`
+}
+
+// UpdateFile updates an existing file in a repository.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/repository_files.html#update-existing-file-in-repository
+func (s *RepositoryFilesService) UpdateFile(pid interface{}, fileName string, opt *UpdateFileOptions, options ...RequestOptionFunc) (*FileInfo, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf(
+		"projects/%s/repository/files/%s",
+		PathEscape(project),
+		PathEscape(fileName),
+	)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	f := new(FileInfo)
+	resp, err := s.client.Do(req, f)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return f, resp, nil
+}
+
+// DeleteFileOptions represents the available DeleteFile() options.
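+//
+// A minimal usage sketch (illustrative; the branch, file path and commit
+// message are examples):
+//
+//	opt := &gitlab.DeleteFileOptions{
+//		Branch:        gitlab.String("main"),
+//		CommitMessage: gitlab.String("Remove obsolete file"),
+//	}
+//	_, err := client.RepositoryFiles.DeleteFile("group/project", "old.go", opt)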
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/repository_files.html#delete-existing-file-in-repository
+type DeleteFileOptions struct {
+	Branch        *string `url:"branch,omitempty" json:"branch,omitempty"`
+	StartBranch   *string `url:"start_branch,omitempty" json:"start_branch,omitempty"`
+	AuthorEmail   *string `url:"author_email,omitempty" json:"author_email,omitempty"`
+	AuthorName    *string `url:"author_name,omitempty" json:"author_name,omitempty"`
+	CommitMessage *string `url:"commit_message,omitempty" json:"commit_message,omitempty"`
+	LastCommitID  *string `url:"last_commit_id,omitempty" json:"last_commit_id,omitempty"`
+}
+
+// DeleteFile deletes an existing file in a repository.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/repository_files.html#delete-existing-file-in-repository
+func (s *RepositoryFilesService) DeleteFile(pid interface{}, fileName string, opt *DeleteFileOptions, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf(
+		"projects/%s/repository/files/%s",
+		PathEscape(project),
+		PathEscape(fileName),
+	)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, opt, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/repository_submodules.go b/vendor/github.com/xanzy/go-gitlab/repository_submodules.go
new file mode 100644
index 0000000000..7022a45521
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/repository_submodules.go
@@ -0,0 +1,93 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// RepositorySubmodulesService handles communication with the repository
+// submodules related methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/repository_submodules.html
+type RepositorySubmodulesService struct {
+	client *Client
+}
+
+// SubmoduleCommit represents a GitLab submodule commit.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/repository_submodules.html
+type SubmoduleCommit struct {
+	ID             string           `json:"id"`
+	ShortID        string           `json:"short_id"`
+	Title          string           `json:"title"`
+	AuthorName     string           `json:"author_name"`
+	AuthorEmail    string           `json:"author_email"`
+	CommitterName  string           `json:"committer_name"`
+	CommitterEmail string           `json:"committer_email"`
+	CreatedAt      *time.Time       `json:"created_at"`
+	Message        string           `json:"message"`
+	ParentIDs      []string         `json:"parent_ids"`
+	CommittedDate  *time.Time       `json:"committed_date"`
+	AuthoredDate   *time.Time       `json:"authored_date"`
+	Status         *BuildStateValue `json:"status"`
+}
+
+func (r SubmoduleCommit) String() string {
+	return Stringify(r)
+}
+
+// UpdateSubmoduleOptions represents the available UpdateSubmodule() options.
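+//
+// A minimal usage sketch (illustrative; the branch, commit SHA and submodule
+// path are examples):
+//
+//	opt := &gitlab.UpdateSubmoduleOptions{
+//		Branch:    gitlab.String("main"),
+//		CommitSHA: gitlab.String("3ddec28ea23acc5caa5d8331a6ecb2a65fc03e88"),
+//	}
+//	sc, _, err := client.RepositorySubmodules.UpdateSubmodule("group/project", "lib/dep", opt)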
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/repository_submodules.html#update-existing-submodule-reference-in-repository
+type UpdateSubmoduleOptions struct {
+	Branch        *string `url:"branch,omitempty" json:"branch,omitempty"`
+	CommitSHA     *string `url:"commit_sha,omitempty" json:"commit_sha,omitempty"`
+	CommitMessage *string `url:"commit_message,omitempty" json:"commit_message,omitempty"`
+}
+
+// UpdateSubmodule updates an existing submodule reference.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/repository_submodules.html#update-existing-submodule-reference-in-repository
+func (s *RepositorySubmodulesService) UpdateSubmodule(pid interface{}, submodule string, opt *UpdateSubmoduleOptions, options ...RequestOptionFunc) (*SubmoduleCommit, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf(
+		"projects/%s/repository/submodules/%s",
+		PathEscape(project),
+		PathEscape(submodule),
+	)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	sc := new(SubmoduleCommit)
+	resp, err := s.client.Do(req, sc)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return sc, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/request_options.go b/vendor/github.com/xanzy/go-gitlab/request_options.go
new file mode 100644
index 0000000000..d158047f69
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/request_options.go
@@ -0,0 +1,102 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"context"
+	"net/url"
+
+	retryablehttp "github.com/hashicorp/go-retryablehttp"
+)
+
+// RequestOptionFunc can be passed to all API requests to customize the API request.
+type RequestOptionFunc func(*retryablehttp.Request) error
+
+// WithContext runs the request with the provided context.
+func WithContext(ctx context.Context) RequestOptionFunc {
+	return func(req *retryablehttp.Request) error {
+		*req = *req.WithContext(ctx)
+		return nil
+	}
+}
+
+// WithHeader takes a header name and value and appends it to the request headers.
+func WithHeader(name, value string) RequestOptionFunc {
+	return func(req *retryablehttp.Request) error {
+		req.Header.Set(name, value)
+		return nil
+	}
+}
+
+// WithHeaders takes a map of header name/value pairs and appends them to the
+// request headers.
+func WithHeaders(headers map[string]string) RequestOptionFunc {
+	return func(req *retryablehttp.Request) error {
+		for k, v := range headers {
+			req.Header.Set(k, v)
+		}
+		return nil
+	}
+}
+
+// WithKeysetPaginationParameters takes a "next" link from the Link header of a
+// response to a keyset-based paginated request and modifies the values of each
+// query parameter in the request with its corresponding response parameter.
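+//
+// A minimal usage sketch (illustrative; assumes a populated list options
+// value named opt, and that this package's Response exposes the parsed
+// "next" link as NextLink):
+//
+//	options := []gitlab.RequestOptionFunc{}
+//	for {
+//		projects, resp, err := client.Projects.ListProjects(opt, options...)
+//		if err != nil {
+//			break
+//		}
+//		process(projects) // placeholder for per-page handling
+//		if resp.NextLink == "" {
+//			break
+//		}
+//		options = []gitlab.RequestOptionFunc{
+//			gitlab.WithKeysetPaginationParameters(resp.NextLink),
+//		}
+//	}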
+func WithKeysetPaginationParameters(nextLink string) RequestOptionFunc {
+	return func(req *retryablehttp.Request) error {
+		nextUrl, err := url.Parse(nextLink)
+		if err != nil {
+			return err
+		}
+		q := req.URL.Query()
+		for k, values := range nextUrl.Query() {
+			q.Del(k)
+			for _, v := range values {
+				q.Add(k, v)
+			}
+		}
+		req.URL.RawQuery = q.Encode()
+		return nil
+	}
+}
+
+// WithSudo takes either a username or user ID and sets the SUDO request header.
+func WithSudo(uid interface{}) RequestOptionFunc {
+	return func(req *retryablehttp.Request) error {
+		user, err := parseID(uid)
+		if err != nil {
+			return err
+		}
+		req.Header.Set("SUDO", user)
+		return nil
+	}
+}
+
+// WithToken takes a token which is then used when making this one request.
+func WithToken(authType AuthType, token string) RequestOptionFunc {
+	return func(req *retryablehttp.Request) error {
+		switch authType {
+		case JobToken:
+			req.Header.Set("JOB-TOKEN", token)
+		case OAuthToken:
+			req.Header.Set("Authorization", "Bearer "+token)
+		case PrivateToken:
+			req.Header.Set("PRIVATE-TOKEN", token)
+		}
+		return nil
+	}
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/resource_group.go b/vendor/github.com/xanzy/go-gitlab/resource_group.go
new file mode 100644
index 0000000000..b11cd8be7a
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/resource_group.go
@@ -0,0 +1,165 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// ResourceGroupService handles communication with the resource
+// group related methods of the GitLab API.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/resource_groups.html
+type ResourceGroupService struct {
+	client *Client
+}
+
+// ResourceGroup represents a GitLab Project Resource Group.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/resource_groups.html
+type ResourceGroup struct {
+	ID          int        `json:"id"`
+	Key         string     `json:"key"`
+	ProcessMode string     `json:"process_mode"`
+	CreatedAt   *time.Time `json:"created_at"`
+	UpdatedAt   *time.Time `json:"updated_at"`
+}
+
+// Gets a string representation of a ResourceGroup
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/resource_groups.html
+func (rg ResourceGroup) String() string {
+	return Stringify(rg)
+}
+
+// GetAllResourceGroupsForAProject allows you to get all resource
+// groups associated with a given project.
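+//
+// A minimal usage sketch (illustrative; assumes the client wires this service
+// up as ResourceGroup, and the project path is an example):
+//
+//	rgs, _, err := client.ResourceGroup.GetAllResourceGroupsForAProject("group/project")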
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_groups.html#get-all-resource-groups-for-a-project +func (s *ResourceGroupService) GetAllResourceGroupsForAProject(pid interface{}, options ...RequestOptionFunc) ([]*ResourceGroup, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/resource_groups", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + var rgs []*ResourceGroup + resp, err := s.client.Do(req, &rgs) + if err != nil { + return nil, resp, err + } + + return rgs, resp, nil +} + +// GetASpecificResourceGroup allows you to get a specific +// resource group for a given project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_groups.html#get-a-specific-resource-group +func (s *ResourceGroupService) GetASpecificResourceGroup(pid interface{}, key string, options ...RequestOptionFunc) (*ResourceGroup, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/resource_groups/%s", PathEscape(project), key) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + rg := new(ResourceGroup) + resp, err := s.client.Do(req, rg) + if err != nil { + return nil, resp, err + } + + return rg, resp, nil +} + +// ListUpcomingJobsForASpecificResourceGroup allows you to get all +// upcoming jobs for a specific resource group for a given project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_groups.html#list-upcoming-jobs-for-a-specific-resource-group +func (s *ResourceGroupService) ListUpcomingJobsForASpecificResourceGroup(pid interface{}, key string, options ...RequestOptionFunc) ([]*Job, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/resource_groups/%s/upcoming_jobs", PathEscape(project), key) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + var js []*Job + resp, err := s.client.Do(req, &js) + if err != nil { + return nil, resp, err + } + + return js, resp, nil +} + +// EditAnExistingResourceGroupOptions represents the available +// EditAnExistingResourceGroup options. 
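+//
+// A minimal usage sketch (illustrative; the process mode value, project path
+// and resource group key are examples):
+//
+//	mode := gitlab.ResourceGroupProcessMode("oldest_first")
+//	opts := &gitlab.EditAnExistingResourceGroupOptions{ProcessMode: &mode}
+//	rg, _, err := client.ResourceGroup.EditAnExistingResourceGroup("group/project", "production", opts)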
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/resource_groups.html#edit-an-existing-resource-group
+type EditAnExistingResourceGroupOptions struct {
+	ProcessMode *ResourceGroupProcessMode `url:"process_mode,omitempty" json:"process_mode,omitempty"`
+}
+
+// EditAnExistingResourceGroup allows you to edit a specific
+// resource group for a given project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/resource_groups.html#edit-an-existing-resource-group
+func (s *ResourceGroupService) EditAnExistingResourceGroup(pid interface{}, key string, opts *EditAnExistingResourceGroupOptions, options ...RequestOptionFunc) (*ResourceGroup, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/resource_groups/%s", PathEscape(project), key)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opts, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	rg := new(ResourceGroup)
+	resp, err := s.client.Do(req, rg)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return rg, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/resource_iteration_events.go b/vendor/github.com/xanzy/go-gitlab/resource_iteration_events.go
new file mode 100644
index 0000000000..142cb9e6ee
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/resource_iteration_events.go
@@ -0,0 +1,122 @@
+//
+// Copyright 2023, Hakki Ceylan, Yavuz Turk
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// ResourceIterationEventsService handles communication with the event related
+// methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/resource_iteration_events.html
+type ResourceIterationEventsService struct {
+	client *Client
+}
+
+// IterationEvent represents a resource iteration event.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/resource_iteration_events.html
+type IterationEvent struct {
+	ID           int        `json:"id"`
+	User         *BasicUser `json:"user"`
+	CreatedAt    *time.Time `json:"created_at"`
+	ResourceType string     `json:"resource_type"`
+	ResourceID   int        `json:"resource_id"`
+	Iteration    *Iteration `json:"iteration"`
+	Action       string     `json:"action"`
+}
+
+// Iteration represents a project issue iteration.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/resource_iteration_events.html
+type Iteration struct {
+	ID          int        `json:"id"`
+	IID         int        `json:"iid"`
+	Sequence    int        `json:"sequence"`
+	GroupID     int        `json:"group_id"`
+	Title       string     `json:"title"`
+	Description string     `json:"description"`
+	State       int        `json:"state"`
+	CreatedAt   *time.Time `json:"created_at"`
+	UpdatedAt   *time.Time `json:"updated_at"`
+	DueDate     *ISOTime   `json:"due_date"`
+	StartDate   *ISOTime   `json:"start_date"`
+	WebURL      string     `json:"web_url"`
+}
+
+// ListIterationEventsOptions represents the options for all resource iteration
+// events list methods.
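+//
+// A minimal usage sketch (illustrative; assumes the client exposes this
+// service as ResourceIterationEvents, and the issue IID is an example):
+//
+//	events, _, err := client.ResourceIterationEvents.ListIssueIterationEvents(
+//		"group/project", 11, &gitlab.ListIterationEventsOptions{})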
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_iteration_events.html#list-project-issue-iteration-events +type ListIterationEventsOptions struct { + ListOptions +} + +// ListIssueIterationEvents retrieves resource iteration events for the +// specified project and issue. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_iteration_events.html#list-project-issue-iteration-events +func (s *ResourceIterationEventsService) ListIssueIterationEvents(pid interface{}, issue int, opt *ListIterationEventsOptions, options ...RequestOptionFunc) ([]*IterationEvent, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/issues/%d/resource_iteration_events", PathEscape(project), issue) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var ies []*IterationEvent + resp, err := s.client.Do(req, &ies) + if err != nil { + return nil, resp, err + } + + return ies, resp, nil +} + +// GetIssueIterationEvent gets a single issue iteration event. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_iteration_events.html#get-single-issue-iteration-event +func (s *ResourceIterationEventsService) GetIssueIterationEvent(pid interface{}, issue int, event int, options ...RequestOptionFunc) (*IterationEvent, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/issues/%d/resource_iteration_events/%d", PathEscape(project), issue, event) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + ie := new(IterationEvent) + resp, err := s.client.Do(req, ie) + if err != nil { + return nil, resp, err + } + + return ie, resp, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/resource_label_events.go b/vendor/github.com/xanzy/go-gitlab/resource_label_events.go new file mode 100644 index 0000000000..46c96fccad --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/resource_label_events.go @@ -0,0 +1,220 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" + "time" +) + +// ResourceLabelEventsService handles communication with the event related +// methods of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/resource_label_events.html +type ResourceLabelEventsService struct { + client *Client +} + +// LabelEvent represents a resource label event. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_label_events.html#get-single-issue-label-event +type LabelEvent struct { + ID int `json:"id"` + Action string `json:"action"` + CreatedAt *time.Time `json:"created_at"` + ResourceType string `json:"resource_type"` + ResourceID int `json:"resource_id"` + User struct { + ID int `json:"id"` + Name string `json:"name"` + Username string `json:"username"` + State string `json:"state"` + AvatarURL string `json:"avatar_url"` + WebURL string `json:"web_url"` + } `json:"user"` + Label struct { + ID int `json:"id"` + Name string `json:"name"` + Color string `json:"color"` + TextColor string `json:"text_color"` + Description string `json:"description"` + } `json:"label"` +} + +// ListLabelEventsOptions represents the options for all resource label events +// list methods. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_label_events.html#list-project-issue-label-events +type ListLabelEventsOptions struct { + ListOptions +} + +// ListIssueLabelEvents retrieves resource label events for the +// specified project and issue. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_label_events.html#list-project-issue-label-events +func (s *ResourceLabelEventsService) ListIssueLabelEvents(pid interface{}, issue int, opt *ListLabelEventsOptions, options ...RequestOptionFunc) ([]*LabelEvent, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/issues/%d/resource_label_events", PathEscape(project), issue) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var ls []*LabelEvent + resp, err := s.client.Do(req, &ls) + if err != nil { + return nil, resp, err + } + + return ls, resp, nil +} + +// GetIssueLabelEvent gets a single issue-label-event. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_label_events.html#get-single-issue-label-event +func (s *ResourceLabelEventsService) GetIssueLabelEvent(pid interface{}, issue int, event int, options ...RequestOptionFunc) (*LabelEvent, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/issues/%d/resource_label_events/%d", PathEscape(project), issue, event) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + l := new(LabelEvent) + resp, err := s.client.Do(req, l) + if err != nil { + return nil, resp, err + } + + return l, resp, nil +} + +// ListGroupEpicLabelEvents retrieves resource label events for the specified +// group and epic. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_label_events.html#list-group-epic-label-events +func (s *ResourceLabelEventsService) ListGroupEpicLabelEvents(gid interface{}, epic int, opt *ListLabelEventsOptions, options ...RequestOptionFunc) ([]*LabelEvent, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/epics/%d/resource_label_events", PathEscape(group), epic) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var ls []*LabelEvent + resp, err := s.client.Do(req, &ls) + if err != nil { + return nil, resp, err + } + + return ls, resp, nil +} + +// GetGroupEpicLabelEvent gets a single group epic label event. 
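+//
+// A minimal usage sketch (illustrative; the group path, epic ID and event ID
+// are examples):
+//
+//	event, _, err := client.ResourceLabelEvents.GetGroupEpicLabelEvent("my-group", 7, 107)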
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_label_events.html#get-single-epic-label-event +func (s *ResourceLabelEventsService) GetGroupEpicLabelEvent(gid interface{}, epic int, event int, options ...RequestOptionFunc) (*LabelEvent, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/epics/%d/resource_label_events/%d", PathEscape(group), epic, event) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + l := new(LabelEvent) + resp, err := s.client.Do(req, l) + if err != nil { + return nil, resp, err + } + + return l, resp, nil +} + +// ListMergeRequestsLabelEvents retrieves resource label events for the specified +// project and merge request. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_label_events.html#list-project-merge-request-label-events +func (s *ResourceLabelEventsService) ListMergeRequestsLabelEvents(pid interface{}, request int, opt *ListLabelEventsOptions, options ...RequestOptionFunc) ([]*LabelEvent, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/resource_label_events", PathEscape(project), request) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var ls []*LabelEvent + resp, err := s.client.Do(req, &ls) + if err != nil { + return nil, resp, err + } + + return ls, resp, nil +} + +// GetMergeRequestLabelEvent gets a single merge request label event. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_label_events.html#get-single-merge-request-label-event +func (s *ResourceLabelEventsService) GetMergeRequestLabelEvent(pid interface{}, request int, event int, options ...RequestOptionFunc) (*LabelEvent, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/resource_label_events/%d", PathEscape(project), request, event) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + l := new(LabelEvent) + resp, err := s.client.Do(req, l) + if err != nil { + return nil, resp, err + } + + return l, resp, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/resource_milestone_events.go b/vendor/github.com/xanzy/go-gitlab/resource_milestone_events.go new file mode 100644 index 0000000000..22925bb0a0 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/resource_milestone_events.go @@ -0,0 +1,155 @@ +// +// Copyright 2022, Mai Lapyst +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" + "time" +) + +// ResourceMilestoneEventsService handles communication with the event related +// methods of the GitLab API. 
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/resource_milestone_events.html
+type ResourceMilestoneEventsService struct {
+	client *Client
+}
+
+// MilestoneEvent represents a resource milestone event.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/resource_milestone_events.html
+type MilestoneEvent struct {
+	ID           int        `json:"id"`
+	User         *BasicUser `json:"user"`
+	CreatedAt    *time.Time `json:"created_at"`
+	ResourceType string     `json:"resource_type"`
+	ResourceID   int        `json:"resource_id"`
+	Milestone    *Milestone `json:"milestone"`
+	Action       string     `json:"action"`
+}
+
+// ListMilestoneEventsOptions represents the options for all resource milestone
+// events list methods.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/resource_milestone_events.html#list-project-issue-milestone-events
+type ListMilestoneEventsOptions struct {
+	ListOptions
+}
+
+// ListIssueMilestoneEvents retrieves resource milestone events for the specified
+// project and issue.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/resource_milestone_events.html#list-project-issue-milestone-events
+func (s *ResourceMilestoneEventsService) ListIssueMilestoneEvents(pid interface{}, issue int, opt *ListMilestoneEventsOptions, options ...RequestOptionFunc) ([]*MilestoneEvent, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/issues/%d/resource_milestone_events", PathEscape(project), issue)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var mes []*MilestoneEvent
+	resp, err := s.client.Do(req, &mes)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return mes, resp, nil
+}
+
+// GetIssueMilestoneEvent gets a single issue milestone event.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/resource_milestone_events.html#get-single-issue-milestone-event
+func (s *ResourceMilestoneEventsService) GetIssueMilestoneEvent(pid interface{}, issue int, event int, options ...RequestOptionFunc) (*MilestoneEvent, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/issues/%d/resource_milestone_events/%d", PathEscape(project), issue, event)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	me := new(MilestoneEvent)
+	resp, err := s.client.Do(req, me)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return me, resp, nil
+}
+
+// ListMergeMilestoneEvents retrieves resource milestone events for the specified
+// project and merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/resource_milestone_events.html#list-project-merge-request-milestone-events
+func (s *ResourceMilestoneEventsService) ListMergeMilestoneEvents(pid interface{}, request int, opt *ListMilestoneEventsOptions, options ...RequestOptionFunc) ([]*MilestoneEvent, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/merge_requests/%d/resource_milestone_events", PathEscape(project), request)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var mes []*MilestoneEvent
+	resp, err := s.client.Do(req, &mes)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return mes, resp, nil
+}
+
+// GetMergeRequestMilestoneEvent gets a single merge request milestone event.
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_milestone_events.html#get-single-merge-request-milestone-event +func (s *ResourceMilestoneEventsService) GetMergeRequestMilestoneEvent(pid interface{}, request int, event int, options ...RequestOptionFunc) (*MilestoneEvent, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/resource_milestone_events/%d", PathEscape(project), request, event) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + me := new(MilestoneEvent) + resp, err := s.client.Do(req, me) + if err != nil { + return nil, resp, err + } + + return me, resp, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/resource_state_events.go b/vendor/github.com/xanzy/go-gitlab/resource_state_events.go new file mode 100644 index 0000000000..867fd6d7e6 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/resource_state_events.go @@ -0,0 +1,154 @@ +// +// Copyright 2021, Matthias Simon +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" + "time" +) + +// ResourceStateEventsService handles communication with the event related +// methods of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/resource_state_events.html +type ResourceStateEventsService struct { + client *Client +} + +// StateEvent represents a resource state event. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/resource_state_events.html +type StateEvent struct { + ID int `json:"id"` + User *BasicUser `json:"user"` + CreatedAt *time.Time `json:"created_at"` + ResourceType string `json:"resource_type"` + ResourceID int `json:"resource_id"` + State EventTypeValue `json:"state"` +} + +// ListStateEventsOptions represents the options for all resource state events +// list methods. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_state_events.html#list-project-issue-state-events +type ListStateEventsOptions struct { + ListOptions +} + +// ListIssueStateEvents retrieves resource state events for the specified +// project and issue. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_state_events.html#list-project-issue-state-events +func (s *ResourceStateEventsService) ListIssueStateEvents(pid interface{}, issue int, opt *ListStateEventsOptions, options ...RequestOptionFunc) ([]*StateEvent, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/issues/%d/resource_state_events", PathEscape(project), issue) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var ses []*StateEvent + resp, err := s.client.Do(req, &ses) + if err != nil { + return nil, resp, err + } + + return ses, resp, nil +} + +// GetIssueStateEvent gets a single issue-state-event. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_state_events.html#get-single-issue-state-event +func (s *ResourceStateEventsService) GetIssueStateEvent(pid interface{}, issue int, event int, options ...RequestOptionFunc) (*StateEvent, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/issues/%d/resource_state_events/%d", PathEscape(project), issue, event) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + se := new(StateEvent) + resp, err := s.client.Do(req, se) + if err != nil { + return nil, resp, err + } + + return se, resp, nil +} + +// ListMergeStateEvents retrieves resource state events for the specified +// project and merge request. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_state_events.html#list-project-merge-request-state-events +func (s *ResourceStateEventsService) ListMergeStateEvents(pid interface{}, request int, opt *ListStateEventsOptions, options ...RequestOptionFunc) ([]*StateEvent, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/resource_state_events", PathEscape(project), request) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var ses []*StateEvent + resp, err := s.client.Do(req, &ses) + if err != nil { + return nil, resp, err + } + + return ses, resp, nil +} + +// GetMergeRequestStateEvent gets a single merge request state event. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_state_events.html#get-single-merge-request-state-event +func (s *ResourceStateEventsService) GetMergeRequestStateEvent(pid interface{}, request int, event int, options ...RequestOptionFunc) (*StateEvent, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/resource_state_events/%d", PathEscape(project), request, event) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + se := new(StateEvent) + resp, err := s.client.Do(req, se) + if err != nil { + return nil, resp, err + } + + return se, resp, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/resource_weight_events.go b/vendor/github.com/xanzy/go-gitlab/resource_weight_events.go new file mode 100644 index 0000000000..1251836fff --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/resource_weight_events.go @@ -0,0 +1,80 @@ +// +// Copyright 2021, Matthias Simon +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" + "time" +) + +// ResourceWeightEventsService handles communication with the event related +// methods of the GitLab API. 
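+//
+// A minimal usage sketch (illustrative; assumes the client exposes this
+// service as ResourceWeightEvents, and the issue IID is an example):
+//
+//	wes, _, err := client.ResourceWeightEvents.ListIssueWeightEvents(
+//		"group/project", 42, &gitlab.ListWeightEventsOptions{})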
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/resource_weight_events.html +type ResourceWeightEventsService struct { + client *Client +} + +// WeightEvent represents a resource weight event. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/resource_weight_events.html +type WeightEvent struct { + ID int `json:"id"` + User *BasicUser `json:"user"` + CreatedAt *time.Time `json:"created_at"` + ResourceType string `json:"resource_type"` + ResourceID int `json:"resource_id"` + State EventTypeValue `json:"state"` + IssueID int `json:"issue_id"` + Weight int `json:"weight"` +} + +// ListWeightEventsOptions represents the options for all resource weight events +// list methods. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_weight_events.html#list-project-issue-weight-events +type ListWeightEventsOptions struct { + ListOptions +} + +// ListIssueWeightEvents retrieves resource weight events for the specified +// project and issue. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_weight_events.html#list-project-issue-weight-events +func (s *ResourceWeightEventsService) ListIssueWeightEvents(pid interface{}, issue int, opt *ListWeightEventsOptions, options ...RequestOptionFunc) ([]*WeightEvent, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/issues/%d/resource_weight_events", PathEscape(project), issue) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var wes []*WeightEvent + resp, err := s.client.Do(req, &wes) + if err != nil { + return nil, resp, err + } + + return wes, resp, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/runners.go b/vendor/github.com/xanzy/go-gitlab/runners.go new file mode 100644 index 0000000000..5224cf91b8 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/runners.go @@ -0,0 +1,597 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" + "time" +) + +// RunnersService handles communication with the runner related methods of the +// GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/runners.html +type RunnersService struct { + client *Client +} + +// Runner represents a GitLab CI Runner. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/runners.html +type Runner struct { + ID int `json:"id"` + Description string `json:"description"` + Active bool `json:"active"` + Paused bool `json:"paused"` + IsShared bool `json:"is_shared"` + IPAddress string `json:"ip_address"` + RunnerType string `json:"runner_type"` + Name string `json:"name"` + Online bool `json:"online"` + Status string `json:"status"` + Token string `json:"token"` + TokenExpiresAt *time.Time `json:"token_expires_at"` +} + +// RunnerDetails represents the GitLab CI runner details. 
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/runners.html
+type RunnerDetails struct {
+	Paused       bool       `json:"paused"`
+	Architecture string     `json:"architecture"`
+	Description  string     `json:"description"`
+	ID           int        `json:"id"`
+	IPAddress    string     `json:"ip_address"`
+	IsShared     bool       `json:"is_shared"`
+	RunnerType   string     `json:"runner_type"`
+	ContactedAt  *time.Time `json:"contacted_at"`
+	Name         string     `json:"name"`
+	Online       bool       `json:"online"`
+	Status       string     `json:"status"`
+	Platform     string     `json:"platform"`
+	Projects     []struct {
+		ID                int    `json:"id"`
+		Name              string `json:"name"`
+		NameWithNamespace string `json:"name_with_namespace"`
+		Path              string `json:"path"`
+		PathWithNamespace string `json:"path_with_namespace"`
+	} `json:"projects"`
+	Token          string   `json:"token"`
+	Revision       string   `json:"revision"`
+	TagList        []string `json:"tag_list"`
+	RunUntagged    bool     `json:"run_untagged"`
+	Version        string   `json:"version"`
+	Locked         bool     `json:"locked"`
+	AccessLevel    string   `json:"access_level"`
+	MaximumTimeout int      `json:"maximum_timeout"`
+	Groups         []struct {
+		ID     int    `json:"id"`
+		Name   string `json:"name"`
+		WebURL string `json:"web_url"`
+	} `json:"groups"`
+
+	// Deprecated: Use Paused instead. (Deprecated in GitLab 14.8)
+	Active bool `json:"active"`
+}
+
+// ListRunnersOptions represents the available ListRunners() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/runners.html#list-owned-runners
+type ListRunnersOptions struct {
+	ListOptions
+	Type    *string   `url:"type,omitempty" json:"type,omitempty"`
+	Status  *string   `url:"status,omitempty" json:"status,omitempty"`
+	Paused  *bool     `url:"paused,omitempty" json:"paused,omitempty"`
+	TagList *[]string `url:"tag_list,comma,omitempty" json:"tag_list,omitempty"`
+
+	// Deprecated: Use Type or Status instead.
+	Scope *string `url:"scope,omitempty" json:"scope,omitempty"`
+}
+
+// ListRunners gets a list of runners accessible by the authenticated user.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/runners.html#list-owned-runners
+func (s *RunnersService) ListRunners(opt *ListRunnersOptions, options ...RequestOptionFunc) ([]*Runner, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodGet, "runners", opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var rs []*Runner
+	resp, err := s.client.Do(req, &rs)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return rs, resp, nil
+}
+
+// ListAllRunners gets a list of all runners in the GitLab instance. Access is
+// restricted to users with admin privileges.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/runners.html#list-all-runners
+func (s *RunnersService) ListAllRunners(opt *ListRunnersOptions, options ...RequestOptionFunc) ([]*Runner, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodGet, "runners/all", opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var rs []*Runner
+	resp, err := s.client.Do(req, &rs)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return rs, resp, nil
+}
+
+// GetRunnerDetails returns details for a given runner.
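+//
+// A minimal usage sketch (illustrative; the runner ID is an example):
+//
+//	details, _, err := client.Runners.GetRunnerDetails(12345)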
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/runners.html#get-runners-details
+func (s *RunnersService) GetRunnerDetails(rid interface{}, options ...RequestOptionFunc) (*RunnerDetails, *Response, error) {
+	runner, err := parseID(rid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("runners/%s", runner)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	rs := new(RunnerDetails)
+	resp, err := s.client.Do(req, &rs)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return rs, resp, nil
+}
+
+// UpdateRunnerDetailsOptions represents the available UpdateRunnerDetails() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/runners.html#update-runners-details
+type UpdateRunnerDetailsOptions struct {
+	Description    *string   `url:"description,omitempty" json:"description,omitempty"`
+	Paused         *bool     `url:"paused,omitempty" json:"paused,omitempty"`
+	TagList        *[]string `url:"tag_list[],omitempty" json:"tag_list,omitempty"`
+	RunUntagged    *bool     `url:"run_untagged,omitempty" json:"run_untagged,omitempty"`
+	Locked         *bool     `url:"locked,omitempty" json:"locked,omitempty"`
+	AccessLevel    *string   `url:"access_level,omitempty" json:"access_level,omitempty"`
+	MaximumTimeout *int      `url:"maximum_timeout,omitempty" json:"maximum_timeout,omitempty"`
+
+	// Deprecated: Use Paused instead. (Deprecated in GitLab 14.8)
+	Active *bool `url:"active,omitempty" json:"active,omitempty"`
+}
+
+// UpdateRunnerDetails updates details for a given runner.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/runners.html#update-runners-details
+func (s *RunnersService) UpdateRunnerDetails(rid interface{}, opt *UpdateRunnerDetailsOptions, options ...RequestOptionFunc) (*RunnerDetails, *Response, error) {
+	runner, err := parseID(rid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("runners/%s", runner)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	rs := new(RunnerDetails)
+	resp, err := s.client.Do(req, &rs)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return rs, resp, nil
+}
+
+// RemoveRunner removes a runner.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/runners.html#delete-a-runner
+func (s *RunnersService) RemoveRunner(rid interface{}, options ...RequestOptionFunc) (*Response, error) {
+	runner, err := parseID(rid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("runners/%s", runner)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// ListRunnerJobsOptions represents the available ListRunnerJobs()
+// options. Status can be one of: running, success, failed, canceled.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/runners.html#list-runners-jobs
+type ListRunnerJobsOptions struct {
+	ListOptions
+	Status  *string `url:"status,omitempty" json:"status,omitempty"`
+	OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"`
+	Sort    *string `url:"sort,omitempty" json:"sort,omitempty"`
+}
+
+// ListRunnerJobs gets a list of jobs that are being processed or were
+// processed by the specified runner.
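+//
+// A minimal usage sketch (illustrative; the runner ID and status filter are
+// examples):
+//
+//	opt := &gitlab.ListRunnerJobsOptions{Status: gitlab.String("running")}
+//	jobs, _, err := client.Runners.ListRunnerJobs(12345, opt)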
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/runners.html#list-runners-jobs
+func (s *RunnersService) ListRunnerJobs(rid interface{}, opt *ListRunnerJobsOptions, options ...RequestOptionFunc) ([]*Job, *Response, error) {
+    runner, err := parseID(rid)
+    if err != nil {
+        return nil, nil, err
+    }
+    u := fmt.Sprintf("runners/%s/jobs", runner)
+
+    req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+    if err != nil {
+        return nil, nil, err
+    }
+
+    var rs []*Job
+    resp, err := s.client.Do(req, &rs)
+    if err != nil {
+        return nil, resp, err
+    }
+
+    return rs, resp, nil
+}
+
+// ListProjectRunnersOptions represents the available ListProjectRunners()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/runners.html#list-projects-runners
+type ListProjectRunnersOptions ListRunnersOptions
+
+// ListProjectRunners gets a list of runners accessible by the authenticated user.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/runners.html#list-projects-runners
+func (s *RunnersService) ListProjectRunners(pid interface{}, opt *ListProjectRunnersOptions, options ...RequestOptionFunc) ([]*Runner, *Response, error) {
+    project, err := parseID(pid)
+    if err != nil {
+        return nil, nil, err
+    }
+    u := fmt.Sprintf("projects/%s/runners", PathEscape(project))
+
+    req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+    if err != nil {
+        return nil, nil, err
+    }
+
+    var rs []*Runner
+    resp, err := s.client.Do(req, &rs)
+    if err != nil {
+        return nil, resp, err
+    }
+
+    return rs, resp, nil
+}
+
+// EnableProjectRunnerOptions represents the available EnableProjectRunner()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/runners.html#enable-a-runner-in-project
+type EnableProjectRunnerOptions struct {
+    RunnerID int `json:"runner_id"`
+}
+
+// EnableProjectRunner enables an available specific runner in the project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/runners.html#enable-a-runner-in-project
+func (s *RunnersService) EnableProjectRunner(pid interface{}, opt *EnableProjectRunnerOptions, options ...RequestOptionFunc) (*Runner, *Response, error) {
+    project, err := parseID(pid)
+    if err != nil {
+        return nil, nil, err
+    }
+    u := fmt.Sprintf("projects/%s/runners", PathEscape(project))
+
+    req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+    if err != nil {
+        return nil, nil, err
+    }
+
+    r := new(Runner)
+    resp, err := s.client.Do(req, &r)
+    if err != nil {
+        return nil, resp, err
+    }
+
+    return r, resp, nil
+}
+
+// DisableProjectRunner disables a specific runner from a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/runners.html#disable-a-runner-from-project
+func (s *RunnersService) DisableProjectRunner(pid interface{}, runner int, options ...RequestOptionFunc) (*Response, error) {
+    project, err := parseID(pid)
+    if err != nil {
+        return nil, err
+    }
+    u := fmt.Sprintf("projects/%s/runners/%d", PathEscape(project), runner)
+
+    req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+    if err != nil {
+        return nil, err
+    }
+
+    return s.client.Do(req, nil)
+}
+
+// ListGroupsRunnersOptions represents the available ListGroupsRunners() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/runners.html#list-groups-runners
+type ListGroupsRunnersOptions struct {
+    ListOptions
+    Type    *string   `url:"type,omitempty" json:"type,omitempty"`
+    Status  *string   `url:"status,omitempty" json:"status,omitempty"`
+    TagList *[]string `url:"tag_list,comma,omitempty" json:"tag_list,omitempty"`
+}
+
+// ListGroupsRunners lists all runners (specific and shared) available in the
+// group as well as its ancestor groups. Shared runners are listed if at least
+// one shared runner is defined.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/runners.html#list-groups-runners
+func (s *RunnersService) ListGroupsRunners(gid interface{}, opt *ListGroupsRunnersOptions, options ...RequestOptionFunc) ([]*Runner, *Response, error) {
+    group, err := parseID(gid)
+    if err != nil {
+        return nil, nil, err
+    }
+    u := fmt.Sprintf("groups/%s/runners", PathEscape(group))
+
+    req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+    if err != nil {
+        return nil, nil, err
+    }
+
+    var rs []*Runner
+    resp, err := s.client.Do(req, &rs)
+    if err != nil {
+        return nil, resp, err
+    }
+
+    return rs, resp, nil
+}
+
+// RegisterNewRunnerOptions represents the available RegisterNewRunner()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/runners.html#register-a-new-runner
+type RegisterNewRunnerOptions struct {
+    Token           *string                       `url:"token" json:"token"`
+    Description     *string                       `url:"description,omitempty" json:"description,omitempty"`
+    Info            *RegisterNewRunnerInfoOptions `url:"info,omitempty" json:"info,omitempty"`
+    Active          *bool                         `url:"active,omitempty" json:"active,omitempty"`
+    Paused          *bool                         `url:"paused,omitempty" json:"paused,omitempty"`
+    Locked          *bool                         `url:"locked,omitempty" json:"locked,omitempty"`
+    RunUntagged     *bool                         `url:"run_untagged,omitempty" json:"run_untagged,omitempty"`
+    TagList         *[]string                     `url:"tag_list[],omitempty" json:"tag_list,omitempty"`
+    AccessLevel     *string                       `url:"access_level,omitempty" json:"access_level,omitempty"`
+    MaximumTimeout  *int                          `url:"maximum_timeout,omitempty" json:"maximum_timeout,omitempty"`
+    MaintenanceNote *string                       `url:"maintenance_note,omitempty" json:"maintenance_note,omitempty"`
+}
+
+// RegisterNewRunnerInfoOptions represents the info hashmap parameter in
+// RegisterNewRunnerOptions.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/runners.html#register-a-new-runner
+type RegisterNewRunnerInfoOptions struct {
+    Name         *string `url:"name,omitempty" json:"name,omitempty"`
+    Version      *string `url:"version,omitempty" json:"version,omitempty"`
+    Revision     *string `url:"revision,omitempty" json:"revision,omitempty"`
+    Platform     *string `url:"platform,omitempty" json:"platform,omitempty"`
+    Architecture *string `url:"architecture,omitempty" json:"architecture,omitempty"`
+}
+
+// RegisterNewRunner registers a new runner for the instance.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/runners.html#register-a-new-runner
+func (s *RunnersService) RegisterNewRunner(opt *RegisterNewRunnerOptions, options ...RequestOptionFunc) (*Runner, *Response, error) {
+    req, err := s.client.NewRequest(http.MethodPost, "runners", opt, options)
+    if err != nil {
+        return nil, nil, err
+    }
+
+    r := new(Runner)
+    resp, err := s.client.Do(req, &r)
+    if err != nil {
+        return nil, resp, err
+    }
+
+    return r, resp, nil
+}
+
+// DeleteRegisteredRunnerOptions represents the available
+// DeleteRegisteredRunner() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/runners.html#delete-a-runner-by-authentication-token
+type DeleteRegisteredRunnerOptions struct {
+    Token *string `url:"token" json:"token"`
+}
+
+// DeleteRegisteredRunner deletes a runner by its authentication token.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/runners.html#delete-a-runner-by-authentication-token
+func (s *RunnersService) DeleteRegisteredRunner(opt *DeleteRegisteredRunnerOptions, options ...RequestOptionFunc) (*Response, error) {
+    req, err := s.client.NewRequest(http.MethodDelete, "runners", opt, options)
+    if err != nil {
+        return nil, err
+    }
+
+    return s.client.Do(req, nil)
+}
+
+// DeleteRegisteredRunnerByID deletes a runner by ID.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/runners.html#delete-a-runner-by-id
+func (s *RunnersService) DeleteRegisteredRunnerByID(rid int, options ...RequestOptionFunc) (*Response, error) {
+    req, err := s.client.NewRequest(http.MethodDelete, fmt.Sprintf("runners/%d", rid), nil, options)
+    if err != nil {
+        return nil, err
+    }
+
+    return s.client.Do(req, nil)
+}
+
+// VerifyRegisteredRunnerOptions represents the available
+// VerifyRegisteredRunner() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/runners.html#verify-authentication-for-a-registered-runner
+type VerifyRegisteredRunnerOptions struct {
+    Token *string `url:"token" json:"token"`
+}
+
+// VerifyRegisteredRunner verifies authentication for a registered runner.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/runners.html#verify-authentication-for-a-registered-runner
+func (s *RunnersService) VerifyRegisteredRunner(opt *VerifyRegisteredRunnerOptions, options ...RequestOptionFunc) (*Response, error) {
+    req, err := s.client.NewRequest(http.MethodPost, "runners/verify", opt, options)
+    if err != nil {
+        return nil, err
+    }
+
+    return s.client.Do(req, nil)
+}
+
+type RunnerRegistrationToken struct {
+    Token          *string    `url:"token" json:"token"`
+    TokenExpiresAt *time.Time `url:"token_expires_at" json:"token_expires_at"`
+}
+
+// ResetInstanceRunnerRegistrationToken resets the instance runner registration
+// token.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/runners.html#reset-instances-runner-registration-token
+func (s *RunnersService) ResetInstanceRunnerRegistrationToken(options ...RequestOptionFunc) (*RunnerRegistrationToken, *Response, error) {
+    req, err := s.client.NewRequest(http.MethodPost, "runners/reset_registration_token", nil, options)
+    if err != nil {
+        return nil, nil, err
+    }
+
+    r := new(RunnerRegistrationToken)
+    resp, err := s.client.Do(req, &r)
+    if err != nil {
+        return nil, resp, err
+    }
+
+    return r, resp, nil
+}
+
+// ResetGroupRunnerRegistrationToken resets a group's runner registration token.
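+//
+// A short sketch (assumes a configured *gitlab.Client named git; the group
+// ID is illustrative, and the returned token may carry an expiry):
+//
+//    token, _, err := git.Runners.ResetGroupRunnerRegistrationToken(9)
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    if token.Token != nil {
+//        fmt.Println(*token.Token)
+//    }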
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/runners.html#reset-groups-runner-registration-token
+func (s *RunnersService) ResetGroupRunnerRegistrationToken(gid interface{}, options ...RequestOptionFunc) (*RunnerRegistrationToken, *Response, error) {
+    group, err := parseID(gid)
+    if err != nil {
+        return nil, nil, err
+    }
+    u := fmt.Sprintf("groups/%s/runners/reset_registration_token", PathEscape(group))
+
+    req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+    if err != nil {
+        return nil, nil, err
+    }
+
+    r := new(RunnerRegistrationToken)
+    resp, err := s.client.Do(req, &r)
+    if err != nil {
+        return nil, resp, err
+    }
+
+    return r, resp, nil
+}
+
+// ResetProjectRunnerRegistrationToken resets a project's runner registration
+// token.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/runners.html#reset-projects-runner-registration-token
+func (s *RunnersService) ResetProjectRunnerRegistrationToken(pid interface{}, options ...RequestOptionFunc) (*RunnerRegistrationToken, *Response, error) {
+    project, err := parseID(pid)
+    if err != nil {
+        return nil, nil, err
+    }
+    u := fmt.Sprintf("projects/%s/runners/reset_registration_token", PathEscape(project))
+    req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+    if err != nil {
+        return nil, nil, err
+    }
+
+    r := new(RunnerRegistrationToken)
+    resp, err := s.client.Do(req, &r)
+    if err != nil {
+        return nil, resp, err
+    }
+
+    return r, resp, nil
+}
+
+type RunnerAuthenticationToken struct {
+    Token          *string    `url:"token" json:"token"`
+    TokenExpiresAt *time.Time `url:"token_expires_at" json:"token_expires_at"`
+}
+
+// ResetRunnerAuthenticationToken resets a runner's authentication token.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/runners.html#reset-runners-authentication-token-by-using-the-runner-id
+func (s *RunnersService) ResetRunnerAuthenticationToken(rid int, options ...RequestOptionFunc) (*RunnerAuthenticationToken, *Response, error) {
+    u := fmt.Sprintf("runners/%d/reset_authentication_token", rid)
+    req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+    if err != nil {
+        return nil, nil, err
+    }
+
+    r := new(RunnerAuthenticationToken)
+    resp, err := s.client.Do(req, &r)
+    if err != nil {
+        return nil, resp, err
+    }
+
+    return r, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/search.go b/vendor/github.com/xanzy/go-gitlab/search.go
new file mode 100644
index 0000000000..133592bd12
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/search.go
@@ -0,0 +1,359 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+    "fmt"
+    "net/http"
+)
+
+// SearchService handles communication with the search related methods of the
+// GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/search.html
+type SearchService struct {
+    client *Client
+}
+
+// SearchOptions represents the available options for all search methods.
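+//
+// A hedged example of passing these options to one of the search methods
+// (assumes a configured *gitlab.Client named git; the query is illustrative):
+//
+//    projects, _, err := git.Search.Projects("chains", &gitlab.SearchOptions{
+//        ListOptions: gitlab.ListOptions{PerPage: 10},
+//    })
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    fmt.Println(len(projects), "projects matched")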
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/search.html +type SearchOptions struct { + ListOptions + Ref *string `url:"ref,omitempty" json:"ref,omitempty"` +} + +type searchOptions struct { + SearchOptions + Scope string `url:"scope" json:"scope"` + Search string `url:"search" json:"search"` +} + +// Projects searches the expression within projects +// +// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-projects +func (s *SearchService) Projects(query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) { + var ps []*Project + resp, err := s.search("projects", query, &ps, opt, options...) + return ps, resp, err +} + +// ProjectsByGroup searches the expression within projects for +// the specified group +// +// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#group-search-api +func (s *SearchService) ProjectsByGroup(gid interface{}, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) { + var ps []*Project + resp, err := s.searchByGroup(gid, "projects", query, &ps, opt, options...) + return ps, resp, err +} + +// Issues searches the expression within issues +// +// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-issues +func (s *SearchService) Issues(query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { + var is []*Issue + resp, err := s.search("issues", query, &is, opt, options...) + return is, resp, err +} + +// IssuesByGroup searches the expression within issues for +// the specified group +// +// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-issues-1 +func (s *SearchService) IssuesByGroup(gid interface{}, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { + var is []*Issue + resp, err := s.searchByGroup(gid, "issues", query, &is, opt, options...) + return is, resp, err +} + +// IssuesByProject searches the expression within issues for +// the specified project +// +// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-issues-2 +func (s *SearchService) IssuesByProject(pid interface{}, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { + var is []*Issue + resp, err := s.searchByProject(pid, "issues", query, &is, opt, options...) + return is, resp, err +} + +// MergeRequests searches the expression within merge requests +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/search.html#scope-merge_requests +func (s *SearchService) MergeRequests(query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*MergeRequest, *Response, error) { + var ms []*MergeRequest + resp, err := s.search("merge_requests", query, &ms, opt, options...) + return ms, resp, err +} + +// MergeRequestsByGroup searches the expression within merge requests for +// the specified group +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/search.html#scope-merge_requests-1 +func (s *SearchService) MergeRequestsByGroup(gid interface{}, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*MergeRequest, *Response, error) { + var ms []*MergeRequest + resp, err := s.searchByGroup(gid, "merge_requests", query, &ms, opt, options...) 
+    return ms, resp, err
+}
+
+// MergeRequestsByProject searches the expression within merge requests for
+// the specified project
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/search.html#scope-merge_requests-2
+func (s *SearchService) MergeRequestsByProject(pid interface{}, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*MergeRequest, *Response, error) {
+    var ms []*MergeRequest
+    resp, err := s.searchByProject(pid, "merge_requests", query, &ms, opt, options...)
+    return ms, resp, err
+}
+
+// Milestones searches the expression within milestones
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-milestones
+func (s *SearchService) Milestones(query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Milestone, *Response, error) {
+    var ms []*Milestone
+    resp, err := s.search("milestones", query, &ms, opt, options...)
+    return ms, resp, err
+}
+
+// MilestonesByGroup searches the expression within milestones for
+// the specified group
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-milestones-1
+func (s *SearchService) MilestonesByGroup(gid interface{}, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Milestone, *Response, error) {
+    var ms []*Milestone
+    resp, err := s.searchByGroup(gid, "milestones", query, &ms, opt, options...)
+    return ms, resp, err
+}
+
+// MilestonesByProject searches the expression within milestones for
+// the specified project
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-milestones-2
+func (s *SearchService) MilestonesByProject(pid interface{}, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Milestone, *Response, error) {
+    var ms []*Milestone
+    resp, err := s.searchByProject(pid, "milestones", query, &ms, opt, options...)
+    return ms, resp, err
+}
+
+// SnippetTitles searches the expression within snippet titles
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/search.html#scope-snippet_titles
+func (s *SearchService) SnippetTitles(query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Snippet, *Response, error) {
+    var ss []*Snippet
+    resp, err := s.search("snippet_titles", query, &ss, opt, options...)
+    return ss, resp, err
+}
+
+// SnippetBlobs searches the expression within snippet blobs
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/search.html#scope-snippet_blobs
+func (s *SearchService) SnippetBlobs(query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Snippet, *Response, error) {
+    var ss []*Snippet
+    resp, err := s.search("snippet_blobs", query, &ss, opt, options...)
+    return ss, resp, err
+}
+
+// NotesByProject searches the expression within notes for the specified
+// project
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/search.html#scope-notes
+func (s *SearchService) NotesByProject(pid interface{}, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Note, *Response, error) {
+    var ns []*Note
+    resp, err := s.searchByProject(pid, "notes", query, &ns, opt, options...)
+    return ns, resp, err
+}
+
+// WikiBlobs searches the expression within all wiki blobs
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/search.html#scope-wiki_blobs
+func (s *SearchService) WikiBlobs(query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Wiki, *Response, error) {
+    var ws []*Wiki
+    resp, err := s.search("wiki_blobs", query, &ws, opt, options...)
+ return ws, resp, err +} + +// WikiBlobsByGroup searches the expression within wiki blobs for +// specified group +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/search.html#scope-wiki_blobs-premium-1 +func (s *SearchService) WikiBlobsByGroup(gid interface{}, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Wiki, *Response, error) { + var ws []*Wiki + resp, err := s.searchByGroup(gid, "wiki_blobs", query, &ws, opt, options...) + return ws, resp, err +} + +// WikiBlobsByProject searches the expression within wiki blobs for +// the specified project +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/search.html#scope-wiki_blobs-premium-2 +func (s *SearchService) WikiBlobsByProject(pid interface{}, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Wiki, *Response, error) { + var ws []*Wiki + resp, err := s.searchByProject(pid, "wiki_blobs", query, &ws, opt, options...) + return ws, resp, err +} + +// Commits searches the expression within all commits +// +// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-commits +func (s *SearchService) Commits(query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Commit, *Response, error) { + var cs []*Commit + resp, err := s.search("commits", query, &cs, opt, options...) + return cs, resp, err +} + +// CommitsByGroup searches the expression within commits for the specified +// group +// +// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-commits-premium-1 +func (s *SearchService) CommitsByGroup(gid interface{}, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Commit, *Response, error) { + var cs []*Commit + resp, err := s.searchByGroup(gid, "commits", query, &cs, opt, options...) + return cs, resp, err +} + +// CommitsByProject searches the expression within commits for the +// specified project +// +// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-commits-premium-2 +func (s *SearchService) CommitsByProject(pid interface{}, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Commit, *Response, error) { + var cs []*Commit + resp, err := s.searchByProject(pid, "commits", query, &cs, opt, options...) + return cs, resp, err +} + +// Blob represents a single blob. +type Blob struct { + Basename string `json:"basename"` + Data string `json:"data"` + Path string `json:"path"` + Filename string `json:"filename"` + ID string `json:"id"` + Ref string `json:"ref"` + Startline int `json:"startline"` + ProjectID int `json:"project_id"` +} + +// Blobs searches the expression within all blobs +// +// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-blobs +func (s *SearchService) Blobs(query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Blob, *Response, error) { + var bs []*Blob + resp, err := s.search("blobs", query, &bs, opt, options...) + return bs, resp, err +} + +// BlobsByGroup searches the expression within blobs for the specified +// group +// +// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-blobs-premium-1 +func (s *SearchService) BlobsByGroup(gid interface{}, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Blob, *Response, error) { + var bs []*Blob + resp, err := s.searchByGroup(gid, "blobs", query, &bs, opt, options...) 
+ return bs, resp, err +} + +// BlobsByProject searches the expression within blobs for the specified +// project +// +// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-blobs-premium-2 +func (s *SearchService) BlobsByProject(pid interface{}, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Blob, *Response, error) { + var bs []*Blob + resp, err := s.searchByProject(pid, "blobs", query, &bs, opt, options...) + return bs, resp, err +} + +// Users searches the expression within all users +// +// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-users +func (s *SearchService) Users(query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*User, *Response, error) { + var ret []*User + resp, err := s.search("users", query, &ret, opt, options...) + return ret, resp, err +} + +// UsersByGroup searches the expression within users for the specified +// group +// +// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-users-1 +func (s *SearchService) UsersByGroup(gid interface{}, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*User, *Response, error) { + var ret []*User + resp, err := s.searchByGroup(gid, "users", query, &ret, opt, options...) + return ret, resp, err +} + +// UsersByProject searches the expression within users for the +// specified project +// +// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-users-2 +func (s *SearchService) UsersByProject(pid interface{}, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*User, *Response, error) { + var ret []*User + resp, err := s.searchByProject(pid, "users", query, &ret, opt, options...) + return ret, resp, err +} + +func (s *SearchService) search(scope, query string, result interface{}, opt *SearchOptions, options ...RequestOptionFunc) (*Response, error) { + opts := &searchOptions{SearchOptions: *opt, Scope: scope, Search: query} + + req, err := s.client.NewRequest(http.MethodGet, "search", opts, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, result) +} + +func (s *SearchService) searchByGroup(gid interface{}, scope, query string, result interface{}, opt *SearchOptions, options ...RequestOptionFunc) (*Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("groups/%s/-/search", PathEscape(group)) + + opts := &searchOptions{SearchOptions: *opt, Scope: scope, Search: query} + + req, err := s.client.NewRequest(http.MethodGet, u, opts, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, result) +} + +func (s *SearchService) searchByProject(pid interface{}, scope, query string, result interface{}, opt *SearchOptions, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/-/search", PathEscape(project)) + + opts := &searchOptions{SearchOptions: *opt, Scope: scope, Search: query} + + req, err := s.client.NewRequest(http.MethodGet, u, opts, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, result) +} diff --git a/vendor/github.com/xanzy/go-gitlab/services.go b/vendor/github.com/xanzy/go-gitlab/services.go new file mode 100644 index 0000000000..fcaa71ecc4 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/services.go @@ -0,0 +1,2179 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in 
compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "encoding/json" + "fmt" + "net/http" + "strconv" + "time" +) + +// ServicesService handles communication with the services related methods of +// the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/integrations.html +type ServicesService struct { + client *Client +} + +// Service represents a GitLab service. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/integrations.html +type Service struct { + ID int `json:"id"` + Title string `json:"title"` + Slug string `json:"slug"` + CreatedAt *time.Time `json:"created_at"` + UpdatedAt *time.Time `json:"updated_at"` + Active bool `json:"active"` + PushEvents bool `json:"push_events"` + IssuesEvents bool `json:"issues_events"` + AlertEvents bool `json:"alert_events"` + ConfidentialIssuesEvents bool `json:"confidential_issues_events"` + CommitEvents bool `json:"commit_events"` + MergeRequestsEvents bool `json:"merge_requests_events"` + CommentOnEventEnabled bool `json:"comment_on_event_enabled"` + TagPushEvents bool `json:"tag_push_events"` + NoteEvents bool `json:"note_events"` + ConfidentialNoteEvents bool `json:"confidential_note_events"` + PipelineEvents bool `json:"pipeline_events"` + JobEvents bool `json:"job_events"` + WikiPageEvents bool `json:"wiki_page_events"` + VulnerabilityEvents bool `json:"vulnerability_events"` + DeploymentEvents bool `json:"deployment_events"` +} + +// ListServices gets a list of all active services. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/integrations.html#list-all-active-integrations +func (s *ServicesService) ListServices(pid interface{}, options ...RequestOptionFunc) ([]*Service, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/services", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + var svcs []*Service + resp, err := s.client.Do(req, &svcs) + if err != nil { + return nil, resp, err + } + + return svcs, resp, nil +} + +// CustomIssueTrackerService represents Custom Issue Tracker service settings. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#custom-issue-tracker +type CustomIssueTrackerService struct { + Service + Properties *CustomIssueTrackerServiceProperties `json:"properties"` +} + +// CustomIssueTrackerServiceProperties represents Custom Issue Tracker specific properties. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#custom-issue-tracker +type CustomIssueTrackerServiceProperties struct { + ProjectURL string `json:"project_url,omitempty"` + IssuesURL string `json:"issues_url,omitempty"` + NewIssueURL string `json:"new_issue_url,omitempty"` +} + +// GetCustomIssueTrackerService gets Custom Issue Tracker service settings for a project. 
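+//
+// A minimal sketch (assumes a configured *gitlab.Client named git; the
+// project path is illustrative):
+//
+//    svc, _, err := git.Services.GetCustomIssueTrackerService("group/project")
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    fmt.Println(svc.Properties.ProjectURL)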
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#get-custom-issue-tracker-settings +func (s *ServicesService) GetCustomIssueTrackerService(pid interface{}, options ...RequestOptionFunc) (*CustomIssueTrackerService, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/services/custom-issue-tracker", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + svc := new(CustomIssueTrackerService) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, resp, err + } + + return svc, resp, nil +} + +// SetCustomIssueTrackerServiceOptions represents the available SetCustomIssueTrackerService() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#set-up-a-custom-issue-tracker +type SetCustomIssueTrackerServiceOptions struct { + NewIssueURL *string `url:"new_issue_url,omitempty" json:"new_issue_url,omitempty"` + IssuesURL *string `url:"issues_url,omitempty" json:"issues_url,omitempty"` + ProjectURL *string `url:"project_url,omitempty" json:"project_url,omitempty"` +} + +// SetCustomIssueTrackerService sets Custom Issue Tracker service for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#set-up-a-custom-issue-tracker +func (s *ServicesService) SetCustomIssueTrackerService(pid interface{}, opt *SetCustomIssueTrackerServiceOptions, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/services/custom-issue-tracker", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// DeleteCustomIssueTrackerService deletes Custom Issue Tracker service settings for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#disable-a-custom-issue-tracker +func (s *ServicesService) DeleteCustomIssueTrackerService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/services/custom-issue-tracker", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// DataDogService represents DataDog service settings. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#datadog +type DataDogService struct { + Service + Properties *DataDogServiceProperties `json:"properties"` +} + +// DataDogServiceProperties represents DataDog specific properties. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#datadog +type DataDogServiceProperties struct { + APIURL string `url:"api_url,omitempty" json:"api_url,omitempty"` + DataDogEnv string `url:"datadog_env,omitempty" json:"datadog_env,omitempty"` + DataDogService string `url:"datadog_service,omitempty" json:"datadog_service,omitempty"` + DataDogSite string `url:"datadog_site,omitempty" json:"datadog_site,omitempty"` + DataDogTags string `url:"datadog_tags,omitempty" json:"datadog_tags,omitempty"` + ArchiveTraceEvents bool `url:"archive_trace_events,omitempty" json:"archive_trace_events,omitempty"` +} + +// GetDataDogService gets DataDog service settings for a project. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#get-datadog-settings +func (s *ServicesService) GetDataDogService(pid interface{}, options ...RequestOptionFunc) (*DataDogService, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/services/datadog", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + svc := new(DataDogService) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, resp, err + } + + return svc, resp, nil +} + +// SetDataDogServiceOptions represents the available SetDataDogService() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#set-up-datadog +type SetDataDogServiceOptions struct { + APIKey *string `url:"api_key,omitempty" json:"api_key,omitempty"` + APIURL *string `url:"api_url,omitempty" json:"api_url,omitempty"` + DataDogEnv *string `url:"datadog_env,omitempty" json:"datadog_env,omitempty"` + DataDogService *string `url:"datadog_service,omitempty" json:"datadog_service,omitempty"` + DataDogSite *string `url:"datadog_site,omitempty" json:"datadog_site,omitempty"` + DataDogTags *string `url:"datadog_tags,omitempty" json:"datadog_tags,omitempty"` + ArchiveTraceEvents *bool `url:"archive_trace_events,omitempty" json:"archive_trace_events,omitempty"` +} + +// SetDataDogService sets DataDog service settings for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#set-up-datadog +func (s *ServicesService) SetDataDogService(pid interface{}, opt *SetDataDogServiceOptions, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/services/datadog", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// DeleteDataDogService deletes the DataDog service settings for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#disable-datadog +func (s *ServicesService) DeleteDataDogService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/services/datadog", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// DiscordService represents Discord service settings. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#discord-notifications +type DiscordService struct { + Service + Properties *DiscordServiceProperties `json:"properties"` +} + +// DiscordServiceProperties represents Discord specific properties. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#discord-notifications +type DiscordServiceProperties struct { + BranchesToBeNotified string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"` + NotifyOnlyBrokenPipelines bool `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"` +} + +// GetDiscordService gets Discord service settings for a project. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#get-discord-notifications-settings +func (s *ServicesService) GetDiscordService(pid interface{}, options ...RequestOptionFunc) (*DiscordService, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/services/discord", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + svc := new(DiscordService) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, resp, err + } + + return svc, resp, nil +} + +// SetDiscordServiceOptions represents the available SetDiscordService() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#set-up-discord-notifications +type SetDiscordServiceOptions struct { + WebHook *string `url:"webhook,omitempty" json:"webhook,omitempty"` + BranchesToBeNotified *string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"` + ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` + ConfidentialIssuesChannel *string `url:"confidential_issue_channel,omitempty" json:"confidential_issue_channel,omitempty"` + ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` + ConfidentialNoteChannel *string `url:"confidential_note_channel,omitempty" json:"confidential_note_channel,omitempty"` + DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"` + DeploymentChannel *string `url:"deployment_channel,omitempty" json:"deployment_channel,omitempty"` + GroupConfidentialMentionsEvents *bool `url:"group_confidential_mentions_events,omitempty" json:"group_confidential_mentions_events,omitempty"` + GroupConfidentialMentionsChannel *string `url:"group_confidential_mentions_channel,omitempty" json:"group_confidential_mentions_channel,omitempty"` + GroupMentionsEvents *bool `url:"group_mentions_events,omitempty" json:"group_mentions_events,omitempty"` + GroupMentionsChannel *string `url:"group_mentions_channel,omitempty" json:"group_mentions_channel,omitempty"` + IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` + IssueChannel *string `url:"issue_channel,omitempty" json:"issue_channel,omitempty"` + MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` + MergeRequestChannel *string `url:"merge_request_channel,omitempty" json:"merge_request_channel,omitempty"` + NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` + NoteChannel *string `url:"note_channel,omitempty" json:"note_channel,omitempty"` + NotifyOnlyBrokenPipelines *bool `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"` + PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` + PipelineChannel *string `url:"pipeline_channel,omitempty" json:"pipeline_channel,omitempty"` + PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` + PushChannel *string `url:"push_channel,omitempty" json:"push_channel,omitempty"` + TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` + TagPushChannel *string `url:"tag_push_channel,omitempty" json:"tag_push_channel,omitempty"` + WikiPageEvents *bool `url:"wiki_page_events,omitempty" 
json:"wiki_page_events,omitempty"` + WikiPageChannel *string `url:"wiki_page_channel,omitempty" json:"wiki_page_channel,omitempty"` +} + +// SetDiscordService sets Discord service for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#set-up-discord-notifications +func (s *ServicesService) SetDiscordService(pid interface{}, opt *SetDiscordServiceOptions, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/services/discord", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// DeleteDiscordService deletes Discord service settings for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#disable-discord-notifications +func (s *ServicesService) DeleteDiscordService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/services/discord", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// DroneCIService represents Drone CI service settings. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#drone +type DroneCIService struct { + Service + Properties *DroneCIServiceProperties `json:"properties"` +} + +// DroneCIServiceProperties represents Drone CI specific properties. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#drone +type DroneCIServiceProperties struct { + DroneURL string `json:"drone_url"` + EnableSSLVerification bool `json:"enable_ssl_verification"` +} + +// GetDroneCIService gets Drone CI service settings for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#get-drone-settings +func (s *ServicesService) GetDroneCIService(pid interface{}, options ...RequestOptionFunc) (*DroneCIService, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/services/drone-ci", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + svc := new(DroneCIService) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, resp, err + } + + return svc, resp, nil +} + +// SetDroneCIServiceOptions represents the available SetDroneCIService() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#set-up-drone +type SetDroneCIServiceOptions struct { + Token *string `url:"token,omitempty" json:"token,omitempty"` + DroneURL *string `url:"drone_url,omitempty" json:"drone_url,omitempty"` + EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"` + PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` + MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` + TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` +} + +// SetDroneCIService sets Drone CI service for a project. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#set-up-drone +func (s *ServicesService) SetDroneCIService(pid interface{}, opt *SetDroneCIServiceOptions, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/services/drone-ci", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// DeleteDroneCIService deletes Drone CI service settings for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#disable-drone +func (s *ServicesService) DeleteDroneCIService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/services/drone-ci", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// EmailsOnPushService represents Emails on Push service settings. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#emails-on-push +type EmailsOnPushService struct { + Service + Properties *EmailsOnPushServiceProperties `json:"properties"` +} + +// EmailsOnPushServiceProperties represents Emails on Push specific properties. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#emails-on-push +type EmailsOnPushServiceProperties struct { + Recipients string `json:"recipients"` + DisableDiffs bool `json:"disable_diffs"` + SendFromCommitterEmail bool `json:"send_from_committer_email"` + PushEvents bool `json:"push_events"` + TagPushEvents bool `json:"tag_push_events"` + BranchesToBeNotified string `json:"branches_to_be_notified"` +} + +// GetEmailsOnPushService gets Emails on Push service settings for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#get-emails-on-push-integration-settings +func (s *ServicesService) GetEmailsOnPushService(pid interface{}, options ...RequestOptionFunc) (*EmailsOnPushService, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/integrations/emails-on-push", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + svc := new(EmailsOnPushService) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, resp, err + } + + return svc, resp, nil +} + +// SetEmailsOnPushServiceOptions represents the available SetEmailsOnPushService() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#set-up-emails-on-push +type SetEmailsOnPushServiceOptions struct { + Recipients *string `url:"recipients,omitempty" json:"recipients,omitempty"` + DisableDiffs *bool `url:"disable_diffs,omitempty" json:"disable_diffs,omitempty"` + SendFromCommitterEmail *bool `url:"send_from_committer_email,omitempty" json:"send_from_committer_email,omitempty"` + PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` + TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` + BranchesToBeNotified *string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"` +} + +// SetEmailsOnPushService sets Emails on Push service for a project. 
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#set-up-emails-on-push
+func (s *ServicesService) SetEmailsOnPushService(pid interface{}, opt *SetEmailsOnPushServiceOptions, options ...RequestOptionFunc) (*Response, error) {
+    project, err := parseID(pid)
+    if err != nil {
+        return nil, err
+    }
+    u := fmt.Sprintf("projects/%s/integrations/emails-on-push", PathEscape(project))
+
+    req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+    if err != nil {
+        return nil, err
+    }
+
+    return s.client.Do(req, nil)
+}
+
+// DeleteEmailsOnPushService deletes Emails on Push service settings for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#disable-emails-on-push
+func (s *ServicesService) DeleteEmailsOnPushService(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
+    project, err := parseID(pid)
+    if err != nil {
+        return nil, err
+    }
+    u := fmt.Sprintf("projects/%s/integrations/emails-on-push", PathEscape(project))
+
+    req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+    if err != nil {
+        return nil, err
+    }
+
+    return s.client.Do(req, nil)
+}
+
+// ExternalWikiService represents External Wiki service settings.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#external-wiki
+type ExternalWikiService struct {
+    Service
+    Properties *ExternalWikiServiceProperties `json:"properties"`
+}
+
+// ExternalWikiServiceProperties represents External Wiki specific properties.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#external-wiki
+type ExternalWikiServiceProperties struct {
+    ExternalWikiURL string `json:"external_wiki_url"`
+}
+
+// GetExternalWikiService gets External Wiki service settings for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#get-external-wiki-settings
+func (s *ServicesService) GetExternalWikiService(pid interface{}, options ...RequestOptionFunc) (*ExternalWikiService, *Response, error) {
+    project, err := parseID(pid)
+    if err != nil {
+        return nil, nil, err
+    }
+    u := fmt.Sprintf("projects/%s/services/external-wiki", PathEscape(project))
+
+    req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+    if err != nil {
+        return nil, nil, err
+    }
+
+    svc := new(ExternalWikiService)
+    resp, err := s.client.Do(req, svc)
+    if err != nil {
+        return nil, resp, err
+    }
+
+    return svc, resp, nil
+}
+
+// SetExternalWikiServiceOptions represents the available SetExternalWikiService()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#set-up-an-external-wiki
+type SetExternalWikiServiceOptions struct {
+    ExternalWikiURL *string `url:"external_wiki_url,omitempty" json:"external_wiki_url,omitempty"`
+}
+
+// SetExternalWikiService sets External Wiki service for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#set-up-an-external-wiki
+func (s *ServicesService) SetExternalWikiService(pid interface{}, opt *SetExternalWikiServiceOptions, options ...RequestOptionFunc) (*Response, error) {
+    project, err := parseID(pid)
+    if err != nil {
+        return nil, err
+    }
+    u := fmt.Sprintf("projects/%s/services/external-wiki", PathEscape(project))
+
+    req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+    if err != nil {
+        return nil, err
+    }
+
+    return s.client.Do(req, nil)
+}
+
+// DeleteExternalWikiService deletes External Wiki service for a project.
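+//
+// A minimal sketch (assumes a configured *gitlab.Client named git; the
+// project ID is illustrative):
+//
+//    _, err := git.Services.DeleteExternalWikiService(1)
+//    if err != nil {
+//        log.Fatal(err)
+//    }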
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#disable-an-external-wiki
func (s *ServicesService) DeleteExternalWikiService(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
+    project, err := parseID(pid)
+    if err != nil {
+        return nil, err
+    }
+    u := fmt.Sprintf("projects/%s/services/external-wiki", PathEscape(project))
+
+    req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+    if err != nil {
+        return nil, err
+    }
+
+    return s.client.Do(req, nil)
+}
+
+// GithubService represents Github service settings.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#github
+type GithubService struct {
+    Service
+    Properties *GithubServiceProperties `json:"properties"`
+}
+
+// GithubServiceProperties represents Github specific properties.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#github
+type GithubServiceProperties struct {
+    RepositoryURL string `json:"repository_url"`
+    StaticContext bool   `json:"static_context"`
+}
+
+// GetGithubService gets Github service settings for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#get-github-settings
+func (s *ServicesService) GetGithubService(pid interface{}, options ...RequestOptionFunc) (*GithubService, *Response, error) {
+    project, err := parseID(pid)
+    if err != nil {
+        return nil, nil, err
+    }
+    u := fmt.Sprintf("projects/%s/services/github", PathEscape(project))
+
+    req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+    if err != nil {
+        return nil, nil, err
+    }
+
+    svc := new(GithubService)
+    resp, err := s.client.Do(req, svc)
+    if err != nil {
+        return nil, resp, err
+    }
+
+    return svc, resp, nil
+}
+
+// SetGithubServiceOptions represents the available SetGithubService()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#set-up-github
+type SetGithubServiceOptions struct {
+    Token         *string `url:"token,omitempty" json:"token,omitempty"`
+    RepositoryURL *string `url:"repository_url,omitempty" json:"repository_url,omitempty"`
+    StaticContext *bool   `url:"static_context,omitempty" json:"static_context,omitempty"`
+}
+
+// SetGithubService sets Github service for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#set-up-github
+func (s *ServicesService) SetGithubService(pid interface{}, opt *SetGithubServiceOptions, options ...RequestOptionFunc) (*Response, error) {
+    project, err := parseID(pid)
+    if err != nil {
+        return nil, err
+    }
+    u := fmt.Sprintf("projects/%s/services/github", PathEscape(project))
+
+    req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+    if err != nil {
+        return nil, err
+    }
+
+    return s.client.Do(req, nil)
+}
+
+// DeleteGithubService deletes Github service for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#disable-github
+func (s *ServicesService) DeleteGithubService(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
+    project, err := parseID(pid)
+    if err != nil {
+        return nil, err
+    }
+    u := fmt.Sprintf("projects/%s/services/github", PathEscape(project))
+
+    req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+    if err != nil {
+        return nil, err
+    }
+
+    return s.client.Do(req, nil)
+}
+
+// SlackApplication represents GitLab for Slack app settings.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#gitlab-for-slack-app
+type SlackApplication struct {
+    Service
+    Properties *SlackApplicationProperties `json:"properties"`
+}
+
+// SlackApplicationProperties represents GitLab for Slack app specific
+// properties.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#gitlab-for-slack-app
+type SlackApplicationProperties struct {
+    Channel                   string `json:"channel"`
+    NotifyOnlyBrokenPipelines bool   `json:"notify_only_broken_pipelines"`
+    BranchesToBeNotified      string `json:"branches_to_be_notified"`
+    AlertEvents               bool   `json:"alert_events"`
+    IssuesEvents              bool   `json:"issues_events"`
+    ConfidentialIssuesEvents  bool   `json:"confidential_issues_events"`
+    MergeRequestsEvents       bool   `json:"merge_requests_events"`
+    NoteEvents                bool   `json:"note_events"`
+    ConfidentialNoteEvents    bool   `json:"confidential_note_events"`
+    DeploymentEvents          bool   `json:"deployment_events"`
+    IncidentsEvents           bool   `json:"incidents_events"`
+    PipelineEvents            bool   `json:"pipeline_events"`
+    PushEvents                bool   `json:"push_events"`
+    TagPushEvents             bool   `json:"tag_push_events"`
+    VulnerabilityEvents       bool   `json:"vulnerability_events"`
+    WikiPageEvents            bool   `json:"wiki_page_events"`
+
+    // Deprecated: This parameter has been replaced with BranchesToBeNotified.
+    NotifyOnlyDefaultBranch bool `json:"notify_only_default_branch"`
+}
+
+// GetSlackApplication gets the GitLab for Slack app integration settings for a
+// project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#get-gitlab-for-slack-app-settings
+func (s *ServicesService) GetSlackApplication(pid interface{}, options ...RequestOptionFunc) (*SlackApplication, *Response, error) {
+    project, err := parseID(pid)
+    if err != nil {
+        return nil, nil, err
+    }
+    u := fmt.Sprintf("projects/%s/integrations/gitlab-slack-application", PathEscape(project))
+
+    req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+    if err != nil {
+        return nil, nil, err
+    }
+
+    svc := new(SlackApplication)
+    resp, err := s.client.Do(req, svc)
+    if err != nil {
+        return nil, resp, err
+    }
+
+    return svc, resp, nil
+}
+
+// SetSlackApplicationOptions represents the available SetSlackApplication()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#set-up-gitlab-for-slack-app
+type SetSlackApplicationOptions struct {
+    Channel                   *string `url:"channel,omitempty" json:"channel,omitempty"`
+    NotifyOnlyBrokenPipelines *bool   `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"`
+    BranchesToBeNotified      *string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"`
+    AlertEvents               *bool   `url:"alert_events,omitempty" json:"alert_events,omitempty"`
+    IssuesEvents              *bool   `url:"issues_events,omitempty" json:"issues_events,omitempty"`
+    ConfidentialIssuesEvents  *bool   `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"`
+    MergeRequestsEvents       *bool   `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"`
+    NoteEvents                *bool   `url:"note_events,omitempty" json:"note_events,omitempty"`
+    ConfidentialNoteEvents    *bool   `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"`
+    DeploymentEvents          *bool   `url:"deployment_events,omitempty" json:"deployment_events,omitempty"`
+    IncidentsEvents           *bool   `url:"incidents_events,omitempty" json:"incidents_events,omitempty"`
+    PipelineEvents            *bool   `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"`
+    PushEvents                *bool   `url:"push_events,omitempty" json:"push_events,omitempty"`
+    TagPushEvents             *bool   `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"`
+    VulnerabilityEvents       *bool   `url:"vulnerability_events,omitempty" json:"vulnerability_events,omitempty"`
+    WikiPageEvents            *bool   `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"`
+
+    // Deprecated: This parameter has been replaced with BranchesToBeNotified.
+    NotifyOnlyDefaultBranch *bool `url:"notify_only_default_branch,omitempty" json:"notify_only_default_branch,omitempty"`
+}
+
+// SetSlackApplication updates the GitLab for Slack app integration for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#set-up-gitlab-for-slack-app
+func (s *ServicesService) SetSlackApplication(pid interface{}, opt *SetSlackApplicationOptions, options ...RequestOptionFunc) (*Response, error) {
+    project, err := parseID(pid)
+    if err != nil {
+        return nil, err
+    }
+    u := fmt.Sprintf("projects/%s/integrations/gitlab-slack-application", PathEscape(project))
+
+    req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+    if err != nil {
+        return nil, err
+    }
+
+    return s.client.Do(req, nil)
+}
+
+// DisableSlackApplication disables the GitLab for Slack app integration for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#disable-gitlab-for-slack-app
+func (s *ServicesService) DisableSlackApplication(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
+    project, err := parseID(pid)
+    if err != nil {
+        return nil, err
+    }
+    u := fmt.Sprintf("projects/%s/integrations/gitlab-slack-application", PathEscape(project))
+
+    req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+    if err != nil {
+        return nil, err
+    }
+
+    return s.client.Do(req, nil)
+}
+
+// SetGitLabCIServiceOptions represents the available SetGitLabCIService()
+// options.
+
+// SetGitLabCIServiceOptions represents the available SetGitLabCIService()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#edit-gitlab-ci-service
+type SetGitLabCIServiceOptions struct {
+	Token      *string `url:"token,omitempty" json:"token,omitempty"`
+	ProjectURL *string `url:"project_url,omitempty" json:"project_url,omitempty"`
+}
+
+// SetGitLabCIService sets GitLab CI service for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#edit-gitlab-ci-service
+func (s *ServicesService) SetGitLabCIService(pid interface{}, opt *SetGitLabCIServiceOptions, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/services/gitlab-ci", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// DeleteGitLabCIService deletes GitLab CI service settings for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#delete-gitlab-ci-service
+func (s *ServicesService) DeleteGitLabCIService(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/services/gitlab-ci", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// SetHipChatServiceOptions represents the available SetHipChatService()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#edit-hipchat-service
+type SetHipChatServiceOptions struct {
+	Token *string `url:"token,omitempty" json:"token,omitempty"`
+	Room  *string `url:"room,omitempty" json:"room,omitempty"`
+}
+
+// SetHipChatService sets HipChat service for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#edit-hipchat-service
+func (s *ServicesService) SetHipChatService(pid interface{}, opt *SetHipChatServiceOptions, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/services/hipchat", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// DeleteHipChatService deletes HipChat service for project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#delete-hipchat-service
+func (s *ServicesService) DeleteHipChatService(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/services/hipchat", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// JenkinsCIService represents Jenkins CI service settings.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#jenkins
+type JenkinsCIService struct {
+	Service
+	Properties *JenkinsCIServiceProperties `json:"properties"`
+}
+
+// JenkinsCIServiceProperties represents Jenkins CI specific properties.
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#jenkins +type JenkinsCIServiceProperties struct { + URL string `json:"jenkins_url"` + EnableSSLVerification bool `json:"enable_ssl_verification"` + ProjectName string `json:"project_name"` + Username string `json:"username"` +} + +// GetJenkinsCIService gets Jenkins CI service settings for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#get-jenkins-settings +func (s *ServicesService) GetJenkinsCIService(pid interface{}, options ...RequestOptionFunc) (*JenkinsCIService, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/services/jenkins", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + svc := new(JenkinsCIService) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, resp, err + } + + return svc, resp, nil +} + +// SetJenkinsCIServiceOptions represents the available SetJenkinsCIService() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#jenkins +type SetJenkinsCIServiceOptions struct { + URL *string `url:"jenkins_url,omitempty" json:"jenkins_url,omitempty"` + EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"` + ProjectName *string `url:"project_name,omitempty" json:"project_name,omitempty"` + Username *string `url:"username,omitempty" json:"username,omitempty"` + Password *string `url:"password,omitempty" json:"password,omitempty"` + PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` + MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` + TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` +} + +// SetJenkinsCIService sets Jenkins service for a project +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#set-up-jenkins +func (s *ServicesService) SetJenkinsCIService(pid interface{}, opt *SetJenkinsCIServiceOptions, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/services/jenkins", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// DeleteJenkinsCIService deletes Jenkins CI service for project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#disable-jenkins +func (s *ServicesService) DeleteJenkinsCIService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/services/jenkins", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// JiraService represents Jira service settings. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#jira +type JiraService struct { + Service + Properties *JiraServiceProperties `json:"properties"` +} + +// JiraServiceProperties represents Jira specific properties. 
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#jira
+type JiraServiceProperties struct {
+	URL                          string   `json:"url"`
+	APIURL                       string   `json:"api_url"`
+	Username                     string   `json:"username"`
+	Password                     string   `json:"password"`
+	Active                       bool     `json:"active"`
+	JiraAuthType                 int      `json:"jira_auth_type"`
+	JiraIssuePrefix              string   `json:"jira_issue_prefix"`
+	JiraIssueRegex               string   `json:"jira_issue_regex"`
+	JiraIssueTransitionAutomatic bool     `json:"jira_issue_transition_automatic"`
+	JiraIssueTransitionID        string   `json:"jira_issue_transition_id"`
+	CommitEvents                 bool     `json:"commit_events"`
+	MergeRequestsEvents          bool     `json:"merge_requests_events"`
+	CommentOnEventEnabled        bool     `json:"comment_on_event_enabled"`
+	IssuesEnabled                bool     `json:"issues_enabled"`
+	ProjectKeys                  []string `json:"project_keys"`
+	UseInheritedSettings         bool     `json:"use_inherited_settings"`
+
+	// Deprecated: This parameter was removed in GitLab 17.0.
+	ProjectKey string `json:"project_key"`
+}
+
+// UnmarshalJSON decodes the Jira Service Properties.
+//
+// This allows JiraIssueTransitionID to be decoded from both a string
+// (GitLab 11.9 and later) and a float64 (earlier releases).
+func (p *JiraServiceProperties) UnmarshalJSON(b []byte) error {
+	type Alias JiraServiceProperties
+	raw := struct {
+		*Alias
+		JiraIssueTransitionID interface{} `json:"jira_issue_transition_id"`
+	}{
+		Alias: (*Alias)(p),
+	}
+
+	if err := json.Unmarshal(b, &raw); err != nil {
+		return err
+	}
+
+	switch id := raw.JiraIssueTransitionID.(type) {
+	case nil:
+		// No action needed.
+	case string:
+		p.JiraIssueTransitionID = id
+	case float64:
+		p.JiraIssueTransitionID = strconv.Itoa(int(id))
+	default:
+		return fmt.Errorf("failed to unmarshal JiraTransitionID of type: %T", id)
+	}
+
+	return nil
+}
+
+// GetJiraService gets Jira service settings for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#get-jira-service-settings
+func (s *ServicesService) GetJiraService(pid interface{}, options ...RequestOptionFunc) (*JiraService, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/integrations/jira", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	svc := new(JiraService)
+	resp, err := s.client.Do(req, svc)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return svc, resp, nil
+}
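+
+// Decoding sketch for the UnmarshalJSON behaviour above (hypothetical
+// payloads, error values ignored for brevity): both wire formats end up as
+// the same string value.
+//
+//	var p JiraServiceProperties
+//	_ = json.Unmarshal([]byte(`{"jira_issue_transition_id": "2"}`), &p)
+//	// p.JiraIssueTransitionID == "2" (string form, GitLab 11.9 and later)
+//	_ = json.Unmarshal([]byte(`{"jira_issue_transition_id": 2}`), &p)
+//	// p.JiraIssueTransitionID == "2" (numeric form, earlier releases)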
+
+// SetJiraServiceOptions represents the available SetJiraService()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#edit-jira-service
+type SetJiraServiceOptions struct {
+	URL                          *string   `url:"url,omitempty" json:"url,omitempty"`
+	APIURL                       *string   `url:"api_url,omitempty" json:"api_url,omitempty"`
+	Username                     *string   `url:"username,omitempty" json:"username,omitempty"`
+	Password                     *string   `url:"password,omitempty" json:"password,omitempty"`
+	Active                       *bool     `url:"active,omitempty" json:"active,omitempty"`
+	JiraAuthType                 *int      `url:"jira_auth_type,omitempty" json:"jira_auth_type,omitempty"`
+	JiraIssuePrefix              *string   `url:"jira_issue_prefix,omitempty" json:"jira_issue_prefix,omitempty"`
+	JiraIssueRegex               *string   `url:"jira_issue_regex,omitempty" json:"jira_issue_regex,omitempty"`
+	JiraIssueTransitionAutomatic *bool     `url:"jira_issue_transition_automatic,omitempty" json:"jira_issue_transition_automatic,omitempty"`
+	JiraIssueTransitionID        *string   `url:"jira_issue_transition_id,omitempty" json:"jira_issue_transition_id,omitempty"`
+	CommitEvents                 *bool     `url:"commit_events,omitempty" json:"commit_events,omitempty"`
+	MergeRequestsEvents          *bool     `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"`
+	CommentOnEventEnabled        *bool     `url:"comment_on_event_enabled,omitempty" json:"comment_on_event_enabled,omitempty"`
+	IssuesEnabled                *bool     `url:"issues_enabled,omitempty" json:"issues_enabled,omitempty"`
+	ProjectKeys                  *[]string `url:"project_keys,comma,omitempty" json:"project_keys,omitempty"`
+	UseInheritedSettings         *bool     `url:"use_inherited_settings,omitempty" json:"use_inherited_settings,omitempty"`
+
+	// Deprecated: This parameter was removed in GitLab 17.0.
+	ProjectKey *string `url:"project_key,omitempty" json:"project_key,omitempty"`
+}
+
+// SetJiraService sets Jira service for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#edit-jira-service
+func (s *ServicesService) SetJiraService(pid interface{}, opt *SetJiraServiceOptions, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/integrations/jira", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// DeleteJiraService deletes Jira service for project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#delete-jira-service
+func (s *ServicesService) DeleteJiraService(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/integrations/jira", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// MattermostService represents Mattermost service settings.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#mattermost-notifications
+type MattermostService struct {
+	Service
+	Properties *MattermostServiceProperties `json:"properties"`
+}
+
+// MattermostServiceProperties represents Mattermost specific properties.
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#mattermost-notifications +type MattermostServiceProperties struct { + WebHook string `json:"webhook"` + Username string `json:"username"` + Channel string `json:"channel"` + NotifyOnlyBrokenPipelines BoolValue `json:"notify_only_broken_pipelines"` + BranchesToBeNotified string `json:"branches_to_be_notified"` + ConfidentialIssueChannel string `json:"confidential_issue_channel"` + ConfidentialNoteChannel string `json:"confidential_note_channel"` + IssueChannel string `json:"issue_channel"` + MergeRequestChannel string `json:"merge_request_channel"` + NoteChannel string `json:"note_channel"` + TagPushChannel string `json:"tag_push_channel"` + PipelineChannel string `json:"pipeline_channel"` + PushChannel string `json:"push_channel"` + VulnerabilityChannel string `json:"vulnerability_channel"` + WikiPageChannel string `json:"wiki_page_channel"` +} + +// GetMattermostService gets Mattermost service settings for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#get-slack-service-settings +func (s *ServicesService) GetMattermostService(pid interface{}, options ...RequestOptionFunc) (*MattermostService, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/services/mattermost", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + svc := new(MattermostService) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, resp, err + } + + return svc, resp, nil +} + +// SetMattermostServiceOptions represents the available SetMattermostService() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#createedit-mattermost-notifications-service +type SetMattermostServiceOptions struct { + WebHook *string `url:"webhook,omitempty" json:"webhook,omitempty"` + Username *string `url:"username,omitempty" json:"username,omitempty"` + Channel *string `url:"channel,omitempty" json:"channel,omitempty"` + NotifyOnlyBrokenPipelines *bool `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"` + BranchesToBeNotified *string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"` + PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` + IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` + ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` + MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` + TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` + NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` + ConfidentialNoteChannel *string `url:"confidential_note_channel,omitempty" json:"confidential_note_channel,omitempty"` + PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` + WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` + PushChannel *string `url:"push_channel,omitempty" json:"push_channel,omitempty"` + IssueChannel *string `url:"issue_channel,omitempty" json:"issue_channel,omitempty"` + ConfidentialIssueChannel *string `url:"confidential_issue_channel,omitempty" json:"confidential_issue_channel,omitempty"` + MergeRequestChannel 
*string `url:"merge_request_channel,omitempty" json:"merge_request_channel,omitempty"`
+	NoteChannel               *string `url:"note_channel,omitempty" json:"note_channel,omitempty"`
+	ConfidentialNoteEvents    *bool   `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"`
+	TagPushChannel            *string `url:"tag_push_channel,omitempty" json:"tag_push_channel,omitempty"`
+	PipelineChannel           *string `url:"pipeline_channel,omitempty" json:"pipeline_channel,omitempty"`
+	WikiPageChannel           *string `url:"wiki_page_channel,omitempty" json:"wiki_page_channel,omitempty"`
+}
+
+// MattermostSlashCommandsService represents Mattermost slash commands settings.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#mattermost-slash-commands
+type MattermostSlashCommandsService struct {
+	Service
+	Properties *MattermostSlashCommandsProperties `json:"properties"`
+}
+
+// MattermostSlashCommandsProperties represents Mattermost slash commands specific properties.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#mattermost-slash-commands
+type MattermostSlashCommandsProperties struct {
+	Token    string `json:"token"`
+	Username string `json:"username,omitempty"`
+}
+
+// GetMattermostSlashCommandsService gets Mattermost slash commands service settings for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#get-mattermost-slash-command-integration-settings
+func (s *ServicesService) GetMattermostSlashCommandsService(pid interface{}, options ...RequestOptionFunc) (*MattermostSlashCommandsService, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/services/mattermost-slash-commands", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	svc := new(MattermostSlashCommandsService)
+	resp, err := s.client.Do(req, svc)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return svc, resp, nil
+}
+
+// SetMattermostSlashCommandsServiceOptions represents the available
+// SetMattermostSlashCommandsService() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#createedit-mattermost-slash-command-integration
+type SetMattermostSlashCommandsServiceOptions struct {
+	Token    *string `url:"token,omitempty" json:"token,omitempty"`
+	Username *string `url:"username,omitempty" json:"username,omitempty"`
+}
+
+// SetMattermostSlashCommandsService sets Mattermost slash commands service for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#createedit-mattermost-slash-command-integration
+func (s *ServicesService) SetMattermostSlashCommandsService(pid interface{}, opt *SetMattermostSlashCommandsServiceOptions, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/services/mattermost-slash-commands", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// DeleteMattermostSlashCommandsService deletes Mattermost slash commands service for project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#disable-mattermost-slash-command-integration
+func (s *ServicesService) DeleteMattermostSlashCommandsService(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/services/mattermost-slash-commands", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// SetMattermostService sets Mattermost service for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#createedit-mattermost-notifications-service
+func (s *ServicesService) SetMattermostService(pid interface{}, opt *SetMattermostServiceOptions, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/services/mattermost", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// DeleteMattermostService deletes Mattermost service for project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#delete-mattermost-notifications-service
+func (s *ServicesService) DeleteMattermostService(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/services/mattermost", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// MicrosoftTeamsService represents Microsoft Teams service settings.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#microsoft-teams
+type MicrosoftTeamsService struct {
+	Service
+	Properties *MicrosoftTeamsServiceProperties `json:"properties"`
+}
+
+// MicrosoftTeamsServiceProperties represents Microsoft Teams specific properties.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#microsoft-teams
+type MicrosoftTeamsServiceProperties struct {
+	WebHook                   string    `json:"webhook"`
+	NotifyOnlyBrokenPipelines BoolValue `json:"notify_only_broken_pipelines"`
+	BranchesToBeNotified      string    `json:"branches_to_be_notified"`
+	IssuesEvents              BoolValue `json:"issues_events"`
+	ConfidentialIssuesEvents  BoolValue `json:"confidential_issues_events"`
+	MergeRequestsEvents       BoolValue `json:"merge_requests_events"`
+	TagPushEvents             BoolValue `json:"tag_push_events"`
+	NoteEvents                BoolValue `json:"note_events"`
+	ConfidentialNoteEvents    BoolValue `json:"confidential_note_events"`
+	PipelineEvents            BoolValue `json:"pipeline_events"`
+	WikiPageEvents            BoolValue `json:"wiki_page_events"`
+}
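+
+// Note on BoolValue: several properties above are typed BoolValue rather
+// than bool. BoolValue is a helper defined elsewhere in this package whose
+// UnmarshalJSON also accepts the quoted forms ("true", "false") that some
+// GitLab versions return. A sketch with a hypothetical payload:
+//
+//	var props MicrosoftTeamsServiceProperties
+//	payload := []byte(`{"notify_only_broken_pipelines": "true"}`)
+//	if err := json.Unmarshal(payload, &props); err != nil {
+//		log.Fatal(err)
+//	}
+//	// bool(props.NotifyOnlyBrokenPipelines) == true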
+
+// GetMicrosoftTeamsService gets Microsoft Teams service settings for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#get-microsoft-teams-service-settings
+func (s *ServicesService) GetMicrosoftTeamsService(pid interface{}, options ...RequestOptionFunc) (*MicrosoftTeamsService, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/services/microsoft-teams", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	svc := new(MicrosoftTeamsService)
+	resp, err := s.client.Do(req, svc)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return svc, resp, nil
+}
+
+// SetMicrosoftTeamsServiceOptions represents the available SetMicrosoftTeamsService()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#create-edit-microsoft-teams-service
+type SetMicrosoftTeamsServiceOptions struct {
+	WebHook                   *string `url:"webhook,omitempty" json:"webhook,omitempty"`
+	NotifyOnlyBrokenPipelines *bool   `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"`
+	BranchesToBeNotified      *string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"`
+	PushEvents                *bool   `url:"push_events,omitempty" json:"push_events,omitempty"`
+	IssuesEvents              *bool   `url:"issues_events,omitempty" json:"issues_events,omitempty"`
+	ConfidentialIssuesEvents  *bool   `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"`
+	MergeRequestsEvents       *bool   `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"`
+	TagPushEvents             *bool   `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"`
+	NoteEvents                *bool   `url:"note_events,omitempty" json:"note_events,omitempty"`
+	ConfidentialNoteEvents    *bool   `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"`
+	PipelineEvents            *bool   `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"`
+	WikiPageEvents            *bool   `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"`
+}
+
+// SetMicrosoftTeamsService sets Microsoft Teams service for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#create-edit-microsoft-teams-service
+func (s *ServicesService) SetMicrosoftTeamsService(pid interface{}, opt *SetMicrosoftTeamsServiceOptions, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/services/microsoft-teams", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// DeleteMicrosoftTeamsService deletes Microsoft Teams service for project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#delete-microsoft-teams-service
+func (s *ServicesService) DeleteMicrosoftTeamsService(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/services/microsoft-teams", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// PipelinesEmailService represents Pipelines Email service settings.
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#pipeline-emails +type PipelinesEmailService struct { + Service + Properties *PipelinesEmailProperties `json:"properties"` +} + +// PipelinesEmailProperties represents PipelinesEmail specific properties. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#pipeline-emails +type PipelinesEmailProperties struct { + Recipients string `json:"recipients"` + NotifyOnlyBrokenPipelines BoolValue `json:"notify_only_broken_pipelines"` + NotifyOnlyDefaultBranch BoolValue `json:"notify_only_default_branch"` + BranchesToBeNotified string `json:"branches_to_be_notified"` +} + +// GetPipelinesEmailService gets Pipelines Email service settings for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#get-pipeline-emails-service-settings +func (s *ServicesService) GetPipelinesEmailService(pid interface{}, options ...RequestOptionFunc) (*PipelinesEmailService, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/services/pipelines-email", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + svc := new(PipelinesEmailService) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, resp, err + } + + return svc, resp, nil +} + +// SetPipelinesEmailServiceOptions represents the available +// SetPipelinesEmailService() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#pipeline-emails +type SetPipelinesEmailServiceOptions struct { + Recipients *string `url:"recipients,omitempty" json:"recipients,omitempty"` + NotifyOnlyBrokenPipelines *bool `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"` + NotifyOnlyDefaultBranch *bool `url:"notify_only_default_branch,omitempty" json:"notify_only_default_branch,omitempty"` + AddPusher *bool `url:"add_pusher,omitempty" json:"add_pusher,omitempty"` + BranchesToBeNotified *string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"` + PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` +} + +// SetPipelinesEmailService sets Pipelines Email service for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#pipeline-emails +func (s *ServicesService) SetPipelinesEmailService(pid interface{}, opt *SetPipelinesEmailServiceOptions, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/services/pipelines-email", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// DeletePipelinesEmailService deletes Pipelines Email service settings for a project. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#delete-pipeline-emails-service +func (s *ServicesService) DeletePipelinesEmailService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/services/pipelines-email", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// PrometheusService represents Prometheus service settings. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#prometheus +type PrometheusService struct { + Service + Properties *PrometheusServiceProperties `json:"properties"` +} + +// PrometheusServiceProperties represents Prometheus specific properties. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#prometheus +type PrometheusServiceProperties struct { + APIURL string `json:"api_url"` + GoogleIAPAudienceClientID string `json:"google_iap_audience_client_id"` + GoogleIAPServiceAccountJSON string `json:"google_iap_service_account_json"` +} + +// GetPrometheusService gets Prometheus service settings for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#get-prometheus-service-settings +func (s *ServicesService) GetPrometheusService(pid interface{}, options ...RequestOptionFunc) (*PrometheusService, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/services/prometheus", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + svc := new(PrometheusService) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, resp, err + } + + return svc, resp, nil +} + +// SetPrometheusServiceOptions represents the available SetPrometheusService() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#createedit-prometheus-service +type SetPrometheusServiceOptions struct { + APIURL *string `url:"api_url,omitempty" json:"api_url,omitempty"` + GoogleIAPAudienceClientID *string `url:"google_iap_audience_client_id,omitempty" json:"google_iap_audience_client_id,omitempty"` + GoogleIAPServiceAccountJSON *string `url:"google_iap_service_account_json,omitempty" json:"google_iap_service_account_json,omitempty"` +} + +// SetPrometheusService sets Prometheus service for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#createedit-prometheus-service +func (s *ServicesService) SetPrometheusService(pid interface{}, opt *SetPrometheusServiceOptions, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/services/prometheus", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// DeletePrometheusService deletes Prometheus service settings for a project. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#delete-prometheus-service +func (s *ServicesService) DeletePrometheusService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/services/prometheus", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// SlackService represents Slack service settings. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#slack +type SlackService struct { + Service + Properties *SlackServiceProperties `json:"properties"` +} + +// SlackServiceProperties represents Slack specific properties. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#slack +type SlackServiceProperties struct { + WebHook string `json:"webhook"` + Username string `json:"username"` + Channel string `json:"channel"` + NotifyOnlyBrokenPipelines BoolValue `json:"notify_only_broken_pipelines"` + NotifyOnlyDefaultBranch BoolValue `json:"notify_only_default_branch"` + BranchesToBeNotified string `json:"branches_to_be_notified"` + AlertChannel string `json:"alert_channel"` + ConfidentialIssueChannel string `json:"confidential_issue_channel"` + ConfidentialNoteChannel string `json:"confidential_note_channel"` + DeploymentChannel string `json:"deployment_channel"` + IssueChannel string `json:"issue_channel"` + MergeRequestChannel string `json:"merge_request_channel"` + NoteChannel string `json:"note_channel"` + TagPushChannel string `json:"tag_push_channel"` + PipelineChannel string `json:"pipeline_channel"` + PushChannel string `json:"push_channel"` + VulnerabilityChannel string `json:"vulnerability_channel"` + WikiPageChannel string `json:"wiki_page_channel"` +} + +// GetSlackService gets Slack service settings for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#get-slack-service-settings +func (s *ServicesService) GetSlackService(pid interface{}, options ...RequestOptionFunc) (*SlackService, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/services/slack", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + svc := new(SlackService) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, resp, err + } + + return svc, resp, nil +} + +// SetSlackServiceOptions represents the available SetSlackService() +// options. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#edit-slack-service +type SetSlackServiceOptions struct { + WebHook *string `url:"webhook,omitempty" json:"webhook,omitempty"` + Username *string `url:"username,omitempty" json:"username,omitempty"` + Channel *string `url:"channel,omitempty" json:"channel,omitempty"` + NotifyOnlyBrokenPipelines *bool `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"` + NotifyOnlyDefaultBranch *bool `url:"notify_only_default_branch,omitempty" json:"notify_only_default_branch,omitempty"` + BranchesToBeNotified *string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"` + AlertChannel *string `url:"alert_channel,omitempty" json:"alert_channel,omitempty"` + AlertEvents *bool `url:"alert_events,omitempty" json:"alert_events,omitempty"` + ConfidentialIssueChannel *string `url:"confidential_issue_channel,omitempty" json:"confidential_issue_channel,omitempty"` + ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` + ConfidentialNoteChannel *string `url:"confidential_note_channel,omitempty" json:"confidential_note_channel,omitempty"` + ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` + DeploymentChannel *string `url:"deployment_channel,omitempty" json:"deployment_channel,omitempty"` + DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"` + IssueChannel *string `url:"issue_channel,omitempty" json:"issue_channel,omitempty"` + IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` + MergeRequestChannel *string `url:"merge_request_channel,omitempty" json:"merge_request_channel,omitempty"` + MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` + NoteChannel *string `url:"note_channel,omitempty" json:"note_channel,omitempty"` + NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` + PipelineChannel *string `url:"pipeline_channel,omitempty" json:"pipeline_channel,omitempty"` + PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` + PushChannel *string `url:"push_channel,omitempty" json:"push_channel,omitempty"` + PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` + TagPushChannel *string `url:"tag_push_channel,omitempty" json:"tag_push_channel,omitempty"` + TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` + WikiPageChannel *string `url:"wiki_page_channel,omitempty" json:"wiki_page_channel,omitempty"` + WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` +} + +// SetSlackService sets Slack service for a project +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#edit-slack-service +func (s *ServicesService) SetSlackService(pid interface{}, opt *SetSlackServiceOptions, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/services/slack", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// DeleteSlackService deletes Slack service for project. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#delete-slack-service +func (s *ServicesService) DeleteSlackService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/services/slack", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// SlackSlashCommandsService represents Slack slash commands settings. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#slack-slash-commands +type SlackSlashCommandsService struct { + Service + Properties *SlackSlashCommandsProperties `json:"properties"` +} + +// SlackSlashCommandsProperties represents Slack slash commands specific properties. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#slack-slash-commands +type SlackSlashCommandsProperties struct { + Token string `json:"token"` +} + +// GetSlackSlashCommandsService gets Slack slash commands service settings for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#get-slack-slash-command-integration-settings +func (s *ServicesService) GetSlackSlashCommandsService(pid interface{}, options ...RequestOptionFunc) (*SlackSlashCommandsService, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/services/slack-slash-commands", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + svc := new(SlackSlashCommandsService) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, resp, err + } + + return svc, resp, nil +} + +// SetSlackSlashCommandsServiceOptions represents the available SetSlackSlashCommandsService() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#createedit-slack-slash-command-service +type SetSlackSlashCommandsServiceOptions struct { + Token *string `url:"token,omitempty" json:"token,omitempty"` +} + +// SetSlackSlashCommandsService sets Slack slash commands service for a project +// +// GitLab API docs: +// https://docs.gitlab.com/13.12/ee/api/integrations.html#createedit-slack-slash-command-service +func (s *ServicesService) SetSlackSlashCommandsService(pid interface{}, opt *SetSlackSlashCommandsServiceOptions, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/services/slack-slash-commands", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// DeleteSlackSlashCommandsService deletes Slack slash commands service for project. 
+//
+// GitLab API docs:
+// https://docs.gitlab.com/13.12/ee/api/integrations.html#delete-slack-slash-command-service
+func (s *ServicesService) DeleteSlackSlashCommandsService(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/services/slack-slash-commands", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// TelegramService represents Telegram service settings.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#telegram
+type TelegramService struct {
+	Service
+	Properties *TelegramServiceProperties `json:"properties"`
+}
+
+// TelegramServiceProperties represents Telegram specific properties.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#set-up-telegram
+type TelegramServiceProperties struct {
+	Room                      string `json:"room"`
+	NotifyOnlyBrokenPipelines bool   `json:"notify_only_broken_pipelines"`
+	BranchesToBeNotified      string `json:"branches_to_be_notified"`
+}
+
+// GetTelegramService gets Telegram service settings for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/integrations.html#get-telegram-settings
+func (s *ServicesService) GetTelegramService(pid interface{}, options ...RequestOptionFunc) (*TelegramService, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/services/telegram", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	svc := new(TelegramService)
+	resp, err := s.client.Do(req, svc)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return svc, resp, nil
+}
+
+// SetTelegramServiceOptions represents the available SetTelegramService()
+// options.
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#set-up-telegram +type SetTelegramServiceOptions struct { + Token *string `url:"token,omitempty" json:"token,omitempty"` + Room *string `url:"room,omitempty" json:"room,omitempty"` + NotifyOnlyBrokenPipelines *bool `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"` + BranchesToBeNotified *string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"` + PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` + IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` + ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` + MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` + TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` + NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` + ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` + PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` + WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` +} + +// SetTelegramService sets Telegram service for a project +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#set-up-telegram +func (s *ServicesService) SetTelegramService(pid interface{}, opt *SetTelegramServiceOptions, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/services/telegram", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// DeleteTelegramService deletes Telegram service for project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#disable-telegram +func (s *ServicesService) DeleteTelegramService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/services/telegram", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// YouTrackService represents YouTrack service settings. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#youtrack +type YouTrackService struct { + Service + Properties *YouTrackServiceProperties `json:"properties"` +} + +// YouTrackServiceProperties represents YouTrack specific properties. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#youtrack +type YouTrackServiceProperties struct { + IssuesURL string `json:"issues_url"` + ProjectURL string `json:"project_url"` + Description string `json:"description"` + PushEvents bool `json:"push_events"` +} + +// GetYouTrackService gets YouTrack service settings for a project. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#get-youtrack-service-settings +func (s *ServicesService) GetYouTrackService(pid interface{}, options ...RequestOptionFunc) (*YouTrackService, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/services/youtrack", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + svc := new(YouTrackService) + resp, err := s.client.Do(req, svc) + if err != nil { + return nil, resp, err + } + + return svc, resp, nil +} + +// SetYouTrackServiceOptions represents the available SetYouTrackService() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#createedit-youtrack-service +type SetYouTrackServiceOptions struct { + IssuesURL *string `url:"issues_url,omitempty" json:"issues_url,omitempty"` + ProjectURL *string `url:"project_url,omitempty" json:"project_url,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` +} + +// SetYouTrackService sets YouTrack service for a project +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#createedit-youtrack-service +func (s *ServicesService) SetYouTrackService(pid interface{}, opt *SetYouTrackServiceOptions, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/services/youtrack", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// DeleteYouTrackService deletes YouTrack service settings for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/integrations.html#delete-youtrack-service +func (s *ServicesService) DeleteYouTrackService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/services/youtrack", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/settings.go b/vendor/github.com/xanzy/go-gitlab/settings.go new file mode 100644 index 0000000000..f4d67a4f04 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/settings.go @@ -0,0 +1,965 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "encoding/json" + "net/http" + "time" +) + +// SettingsService handles communication with the application SettingsService +// related methods of the GitLab API. 
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/settings.html +type SettingsService struct { + client *Client +} + +// Settings represents the GitLab application settings. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/settings.html +// +// The available parameters have been modeled directly after the code, as the +// documentation seems to be inaccurate. +// +// https://gitlab.com/gitlab-org/gitlab/-/blob/v14.9.3-ee/lib/api/settings.rb +// https://gitlab.com/gitlab-org/gitlab/-/blob/v14.9.3-ee/lib/api/entities/application_setting.rb#L5 +// https://gitlab.com/gitlab-org/gitlab/-/blob/v14.9.3-ee/app/helpers/application_settings_helper.rb#L192 +// https://gitlab.com/gitlab-org/gitlab/-/blob/v14.9.3-ee/ee/lib/ee/api/helpers/settings_helpers.rb#L10 +// https://gitlab.com/gitlab-org/gitlab/-/blob/v14.9.3-ee/ee/app/helpers/ee/application_settings_helper.rb#L20 +type Settings struct { + ID int `json:"id"` + AbuseNotificationEmail string `json:"abuse_notification_email"` + AdminMode bool `json:"admin_mode"` + AfterSignOutPath string `json:"after_sign_out_path"` + AfterSignUpText string `json:"after_sign_up_text"` + AkismetAPIKey string `json:"akismet_api_key"` + AkismetEnabled bool `json:"akismet_enabled"` + AllowAccountDeletion bool `json:"allow_account_deletion"` + AllowGroupOwnersToManageLDAP bool `json:"allow_group_owners_to_manage_ldap"` + AllowLocalRequestsFromSystemHooks bool `json:"allow_local_requests_from_system_hooks"` + AllowLocalRequestsFromWebHooksAndServices bool `json:"allow_local_requests_from_web_hooks_and_services"` + AllowProjectCreationForGuestAndBelow bool `json:"allow_project_creation_for_guest_and_below"` + AllowRunnerRegistrationToken bool `json:"allow_runner_registration_token"` + ArchiveBuildsInHumanReadable string `json:"archive_builds_in_human_readable"` + ASCIIDocMaxIncludes int `json:"asciidoc_max_includes"` + AssetProxyAllowlist []string `json:"asset_proxy_allowlist"` + AssetProxyEnabled bool `json:"asset_proxy_enabled"` + AssetProxyURL string `json:"asset_proxy_url"` + AssetProxySecretKey string `json:"asset_proxy_secret_key"` + AuthorizedKeysEnabled bool `json:"authorized_keys_enabled"` + AutoBanUserOnExcessiveProjectsDownload bool `json:"auto_ban_user_on_excessive_projects_download"` + AutoDevOpsDomain string `json:"auto_devops_domain"` + AutoDevOpsEnabled bool `json:"auto_devops_enabled"` + AutomaticPurchasedStorageAllocation bool `json:"automatic_purchased_storage_allocation"` + BulkImportConcurrentPipelineBatchLimit int `json:"bulk_import_concurrent_pipeline_batch_limit"` + BulkImportEnabled bool `json:"bulk_import_enabled"` + BulkImportMaxDownloadFileSize int `json:"bulk_import_max_download_file_size"` + CanCreateGroup bool `json:"can_create_group"` + CheckNamespacePlan bool `json:"check_namespace_plan"` + CIMaxIncludes int `json:"ci_max_includes"` + CIMaxTotalYAMLSizeBytes int `json:"ci_max_total_yaml_size_bytes"` + CommitEmailHostname string `json:"commit_email_hostname"` + ConcurrentBitbucketImportJobsLimit int `json:"concurrent_bitbucket_import_jobs_limit"` + ConcurrentBitbucketServerImportJobsLimit int `json:"concurrent_bitbucket_server_import_jobs_limit"` + ConcurrentGitHubImportJobsLimit int `json:"concurrent_github_import_jobs_limit"` + ContainerExpirationPoliciesEnableHistoricEntries bool `json:"container_expiration_policies_enable_historic_entries"` + ContainerRegistryCleanupTagsServiceMaxListSize int `json:"container_registry_cleanup_tags_service_max_list_size"` + ContainerRegistryDeleteTagsServiceTimeout int 
`json:"container_registry_delete_tags_service_timeout"` + ContainerRegistryExpirationPoliciesCaching bool `json:"container_registry_expiration_policies_caching"` + ContainerRegistryExpirationPoliciesWorkerCapacity int `json:"container_registry_expiration_policies_worker_capacity"` + ContainerRegistryImportCreatedBefore *time.Time `json:"container_registry_import_created_before"` + ContainerRegistryImportMaxRetries int `json:"container_registry_import_max_retries"` + ContainerRegistryImportMaxStepDuration int `json:"container_registry_import_max_step_duration"` + ContainerRegistryImportMaxTagsCount int `json:"container_registry_import_max_tags_count"` + ContainerRegistryImportStartMaxRetries int `json:"container_registry_import_start_max_retries"` + ContainerRegistryImportTargetPlan string `json:"container_registry_import_target_plan"` + ContainerRegistryTokenExpireDelay int `json:"container_registry_token_expire_delay"` + CreatedAt *time.Time `json:"created_at"` + CustomHTTPCloneURLRoot string `json:"custom_http_clone_url_root"` + DNSRebindingProtectionEnabled bool `json:"dns_rebinding_protection_enabled"` + DSAKeyRestriction int `json:"dsa_key_restriction"` + DeactivateDormantUsers bool `json:"deactivate_dormant_users"` + DeactivateDormantUsersPeriod int `json:"deactivate_dormant_users_period"` + DecompressArchiveFileTimeout int `json:"decompress_archive_file_timeout"` + DefaultArtifactsExpireIn string `json:"default_artifacts_expire_in"` + DefaultBranchName string `json:"default_branch_name"` + DefaultBranchProtection int `json:"default_branch_protection"` + DefaultBranchProtectionDefaults BranchProtectionDefaults `json:"default_branch_protection_defaults,omitempty"` + DefaultCiConfigPath string `json:"default_ci_config_path"` + DefaultGroupVisibility VisibilityValue `json:"default_group_visibility"` + DefaultPreferredLanguage string `json:"default_preferred_language"` + DefaultProjectCreation int `json:"default_project_creation"` + DefaultProjectDeletionProtection bool `json:"default_project_deletion_protection"` + DefaultProjectVisibility VisibilityValue `json:"default_project_visibility"` + DefaultProjectsLimit int `json:"default_projects_limit"` + DefaultSnippetVisibility VisibilityValue `json:"default_snippet_visibility"` + DefaultSyntaxHighlightingTheme int `json:"default_syntax_highlighting_theme"` + DelayedGroupDeletion bool `json:"delayed_group_deletion"` + DelayedProjectDeletion bool `json:"delayed_project_deletion"` + DeleteInactiveProjects bool `json:"delete_inactive_projects"` + DeleteUnconfirmedUsers bool `json:"delete_unconfirmed_users"` + DeletionAdjournedPeriod int `json:"deletion_adjourned_period"` + DiagramsnetEnabled bool `json:"diagramsnet_enabled"` + DiagramsnetURL string `json:"diagramsnet_url"` + DiffMaxFiles int `json:"diff_max_files"` + DiffMaxLines int `json:"diff_max_lines"` + DiffMaxPatchBytes int `json:"diff_max_patch_bytes"` + DisableAdminOAuthScopes bool `json:"disable_admin_oauth_scopes"` + DisableFeedToken bool `json:"disable_feed_token"` + DisableOverridingApproversPerMergeRequest bool `json:"disable_overriding_approvers_per_merge_request"` + DisablePersonalAccessTokens bool `json:"disable_personal_access_tokens"` + DisabledOauthSignInSources []string `json:"disabled_oauth_sign_in_sources"` + DomainAllowlist []string `json:"domain_allowlist"` + DomainDenylist []string `json:"domain_denylist"` + DomainDenylistEnabled bool `json:"domain_denylist_enabled"` + DownstreamPipelineTriggerLimitPerProjectUserSHA int 
`json:"downstream_pipeline_trigger_limit_per_project_user_sha"`
+	DuoFeaturesEnabled bool `json:"duo_features_enabled"`
+	ECDSAKeyRestriction int `json:"ecdsa_key_restriction"`
+	ECDSASKKeyRestriction int `json:"ecdsa_sk_key_restriction"`
+	EKSAccessKeyID string `json:"eks_access_key_id"`
+	EKSAccountID string `json:"eks_account_id"`
+	EKSIntegrationEnabled bool `json:"eks_integration_enabled"`
+	EKSSecretAccessKey string `json:"eks_secret_access_key"`
+	Ed25519KeyRestriction int `json:"ed25519_key_restriction"`
+	Ed25519SKKeyRestriction int `json:"ed25519_sk_key_restriction"`
+	ElasticsearchAWS bool `json:"elasticsearch_aws"`
+	ElasticsearchAWSAccessKey string `json:"elasticsearch_aws_access_key"`
+	ElasticsearchAWSRegion string `json:"elasticsearch_aws_region"`
+	ElasticsearchAWSSecretAccessKey string `json:"elasticsearch_aws_secret_access_key"`
+	ElasticsearchAnalyzersKuromojiEnabled bool `json:"elasticsearch_analyzers_kuromoji_enabled"`
+	ElasticsearchAnalyzersKuromojiSearch bool `json:"elasticsearch_analyzers_kuromoji_search"`
+	ElasticsearchAnalyzersSmartCNEnabled bool `json:"elasticsearch_analyzers_smartcn_enabled"`
+	ElasticsearchAnalyzersSmartCNSearch bool `json:"elasticsearch_analyzers_smartcn_search"`
+	ElasticsearchClientRequestTimeout int `json:"elasticsearch_client_request_timeout"`
+	ElasticsearchIndexedFieldLengthLimit int `json:"elasticsearch_indexed_field_length_limit"`
+	ElasticsearchIndexedFileSizeLimitKB int `json:"elasticsearch_indexed_file_size_limit_kb"`
+	ElasticsearchIndexing bool `json:"elasticsearch_indexing"`
+	ElasticsearchLimitIndexing bool `json:"elasticsearch_limit_indexing"`
+	ElasticsearchMaxBulkConcurrency int `json:"elasticsearch_max_bulk_concurrency"`
+	ElasticsearchMaxBulkSizeMB int `json:"elasticsearch_max_bulk_size_mb"`
+	ElasticsearchMaxCodeIndexingConcurrency int `json:"elasticsearch_max_code_indexing_concurrency"`
+	ElasticsearchNamespaceIDs []int `json:"elasticsearch_namespace_ids"`
+	ElasticsearchPassword string `json:"elasticsearch_password"`
+	ElasticsearchPauseIndexing bool `json:"elasticsearch_pause_indexing"`
+	ElasticsearchProjectIDs []int `json:"elasticsearch_project_ids"`
+	ElasticsearchReplicas int `json:"elasticsearch_replicas"`
+	ElasticsearchRequeueWorkers bool `json:"elasticsearch_requeue_workers"`
+	ElasticsearchSearch bool `json:"elasticsearch_search"`
+	ElasticsearchShards int `json:"elasticsearch_shards"`
+	ElasticsearchURL []string `json:"elasticsearch_url"`
+	ElasticsearchUsername string `json:"elasticsearch_username"`
+	ElasticsearchWorkerNumberOfShards int `json:"elasticsearch_worker_number_of_shards"`
+	EmailAdditionalText string `json:"email_additional_text"`
+	EmailAuthorInBody bool `json:"email_author_in_body"`
+	EmailConfirmationSetting string `json:"email_confirmation_setting"`
+	EmailRestrictions string `json:"email_restrictions"`
+	EmailRestrictionsEnabled bool `json:"email_restrictions_enabled"`
+	EnableArtifactExternalRedirectWarningPage bool `json:"enable_artifact_external_redirect_warning_page"`
+	EnabledGitAccessProtocol string `json:"enabled_git_access_protocol"`
+	EnforceNamespaceStorageLimit bool `json:"enforce_namespace_storage_limit"`
+	EnforcePATExpiration bool `json:"enforce_pat_expiration"`
+	EnforceSSHKeyExpiration bool `json:"enforce_ssh_key_expiration"`
+	EnforceTerms bool `json:"enforce_terms"`
+	ExternalAuthClientCert string `json:"external_auth_client_cert"`
+	ExternalAuthClientKey string `json:"external_auth_client_key"`
+	ExternalAuthClientKeyPass string `json:"external_auth_client_key_pass"`
+	ExternalAuthorizationServiceDefaultLabel string `json:"external_authorization_service_default_label"`
+	ExternalAuthorizationServiceEnabled bool `json:"external_authorization_service_enabled"`
+	ExternalAuthorizationServiceTimeout float64 `json:"external_authorization_service_timeout"`
+	ExternalAuthorizationServiceURL string `json:"external_authorization_service_url"`
+	ExternalPipelineValidationServiceTimeout int `json:"external_pipeline_validation_service_timeout"`
+	ExternalPipelineValidationServiceToken string `json:"external_pipeline_validation_service_token"`
+	ExternalPipelineValidationServiceURL string `json:"external_pipeline_validation_service_url"`
+	FailedLoginAttemptsUnlockPeriodInMinutes int `json:"failed_login_attempts_unlock_period_in_minutes"`
+	FileTemplateProjectID int `json:"file_template_project_id"`
+	FirstDayOfWeek int `json:"first_day_of_week"`
+	FlocEnabled bool `json:"floc_enabled"`
+	GeoNodeAllowedIPs string `json:"geo_node_allowed_ips"`
+	GeoStatusTimeout int `json:"geo_status_timeout"`
+	GitRateLimitUsersAlertlist []string `json:"git_rate_limit_users_alertlist"`
+	GitTwoFactorSessionExpiry int `json:"git_two_factor_session_expiry"`
+	GitalyTimeoutDefault int `json:"gitaly_timeout_default"`
+	GitalyTimeoutFast int `json:"gitaly_timeout_fast"`
+	GitalyTimeoutMedium int `json:"gitaly_timeout_medium"`
+	GitlabDedicatedInstance bool `json:"gitlab_dedicated_instance"`
+	GitlabEnvironmentToolkitInstance bool `json:"gitlab_environment_toolkit_instance"`
+	GitlabShellOperationLimit int `json:"gitlab_shell_operation_limit"`
+	GitpodEnabled bool `json:"gitpod_enabled"`
+	GitpodURL string `json:"gitpod_url"`
+	GitRateLimitUsersAllowlist []string `json:"git_rate_limit_users_allowlist"`
+	GloballyAllowedIPs string `json:"globally_allowed_ips"`
+	GrafanaEnabled bool `json:"grafana_enabled"`
+	GrafanaURL string `json:"grafana_url"`
+	GravatarEnabled bool `json:"gravatar_enabled"`
+	GroupDownloadExportLimit int `json:"group_download_export_limit"`
+	GroupExportLimit int `json:"group_export_limit"`
+	GroupImportLimit int `json:"group_import_limit"`
+	GroupOwnersCanManageDefaultBranchProtection bool `json:"group_owners_can_manage_default_branch_protection"`
+	GroupRunnerTokenExpirationInterval int `json:"group_runner_token_expiration_interval"`
+	HTMLEmailsEnabled bool `json:"html_emails_enabled"`
+	HashedStorageEnabled bool `json:"hashed_storage_enabled"`
+	HelpPageDocumentationBaseURL string `json:"help_page_documentation_base_url"`
+	HelpPageHideCommercialContent bool `json:"help_page_hide_commercial_content"`
+	HelpPageSupportURL string `json:"help_page_support_url"`
+	HelpPageText string `json:"help_page_text"`
+	HelpText string `json:"help_text"`
+	HideThirdPartyOffers bool `json:"hide_third_party_offers"`
+	HomePageURL string `json:"home_page_url"`
+	HousekeepingBitmapsEnabled bool `json:"housekeeping_bitmaps_enabled"`
+	HousekeepingEnabled bool `json:"housekeeping_enabled"`
+	HousekeepingFullRepackPeriod int `json:"housekeeping_full_repack_period"`
+	HousekeepingGcPeriod int `json:"housekeeping_gc_period"`
+	HousekeepingIncrementalRepackPeriod int `json:"housekeeping_incremental_repack_period"`
+	HousekeepingOptimizeRepositoryPeriod int `json:"housekeeping_optimize_repository_period"`
+	ImportSources []string `json:"import_sources"`
+	InactiveProjectsDeleteAfterMonths int `json:"inactive_projects_delete_after_months"`
+	InactiveProjectsMinSizeMB int `json:"inactive_projects_min_size_mb"`
+	InactiveProjectsSendWarningEmailAfterMonths int `json:"inactive_projects_send_warning_email_after_months"`
+	IncludeOptionalMetricsInServicePing bool `json:"include_optional_metrics_in_service_ping"`
+	InProductMarketingEmailsEnabled bool `json:"in_product_marketing_emails_enabled"`
+	InvisibleCaptchaEnabled bool `json:"invisible_captcha_enabled"`
+	IssuesCreateLimit int `json:"issues_create_limit"`
+	JiraConnectApplicationKey string `json:"jira_connect_application_key"`
+	JiraConnectPublicKeyStorageEnabled bool `json:"jira_connect_public_key_storage_enabled"`
+	JiraConnectProxyURL string `json:"jira_connect_proxy_url"`
+	KeepLatestArtifact bool `json:"keep_latest_artifact"`
+	KrokiEnabled bool `json:"kroki_enabled"`
+	KrokiFormats map[string]bool `json:"kroki_formats"`
+	KrokiURL string `json:"kroki_url"`
+	LocalMarkdownVersion int `json:"local_markdown_version"`
+	LockDuoFeaturesEnabled bool `json:"lock_duo_features_enabled"`
+	LockMembershipsToLDAP bool `json:"lock_memberships_to_ldap"`
+	LoginRecaptchaProtectionEnabled bool `json:"login_recaptcha_protection_enabled"`
+	MailgunEventsEnabled bool `json:"mailgun_events_enabled"`
+	MailgunSigningKey string `json:"mailgun_signing_key"`
+	MaintenanceMode bool `json:"maintenance_mode"`
+	MaintenanceModeMessage string `json:"maintenance_mode_message"`
+	MavenPackageRequestsForwarding bool `json:"maven_package_requests_forwarding"`
+	MaxArtifactsSize int `json:"max_artifacts_size"`
+	MaxAttachmentSize int `json:"max_attachment_size"`
+	MaxDecompressedArchiveSize int `json:"max_decompressed_archive_size"`
+	MaxExportSize int `json:"max_export_size"`
+	MaxImportRemoteFileSize int `json:"max_import_remote_file_size"`
+	MaxImportSize int `json:"max_import_size"`
+	MaxLoginAttempts int `json:"max_login_attempts"`
+	MaxNumberOfRepositoryDownloads int `json:"max_number_of_repository_downloads"`
+	MaxNumberOfRepositoryDownloadsWithinTimePeriod int `json:"max_number_of_repository_downloads_within_time_period"`
+	MaxPagesSize int `json:"max_pages_size"`
+	MaxPersonalAccessTokenLifetime int `json:"max_personal_access_token_lifetime"`
+	MaxSSHKeyLifetime int `json:"max_ssh_key_lifetime"`
+	MaxTerraformStateSizeBytes int `json:"max_terraform_state_size_bytes"`
+	MaxYAMLDepth int `json:"max_yaml_depth"`
+	MaxYAMLSizeBytes int `json:"max_yaml_size_bytes"`
+	MetricsMethodCallThreshold int `json:"metrics_method_call_threshold"`
+	MinimumPasswordLength int `json:"minimum_password_length"`
+	MirrorAvailable bool `json:"mirror_available"`
+	MirrorCapacityThreshold int `json:"mirror_capacity_threshold"`
+	MirrorMaxCapacity int `json:"mirror_max_capacity"`
+	MirrorMaxDelay int `json:"mirror_max_delay"`
+	NPMPackageRequestsForwarding bool `json:"npm_package_requests_forwarding"`
+	NotesCreateLimit int `json:"notes_create_limit"`
+	NotifyOnUnknownSignIn bool `json:"notify_on_unknown_sign_in"`
+	NugetSkipMetadataURLValidation bool `json:"nuget_skip_metadata_url_validation"`
+	OutboundLocalRequestsAllowlistRaw string `json:"outbound_local_requests_allowlist_raw"`
+	OutboundLocalRequestsWhitelist []string `json:"outbound_local_requests_whitelist"`
+	PackageMetadataPURLTypes []int `json:"package_metadata_purl_types"`
+	PackageRegistryAllowAnyoneToPullOption bool `json:"package_registry_allow_anyone_to_pull_option"`
+	PackageRegistryCleanupPoliciesWorkerCapacity int `json:"package_registry_cleanup_policies_worker_capacity"`
+	PagesDomainVerificationEnabled bool `json:"pages_domain_verification_enabled"`
+	PasswordAuthenticationEnabledForGit bool `json:"password_authentication_enabled_for_git"`
+	PasswordAuthenticationEnabledForWeb bool `json:"password_authentication_enabled_for_web"`
+	PasswordNumberRequired bool `json:"password_number_required"`
+	PasswordSymbolRequired bool `json:"password_symbol_required"`
+	PasswordUppercaseRequired bool `json:"password_uppercase_required"`
+	PasswordLowercaseRequired bool `json:"password_lowercase_required"`
+	PerformanceBarAllowedGroupID int `json:"performance_bar_allowed_group_id"`
+	PerformanceBarAllowedGroupPath string `json:"performance_bar_allowed_group_path"`
+	PerformanceBarEnabled bool `json:"performance_bar_enabled"`
+	PersonalAccessTokenPrefix string `json:"personal_access_token_prefix"`
+	PipelineLimitPerProjectUserSha int `json:"pipeline_limit_per_project_user_sha"`
+	PlantumlEnabled bool `json:"plantuml_enabled"`
+	PlantumlURL string `json:"plantuml_url"`
+	PollingIntervalMultiplier float64 `json:"polling_interval_multiplier,string"`
+	PreventMergeRequestsAuthorApproval bool `json:"prevent_merge_request_author_approval"`
+	PreventMergeRequestsCommittersApproval bool `json:"prevent_merge_request_committers_approval"`
+	ProjectDownloadExportLimit int `json:"project_download_export_limit"`
+	ProjectExportEnabled bool `json:"project_export_enabled"`
+	ProjectExportLimit int `json:"project_export_limit"`
+	ProjectImportLimit int `json:"project_import_limit"`
+	ProjectJobsAPIRateLimit int `json:"project_jobs_api_rate_limit"`
+	ProjectRunnerTokenExpirationInterval int `json:"project_runner_token_expiration_interval"`
+	ProjectsAPIRateLimitUnauthenticated int `json:"projects_api_rate_limit_unauthenticated"`
+	PrometheusMetricsEnabled bool `json:"prometheus_metrics_enabled"`
+	ProtectedCIVariables bool `json:"protected_ci_variables"`
+	PseudonymizerEnabled bool `json:"pseudonymizer_enabled"`
+	PushEventActivitiesLimit int `json:"push_event_activities_limit"`
+	PushEventHooksLimit int `json:"push_event_hooks_limit"`
+	PyPIPackageRequestsForwarding bool `json:"pypi_package_requests_forwarding"`
+	RSAKeyRestriction int `json:"rsa_key_restriction"`
+	RateLimitingResponseText string `json:"rate_limiting_response_text"`
+	RawBlobRequestLimit int `json:"raw_blob_request_limit"`
+	RecaptchaEnabled bool `json:"recaptcha_enabled"`
+	RecaptchaPrivateKey string `json:"recaptcha_private_key"`
+	RecaptchaSiteKey string `json:"recaptcha_site_key"`
+	ReceiveMaxInputSize int `json:"receive_max_input_size"`
+	ReceptiveClusterAgentsEnabled bool `json:"receptive_cluster_agents_enabled"`
+	RememberMeEnabled bool `json:"remember_me_enabled"`
+	RepositoryChecksEnabled bool `json:"repository_checks_enabled"`
+	RepositorySizeLimit int `json:"repository_size_limit"`
+	RepositoryStorages []string `json:"repository_storages"`
+	RepositoryStoragesWeighted map[string]int `json:"repository_storages_weighted"`
+	RequireAdminApprovalAfterUserSignup bool `json:"require_admin_approval_after_user_signup"`
+	RequireAdminTwoFactorAuthentication bool `json:"require_admin_two_factor_authentication"`
+	RequirePersonalAccessTokenExpiry bool `json:"require_personal_access_token_expiry"`
+	RequireTwoFactorAuthentication bool `json:"require_two_factor_authentication"`
+	RestrictedVisibilityLevels []VisibilityValue `json:"restricted_visibility_levels"`
+	RunnerTokenExpirationInterval int `json:"runner_token_expiration_interval"`
+	SearchRateLimit int `json:"search_rate_limit"`
+	SearchRateLimitUnauthenticated int `json:"search_rate_limit_unauthenticated"`
+	SecretDetectionRevocationTokenTypesURL string `json:"secret_detection_revocation_token_types_url"`
+	SecretDetectionTokenRevocationEnabled bool `json:"secret_detection_token_revocation_enabled"`
+	SecretDetectionTokenRevocationToken string `json:"secret_detection_token_revocation_token"`
+	SecretDetectionTokenRevocationURL string `json:"secret_detection_token_revocation_url"`
+	SecurityApprovalPoliciesLimit int `json:"security_approval_policies_limit"`
+	SecurityPolicyGlobalGroupApproversEnabled bool `json:"security_policy_global_group_approvers_enabled"`
+	SecurityTXTContent string `json:"security_txt_content"`
+	SendUserConfirmationEmail bool `json:"send_user_confirmation_email"`
+	SentryClientsideDSN string `json:"sentry_clientside_dsn"`
+	SentryDSN string `json:"sentry_dsn"`
+	SentryEnabled bool `json:"sentry_enabled"`
+	SentryEnvironment string `json:"sentry_environment"`
+	ServiceAccessTokensExpirationEnforced bool `json:"service_access_tokens_expiration_enforced"`
+	SessionExpireDelay int `json:"session_expire_delay"`
+	SharedRunnersEnabled bool `json:"shared_runners_enabled"`
+	SharedRunnersMinutes int `json:"shared_runners_minutes"`
+	SharedRunnersText string `json:"shared_runners_text"`
+	SidekiqJobLimiterCompressionThresholdBytes int `json:"sidekiq_job_limiter_compression_threshold_bytes"`
+	SidekiqJobLimiterLimitBytes int `json:"sidekiq_job_limiter_limit_bytes"`
+	SidekiqJobLimiterMode string `json:"sidekiq_job_limiter_mode"`
+	SignInText string `json:"sign_in_text"`
+	SignupEnabled bool `json:"signup_enabled"`
+	SilentAdminExportsEnabled bool `json:"silent_admin_exports_enabled"`
+	SilentModeEnabled bool `json:"silent_mode_enabled"`
+	SlackAppEnabled bool `json:"slack_app_enabled"`
+	SlackAppID string `json:"slack_app_id"`
+	SlackAppSecret string `json:"slack_app_secret"`
+	SlackAppSigningSecret string `json:"slack_app_signing_secret"`
+	SlackAppVerificationToken string `json:"slack_app_verification_token"`
+	SnippetSizeLimit int `json:"snippet_size_limit"`
+	SnowplowAppID string `json:"snowplow_app_id"`
+	SnowplowCollectorHostname string `json:"snowplow_collector_hostname"`
+	SnowplowCookieDomain string `json:"snowplow_cookie_domain"`
+	SnowplowDatabaseCollectorHostname string `json:"snowplow_database_collector_hostname"`
+	SnowplowEnabled bool `json:"snowplow_enabled"`
+	SourcegraphEnabled bool `json:"sourcegraph_enabled"`
+	SourcegraphPublicOnly bool `json:"sourcegraph_public_only"`
+	SourcegraphURL string `json:"sourcegraph_url"`
+	SpamCheckAPIKey string `json:"spam_check_api_key"`
+	SpamCheckEndpointEnabled bool `json:"spam_check_endpoint_enabled"`
+	SpamCheckEndpointURL string `json:"spam_check_endpoint_url"`
+	StaticObjectsExternalStorageAuthToken string `json:"static_objects_external_storage_auth_token"`
+	StaticObjectsExternalStorageURL string `json:"static_objects_external_storage_url"`
+	SuggestPipelineEnabled bool `json:"suggest_pipeline_enabled"`
+	TerminalMaxSessionTime int `json:"terminal_max_session_time"`
+	Terms string `json:"terms"`
+	ThrottleAuthenticatedAPIEnabled bool `json:"throttle_authenticated_api_enabled"`
+	ThrottleAuthenticatedAPIPeriodInSeconds int `json:"throttle_authenticated_api_period_in_seconds"`
+	ThrottleAuthenticatedAPIRequestsPerPeriod int `json:"throttle_authenticated_api_requests_per_period"`
+	ThrottleAuthenticatedDeprecatedAPIEnabled bool `json:"throttle_authenticated_deprecated_api_enabled"`
+	ThrottleAuthenticatedDeprecatedAPIPeriodInSeconds int `json:"throttle_authenticated_deprecated_api_period_in_seconds"`
+	ThrottleAuthenticatedDeprecatedAPIRequestsPerPeriod int `json:"throttle_authenticated_deprecated_api_requests_per_period"`
+	ThrottleAuthenticatedFilesAPIEnabled bool `json:"throttle_authenticated_files_api_enabled"`
+	ThrottleAuthenticatedFilesAPIPeriodInSeconds int `json:"throttle_authenticated_files_api_period_in_seconds"`
+	ThrottleAuthenticatedFilesAPIRequestsPerPeriod int `json:"throttle_authenticated_files_api_requests_per_period"`
+	ThrottleAuthenticatedGitLFSEnabled bool `json:"throttle_authenticated_git_lfs_enabled"`
+	ThrottleAuthenticatedGitLFSPeriodInSeconds int `json:"throttle_authenticated_git_lfs_period_in_seconds"`
+	ThrottleAuthenticatedGitLFSRequestsPerPeriod int `json:"throttle_authenticated_git_lfs_requests_per_period"`
+	ThrottleAuthenticatedPackagesAPIEnabled bool `json:"throttle_authenticated_packages_api_enabled"`
+	ThrottleAuthenticatedPackagesAPIPeriodInSeconds int `json:"throttle_authenticated_packages_api_period_in_seconds"`
+	ThrottleAuthenticatedPackagesAPIRequestsPerPeriod int `json:"throttle_authenticated_packages_api_requests_per_period"`
+	ThrottleAuthenticatedWebEnabled bool `json:"throttle_authenticated_web_enabled"`
+	ThrottleAuthenticatedWebPeriodInSeconds int `json:"throttle_authenticated_web_period_in_seconds"`
+	ThrottleAuthenticatedWebRequestsPerPeriod int `json:"throttle_authenticated_web_requests_per_period"`
+	ThrottleIncidentManagementNotificationEnabled bool `json:"throttle_incident_management_notification_enabled"`
+	ThrottleIncidentManagementNotificationPerPeriod int `json:"throttle_incident_management_notification_per_period"`
+	ThrottleIncidentManagementNotificationPeriodInSeconds int `json:"throttle_incident_management_notification_period_in_seconds"`
+	ThrottleProtectedPathsEnabled bool `json:"throttle_protected_paths_enabled"`
+	ThrottleProtectedPathsPeriodInSeconds int `json:"throttle_protected_paths_period_in_seconds"`
+	ThrottleProtectedPathsRequestsPerPeriod int `json:"throttle_protected_paths_requests_per_period"`
+	ThrottleUnauthenticatedAPIEnabled bool `json:"throttle_unauthenticated_api_enabled"`
+	ThrottleUnauthenticatedAPIPeriodInSeconds int `json:"throttle_unauthenticated_api_period_in_seconds"`
+	ThrottleUnauthenticatedAPIRequestsPerPeriod int `json:"throttle_unauthenticated_api_requests_per_period"`
+	ThrottleUnauthenticatedDeprecatedAPIEnabled bool `json:"throttle_unauthenticated_deprecated_api_enabled"`
+	ThrottleUnauthenticatedDeprecatedAPIPeriodInSeconds int `json:"throttle_unauthenticated_deprecated_api_period_in_seconds"`
+	ThrottleUnauthenticatedDeprecatedAPIRequestsPerPeriod int `json:"throttle_unauthenticated_deprecated_api_requests_per_period"`
+	ThrottleUnauthenticatedFilesAPIEnabled bool `json:"throttle_unauthenticated_files_api_enabled"`
+	ThrottleUnauthenticatedFilesAPIPeriodInSeconds int `json:"throttle_unauthenticated_files_api_period_in_seconds"`
+	ThrottleUnauthenticatedFilesAPIRequestsPerPeriod int `json:"throttle_unauthenticated_files_api_requests_per_period"`
+	ThrottleUnauthenticatedGitLFSEnabled bool `json:"throttle_unauthenticated_git_lfs_enabled"`
+	ThrottleUnauthenticatedGitLFSPeriodInSeconds int `json:"throttle_unauthenticated_git_lfs_period_in_seconds"`
+	ThrottleUnauthenticatedGitLFSRequestsPerPeriod int `json:"throttle_unauthenticated_git_lfs_requests_per_period"`
+	ThrottleUnauthenticatedPackagesAPIEnabled bool `json:"throttle_unauthenticated_packages_api_enabled"`
+	ThrottleUnauthenticatedPackagesAPIPeriodInSeconds int `json:"throttle_unauthenticated_packages_api_period_in_seconds"`
+	ThrottleUnauthenticatedPackagesAPIRequestsPerPeriod int `json:"throttle_unauthenticated_packages_api_requests_per_period"`
+	ThrottleUnauthenticatedWebEnabled bool `json:"throttle_unauthenticated_web_enabled"`
+	ThrottleUnauthenticatedWebPeriodInSeconds int `json:"throttle_unauthenticated_web_period_in_seconds"`
+	ThrottleUnauthenticatedWebRequestsPerPeriod int `json:"throttle_unauthenticated_web_requests_per_period"`
+	TimeTrackingLimitToHours bool `json:"time_tracking_limit_to_hours"`
+	TwoFactorGracePeriod int `json:"two_factor_grace_period"`
+	UnconfirmedUsersDeleteAfterDays int `json:"unconfirmed_users_delete_after_days"`
+	UniqueIPsLimitEnabled bool `json:"unique_ips_limit_enabled"`
+	UniqueIPsLimitPerUser int `json:"unique_ips_limit_per_user"`
+	UniqueIPsLimitTimeWindow int `json:"unique_ips_limit_time_window"`
+	UpdateRunnerVersionsEnabled bool `json:"update_runner_versions_enabled"`
+	UpdatedAt *time.Time `json:"updated_at"`
+	UpdatingNameDisabledForUsers bool `json:"updating_name_disabled_for_users"`
+	UsagePingEnabled bool `json:"usage_ping_enabled"`
+	UsagePingFeaturesEnabled bool `json:"usage_ping_features_enabled"`
+	UseClickhouseForAnalytics bool `json:"use_clickhouse_for_analytics"`
+	UserDeactivationEmailsEnabled bool `json:"user_deactivation_emails_enabled"`
+	UserDefaultExternal bool `json:"user_default_external"`
+	UserDefaultInternalRegex string `json:"user_default_internal_regex"`
+	UserDefaultsToPrivateProfile bool `json:"user_defaults_to_private_profile"`
+	UserOauthApplications bool `json:"user_oauth_applications"`
+	UserShowAddSSHKeyMessage bool `json:"user_show_add_ssh_key_message"`
+	UsersGetByIDLimit int `json:"users_get_by_id_limit"`
+	UsersGetByIDLimitAllowlistRaw string `json:"users_get_by_id_limit_allowlist_raw"`
+	ValidRunnerRegistrars []string `json:"valid_runner_registrars"`
+	VersionCheckEnabled bool `json:"version_check_enabled"`
+	WebIDEClientsidePreviewEnabled bool `json:"web_ide_clientside_preview_enabled"`
+	WhatsNewVariant string `json:"whats_new_variant"`
+	WikiPageMaxContentBytes int `json:"wiki_page_max_content_bytes"`
+
+	// Deprecated: Use AbuseNotificationEmail instead.
+	AdminNotificationEmail string `json:"admin_notification_email"`
+	// Deprecated: Use AllowLocalRequestsFromWebHooksAndServices instead.
+	AllowLocalRequestsFromHooksAndServices bool `json:"allow_local_requests_from_hooks_and_services"`
+	// Deprecated: Use AssetProxyAllowlist instead.
+	AssetProxyWhitelist []string `json:"asset_proxy_whitelist"`
+	// Deprecated: Use ThrottleUnauthenticatedWebEnabled or ThrottleUnauthenticatedAPIEnabled instead. (Deprecated in GitLab 14.3)
+	ThrottleUnauthenticatedEnabled bool `json:"throttle_unauthenticated_enabled"`
+	// Deprecated: Use ThrottleUnauthenticatedWebPeriodInSeconds or ThrottleUnauthenticatedAPIPeriodInSeconds instead. (Deprecated in GitLab 14.3)
+	ThrottleUnauthenticatedPeriodInSeconds int `json:"throttle_unauthenticated_period_in_seconds"`
+	// Deprecated: Use ThrottleUnauthenticatedWebRequestsPerPeriod or ThrottleUnauthenticatedAPIRequestsPerPeriod instead. (Deprecated in GitLab 14.3)
+	ThrottleUnauthenticatedRequestsPerPeriod int `json:"throttle_unauthenticated_requests_per_period"`
+	// Deprecated: Replaced by SearchRateLimit in GitLab 14.9 (removed in 15.0).
+	UserEmailLookupLimit int `json:"user_email_lookup_limit"`
+}
+
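The Settings struct above is decoded from the JSON body of GET /application/settings. One field needs special care: GitLab returns container_registry_import_created_before as an empty string when it is unset, and encoding/json cannot decode "" into a time.Time; that is why the vendored code defines the custom UnmarshalJSON that follows, which drops the empty value before decoding. A minimal, self-contained sketch of the failure mode (the local `settings` type here is a trimmed stand-in for illustration, not the vendored struct):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Trimmed stand-in: only the problematic field, with the same json tag.
type settings struct {
	CreatedBefore *time.Time `json:"container_registry_import_created_before"`
}

func main() {
	var s settings

	// GitLab sends "" when the value is unset; time.Time cannot parse it,
	// so plain json.Unmarshal returns a non-nil parse error here.
	err := json.Unmarshal([]byte(`{"container_registry_import_created_before": ""}`), &s)
	fmt.Println(err)

	// Dropping the key first (what the custom UnmarshalJSON below does)
	// decodes cleanly and leaves the field nil.
	err = json.Unmarshal([]byte(`{}`), &s)
	fmt.Println(err, s.CreatedBefore)
}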
+// BranchProtectionDefaults represents default Git protected branch permissions.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/groups.html#options-for-default_branch_protection_defaults
+type BranchProtectionDefaults struct {
+	AllowedToPush []int `json:"allowed_to_push,omitempty"`
+	AllowForcePush bool `json:"allow_force_push,omitempty"`
+	AllowedToMerge []int `json:"allowed_to_merge,omitempty"`
+	DeveloperCanInitialPush bool `json:"developer_can_initial_push,omitempty"`
+}
+
+// Settings requires a custom unmarshaller in order to properly unmarshal
+// `container_registry_import_created_before` which is either a time.Time or
+// an empty string if no value is set.
+func (s *Settings) UnmarshalJSON(data []byte) error {
+	type Alias Settings
+
+	raw := make(map[string]interface{})
+	err := json.Unmarshal(data, &raw)
+	if err != nil {
+		return err
+	}
+
+	// If empty string, remove the value to leave it nil in the response.
+	if v, ok := raw["container_registry_import_created_before"]; ok && v == "" {
+		delete(raw, "container_registry_import_created_before")
+
+		data, err = json.Marshal(raw)
+		if err != nil {
+			return err
+		}
+	}
+
+	return json.Unmarshal(data, (*Alias)(s))
+}
+
+func (s Settings) String() string {
+	return Stringify(s)
+}
+
+// GetSettings gets the current application settings.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/settings.html#get-current-application-settings
+func (s *SettingsService) GetSettings(options ...RequestOptionFunc) (*Settings, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodGet, "application/settings", nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	as := new(Settings)
+	resp, err := s.client.Do(req, as)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return as, resp, nil
+}
+
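For orientation, fetching and changing instance settings through this client typically looks like the sketch below. It assumes the module's usual public entry points (gitlab.NewClient, gitlab.WithBaseURL, gitlab.Ptr); the token and base URL are placeholders, and the import path should match whatever this vendor tree actually provides (older trees use github.com/xanzy/go-gitlab):

package main

import (
	"fmt"
	"log"

	gitlab "gitlab.com/gitlab-org/api/client-go" // import path assumed; adjust to the vendored module
)

func main() {
	// Placeholder token and URL; application settings require an admin token.
	client, err := gitlab.NewClient("glpat-xxxx",
		gitlab.WithBaseURL("https://gitlab.example.com/api/v4"))
	if err != nil {
		log.Fatal(err)
	}

	// GET /application/settings via GetSettings above.
	settings, _, err := client.Settings.GetSettings()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("signup enabled:", settings.SignupEnabled)

	// PUT /application/settings: only non-nil option fields are sent.
	_, _, err = client.Settings.UpdateSettings(&gitlab.UpdateSettingsOptions{
		SignupEnabled: gitlab.Ptr(false),
	})
	if err != nil {
		log.Fatal(err)
	}
}

Every field of the UpdateSettingsOptions struct below is a pointer with omitempty on both the url and json tags, so only values that are explicitly set are serialized into the request; a nil field means "leave this setting unchanged" rather than "reset to the zero value".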
+// UpdateSettingsOptions represents the available UpdateSettings() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/settings.html#change-application-settings
+type UpdateSettingsOptions struct {
+	AbuseNotificationEmail *string `url:"abuse_notification_email,omitempty" json:"abuse_notification_email,omitempty"`
+	AdminMode *bool `url:"admin_mode,omitempty" json:"admin_mode,omitempty"`
+	AdminNotificationEmail *string `url:"admin_notification_email,omitempty" json:"admin_notification_email,omitempty"`
+	AfterSignOutPath *string `url:"after_sign_out_path,omitempty" json:"after_sign_out_path,omitempty"`
+	AfterSignUpText *string `url:"after_sign_up_text,omitempty" json:"after_sign_up_text,omitempty"`
+	AkismetAPIKey *string `url:"akismet_api_key,omitempty" json:"akismet_api_key,omitempty"`
+	AkismetEnabled *bool `url:"akismet_enabled,omitempty" json:"akismet_enabled,omitempty"`
+	AllowAccountDeletion *bool `url:"allow_account_deletion,omitempty" json:"allow_account_deletion,omitempty"`
+	AllowGroupOwnersToManageLDAP *bool `url:"allow_group_owners_to_manage_ldap,omitempty" json:"allow_group_owners_to_manage_ldap,omitempty"`
+	AllowLocalRequestsFromHooksAndServices *bool `url:"allow_local_requests_from_hooks_and_services,omitempty" json:"allow_local_requests_from_hooks_and_services,omitempty"`
+	AllowLocalRequestsFromSystemHooks *bool `url:"allow_local_requests_from_system_hooks,omitempty" json:"allow_local_requests_from_system_hooks,omitempty"`
+	AllowLocalRequestsFromWebHooksAndServices *bool `url:"allow_local_requests_from_web_hooks_and_services,omitempty" json:"allow_local_requests_from_web_hooks_and_services,omitempty"`
+	AllowProjectCreationForGuestAndBelow *bool `url:"allow_project_creation_for_guest_and_below,omitempty" json:"allow_project_creation_for_guest_and_below,omitempty"`
+	AllowRunnerRegistrationToken *bool `url:"allow_runner_registration_token,omitempty" json:"allow_runner_registration_token,omitempty"`
+	ArchiveBuildsInHumanReadable *string `url:"archive_builds_in_human_readable,omitempty" json:"archive_builds_in_human_readable,omitempty"`
+	ASCIIDocMaxIncludes *int `url:"asciidoc_max_includes,omitempty" json:"asciidoc_max_includes,omitempty"`
+	AssetProxyAllowlist *[]string `url:"asset_proxy_allowlist,omitempty" json:"asset_proxy_allowlist,omitempty"`
+	AssetProxyEnabled *bool `url:"asset_proxy_enabled,omitempty" json:"asset_proxy_enabled,omitempty"`
+	AssetProxySecretKey *string `url:"asset_proxy_secret_key,omitempty" json:"asset_proxy_secret_key,omitempty"`
+	AssetProxyURL *string `url:"asset_proxy_url,omitempty" json:"asset_proxy_url,omitempty"`
+	AssetProxyWhitelist *[]string `url:"asset_proxy_whitelist,omitempty" json:"asset_proxy_whitelist,omitempty"`
+	AuthorizedKeysEnabled *bool `url:"authorized_keys_enabled,omitempty" json:"authorized_keys_enabled,omitempty"`
+	AutoBanUserOnExcessiveProjectsDownload *bool `url:"auto_ban_user_on_excessive_projects_download,omitempty" json:"auto_ban_user_on_excessive_projects_download,omitempty"`
+	AutoDevOpsDomain *string `url:"auto_devops_domain,omitempty" json:"auto_devops_domain,omitempty"`
+	AutoDevOpsEnabled *bool `url:"auto_devops_enabled,omitempty" json:"auto_devops_enabled,omitempty"`
+	AutomaticPurchasedStorageAllocation *bool `url:"automatic_purchased_storage_allocation,omitempty" json:"automatic_purchased_storage_allocation,omitempty"`
+	BulkImportConcurrentPipelineBatchLimit *int `url:"bulk_import_concurrent_pipeline_batch_limit,omitempty" json:"bulk_import_concurrent_pipeline_batch_limit,omitempty"`
+	BulkImportEnabled *bool `url:"bulk_import_enabled,omitempty" json:"bulk_import_enabled,omitempty"`
+	BulkImportMaxDownloadFileSize *int `url:"bulk_import_max_download_file_size,omitempty" json:"bulk_import_max_download_file_size,omitempty"`
+	CanCreateGroup *bool `url:"can_create_group,omitempty" json:"can_create_group,omitempty"`
+	CheckNamespacePlan *bool `url:"check_namespace_plan,omitempty" json:"check_namespace_plan,omitempty"`
+	CIMaxIncludes *int `url:"ci_max_includes,omitempty" json:"ci_max_includes,omitempty"`
+	CIMaxTotalYAMLSizeBytes *int `url:"ci_max_total_yaml_size_bytes,omitempty" json:"ci_max_total_yaml_size_bytes,omitempty"`
+	CommitEmailHostname *string `url:"commit_email_hostname,omitempty" json:"commit_email_hostname,omitempty"`
+	ConcurrentBitbucketImportJobsLimit *int `url:"concurrent_bitbucket_import_jobs_limit,omitempty" json:"concurrent_bitbucket_import_jobs_limit,omitempty"`
+	ConcurrentBitbucketServerImportJobsLimit *int `url:"concurrent_bitbucket_server_import_jobs_limit,omitempty" json:"concurrent_bitbucket_server_import_jobs_limit,omitempty"`
+	ConcurrentGitHubImportJobsLimit *int `url:"concurrent_github_import_jobs_limit,omitempty" json:"concurrent_github_import_jobs_limit,omitempty"`
+	ContainerExpirationPoliciesEnableHistoricEntries *bool `url:"container_expiration_policies_enable_historic_entries,omitempty" json:"container_expiration_policies_enable_historic_entries,omitempty"`
+	ContainerRegistryCleanupTagsServiceMaxListSize *int `url:"container_registry_cleanup_tags_service_max_list_size,omitempty" json:"container_registry_cleanup_tags_service_max_list_size,omitempty"`
+	ContainerRegistryDeleteTagsServiceTimeout *int `url:"container_registry_delete_tags_service_timeout,omitempty" json:"container_registry_delete_tags_service_timeout,omitempty"`
+	ContainerRegistryExpirationPoliciesCaching *bool `url:"container_registry_expiration_policies_caching,omitempty" json:"container_registry_expiration_policies_caching,omitempty"`
+	ContainerRegistryExpirationPoliciesWorkerCapacity *int `url:"container_registry_expiration_policies_worker_capacity,omitempty" json:"container_registry_expiration_policies_worker_capacity,omitempty"`
+	ContainerRegistryImportCreatedBefore *time.Time `url:"container_registry_import_created_before,omitempty" json:"container_registry_import_created_before,omitempty"`
+	ContainerRegistryImportMaxRetries *int `url:"container_registry_import_max_retries,omitempty" json:"container_registry_import_max_retries,omitempty"`
+	ContainerRegistryImportMaxStepDuration *int `url:"container_registry_import_max_step_duration,omitempty" json:"container_registry_import_max_step_duration,omitempty"`
+	ContainerRegistryImportMaxTagsCount *int `url:"container_registry_import_max_tags_count,omitempty" json:"container_registry_import_max_tags_count,omitempty"`
+	ContainerRegistryImportStartMaxRetries *int `url:"container_registry_import_start_max_retries,omitempty" json:"container_registry_import_start_max_retries,omitempty"`
+	ContainerRegistryImportTargetPlan *string `url:"container_registry_import_target_plan,omitempty" json:"container_registry_import_target_plan,omitempty"`
+	ContainerRegistryTokenExpireDelay *int `url:"container_registry_token_expire_delay,omitempty" json:"container_registry_token_expire_delay,omitempty"`
+	CustomHTTPCloneURLRoot *string `url:"custom_http_clone_url_root,omitempty" json:"custom_http_clone_url_root,omitempty"`
+	DNSRebindingProtectionEnabled *bool `url:"dns_rebinding_protection_enabled,omitempty" json:"dns_rebinding_protection_enabled,omitempty"`
+	DSAKeyRestriction *int `url:"dsa_key_restriction,omitempty" json:"dsa_key_restriction,omitempty"`
+	DeactivateDormantUsers *bool `url:"deactivate_dormant_users,omitempty" json:"deactivate_dormant_users,omitempty"`
+	DeactivateDormantUsersPeriod *int `url:"deactivate_dormant_users_period,omitempty" json:"deactivate_dormant_users_period,omitempty"`
+	DecompressArchiveFileTimeout *int `url:"decompress_archive_file_timeout,omitempty" json:"decompress_archive_file_timeout,omitempty"`
+	DefaultArtifactsExpireIn *string `url:"default_artifacts_expire_in,omitempty" json:"default_artifacts_expire_in,omitempty"`
+	DefaultBranchName *string `url:"default_branch_name,omitempty" json:"default_branch_name,omitempty"`
+	DefaultBranchProtection *int `url:"default_branch_protection,omitempty" json:"default_branch_protection,omitempty"`
+	DefaultBranchProtectionDefaults *BranchProtectionDefaultsOptions `url:"default_branch_protection_defaults,omitempty" json:"default_branch_protection_defaults,omitempty"`
+	DefaultCiConfigPath *string `url:"default_ci_config_path,omitempty" json:"default_ci_config_path,omitempty"`
+	DefaultGroupVisibility *VisibilityValue `url:"default_group_visibility,omitempty" json:"default_group_visibility,omitempty"`
+	DefaultPreferredLanguage *string `url:"default_preferred_language,omitempty" json:"default_preferred_language,omitempty"`
+	DefaultProjectCreation *int `url:"default_project_creation,omitempty" json:"default_project_creation,omitempty"`
+	DefaultProjectDeletionProtection *bool `url:"default_project_deletion_protection,omitempty" json:"default_project_deletion_protection,omitempty"`
+	DefaultProjectVisibility *VisibilityValue `url:"default_project_visibility,omitempty" json:"default_project_visibility,omitempty"`
+	DefaultProjectsLimit *int `url:"default_projects_limit,omitempty" json:"default_projects_limit,omitempty"`
+	DefaultSnippetVisibility *VisibilityValue `url:"default_snippet_visibility,omitempty" json:"default_snippet_visibility,omitempty"`
+	DefaultSyntaxHighlightingTheme *int `url:"default_syntax_highlighting_theme,omitempty" json:"default_syntax_highlighting_theme,omitempty"`
+	DelayedGroupDeletion *bool `url:"delayed_group_deletion,omitempty" json:"delayed_group_deletion,omitempty"`
+	DelayedProjectDeletion *bool `url:"delayed_project_deletion,omitempty" json:"delayed_project_deletion,omitempty"`
+	DeleteInactiveProjects *bool `url:"delete_inactive_projects,omitempty" json:"delete_inactive_projects,omitempty"`
+	DeleteUnconfirmedUsers *bool `url:"delete_unconfirmed_users,omitempty" json:"delete_unconfirmed_users,omitempty"`
+	DeletionAdjournedPeriod *int `url:"deletion_adjourned_period,omitempty" json:"deletion_adjourned_period,omitempty"`
+	DiagramsnetEnabled *bool `url:"diagramsnet_enabled,omitempty" json:"diagramsnet_enabled,omitempty"`
+	DiagramsnetURL *string `url:"diagramsnet_url,omitempty" json:"diagramsnet_url,omitempty"`
+	DiffMaxFiles *int `url:"diff_max_files,omitempty" json:"diff_max_files,omitempty"`
+	DiffMaxLines *int `url:"diff_max_lines,omitempty" json:"diff_max_lines,omitempty"`
+	DiffMaxPatchBytes *int `url:"diff_max_patch_bytes,omitempty" json:"diff_max_patch_bytes,omitempty"`
+	DisableFeedToken *bool `url:"disable_feed_token,omitempty" json:"disable_feed_token,omitempty"`
+	DisableAdminOAuthScopes *bool `url:"disable_admin_oauth_scopes,omitempty" json:"disable_admin_oauth_scopes,omitempty"`
+	DisableOverridingApproversPerMergeRequest *bool `url:"disable_overriding_approvers_per_merge_request,omitempty" json:"disable_overriding_approvers_per_merge_request,omitempty"`
+	DisablePersonalAccessTokens *bool `url:"disable_personal_access_tokens,omitempty" json:"disable_personal_access_tokens,omitempty"`
+	DisabledOauthSignInSources *[]string `url:"disabled_oauth_sign_in_sources,omitempty" json:"disabled_oauth_sign_in_sources,omitempty"`
+	DomainAllowlist *[]string `url:"domain_allowlist,omitempty" json:"domain_allowlist,omitempty"`
+	DomainDenylist *[]string `url:"domain_denylist,omitempty" json:"domain_denylist,omitempty"`
+	DomainDenylistEnabled *bool `url:"domain_denylist_enabled,omitempty" json:"domain_denylist_enabled,omitempty"`
+	DownstreamPipelineTriggerLimitPerProjectUserSHA *int `url:"downstream_pipeline_trigger_limit_per_project_user_sha,omitempty" json:"downstream_pipeline_trigger_limit_per_project_user_sha,omitempty"`
+	DuoFeaturesEnabled *bool `url:"duo_features_enabled,omitempty" json:"duo_features_enabled,omitempty"`
+	ECDSAKeyRestriction *int `url:"ecdsa_key_restriction,omitempty" json:"ecdsa_key_restriction,omitempty"`
+	ECDSASKKeyRestriction *int `url:"ecdsa_sk_key_restriction,omitempty" json:"ecdsa_sk_key_restriction,omitempty"`
+	EKSAccessKeyID *string `url:"eks_access_key_id,omitempty" json:"eks_access_key_id,omitempty"`
+	EKSAccountID *string `url:"eks_account_id,omitempty" json:"eks_account_id,omitempty"`
+	EKSIntegrationEnabled *bool `url:"eks_integration_enabled,omitempty" json:"eks_integration_enabled,omitempty"`
+	EKSSecretAccessKey *string `url:"eks_secret_access_key,omitempty" json:"eks_secret_access_key,omitempty"`
+	Ed25519KeyRestriction *int `url:"ed25519_key_restriction,omitempty" json:"ed25519_key_restriction,omitempty"`
+	Ed25519SKKeyRestriction *int `url:"ed25519_sk_key_restriction,omitempty" json:"ed25519_sk_key_restriction,omitempty"`
+	ElasticsearchAWS *bool `url:"elasticsearch_aws,omitempty" json:"elasticsearch_aws,omitempty"`
+	ElasticsearchAWSAccessKey *string `url:"elasticsearch_aws_access_key,omitempty" json:"elasticsearch_aws_access_key,omitempty"`
+	ElasticsearchAWSRegion *string `url:"elasticsearch_aws_region,omitempty" json:"elasticsearch_aws_region,omitempty"`
+	ElasticsearchAWSSecretAccessKey *string `url:"elasticsearch_aws_secret_access_key,omitempty" json:"elasticsearch_aws_secret_access_key,omitempty"`
+	ElasticsearchAnalyzersKuromojiEnabled *bool `url:"elasticsearch_analyzers_kuromoji_enabled,omitempty" json:"elasticsearch_analyzers_kuromoji_enabled,omitempty"`
+	ElasticsearchAnalyzersKuromojiSearch *int `url:"elasticsearch_analyzers_kuromoji_search,omitempty" json:"elasticsearch_analyzers_kuromoji_search,omitempty"`
+	ElasticsearchAnalyzersSmartCNEnabled *bool `url:"elasticsearch_analyzers_smartcn_enabled,omitempty" json:"elasticsearch_analyzers_smartcn_enabled,omitempty"`
+	ElasticsearchAnalyzersSmartCNSearch *int `url:"elasticsearch_analyzers_smartcn_search,omitempty" json:"elasticsearch_analyzers_smartcn_search,omitempty"`
+	ElasticsearchClientRequestTimeout *int `url:"elasticsearch_client_request_timeout,omitempty" json:"elasticsearch_client_request_timeout,omitempty"`
+	ElasticsearchIndexedFieldLengthLimit *int `url:"elasticsearch_indexed_field_length_limit,omitempty" json:"elasticsearch_indexed_field_length_limit,omitempty"`
+	ElasticsearchIndexedFileSizeLimitKB *int `url:"elasticsearch_indexed_file_size_limit_kb,omitempty" json:"elasticsearch_indexed_file_size_limit_kb,omitempty"`
+	ElasticsearchIndexing *bool `url:"elasticsearch_indexing,omitempty" json:"elasticsearch_indexing,omitempty"`
+	ElasticsearchLimitIndexing *bool `url:"elasticsearch_limit_indexing,omitempty" json:"elasticsearch_limit_indexing,omitempty"`
+	ElasticsearchMaxBulkConcurrency *int `url:"elasticsearch_max_bulk_concurrency,omitempty" json:"elasticsearch_max_bulk_concurrency,omitempty"`
+	ElasticsearchMaxBulkSizeMB *int `url:"elasticsearch_max_bulk_size_mb,omitempty" json:"elasticsearch_max_bulk_size_mb,omitempty"`
+	ElasticsearchMaxCodeIndexingConcurrency *int `url:"elasticsearch_max_code_indexing_concurrency,omitempty" json:"elasticsearch_max_code_indexing_concurrency,omitempty"`
+	ElasticsearchNamespaceIDs *[]int `url:"elasticsearch_namespace_ids,omitempty" json:"elasticsearch_namespace_ids,omitempty"`
+	ElasticsearchPassword *string `url:"elasticsearch_password,omitempty" json:"elasticsearch_password,omitempty"`
+	ElasticsearchPauseIndexing *bool `url:"elasticsearch_pause_indexing,omitempty" json:"elasticsearch_pause_indexing,omitempty"`
+	ElasticsearchProjectIDs *[]int `url:"elasticsearch_project_ids,omitempty" json:"elasticsearch_project_ids,omitempty"`
+	ElasticsearchReplicas *int `url:"elasticsearch_replicas,omitempty" json:"elasticsearch_replicas,omitempty"`
+	ElasticsearchRequeueWorkers *bool `url:"elasticsearch_requeue_workers,omitempty" json:"elasticsearch_requeue_workers,omitempty"`
+	ElasticsearchSearch *bool `url:"elasticsearch_search,omitempty" json:"elasticsearch_search,omitempty"`
+	ElasticsearchShards *int `url:"elasticsearch_shards,omitempty" json:"elasticsearch_shards,omitempty"`
+	ElasticsearchURL *string `url:"elasticsearch_url,omitempty" json:"elasticsearch_url,omitempty"`
+	ElasticsearchUsername *string `url:"elasticsearch_username,omitempty" json:"elasticsearch_username,omitempty"`
+	ElasticsearchWorkerNumberOfShards *int `url:"elasticsearch_worker_number_of_shards,omitempty" json:"elasticsearch_worker_number_of_shards,omitempty"`
+	EmailAdditionalText *string `url:"email_additional_text,omitempty" json:"email_additional_text,omitempty"`
+	EmailAuthorInBody *bool `url:"email_author_in_body,omitempty" json:"email_author_in_body,omitempty"`
+	EmailConfirmationSetting *string `url:"email_confirmation_setting,omitempty" json:"email_confirmation_setting,omitempty"`
+	EmailRestrictions *string `url:"email_restrictions,omitempty" json:"email_restrictions,omitempty"`
+	EmailRestrictionsEnabled *bool `url:"email_restrictions_enabled,omitempty" json:"email_restrictions_enabled,omitempty"`
+	EnableArtifactExternalRedirectWarningPage *bool `url:"enable_artifact_external_redirect_warning_page,omitempty" json:"enable_artifact_external_redirect_warning_page,omitempty"`
+	EnabledGitAccessProtocol *string `url:"enabled_git_access_protocol,omitempty" json:"enabled_git_access_protocol,omitempty"`
+	EnforceNamespaceStorageLimit *bool `url:"enforce_namespace_storage_limit,omitempty" json:"enforce_namespace_storage_limit,omitempty"`
+	EnforcePATExpiration *bool `url:"enforce_pat_expiration,omitempty" json:"enforce_pat_expiration,omitempty"`
+	EnforceSSHKeyExpiration *bool `url:"enforce_ssh_key_expiration,omitempty" json:"enforce_ssh_key_expiration,omitempty"`
+	EnforceTerms *bool `url:"enforce_terms,omitempty" json:"enforce_terms,omitempty"`
+	ExternalAuthClientCert *string `url:"external_auth_client_cert,omitempty" json:"external_auth_client_cert,omitempty"`
+	ExternalAuthClientKey *string `url:"external_auth_client_key,omitempty" json:"external_auth_client_key,omitempty"`
+	ExternalAuthClientKeyPass *string `url:"external_auth_client_key_pass,omitempty" json:"external_auth_client_key_pass,omitempty"`
+	ExternalAuthorizationServiceDefaultLabel *string `url:"external_authorization_service_default_label,omitempty" json:"external_authorization_service_default_label,omitempty"`
+	ExternalAuthorizationServiceEnabled *bool `url:"external_authorization_service_enabled,omitempty" json:"external_authorization_service_enabled,omitempty"`
+	ExternalAuthorizationServiceTimeout *float64 `url:"external_authorization_service_timeout,omitempty" json:"external_authorization_service_timeout,omitempty"`
+	ExternalAuthorizationServiceURL *string `url:"external_authorization_service_url,omitempty" json:"external_authorization_service_url,omitempty"`
+	ExternalPipelineValidationServiceTimeout *int `url:"external_pipeline_validation_service_timeout,omitempty" json:"external_pipeline_validation_service_timeout,omitempty"`
+	ExternalPipelineValidationServiceToken *string `url:"external_pipeline_validation_service_token,omitempty" json:"external_pipeline_validation_service_token,omitempty"`
+	ExternalPipelineValidationServiceURL *string `url:"external_pipeline_validation_service_url,omitempty" json:"external_pipeline_validation_service_url,omitempty"`
+	FailedLoginAttemptsUnlockPeriodInMinutes *int `url:"failed_login_attempts_unlock_period_in_minutes,omitempty" json:"failed_login_attempts_unlock_period_in_minutes,omitempty"`
+	FileTemplateProjectID *int `url:"file_template_project_id,omitempty" json:"file_template_project_id,omitempty"`
+	FirstDayOfWeek *int `url:"first_day_of_week,omitempty" json:"first_day_of_week,omitempty"`
+	FlocEnabled *bool `url:"floc_enabled,omitempty" json:"floc_enabled,omitempty"`
+	GeoNodeAllowedIPs *string `url:"geo_node_allowed_ips,omitempty" json:"geo_node_allowed_ips,omitempty"`
+	GeoStatusTimeout *int `url:"geo_status_timeout,omitempty" json:"geo_status_timeout,omitempty"`
+	GitRateLimitUsersAlertlist *[]string `url:"git_rate_limit_users_alertlist,omitempty" json:"git_rate_limit_users_alertlist,omitempty"`
+	GitTwoFactorSessionExpiry *int `url:"git_two_factor_session_expiry,omitempty" json:"git_two_factor_session_expiry,omitempty"`
+	GitalyTimeoutDefault *int `url:"gitaly_timeout_default,omitempty" json:"gitaly_timeout_default,omitempty"`
+	GitalyTimeoutFast *int `url:"gitaly_timeout_fast,omitempty" json:"gitaly_timeout_fast,omitempty"`
+	GitalyTimeoutMedium *int `url:"gitaly_timeout_medium,omitempty" json:"gitaly_timeout_medium,omitempty"`
+	GitlabDedicatedInstance *bool `url:"gitlab_dedicated_instance,omitempty" json:"gitlab_dedicated_instance,omitempty"`
+	GitlabEnvironmentToolkitInstance *bool `url:"gitlab_environment_toolkit_instance,omitempty" json:"gitlab_environment_toolkit_instance,omitempty"`
+	GitlabShellOperationLimit *int `url:"gitlab_shell_operation_limit,omitempty" json:"gitlab_shell_operation_limit,omitempty"`
+	GitpodEnabled *bool `url:"gitpod_enabled,omitempty" json:"gitpod_enabled,omitempty"`
+	GitpodURL *string `url:"gitpod_url,omitempty" json:"gitpod_url,omitempty"`
+	GitRateLimitUsersAllowlist *[]string `url:"git_rate_limit_users_allowlist,omitempty" json:"git_rate_limit_users_allowlist,omitempty"`
+	GloballyAllowedIPs *string `url:"globally_allowed_ips,omitempty" json:"globally_allowed_ips,omitempty"`
+	GrafanaEnabled *bool `url:"grafana_enabled,omitempty" json:"grafana_enabled,omitempty"`
+	GrafanaURL *string `url:"grafana_url,omitempty" json:"grafana_url,omitempty"`
+	GravatarEnabled *bool `url:"gravatar_enabled,omitempty" json:"gravatar_enabled,omitempty"`
+	GroupDownloadExportLimit *int `url:"group_download_export_limit,omitempty" json:"group_download_export_limit,omitempty"`
+	GroupExportLimit *int `url:"group_export_limit,omitempty" json:"group_export_limit,omitempty"`
+	GroupImportLimit *int `url:"group_import_limit,omitempty" json:"group_import_limit,omitempty"`
+	GroupOwnersCanManageDefaultBranchProtection *bool `url:"group_owners_can_manage_default_branch_protection,omitempty" json:"group_owners_can_manage_default_branch_protection,omitempty"`
+	GroupRunnerTokenExpirationInterval *int `url:"group_runner_token_expiration_interval,omitempty" json:"group_runner_token_expiration_interval,omitempty"`
+	HTMLEmailsEnabled *bool `url:"html_emails_enabled,omitempty" json:"html_emails_enabled,omitempty"`
+	HashedStorageEnabled *bool `url:"hashed_storage_enabled,omitempty" json:"hashed_storage_enabled,omitempty"`
+	HelpPageDocumentationBaseURL *string `url:"help_page_documentation_base_url,omitempty" json:"help_page_documentation_base_url,omitempty"`
+	HelpPageHideCommercialContent *bool `url:"help_page_hide_commercial_content,omitempty" json:"help_page_hide_commercial_content,omitempty"`
+	HelpPageSupportURL *string `url:"help_page_support_url,omitempty" json:"help_page_support_url,omitempty"`
+	HelpPageText *string `url:"help_page_text,omitempty" json:"help_page_text,omitempty"`
+	HelpText *string `url:"help_text,omitempty" json:"help_text,omitempty"`
+	HideThirdPartyOffers *bool `url:"hide_third_party_offers,omitempty" json:"hide_third_party_offers,omitempty"`
+	HomePageURL *string `url:"home_page_url,omitempty" json:"home_page_url,omitempty"`
+	HousekeepingBitmapsEnabled *bool `url:"housekeeping_bitmaps_enabled,omitempty" json:"housekeeping_bitmaps_enabled,omitempty"`
+	HousekeepingEnabled *bool `url:"housekeeping_enabled,omitempty" json:"housekeeping_enabled,omitempty"`
+	HousekeepingFullRepackPeriod *int `url:"housekeeping_full_repack_period,omitempty" json:"housekeeping_full_repack_period,omitempty"`
+	HousekeepingGcPeriod *int `url:"housekeeping_gc_period,omitempty" json:"housekeeping_gc_period,omitempty"`
+	HousekeepingIncrementalRepackPeriod *int `url:"housekeeping_incremental_repack_period,omitempty" json:"housekeeping_incremental_repack_period,omitempty"`
+	HousekeepingOptimizeRepositoryPeriod *int `url:"housekeeping_optimize_repository_period,omitempty" json:"housekeeping_optimize_repository_period,omitempty"`
+	ImportSources *[]string `url:"import_sources,omitempty" json:"import_sources,omitempty"`
+	InactiveProjectsDeleteAfterMonths *int `url:"inactive_projects_delete_after_months,omitempty" json:"inactive_projects_delete_after_months,omitempty"`
+	InactiveProjectsMinSizeMB *int `url:"inactive_projects_min_size_mb,omitempty" json:"inactive_projects_min_size_mb,omitempty"`
+	InactiveProjectsSendWarningEmailAfterMonths *int `url:"inactive_projects_send_warning_email_after_months,omitempty" json:"inactive_projects_send_warning_email_after_months,omitempty"`
+	IncludeOptionalMetricsInServicePing *bool `url:"include_optional_metrics_in_service_ping,omitempty" json:"include_optional_metrics_in_service_ping,omitempty"`
+	InProductMarketingEmailsEnabled *bool `url:"in_product_marketing_emails_enabled,omitempty" json:"in_product_marketing_emails_enabled,omitempty"`
+	InvisibleCaptchaEnabled *bool `url:"invisible_captcha_enabled,omitempty" json:"invisible_captcha_enabled,omitempty"`
+	IssuesCreateLimit *int `url:"issues_create_limit,omitempty" json:"issues_create_limit,omitempty"`
+	JiraConnectApplicationKey *string `url:"jira_connect_application_key,omitempty" json:"jira_connect_application_key,omitempty"`
+	JiraConnectPublicKeyStorageEnabled *bool `url:"jira_connect_public_key_storage_enabled,omitempty" json:"jira_connect_public_key_storage_enabled,omitempty"`
+	JiraConnectProxyURL *string `url:"jira_connect_proxy_url,omitempty" json:"jira_connect_proxy_url,omitempty"`
+	KeepLatestArtifact *bool `url:"keep_latest_artifact,omitempty" json:"keep_latest_artifact,omitempty"`
+	KrokiEnabled *bool `url:"kroki_enabled,omitempty" json:"kroki_enabled,omitempty"`
+	KrokiFormats *map[string]bool `url:"kroki_formats,omitempty" json:"kroki_formats,omitempty"`
+	KrokiURL *string `url:"kroki_url,omitempty" json:"kroki_url,omitempty"`
+	LocalMarkdownVersion *int `url:"local_markdown_version,omitempty" json:"local_markdown_version,omitempty"`
+	LockDuoFeaturesEnabled *bool `url:"lock_duo_features_enabled,omitempty" json:"lock_duo_features_enabled,omitempty"`
+	LockMembershipsToLDAP *bool `url:"lock_memberships_to_ldap,omitempty" json:"lock_memberships_to_ldap,omitempty"`
+	LoginRecaptchaProtectionEnabled *bool `url:"login_recaptcha_protection_enabled,omitempty" json:"login_recaptcha_protection_enabled,omitempty"`
+	MailgunEventsEnabled *bool `url:"mailgun_events_enabled,omitempty" json:"mailgun_events_enabled,omitempty"`
+	MailgunSigningKey *string `url:"mailgun_signing_key,omitempty" json:"mailgun_signing_key,omitempty"`
+	MaintenanceMode *bool `url:"maintenance_mode,omitempty" json:"maintenance_mode,omitempty"`
+	MaintenanceModeMessage *string `url:"maintenance_mode_message,omitempty" json:"maintenance_mode_message,omitempty"`
+	MavenPackageRequestsForwarding *bool `url:"maven_package_requests_forwarding,omitempty" json:"maven_package_requests_forwarding,omitempty"`
+	MaxArtifactsSize *int `url:"max_artifacts_size,omitempty" json:"max_artifacts_size,omitempty"`
+	MaxAttachmentSize *int `url:"max_attachment_size,omitempty" json:"max_attachment_size,omitempty"`
+	MaxDecompressedArchiveSize *int `url:"max_decompressed_archive_size,omitempty" json:"max_decompressed_archive_size,omitempty"`
+	MaxExportSize *int `url:"max_export_size,omitempty" json:"max_export_size,omitempty"`
+	MaxImportRemoteFileSize *int `url:"max_import_remote_file_size,omitempty" json:"max_import_remote_file_size,omitempty"`
+	MaxImportSize *int `url:"max_import_size,omitempty" json:"max_import_size,omitempty"`
+	MaxLoginAttempts *int `url:"max_login_attempts,omitempty" json:"max_login_attempts,omitempty"`
+	MaxNumberOfRepositoryDownloads *int `url:"max_number_of_repository_downloads,omitempty" json:"max_number_of_repository_downloads,omitempty"`
+	MaxNumberOfRepositoryDownloadsWithinTimePeriod *int `url:"max_number_of_repository_downloads_within_time_period,omitempty" json:"max_number_of_repository_downloads_within_time_period,omitempty"`
+	MaxPagesSize *int `url:"max_pages_size,omitempty" json:"max_pages_size,omitempty"`
+	MaxPersonalAccessTokenLifetime *int `url:"max_personal_access_token_lifetime,omitempty" json:"max_personal_access_token_lifetime,omitempty"`
+	MaxSSHKeyLifetime *int `url:"max_ssh_key_lifetime,omitempty" json:"max_ssh_key_lifetime,omitempty"`
+	MaxTerraformStateSizeBytes *int `url:"max_terraform_state_size_bytes,omitempty" json:"max_terraform_state_size_bytes,omitempty"`
+	MaxYAMLDepth *int `url:"max_yaml_depth,omitempty" json:"max_yaml_depth,omitempty"`
+	MaxYAMLSizeBytes *int `url:"max_yaml_size_bytes,omitempty" json:"max_yaml_size_bytes,omitempty"`
+	MetricsMethodCallThreshold *int `url:"metrics_method_call_threshold,omitempty" json:"metrics_method_call_threshold,omitempty"`
+	MinimumPasswordLength *int `url:"minimum_password_length,omitempty" json:"minimum_password_length,omitempty"`
+	MirrorAvailable *bool `url:"mirror_available,omitempty" json:"mirror_available,omitempty"`
+	MirrorCapacityThreshold *int `url:"mirror_capacity_threshold,omitempty" json:"mirror_capacity_threshold,omitempty"`
+	MirrorMaxCapacity *int `url:"mirror_max_capacity,omitempty" json:"mirror_max_capacity,omitempty"`
+	MirrorMaxDelay *int `url:"mirror_max_delay,omitempty" json:"mirror_max_delay,omitempty"`
+	NPMPackageRequestsForwarding *bool `url:"npm_package_requests_forwarding,omitempty" json:"npm_package_requests_forwarding,omitempty"`
+	NotesCreateLimit *int `url:"notes_create_limit,omitempty" json:"notes_create_limit,omitempty"`
+	NotifyOnUnknownSignIn *bool `url:"notify_on_unknown_sign_in,omitempty" json:"notify_on_unknown_sign_in,omitempty"`
+	NugetSkipMetadataURLValidation *bool `url:"nuget_skip_metadata_url_validation,omitempty" json:"nuget_skip_metadata_url_validation,omitempty"`
+	OutboundLocalRequestsAllowlistRaw *string `url:"outbound_local_requests_allowlist_raw,omitempty" json:"outbound_local_requests_allowlist_raw,omitempty"`
+	OutboundLocalRequestsWhitelist *[]string `url:"outbound_local_requests_whitelist,omitempty" json:"outbound_local_requests_whitelist,omitempty"`
+	PackageMetadataPURLTypes *[]int `url:"package_metadata_purl_types,omitempty" json:"package_metadata_purl_types,omitempty"`
+	PackageRegistryAllowAnyoneToPullOption *bool `url:"package_registry_allow_anyone_to_pull_option,omitempty" json:"package_registry_allow_anyone_to_pull_option,omitempty"`
+	PackageRegistryCleanupPoliciesWorkerCapacity *int `url:"package_registry_cleanup_policies_worker_capacity,omitempty" json:"package_registry_cleanup_policies_worker_capacity,omitempty"`
+	PagesDomainVerificationEnabled *bool `url:"pages_domain_verification_enabled,omitempty" json:"pages_domain_verification_enabled,omitempty"`
+	PasswordAuthenticationEnabledForGit *bool `url:"password_authentication_enabled_for_git,omitempty" json:"password_authentication_enabled_for_git,omitempty"`
+	PasswordAuthenticationEnabledForWeb *bool `url:"password_authentication_enabled_for_web,omitempty" json:"password_authentication_enabled_for_web,omitempty"`
+	PasswordNumberRequired *bool `url:"password_number_required,omitempty" json:"password_number_required,omitempty"`
+	PasswordSymbolRequired *bool `url:"password_symbol_required,omitempty" json:"password_symbol_required,omitempty"`
+	PasswordUppercaseRequired *bool `url:"password_uppercase_required,omitempty" json:"password_uppercase_required,omitempty"`
+	PasswordLowercaseRequired *bool `url:"password_lowercase_required,omitempty" json:"password_lowercase_required,omitempty"`
+	PerformanceBarAllowedGroupID *int `url:"performance_bar_allowed_group_id,omitempty" json:"performance_bar_allowed_group_id,omitempty"`
+	PerformanceBarAllowedGroupPath *string `url:"performance_bar_allowed_group_path,omitempty" json:"performance_bar_allowed_group_path,omitempty"`
+	PerformanceBarEnabled *bool `url:"performance_bar_enabled,omitempty" json:"performance_bar_enabled,omitempty"`
+	PersonalAccessTokenPrefix *string `url:"personal_access_token_prefix,omitempty" json:"personal_access_token_prefix,omitempty"`
+	PlantumlEnabled *bool `url:"plantuml_enabled,omitempty" json:"plantuml_enabled,omitempty"`
+	PlantumlURL *string `url:"plantuml_url,omitempty" json:"plantuml_url,omitempty"`
+	PipelineLimitPerProjectUserSha *int `url:"pipeline_limit_per_project_user_sha,omitempty" json:"pipeline_limit_per_project_user_sha,omitempty"`
+	PollingIntervalMultiplier *float64 `url:"polling_interval_multiplier,omitempty" json:"polling_interval_multiplier,omitempty"`
+	PreventMergeRequestsAuthorApproval *bool `url:"prevent_merge_requests_author_approval,omitempty" json:"prevent_merge_requests_author_approval,omitempty"`
+	PreventMergeRequestsCommittersApproval *bool `url:"prevent_merge_requests_committers_approval,omitempty" json:"prevent_merge_requests_committers_approval,omitempty"`
+	ProjectDownloadExportLimit *int `url:"project_download_export_limit,omitempty" json:"project_download_export_limit,omitempty"`
+	ProjectExportEnabled *bool `url:"project_export_enabled,omitempty" json:"project_export_enabled,omitempty"`
+	ProjectExportLimit *int `url:"project_export_limit,omitempty" json:"project_export_limit,omitempty"`
+	ProjectImportLimit *int `url:"project_import_limit,omitempty" json:"project_import_limit,omitempty"`
+	ProjectJobsAPIRateLimit *int `url:"project_jobs_api_rate_limit,omitempty" json:"project_jobs_api_rate_limit,omitempty"`
+	ProjectRunnerTokenExpirationInterval *int `url:"project_runner_token_expiration_interval,omitempty" json:"project_runner_token_expiration_interval,omitempty"`
+	ProjectsAPIRateLimitUnauthenticated *int `url:"projects_api_rate_limit_unauthenticated,omitempty" json:"projects_api_rate_limit_unauthenticated,omitempty"`
+	PrometheusMetricsEnabled *bool `url:"prometheus_metrics_enabled,omitempty" json:"prometheus_metrics_enabled,omitempty"`
+	ProtectedCIVariables *bool `url:"protected_ci_variables,omitempty" json:"protected_ci_variables,omitempty"`
+	PseudonymizerEnabled *bool `url:"pseudonymizer_enabled,omitempty" json:"pseudonymizer_enabled,omitempty"`
+	PushEventActivitiesLimit *int `url:"push_event_activities_limit,omitempty" json:"push_event_activities_limit,omitempty"`
+	PushEventHooksLimit *int `url:"push_event_hooks_limit,omitempty" json:"push_event_hooks_limit,omitempty"`
+	PyPIPackageRequestsForwarding *bool `url:"pypi_package_requests_forwarding,omitempty" json:"pypi_package_requests_forwarding,omitempty"`
+	RSAKeyRestriction *int `url:"rsa_key_restriction,omitempty" json:"rsa_key_restriction,omitempty"`
+	RateLimitingResponseText *string `url:"rate_limiting_response_text,omitempty" json:"rate_limiting_response_text,omitempty"`
+	RawBlobRequestLimit *int `url:"raw_blob_request_limit,omitempty" json:"raw_blob_request_limit,omitempty"`
+	RecaptchaEnabled *bool `url:"recaptcha_enabled,omitempty" json:"recaptcha_enabled,omitempty"`
+	RecaptchaPrivateKey *string `url:"recaptcha_private_key,omitempty" json:"recaptcha_private_key,omitempty"`
+	RecaptchaSiteKey *string `url:"recaptcha_site_key,omitempty" json:"recaptcha_site_key,omitempty"`
+	ReceiveMaxInputSize *int `url:"receive_max_input_size,omitempty" json:"receive_max_input_size,omitempty"`
+	ReceptiveClusterAgentsEnabled *bool `url:"receptive_cluster_agents_enabled,omitempty" json:"receptive_cluster_agents_enabled,omitempty"`
+	RememberMeEnabled *bool `url:"remember_me_enabled,omitempty" json:"remember_me_enabled,omitempty"`
+	RepositoryChecksEnabled *bool `url:"repository_checks_enabled,omitempty" json:"repository_checks_enabled,omitempty"`
+	RepositorySizeLimit *int `url:"repository_size_limit,omitempty" json:"repository_size_limit,omitempty"`
+	RepositoryStorages *[]string `url:"repository_storages,omitempty" json:"repository_storages,omitempty"`
+	RepositoryStoragesWeighted *map[string]int `url:"repository_storages_weighted,omitempty" json:"repository_storages_weighted,omitempty"`
+	RequireAdminApprovalAfterUserSignup *bool `url:"require_admin_approval_after_user_signup,omitempty" json:"require_admin_approval_after_user_signup,omitempty"`
+	RequireAdminTwoFactorAuthentication *bool `url:"require_admin_two_factor_authentication,omitempty" json:"require_admin_two_factor_authentication,omitempty"`
+	RequirePersonalAccessTokenExpiry *bool `url:"require_personal_access_token_expiry,omitempty" json:"require_personal_access_token_expiry,omitempty"`
+	RequireTwoFactorAuthentication *bool `url:"require_two_factor_authentication,omitempty" json:"require_two_factor_authentication,omitempty"`
+	RestrictedVisibilityLevels *[]VisibilityValue `url:"restricted_visibility_levels,omitempty" json:"restricted_visibility_levels,omitempty"`
+	RunnerTokenExpirationInterval *int `url:"runner_token_expiration_interval,omitempty" json:"runner_token_expiration_interval,omitempty"`
+	SearchRateLimit *int `url:"search_rate_limit,omitempty" json:"search_rate_limit,omitempty"`
+	SearchRateLimitUnauthenticated *int `url:"search_rate_limit_unauthenticated,omitempty" json:"search_rate_limit_unauthenticated,omitempty"`
+	SecretDetectionRevocationTokenTypesURL *string `url:"secret_detection_revocation_token_types_url,omitempty" json:"secret_detection_revocation_token_types_url,omitempty"`
+	SecretDetectionTokenRevocationEnabled *bool `url:"secret_detection_token_revocation_enabled,omitempty" json:"secret_detection_token_revocation_enabled,omitempty"`
+	SecretDetectionTokenRevocationToken *string `url:"secret_detection_token_revocation_token,omitempty" json:"secret_detection_token_revocation_token,omitempty"`
+	SecretDetectionTokenRevocationURL *string `url:"secret_detection_token_revocation_url,omitempty" json:"secret_detection_token_revocation_url,omitempty"`
+	SecurityApprovalPoliciesLimit *int `url:"security_approval_policies_limit,omitempty" json:"security_approval_policies_limit,omitempty"`
+	SecurityPolicyGlobalGroupApproversEnabled *bool `url:"security_policy_global_group_approvers_enabled,omitempty" json:"security_policy_global_group_approvers_enabled,omitempty"`
+	SecurityTXTContent *string `url:"security_txt_content,omitempty" json:"security_txt_content,omitempty"`
+	SendUserConfirmationEmail *bool `url:"send_user_confirmation_email,omitempty" json:"send_user_confirmation_email,omitempty"`
+	SentryClientsideDSN *string `url:"sentry_clientside_dsn,omitempty" json:"sentry_clientside_dsn,omitempty"`
+	SentryDSN *string `url:"sentry_dsn,omitempty" json:"sentry_dsn,omitempty"`
+	SentryEnabled *string `url:"sentry_enabled,omitempty" json:"sentry_enabled,omitempty"`
+	SentryEnvironment *string `url:"sentry_environment,omitempty" json:"sentry_environment,omitempty"`
+	ServiceAccessTokensExpirationEnforced *bool `url:"service_access_tokens_expiration_enforced,omitempty" json:"service_access_tokens_expiration_enforced,omitempty"`
+	SessionExpireDelay *int `url:"session_expire_delay,omitempty" json:"session_expire_delay,omitempty"`
+	SharedRunnersEnabled *bool `url:"shared_runners_enabled,omitempty" json:"shared_runners_enabled,omitempty"`
+	SharedRunnersMinutes *int `url:"shared_runners_minutes,omitempty" json:"shared_runners_minutes,omitempty"`
+	SharedRunnersText *string `url:"shared_runners_text,omitempty" json:"shared_runners_text,omitempty"`
+	SidekiqJobLimiterCompressionThresholdBytes *int `url:"sidekiq_job_limiter_compression_threshold_bytes,omitempty" json:"sidekiq_job_limiter_compression_threshold_bytes,omitempty"`
+	SidekiqJobLimiterLimitBytes *int `url:"sidekiq_job_limiter_limit_bytes,omitempty" json:"sidekiq_job_limiter_limit_bytes,omitempty"`
+	SidekiqJobLimiterMode *string `url:"sidekiq_job_limiter_mode,omitempty" json:"sidekiq_job_limiter_mode,omitempty"`
+	SignInText *string `url:"sign_in_text,omitempty" json:"sign_in_text,omitempty"`
+	SignupEnabled *bool `url:"signup_enabled,omitempty" json:"signup_enabled,omitempty"`
+	SilentAdminExportsEnabled *bool `url:"silent_admin_exports_enabled,omitempty" json:"silent_admin_exports_enabled,omitempty"`
+	SilentModeEnabled *bool `url:"silent_mode_enabled,omitempty" json:"silent_mode_enabled,omitempty"`
+	SlackAppEnabled *bool `url:"slack_app_enabled,omitempty" json:"slack_app_enabled,omitempty"`
+	SlackAppID *string `url:"slack_app_id,omitempty" json:"slack_app_id,omitempty"`
+	SlackAppSecret *string `url:"slack_app_secret,omitempty" json:"slack_app_secret,omitempty"`
+	SlackAppSigningSecret *string `url:"slack_app_signing_secret,omitempty" json:"slack_app_signing_secret,omitempty"`
+	SlackAppVerificationToken *string `url:"slack_app_verification_token,omitempty" json:"slack_app_verification_token,omitempty"`
+	SnippetSizeLimit *int `url:"snippet_size_limit,omitempty" json:"snippet_size_limit,omitempty"`
+	SnowplowAppID *string `url:"snowplow_app_id,omitempty" json:"snowplow_app_id,omitempty"`
+	SnowplowCollectorHostname *string `url:"snowplow_collector_hostname,omitempty" json:"snowplow_collector_hostname,omitempty"`
+	SnowplowCookieDomain *string `url:"snowplow_cookie_domain,omitempty" json:"snowplow_cookie_domain,omitempty"`
+	SnowplowDatabaseCollectorHostname *string `url:"snowplow_database_collector_hostname,omitempty" json:"snowplow_database_collector_hostname,omitempty"`
+	SnowplowEnabled *bool `url:"snowplow_enabled,omitempty" json:"snowplow_enabled,omitempty"`
+	SourcegraphEnabled *bool `url:"sourcegraph_enabled,omitempty" json:"sourcegraph_enabled,omitempty"`
+	SourcegraphPublicOnly *bool `url:"sourcegraph_public_only,omitempty" json:"sourcegraph_public_only,omitempty"`
+	SourcegraphURL *string `url:"sourcegraph_url,omitempty" json:"sourcegraph_url,omitempty"`
+	SpamCheckAPIKey *string `url:"spam_check_api_key,omitempty" json:"spam_check_api_key,omitempty"`
+	SpamCheckEndpointEnabled *bool `url:"spam_check_endpoint_enabled,omitempty" json:"spam_check_endpoint_enabled,omitempty"`
+	SpamCheckEndpointURL *string `url:"spam_check_endpoint_url,omitempty" json:"spam_check_endpoint_url,omitempty"`
+	StaticObjectsExternalStorageAuthToken *string `url:"static_objects_external_storage_auth_token,omitempty" json:"static_objects_external_storage_auth_token,omitempty"`
+	StaticObjectsExternalStorageURL *string `url:"static_objects_external_storage_url,omitempty" json:"static_objects_external_storage_url,omitempty"`
+	SuggestPipelineEnabled *bool `url:"suggest_pipeline_enabled,omitempty" json:"suggest_pipeline_enabled,omitempty"`
+	TerminalMaxSessionTime *int `url:"terminal_max_session_time,omitempty" json:"terminal_max_session_time,omitempty"`
+	Terms *string `url:"terms,omitempty" json:"terms,omitempty"`
+	ThrottleAuthenticatedAPIEnabled *bool `url:"throttle_authenticated_api_enabled,omitempty" json:"throttle_authenticated_api_enabled,omitempty"`
+	ThrottleAuthenticatedAPIPeriodInSeconds *int `url:"throttle_authenticated_api_period_in_seconds,omitempty" json:"throttle_authenticated_api_period_in_seconds,omitempty"`
+	ThrottleAuthenticatedAPIRequestsPerPeriod *int `url:"throttle_authenticated_api_requests_per_period,omitempty" json:"throttle_authenticated_api_requests_per_period,omitempty"`
+	ThrottleAuthenticatedDeprecatedAPIEnabled
+	ThrottleAuthenticatedDeprecatedAPIEnabled *bool `url:"throttle_authenticated_deprecated_api_enabled,omitempty" json:"throttle_authenticated_deprecated_api_enabled,omitempty"`
+	ThrottleAuthenticatedDeprecatedAPIPeriodInSeconds *int `url:"throttle_authenticated_deprecated_api_period_in_seconds,omitempty" json:"throttle_authenticated_deprecated_api_period_in_seconds,omitempty"`
+	ThrottleAuthenticatedDeprecatedAPIRequestsPerPeriod *int `url:"throttle_authenticated_deprecated_api_requests_per_period,omitempty" json:"throttle_authenticated_deprecated_api_requests_per_period,omitempty"`
+	ThrottleAuthenticatedFilesAPIEnabled *bool `url:"throttle_authenticated_files_api_enabled,omitempty" json:"throttle_authenticated_files_api_enabled,omitempty"`
+	ThrottleAuthenticatedFilesAPIPeriodInSeconds *int `url:"throttle_authenticated_files_api_period_in_seconds,omitempty" json:"throttle_authenticated_files_api_period_in_seconds,omitempty"`
+	ThrottleAuthenticatedFilesAPIRequestsPerPeriod *int `url:"throttle_authenticated_files_api_requests_per_period,omitempty" json:"throttle_authenticated_files_api_requests_per_period,omitempty"`
+	ThrottleAuthenticatedGitLFSEnabled *bool `url:"throttle_authenticated_git_lfs_enabled,omitempty" json:"throttle_authenticated_git_lfs_enabled,omitempty"`
+	ThrottleAuthenticatedGitLFSPeriodInSeconds *int `url:"throttle_authenticated_git_lfs_period_in_seconds,omitempty" json:"throttle_authenticated_git_lfs_period_in_seconds,omitempty"`
+	ThrottleAuthenticatedGitLFSRequestsPerPeriod *int `url:"throttle_authenticated_git_lfs_requests_per_period,omitempty" json:"throttle_authenticated_git_lfs_requests_per_period,omitempty"`
+	ThrottleAuthenticatedPackagesAPIEnabled *bool `url:"throttle_authenticated_packages_api_enabled,omitempty" json:"throttle_authenticated_packages_api_enabled,omitempty"`
+	ThrottleAuthenticatedPackagesAPIPeriodInSeconds *int `url:"throttle_authenticated_packages_api_period_in_seconds,omitempty" json:"throttle_authenticated_packages_api_period_in_seconds,omitempty"`
+	ThrottleAuthenticatedPackagesAPIRequestsPerPeriod *int `url:"throttle_authenticated_packages_api_requests_per_period,omitempty" json:"throttle_authenticated_packages_api_requests_per_period,omitempty"`
+	ThrottleAuthenticatedWebEnabled *bool `url:"throttle_authenticated_web_enabled,omitempty" json:"throttle_authenticated_web_enabled,omitempty"`
+	ThrottleAuthenticatedWebPeriodInSeconds *int `url:"throttle_authenticated_web_period_in_seconds,omitempty" json:"throttle_authenticated_web_period_in_seconds,omitempty"`
+	ThrottleAuthenticatedWebRequestsPerPeriod *int `url:"throttle_authenticated_web_requests_per_period,omitempty" json:"throttle_authenticated_web_requests_per_period,omitempty"`
+	ThrottleIncidentManagementNotificationEnabled *bool `url:"throttle_incident_management_notification_enabled,omitempty" json:"throttle_incident_management_notification_enabled,omitempty"`
+	ThrottleIncidentManagementNotificationPerPeriod *int `url:"throttle_incident_management_notification_per_period,omitempty" json:"throttle_incident_management_notification_per_period,omitempty"`
+	ThrottleIncidentManagementNotificationPeriodInSeconds *int `url:"throttle_incident_management_notification_period_in_seconds,omitempty" json:"throttle_incident_management_notification_period_in_seconds,omitempty"`
+	ThrottleProtectedPathsEnabled *bool `url:"throttle_protected_paths_enabled,omitempty" json:"throttle_protected_paths_enabled,omitempty"`
+	ThrottleProtectedPathsPeriodInSeconds *int `url:"throttle_protected_paths_period_in_seconds,omitempty" json:"throttle_protected_paths_period_in_seconds,omitempty"`
+	ThrottleProtectedPathsRequestsPerPeriod *int `url:"throttle_protected_paths_requests_per_period,omitempty" json:"throttle_protected_paths_requests_per_period,omitempty"`
+	ThrottleUnauthenticatedAPIEnabled *bool `url:"throttle_unauthenticated_api_enabled,omitempty" json:"throttle_unauthenticated_api_enabled,omitempty"`
+	ThrottleUnauthenticatedAPIPeriodInSeconds *int `url:"throttle_unauthenticated_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_api_period_in_seconds,omitempty"`
+	ThrottleUnauthenticatedAPIRequestsPerPeriod *int `url:"throttle_unauthenticated_api_requests_per_period,omitempty" json:"throttle_unauthenticated_api_requests_per_period,omitempty"`
+	ThrottleUnauthenticatedDeprecatedAPIEnabled *bool `url:"throttle_unauthenticated_deprecated_api_enabled,omitempty" json:"throttle_unauthenticated_deprecated_api_enabled,omitempty"`
+	ThrottleUnauthenticatedDeprecatedAPIPeriodInSeconds *int `url:"throttle_unauthenticated_deprecated_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_deprecated_api_period_in_seconds,omitempty"`
+	ThrottleUnauthenticatedDeprecatedAPIRequestsPerPeriod *int `url:"throttle_unauthenticated_deprecated_api_requests_per_period,omitempty" json:"throttle_unauthenticated_deprecated_api_requests_per_period,omitempty"`
+	ThrottleUnauthenticatedEnabled *bool `url:"throttle_unauthenticated_enabled,omitempty" json:"throttle_unauthenticated_enabled,omitempty"`
+	ThrottleUnauthenticatedFilesAPIEnabled *bool `url:"throttle_unauthenticated_files_api_enabled,omitempty" json:"throttle_unauthenticated_files_api_enabled,omitempty"`
+	ThrottleUnauthenticatedFilesAPIPeriodInSeconds *int `url:"throttle_unauthenticated_files_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_files_api_period_in_seconds,omitempty"`
+	ThrottleUnauthenticatedFilesAPIRequestsPerPeriod *int `url:"throttle_unauthenticated_files_api_requests_per_period,omitempty" json:"throttle_unauthenticated_files_api_requests_per_period,omitempty"`
+	ThrottleUnauthenticatedGitLFSEnabled *bool `url:"throttle_unauthenticated_git_lfs_enabled,omitempty" json:"throttle_unauthenticated_git_lfs_enabled,omitempty"`
+	ThrottleUnauthenticatedGitLFSPeriodInSeconds *int `url:"throttle_unauthenticated_git_lfs_period_in_seconds,omitempty" json:"throttle_unauthenticated_git_lfs_period_in_seconds,omitempty"`
+	ThrottleUnauthenticatedGitLFSRequestsPerPeriod *int `url:"throttle_unauthenticated_git_lfs_requests_per_period,omitempty" json:"throttle_unauthenticated_git_lfs_requests_per_period,omitempty"`
+	ThrottleUnauthenticatedPackagesAPIEnabled *bool `url:"throttle_unauthenticated_packages_api_enabled,omitempty" json:"throttle_unauthenticated_packages_api_enabled,omitempty"`
+	ThrottleUnauthenticatedPackagesAPIPeriodInSeconds *int `url:"throttle_unauthenticated_packages_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_packages_api_period_in_seconds,omitempty"`
+	ThrottleUnauthenticatedPackagesAPIRequestsPerPeriod *int `url:"throttle_unauthenticated_packages_api_requests_per_period,omitempty" json:"throttle_unauthenticated_packages_api_requests_per_period,omitempty"`
+	ThrottleUnauthenticatedPeriodInSeconds *int `url:"throttle_unauthenticated_period_in_seconds,omitempty" json:"throttle_unauthenticated_period_in_seconds,omitempty"`
+	ThrottleUnauthenticatedRequestsPerPeriod *int `url:"throttle_unauthenticated_requests_per_period,omitempty" json:"throttle_unauthenticated_requests_per_period,omitempty"`
+	ThrottleUnauthenticatedWebEnabled *bool `url:"throttle_unauthenticated_web_enabled,omitempty" json:"throttle_unauthenticated_web_enabled,omitempty"`
+	ThrottleUnauthenticatedWebPeriodInSeconds *int `url:"throttle_unauthenticated_web_period_in_seconds,omitempty" json:"throttle_unauthenticated_web_period_in_seconds,omitempty"`
+	ThrottleUnauthenticatedWebRequestsPerPeriod *int `url:"throttle_unauthenticated_web_requests_per_period,omitempty" json:"throttle_unauthenticated_web_requests_per_period,omitempty"`
+	TimeTrackingLimitToHours *bool `url:"time_tracking_limit_to_hours,omitempty" json:"time_tracking_limit_to_hours,omitempty"`
+	TwoFactorGracePeriod *int `url:"two_factor_grace_period,omitempty" json:"two_factor_grace_period,omitempty"`
+	UnconfirmedUsersDeleteAfterDays *int `url:"unconfirmed_users_delete_after_days,omitempty" json:"unconfirmed_users_delete_after_days,omitempty"`
+	UniqueIPsLimitEnabled *bool `url:"unique_ips_limit_enabled,omitempty" json:"unique_ips_limit_enabled,omitempty"`
+	UniqueIPsLimitPerUser *int `url:"unique_ips_limit_per_user,omitempty" json:"unique_ips_limit_per_user,omitempty"`
+	UniqueIPsLimitTimeWindow *int `url:"unique_ips_limit_time_window,omitempty" json:"unique_ips_limit_time_window,omitempty"`
+	UpdateRunnerVersionsEnabled *bool `url:"update_runner_versions_enabled,omitempty" json:"update_runner_versions_enabled,omitempty"`
+	UpdatingNameDisabledForUsers *bool `url:"updating_name_disabled_for_users,omitempty" json:"updating_name_disabled_for_users,omitempty"`
+	UsagePingEnabled *bool `url:"usage_ping_enabled,omitempty" json:"usage_ping_enabled,omitempty"`
+	UsagePingFeaturesEnabled *bool `url:"usage_ping_features_enabled,omitempty" json:"usage_ping_features_enabled,omitempty"`
+	UseClickhouseForAnalytics *bool `url:"use_clickhouse_for_analytics,omitempty" json:"use_clickhouse_for_analytics,omitempty"`
+	UserDeactivationEmailsEnabled *bool `url:"user_deactivation_emails_enabled,omitempty" json:"user_deactivation_emails_enabled,omitempty"`
+	UserDefaultExternal *bool `url:"user_default_external,omitempty" json:"user_default_external,omitempty"`
+	UserDefaultInternalRegex *string `url:"user_default_internal_regex,omitempty" json:"user_default_internal_regex,omitempty"`
+	UserDefaultsToPrivateProfile *bool `url:"user_defaults_to_private_profile,omitempty" json:"user_defaults_to_private_profile,omitempty"`
+	UserEmailLookupLimit *int `url:"user_email_lookup_limit,omitempty" json:"user_email_lookup_limit,omitempty"`
+	UserOauthApplications *bool `url:"user_oauth_applications,omitempty" json:"user_oauth_applications,omitempty"`
+	UserShowAddSSHKeyMessage *bool `url:"user_show_add_ssh_key_message,omitempty" json:"user_show_add_ssh_key_message,omitempty"`
+	UsersGetByIDLimit *int `url:"users_get_by_id_limit,omitempty" json:"users_get_by_id_limit,omitempty"`
+	UsersGetByIDLimitAllowlistRaw *string `url:"users_get_by_id_limit_allowlist_raw,omitempty" json:"users_get_by_id_limit_allowlist_raw,omitempty"`
+	ValidRunnerRegistrars *[]string `url:"valid_runner_registrars,omitempty" json:"valid_runner_registrars,omitempty"`
+	VersionCheckEnabled *bool `url:"version_check_enabled,omitempty" json:"version_check_enabled,omitempty"`
+	WebIDEClientsidePreviewEnabled *bool `url:"web_ide_clientside_preview_enabled,omitempty" json:"web_ide_clientside_preview_enabled,omitempty"`
+	WhatsNewVariant *string `url:"whats_new_variant,omitempty" json:"whats_new_variant,omitempty"`
+	WikiPageMaxContentBytes *int `url:"wiki_page_max_content_bytes,omitempty" json:"wiki_page_max_content_bytes,omitempty"`
+}
+
+// BranchProtectionDefaultsOptions represents default Git protected branch permissions options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/groups.html#options-for-default_branch_protection_defaults
+type BranchProtectionDefaultsOptions struct {
+	AllowedToPush *[]int `url:"allowed_to_push,omitempty" json:"allowed_to_push,omitempty"`
+	AllowForcePush *bool `url:"allow_force_push,omitempty" json:"allow_force_push,omitempty"`
+	AllowedToMerge *[]int `url:"allowed_to_merge,omitempty" json:"allowed_to_merge,omitempty"`
+	DeveloperCanInitialPush *bool `url:"developer_can_initial_push,omitempty" json:"developer_can_initial_push,omitempty"`
+}
+
+// UpdateSettings updates the application settings.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/settings.html#change-application-settings
+func (s *SettingsService) UpdateSettings(opt *UpdateSettingsOptions, options ...RequestOptionFunc) (*Settings, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodPut, "application/settings", opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	as := new(Settings)
+	resp, err := s.client.Do(req, as)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return as, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/sidekiq_metrics.go b/vendor/github.com/xanzy/go-gitlab/sidekiq_metrics.go
new file mode 100644
index 0000000000..d7a7834b66
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/sidekiq_metrics.go
@@ -0,0 +1,157 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"net/http"
+	"time"
+)
+
+// SidekiqService handles communication with the sidekiq service.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/sidekiq_metrics.html
+type SidekiqService struct {
+	client *Client
+}
+
+// QueueMetrics represents the GitLab sidekiq queue metrics.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/sidekiq_metrics.html#get-the-current-queue-metrics
+type QueueMetrics struct {
+	Queues map[string]struct {
+		Backlog int `json:"backlog"`
+		Latency int `json:"latency"`
+	} `json:"queues"`
+}
+
+// GetQueueMetrics lists information about all the registered queues,
+// their backlog and their latency.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/sidekiq_metrics.html#get-the-current-queue-metrics
+func (s *SidekiqService) GetQueueMetrics(options ...RequestOptionFunc) (*QueueMetrics, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodGet, "/sidekiq/queue_metrics", nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	q := new(QueueMetrics)
+	resp, err := s.client.Do(req, q)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return q, resp, nil
+}
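+
+// A minimal usage sketch (not part of the upstream file): it assumes an
+// authenticated *Client named git, with the service wired up as git.Sidekiq,
+// and elides full error handling:
+//
+//	metrics, _, err := git.Sidekiq.GetQueueMetrics()
+//	if err != nil {
+//		return err
+//	}
+//	for name, q := range metrics.Queues {
+//		fmt.Printf("%s: backlog=%d latency=%ds\n", name, q.Backlog, q.Latency)
+//	}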
+
+// ProcessMetrics represents the GitLab sidekiq process metrics.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/sidekiq_metrics.html#get-the-current-process-metrics
+type ProcessMetrics struct {
+	Processes []struct {
+		Hostname string `json:"hostname"`
+		Pid int `json:"pid"`
+		Tag string `json:"tag"`
+		StartedAt *time.Time `json:"started_at"`
+		Queues []string `json:"queues"`
+		Labels []string `json:"labels"`
+		Concurrency int `json:"concurrency"`
+		Busy int `json:"busy"`
+	} `json:"processes"`
+}
+
+// GetProcessMetrics lists information about all the Sidekiq workers registered
+// to process your queues.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/sidekiq_metrics.html#get-the-current-process-metrics
+func (s *SidekiqService) GetProcessMetrics(options ...RequestOptionFunc) (*ProcessMetrics, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodGet, "/sidekiq/process_metrics", nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	p := new(ProcessMetrics)
+	resp, err := s.client.Do(req, p)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return p, resp, nil
+}
+
+// JobStats represents the GitLab sidekiq job stats.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/sidekiq_metrics.html#get-the-current-job-statistics
+type JobStats struct {
+	Jobs struct {
+		Processed int `json:"processed"`
+		Failed int `json:"failed"`
+		Enqueued int `json:"enqueued"`
+	} `json:"jobs"`
+}
+
+// GetJobStats lists information about the jobs that Sidekiq has performed.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/sidekiq_metrics.html#get-the-current-job-statistics
+func (s *SidekiqService) GetJobStats(options ...RequestOptionFunc) (*JobStats, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodGet, "/sidekiq/job_stats", nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	j := new(JobStats)
+	resp, err := s.client.Do(req, j)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return j, resp, nil
+}
+
+// CompoundMetrics represents the GitLab sidekiq compounded stats.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/sidekiq_metrics.html#get-a-compound-response-of-all-the-previously-mentioned-metrics
+type CompoundMetrics struct {
+	QueueMetrics
+	ProcessMetrics
+	JobStats
+}
+
+// GetCompoundMetrics lists all the currently available information about
+// Sidekiq, as a compound response of all the previously mentioned metrics.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/sidekiq_metrics.html#get-a-compound-response-of-all-the-previously-mentioned-metrics
+func (s *SidekiqService) GetCompoundMetrics(options ...RequestOptionFunc) (*CompoundMetrics, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodGet, "/sidekiq/compound_metrics", nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	c := new(CompoundMetrics)
+	resp, err := s.client.Do(req, c)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return c, resp, nil
+}
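+
+// Because CompoundMetrics embeds QueueMetrics, ProcessMetrics and JobStats,
+// the promoted fields can be read directly. A sketch (git is assumed to be an
+// authenticated *Client):
+//
+//	cm, _, err := git.Sidekiq.GetCompoundMetrics()
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(len(cm.Queues), len(cm.Processes), cm.Jobs.Processed)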
diff --git a/vendor/github.com/xanzy/go-gitlab/snippet_repository_storage_move.go b/vendor/github.com/xanzy/go-gitlab/snippet_repository_storage_move.go
new file mode 100644
index 0000000000..00761ec2ed
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/snippet_repository_storage_move.go
@@ -0,0 +1,203 @@
+//
+// Copyright 2023, Nick Westbury
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// SnippetRepositoryStorageMoveService handles communication with the
+// snippet repository storage moves related methods of the GitLab API.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html
+type SnippetRepositoryStorageMoveService struct {
+	client *Client
+}
+
+// SnippetRepositoryStorageMove represents the status of a repository move.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html
+type SnippetRepositoryStorageMove struct {
+	ID int `json:"id"`
+	CreatedAt *time.Time `json:"created_at"`
+	State string `json:"state"`
+	SourceStorageName string `json:"source_storage_name"`
+	DestinationStorageName string `json:"destination_storage_name"`
+	Snippet *RepositorySnippet `json:"snippet"`
+}
+
+// RepositorySnippet represents the snippet a storage move applies to.
+type RepositorySnippet struct {
+	ID int `json:"id"`
+	Title string `json:"title"`
+	Description string `json:"description"`
+	Visibility VisibilityValue `json:"visibility"`
+	UpdatedAt *time.Time `json:"updated_at"`
+	CreatedAt *time.Time `json:"created_at"`
+	ProjectID int `json:"project_id"`
+	WebURL string `json:"web_url"`
+	RawURL string `json:"raw_url"`
+	SSHURLToRepo string `json:"ssh_url_to_repo"`
+	HTTPURLToRepo string `json:"http_url_to_repo"`
+}
+
+// RetrieveAllSnippetStorageMovesOptions represents the available
+// RetrieveAllStorageMoves() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html#retrieve-all-repository-storage-moves-for-a-snippet
+type RetrieveAllSnippetStorageMovesOptions ListOptions
+
+// RetrieveAllStorageMoves retrieves all snippet repository storage moves
+// accessible by the authenticated user.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html#retrieve-all-repository-storage-moves-for-a-snippet
+func (s SnippetRepositoryStorageMoveService) RetrieveAllStorageMoves(opts RetrieveAllSnippetStorageMovesOptions, options ...RequestOptionFunc) ([]*SnippetRepositoryStorageMove, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodGet, "snippet_repository_storage_moves", opts, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var ssms []*SnippetRepositoryStorageMove
+	resp, err := s.client.Do(req, &ssms)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ssms, resp, err
+}
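+
+// A sketch of listing the first page of moves (git is assumed to be an
+// authenticated *Client with administrator access, exposing this service as
+// git.SnippetRepositoryStorageMove):
+//
+//	opts := gitlab.RetrieveAllSnippetStorageMovesOptions{Page: 1, PerPage: 20}
+//	moves, _, err := git.SnippetRepositoryStorageMove.RetrieveAllStorageMoves(opts)
+//	if err != nil {
+//		return err
+//	}
+//	for _, m := range moves {
+//		fmt.Println(m.ID, m.State, m.SourceStorageName, "->", m.DestinationStorageName)
+//	}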
+
+// RetrieveAllStorageMovesForSnippet retrieves all repository storage moves for
+// a single snippet accessible by the authenticated user.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html#retrieve-all-repository-storage-moves-for-a-snippet
+func (s SnippetRepositoryStorageMoveService) RetrieveAllStorageMovesForSnippet(snippet int, opts RetrieveAllSnippetStorageMovesOptions, options ...RequestOptionFunc) ([]*SnippetRepositoryStorageMove, *Response, error) {
+	u := fmt.Sprintf("snippets/%d/repository_storage_moves", snippet)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opts, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var ssms []*SnippetRepositoryStorageMove
+	resp, err := s.client.Do(req, &ssms)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ssms, resp, err
+}
+
+// GetStorageMove gets a single snippet repository storage move.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html#get-a-single-snippet-repository-storage-move
+func (s SnippetRepositoryStorageMoveService) GetStorageMove(repositoryStorage int, options ...RequestOptionFunc) (*SnippetRepositoryStorageMove, *Response, error) {
+	u := fmt.Sprintf("snippet_repository_storage_moves/%d", repositoryStorage)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	ssm := new(SnippetRepositoryStorageMove)
+	resp, err := s.client.Do(req, ssm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ssm, resp, err
+}
+
+// GetStorageMoveForSnippet gets a single repository storage move for a snippet.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html#get-a-single-repository-storage-move-for-a-snippet
+func (s SnippetRepositoryStorageMoveService) GetStorageMoveForSnippet(snippet int, repositoryStorage int, options ...RequestOptionFunc) (*SnippetRepositoryStorageMove, *Response, error) {
+	u := fmt.Sprintf("snippets/%d/repository_storage_moves/%d", snippet, repositoryStorage)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	ssm := new(SnippetRepositoryStorageMove)
+	resp, err := s.client.Do(req, ssm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ssm, resp, err
+}
+
+// ScheduleStorageMoveForSnippetOptions represents the available
+// ScheduleStorageMoveForSnippet() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html#schedule-a-repository-storage-move-for-a-snippet
+type ScheduleStorageMoveForSnippetOptions struct {
+	DestinationStorageName *string `url:"destination_storage_name,omitempty" json:"destination_storage_name,omitempty"`
+}
+
+// ScheduleStorageMoveForSnippet schedules a repository to be moved for a snippet.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html#schedule-a-repository-storage-move-for-a-snippet
+func (s SnippetRepositoryStorageMoveService) ScheduleStorageMoveForSnippet(snippet int, opts ScheduleStorageMoveForSnippetOptions, options ...RequestOptionFunc) (*SnippetRepositoryStorageMove, *Response, error) {
+	u := fmt.Sprintf("snippets/%d/repository_storage_moves", snippet)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opts, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	ssm := new(SnippetRepositoryStorageMove)
+	resp, err := s.client.Do(req, ssm)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ssm, resp, err
+}
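+
+// A sketch of scheduling a move for snippet 42 onto a different shard (the
+// snippet ID and storage name are illustrative):
+//
+//	move, _, err := git.SnippetRepositoryStorageMove.ScheduleStorageMoveForSnippet(
+//		42,
+//		gitlab.ScheduleStorageMoveForSnippetOptions{
+//			DestinationStorageName: gitlab.String("storage2"),
+//		},
+//	)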
+
+// ScheduleAllSnippetStorageMovesOptions represents the available
+// ScheduleAllStorageMoves() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html#schedule-repository-storage-moves-for-all-snippets-on-a-storage-shard
+type ScheduleAllSnippetStorageMovesOptions struct {
+	SourceStorageName *string `url:"source_storage_name,omitempty" json:"source_storage_name,omitempty"`
+	DestinationStorageName *string `url:"destination_storage_name,omitempty" json:"destination_storage_name,omitempty"`
+}
+
+// ScheduleAllStorageMoves schedules all snippet repositories to be moved.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html#schedule-repository-storage-moves-for-all-snippets-on-a-storage-shard
+func (s SnippetRepositoryStorageMoveService) ScheduleAllStorageMoves(opts ScheduleAllSnippetStorageMovesOptions, options ...RequestOptionFunc) (*Response, error) {
+	req, err := s.client.NewRequest(http.MethodPost, "snippet_repository_storage_moves", opts, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/snippets.go b/vendor/github.com/xanzy/go-gitlab/snippets.go
new file mode 100644
index 0000000000..3cb482773c
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/snippets.go
@@ -0,0 +1,314 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// SnippetsService handles communication with the snippets
+// related methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/snippets.html
type SnippetsService struct {
+	client *Client
+}
+
+// Snippet represents a GitLab snippet.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/snippets.html
+type Snippet struct {
+	ID int `json:"id"`
+	Title string `json:"title"`
+	FileName string `json:"file_name"`
+	Description string `json:"description"`
+	Visibility string `json:"visibility"`
+	Author struct {
+		ID int `json:"id"`
+		Username string `json:"username"`
+		Email string `json:"email"`
+		Name string `json:"name"`
+		State string `json:"state"`
+		CreatedAt *time.Time `json:"created_at"`
+	} `json:"author"`
+	UpdatedAt *time.Time `json:"updated_at"`
+	CreatedAt *time.Time `json:"created_at"`
+	ProjectID int `json:"project_id"`
+	WebURL string `json:"web_url"`
+	RawURL string `json:"raw_url"`
+	Files []struct {
+		Path string `json:"path"`
+		RawURL string `json:"raw_url"`
+	} `json:"files"`
+	RepositoryStorage string `json:"repository_storage"`
+}
+
+func (s Snippet) String() string {
+	return Stringify(s)
+}
+
+// ListSnippetsOptions represents the available ListSnippets() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippets.html#list-all-snippets-for-a-user
+type ListSnippetsOptions ListOptions
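+
+// ListSnippetsOptions is a plain ListOptions, so the usual pagination fields
+// apply. A sketch of walking every page with the ListSnippets call defined
+// below (git is assumed to be an authenticated *Client):
+//
+//	opt := &gitlab.ListSnippetsOptions{PerPage: 50}
+//	for {
+//		snippets, resp, err := git.Snippets.ListSnippets(opt)
+//		if err != nil {
+//			return err
+//		}
+//		for _, s := range snippets {
+//			fmt.Println(s.ID, s.Title)
+//		}
+//		if resp.NextPage == 0 {
+//			break
+//		}
+//		opt.Page = resp.NextPage
+//	}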
+
+// ListSnippets gets a list of snippets.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippets.html#list-all-snippets-for-a-user
+func (s *SnippetsService) ListSnippets(opt *ListSnippetsOptions, options ...RequestOptionFunc) ([]*Snippet, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodGet, "snippets", opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var ps []*Snippet
+	resp, err := s.client.Do(req, &ps)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ps, resp, nil
+}
+
+// GetSnippet gets a single snippet.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippets.html#get-a-single-snippet
+func (s *SnippetsService) GetSnippet(snippet int, options ...RequestOptionFunc) (*Snippet, *Response, error) {
+	u := fmt.Sprintf("snippets/%d", snippet)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	ps := new(Snippet)
+	resp, err := s.client.Do(req, ps)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ps, resp, nil
+}
+
+// SnippetContent gets a single snippet’s raw contents.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippets.html#single-snippet-contents
+func (s *SnippetsService) SnippetContent(snippet int, options ...RequestOptionFunc) ([]byte, *Response, error) {
+	u := fmt.Sprintf("snippets/%d/raw", snippet)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var b bytes.Buffer
+	resp, err := s.client.Do(req, &b)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return b.Bytes(), resp, err
+}
+
+// SnippetFileContent returns the raw file content as plain text.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippets.html#snippet-repository-file-content
+func (s *SnippetsService) SnippetFileContent(snippet int, ref, filename string, options ...RequestOptionFunc) ([]byte, *Response, error) {
+	filepath := PathEscape(filename)
+	u := fmt.Sprintf("snippets/%d/files/%s/%s/raw", snippet, ref, filepath)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var b bytes.Buffer
+	resp, err := s.client.Do(req, &b)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return b.Bytes(), resp, err
+}
+
+// CreateSnippetFileOptions represents the create snippet file options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippets.html#create-new-snippet
+type CreateSnippetFileOptions struct {
+	FilePath *string `url:"file_path,omitempty" json:"file_path,omitempty"`
+	Content *string `url:"content,omitempty" json:"content,omitempty"`
+}
+
+// CreateSnippetOptions represents the available CreateSnippet() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippets.html#create-new-snippet
+type CreateSnippetOptions struct {
+	Title *string `url:"title,omitempty" json:"title,omitempty"`
+	FileName *string `url:"file_name,omitempty" json:"file_name,omitempty"`
+	Description *string `url:"description,omitempty" json:"description,omitempty"`
+	Content *string `url:"content,omitempty" json:"content,omitempty"`
+	Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"`
+	Files *[]*CreateSnippetFileOptions `url:"files,omitempty" json:"files,omitempty"`
+}
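+
+// A sketch of populating these options for a private, multi-file snippet
+// (all values are illustrative):
+//
+//	opt := &gitlab.CreateSnippetOptions{
+//		Title:      gitlab.String("hello"),
+//		Visibility: gitlab.Visibility(gitlab.PrivateVisibility),
+//		Files: &[]*gitlab.CreateSnippetFileOptions{
+//			{FilePath: gitlab.String("main.go"), Content: gitlab.String("package main")},
+//		},
+//	}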
+
+// CreateSnippet creates a new snippet. The user must have permission
+// to create new snippets.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippets.html#create-new-snippet
+func (s *SnippetsService) CreateSnippet(opt *CreateSnippetOptions, options ...RequestOptionFunc) (*Snippet, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodPost, "snippets", opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	ps := new(Snippet)
+	resp, err := s.client.Do(req, ps)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ps, resp, nil
+}
+
+// UpdateSnippetFileOptions represents the update snippet file options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippets.html#update-snippet
+type UpdateSnippetFileOptions struct {
+	Action *string `url:"action,omitempty" json:"action,omitempty"`
+	FilePath *string `url:"file_path,omitempty" json:"file_path,omitempty"`
+	Content *string `url:"content,omitempty" json:"content,omitempty"`
+	PreviousPath *string `url:"previous_path,omitempty" json:"previous_path,omitempty"`
+}
+
+// UpdateSnippetOptions represents the available UpdateSnippet() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippets.html#update-snippet
+type UpdateSnippetOptions struct {
+	Title *string `url:"title,omitempty" json:"title,omitempty"`
+	FileName *string `url:"file_name,omitempty" json:"file_name,omitempty"`
+	Description *string `url:"description,omitempty" json:"description,omitempty"`
+	Content *string `url:"content,omitempty" json:"content,omitempty"`
+	Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"`
+	Files *[]*UpdateSnippetFileOptions `url:"files,omitempty" json:"files,omitempty"`
+}
+
+// UpdateSnippet updates an existing snippet. The user must have
+// permission to change an existing snippet.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippets.html#update-snippet
+func (s *SnippetsService) UpdateSnippet(snippet int, opt *UpdateSnippetOptions, options ...RequestOptionFunc) (*Snippet, *Response, error) {
+	u := fmt.Sprintf("snippets/%d", snippet)
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	ps := new(Snippet)
+	resp, err := s.client.Do(req, ps)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ps, resp, nil
+}
+
+// DeleteSnippet deletes an existing snippet. This is an idempotent
+// function and deleting a non-existent snippet still returns a 200 OK status
+// code.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippets.html#delete-snippet
+func (s *SnippetsService) DeleteSnippet(snippet int, options ...RequestOptionFunc) (*Response, error) {
+	u := fmt.Sprintf("snippets/%d", snippet)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
+// ExploreSnippetsOptions represents the available ExploreSnippets() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippets.html#list-all-public-snippets
+type ExploreSnippetsOptions ListOptions
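+
+// A sketch of fetching one page of public snippets with the ExploreSnippets
+// call defined below (git is assumed to be an authenticated *Client):
+//
+//	public, _, err := git.Snippets.ExploreSnippets(&gitlab.ExploreSnippetsOptions{PerPage: 10})
+//	if err != nil {
+//		return err
+//	}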
+
+// ExploreSnippets gets the list of public snippets.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippets.html#list-all-public-snippets
+func (s *SnippetsService) ExploreSnippets(opt *ExploreSnippetsOptions, options ...RequestOptionFunc) ([]*Snippet, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodGet, "snippets/public", opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var ps []*Snippet
+	resp, err := s.client.Do(req, &ps)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ps, resp, nil
+}
+
+// ListAllSnippetsOptions represents the available ListAllSnippets() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippets.html#list-all-snippets
+type ListAllSnippetsOptions struct {
+	ListOptions
+	CreatedAfter *ISOTime `url:"created_after,omitempty" json:"created_after,omitempty"`
+	CreatedBefore *ISOTime `url:"created_before,omitempty" json:"created_before,omitempty"`
+	RepositoryStorage *string `url:"repository_storage,omitempty" json:"repository_storage,omitempty"`
+}
+
+// ListAllSnippets gets all snippets the current user has access to.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/snippets.html#list-all-snippets
+func (s *SnippetsService) ListAllSnippets(opt *ListAllSnippetsOptions, options ...RequestOptionFunc) ([]*Snippet, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodGet, "snippets/all", opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var ps []*Snippet
+	resp, err := s.client.Do(req, &ps)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ps, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/strings.go b/vendor/github.com/xanzy/go-gitlab/strings.go
new file mode 100644
index 0000000000..efbd10ffd5
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/strings.go
@@ -0,0 +1,93 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+)
+
+// Stringify attempts to create a reasonable string representation of types in
+// the GitLab library. It does things like resolve pointers to their values
+// and omits struct fields with nil values.
+func Stringify(message interface{}) string {
+	var buf bytes.Buffer
+	v := reflect.ValueOf(message)
+	stringifyValue(&buf, v)
+	return buf.String()
+}
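+
+// A sketch of the kind of output Stringify produces (the exact rendering
+// below is illustrative, not a contract):
+//
+//	s := gitlab.Snippet{ID: 1, Title: "hello"}
+//	fmt.Println(gitlab.Stringify(s))
+//	// gitlab.Snippet{ID:1, Title:"hello", ...}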
+
+// stringifyValue was heavily inspired by the goprotobuf library.
+func stringifyValue(buf *bytes.Buffer, val reflect.Value) {
+	if val.Kind() == reflect.Ptr && val.IsNil() {
+		buf.WriteString("<nil>")
+		return
+	}
+
+	// Resolve pointers to their underlying values.
+	v := reflect.Indirect(val)
+
+	switch v.Kind() {
+	case reflect.String:
+		fmt.Fprintf(buf, `"%s"`, v)
+	case reflect.Slice:
+		buf.WriteByte('[')
+		for i := 0; i < v.Len(); i++ {
+			if i > 0 {
+				buf.WriteByte(' ')
+			}
+
+			stringifyValue(buf, v.Index(i))
+		}
+
+		buf.WriteByte(']')
+		return
+	case reflect.Struct:
+		if v.Type().Name() != "" {
+			buf.WriteString(v.Type().String())
+		}
+
+		buf.WriteByte('{')
+
+		var sep bool
+		for i := 0; i < v.NumField(); i++ {
+			fv := v.Field(i)
+			// Skip nil pointer and nil slice fields.
+			if fv.Kind() == reflect.Ptr && fv.IsNil() {
+				continue
+			}
+			if fv.Kind() == reflect.Slice && fv.IsNil() {
+				continue
+			}
+
+			if sep {
+				buf.WriteString(", ")
+			} else {
+				sep = true
+			}
+
+			buf.WriteString(v.Type().Field(i).Name)
+			buf.WriteByte(':')
+			stringifyValue(buf, fv)
+		}
+
+		buf.WriteByte('}')
+	default:
+		if v.CanInterface() {
+			fmt.Fprint(buf, v.Interface())
+		}
+	}
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/system_hooks.go b/vendor/github.com/xanzy/go-gitlab/system_hooks.go
new file mode 100644
index 0000000000..1f151fa930
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/system_hooks.go
@@ -0,0 +1,176 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// SystemHooksService handles communication with the system hooks related
+// methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/system_hooks.html
+type SystemHooksService struct {
+	client *Client
+}
+
+// Hook represents a GitLab system hook.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/system_hooks.html
+type Hook struct {
+	ID int `json:"id"`
+	URL string `json:"url"`
+	CreatedAt *time.Time `json:"created_at"`
+	PushEvents bool `json:"push_events"`
+	TagPushEvents bool `json:"tag_push_events"`
+	MergeRequestsEvents bool `json:"merge_requests_events"`
+	RepositoryUpdateEvents bool `json:"repository_update_events"`
+	EnableSSLVerification bool `json:"enable_ssl_verification"`
+}
+
+func (h Hook) String() string {
+	return Stringify(h)
+}
+
+// ListHooks gets a list of system hooks.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/system_hooks.html#list-system-hooks
+func (s *SystemHooksService) ListHooks(options ...RequestOptionFunc) ([]*Hook, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodGet, "hooks", nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var h []*Hook
+	resp, err := s.client.Do(req, &h)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return h, resp, nil
+}
+
+// GetHook gets a single system hook.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/system_hooks.html#get-system-hook
+func (s *SystemHooksService) GetHook(hook int, options ...RequestOptionFunc) (*Hook, *Response, error) {
+	u := fmt.Sprintf("hooks/%d", hook)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var h *Hook
+	resp, err := s.client.Do(req, &h)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return h, resp, nil
+}
+
+// AddHookOptions represents the available AddHook() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/system_hooks.html#add-new-system-hook
+type AddHookOptions struct {
+	URL *string `url:"url,omitempty" json:"url,omitempty"`
+	Token *string `url:"token,omitempty" json:"token,omitempty"`
+	PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"`
+	TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"`
+	MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"`
+	RepositoryUpdateEvents *bool `url:"repository_update_events,omitempty" json:"repository_update_events,omitempty"`
+	EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"`
+}
+
+// AddHook adds a new system hook.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/system_hooks.html#add-new-system-hook
+func (s *SystemHooksService) AddHook(opt *AddHookOptions, options ...RequestOptionFunc) (*Hook, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodPost, "hooks", opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	h := new(Hook)
+	resp, err := s.client.Do(req, h)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return h, resp, nil
+}
+
+// HookEvent represents an event triggered by a GitLab system hook.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/system_hooks.html
+type HookEvent struct {
+	EventName string `json:"event_name"`
+	Name string `json:"name"`
+	Path string `json:"path"`
+	ProjectID int `json:"project_id"`
+	OwnerName string `json:"owner_name"`
+	OwnerEmail string `json:"owner_email"`
+}
+
+func (h HookEvent) String() string {
+	return Stringify(h)
+}
+
+// TestHook tests a system hook.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/system_hooks.html#test-system-hook
+func (s *SystemHooksService) TestHook(hook int, options ...RequestOptionFunc) (*HookEvent, *Response, error) {
+	u := fmt.Sprintf("hooks/%d", hook)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	h := new(HookEvent)
+	resp, err := s.client.Do(req, h)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return h, resp, nil
+}
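+
+// A sketch of registering and then testing a hook (the URL and token are
+// illustrative):
+//
+//	hook, _, err := git.SystemHooks.AddHook(&gitlab.AddHookOptions{
+//		URL:   gitlab.String("https://hooks.example.com/gitlab"),
+//		Token: gitlab.String("secret-token"),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	event, _, err := git.SystemHooks.TestHook(hook.ID)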
+
+// DeleteHook deletes a system hook. This is an idempotent API function and
+// returns 200 OK even if the hook is not available. If the hook is deleted it
+// is also returned as JSON.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/system_hooks.html#delete-system-hook
+func (s *SystemHooksService) DeleteHook(hook int, options ...RequestOptionFunc) (*Response, error) {
+	u := fmt.Sprintf("hooks/%d", hook)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/tags.go b/vendor/github.com/xanzy/go-gitlab/tags.go
new file mode 100644
index 0000000000..18a710d185
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/tags.go
@@ -0,0 +1,248 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+)
+
+// TagsService handles communication with the tags related methods
+// of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/tags.html
+type TagsService struct {
+	client *Client
+}
+
+// Tag represents a GitLab tag.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/tags.html
+type Tag struct {
+	Commit *Commit `json:"commit"`
+	Release *ReleaseNote `json:"release"`
+	Name string `json:"name"`
+	Message string `json:"message"`
+	Protected bool `json:"protected"`
+	Target string `json:"target"`
+}
+
+// ReleaseNote represents a GitLab version release.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/tags.html
+type ReleaseNote struct {
+	TagName string `json:"tag_name"`
+	Description string `json:"description"`
+}
+
+func (t Tag) String() string {
+	return Stringify(t)
+}
+
+// ListTagsOptions represents the available ListTags() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/tags.html#list-project-repository-tags
+type ListTagsOptions struct {
+	ListOptions
+	OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"`
+	Search *string `url:"search,omitempty" json:"search,omitempty"`
+	Sort *string `url:"sort,omitempty" json:"sort,omitempty"`
+}
+
+// ListTags gets a list of tags from a project, sorted by name in reverse
+// alphabetical order.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/tags.html#list-project-repository-tags
+func (s *TagsService) ListTags(pid interface{}, opt *ListTagsOptions, options ...RequestOptionFunc) ([]*Tag, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/repository/tags", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var t []*Tag
+	resp, err := s.client.Do(req, &t)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return t, resp, nil
+}
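+
+// A sketch of listing a project's tags newest-first by update time (the
+// project path is illustrative; pid accepts an int ID or a string path):
+//
+//	tags, _, err := git.Tags.ListTags("group/project", &gitlab.ListTagsOptions{
+//		OrderBy: gitlab.String("updated"),
+//		Sort:    gitlab.String("desc"),
+//	})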
+
+// GetTag gets a specific repository tag determined by its name. It returns 200
+// together with the tag information if the tag exists, and 404 if it does not.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/tags.html#get-a-single-repository-tag
+func (s *TagsService) GetTag(pid interface{}, tag string, options ...RequestOptionFunc) (*Tag, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/repository/tags/%s", PathEscape(project), url.PathEscape(tag))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var t *Tag
+	resp, err := s.client.Do(req, &t)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return t, resp, nil
+}
+
+// CreateTagOptions represents the available CreateTag() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/tags.html#create-a-new-tag
+type CreateTagOptions struct {
+	TagName *string `url:"tag_name,omitempty" json:"tag_name,omitempty"`
+	Ref *string `url:"ref,omitempty" json:"ref,omitempty"`
+	Message *string `url:"message,omitempty" json:"message,omitempty"`
+
+	// Deprecated: Use the Releases API instead. (Deprecated in GitLab 11.7)
+	ReleaseDescription *string `url:"release_description,omitempty" json:"release_description,omitempty"`
+}
+
+// CreateTag creates a new tag in the repository that points to the supplied ref.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/tags.html#create-a-new-tag
+func (s *TagsService) CreateTag(pid interface{}, opt *CreateTagOptions, options ...RequestOptionFunc) (*Tag, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/repository/tags", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	t := new(Tag)
+	resp, err := s.client.Do(req, t)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return t, resp, nil
+}
+
+// DeleteTag deletes a tag of a repository with the given name.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/tags.html#delete-a-tag
+func (s *TagsService) DeleteTag(pid interface{}, tag string, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/repository/tags/%s", PathEscape(project), url.PathEscape(tag))
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
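+
+// A sketch of creating an annotated tag at a branch head (tag name, ref and
+// message are illustrative):
+//
+//	tag, _, err := git.Tags.CreateTag("group/project", &gitlab.CreateTagOptions{
+//		TagName: gitlab.String("v1.0.0"),
+//		Ref:     gitlab.String("main"),
+//		Message: gitlab.String("release v1.0.0"),
+//	})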
+
+// CreateReleaseNoteOptions represents the available CreateReleaseNote() options.
+//
+// Deprecated: This feature was deprecated in GitLab 11.7.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/tags.html#create-a-new-release
+type CreateReleaseNoteOptions struct {
+	Description *string `url:"description,omitempty" json:"description,omitempty"`
+}
+
+// CreateReleaseNote adds release notes to an existing git tag.
+// If a release already exists for the given tag, status code 409 is returned.
+//
+// Deprecated: This feature was deprecated in GitLab 11.7.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/tags.html#create-a-new-release
+func (s *TagsService) CreateReleaseNote(pid interface{}, tag string, opt *CreateReleaseNoteOptions, options ...RequestOptionFunc) (*ReleaseNote, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/repository/tags/%s/release", PathEscape(project), url.PathEscape(tag))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	r := new(ReleaseNote)
+	resp, err := s.client.Do(req, r)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return r, resp, nil
+}
+
+// UpdateReleaseNoteOptions represents the available UpdateReleaseNote() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/tags.html#update-a-release
+type UpdateReleaseNoteOptions struct {
+	Description *string `url:"description,omitempty" json:"description,omitempty"`
+}
+
+// UpdateReleaseNote updates the release notes of a given release.
+//
+// Deprecated: This feature was deprecated in GitLab 11.7.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/tags.html#update-a-release
+func (s *TagsService) UpdateReleaseNote(pid interface{}, tag string, opt *UpdateReleaseNoteOptions, options ...RequestOptionFunc) (*ReleaseNote, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/repository/tags/%s/release", PathEscape(project), url.PathEscape(tag))
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	r := new(ReleaseNote)
+	resp, err := s.client.Do(req, r)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return r, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/time_stats.go b/vendor/github.com/xanzy/go-gitlab/time_stats.go
new file mode 100644
index 0000000000..0ce2d6751f
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/time_stats.go
@@ -0,0 +1,180 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+)
+
+// timeStatsService handles communication with the time tracking related
+// methods of the GitLab API.
+//
+// GitLab docs: https://docs.gitlab.com/ee/workflow/time_tracking.html
+type timeStatsService struct {
+	client *Client
+}
+
+// TimeStats represents the time estimates and time spent for an issue.
+//
+// GitLab docs: https://docs.gitlab.com/ee/workflow/time_tracking.html
+type TimeStats struct {
+	HumanTimeEstimate string `json:"human_time_estimate"`
+	HumanTotalTimeSpent string `json:"human_total_time_spent"`
+	TimeEstimate int `json:"time_estimate"`
+	TotalTimeSpent int `json:"total_time_spent"`
+}
+
+func (t TimeStats) String() string {
+	return Stringify(t)
+}
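+
+// The methods below are unexported and are shared by the issue and merge
+// request services, which wrap them. A sketch of the issue-level wrapper
+// (assuming it is exposed as Issues.SetTimeEstimate, as elsewhere in this
+// package; the project path and issue IID are illustrative):
+//
+//	stats, _, err := git.Issues.SetTimeEstimate("group/project", 7, &gitlab.SetTimeEstimateOptions{
+//		Duration: gitlab.String("3h30m"),
+//	})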
+//
+// GitLab docs: https://docs.gitlab.com/ee/workflow/time_tracking.html
+type SetTimeEstimateOptions struct {
+	Duration *string `url:"duration,omitempty" json:"duration,omitempty"`
+}
+
+// setTimeEstimate sets the time estimate for a single project entity
+// (issue or merge request).
+//
+// GitLab docs: https://docs.gitlab.com/ee/workflow/time_tracking.html
+func (s *timeStatsService) setTimeEstimate(pid interface{}, entity string, issue int, opt *SetTimeEstimateOptions, options ...RequestOptionFunc) (*TimeStats, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/%s/%d/time_estimate", PathEscape(project), entity, issue)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	t := new(TimeStats)
+	resp, err := s.client.Do(req, t)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return t, resp, nil
+}
+
+// resetTimeEstimate resets the time estimate for a single project entity
+// (issue or merge request).
+//
+// GitLab docs: https://docs.gitlab.com/ee/workflow/time_tracking.html
+func (s *timeStatsService) resetTimeEstimate(pid interface{}, entity string, issue int, options ...RequestOptionFunc) (*TimeStats, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/%s/%d/reset_time_estimate", PathEscape(project), entity, issue)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	t := new(TimeStats)
+	resp, err := s.client.Do(req, t)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return t, resp, nil
+}
+
+// AddSpentTimeOptions represents the available AddSpentTime() options.
+//
+// GitLab docs: https://docs.gitlab.com/ee/workflow/time_tracking.html
+type AddSpentTimeOptions struct {
+	Duration *string `url:"duration,omitempty" json:"duration,omitempty"`
+	Summary  *string `url:"summary,omitempty" json:"summary,omitempty"`
+}
+
+// addSpentTime adds spent time for a single project entity
+// (issue or merge request).
+//
+// GitLab docs: https://docs.gitlab.com/ee/workflow/time_tracking.html
+func (s *timeStatsService) addSpentTime(pid interface{}, entity string, issue int, opt *AddSpentTimeOptions, options ...RequestOptionFunc) (*TimeStats, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/%s/%d/add_spent_time", PathEscape(project), entity, issue)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	t := new(TimeStats)
+	resp, err := s.client.Do(req, t)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return t, resp, nil
+}
+
+// resetSpentTime resets the spent time for a single project entity
+// (issue or merge request).
+//
+// GitLab docs: https://docs.gitlab.com/ee/workflow/time_tracking.html
+func (s *timeStatsService) resetSpentTime(pid interface{}, entity string, issue int, options ...RequestOptionFunc) (*TimeStats, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/%s/%d/reset_spent_time", PathEscape(project), entity, issue)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	t := new(TimeStats)
+	resp, err := s.client.Do(req, t)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return t, resp, nil
+}
+
+// getTimeSpent gets the spent time for a single project entity
+// (issue or merge request).
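+//
+// The unexported receivers in this file are shared plumbing: the exported
+// services delegate to them with a fixed entity segment. A hedged sketch of
+// that pattern (the wrapper shown here is illustrative, not a definition
+// from this file):
+//
+//	func (s *IssuesService) GetTimeSpent(pid interface{}, issue int, options ...RequestOptionFunc) (*TimeStats, *Response, error) {
+//		return s.timeStats.getTimeSpent(pid, "issues", issue, options...)
+//	}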
+//
+// GitLab docs: https://docs.gitlab.com/ee/workflow/time_tracking.html
+func (s *timeStatsService) getTimeSpent(pid interface{}, entity string, issue int, options ...RequestOptionFunc) (*TimeStats, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/%s/%d/time_stats", PathEscape(project), entity, issue)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	t := new(TimeStats)
+	resp, err := s.client.Do(req, t)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return t, resp, nil
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/todos.go b/vendor/github.com/xanzy/go-gitlab/todos.go
new file mode 100644
index 0000000000..2e26b70779
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/todos.go
@@ -0,0 +1,163 @@
+//
+// Copyright 2021, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// TodosService handles communication with the todos related methods of
+// the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/todos.html
+type TodosService struct {
+	client *Client
+}
+
+// Todo represents a GitLab todo.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/todos.html
+type Todo struct {
+	ID         int            `json:"id"`
+	Project    *BasicProject  `json:"project"`
+	Author     *BasicUser     `json:"author"`
+	ActionName TodoAction     `json:"action_name"`
+	TargetType TodoTargetType `json:"target_type"`
+	Target     *TodoTarget    `json:"target"`
+	TargetURL  string         `json:"target_url"`
+	Body       string         `json:"body"`
+	State      string         `json:"state"`
+	CreatedAt  *time.Time     `json:"created_at"`
+}
+
+func (t Todo) String() string {
+	return Stringify(t)
+}
+
+// TodoTarget represents a todo target of type Issue, MergeRequest, or
+// DesignManagement::Design.
type TodoTarget struct {
+	Assignees            []*BasicUser           `json:"assignees"`
+	Assignee             *BasicUser             `json:"assignee"`
+	Author               *BasicUser             `json:"author"`
+	CreatedAt            *time.Time             `json:"created_at"`
+	Description          string                 `json:"description"`
+	Downvotes            int                    `json:"downvotes"`
+	ID                   interface{}            `json:"id"`
+	IID                  int                    `json:"iid"`
+	Labels               []string               `json:"labels"`
+	Milestone            *Milestone             `json:"milestone"`
+	ProjectID            int                    `json:"project_id"`
+	State                string                 `json:"state"`
+	Subscribed           bool                   `json:"subscribed"`
+	TaskCompletionStatus *TasksCompletionStatus `json:"task_completion_status"`
+	Title                string                 `json:"title"`
+	UpdatedAt            *time.Time             `json:"updated_at"`
+	Upvotes              int                    `json:"upvotes"`
+	UserNotesCount       int                    `json:"user_notes_count"`
+	WebURL               string                 `json:"web_url"`
+
+	// Only available for type Issue
+	Confidential bool        `json:"confidential"`
+	DueDate      string      `json:"due_date"`
+	HasTasks     bool        `json:"has_tasks"`
+	Links        *IssueLinks `json:"_links"`
+	MovedToID    int         `json:"moved_to_id"`
+	TimeStats    *TimeStats  `json:"time_stats"`
+	Weight       int         `json:"weight"`
+
+	// Only available for type MergeRequest
+	MergedAt                  *time.Time   `json:"merged_at"`
+	ApprovalsBeforeMerge      int          `json:"approvals_before_merge"`
+	ForceRemoveSourceBranch   bool         `json:"force_remove_source_branch"`
+	MergeCommitSHA            string       `json:"merge_commit_sha"`
+	MergeWhenPipelineSucceeds bool         `json:"merge_when_pipeline_succeeds"`
+	MergeStatus               string       `json:"merge_status"`
+	Reference                 string       `json:"reference"`
+	Reviewers                 []*BasicUser `json:"reviewers"`
+	SHA                       string       `json:"sha"`
+	ShouldRemoveSourceBranch  bool         `json:"should_remove_source_branch"`
+	SourceBranch              string       `json:"source_branch"`
+	SourceProjectID           int          `json:"source_project_id"`
+	Squash                    bool         `json:"squash"`
+	TargetBranch              string       `json:"target_branch"`
+	TargetProjectID           int          `json:"target_project_id"`
+	WorkInProgress            bool         `json:"work_in_progress"`
+
+	// Only available for type DesignManagement::Design
+	FileName string `json:"filename"`
+	ImageURL string `json:"image_url"`
+}
+
+// ListTodosOptions represents the available ListTodos() options.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/todos.html#get-a-list-of-to-do-items
+type ListTodosOptions struct {
+	ListOptions
+	Action    *TodoAction `url:"action,omitempty" json:"action,omitempty"`
+	AuthorID  *int        `url:"author_id,omitempty" json:"author_id,omitempty"`
+	ProjectID *int        `url:"project_id,omitempty" json:"project_id,omitempty"`
+	State     *string     `url:"state,omitempty" json:"state,omitempty"`
+	Type      *string     `url:"type,omitempty" json:"type,omitempty"`
+}
+
+// ListTodos lists all todos created by the authenticated user.
+// When no filter is applied, it returns all pending todos for the current user.
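+//
+// A minimal, hypothetical sketch that lists todos for a single action type:
+//
+//	todos, _, err := git.Todos.ListTodos(&gitlab.ListTodosOptions{
+//		Action: gitlab.Ptr(gitlab.TodoAssigned),
+//	})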
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/todos.html#get-a-list-of-to-do-items +func (s *TodosService) ListTodos(opt *ListTodosOptions, options ...RequestOptionFunc) ([]*Todo, *Response, error) { + req, err := s.client.NewRequest(http.MethodGet, "todos", opt, options) + if err != nil { + return nil, nil, err + } + + var t []*Todo + resp, err := s.client.Do(req, &t) + if err != nil { + return nil, resp, err + } + + return t, resp, nil +} + +// MarkTodoAsDone marks a single pending todo given by its ID for the current user as done. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/todos.html#mark-a-to-do-item-as-done +func (s *TodosService) MarkTodoAsDone(id int, options ...RequestOptionFunc) (*Response, error) { + u := fmt.Sprintf("todos/%d/mark_as_done", id) + + req, err := s.client.NewRequest(http.MethodPost, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// MarkAllTodosAsDone marks all pending todos for the current user as done. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/todos.html#mark-all-to-do-items-as-done +func (s *TodosService) MarkAllTodosAsDone(options ...RequestOptionFunc) (*Response, error) { + req, err := s.client.NewRequest(http.MethodPost, "todos/mark_as_done", nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/topics.go b/vendor/github.com/xanzy/go-gitlab/topics.go new file mode 100644 index 0000000000..719589f589 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/topics.go @@ -0,0 +1,222 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + + retryablehttp "github.com/hashicorp/go-retryablehttp" +) + +// TopicsService handles communication with the topics related methods +// of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/topics.html +type TopicsService struct { + client *Client +} + +// Topic represents a GitLab project topic. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/topics.html +type Topic struct { + ID int `json:"id"` + Name string `json:"name"` + Title string `json:"title"` + Description string `json:"description"` + TotalProjectsCount uint64 `json:"total_projects_count"` + AvatarURL string `json:"avatar_url"` +} + +func (t Topic) String() string { + return Stringify(t) +} + +// ListTopicsOptions represents the available ListTopics() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/topics.html#list-topics +type ListTopicsOptions struct { + ListOptions + Search *string `url:"search,omitempty" json:"search,omitempty"` +} + +// ListTopics returns a list of project topics in the GitLab instance ordered +// by number of associated projects. 
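+//
+// A minimal, hypothetical sketch that searches topics by name:
+//
+//	topics, _, err := git.Topics.ListTopics(&gitlab.ListTopicsOptions{
+//		Search: gitlab.Ptr("go"),
+//	})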
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/topics.html#list-topics +func (s *TopicsService) ListTopics(opt *ListTopicsOptions, options ...RequestOptionFunc) ([]*Topic, *Response, error) { + req, err := s.client.NewRequest(http.MethodGet, "topics", opt, options) + if err != nil { + return nil, nil, err + } + + var t []*Topic + resp, err := s.client.Do(req, &t) + if err != nil { + return nil, resp, err + } + + return t, resp, nil +} + +// GetTopic gets a project topic by ID. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/topics.html#get-a-topic +func (s *TopicsService) GetTopic(topic int, options ...RequestOptionFunc) (*Topic, *Response, error) { + u := fmt.Sprintf("topics/%d", topic) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + t := new(Topic) + resp, err := s.client.Do(req, t) + if err != nil { + return nil, resp, err + } + + return t, resp, nil +} + +// CreateTopicOptions represents the available CreateTopic() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/topics.html#create-a-project-topic +type CreateTopicOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + Title *string `url:"title,omitempty" json:"title,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + Avatar *TopicAvatar `url:"-" json:"-"` +} + +// TopicAvatar represents a GitLab topic avatar. +type TopicAvatar struct { + Filename string + Image io.Reader +} + +// MarshalJSON implements the json.Marshaler interface. +func (a *TopicAvatar) MarshalJSON() ([]byte, error) { + if a.Filename == "" && a.Image == nil { + return []byte(`""`), nil + } + type alias TopicAvatar + return json.Marshal((*alias)(a)) +} + +// CreateTopic creates a new project topic. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/topics.html#create-a-project-topic +func (s *TopicsService) CreateTopic(opt *CreateTopicOptions, options ...RequestOptionFunc) (*Topic, *Response, error) { + var err error + var req *retryablehttp.Request + + if opt.Avatar == nil { + req, err = s.client.NewRequest(http.MethodPost, "topics", opt, options) + } else { + req, err = s.client.UploadRequest( + http.MethodPost, + "topics", + opt.Avatar.Image, + opt.Avatar.Filename, + UploadAvatar, + opt, + options, + ) + } + if err != nil { + return nil, nil, err + } + + t := new(Topic) + resp, err := s.client.Do(req, t) + if err != nil { + return nil, resp, err + } + + return t, resp, nil +} + +// UpdateTopicOptions represents the available UpdateTopic() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/topics.html#update-a-project-topic +type UpdateTopicOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + Title *string `url:"title,omitempty" json:"title,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + Avatar *TopicAvatar `url:"-" json:"avatar,omitempty"` +} + +// UpdateTopic updates a project topic. Only available to administrators. +// +// To remove a topic avatar set the TopicAvatar.Filename to an empty string +// and set TopicAvatar.Image to nil. 
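+//
+// A hedged sketch of removing an existing avatar (the topic ID is
+// hypothetical):
+//
+//	topic, _, err := git.Topics.UpdateTopic(42, &gitlab.UpdateTopicOptions{
+//		Avatar: &gitlab.TopicAvatar{},
+//	})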
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/topics.html#update-a-project-topic +func (s *TopicsService) UpdateTopic(topic int, opt *UpdateTopicOptions, options ...RequestOptionFunc) (*Topic, *Response, error) { + u := fmt.Sprintf("topics/%d", topic) + + var err error + var req *retryablehttp.Request + + if opt.Avatar == nil || (opt.Avatar.Filename == "" && opt.Avatar.Image == nil) { + req, err = s.client.NewRequest(http.MethodPut, u, opt, options) + } else { + req, err = s.client.UploadRequest( + http.MethodPut, + u, + opt.Avatar.Image, + opt.Avatar.Filename, + UploadAvatar, + opt, + options, + ) + } + if err != nil { + return nil, nil, err + } + + t := new(Topic) + resp, err := s.client.Do(req, t) + if err != nil { + return nil, resp, err + } + + return t, resp, nil +} + +// DeleteTopic deletes a project topic. Only available to administrators. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/topics.html#delete-a-project-topic +func (s *TopicsService) DeleteTopic(topic int, options ...RequestOptionFunc) (*Response, error) { + u := fmt.Sprintf("topics/%d", topic) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/types.go b/vendor/github.com/xanzy/go-gitlab/types.go new file mode 100644 index 0000000000..9ce13d735c --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/types.go @@ -0,0 +1,979 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "net/url" + "reflect" + "strconv" + "strings" + "time" +) + +// Ptr is a helper that returns a pointer to v. +func Ptr[T any](v T) *T { + return &v +} + +// AccessControlValue represents an access control value within GitLab, +// used for managing access to certain project features. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html +type AccessControlValue string + +// List of available access control values. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html +const ( + DisabledAccessControl AccessControlValue = "disabled" + EnabledAccessControl AccessControlValue = "enabled" + PrivateAccessControl AccessControlValue = "private" + PublicAccessControl AccessControlValue = "public" +) + +// AccessControl is a helper routine that allocates a new AccessControlValue +// to store v and returns a pointer to it. +// +// Deprecated: Please use Ptr instead. +func AccessControl(v AccessControlValue) *AccessControlValue { + return Ptr(v) +} + +// AccessLevelValue represents a permission level within GitLab. +// +// GitLab API docs: https://docs.gitlab.com/ee/user/permissions.html +type AccessLevelValue int + +// List of available access levels. 
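+//
+// Option structs take pointers to these values; the generic Ptr helper
+// defined above keeps that terse. A small sketch:
+//
+//	level := gitlab.Ptr(gitlab.DeveloperPermissions) // *gitlab.AccessLevelValue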
+// +// GitLab API docs: https://docs.gitlab.com/ee/user/permissions.html +const ( + NoPermissions AccessLevelValue = 0 + MinimalAccessPermissions AccessLevelValue = 5 + GuestPermissions AccessLevelValue = 10 + ReporterPermissions AccessLevelValue = 20 + DeveloperPermissions AccessLevelValue = 30 + MaintainerPermissions AccessLevelValue = 40 + OwnerPermissions AccessLevelValue = 50 + AdminPermissions AccessLevelValue = 60 + + // Deprecated: Renamed to MaintainerPermissions in GitLab 11.0. + MasterPermissions AccessLevelValue = 40 + // Deprecated: Renamed to OwnerPermissions. + OwnerPermission AccessLevelValue = 50 +) + +// AccessLevel is a helper routine that allocates a new AccessLevelValue +// to store v and returns a pointer to it. +// +// Deprecated: Please use Ptr instead. +func AccessLevel(v AccessLevelValue) *AccessLevelValue { + return Ptr(v) +} + +// UserIDValue represents a user ID value within GitLab. +type UserIDValue string + +// List of available user ID values. +const ( + UserIDAny UserIDValue = "Any" + UserIDNone UserIDValue = "None" +) + +// ApproverIDsValue represents an approver ID value within GitLab. +type ApproverIDsValue struct { + value interface{} +} + +// ApproverIDs is a helper routine that creates a new ApproverIDsValue. +func ApproverIDs(v interface{}) *ApproverIDsValue { + switch v.(type) { + case UserIDValue, []int: + return &ApproverIDsValue{value: v} + default: + panic("Unsupported value passed as approver ID") + } +} + +// EncodeValues implements the query.Encoder interface. +func (a *ApproverIDsValue) EncodeValues(key string, v *url.Values) error { + switch value := a.value.(type) { + case UserIDValue: + v.Set(key, string(value)) + case []int: + v.Del(key) + v.Del(key + "[]") + for _, id := range value { + v.Add(key+"[]", strconv.Itoa(id)) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (a ApproverIDsValue) MarshalJSON() ([]byte, error) { + return json.Marshal(a.value) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (a *ApproverIDsValue) UnmarshalJSON(bytes []byte) error { + return json.Unmarshal(bytes, a.value) +} + +// AssigneeIDValue represents an assignee ID value within GitLab. +type AssigneeIDValue struct { + value interface{} +} + +// AssigneeID is a helper routine that creates a new AssigneeIDValue. +func AssigneeID(v interface{}) *AssigneeIDValue { + switch v.(type) { + case UserIDValue, int: + return &AssigneeIDValue{value: v} + default: + panic("Unsupported value passed as assignee ID") + } +} + +// EncodeValues implements the query.Encoder interface. +func (a *AssigneeIDValue) EncodeValues(key string, v *url.Values) error { + switch value := a.value.(type) { + case UserIDValue: + v.Set(key, string(value)) + case int: + v.Set(key, strconv.Itoa(value)) + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (a AssigneeIDValue) MarshalJSON() ([]byte, error) { + return json.Marshal(a.value) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (a *AssigneeIDValue) UnmarshalJSON(bytes []byte) error { + return json.Unmarshal(bytes, a.value) +} + +// ReviewerIDValue represents a reviewer ID value within GitLab. +type ReviewerIDValue struct { + value interface{} +} + +// ReviewerID is a helper routine that creates a new ReviewerIDValue. 
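+// It accepts either a UserIDValue or a plain int and panics otherwise,
+// mirroring AssigneeID above. For example:
+//
+//	rid := gitlab.ReviewerID(42)                 // a concrete user ID
+//	none := gitlab.ReviewerID(gitlab.UserIDNone) // the special "None" value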
+func ReviewerID(v interface{}) *ReviewerIDValue {
+	switch v.(type) {
+	case UserIDValue, int:
+		return &ReviewerIDValue{value: v}
+	default:
+		panic("Unsupported value passed as reviewer ID")
+	}
+}
+
+// EncodeValues implements the query.Encoder interface.
+func (a *ReviewerIDValue) EncodeValues(key string, v *url.Values) error {
+	switch value := a.value.(type) {
+	case UserIDValue:
+		v.Set(key, string(value))
+	case int:
+		v.Set(key, strconv.Itoa(value))
+	}
+	return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (a ReviewerIDValue) MarshalJSON() ([]byte, error) {
+	return json.Marshal(a.value)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (a *ReviewerIDValue) UnmarshalJSON(bytes []byte) error {
+	return json.Unmarshal(bytes, a.value)
+}
+
+// AvailabilityValue represents an availability value within GitLab.
+type AvailabilityValue string
+
+// List of available availability values.
+//
+// Undocumented, see code at:
+// https://gitlab.com/gitlab-org/gitlab-foss/-/blob/master/app/models/user_status.rb#L22
+const (
+	NotSet AvailabilityValue = "not_set"
+	Busy   AvailabilityValue = "busy"
+)
+
+// Availability is a helper routine that allocates a new AvailabilityValue
+// to store v and returns a pointer to it.
+//
+// Deprecated: Please use Ptr instead.
+func Availability(v AvailabilityValue) *AvailabilityValue {
+	return Ptr(v)
+}
+
+// BuildStateValue represents a GitLab build state.
+type BuildStateValue string
+
+// These constants represent all valid build states.
+const (
+	Created            BuildStateValue = "created"
+	WaitingForResource BuildStateValue = "waiting_for_resource"
+	Preparing          BuildStateValue = "preparing"
+	Pending            BuildStateValue = "pending"
+	Running            BuildStateValue = "running"
+	Success            BuildStateValue = "success"
+	Failed             BuildStateValue = "failed"
+	Canceled           BuildStateValue = "canceled"
+	Skipped            BuildStateValue = "skipped"
+	Manual             BuildStateValue = "manual"
+	Scheduled          BuildStateValue = "scheduled"
+)
+
+// BuildState is a helper routine that allocates a new BuildStateValue
+// to store v and returns a pointer to it.
+//
+// Deprecated: Please use Ptr instead.
+func BuildState(v BuildStateValue) *BuildStateValue {
+	return Ptr(v)
+}
+
+// CommentEventAction identifies if a comment has been newly created or updated.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#comment-events
+type CommentEventAction string
+
+const (
+	CommentEventActionCreate CommentEventAction = "create"
+	CommentEventActionUpdate CommentEventAction = "update"
+)
+
+// ContainerRegistryStatus represents the status of a Container Registry.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/container_registry.html#list-registry-repositories
+type ContainerRegistryStatus string
+
+// ContainerRegistryStatus represents all valid statuses of a Container Registry.
+//
+// Undocumented, see code at:
+// https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/models/container_repository.rb?ref_type=heads#L35
+const (
+	ContainerRegistryStatusDeleteScheduled ContainerRegistryStatus = "delete_scheduled"
+	ContainerRegistryStatusDeleteFailed    ContainerRegistryStatus = "delete_failed"
+	ContainerRegistryStatusDeleteOngoing   ContainerRegistryStatus = "delete_ongoing"
+)
+
+// DeploymentApprovalStatus represents a GitLab deployment approval status.
+type DeploymentApprovalStatus string
+
+// These constants represent all valid deployment approval statuses.
+const (
+	DeploymentApprovalStatusApproved DeploymentApprovalStatus = "approved"
+	DeploymentApprovalStatusRejected DeploymentApprovalStatus = "rejected"
+)
+
+// DeploymentStatusValue represents a GitLab deployment status.
+type DeploymentStatusValue string
+
+// These constants represent all valid deployment statuses.
+const (
+	DeploymentStatusCreated  DeploymentStatusValue = "created"
+	DeploymentStatusRunning  DeploymentStatusValue = "running"
+	DeploymentStatusSuccess  DeploymentStatusValue = "success"
+	DeploymentStatusFailed   DeploymentStatusValue = "failed"
+	DeploymentStatusCanceled DeploymentStatusValue = "canceled"
+)
+
+// DeploymentStatus is a helper routine that allocates a new
+// DeploymentStatusValue to store v and returns a pointer to it.
+//
+// Deprecated: Please use Ptr instead.
+func DeploymentStatus(v DeploymentStatusValue) *DeploymentStatusValue {
+	return Ptr(v)
+}
+
+// DORAMetricType represents all valid DORA metrics types.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/dora/metrics.html
+type DORAMetricType string
+
+// List of available DORA metric type names.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/dora/metrics.html
+const (
+	DORAMetricDeploymentFrequency  DORAMetricType = "deployment_frequency"
+	DORAMetricLeadTimeForChanges   DORAMetricType = "lead_time_for_changes"
+	DORAMetricTimeToRestoreService DORAMetricType = "time_to_restore_service"
+	DORAMetricChangeFailureRate    DORAMetricType = "change_failure_rate"
+)
+
+// DORAMetricInterval represents the time period over which the
+// metrics are aggregated.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/dora/metrics.html
+type DORAMetricInterval string
+
+// List of available DORA metric interval types.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/dora/metrics.html
+const (
+	DORAMetricIntervalDaily   DORAMetricInterval = "daily"
+	DORAMetricIntervalMonthly DORAMetricInterval = "monthly"
+	DORAMetricIntervalAll     DORAMetricInterval = "all"
+)
+
+// EventTypeValue represents an action type for contribution events.
+type EventTypeValue string
+
+// List of available action types.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/user/profile/contributions_calendar.html#user-contribution-events
+const (
+	CreatedEventType   EventTypeValue = "created"
+	UpdatedEventType   EventTypeValue = "updated"
+	ClosedEventType    EventTypeValue = "closed"
+	ReopenedEventType  EventTypeValue = "reopened"
+	PushedEventType    EventTypeValue = "pushed"
+	CommentedEventType EventTypeValue = "commented"
+	MergedEventType    EventTypeValue = "merged"
+	JoinedEventType    EventTypeValue = "joined"
+	LeftEventType      EventTypeValue = "left"
+	DestroyedEventType EventTypeValue = "destroyed"
+	ExpiredEventType   EventTypeValue = "expired"
+)
+
+// EventTargetTypeValue represents a target type for contribution events.
+type EventTargetTypeValue string
+
+// List of available target types.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/events.html#target-types
+const (
+	IssueEventTargetType        EventTargetTypeValue = "issue"
+	MilestoneEventTargetType    EventTargetTypeValue = "milestone"
+	MergeRequestEventTargetType EventTargetTypeValue = "merge_request"
+	NoteEventTargetType         EventTargetTypeValue = "note"
+	ProjectEventTargetType      EventTargetTypeValue = "project"
+	SnippetEventTargetType      EventTargetTypeValue = "snippet"
+	UserEventTargetType         EventTargetTypeValue = "user"
+)
+
+// FileActionValue represents the available actions that can be performed on a file.
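+//
+// These values are used when building commit actions. A hedged sketch,
+// assuming the CommitActionOptions type defined alongside the Commits
+// service:
+//
+//	action := &gitlab.CommitActionOptions{
+//		Action:   gitlab.Ptr(gitlab.FileCreate),
+//		FilePath: gitlab.Ptr("docs/README.md"),
+//		Content:  gitlab.Ptr("Hello, world.\n"),
+//	}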
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/commits.html#create-a-commit-with-multiple-files-and-actions
+type FileActionValue string
+
+// The available file actions.
+const (
+	FileCreate FileActionValue = "create"
+	FileDelete FileActionValue = "delete"
+	FileMove   FileActionValue = "move"
+	FileUpdate FileActionValue = "update"
+	FileChmod  FileActionValue = "chmod"
+)
+
+// FileAction is a helper routine that allocates a new FileActionValue value
+// to store v and returns a pointer to it.
+//
+// Deprecated: Please use Ptr instead.
+func FileAction(v FileActionValue) *FileActionValue {
+	return Ptr(v)
+}
+
+// GenericPackageSelectValue represents a generic package select value.
+type GenericPackageSelectValue string
+
+// The available generic package select values.
+const (
+	SelectPackageFile GenericPackageSelectValue = "package_file"
+)
+
+// GenericPackageSelect is a helper routine that allocates a new
+// GenericPackageSelectValue value to store v and returns a pointer to it.
+//
+// Deprecated: Please use Ptr instead.
+func GenericPackageSelect(v GenericPackageSelectValue) *GenericPackageSelectValue {
+	return Ptr(v)
+}
+
+// GenericPackageStatusValue represents a generic package status.
+type GenericPackageStatusValue string
+
+// The available generic package statuses.
+const (
+	PackageDefault GenericPackageStatusValue = "default"
+	PackageHidden  GenericPackageStatusValue = "hidden"
+)
+
+// GenericPackageStatus is a helper routine that allocates a new
+// GenericPackageStatusValue value to store v and returns a pointer to it.
+//
+// Deprecated: Please use Ptr instead.
+func GenericPackageStatus(v GenericPackageStatusValue) *GenericPackageStatusValue {
+	return Ptr(v)
+}
+
+// ISOTime represents an ISO 8601 formatted date.
+type ISOTime time.Time
+
+// ISO 8601 date format.
+const iso8601 = "2006-01-02"
+
+// ParseISOTime parses an ISO 8601 formatted date.
+func ParseISOTime(s string) (ISOTime, error) {
+	t, err := time.Parse(iso8601, s)
+	return ISOTime(t), err
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (t ISOTime) MarshalJSON() ([]byte, error) {
+	if reflect.ValueOf(t).IsZero() {
+		return []byte(`null`), nil
+	}
+
+	if y := time.Time(t).Year(); y < 0 || y >= 10000 {
+		// ISO 8601 uses 4 digits for the year.
+		return nil, errors.New("json: ISOTime year outside of range [0,9999]")
+	}
+
+	b := make([]byte, 0, len(iso8601)+2)
+	b = append(b, '"')
+	b = time.Time(t).AppendFormat(b, iso8601)
+	b = append(b, '"')
+
+	return b, nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (t *ISOTime) UnmarshalJSON(data []byte) error {
+	// Ignore null, like in the main JSON package.
+	if string(data) == "null" {
+		return nil
+	}
+
+	isotime, err := time.Parse(`"`+iso8601+`"`, string(data))
+	*t = ISOTime(isotime)
+
+	return err
+}
+
+// EncodeValues implements the query.Encoder interface.
+func (t *ISOTime) EncodeValues(key string, v *url.Values) error {
+	if t == nil || (time.Time(*t)).IsZero() {
+		return nil
+	}
+	v.Add(key, t.String())
+	return nil
+}
+
+// String implements the Stringer interface.
+func (t ISOTime) String() string {
+	return time.Time(t).Format(iso8601)
+}
+
+// Labels represents a list of labels.
+type Labels []string
+
+// LabelOptions is a custom type with specific marshaling characteristics.
+type LabelOptions []string
+
+// MarshalJSON implements the json.Marshaler interface.
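+// A LabelOptions value is marshaled as a single comma-joined string rather
+// than a JSON array, matching what the GitLab API expects. A small sketch:
+//
+//	b, _ := json.Marshal(&gitlab.LabelOptions{"bug", "critical"})
+//	// string(b) == `"bug,critical"`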
+func (l *LabelOptions) MarshalJSON() ([]byte, error) {
+	if *l == nil {
+		return []byte(`null`), nil
+	}
+	return json.Marshal(strings.Join(*l, ","))
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (l *LabelOptions) UnmarshalJSON(data []byte) error {
+	type alias LabelOptions
+	if !bytes.HasPrefix(data, []byte("[")) {
+		data = []byte(fmt.Sprintf("[%s]", string(data)))
+	}
+	return json.Unmarshal(data, (*alias)(l))
+}
+
+// EncodeValues implements the query.EncodeValues interface.
+func (l *LabelOptions) EncodeValues(key string, v *url.Values) error {
+	v.Set(key, strings.Join(*l, ","))
+	return nil
+}
+
+// LinkTypeValue represents a release link type.
+type LinkTypeValue string
+
+// List of available release link types.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/releases/links.html#create-a-release-link
+const (
+	ImageLinkType   LinkTypeValue = "image"
+	OtherLinkType   LinkTypeValue = "other"
+	PackageLinkType LinkTypeValue = "package"
+	RunbookLinkType LinkTypeValue = "runbook"
+)
+
+// LinkType is a helper routine that allocates a new LinkTypeValue
+// to store v and returns a pointer to it.
+//
+// Deprecated: Please use Ptr instead.
+func LinkType(v LinkTypeValue) *LinkTypeValue {
+	return Ptr(v)
+}
+
+// LicenseApprovalStatusValue describes the approval statuses of a license.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/managed_licenses.html
+type LicenseApprovalStatusValue string
+
+// List of available license approval statuses.
+const (
+	LicenseApproved    LicenseApprovalStatusValue = "approved"
+	LicenseBlacklisted LicenseApprovalStatusValue = "blacklisted"
+	LicenseAllowed     LicenseApprovalStatusValue = "allowed"
+	LicenseDenied      LicenseApprovalStatusValue = "denied"
+)
+
+// LicenseApprovalStatus is a helper routine that allocates a new license
+// approval status value to store v and returns a pointer to it.
+//
+// Deprecated: Please use Ptr instead.
+func LicenseApprovalStatus(v LicenseApprovalStatusValue) *LicenseApprovalStatusValue {
+	return Ptr(v)
+}
+
+// MergeMethodValue represents a project merge type within GitLab.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#project-merge-method
+type MergeMethodValue string
+
+// List of available merge types.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#project-merge-method
+const (
+	NoFastForwardMerge MergeMethodValue = "merge"
+	FastForwardMerge   MergeMethodValue = "ff"
+	RebaseMerge        MergeMethodValue = "rebase_merge"
+)
+
+// MergeMethod is a helper routine that allocates a new MergeMethodValue
+// to store v and returns a pointer to it.
+//
+// Deprecated: Please use Ptr instead.
+func MergeMethod(v MergeMethodValue) *MergeMethodValue {
+	return Ptr(v)
+}
+
+// NoteTypeValue represents the type of a Note.
+type NoteTypeValue string
+
+// List of available note types.
+const (
+	DiffNote       NoteTypeValue = "DiffNote"
+	DiscussionNote NoteTypeValue = "DiscussionNote"
+	GenericNote    NoteTypeValue = "Note"
+	LegacyDiffNote NoteTypeValue = "LegacyDiffNote"
+)
+
+// NoteType is a helper routine that allocates a new NoteTypeValue to
+// store v and returns a pointer to it.
+//
+// Deprecated: Please use Ptr instead.
+func NoteType(v NoteTypeValue) *NoteTypeValue {
+	return Ptr(v)
+}
+
+// NotificationLevelValue represents a notification level.
+type NotificationLevelValue int
+
+// String implements the fmt.Stringer interface.
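+// The value also round-trips through JSON as this lowercase name. A small
+// sketch:
+//
+//	b, _ := json.Marshal(gitlab.WatchNotificationLevel)
+//	// string(b) == `"watch"`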
+func (l NotificationLevelValue) String() string {
+	return notificationLevelNames[l]
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (l NotificationLevelValue) MarshalJSON() ([]byte, error) {
+	return json.Marshal(l.String())
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (l *NotificationLevelValue) UnmarshalJSON(data []byte) error {
+	var raw interface{}
+	if err := json.Unmarshal(data, &raw); err != nil {
+		return err
+	}
+
+	switch raw := raw.(type) {
+	case float64:
+		*l = NotificationLevelValue(raw)
+	case string:
+		*l = notificationLevelTypes[raw]
+	case nil:
+		// No action needed.
+	default:
+		return fmt.Errorf("json: cannot unmarshal %T into Go value of type %T", raw, *l)
+	}
+
+	return nil
+}
+
+// List of valid notification levels.
+const (
+	DisabledNotificationLevel NotificationLevelValue = iota
+	ParticipatingNotificationLevel
+	WatchNotificationLevel
+	GlobalNotificationLevel
+	MentionNotificationLevel
+	CustomNotificationLevel
+)
+
+var notificationLevelNames = [...]string{
+	"disabled",
+	"participating",
+	"watch",
+	"global",
+	"mention",
+	"custom",
+}
+
+var notificationLevelTypes = map[string]NotificationLevelValue{
+	"disabled":      DisabledNotificationLevel,
+	"participating": ParticipatingNotificationLevel,
+	"watch":         WatchNotificationLevel,
+	"global":        GlobalNotificationLevel,
+	"mention":       MentionNotificationLevel,
+	"custom":        CustomNotificationLevel,
+}
+
+// NotificationLevel is a helper routine that allocates a new NotificationLevelValue
+// to store v and returns a pointer to it.
+//
+// Deprecated: Please use Ptr instead.
+func NotificationLevel(v NotificationLevelValue) *NotificationLevelValue {
+	return Ptr(v)
+}
+
+// ProjectCreationLevelValue represents a project creation level within GitLab.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/
+type ProjectCreationLevelValue string
+
+// List of available project creation levels.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/
+const (
+	NoOneProjectCreation      ProjectCreationLevelValue = "noone"
+	MaintainerProjectCreation ProjectCreationLevelValue = "maintainer"
+	DeveloperProjectCreation  ProjectCreationLevelValue = "developer"
+)
+
+// ProjectCreationLevel is a helper routine that allocates a new ProjectCreationLevelValue
+// to store v and returns a pointer to it.
+//
+// Deprecated: Please use Ptr instead.
+func ProjectCreationLevel(v ProjectCreationLevelValue) *ProjectCreationLevelValue {
+	return Ptr(v)
+}
+
+// ProjectHookEvent represents a project hook event.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#hook-events
+type ProjectHookEvent string
+
+// List of available project hook events.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#hook-events
+const (
+	ProjectHookEventPush                ProjectHookEvent = "push_events"
+	ProjectHookEventTagPush             ProjectHookEvent = "tag_push_events"
+	ProjectHookEventIssues              ProjectHookEvent = "issues_events"
+	ProjectHookEventConfidentialIssues  ProjectHookEvent = "confidential_issues_events"
+	ProjectHookEventNote                ProjectHookEvent = "note_events"
+	ProjectHookEventMergeRequests       ProjectHookEvent = "merge_requests_events"
+	ProjectHookEventJob                 ProjectHookEvent = "job_events"
+	ProjectHookEventPipeline            ProjectHookEvent = "pipeline_events"
+	ProjectHookEventWiki                ProjectHookEvent = "wiki_page_events"
+	ProjectHookEventReleases            ProjectHookEvent = "releases_events"
+	ProjectHookEventEmoji               ProjectHookEvent = "emoji_events"
+	ProjectHookEventResourceAccessToken ProjectHookEvent = "resource_access_token_events"
+)
+
+// ResourceGroupProcessMode represents a process mode for a resource group
+// within a GitLab project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/ci/resource_groups/index.html#process-modes
+type ResourceGroupProcessMode string
+
+// List of available resource group process modes.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/ci/resource_groups/index.html#process-modes
+const (
+	Unordered   ResourceGroupProcessMode = "unordered"
+	OldestFirst ResourceGroupProcessMode = "oldest_first"
+	NewestFirst ResourceGroupProcessMode = "newest_first"
+)
+
+// SharedRunnersSettingValue determines whether shared runners are enabled for a
+// group’s subgroups and projects.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/groups.html#options-for-shared_runners_setting
+type SharedRunnersSettingValue string
+
+// List of available shared runner setting levels.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/groups.html#options-for-shared_runners_setting
+const (
+	EnabledSharedRunnersSettingValue                  SharedRunnersSettingValue = "enabled"
+	DisabledAndOverridableSharedRunnersSettingValue   SharedRunnersSettingValue = "disabled_and_overridable"
+	DisabledAndUnoverridableSharedRunnersSettingValue SharedRunnersSettingValue = "disabled_and_unoverridable"
+
+	// Deprecated: DisabledWithOverrideSharedRunnersSettingValue is deprecated
+	// in favor of DisabledAndOverridableSharedRunnersSettingValue.
+	DisabledWithOverrideSharedRunnersSettingValue SharedRunnersSettingValue = "disabled_with_override"
+)
+
+// SharedRunnersSetting is a helper routine that allocates a new SharedRunnersSettingValue
+// to store v and returns a pointer to it.
+//
+// Deprecated: Please use Ptr instead.
+func SharedRunnersSetting(v SharedRunnersSettingValue) *SharedRunnersSettingValue {
+	return Ptr(v)
+}
+
+// SubGroupCreationLevelValue represents a sub group creation level within GitLab.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/
+type SubGroupCreationLevelValue string
+
+// List of available sub group creation levels.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/
+const (
+	OwnerSubGroupCreationLevelValue      SubGroupCreationLevelValue = "owner"
+	MaintainerSubGroupCreationLevelValue SubGroupCreationLevelValue = "maintainer"
+)
+
+// SubGroupCreationLevel is a helper routine that allocates a new SubGroupCreationLevelValue
+// to store v and returns a pointer to it.
+//
+// Deprecated: Please use Ptr instead.
+func SubGroupCreationLevel(v SubGroupCreationLevelValue) *SubGroupCreationLevelValue {
+	return Ptr(v)
+}
+
+// SquashOptionValue represents a squash option level within GitLab.
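+//
+// A hedged sketch of setting a squash default at project creation time,
+// assuming the CreateProjectOptions type from the Projects service:
+//
+//	p, _, err := git.Projects.CreateProject(&gitlab.CreateProjectOptions{
+//		Name:         gitlab.Ptr("example"),
+//		SquashOption: gitlab.Ptr(gitlab.SquashOptionDefaultOn),
+//	})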
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#create-project +type SquashOptionValue string + +// List of available squash options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#create-project +const ( + SquashOptionNever SquashOptionValue = "never" + SquashOptionAlways SquashOptionValue = "always" + SquashOptionDefaultOff SquashOptionValue = "default_off" + SquashOptionDefaultOn SquashOptionValue = "default_on" +) + +// SquashOption is a helper routine that allocates a new SquashOptionValue +// to store s and returns a pointer to it. +// +// Deprecated: Please use Ptr instead. +func SquashOption(s SquashOptionValue) *SquashOptionValue { + return Ptr(s) +} + +// TasksCompletionStatus represents tasks of the issue/merge request. +type TasksCompletionStatus struct { + Count int `json:"count"` + CompletedCount int `json:"completed_count"` +} + +// TodoAction represents the available actions that can be performed on a todo. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/todos.html +type TodoAction string + +// The available todo actions. +const ( + TodoAssigned TodoAction = "assigned" + TodoMentioned TodoAction = "mentioned" + TodoBuildFailed TodoAction = "build_failed" + TodoMarked TodoAction = "marked" + TodoApprovalRequired TodoAction = "approval_required" + TodoDirectlyAddressed TodoAction = "directly_addressed" +) + +// TodoTargetType represents the available target that can be linked to a todo. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/todos.html +type TodoTargetType string + +const ( + TodoTargetAlertManagement TodoTargetType = "AlertManagement::Alert" + TodoTargetDesignManagement TodoTargetType = "DesignManagement::Design" + TodoTargetIssue TodoTargetType = "Issue" + TodoTargetMergeRequest TodoTargetType = "MergeRequest" +) + +// UploadType represents the available upload types. +type UploadType string + +// The available upload types. +const ( + UploadAvatar UploadType = "avatar" + UploadFile UploadType = "file" +) + +// VariableTypeValue represents a variable type within GitLab. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/ +type VariableTypeValue string + +// List of available variable types. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/ +const ( + EnvVariableType VariableTypeValue = "env_var" + FileVariableType VariableTypeValue = "file" +) + +// VariableType is a helper routine that allocates a new VariableTypeValue +// to store v and returns a pointer to it. +// +// Deprecated: Please use Ptr instead. +func VariableType(v VariableTypeValue) *VariableTypeValue { + return Ptr(v) +} + +// VisibilityValue represents a visibility level within GitLab. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/ +type VisibilityValue string + +// List of available visibility levels. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/ +const ( + PrivateVisibility VisibilityValue = "private" + InternalVisibility VisibilityValue = "internal" + PublicVisibility VisibilityValue = "public" +) + +// Visibility is a helper routine that allocates a new VisibilityValue +// to store v and returns a pointer to it. +// +// Deprecated: Please use Ptr instead. +func Visibility(v VisibilityValue) *VisibilityValue { + return Ptr(v) +} + +// WikiFormatValue represents the available wiki formats. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/wikis.html +type WikiFormatValue string + +// The available wiki formats. 
+const ( + WikiFormatMarkdown WikiFormatValue = "markdown" + WikiFormatRDoc WikiFormatValue = "rdoc" + WikiFormatASCIIDoc WikiFormatValue = "asciidoc" + WikiFormatOrg WikiFormatValue = "org" +) + +// WikiFormat is a helper routine that allocates a new WikiFormatValue +// to store v and returns a pointer to it. +// +// Deprecated: Please use Ptr instead. +func WikiFormat(v WikiFormatValue) *WikiFormatValue { + return Ptr(v) +} + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +// +// Deprecated: Please use Ptr instead. +func Bool(v bool) *bool { + return Ptr(v) +} + +// Int is a helper routine that allocates a new int value +// to store v and returns a pointer to it. +// +// Deprecated: Please use Ptr instead. +func Int(v int) *int { + return Ptr(v) +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +// +// Deprecated: Please use Ptr instead. +func String(v string) *string { + return Ptr(v) +} + +// Time is a helper routine that allocates a new time.Time value +// to store v and returns a pointer to it. +// +// Deprecated: Please use Ptr instead. +func Time(v time.Time) *time.Time { + return Ptr(v) +} + +// BoolValue is a boolean value with advanced json unmarshaling features. +type BoolValue bool + +// UnmarshalJSON allows 1, 0, "true", and "false" to be considered as boolean values +// Needed for: +// https://gitlab.com/gitlab-org/gitlab-ce/issues/50122 +// https://gitlab.com/gitlab-org/gitlab/-/issues/233941 +// https://github.com/gitlabhq/terraform-provider-gitlab/issues/348 +func (t *BoolValue) UnmarshalJSON(b []byte) error { + switch string(b) { + case `"1"`: + *t = true + return nil + case `"0"`: + *t = false + return nil + case `"true"`: + *t = true + return nil + case `"false"`: + *t = false + return nil + default: + var v bool + err := json.Unmarshal(b, &v) + *t = BoolValue(v) + return err + } +} diff --git a/vendor/github.com/xanzy/go-gitlab/users.go b/vendor/github.com/xanzy/go-gitlab/users.go new file mode 100644 index 0000000000..f463952ac8 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/users.go @@ -0,0 +1,1591 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net" + "net/http" + "time" + + "github.com/hashicorp/go-retryablehttp" +) + +// List a couple of standard errors. 
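+// Callers can match them with errors.Is. A hedged sketch, assuming the
+// BlockUser method defined on the Users service:
+//
+//	if err := git.Users.BlockUser(userID); errors.Is(err, gitlab.ErrUserBlockPrevented) {
+//		// the user is already blocked by LDAP synchronization
+//	}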
+var (
+	ErrUserActivatePrevented         = errors.New("Cannot activate a user that is blocked by admin or by LDAP synchronization")
+	ErrUserApprovePrevented          = errors.New("Cannot approve a user that is blocked by admin or by LDAP synchronization")
+	ErrUserBlockPrevented            = errors.New("Cannot block a user that is already blocked by LDAP synchronization")
+	ErrUserConflict                  = errors.New("User does not have a pending request")
+	ErrUserDeactivatePrevented       = errors.New("Cannot deactivate a user that is blocked by admin or by LDAP synchronization")
+	ErrUserDisableTwoFactorPrevented = errors.New("Cannot disable two factor authentication if not authenticated as administrator")
+	ErrUserNotFound                  = errors.New("User does not exist")
+	ErrUserRejectPrevented           = errors.New("Cannot reject a user if not authenticated as administrator")
+	ErrUserTwoFactorNotEnabled       = errors.New("Cannot disable two factor authentication if not enabled")
+	ErrUserUnblockPrevented          = errors.New("Cannot unblock a user that is blocked by LDAP synchronization")
+)
+
+// UsersService handles communication with the user related methods of
+// the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/users.html
+type UsersService struct {
+	client *Client
+}
+
+// BasicUser represents a basic user that is included in other service
+// responses (such as merge requests, pipelines, etc.).
+type BasicUser struct {
+	ID        int        `json:"id"`
+	Username  string     `json:"username"`
+	Name      string     `json:"name"`
+	State     string     `json:"state"`
+	CreatedAt *time.Time `json:"created_at"`
+	AvatarURL string     `json:"avatar_url"`
+	WebURL    string     `json:"web_url"`
+}
+
+// User represents a GitLab user.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/users.html
+type User struct {
+	ID                             int                `json:"id"`
+	Username                       string             `json:"username"`
+	Email                          string             `json:"email"`
+	Name                           string             `json:"name"`
+	State                          string             `json:"state"`
+	WebURL                         string             `json:"web_url"`
+	CreatedAt                      *time.Time         `json:"created_at"`
+	Bio                            string             `json:"bio"`
+	Bot                            bool               `json:"bot"`
+	Location                       string             `json:"location"`
+	PublicEmail                    string             `json:"public_email"`
+	Skype                          string             `json:"skype"`
+	Linkedin                       string             `json:"linkedin"`
+	Twitter                        string             `json:"twitter"`
+	WebsiteURL                     string             `json:"website_url"`
+	Organization                   string             `json:"organization"`
+	JobTitle                       string             `json:"job_title"`
+	ExternUID                      string             `json:"extern_uid"`
+	Provider                       string             `json:"provider"`
+	ThemeID                        int                `json:"theme_id"`
+	LastActivityOn                 *ISOTime           `json:"last_activity_on"`
+	ColorSchemeID                  int                `json:"color_scheme_id"`
+	IsAdmin                        bool               `json:"is_admin"`
+	IsAuditor                      bool               `json:"is_auditor"`
+	AvatarURL                      string             `json:"avatar_url"`
+	CanCreateGroup                 bool               `json:"can_create_group"`
+	CanCreateProject               bool               `json:"can_create_project"`
+	ProjectsLimit                  int                `json:"projects_limit"`
+	CurrentSignInAt                *time.Time         `json:"current_sign_in_at"`
+	CurrentSignInIP                *net.IP            `json:"current_sign_in_ip"`
+	LastSignInAt                   *time.Time         `json:"last_sign_in_at"`
+	LastSignInIP                   *net.IP            `json:"last_sign_in_ip"`
+	ConfirmedAt                    *time.Time         `json:"confirmed_at"`
+	TwoFactorEnabled               bool               `json:"two_factor_enabled"`
+	Note                           string             `json:"note"`
+	Identities                     []*UserIdentity    `json:"identities"`
+	External                       bool               `json:"external"`
+	PrivateProfile                 bool               `json:"private_profile"`
+	SharedRunnersMinutesLimit      int                `json:"shared_runners_minutes_limit"`
+	ExtraSharedRunnersMinutesLimit int                `json:"extra_shared_runners_minutes_limit"`
+	UsingLicenseSeat               bool               `json:"using_license_seat"`
+	CustomAttributes               []*CustomAttribute `json:"custom_attributes"`
+	NamespaceID                    int                `json:"namespace_id"`
+	Locked                         bool               `json:"locked"`
+} + +// UserIdentity represents a user identity. +type UserIdentity struct { + Provider string `json:"provider"` + ExternUID string `json:"extern_uid"` +} + +// UserAvatar represents a GitLab user avatar. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html +type UserAvatar struct { + Filename string + Image io.Reader +} + +// MarshalJSON implements the json.Marshaler interface. +func (a *UserAvatar) MarshalJSON() ([]byte, error) { + if a.Filename == "" && a.Image == nil { + return []byte(`""`), nil + } + type alias UserAvatar + return json.Marshal((*alias)(a)) +} + +// ListUsersOptions represents the available ListUsers() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#list-users +type ListUsersOptions struct { + ListOptions + Active *bool `url:"active,omitempty" json:"active,omitempty"` + Blocked *bool `url:"blocked,omitempty" json:"blocked,omitempty"` + ExcludeInternal *bool `url:"exclude_internal,omitempty" json:"exclude_internal,omitempty"` + ExcludeExternal *bool `url:"exclude_external,omitempty" json:"exclude_external,omitempty"` + + // The options below are only available for admins. + Search *string `url:"search,omitempty" json:"search,omitempty"` + Username *string `url:"username,omitempty" json:"username,omitempty"` + ExternalUID *string `url:"extern_uid,omitempty" json:"extern_uid,omitempty"` + Provider *string `url:"provider,omitempty" json:"provider,omitempty"` + CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` + CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"` + OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` + Sort *string `url:"sort,omitempty" json:"sort,omitempty"` + TwoFactor *string `url:"two_factor,omitempty" json:"two_factor,omitempty"` + Admins *bool `url:"admins,omitempty" json:"admins,omitempty"` + External *bool `url:"external,omitempty" json:"external,omitempty"` + WithoutProjects *bool `url:"without_projects,omitempty" json:"without_projects,omitempty"` + WithCustomAttributes *bool `url:"with_custom_attributes,omitempty" json:"with_custom_attributes,omitempty"` + WithoutProjectBots *bool `url:"without_project_bots,omitempty" json:"without_project_bots,omitempty"` +} + +// ListUsers gets a list of users. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#list-users +func (s *UsersService) ListUsers(opt *ListUsersOptions, options ...RequestOptionFunc) ([]*User, *Response, error) { + req, err := s.client.NewRequest(http.MethodGet, "users", opt, options) + if err != nil { + return nil, nil, err + } + + var usr []*User + resp, err := s.client.Do(req, &usr) + if err != nil { + return nil, resp, err + } + + return usr, resp, nil +} + +// GetUsersOptions represents the available GetUser() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#single-user +type GetUsersOptions struct { + WithCustomAttributes *bool `url:"with_custom_attributes,omitempty" json:"with_custom_attributes,omitempty"` +} + +// GetUser gets a single user. 
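+//
+// A minimal, hypothetical sketch (user ID 1 is illustrative):
+//
+//	usr, _, err := git.Users.GetUser(1, gitlab.GetUsersOptions{})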
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#single-user +func (s *UsersService) GetUser(user int, opt GetUsersOptions, options ...RequestOptionFunc) (*User, *Response, error) { + u := fmt.Sprintf("users/%d", user) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + usr := new(User) + resp, err := s.client.Do(req, usr) + if err != nil { + return nil, resp, err + } + + return usr, resp, nil +} + +// CreateUserOptions represents the available CreateUser() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#user-creation +type CreateUserOptions struct { + Admin *bool `url:"admin,omitempty" json:"admin,omitempty"` + Avatar *UserAvatar `url:"-" json:"-"` + Bio *string `url:"bio,omitempty" json:"bio,omitempty"` + CanCreateGroup *bool `url:"can_create_group,omitempty" json:"can_create_group,omitempty"` + Email *string `url:"email,omitempty" json:"email,omitempty"` + External *bool `url:"external,omitempty" json:"external,omitempty"` + ExternUID *string `url:"extern_uid,omitempty" json:"extern_uid,omitempty"` + ForceRandomPassword *bool `url:"force_random_password,omitempty" json:"force_random_password,omitempty"` + JobTitle *string `url:"job_title,omitempty" json:"job_title,omitempty"` + Linkedin *string `url:"linkedin,omitempty" json:"linkedin,omitempty"` + Location *string `url:"location,omitempty" json:"location,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` + Note *string `url:"note,omitempty" json:"note,omitempty"` + Organization *string `url:"organization,omitempty" json:"organization,omitempty"` + Password *string `url:"password,omitempty" json:"password,omitempty"` + PrivateProfile *bool `url:"private_profile,omitempty" json:"private_profile,omitempty"` + ProjectsLimit *int `url:"projects_limit,omitempty" json:"projects_limit,omitempty"` + Provider *string `url:"provider,omitempty" json:"provider,omitempty"` + ResetPassword *bool `url:"reset_password,omitempty" json:"reset_password,omitempty"` + SkipConfirmation *bool `url:"skip_confirmation,omitempty" json:"skip_confirmation,omitempty"` + Skype *string `url:"skype,omitempty" json:"skype,omitempty"` + ThemeID *int `url:"theme_id,omitempty" json:"theme_id,omitempty"` + Twitter *string `url:"twitter,omitempty" json:"twitter,omitempty"` + Username *string `url:"username,omitempty" json:"username,omitempty"` + WebsiteURL *string `url:"website_url,omitempty" json:"website_url,omitempty"` +} + +// CreateUser creates a new user. Note only administrators can create new users. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#user-creation +func (s *UsersService) CreateUser(opt *CreateUserOptions, options ...RequestOptionFunc) (*User, *Response, error) { + var err error + var req *retryablehttp.Request + + if opt.Avatar == nil { + req, err = s.client.NewRequest(http.MethodPost, "users", opt, options) + } else { + req, err = s.client.UploadRequest( + http.MethodPost, + "users", + opt.Avatar.Image, + opt.Avatar.Filename, + UploadAvatar, + opt, + options, + ) + } + if err != nil { + return nil, nil, err + } + + usr := new(User) + resp, err := s.client.Do(req, usr) + if err != nil { + return nil, resp, err + } + + return usr, resp, nil +} + +// ModifyUserOptions represents the available ModifyUser() options. 
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#user-modification +type ModifyUserOptions struct { + Admin *bool `url:"admin,omitempty" json:"admin,omitempty"` + Avatar *UserAvatar `url:"-" json:"avatar,omitempty"` + Bio *string `url:"bio,omitempty" json:"bio,omitempty"` + CanCreateGroup *bool `url:"can_create_group,omitempty" json:"can_create_group,omitempty"` + CommitEmail *string `url:"commit_email,omitempty" json:"commit_email,omitempty"` + Email *string `url:"email,omitempty" json:"email,omitempty"` + External *bool `url:"external,omitempty" json:"external,omitempty"` + ExternUID *string `url:"extern_uid,omitempty" json:"extern_uid,omitempty"` + JobTitle *string `url:"job_title,omitempty" json:"job_title,omitempty"` + Linkedin *string `url:"linkedin,omitempty" json:"linkedin,omitempty"` + Location *string `url:"location,omitempty" json:"location,omitempty"` + Name *string `url:"name,omitempty" json:"name,omitempty"` + Note *string `url:"note,omitempty" json:"note,omitempty"` + Organization *string `url:"organization,omitempty" json:"organization,omitempty"` + Password *string `url:"password,omitempty" json:"password,omitempty"` + PrivateProfile *bool `url:"private_profile,omitempty" json:"private_profile,omitempty"` + ProjectsLimit *int `url:"projects_limit,omitempty" json:"projects_limit,omitempty"` + Provider *string `url:"provider,omitempty" json:"provider,omitempty"` + PublicEmail *string `url:"public_email,omitempty" json:"public_email,omitempty"` + SkipReconfirmation *bool `url:"skip_reconfirmation,omitempty" json:"skip_reconfirmation,omitempty"` + Skype *string `url:"skype,omitempty" json:"skype,omitempty"` + ThemeID *int `url:"theme_id,omitempty" json:"theme_id,omitempty"` + Twitter *string `url:"twitter,omitempty" json:"twitter,omitempty"` + Username *string `url:"username,omitempty" json:"username,omitempty"` + WebsiteURL *string `url:"website_url,omitempty" json:"website_url,omitempty"` +} + +// ModifyUser modifies an existing user. Only administrators can change attributes +// of a user. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#user-modification +func (s *UsersService) ModifyUser(user int, opt *ModifyUserOptions, options ...RequestOptionFunc) (*User, *Response, error) { + var err error + var req *retryablehttp.Request + u := fmt.Sprintf("users/%d", user) + + if opt.Avatar == nil || (opt.Avatar.Filename == "" && opt.Avatar.Image == nil) { + req, err = s.client.NewRequest(http.MethodPut, u, opt, options) + } else { + req, err = s.client.UploadRequest( + http.MethodPut, + u, + opt.Avatar.Image, + opt.Avatar.Filename, + UploadAvatar, + opt, + options, + ) + } + if err != nil { + return nil, nil, err + } + + usr := new(User) + resp, err := s.client.Do(req, usr) + if err != nil { + return nil, resp, err + } + + return usr, resp, nil +} + +// DeleteUser deletes a user. Available only for administrators. This is an +// idempotent function, calling this function for a non-existent user id still +// returns a status code 200 OK. The JSON response differs if the user was +// actually deleted or not. In the former the user is returned and in the +// latter not. 
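+//
+// Example (editor's sketch, not part of the vendored source):
+//
+//	resp, err := git.Users.DeleteUser(42)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(resp.StatusCode) // 200 OK even for a non-existent user id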
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#user-deletion +func (s *UsersService) DeleteUser(user int, options ...RequestOptionFunc) (*Response, error) { + u := fmt.Sprintf("users/%d", user) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// CurrentUser gets currently authenticated user. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#list-current-user +func (s *UsersService) CurrentUser(options ...RequestOptionFunc) (*User, *Response, error) { + req, err := s.client.NewRequest(http.MethodGet, "user", nil, options) + if err != nil { + return nil, nil, err + } + + usr := new(User) + resp, err := s.client.Do(req, usr) + if err != nil { + return nil, resp, err + } + + return usr, resp, nil +} + +// UserStatus represents the current status of a user +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#user-status +type UserStatus struct { + Emoji string `json:"emoji"` + Availability AvailabilityValue `json:"availability"` + Message string `json:"message"` + MessageHTML string `json:"message_html"` +} + +// CurrentUserStatus retrieves the user status +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#user-status +func (s *UsersService) CurrentUserStatus(options ...RequestOptionFunc) (*UserStatus, *Response, error) { + req, err := s.client.NewRequest(http.MethodGet, "user/status", nil, options) + if err != nil { + return nil, nil, err + } + + status := new(UserStatus) + resp, err := s.client.Do(req, status) + if err != nil { + return nil, resp, err + } + + return status, resp, nil +} + +// GetUserStatus retrieves a user's status +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#get-the-status-of-a-user +func (s *UsersService) GetUserStatus(user int, options ...RequestOptionFunc) (*UserStatus, *Response, error) { + u := fmt.Sprintf("users/%d/status", user) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + status := new(UserStatus) + resp, err := s.client.Do(req, status) + if err != nil { + return nil, resp, err + } + + return status, resp, nil +} + +// UserStatusOptions represents the options required to set the status +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#set-user-status +type UserStatusOptions struct { + Emoji *string `url:"emoji,omitempty" json:"emoji,omitempty"` + Availability *AvailabilityValue `url:"availability,omitempty" json:"availability,omitempty"` + Message *string `url:"message,omitempty" json:"message,omitempty"` +} + +// SetUserStatus sets the user's status +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#set-user-status +func (s *UsersService) SetUserStatus(opt *UserStatusOptions, options ...RequestOptionFunc) (*UserStatus, *Response, error) { + req, err := s.client.NewRequest(http.MethodPut, "user/status", opt, options) + if err != nil { + return nil, nil, err + } + + status := new(UserStatus) + resp, err := s.client.Do(req, status) + if err != nil { + return nil, resp, err + } + + return status, resp, nil +} + +// UserAssociationsCount represents the user associations count. 
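+//
+// Example for GetUserAssociationsCount (editor's sketch, not part of the
+// vendored source):
+//
+//	uac, _, err := git.Users.GetUserAssociationsCount(42)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(uac.ProjectsCount, uac.IssuesCount)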
+// +// Gitlab API docs: https://docs.gitlab.com/ee/api/users.html#list-associations-count-for-user +type UserAssociationsCount struct { + GroupsCount int `json:"groups_count"` + ProjectsCount int `json:"projects_count"` + IssuesCount int `json:"issues_count"` + MergeRequestsCount int `json:"merge_requests_count"` +} + +// GetUserAssociationsCount gets a list of a specified user associations. +// +// Gitlab API docs: https://docs.gitlab.com/ee/api/users.html#list-associations-count-for-user +func (s *UsersService) GetUserAssociationsCount(user int, options ...RequestOptionFunc) (*UserAssociationsCount, *Response, error) { + u := fmt.Sprintf("users/%d/associations_count", user) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + uac := new(UserAssociationsCount) + resp, err := s.client.Do(req, uac) + if err != nil { + return nil, resp, err + } + + return uac, resp, nil +} + +// SSHKey represents a SSH key. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#list-ssh-keys +type SSHKey struct { + ID int `json:"id"` + Title string `json:"title"` + Key string `json:"key"` + CreatedAt *time.Time `json:"created_at"` + ExpiresAt *time.Time `json:"expires_at"` +} + +// ListSSHKeysOptions represents the available ListSSHKeys options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#list-ssh-keys +type ListSSHKeysOptions ListOptions + +// ListSSHKeys gets a list of currently authenticated user's SSH keys. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#list-ssh-keys +func (s *UsersService) ListSSHKeys(opt *ListSSHKeysOptions, options ...RequestOptionFunc) ([]*SSHKey, *Response, error) { + req, err := s.client.NewRequest(http.MethodGet, "user/keys", opt, options) + if err != nil { + return nil, nil, err + } + + var k []*SSHKey + resp, err := s.client.Do(req, &k) + if err != nil { + return nil, resp, err + } + + return k, resp, nil +} + +// ListSSHKeysForUserOptions represents the available ListSSHKeysForUser() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#list-ssh-keys-for-user +type ListSSHKeysForUserOptions ListOptions + +// ListSSHKeysForUser gets a list of a specified user's SSH keys. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#list-ssh-keys-for-user +func (s *UsersService) ListSSHKeysForUser(uid interface{}, opt *ListSSHKeysForUserOptions, options ...RequestOptionFunc) ([]*SSHKey, *Response, error) { + user, err := parseID(uid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("users/%s/keys", user) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var k []*SSHKey + resp, err := s.client.Do(req, &k) + if err != nil { + return nil, resp, err + } + + return k, resp, nil +} + +// GetSSHKey gets a single key. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#single-ssh-key +func (s *UsersService) GetSSHKey(key int, options ...RequestOptionFunc) (*SSHKey, *Response, error) { + u := fmt.Sprintf("user/keys/%d", key) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + k := new(SSHKey) + resp, err := s.client.Do(req, k) + if err != nil { + return nil, resp, err + } + + return k, resp, nil +} + +// GetSSHKeyForUser gets a single key for a given user. 
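+//
+// Example (editor's sketch, not part of the vendored source; 42 and 7 are
+// placeholder user and key IDs):
+//
+//	k, _, err := git.Users.GetSSHKeyForUser(42, 7)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(k.Title)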
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#single-ssh-key-for-given-user +func (s *UsersService) GetSSHKeyForUser(user int, key int, options ...RequestOptionFunc) (*SSHKey, *Response, error) { + u := fmt.Sprintf("users/%d/keys/%d", user, key) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + k := new(SSHKey) + resp, err := s.client.Do(req, k) + if err != nil { + return nil, resp, err + } + + return k, resp, nil +} + +// AddSSHKeyOptions represents the available AddSSHKey() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#add-ssh-key +type AddSSHKeyOptions struct { + Title *string `url:"title,omitempty" json:"title,omitempty"` + Key *string `url:"key,omitempty" json:"key,omitempty"` + ExpiresAt *ISOTime `url:"expires_at,omitempty" json:"expires_at,omitempty"` +} + +// AddSSHKey creates a new key owned by the currently authenticated user. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#add-ssh-key +func (s *UsersService) AddSSHKey(opt *AddSSHKeyOptions, options ...RequestOptionFunc) (*SSHKey, *Response, error) { + req, err := s.client.NewRequest(http.MethodPost, "user/keys", opt, options) + if err != nil { + return nil, nil, err + } + + k := new(SSHKey) + resp, err := s.client.Do(req, k) + if err != nil { + return nil, resp, err + } + + return k, resp, nil +} + +// AddSSHKeyForUser creates new key owned by specified user. Available only for +// admin. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#add-ssh-key-for-user +func (s *UsersService) AddSSHKeyForUser(user int, opt *AddSSHKeyOptions, options ...RequestOptionFunc) (*SSHKey, *Response, error) { + u := fmt.Sprintf("users/%d/keys", user) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + k := new(SSHKey) + resp, err := s.client.Do(req, k) + if err != nil { + return nil, resp, err + } + + return k, resp, nil +} + +// DeleteSSHKey deletes key owned by currently authenticated user. This is an +// idempotent function and calling it on a key that is already deleted or not +// available results in 200 OK. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#delete-ssh-key-for-current-user +func (s *UsersService) DeleteSSHKey(key int, options ...RequestOptionFunc) (*Response, error) { + u := fmt.Sprintf("user/keys/%d", key) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// DeleteSSHKeyForUser deletes key owned by a specified user. Available only +// for admin. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#delete-ssh-key-for-given-user +func (s *UsersService) DeleteSSHKeyForUser(user, key int, options ...RequestOptionFunc) (*Response, error) { + u := fmt.Sprintf("users/%d/keys/%d", user, key) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// GPGKey represents a GPG key. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#list-all-gpg-keys +type GPGKey struct { + ID int `json:"id"` + Key string `json:"key"` + CreatedAt *time.Time `json:"created_at"` +} + +// ListGPGKeys gets a list of currently authenticated user’s GPG keys. 
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#list-all-gpg-keys +func (s *UsersService) ListGPGKeys(options ...RequestOptionFunc) ([]*GPGKey, *Response, error) { + req, err := s.client.NewRequest(http.MethodGet, "user/gpg_keys", nil, options) + if err != nil { + return nil, nil, err + } + + var ks []*GPGKey + resp, err := s.client.Do(req, &ks) + if err != nil { + return nil, resp, err + } + + return ks, resp, nil +} + +// GetGPGKey gets a specific GPG key of currently authenticated user. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#get-a-specific-gpg-key +func (s *UsersService) GetGPGKey(key int, options ...RequestOptionFunc) (*GPGKey, *Response, error) { + u := fmt.Sprintf("user/gpg_keys/%d", key) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + k := new(GPGKey) + resp, err := s.client.Do(req, k) + if err != nil { + return nil, resp, err + } + + return k, resp, nil +} + +// AddGPGKeyOptions represents the available AddGPGKey() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#add-a-gpg-key +type AddGPGKeyOptions struct { + Key *string `url:"key,omitempty" json:"key,omitempty"` +} + +// AddGPGKey creates a new GPG key owned by the currently authenticated user. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#add-a-gpg-key +func (s *UsersService) AddGPGKey(opt *AddGPGKeyOptions, options ...RequestOptionFunc) (*GPGKey, *Response, error) { + req, err := s.client.NewRequest(http.MethodPost, "user/gpg_keys", opt, options) + if err != nil { + return nil, nil, err + } + + k := new(GPGKey) + resp, err := s.client.Do(req, k) + if err != nil { + return nil, resp, err + } + + return k, resp, nil +} + +// DeleteGPGKey deletes a GPG key owned by currently authenticated user. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#delete-a-gpg-key +func (s *UsersService) DeleteGPGKey(key int, options ...RequestOptionFunc) (*Response, error) { + u := fmt.Sprintf("user/gpg_keys/%d", key) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// ListGPGKeysForUser gets a list of a specified user’s GPG keys. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#list-all-gpg-keys-for-given-user +func (s *UsersService) ListGPGKeysForUser(user int, options ...RequestOptionFunc) ([]*GPGKey, *Response, error) { + u := fmt.Sprintf("users/%d/gpg_keys", user) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + var ks []*GPGKey + resp, err := s.client.Do(req, &ks) + if err != nil { + return nil, resp, err + } + + return ks, resp, nil +} + +// GetGPGKeyForUser gets a specific GPG key for a given user. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#get-a-specific-gpg-key-for-a-given-user +func (s *UsersService) GetGPGKeyForUser(user, key int, options ...RequestOptionFunc) (*GPGKey, *Response, error) { + u := fmt.Sprintf("users/%d/gpg_keys/%d", user, key) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + k := new(GPGKey) + resp, err := s.client.Do(req, k) + if err != nil { + return nil, resp, err + } + + return k, resp, nil +} + +// AddGPGKeyForUser creates new GPG key owned by the specified user. 
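+//
+// Example (editor's sketch, not part of the vendored source; armoredKey is a
+// placeholder for an ASCII-armored public key string):
+//
+//	k, _, err := git.Users.AddGPGKeyForUser(42, &gitlab.AddGPGKeyOptions{
+//		Key: gitlab.String(armoredKey),
+//	})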
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#add-a-gpg-key-for-a-given-user +func (s *UsersService) AddGPGKeyForUser(user int, opt *AddGPGKeyOptions, options ...RequestOptionFunc) (*GPGKey, *Response, error) { + u := fmt.Sprintf("users/%d/gpg_keys", user) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + k := new(GPGKey) + resp, err := s.client.Do(req, k) + if err != nil { + return nil, resp, err + } + + return k, resp, nil +} + +// DeleteGPGKeyForUser deletes a GPG key owned by a specified user. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#delete-a-gpg-key-for-a-given-user +func (s *UsersService) DeleteGPGKeyForUser(user, key int, options ...RequestOptionFunc) (*Response, error) { + u := fmt.Sprintf("users/%d/gpg_keys/%d", user, key) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// Email represents an Email. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#list-emails +type Email struct { + ID int `json:"id"` + Email string `json:"email"` + ConfirmedAt *time.Time `json:"confirmed_at"` +} + +// ListEmails gets a list of currently authenticated user's Emails. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#list-emails +func (s *UsersService) ListEmails(options ...RequestOptionFunc) ([]*Email, *Response, error) { + req, err := s.client.NewRequest(http.MethodGet, "user/emails", nil, options) + if err != nil { + return nil, nil, err + } + + var e []*Email + resp, err := s.client.Do(req, &e) + if err != nil { + return nil, resp, err + } + + return e, resp, nil +} + +// ListEmailsForUserOptions represents the available ListEmailsForUser() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#list-emails-for-user +type ListEmailsForUserOptions ListOptions + +// ListEmailsForUser gets a list of a specified user's Emails. Available +// only for admin +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#list-emails-for-user +func (s *UsersService) ListEmailsForUser(user int, opt *ListEmailsForUserOptions, options ...RequestOptionFunc) ([]*Email, *Response, error) { + u := fmt.Sprintf("users/%d/emails", user) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var e []*Email + resp, err := s.client.Do(req, &e) + if err != nil { + return nil, resp, err + } + + return e, resp, nil +} + +// GetEmail gets a single email. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#single-email +func (s *UsersService) GetEmail(email int, options ...RequestOptionFunc) (*Email, *Response, error) { + u := fmt.Sprintf("user/emails/%d", email) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + e := new(Email) + resp, err := s.client.Do(req, e) + if err != nil { + return nil, resp, err + } + + return e, resp, nil +} + +// AddEmailOptions represents the available AddEmail() options. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#add-email +type AddEmailOptions struct { + Email *string `url:"email,omitempty" json:"email,omitempty"` + SkipConfirmation *bool `url:"skip_confirmation,omitempty" json:"skip_confirmation,omitempty"` +} + +// AddEmail creates a new email owned by the currently authenticated user. 
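+//
+// Example (editor's sketch, not part of the vendored source):
+//
+//	e, _, err := git.Users.AddEmail(&gitlab.AddEmailOptions{
+//		Email: gitlab.String("dev@example.com"),
+//	})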
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#add-email +func (s *UsersService) AddEmail(opt *AddEmailOptions, options ...RequestOptionFunc) (*Email, *Response, error) { + req, err := s.client.NewRequest(http.MethodPost, "user/emails", opt, options) + if err != nil { + return nil, nil, err + } + + e := new(Email) + resp, err := s.client.Do(req, e) + if err != nil { + return nil, resp, err + } + + return e, resp, nil +} + +// AddEmailForUser creates new email owned by specified user. Available only for +// admin. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#add-email-for-user +func (s *UsersService) AddEmailForUser(user int, opt *AddEmailOptions, options ...RequestOptionFunc) (*Email, *Response, error) { + u := fmt.Sprintf("users/%d/emails", user) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + e := new(Email) + resp, err := s.client.Do(req, e) + if err != nil { + return nil, resp, err + } + + return e, resp, nil +} + +// DeleteEmail deletes email owned by currently authenticated user. This is an +// idempotent function and calling it on a key that is already deleted or not +// available results in 200 OK. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#delete-email-for-current-user +func (s *UsersService) DeleteEmail(email int, options ...RequestOptionFunc) (*Response, error) { + u := fmt.Sprintf("user/emails/%d", email) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// DeleteEmailForUser deletes email owned by a specified user. Available only +// for admin. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#delete-email-for-given-user +func (s *UsersService) DeleteEmailForUser(user, email int, options ...RequestOptionFunc) (*Response, error) { + u := fmt.Sprintf("users/%d/emails/%d", user, email) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// BlockUser blocks the specified user. Available only for admin. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#block-user +func (s *UsersService) BlockUser(user int, options ...RequestOptionFunc) error { + u := fmt.Sprintf("users/%d/block", user) + + req, err := s.client.NewRequest(http.MethodPost, u, nil, options) + if err != nil { + return err + } + + resp, err := s.client.Do(req, nil) + if err != nil && resp == nil { + return err + } + + switch resp.StatusCode { + case 201: + return nil + case 403: + return ErrUserBlockPrevented + case 404: + return ErrUserNotFound + default: + return fmt.Errorf("Received unexpected result code: %d", resp.StatusCode) + } +} + +// UnblockUser unblocks the specified user. Available only for admin. 
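+//
+// Example (editor's sketch, not part of the vendored source): the block and
+// unblock helpers report failures through sentinel errors, e.g.
+//
+//	switch err := git.Users.UnblockUser(42); err {
+//	case nil:
+//		fmt.Println("unblocked")
+//	case gitlab.ErrUserNotFound:
+//		fmt.Println("no such user")
+//	default:
+//		log.Fatal(err)
+//	}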
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#unblock-user
+func (s *UsersService) UnblockUser(user int, options ...RequestOptionFunc) error {
+	u := fmt.Sprintf("users/%d/unblock", user)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return err
+	}
+
+	resp, err := s.client.Do(req, nil)
+	if err != nil && resp == nil {
+		return err
+	}
+
+	switch resp.StatusCode {
+	case 201:
+		return nil
+	case 403:
+		return ErrUserUnblockPrevented
+	case 404:
+		return ErrUserNotFound
+	default:
+		return fmt.Errorf("Received unexpected result code: %d", resp.StatusCode)
+	}
+}
+
+// BanUser bans the specified user. Available only for admin.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#ban-user
+func (s *UsersService) BanUser(user int, options ...RequestOptionFunc) error {
+	u := fmt.Sprintf("users/%d/ban", user)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return err
+	}
+
+	resp, err := s.client.Do(req, nil)
+	if err != nil && resp == nil {
+		return err
+	}
+
+	switch resp.StatusCode {
+	case 201:
+		return nil
+	case 404:
+		return ErrUserNotFound
+	default:
+		return fmt.Errorf("Received unexpected result code: %d", resp.StatusCode)
+	}
+}
+
+// UnbanUser unbans the specified user. Available only for admin.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#unban-user
+func (s *UsersService) UnbanUser(user int, options ...RequestOptionFunc) error {
+	u := fmt.Sprintf("users/%d/unban", user)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return err
+	}
+
+	resp, err := s.client.Do(req, nil)
+	if err != nil && resp == nil {
+		return err
+	}
+
+	switch resp.StatusCode {
+	case 201:
+		return nil
+	case 404:
+		return ErrUserNotFound
+	default:
+		return fmt.Errorf("Received unexpected result code: %d", resp.StatusCode)
+	}
+}
+
+// DeactivateUser deactivates the specified user. Available only for admin.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#deactivate-user
+func (s *UsersService) DeactivateUser(user int, options ...RequestOptionFunc) error {
+	u := fmt.Sprintf("users/%d/deactivate", user)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return err
+	}
+
+	resp, err := s.client.Do(req, nil)
+	if err != nil && resp == nil {
+		return err
+	}
+
+	switch resp.StatusCode {
+	case 201:
+		return nil
+	case 403:
+		return ErrUserDeactivatePrevented
+	case 404:
+		return ErrUserNotFound
+	default:
+		return fmt.Errorf("Received unexpected result code: %d", resp.StatusCode)
+	}
+}
+
+// ActivateUser activates the specified user. Available only for admin.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#activate-user
+func (s *UsersService) ActivateUser(user int, options ...RequestOptionFunc) error {
+	u := fmt.Sprintf("users/%d/activate", user)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return err
+	}
+
+	resp, err := s.client.Do(req, nil)
+	if err != nil && resp == nil {
+		return err
+	}
+
+	switch resp.StatusCode {
+	case 201:
+		return nil
+	case 403:
+		return ErrUserActivatePrevented
+	case 404:
+		return ErrUserNotFound
+	default:
+		return fmt.Errorf("Received unexpected result code: %d", resp.StatusCode)
+	}
+}
+
+// ApproveUser approves the specified user. Available only for admin.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#approve-user
+func (s *UsersService) ApproveUser(user int, options ...RequestOptionFunc) error {
+	u := fmt.Sprintf("users/%d/approve", user)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return err
+	}
+
+	resp, err := s.client.Do(req, nil)
+	if err != nil && resp == nil {
+		return err
+	}
+
+	switch resp.StatusCode {
+	case 201:
+		return nil
+	case 403:
+		return ErrUserApprovePrevented
+	case 404:
+		return ErrUserNotFound
+	default:
+		return fmt.Errorf("Received unexpected result code: %d", resp.StatusCode)
+	}
+}
+
+// RejectUser rejects the specified user. Available only for admin.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#reject-user
+func (s *UsersService) RejectUser(user int, options ...RequestOptionFunc) error {
+	u := fmt.Sprintf("users/%d/reject", user)
+
+	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	if err != nil {
+		return err
+	}
+
+	resp, err := s.client.Do(req, nil)
+	if err != nil && resp == nil {
+		return err
+	}
+
+	switch resp.StatusCode {
+	case 200:
+		return nil
+	case 403:
+		return ErrUserRejectPrevented
+	case 404:
+		return ErrUserNotFound
+	case 409:
+		return ErrUserConflict
+	default:
+		return fmt.Errorf("Received unexpected result code: %d", resp.StatusCode)
+	}
+}
+
+// ImpersonationToken represents an impersonation token.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/users.html#get-all-impersonation-tokens-of-a-user
+type ImpersonationToken struct {
+	ID         int        `json:"id"`
+	Name       string     `json:"name"`
+	Active     bool       `json:"active"`
+	Token      string     `json:"token"`
+	Scopes     []string   `json:"scopes"`
+	Revoked    bool       `json:"revoked"`
+	CreatedAt  *time.Time `json:"created_at"`
+	ExpiresAt  *ISOTime   `json:"expires_at"`
+	LastUsedAt *time.Time `json:"last_used_at"`
+}
+
+// GetAllImpersonationTokensOptions represents the available
+// GetAllImpersonationTokens() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/users.html#get-all-impersonation-tokens-of-a-user
+type GetAllImpersonationTokensOptions struct {
+	ListOptions
+	State *string `url:"state,omitempty" json:"state,omitempty"`
+}
+
+// GetAllImpersonationTokens retrieves all impersonation tokens of a user.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/users.html#get-all-impersonation-tokens-of-a-user
+func (s *UsersService) GetAllImpersonationTokens(user int, opt *GetAllImpersonationTokensOptions, options ...RequestOptionFunc) ([]*ImpersonationToken, *Response, error) {
+	u := fmt.Sprintf("users/%d/impersonation_tokens", user)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var ts []*ImpersonationToken
+	resp, err := s.client.Do(req, &ts)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return ts, resp, nil
+}
+
+// GetImpersonationToken retrieves an impersonation token of a user.
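+//
+// Example (editor's sketch, not part of the vendored source; 42 and 7 are
+// placeholder user and token IDs):
+//
+//	tok, _, err := git.Users.GetImpersonationToken(42, 7)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(tok.Name, tok.Active)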
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#get-an-impersonation-token-of-a-user +func (s *UsersService) GetImpersonationToken(user, token int, options ...RequestOptionFunc) (*ImpersonationToken, *Response, error) { + u := fmt.Sprintf("users/%d/impersonation_tokens/%d", user, token) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + t := new(ImpersonationToken) + resp, err := s.client.Do(req, &t) + if err != nil { + return nil, resp, err + } + + return t, resp, nil +} + +// CreateImpersonationTokenOptions represents the available +// CreateImpersonationToken() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#create-an-impersonation-token +type CreateImpersonationTokenOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + Scopes *[]string `url:"scopes,omitempty" json:"scopes,omitempty"` + ExpiresAt *time.Time `url:"expires_at,omitempty" json:"expires_at,omitempty"` +} + +// CreateImpersonationToken creates an impersonation token. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#create-an-impersonation-token +func (s *UsersService) CreateImpersonationToken(user int, opt *CreateImpersonationTokenOptions, options ...RequestOptionFunc) (*ImpersonationToken, *Response, error) { + u := fmt.Sprintf("users/%d/impersonation_tokens", user) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + t := new(ImpersonationToken) + resp, err := s.client.Do(req, &t) + if err != nil { + return nil, resp, err + } + + return t, resp, nil +} + +// RevokeImpersonationToken revokes an impersonation token. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#revoke-an-impersonation-token +func (s *UsersService) RevokeImpersonationToken(user, token int, options ...RequestOptionFunc) (*Response, error) { + u := fmt.Sprintf("users/%d/impersonation_tokens/%d", user, token) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// CreatePersonalAccessTokenOptions represents the available +// CreatePersonalAccessToken() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#create-a-personal-access-token +type CreatePersonalAccessTokenOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + ExpiresAt *ISOTime `url:"expires_at,omitempty" json:"expires_at,omitempty"` + Scopes *[]string `url:"scopes,omitempty" json:"scopes,omitempty"` +} + +// CreatePersonalAccessToken creates a personal access token. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#create-a-personal-access-token +func (s *UsersService) CreatePersonalAccessToken(user int, opt *CreatePersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) { + u := fmt.Sprintf("users/%d/personal_access_tokens", user) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + t := new(PersonalAccessToken) + resp, err := s.client.Do(req, &t) + if err != nil { + return nil, resp, err + } + + return t, resp, nil +} + +// CreatePersonalAccessTokenForCurrentUserOptions represents the available +// CreatePersonalAccessTokenForCurrentUser() options. 
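+//
+// Example (editor's sketch, not part of the vendored source; the scope name
+// is illustrative only):
+//
+//	pat, _, err := git.Users.CreatePersonalAccessTokenForCurrentUser(
+//		&gitlab.CreatePersonalAccessTokenForCurrentUserOptions{
+//			Name:   gitlab.String("ci-token"),
+//			Scopes: &[]string{"k8s_proxy"},
+//		})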
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/users.html#create-a-personal-access-token-with-limited-scopes-for-the-currently-authenticated-user
+type CreatePersonalAccessTokenForCurrentUserOptions struct {
+	Name      *string   `url:"name,omitempty" json:"name,omitempty"`
+	Scopes    *[]string `url:"scopes,omitempty" json:"scopes,omitempty"`
+	ExpiresAt *ISOTime  `url:"expires_at,omitempty" json:"expires_at,omitempty"`
+}
+
+// CreatePersonalAccessTokenForCurrentUser creates a personal access token with limited scopes for the currently authenticated user.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/users.html#create-a-personal-access-token-with-limited-scopes-for-the-currently-authenticated-user
+func (s *UsersService) CreatePersonalAccessTokenForCurrentUser(opt *CreatePersonalAccessTokenForCurrentUserOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) {
+	u := "user/personal_access_tokens"
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	t := new(PersonalAccessToken)
+	resp, err := s.client.Do(req, &t)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return t, resp, nil
+}
+
+// UserActivity represents an entry in the user/activities response.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/users.html#get-user-activities
+type UserActivity struct {
+	Username       string   `json:"username"`
+	LastActivityOn *ISOTime `json:"last_activity_on"`
+}
+
+// GetUserActivitiesOptions represents the options for GetUserActivities.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/users.html#get-user-activities
+type GetUserActivitiesOptions struct {
+	ListOptions
+	From *ISOTime `url:"from,omitempty" json:"from,omitempty"`
+}
+
+// GetUserActivities retrieves user activities (admin only).
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/users.html#get-user-activities
+func (s *UsersService) GetUserActivities(opt *GetUserActivitiesOptions, options ...RequestOptionFunc) ([]*UserActivity, *Response, error) {
+	req, err := s.client.NewRequest(http.MethodGet, "user/activities", opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var t []*UserActivity
+	resp, err := s.client.Do(req, &t)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return t, resp, nil
+}
+
+// UserMembership represents a membership of the user in a namespace or project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/users.html#user-memberships
+type UserMembership struct {
+	SourceID    int              `json:"source_id"`
+	SourceName  string           `json:"source_name"`
+	SourceType  string           `json:"source_type"`
+	AccessLevel AccessLevelValue `json:"access_level"`
+}
+
+// GetUserMembershipOptions represents the options available to query user memberships.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/users.html#user-memberships
+type GetUserMembershipOptions struct {
+	ListOptions
+	Type *string `url:"type,omitempty" json:"type,omitempty"`
+}
+
+// GetUserMemberships retrieves a list of the user's memberships.
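+//
+// Example (editor's sketch, not part of the vendored source): the Type option
+// filters on "Project" or "Namespace", e.g.
+//
+//	ms, _, err := git.Users.GetUserMemberships(42, &gitlab.GetUserMembershipOptions{
+//		Type: gitlab.String("Project"),
+//	})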
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#user-memberships +func (s *UsersService) GetUserMemberships(user int, opt *GetUserMembershipOptions, options ...RequestOptionFunc) ([]*UserMembership, *Response, error) { + u := fmt.Sprintf("users/%d/memberships", user) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var m []*UserMembership + resp, err := s.client.Do(req, &m) + if err != nil { + return nil, resp, err + } + + return m, resp, nil +} + +// DisableTwoFactor disables two factor authentication for the specified user. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#disable-two-factor-authentication +func (s *UsersService) DisableTwoFactor(user int, options ...RequestOptionFunc) error { + u := fmt.Sprintf("users/%d/disable_two_factor", user) + + req, err := s.client.NewRequest(http.MethodPatch, u, nil, options) + if err != nil { + return err + } + + resp, err := s.client.Do(req, nil) + if err != nil && resp == nil { + return err + } + + switch resp.StatusCode { + case 204: + return nil + case 400: + return ErrUserTwoFactorNotEnabled + case 403: + return ErrUserDisableTwoFactorPrevented + case 404: + return ErrUserNotFound + default: + return fmt.Errorf("Received unexpected result code: %d", resp.StatusCode) + } +} + +// UserRunner represents a GitLab runner linked to the current user. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#create-a-runner +type UserRunner struct { + ID int `json:"id"` + Token string `json:"token"` + TokenExpiresAt *time.Time `json:"token_expires_at"` +} + +// CreateUserRunnerOptions represents the available CreateUserRunner() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#create-a-runner +type CreateUserRunnerOptions struct { + RunnerType *string `url:"runner_type,omitempty" json:"runner_type,omitempty"` + GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` + ProjectID *int `url:"project_id,omitempty" json:"project_id,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` + Paused *bool `url:"paused,omitempty" json:"paused,omitempty"` + Locked *bool `url:"locked,omitempty" json:"locked,omitempty"` + RunUntagged *bool `url:"run_untagged,omitempty" json:"run_untagged,omitempty"` + TagList *[]string `url:"tag_list,omitempty" json:"tag_list,omitempty"` + AccessLevel *string `url:"access_level,omitempty" json:"access_level,omitempty"` + MaximumTimeout *int `url:"maximum_timeout,omitempty" json:"maximum_timeout,omitempty"` + MaintenanceNote *string `url:"maintenance_note,omitempty" json:"maintenance_note,omitempty"` +} + +// CreateUserRunner creates a runner linked to the current user. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#create-a-runner +func (s *UsersService) CreateUserRunner(opts *CreateUserRunnerOptions, options ...RequestOptionFunc) (*UserRunner, *Response, error) { + req, err := s.client.NewRequest(http.MethodPost, "user/runners", opts, options) + if err != nil { + return nil, nil, err + } + + r := new(UserRunner) + resp, err := s.client.Do(req, r) + if err != nil { + return nil, resp, err + } + + return r, resp, nil +} + +// CreateServiceAccountUser creates a new service account user. Note only administrators can create new service account users. 
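+//
+// Example (editor's sketch, not part of the vendored source):
+//
+//	sa, _, err := git.Users.CreateServiceAccountUser()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(sa.Username)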
+// +// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#create-service-account-user +func (s *UsersService) CreateServiceAccountUser(options ...RequestOptionFunc) (*User, *Response, error) { + req, err := s.client.NewRequest(http.MethodPost, "service_accounts", nil, options) + if err != nil { + return nil, nil, err + } + + usr := new(User) + resp, err := s.client.Do(req, usr) + if err != nil { + return nil, resp, err + } + + return usr, resp, nil +} + +// UploadAvatar uploads an avatar to the current user. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/users.html#upload-a-current-user-avatar +func (s *UsersService) UploadAvatar(avatar io.Reader, filename string, options ...RequestOptionFunc) (*User, *Response, error) { + u := "user/avatar" + + req, err := s.client.UploadRequest( + http.MethodPut, + u, + avatar, + filename, + UploadAvatar, + nil, + options, + ) + if err != nil { + return nil, nil, err + } + + usr := new(User) + resp, err := s.client.Do(req, usr) + if err != nil { + return nil, resp, err + } + + return usr, resp, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/validate.go b/vendor/github.com/xanzy/go-gitlab/validate.go new file mode 100644 index 0000000000..cb79ac8380 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/validate.go @@ -0,0 +1,154 @@ +// +// Copyright 2021, Sander van Harmelen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" +) + +// ValidateService handles communication with the validation related methods of +// the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/lint.html +type ValidateService struct { + client *Client +} + +// LintResult represents the linting results. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/lint.html +type LintResult struct { + Status string `json:"status"` + Errors []string `json:"errors"` + Warnings []string `json:"warnings"` + MergedYaml string `json:"merged_yaml"` +} + +// ProjectLintResult represents the linting results by project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/lint.html#validate-a-projects-ci-configuration +type ProjectLintResult struct { + Valid bool `json:"valid"` + Errors []string `json:"errors"` + Warnings []string `json:"warnings"` + MergedYaml string `json:"merged_yaml"` +} + +// LintOptions represents the available Lint() options. +// +// Gitlab API docs: +// https://docs.gitlab.com/ee/api/lint.html#validate-the-ci-yaml-configuration +type LintOptions struct { + Content string `url:"content,omitempty" json:"content,omitempty"` + IncludeMergedYAML bool `url:"include_merged_yaml,omitempty" json:"include_merged_yaml,omitempty"` + IncludeJobs bool `url:"include_jobs,omitempty" json:"include_jobs,omitempty"` +} + +// Lint validates .gitlab-ci.yml content. +// Deprecated: This endpoint was removed in GitLab 16.0. 
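+// On GitLab 16.0 and later, use ProjectLint or ProjectNamespaceLint instead.
+//
+// Example (editor's sketch, not part of the vendored source; 1234 is a
+// placeholder project ID):
+//
+//	res, _, err := git.Validate.ProjectLint(1234, &gitlab.ProjectLintOptions{})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(res.Valid, res.Errors)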
+// +// Gitlab API docs: +// https://docs.gitlab.com/ee/api/lint.html#validate-the-ci-yaml-configuration-deprecated +func (s *ValidateService) Lint(opts *LintOptions, options ...RequestOptionFunc) (*LintResult, *Response, error) { + req, err := s.client.NewRequest(http.MethodPost, "ci/lint", &opts, options) + if err != nil { + return nil, nil, err + } + + l := new(LintResult) + resp, err := s.client.Do(req, l) + if err != nil { + return nil, resp, err + } + + return l, resp, nil +} + +// ProjectNamespaceLintOptions represents the available ProjectNamespaceLint() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/lint.html#validate-a-ci-yaml-configuration-with-a-namespace +type ProjectNamespaceLintOptions struct { + Content *string `url:"content,omitempty" json:"content,omitempty"` + DryRun *bool `url:"dry_run,omitempty" json:"dry_run,omitempty"` + IncludeJobs *bool `url:"include_jobs,omitempty" json:"include_jobs,omitempty"` + Ref *string `url:"ref,omitempty" json:"ref,omitempty"` +} + +// ProjectNamespaceLint validates .gitlab-ci.yml content by project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/lint.html#validate-a-ci-yaml-configuration-with-a-namespace +func (s *ValidateService) ProjectNamespaceLint(pid interface{}, opt *ProjectNamespaceLintOptions, options ...RequestOptionFunc) (*ProjectLintResult, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/ci/lint", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPost, u, &opt, options) + if err != nil { + return nil, nil, err + } + + l := new(ProjectLintResult) + resp, err := s.client.Do(req, l) + if err != nil { + return nil, resp, err + } + + return l, resp, nil +} + +// ProjectLintOptions represents the available ProjectLint() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/lint.html#validate-a-projects-ci-configuration +type ProjectLintOptions struct { + ContentRef *string `url:"content_ref,omitempty" json:"content_ref,omitempty"` + DryRunRef *string `url:"dry_run_ref,omitempty" json:"dry_run_ref,omitempty"` + DryRun *bool `url:"dry_run,omitempty" json:"dry_run,omitempty"` + IncludeJobs *bool `url:"include_jobs,omitempty" json:"include_jobs,omitempty"` + Ref *string `url:"ref,omitempty" json:"ref,omitempty"` +} + +// ProjectLint validates .gitlab-ci.yml content by project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/lint.html#validate-a-projects-ci-configuration +func (s *ValidateService) ProjectLint(pid interface{}, opt *ProjectLintOptions, options ...RequestOptionFunc) (*ProjectLintResult, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/ci/lint", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, &opt, options) + if err != nil { + return nil, nil, err + } + + l := new(ProjectLintResult) + resp, err := s.client.Do(req, l) + if err != nil { + return nil, resp, err + } + + return l, resp, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/version.go b/vendor/github.com/xanzy/go-gitlab/version.go new file mode 100644 index 0000000000..0d30b90062 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/version.go @@ -0,0 +1,58 @@ +// +// Copyright 2021, Andrea Funto' +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import "net/http" + +// VersionService handles communication with the GitLab server instance to +// retrieve its version information via the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/version.html +type VersionService struct { + client *Client +} + +// Version represents a GitLab instance version. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/version.html +type Version struct { + Version string `json:"version"` + Revision string `json:"revision"` +} + +func (s Version) String() string { + return Stringify(s) +} + +// GetVersion gets a GitLab server instance version; it is only available to +// authenticated users. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/version.html +func (s *VersionService) GetVersion(options ...RequestOptionFunc) (*Version, *Response, error) { + req, err := s.client.NewRequest(http.MethodGet, "version", nil, options) + if err != nil { + return nil, nil, err + } + + v := new(Version) + resp, err := s.client.Do(req, v) + if err != nil { + return nil, resp, err + } + + return v, resp, nil +} diff --git a/vendor/github.com/xanzy/go-gitlab/wikis.go b/vendor/github.com/xanzy/go-gitlab/wikis.go new file mode 100644 index 0000000000..39847ef053 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/wikis.go @@ -0,0 +1,204 @@ +// +// Copyright 2021, Stany MARCEL +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gitlab + +import ( + "fmt" + "net/http" + "net/url" +) + +// WikisService handles communication with the wikis related methods of +// the Gitlab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/wikis.html +type WikisService struct { + client *Client +} + +// Wiki represents a GitLab wiki. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/wikis.html +type Wiki struct { + Content string `json:"content"` + Encoding string `json:"encoding"` + Format WikiFormatValue `json:"format"` + Slug string `json:"slug"` + Title string `json:"title"` +} + +func (w Wiki) String() string { + return Stringify(w) +} + +// ListWikisOptions represents the available ListWikis options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/wikis.html#list-wiki-pages +type ListWikisOptions struct { + WithContent *bool `url:"with_content,omitempty" json:"with_content,omitempty"` +} + +// ListWikis lists all pages of the wiki of the given project id. +// When with_content is set, it also returns the content of the pages. 
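+//
+// Example (editor's sketch, not part of the vendored source; the project can
+// be an ID or a "group/project" path):
+//
+//	ws, _, err := git.Wikis.ListWikis("group/project", &gitlab.ListWikisOptions{
+//		WithContent: gitlab.Bool(true),
+//	})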
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/wikis.html#list-wiki-pages +func (s *WikisService) ListWikis(pid interface{}, opt *ListWikisOptions, options ...RequestOptionFunc) ([]*Wiki, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/wikis", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var ws []*Wiki + resp, err := s.client.Do(req, &ws) + if err != nil { + return nil, resp, err + } + + return ws, resp, nil +} + +// GetWikiPageOptions represents options to GetWikiPage +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/wikis.html#get-a-wiki-page +type GetWikiPageOptions struct { + RenderHTML *bool `url:"render_html,omitempty" json:"render_html,omitempty"` + Version *string `url:"version,omitempty" json:"version,omitempty"` +} + +// GetWikiPage gets a wiki page for a given project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/wikis.html#get-a-wiki-page +func (s *WikisService) GetWikiPage(pid interface{}, slug string, opt *GetWikiPageOptions, options ...RequestOptionFunc) (*Wiki, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/wikis/%s", PathEscape(project), url.PathEscape(slug)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + w := new(Wiki) + resp, err := s.client.Do(req, w) + if err != nil { + return nil, resp, err + } + + return w, resp, nil +} + +// CreateWikiPageOptions represents options to CreateWikiPage. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/wikis.html#create-a-new-wiki-page +type CreateWikiPageOptions struct { + Content *string `url:"content,omitempty" json:"content,omitempty"` + Title *string `url:"title,omitempty" json:"title,omitempty"` + Format *WikiFormatValue `url:"format,omitempty" json:"format,omitempty"` +} + +// CreateWikiPage creates a new wiki page for the given repository with +// the given title, slug, and content. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/wikis.html#create-a-new-wiki-page +func (s *WikisService) CreateWikiPage(pid interface{}, opt *CreateWikiPageOptions, options ...RequestOptionFunc) (*Wiki, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/wikis", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + w := new(Wiki) + resp, err := s.client.Do(req, w) + if err != nil { + return nil, resp, err + } + + return w, resp, nil +} + +// EditWikiPageOptions represents options to EditWikiPage. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/wikis.html#edit-an-existing-wiki-page +type EditWikiPageOptions struct { + Content *string `url:"content,omitempty" json:"content,omitempty"` + Title *string `url:"title,omitempty" json:"title,omitempty"` + Format *WikiFormatValue `url:"format,omitempty" json:"format,omitempty"` +} + +// EditWikiPage Updates an existing wiki page. At least one parameter is +// required to update the wiki page. 
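+//
+// Example (editor's sketch, not part of the vendored source):
+//
+//	w, _, err := git.Wikis.EditWikiPage("group/project", "home", &gitlab.EditWikiPageOptions{
+//		Content: gitlab.String("updated content"),
+//	})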
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/wikis.html#edit-an-existing-wiki-page
+func (s *WikisService) EditWikiPage(pid interface{}, slug string, opt *EditWikiPageOptions, options ...RequestOptionFunc) (*Wiki, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/wikis/%s", PathEscape(project), url.PathEscape(slug))
+
+	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	w := new(Wiki)
+	resp, err := s.client.Do(req, w)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return w, resp, nil
+}
+
+// DeleteWikiPage deletes a wiki page with a given slug.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/wikis.html#delete-a-wiki-page
+func (s *WikisService) DeleteWikiPage(pid interface{}, slug string, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/wikis/%s", PathEscape(project), url.PathEscape(slug))
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (c *Class) Has(err error) bool { return IsFunc(err, func(err error) bool { errt, ok := err.(*errorT) @@ -165,6 +276,7 @@ func (c *Class) WrapP(err *error) { } } +<<<<<<< HEAD // Instance creates a class membership object which implements the error // interface and allows errors.Is() to check whether given errors are // (or contain) an instance of this class. @@ -181,6 +293,8 @@ func (c *Class) Instance() error { return (*classMembershipChecker)(c) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // create constructs the error, or just adds the class to the error, keeping // track of the stack if it needs to construct it. func (c *Class) create(depth int, err error) error { @@ -211,12 +325,15 @@ func (c *Class) create(depth int, err error) error { return errt } +<<<<<<< HEAD type classMembershipChecker Class func (cmc *classMembershipChecker) Error() string { panic("classMembershipChecker used as concrete error! don't do that") } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // errors // @@ -258,13 +375,24 @@ func (e *errorT) Format(f fmt.State, c rune) { } } +<<<<<<< HEAD // Cause implements the interface wrapping errors were previously // expected to implement to allow getting at underlying causes. +======= +// Cause implements the interface wrapping errors are expected to implement +// to allow getting at underlying causes. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (e *errorT) Cause() error { return e.err } +<<<<<<< HEAD // Unwrap returns the immediate underlying error. +======= +// Unwrap implements the draft design for error inspection. Since this is +// on an unexported type, it should not be hard to maintain going forward +// given that it also is the exact same semantics as Cause. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (e *errorT) Unwrap() error { return e.err } @@ -277,6 +405,7 @@ func (e *errorT) Name() (string, bool) { return string(*e.class), true } +<<<<<<< HEAD // Is determines whether an error is an instance of the given error class. // // Use with (*Class).Instance(). @@ -285,6 +414,8 @@ func (e *errorT) Is(err error) bool { return ok && e.class == (*Class)(cmc) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // summarizeStack writes stack line entries to the writer. func summarizeStack(w io.Writer, pcs []uintptr) { frames := runtime.CallersFrames(pcs) diff --git a/vendor/github.com/zeebo/errs/group.go b/vendor/github.com/zeebo/errs/group.go index 22b824aaf8..2a37999097 100644 --- a/vendor/github.com/zeebo/errs/group.go +++ b/vendor/github.com/zeebo/errs/group.go @@ -56,8 +56,28 @@ func (group Group) sanitize() Group { // combinedError is a list of non-empty errors type combinedError []error +<<<<<<< HEAD // Unwrap returns the first error. func (group combinedError) Unwrap() []error { return group } +======= +// Cause returns the first error. +func (group combinedError) Cause() error { + if len(group) > 0 { + return group[0] + } + return nil +} + +// Unwrap returns the first error. +func (group combinedError) Unwrap() error { + return group.Cause() +} + +// Ungroup returns all errors. +func (group combinedError) Ungroup() []error { + return group +} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Error returns error string delimited by semicolons. 
func (group combinedError) Error() string { return fmt.Sprintf("%v", group) } diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/version.go b/vendor/go.opentelemetry.io/contrib/detectors/gcp/version.go index 2a38c01957..268cc51e68 100644 --- a/vendor/go.opentelemetry.io/contrib/detectors/gcp/version.go +++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/version.go @@ -5,7 +5,11 @@ package gcp // import "go.opentelemetry.io/contrib/detectors/gcp" // Version is the current release version of the GCP resource detector. func Version() string { +<<<<<<< HEAD return "1.32.0" +======= + return "1.29.0" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go index 9e87fb4bb1..c143fdb58a 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go @@ -51,11 +51,19 @@ type config struct { tracer trace.Tracer meter metric.Meter +<<<<<<< HEAD rpcDuration metric.Float64Histogram rpcInBytes metric.Int64Histogram rpcOutBytes metric.Int64Histogram rpcInMessages metric.Int64Histogram rpcOutMessages metric.Int64Histogram +======= + rpcDuration metric.Float64Histogram + rpcRequestSize metric.Int64Histogram + rpcResponseSize metric.Int64Histogram + rpcRequestsPerRPC metric.Int64Histogram + rpcResponsesPerRPC metric.Int64Histogram +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Option applies an option value for a config. @@ -96,41 +104,73 @@ func newConfig(opts []Option, role string) *config { } } +<<<<<<< HEAD rpcRequestSize, err := c.meter.Int64Histogram("rpc."+role+".request.size", +======= + c.rpcRequestSize, err = c.meter.Int64Histogram("rpc."+role+".request.size", +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) metric.WithDescription("Measures size of RPC request messages (uncompressed)."), metric.WithUnit("By")) if err != nil { otel.Handle(err) +<<<<<<< HEAD if rpcRequestSize == nil { rpcRequestSize = noop.Int64Histogram{} } } rpcResponseSize, err := c.meter.Int64Histogram("rpc."+role+".response.size", +======= + if c.rpcRequestSize == nil { + c.rpcRequestSize = noop.Int64Histogram{} + } + } + + c.rpcResponseSize, err = c.meter.Int64Histogram("rpc."+role+".response.size", +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) metric.WithDescription("Measures size of RPC response messages (uncompressed)."), metric.WithUnit("By")) if err != nil { otel.Handle(err) +<<<<<<< HEAD if rpcResponseSize == nil { rpcResponseSize = noop.Int64Histogram{} } } rpcRequestsPerRPC, err := c.meter.Int64Histogram("rpc."+role+".requests_per_rpc", +======= + if c.rpcResponseSize == nil { + c.rpcResponseSize = noop.Int64Histogram{} + } + } + + c.rpcRequestsPerRPC, err = c.meter.Int64Histogram("rpc."+role+".requests_per_rpc", +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) metric.WithDescription("Measures the number of messages received per RPC. 
Should be 1 for all non-streaming RPCs."), metric.WithUnit("{count}")) if err != nil { otel.Handle(err) +<<<<<<< HEAD if rpcRequestsPerRPC == nil { rpcRequestsPerRPC = noop.Int64Histogram{} } } rpcResponsesPerRPC, err := c.meter.Int64Histogram("rpc."+role+".responses_per_rpc", +======= + if c.rpcRequestsPerRPC == nil { + c.rpcRequestsPerRPC = noop.Int64Histogram{} + } + } + + c.rpcResponsesPerRPC, err = c.meter.Int64Histogram("rpc."+role+".responses_per_rpc", +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."), metric.WithUnit("{count}")) if err != nil { otel.Handle(err) +<<<<<<< HEAD if rpcResponsesPerRPC == nil { rpcResponsesPerRPC = noop.Int64Histogram{} } @@ -154,6 +194,13 @@ func newConfig(opts []Option, role string) *config { c.rpcOutMessages = noop.Int64Histogram{} } +======= + if c.rpcResponsesPerRPC == nil { + c.rpcResponsesPerRPC = noop.Int64Histogram{} + } + } + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return c } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go index c01cb897cd..4739f72f1e 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go @@ -13,22 +13,36 @@ import ( "google.golang.org/grpc/stats" "google.golang.org/grpc/status" +<<<<<<< HEAD +======= + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/metric" semconv "go.opentelemetry.io/otel/semconv/v1.17.0" "go.opentelemetry.io/otel/trace" +<<<<<<< HEAD "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) type gRPCContextKey struct{} type gRPCContext struct { +<<<<<<< HEAD inMessages int64 outMessages int64 metricAttrs []attribute.KeyValue record bool +======= + messagesReceived int64 + messagesSent int64 + metricAttrs []attribute.KeyValue + record bool +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type serverHandler struct { @@ -151,8 +165,13 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool case *stats.Begin: case *stats.InPayload: if gctx != nil { +<<<<<<< HEAD messageId = atomic.AddInt64(&gctx.inMessages, 1) c.rpcInBytes.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) +======= + messageId = atomic.AddInt64(&gctx.messagesReceived, 1) + c.rpcRequestSize.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if c.ReceivedEvent { @@ -167,8 +186,13 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool } case *stats.OutPayload: if gctx != nil { +<<<<<<< HEAD messageId = atomic.AddInt64(&gctx.outMessages, 1) c.rpcOutBytes.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) +======= + messageId = atomic.AddInt64(&gctx.messagesSent, 1) + c.rpcResponseSize.Record(ctx, int64(rs.Length), 
metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if c.SentEvent { @@ -214,8 +238,13 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool c.rpcDuration.Record(ctx, elapsedTime, recordOpts...) if gctx != nil { +<<<<<<< HEAD c.rpcInMessages.Record(ctx, atomic.LoadInt64(&gctx.inMessages), recordOpts...) c.rpcOutMessages.Record(ctx, atomic.LoadInt64(&gctx.outMessages), recordOpts...) +======= + c.rpcRequestsPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesReceived), recordOpts...) + c.rpcResponsesPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesSent), recordOpts...) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } default: return diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go index 55219cb6cc..893dee9a6d 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go @@ -5,7 +5,11 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g // Version is the current release version of the gRPC instrumentation. func Version() string { +<<<<<<< HEAD return "0.57.0" +======= + return "0.54.0" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go index b25641c55d..8235af4737 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -18,7 +18,11 @@ var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} // Get is a convenient replacement for http.Get that adds a span around the request. func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) { +<<<<<<< HEAD req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL, nil) +======= + req, err := http.NewRequestWithContext(ctx, "GET", targetURL, nil) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -27,7 +31,11 @@ func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) // Head is a convenient replacement for http.Head that adds a span around the request. func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) { +<<<<<<< HEAD req, err := http.NewRequestWithContext(ctx, http.MethodHead, targetURL, nil) +======= + req, err := http.NewRequestWithContext(ctx, "HEAD", targetURL, nil) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -36,7 +44,11 @@ func Head(ctx context.Context, targetURL string) (resp *http.Response, err error // Post is a convenient replacement for http.Post that adds a span around the request. 
func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) { +<<<<<<< HEAD req, err := http.NewRequestWithContext(ctx, http.MethodPost, targetURL, body) +======= + req, err := http.NewRequestWithContext(ctx, "POST", targetURL, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go index a83a026274..cd91491827 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -18,6 +18,16 @@ const ( WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded) ) +<<<<<<< HEAD +======= +// Client HTTP metrics. +const ( + clientRequestSize = "http.client.request.size" // Outgoing request bytes total + clientResponseSize = "http.client.response.size" // Outgoing response bytes total + clientDuration = "http.client.duration" // Outgoing end to end duration, milliseconds +) + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Filter is a predicate used to determine whether a given http.request should // be traced. A Filter must return true if the request should be traced. type Filter func(*http.Request) bool diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index 3ea05d0199..3003f4b739 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -12,7 +12,10 @@ import ( "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/otel" +<<<<<<< HEAD "go.opentelemetry.io/otel/attribute" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" ) @@ -22,6 +25,7 @@ type middleware struct { operation string server string +<<<<<<< HEAD tracer trace.Tracer propagators propagation.TextMapPropagator spanStartOptions []trace.SpanStartOption @@ -32,6 +36,17 @@ type middleware struct { publicEndpoint bool publicEndpointFn func(*http.Request) bool metricAttributesFn func(*http.Request) []attribute.KeyValue +======= + tracer trace.Tracer + propagators propagation.TextMapPropagator + spanStartOptions []trace.SpanStartOption + readEvent bool + writeEvent bool + filters []Filter + spanNameFormatter func(string, *http.Request) string + publicEndpoint bool + publicEndpointFn func(*http.Request) bool +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) semconv semconv.HTTPServer } @@ -81,7 +96,16 @@ func (h *middleware) configure(c *config) { h.publicEndpointFn = c.PublicEndpointFn h.server = c.ServerName h.semconv = semconv.NewHTTPServer(c.Meter) +<<<<<<< HEAD h.metricAttributesFn = c.MetricAttributesFn +======= +} + +func handleErr(err error) { + if err != nil { + otel.Handle(err) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // serveHTTP sets up tracing and calls the given next http.Handler with the span @@ -120,11 +144,14 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r 
*http.Request, next http } } +<<<<<<< HEAD if startTime := StartTimeFromContext(ctx); !startTime.IsZero() { opts = append(opts, trace.WithTimestamp(startTime)) requestStartTime = startTime } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...) defer span.End() @@ -192,6 +219,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http // Use floating point division here for higher precision (instead of Millisecond method). elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) +<<<<<<< HEAD metricAttributes := semconv.MetricAttributes{ Req: r, StatusCode: statusCode, @@ -217,6 +245,19 @@ func (h *middleware) metricAttributesFromRequest(r *http.Request) []attribute.Ke return attributeForRequest } +======= + h.semconv.RecordMetrics(ctx, semconv.MetricData{ + ServerName: h.server, + Req: r, + StatusCode: statusCode, + AdditionalAttributes: labeler.Get(), + RequestSize: bw.BytesRead(), + ResponseSize: bytesWritten, + ElapsedTime: elapsedTime, + }) +} + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // WithRouteTag annotates spans and metrics with the provided route name // with HTTP route attribute. func WithRouteTag(route string, h http.Handler) http.Handler { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go index fbc344cbdd..2977a8b096 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go @@ -44,9 +44,13 @@ func (w *RespWriterWrapper) Write(p []byte) (int, error) { w.mu.Lock() defer w.mu.Unlock() +<<<<<<< HEAD if !w.wroteHeader { w.writeHeader(http.StatusOK) } +======= + w.writeHeader(http.StatusOK) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) n, err := w.ResponseWriter.Write(p) n1 := int64(n) @@ -82,12 +86,16 @@ func (w *RespWriterWrapper) writeHeader(statusCode int) { // Flush implements [http.Flusher]. func (w *RespWriterWrapper) Flush() { +<<<<<<< HEAD w.mu.Lock() defer w.mu.Unlock() if !w.wroteHeader { w.writeHeader(http.StatusOK) } +======= + w.WriteHeader(http.StatusOK) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if f, ok := w.ResponseWriter.(http.Flusher); ok { f.Flush() diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go index eaf4c37967..a9a4a554eb 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go @@ -1,6 +1,9 @@ +<<<<<<< HEAD // Code created by gotmpl. DO NOT MODIFY. 
// source: internal/shared/semconv/env.go.tmpl +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -12,17 +15,23 @@ import ( "net/http" "os" "strings" +<<<<<<< HEAD "sync" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/metric" ) +<<<<<<< HEAD // OTelSemConvStabilityOptIn is an environment variable. // That can be set to "old" or "http/dup" to opt into the new HTTP semantic conventions. const OTelSemConvStabilityOptIn = "OTEL_SEMCONV_STABILITY_OPT_IN" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type ResponseTelemetry struct { StatusCode int ReadBytes int64 @@ -38,11 +47,14 @@ type HTTPServer struct { requestBytesCounter metric.Int64Counter responseBytesCounter metric.Int64Counter serverLatencyMeasure metric.Float64Histogram +<<<<<<< HEAD // New metrics requestBodySizeHistogram metric.Int64Histogram responseBodySizeHistogram metric.Int64Histogram requestDurationHistogram metric.Float64Histogram +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // RequestTraceAttrs returns trace attributes for an HTTP request received by a @@ -63,9 +75,15 @@ type HTTPServer struct { // The req Host will be used to determine the server instead. func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { if s.duplicate { +<<<<<<< HEAD return append(OldHTTPServer{}.RequestTraceAttrs(server, req), CurrentHTTPServer{}.RequestTraceAttrs(server, req)...) } return OldHTTPServer{}.RequestTraceAttrs(server, req) +======= + return append(oldHTTPServer{}.RequestTraceAttrs(server, req), newHTTPServer{}.RequestTraceAttrs(server, req)...) + } + return oldHTTPServer{}.RequestTraceAttrs(server, req) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. @@ -73,14 +91,24 @@ func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attrib // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { if s.duplicate { +<<<<<<< HEAD return append(OldHTTPServer{}.ResponseTraceAttrs(resp), CurrentHTTPServer{}.ResponseTraceAttrs(resp)...) } return OldHTTPServer{}.ResponseTraceAttrs(resp) +======= + return append(oldHTTPServer{}.ResponseTraceAttrs(resp), newHTTPServer{}.ResponseTraceAttrs(resp)...) + } + return oldHTTPServer{}.ResponseTraceAttrs(resp) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Route returns the attribute for the route. 
func (s HTTPServer) Route(route string) attribute.KeyValue { +<<<<<<< HEAD return OldHTTPServer{}.Route(route) +======= + return oldHTTPServer{}.Route(route) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Status returns a span status code and message for an HTTP status code @@ -96,6 +124,7 @@ func (s HTTPServer) Status(code int) (codes.Code, string) { return codes.Unset, "" } +<<<<<<< HEAD type ServerMetricData struct { ServerName string ResponseSize int64 @@ -157,19 +186,55 @@ func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) { func NewHTTPServer(meter metric.Meter) HTTPServer { env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn)) +======= +type MetricData struct { + ServerName string + Req *http.Request + StatusCode int + AdditionalAttributes []attribute.KeyValue + + RequestSize int64 + ResponseSize int64 + ElapsedTime float64 +} + +func (s HTTPServer) RecordMetrics(ctx context.Context, md MetricData) { + if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil { + // This will happen if an HTTPServer{} is used insted of NewHTTPServer. + return + } + + attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + addOpts := []metric.AddOption{o} // Allocate vararg slice once. + s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...) + s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...) + s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) + + // TODO: Duplicate Metrics +} + +func NewHTTPServer(meter metric.Meter) HTTPServer { + env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) duplicate := env == "http/dup" server := HTTPServer{ duplicate: duplicate, } +<<<<<<< HEAD server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = OldHTTPServer{}.createMeasures(meter) if duplicate { server.requestBodySizeHistogram, server.responseBodySizeHistogram, server.requestDurationHistogram = CurrentHTTPServer{}.createMeasures(meter) } +======= + server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = oldHTTPServer{}.createMeasures(meter) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return server } type HTTPClient struct { duplicate bool +<<<<<<< HEAD // old metrics requestBytesCounter metric.Int64Counter @@ -193,23 +258,43 @@ func NewHTTPClient(meter metric.Meter) HTTPClient { } return client +======= +} + +func NewHTTPClient() HTTPClient { + env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) + return HTTPClient{duplicate: env == "http/dup"} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // RequestTraceAttrs returns attributes for an HTTP request made by a client. func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { if c.duplicate { +<<<<<<< HEAD return append(OldHTTPClient{}.RequestTraceAttrs(req), CurrentHTTPClient{}.RequestTraceAttrs(req)...) } return OldHTTPClient{}.RequestTraceAttrs(req) +======= + return append(oldHTTPClient{}.RequestTraceAttrs(req), newHTTPClient{}.RequestTraceAttrs(req)...) + } + return oldHTTPClient{}.RequestTraceAttrs(req) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // ResponseTraceAttrs returns metric attributes for an HTTP request made by a client. 
func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { if c.duplicate { +<<<<<<< HEAD return append(OldHTTPClient{}.ResponseTraceAttrs(resp), CurrentHTTPClient{}.ResponseTraceAttrs(resp)...) } return OldHTTPClient{}.ResponseTraceAttrs(resp) +======= + return append(oldHTTPClient{}.ResponseTraceAttrs(resp), newHTTPClient{}.ResponseTraceAttrs(resp)...) + } + + return oldHTTPClient{}.ResponseTraceAttrs(resp) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (c HTTPClient) Status(code int) (codes.Code, string) { @@ -224,11 +309,16 @@ func (c HTTPClient) Status(code int) (codes.Code, string) { func (c HTTPClient) ErrorType(err error) attribute.KeyValue { if c.duplicate { +<<<<<<< HEAD return CurrentHTTPClient{}.ErrorType(err) +======= + return newHTTPClient{}.ErrorType(err) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return attribute.KeyValue{} } +<<<<<<< HEAD type MetricOpts struct { measurement metric.MeasurementOption @@ -288,3 +378,5 @@ func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, s.responseBytesCounter.Add(ctx, responseData, opts["old"].AddOptions()) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go index 8c3c627513..8c483195c6 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go @@ -1,6 +1,9 @@ +<<<<<<< HEAD // Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/semconv/httpconv.go.tmpl +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -10,17 +13,27 @@ import ( "fmt" "net/http" "reflect" +<<<<<<< HEAD "slices" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "strconv" "strings" "go.opentelemetry.io/otel/attribute" +<<<<<<< HEAD "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/noop" semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) type CurrentHTTPServer struct{} +======= + semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type newHTTPServer struct{} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // TraceRequest returns trace attributes for an HTTP request received by a // server. @@ -38,18 +51,31 @@ type CurrentHTTPServer struct{} // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. +<<<<<<< HEAD func (n CurrentHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { +======= +func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) count := 3 // ServerAddress, Method, Scheme var host string var p int if server == "" { +<<<<<<< HEAD host, p = SplitHostPort(req.Host) } else { // Prioritize the primary server name. host, p = SplitHostPort(server) if p < 0 { _, p = SplitHostPort(req.Host) +======= + host, p = splitHostPort(req.Host) + } else { + // Prioritize the primary server name. 
+ host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -65,7 +91,11 @@ func (n CurrentHTTPServer) RequestTraceAttrs(server string, req *http.Request) [ scheme := n.scheme(req.TLS != nil) +<<<<<<< HEAD if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" { +======= + if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a // file-path that would be interpreted with a sock family. count++ @@ -110,7 +140,11 @@ func (n CurrentHTTPServer) RequestTraceAttrs(server string, req *http.Request) [ attrs = append(attrs, methodOriginal) } +<<<<<<< HEAD if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" { +======= + if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a // file-path that would be interpreted with a sock family. attrs = append(attrs, semconvNew.NetworkPeerAddress(peer)) @@ -141,7 +175,11 @@ func (n CurrentHTTPServer) RequestTraceAttrs(server string, req *http.Request) [ return attrs } +<<<<<<< HEAD func (n CurrentHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { +======= +func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if method == "" { return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} } @@ -156,7 +194,11 @@ func (n CurrentHTTPServer) method(method string) (attribute.KeyValue, attribute. return semconvNew.HTTPRequestMethodGet, orig } +<<<<<<< HEAD func (n CurrentHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive +======= +func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if https { return semconvNew.URLScheme("https") } @@ -166,7 +208,11 @@ func (n CurrentHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:rev // TraceResponse returns trace attributes for telemetry from an HTTP response. // // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. +<<<<<<< HEAD func (n CurrentHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { +======= +func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var count int if resp.ReadBytes > 0 { @@ -201,6 +247,7 @@ func (n CurrentHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribut } // Route returns the attribute for the route. +<<<<<<< HEAD func (n CurrentHTTPServer) Route(route string) attribute.KeyValue { return semconvNew.HTTPRoute(route) } @@ -289,6 +336,16 @@ type CurrentHTTPClient struct{} // RequestTraceAttrs returns trace attributes for an HTTP request made by a client. func (n CurrentHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { +======= +func (n newHTTPServer) Route(route string) attribute.KeyValue { + return semconvNew.HTTPRoute(route) +} + +type newHTTPClient struct{} + +// RequestTraceAttrs returns trace attributes for an HTTP request made by a client. 
+func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) /* below attributes are returned: - http.request.method @@ -308,7 +365,11 @@ func (n CurrentHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyV var requestHost string var requestPort int for _, hostport := range []string{urlHost, req.Header.Get("Host")} { +<<<<<<< HEAD requestHost, requestPort = SplitHostPort(hostport) +======= + requestHost, requestPort = splitHostPort(hostport) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if requestHost != "" || requestPort > 0 { break } @@ -370,7 +431,11 @@ func (n CurrentHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyV } // ResponseTraceAttrs returns trace attributes for an HTTP response made by a client. +<<<<<<< HEAD func (n CurrentHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { +======= +func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) /* below attributes are returned: - http.response.status_code @@ -397,7 +462,11 @@ func (n CurrentHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.K return attrs } +<<<<<<< HEAD func (n CurrentHTTPClient) ErrorType(err error) attribute.KeyValue { +======= +func (n newHTTPClient) ErrorType(err error) attribute.KeyValue { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) t := reflect.TypeOf(err) var value string if t.PkgPath() == "" && t.Name() == "" { @@ -414,7 +483,11 @@ func (n CurrentHTTPClient) ErrorType(err error) attribute.KeyValue { return semconvNew.ErrorTypeKey.String(value) } +<<<<<<< HEAD func (n CurrentHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { +======= +func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if method == "" { return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} } @@ -429,6 +502,7 @@ func (n CurrentHTTPClient) method(method string) (attribute.KeyValue, attribute. return semconvNew.HTTPRequestMethodGet, orig } +<<<<<<< HEAD func (n CurrentHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Histogram, metric.Float64Histogram) { if meter == nil { return noop.Int64Histogram{}, noop.Float64Histogram{} @@ -514,6 +588,8 @@ func (n CurrentHTTPClient) scheme(https bool) attribute.KeyValue { // nolint:rev return semconvNew.URLScheme("http") } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func isErrorStatusCode(code int) bool { return code >= 400 || code < 100 } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go index 558efd0594..d78ec2700d 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go @@ -1,6 +1,9 @@ +<<<<<<< HEAD // Code created by gotmpl. DO NOT MODIFY. 
// source: internal/shared/semconv/util.go.tmpl +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -17,14 +20,22 @@ import ( semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) +<<<<<<< HEAD // SplitHostPort splits a network address hostport of the form "host", +======= +// splitHostPort splits a network address hostport of the form "host", +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port", // "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and // port. // // An empty host is returned if it is not provided or unparsable. A negative // port is returned if it is not provided or unparsable. +<<<<<<< HEAD func SplitHostPort(hostport string) (host string, port int) { +======= +func splitHostPort(hostport string) (host string, port int) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) port = -1 if strings.HasPrefix(hostport, "[") { @@ -99,6 +110,7 @@ func handleErr(err error) { otel.Handle(err) } } +<<<<<<< HEAD func standardizeHTTPMethod(method string) string { method = strings.ToUpper(method) @@ -109,3 +121,5 @@ func standardizeHTTPMethod(method string) string { } return method } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go index 57d1507b62..02ebb68bf3 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -1,6 +1,9 @@ +<<<<<<< HEAD // Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/semconv/v120.0.go.tmpl +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -11,6 +14,10 @@ import ( "io" "net/http" "slices" +<<<<<<< HEAD +======= + "strings" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel/attribute" @@ -19,7 +26,11 @@ import ( semconv "go.opentelemetry.io/otel/semconv/v1.20.0" ) +<<<<<<< HEAD type OldHTTPServer struct{} +======= +type oldHTTPServer struct{} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // RequestTraceAttrs returns trace attributes for an HTTP request received by a // server. @@ -37,14 +48,22 @@ type OldHTTPServer struct{} // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. +<<<<<<< HEAD func (o OldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { +======= +func (o oldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return semconvutil.HTTPServerRequest(server, req) } // ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. // // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. 
+<<<<<<< HEAD func (o OldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { +======= +func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) attributes := []attribute.KeyValue{} if resp.ReadBytes > 0 { @@ -69,7 +88,11 @@ func (o OldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke } // Route returns the attribute for the route. +<<<<<<< HEAD func (o OldHTTPServer) Route(route string) attribute.KeyValue { +======= +func (o oldHTTPServer) Route(route string) attribute.KeyValue { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return semconv.HTTPRoute(route) } @@ -86,7 +109,11 @@ const ( serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds ) +<<<<<<< HEAD func (h OldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { +======= +func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if meter == nil { return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} } @@ -115,17 +142,30 @@ func (h OldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, return requestBytesCounter, responseBytesCounter, serverLatencyMeasure } +<<<<<<< HEAD func (o OldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { +======= +func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) n := len(additionalAttributes) + 3 var host string var p int if server == "" { +<<<<<<< HEAD host, p = SplitHostPort(req.Host) } else { // Prioritize the primary server name. host, p = SplitHostPort(server) if p < 0 { _, p = SplitHostPort(req.Host) +======= + host, p = splitHostPort(req.Host) + } else { + // Prioritize the primary server name. 
+ host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } hostPort := requiredHTTPPort(req.TLS != nil, p) @@ -146,7 +186,11 @@ func (o OldHTTPServer) MetricAttributes(server string, req *http.Request, status attributes := slices.Grow(additionalAttributes, n) attributes = append(attributes, +<<<<<<< HEAD semconv.HTTPMethod(standardizeHTTPMethod(req.Method)), +======= + o.methodMetric(req.Method), +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) o.scheme(req.TLS != nil), semconv.NetHostName(host)) @@ -166,13 +210,28 @@ func (o OldHTTPServer) MetricAttributes(server string, req *http.Request, status return attributes } +<<<<<<< HEAD func (o OldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive +======= +func (o oldHTTPServer) methodMetric(method string) attribute.KeyValue { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return semconv.HTTPMethod(method) +} + +func (o oldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if https { return semconv.HTTPSchemeHTTPS } return semconv.HTTPSchemeHTTP } +<<<<<<< HEAD type OldHTTPClient struct{} func (o OldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { @@ -264,3 +323,14 @@ func (o OldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, return requestBytesCounter, responseBytesCounter, latencyMeasure } +======= +type oldHTTPClient struct{} + +func (o oldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + return semconvutil.HTTPClientRequest(req) +} + +func (o oldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + return semconvutil.HTTPClientResponse(resp) +} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index 44b86ad860..32ed56c7e4 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -13,9 +13,17 @@ import ( "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" +<<<<<<< HEAD "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" +======= + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" @@ -27,6 +35,10 @@ type Transport struct { rt http.RoundTripper tracer trace.Tracer +<<<<<<< HEAD +======= + meter metric.Meter +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) propagators propagation.TextMapPropagator spanStartOptions []trace.SpanStartOption filters []Filter @@ -34,7 +46,14 @@ type Transport struct { clientTrace func(context.Context) *httptrace.ClientTrace metricAttributesFn func(*http.Request) 
[]attribute.KeyValue +<<<<<<< HEAD semconv semconv.HTTPClient +======= + semconv semconv.HTTPClient + requestBytesCounter metric.Int64Counter + responseBytesCounter metric.Int64Counter + latencyMeasure metric.Float64Histogram +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var _ http.RoundTripper = &Transport{} @@ -51,7 +70,12 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport { } t := Transport{ +<<<<<<< HEAD rt: base, +======= + rt: base, + semconv: semconv.NewHTTPClient(), +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } defaultOpts := []Option{ @@ -61,21 +85,59 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport { c := newConfig(append(defaultOpts, opts...)...) t.applyConfig(c) +<<<<<<< HEAD +======= + t.createMeasures() +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return &t } func (t *Transport) applyConfig(c *config) { t.tracer = c.Tracer +<<<<<<< HEAD +======= + t.meter = c.Meter +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) t.propagators = c.Propagators t.spanStartOptions = c.SpanStartOptions t.filters = c.Filters t.spanNameFormatter = c.SpanNameFormatter t.clientTrace = c.ClientTrace +<<<<<<< HEAD t.semconv = semconv.NewHTTPClient(c.Meter) t.metricAttributesFn = c.MetricAttributesFn } +======= + t.metricAttributesFn = c.MetricAttributesFn +} + +func (t *Transport) createMeasures() { + var err error + t.requestBytesCounter, err = t.meter.Int64Counter( + clientRequestSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP request messages."), + ) + handleErr(err) + + t.responseBytesCounter, err = t.meter.Int64Counter( + clientResponseSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP response messages."), + ) + handleErr(err) + + t.latencyMeasure, err = t.meter.Float64Histogram( + clientDuration, + metric.WithUnit("ms"), + metric.WithDescription("Measures the duration of outbound HTTP requests."), + ) + handleErr(err) +} + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func defaultTransportFormatter(_ string, r *http.Request) string { return "HTTP " + r.Method } @@ -145,6 +207,7 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { } // metrics +<<<<<<< HEAD metricOpts := t.semconv.MetricOptions(semconv.MetricAttributes{ Req: r, StatusCode: res.StatusCode, @@ -154,6 +217,18 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { // For handling response bytes we leverage a callback when the client reads the http response readRecordFunc := func(n int64) { t.semconv.RecordResponseSize(ctx, n, metricOpts) +======= + metricAttrs := append(append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...), t.metricAttributesFromRequest(r)...) + if res.StatusCode > 0 { + metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode)) + } + o := metric.WithAttributeSet(attribute.NewSet(metricAttrs...)) + + t.requestBytesCounter.Add(ctx, bw.BytesRead(), o) + // For handling response bytes we leverage a callback when the client reads the http response + readRecordFunc := func(n int64) { + t.responseBytesCounter.Add(ctx, n, o) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // traces @@ -165,12 +240,18 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { // Use floating point division here for higher precision (instead of Millisecond method). 
elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) +<<<<<<< HEAD t.semconv.RecordMetrics(ctx, semconv.MetricData{ RequestSize: bw.BytesRead(), ElapsedTime: elapsedTime, }, metricOpts) return res, nil +======= + t.latencyMeasure.Record(ctx, elapsedTime, o) + + return res, err +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (t *Transport) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 386f09e1b7..a04cc9938a 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -5,7 +5,11 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. func Version() string { +<<<<<<< HEAD return "0.59.0" +======= + return "0.54.0" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore index ae8577ef36..aa50a57d76 100644 --- a/vendor/go.opentelemetry.io/otel/.gitignore +++ b/vendor/go.opentelemetry.io/otel/.gitignore @@ -12,3 +12,14 @@ go.work go.work.sum gen/ +<<<<<<< HEAD +======= + +/example/dice/dice +/example/namedtracer/namedtracer +/example/otel-collector/otel-collector +/example/opencensus/opencensus +/example/passthrough/passthrough +/example/prometheus/prometheus +/example/zipkin/zipkin +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index ce3f40b609..02105abf6d 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -22,16 +22,25 @@ linters: - govet - ineffassign - misspell +<<<<<<< HEAD - perfsprint - revive - staticcheck - tenv - testifylint +======= + - revive + - staticcheck + - tenv +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) - typecheck - unconvert - unused - unparam +<<<<<<< HEAD - usestdlibvars +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) issues: # Maximum issues count per one linter. @@ -63,17 +72,29 @@ issues: text: "calls to (.+) only in main[(][)] or init[(][)] functions" linters: - revive +<<<<<<< HEAD # It's okay to not run gosec and perfsprint in a test. - path: _test\.go linters: - gosec - perfsprint # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) +======= + # It's okay to not run gosec in a test. + - path: _test\.go + linters: + - gosec + # Igonoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) # as we commonly use it in tests and examples. - text: "G404:" linters: - gosec +<<<<<<< HEAD # Ignoring gosec G402: TLS MinVersion too low +======= + # Igonoring gosec G402: TLS MinVersion too low +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well. - text: "G402: TLS MinVersion too low." 
linters: @@ -98,6 +119,7 @@ linters-settings: - pkg: "crypto/md5" - pkg: "crypto/sha1" - pkg: "crypto/**/pkix" +<<<<<<< HEAD auto/sdk: files: - "!internal/global/trace.go" @@ -105,6 +127,8 @@ linters-settings: deny: - pkg: "go.opentelemetry.io/auto/sdk" desc: Do not use SDK from automatic instrumentation. +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) otlp-internal: files: - "!**/exporters/otlp/internal/**/*.go" @@ -137,6 +161,11 @@ linters-settings: - "**/metric/**/*.go" - "**/bridge/*.go" - "**/bridge/**/*.go" +<<<<<<< HEAD +======= + - "**/example/*.go" + - "**/example/**/*.go" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) - "**/trace/*.go" - "**/trace/**/*.go" - "**/log/*.go" @@ -164,12 +193,15 @@ linters-settings: locale: US ignore-words: - cancelled +<<<<<<< HEAD perfsprint: err-error: true errorf: true int-conversion: true sprintf1: true strconcat: true +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) revive: # Sets the default failure confidence. # This means that linting errors with less than 0.8 confidence will be ignored. @@ -317,9 +349,12 @@ linters-settings: # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value - name: waitgroup-by-value disabled: false +<<<<<<< HEAD testifylint: enable-all: true disable: - float-compare - go-require - require-error +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 599d59cd13..f4655720f3 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -11,6 +11,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm +<<<<<<< HEAD ## [1.34.0/0.56.0/0.10.0] 2025-01-17 ### Changed @@ -146,6 +147,8 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm - Drop support for [Go 1.21]. (#5736, #5740, #5800) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## [1.29.0/0.51.0/0.5.0] 2024-08-23 This release is the last to support [Go 1.21]. @@ -2030,7 +2033,11 @@ with major version 0. - Setting error status while recording error with Span from oteltest package. (#1729) - The concept of a remote and local Span stored in a context is unified to just the current Span. Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed. +<<<<<<< HEAD Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span. +======= + Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContex` can be used to return the current Span. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731) - The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed. This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749) @@ -2604,7 +2611,11 @@ This release migrates the default OpenTelemetry SDK into its own Go module, deco - Prometheus exporter will not apply stale updates or forget inactive metrics. (#903) - Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905) - Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. 
(#901, #913) +<<<<<<< HEAD - Update otel-collector example to use the v0.5.0 collector. (#915) +======= +- Update otel-colector example to use the v0.5.0 collector. (#915) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) - The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922) - The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922) - The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists. @@ -3197,12 +3208,16 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. +<<<<<<< HEAD [Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.34.0...HEAD [1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0 [1.33.0/0.55.0/0.9.0/0.0.12]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.33.0 [1.32.0/0.54.0/0.8.0/0.0.11]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.32.0 [1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0 [1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0 +======= +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.29.0...HEAD +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) [1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0 [1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0 [1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 945a07d2b0..2213a70537 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -12,6 +12,12 @@ # https://help.github.com/en/articles/about-code-owners # +<<<<<<< HEAD * @MrAlias @XSAM @dashpole @pellared @dmathieu CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu +======= +* @MrAlias @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu + +CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole @XSAM @dmathieu +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index 22a2e9dbd4..f31cdb0438 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -578,10 +578,14 @@ See also: The tests should never leak goroutines. Use the term `ConcurrentSafe` in the test name when it aims to verify the +<<<<<<< HEAD absence of race conditions. The top-level tests with this term will be run many times in the `test-concurrent-safe` CI job to increase the chance of catching concurrency issues. This does not apply to subtests when this term is not in their root name. +======= +absence of race conditions. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ### Internal packages @@ -629,6 +633,7 @@ should be canceled. ## Approvers and Maintainers +<<<<<<< HEAD ### Triagers - [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent @@ -637,6 +642,15 @@ should be canceled. 
### Internal packages

@@ -629,6 +633,7 @@ should be canceled.

## Approvers and Maintainers

### Triagers

- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent

### Maintainers

- [Damien Mathieu](https://github.com/dmathieu), Elastic
- [David Ashpole](https://github.com/dashpole), Google
- [Robert Pająk](https://github.com/pellared), Splunk
@@ -645,6 +659,7 @@ should be canceled.

### Emeritus

- [Aaron Clawson](https://github.com/MadVikingGod)
- [Anthony Mirabella](https://github.com/Aneurysm9)
- [Chester Cheung](https://github.com/hanyuancheung)
@@ -652,6 +667,13 @@
- [Gustavo Silva Paiva](https://github.com/paivagustavo)
- [Josh MacDonald](https://github.com/jmacd)
- [Liz Fong-Jones](https://github.com/lizthegrey)

### Become an Approver or a Maintainer

diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile
index a7f6d8cc68..6ea0dff898 100644
--- a/vendor/go.opentelemetry.io/otel/Makefile
+++ b/vendor/go.opentelemetry.io/otel/Makefile
@@ -14,8 +14,13 @@ TIMEOUT = 60
.DEFAULT_GOAL := precommit

.PHONY: precommit ci
precommit: generate toolchain-check license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default
ci: generate toolchain-check license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage

# Tools

@@ -54,6 +59,12 @@ $(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer

PORTO = $(TOOLS)/porto
$(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto

GOTMPL = $(TOOLS)/gotmpl
$(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl

@@ -64,7 +75,11 @@ GOVULNCHECK = $(TOOLS)/govulncheck
$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck

.PHONY: tools
tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)

# Virtualized python tools via docker

@@ -142,14 +157,21 @@ build-tests/%:

# Tests

TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe
.PHONY: $(TEST_TARGETS) test
test-default test-race: ARGS=-race
test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=.
test-short: ARGS=-short
test-verbose: ARGS=-v -race
test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race
test-concurrent-safe: TIMEOUT=120
$(TEST_TARGETS): test
test: $(OTEL_GO_MOD_DIRS:%=test/%)
test/%: DIR=$*
@@ -235,6 +257,7 @@ govulncheck/%: $(GOVULNCHECK)
codespell: $(CODESPELL)
	@$(DOCKERPY) $(CODESPELL)

.PHONY: toolchain-check
toolchain-check:
	@toolchainRes=$$(for f in $(ALL_GO_MOD_DIRS); do \
@@ -245,6 +268,8 @@ toolchain-check:
		exit 1; \
	fi

.PHONY: license-check
license-check:
	@licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \
@@ -270,7 +295,11 @@ SEMCONVPKG ?= "semconv/"
semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT)
	[ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 )
	[ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 )
	$(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -z "$(SEMCONVPKG)/capitalizations.txt" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
	$(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)"
	$(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)"
diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md
index d9a1920762..c43dba0a7d 100644
--- a/vendor/go.opentelemetry.io/otel/README.md
+++ b/vendor/go.opentelemetry.io/otel/README.md
@@ -1,6 +1,10 @@
# OpenTelemetry-Go

[![ci](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml)
[![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main)
[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel)
[![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel)
@@ -51,6 +55,7 @@ Currently, this project supports the following environments.
|----------|------------|--------------|
| Ubuntu   | 1.23       | amd64        |
| Ubuntu   | 1.22       | amd64        |
| Ubuntu   | 1.23       | 386          |
| Ubuntu   | 1.22       | 386          |
| Linux    | 1.23       | arm64        |
@@ -63,6 +68,27 @@
| Windows  | 1.22       | amd64        |
| Windows  | 1.23       | 386          |
| Windows  | 1.22       | 386          |

While this project should work for other systems, no compatibility guarantees
are made for those systems currently.

@@ -89,8 +115,13 @@
If you need to extend the telemetry an instrumentation library provides or
want to build your own instrumentation for your application directly you will
need to use the [Go otel](https://pkg.go.dev/go.opentelemetry.io/otel)
package. The [examples](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples)
are a good way to see some practical uses of this process.
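For readers following that README hunk, a minimal sketch of what using the `otel` package directly looks like (the tracer name is illustrative):

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel"
)

func main() {
	// The global TracerProvider is a no-op until an SDK is installed,
	// so this is safe to call from library code.
	tracer := otel.Tracer("example.com/demo")
	ctx, span := tracer.Start(context.Background(), "operation")
	defer span.End()
	fmt.Println(ctx != nil)
}
```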
### Export

diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md
index 4ebef4f9dd..9896ee3ebd 100644
--- a/vendor/go.opentelemetry.io/otel/RELEASING.md
+++ b/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -111,6 +111,20 @@ It is critical you make sure the version you push upstream is correct.
Finally create a Release for the new `` on GitHub.
The release body should include all the release notes from the Changelog for this release.

## Post-Release

### Contrib Repository
@@ -130,6 +144,12 @@ Importantly, bump any package versions referenced to be the latest one you just

Bump the dependencies in the following Go services:

- [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting)
- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout)
- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog)
diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md
index b8cb605c16..1db051e44a 100644
--- a/vendor/go.opentelemetry.io/otel/VERSIONING.md
+++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md
@@ -26,7 +26,11 @@ is designed so the following goals can be achieved.
    go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path
    (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the
    paths used in `go get` commands (e.g., `go get
    go.opentelemetry.io/otel/v2@v2.0.1`). Note there is both a `/v2` and a
    `@v2.0.1` in that example. One way to think about it is that the module
    name now includes the `/v2`, so include `/v2` whenever you are using the
    module name).
diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go
index 6cbefceadf..e09eefb5d6 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/set.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/set.go
@@ -347,6 +347,7 @@ func computeDistinct(kvs []KeyValue) Distinct {
func computeDistinctFixed(kvs []KeyValue) interface{} {
	switch len(kvs) {
	case 1:
		return [1]KeyValue(kvs)
	case 2:
		return [2]KeyValue(kvs)
@@ -366,6 +367,47 @@ func computeDistinctFixed(kvs []KeyValue) interface{} {
		return [9]KeyValue(kvs)
	case 10:
		return [10]KeyValue(kvs)
	default:
		return nil
	}
}
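The `computeDistinctFixed` hunk above relies on Go 1.20's direct slice-to-array conversion in place of the older copy through a pointer. A standalone sketch of the two approaches:

```go
package main

import "fmt"

func main() {
	kvs := []string{"a", "b", "c"}

	// Go 1.20+: convert a slice straight to an array value; this panics
	// if len(kvs) does not match the array length.
	direct := [3]string(kvs)

	// Pre-1.20 equivalent: allocate the array and copy into it.
	var copied [3]string
	copy(copied[:], kvs)

	fmt.Println(direct == copied) // true
}
```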
diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
index 0e1fe24220..18e32ed9d9 100644
--- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go
+++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
@@ -50,7 +50,11 @@ type Property struct {
// component boundaries may impose their own restrictions on Property key.
// For example, the W3C Baggage specification restricts the Property keys to strings that
// satisfy the token definition from RFC7230, Section 3.2.6.
// For maximum compatibility, alphanumeric values are strongly recommended to be used as Property key.
func NewKeyProperty(key string) (Property, error) {
	if !validateBaggageName(key) {
		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
	}
@@ -90,7 +94,11 @@ func NewKeyValueProperty(key, value string) (Property, error) {
// component boundaries may impose their own restrictions on Property key.
// For example, the W3C Baggage specification restricts the Property keys to strings that
// satisfy the token definition from RFC7230, Section 3.2.6.
// For maximum compatibility, alphanumeric values are strongly recommended to be used as Property key.
func NewKeyValuePropertyRaw(key, value string) (Property, error) {
	if !validateBaggageName(key) {
		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
	}
@@ -287,7 +295,11 @@ func NewMember(key, value string, props ...Property) (Member, error) {
// component boundaries may impose their own restrictions on baggage key.
// For example, the W3C Baggage specification restricts the baggage keys to strings that
// satisfy the token definition from RFC7230, Section 3.2.6.
// For maximum compatibility, alphanumeric values are strongly recommended to be used as baggage key.
func NewMemberRaw(key, value string, props ...Property) (Member, error) {
	m := Member{
		key:   key,
@@ -355,7 +367,11 @@ func parseMember(member string) (Member, error) {
}

// replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'.
func replaceInvalidUTF8Sequences(c int, unescapeVal string) string {
	if utf8.ValidString(unescapeVal) {
		return unescapeVal
	}
@@ -363,7 +379,11 @@ func replaceInvalidUTF8Sequences(c int, unescapeVal string) string {
	// https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69

	var b strings.Builder
	b.Grow(c)
	for i := 0; i < len(unescapeVal); {
		r, size := utf8.DecodeRuneInString(unescapeVal[i:])
		if r == utf8.RuneError && size == 1 {
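The baggage hunk ends inside `replaceInvalidUTF8Sequences`; a self-contained sketch of the same decode-and-replace loop (the linked W3C text requires invalid sequences to become U+FFFD):

```go
package main

import (
	"fmt"
	"strings"
	"unicode/utf8"
)

// replaceInvalid mirrors the shape of the vendored helper: c is a capacity
// hint, and every invalid UTF-8 byte becomes the replacement character.
func replaceInvalid(c int, s string) string {
	if utf8.ValidString(s) {
		return s
	}
	var b strings.Builder
	b.Grow(c)
	for i := 0; i < len(s); {
		r, size := utf8.DecodeRuneInString(s[i:])
		if r == utf8.RuneError && size == 1 {
			// Invalid byte: emit '\uFFFD' instead.
			b.WriteRune(utf8.RuneError)
		} else {
			b.WriteString(s[i : i+size])
		}
		i += size
	}
	return b.String()
}

func main() {
	fmt.Println(replaceInvalid(8, "ok\xffok")) // ok�ok
}
```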
diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go
index 49a35b1225..1c8c5bfaf0 100644
--- a/vendor/go.opentelemetry.io/otel/codes/codes.go
+++ b/vendor/go.opentelemetry.io/otel/codes/codes.go
@@ -5,7 +5,10 @@ package codes // import "go.opentelemetry.io/otel/codes"

import (
	"encoding/json"
	"errors"
	"fmt"
	"strconv"
)

@@ -64,7 +67,11 @@ func (c *Code) UnmarshalJSON(b []byte) error {
		return nil
	}
	if c == nil {
		return errors.New("nil receiver passed to UnmarshalJSON")
	}

	var x interface{}
diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
index 691d96c755..b8e759305a 100644
--- a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
+++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
@@ -49,11 +49,20 @@ func AsBoolSlice(v interface{}) []bool {
	if rv.Type().Kind() != reflect.Array {
		return nil
	}
	cpy := make([]bool, rv.Len())
	if len(cpy) > 0 {
		_ = reflect.Copy(reflect.ValueOf(cpy), rv)
	}
	return cpy
}

// AsInt64Slice converts an int64 array into a slice with the same elements.
@@ -62,11 +71,20 @@ func AsInt64Slice(v interface{}) []int64 {
	if rv.Type().Kind() != reflect.Array {
		return nil
	}
	cpy := make([]int64, rv.Len())
	if len(cpy) > 0 {
		_ = reflect.Copy(reflect.ValueOf(cpy), rv)
	}
	return cpy
}

// AsFloat64Slice converts a float64 array into a slice with the same elements.
@@ -75,11 +93,20 @@ func AsFloat64Slice(v interface{}) []float64 {
	if rv.Type().Kind() != reflect.Array {
		return nil
	}
	cpy := make([]float64, rv.Len())
	if len(cpy) > 0 {
		_ = reflect.Copy(reflect.ValueOf(cpy), rv)
	}
	return cpy
}

// AsStringSlice converts a string array into a slice with the same elements.
@@ -88,9 +115,18 @@ func AsStringSlice(v interface{}) []string {
	if rv.Type().Kind() != reflect.Array {
		return nil
	}
	cpy := make([]string, rv.Len())
	if len(cpy) > 0 {
		_ = reflect.Copy(reflect.ValueOf(cpy), rv)
	}
	return cpy
}
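Both versions of these `As*Slice` helpers copy a reflect-wrapped array into a fresh slice; the newer form avoids constructing an array type at runtime. A sketch of that simpler path:

```go
package main

import (
	"fmt"
	"reflect"
)

// asInt64Slice mirrors the newer helper: accept any value, return a copy of
// its elements when it is an int64 array, nil otherwise.
func asInt64Slice(v interface{}) []int64 {
	rv := reflect.ValueOf(v)
	if rv.Kind() != reflect.Array || rv.Type().Elem().Kind() != reflect.Int64 {
		return nil
	}
	cpy := make([]int64, rv.Len())
	if len(cpy) > 0 {
		_ = reflect.Copy(reflect.ValueOf(cpy), rv)
	}
	return cpy
}

func main() {
	fmt.Println(asInt64Slice([3]int64{1, 2, 3})) // [1 2 3]
	fmt.Println(asInt64Slice("not an array"))    // []
}
```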
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go
index ae92a42516..a8eccf3a72 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go
@@ -13,7 +13,11 @@ import (

// unwrapper unwraps to return the underlying instrument implementation.
type unwrapper interface {
	unwrap() metric.Observable
}

type afCounter struct {
@@ -40,7 +44,11 @@ func (i *afCounter) setDelegate(m metric.Meter) {
	i.delegate.Store(ctr)
}

func (i *afCounter) unwrap() metric.Observable {
	if ctr := i.delegate.Load(); ctr != nil {
		return ctr.(metric.Float64ObservableCounter)
	}
@@ -71,7 +79,11 @@ func (i *afUpDownCounter) setDelegate(m metric.Meter) {
	i.delegate.Store(ctr)
}

func (i *afUpDownCounter) unwrap() metric.Observable {
	if ctr := i.delegate.Load(); ctr != nil {
		return ctr.(metric.Float64ObservableUpDownCounter)
	}
@@ -102,7 +114,11 @@ func (i *afGauge) setDelegate(m metric.Meter) {
	i.delegate.Store(ctr)
}

func (i *afGauge) unwrap() metric.Observable {
	if ctr := i.delegate.Load(); ctr != nil {
		return ctr.(metric.Float64ObservableGauge)
	}
@@ -133,7 +149,11 @@ func (i *aiCounter) setDelegate(m metric.Meter) {
	i.delegate.Store(ctr)
}

func (i *aiCounter) unwrap() metric.Observable {
	if ctr := i.delegate.Load(); ctr != nil {
		return ctr.(metric.Int64ObservableCounter)
	}
@@ -164,7 +184,11 @@ func (i *aiUpDownCounter) setDelegate(m metric.Meter) {
	i.delegate.Store(ctr)
}

func (i *aiUpDownCounter) unwrap() metric.Observable {
	if ctr := i.delegate.Load(); ctr != nil {
		return ctr.(metric.Int64ObservableUpDownCounter)
	}
@@ -195,7 +219,11 @@ func (i *aiGauge) setDelegate(m metric.Meter) {
	i.delegate.Store(ctr)
}

func (i *aiGauge) unwrap() metric.Observable {
	if ctr := i.delegate.Load(); ctr != nil {
		return ctr.(metric.Int64ObservableGauge)
	}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go
index a6acd8dca6..d4f63d9389 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go
@@ -5,9 +5,14 @@ package global // import "go.opentelemetry.io/otel/internal/global"

import (
	"container/list"
	"context"
	"reflect"
	"sync"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/embedded"
@@ -67,7 +72,10 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter {
		name:    name,
		version: c.InstrumentationVersion(),
		schema:  c.SchemaURL(),
		attrs:   c.InstrumentationAttributes(),
	}

	if p.meters == nil {
@@ -78,7 +86,11 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter {
		return val
	}

	t := &meter{name: name, opts: opts, instruments: make(map[instID]delegatedInstrument)}
	p.meters[key] = t
	return t
}
@@ -94,17 +106,26 @@ type meter struct {
	opts []metric.MeterOption

	mtx         sync.Mutex
	instruments map[instID]delegatedInstrument

	registry list.List

	delegate metric.Meter
}

type delegatedInstrument interface {
	setDelegate(metric.Meter)
}

// instID are the identifying properties of an instrument.
type instID struct {
	// name is the name of the stream.
@@ -117,6 +138,8 @@ type instID struct {
	unit string
}

// setDelegate configures m to delegate all Meter functionality to Meters
// created by provider.
//
@@ -124,12 +147,21 @@ type instID struct {
// It is guaranteed by the caller that this happens only once.
func (m *meter) setDelegate(provider metric.MeterProvider) {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	meter := provider.Meter(m.name, m.opts...)
	m.delegate = meter

	for _, inst := range m.instruments {
		inst.setDelegate(meter)
	}
@@ -147,6 +179,7 @@ func (m *meter) setDelegate(provider metric.MeterProvider) {
}

func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) {
	m.mtx.Lock()
	defer m.mtx.Unlock()

@@ -166,10 +199,20 @@ func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) {
	}
	i := &siCounter{name: name, opts: options}
	m.instruments[id] = i
	return i, nil
}

func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
	m.mtx.Lock()
	defer m.mtx.Unlock()

@@ -189,10 +232,20 @@ func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
	}
	i := &siUpDownCounter{name: name, opts: options}
	m.instruments[id] = i
	return i, nil
}

func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
	m.mtx.Lock()
	defer m.mtx.Unlock()

@@ -212,10 +265,20 @@ func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
	}
	i := &siHistogram{name: name, opts: options}
	m.instruments[id] = i
	return i, nil
}

func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) {
	m.mtx.Lock()
	defer m.mtx.Unlock()

@@ -235,10 +298,20 @@ func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) {
	}
	i := &siGauge{name: name, opts: options}
	m.instruments[id] = i
	return i, nil
}

func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
	m.mtx.Lock()
	defer m.mtx.Unlock()

@@ -258,10 +331,20 @@ func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
	}
	i := &aiCounter{name: name, opts: options}
	m.instruments[id] = i
	return i, nil
}

func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
	m.mtx.Lock()
	defer m.mtx.Unlock()

@@ -281,10 +364,20 @@ func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
	}
	i := &aiUpDownCounter{name: name, opts: options}
	m.instruments[id] = i
	return i, nil
}

func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
	m.mtx.Lock()
	defer m.mtx.Unlock()

@@ -304,10 +397,20 @@ func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
	}
	i := &aiGauge{name: name, opts: options}
	m.instruments[id] = i
	return i, nil
}

func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) {
	m.mtx.Lock()
	defer m.mtx.Unlock()

@@ -327,10 +430,20 @@ func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) {
	}
	i := &sfCounter{name: name, opts: options}
	m.instruments[id] = i
	return i, nil
}

func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
	m.mtx.Lock()
	defer m.mtx.Unlock()

@@ -350,10 +463,20 @@ func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
	}
	i := &sfUpDownCounter{name: name, opts: options}
	m.instruments[id] = i
	return i, nil
}

func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
	m.mtx.Lock()
	defer m.mtx.Unlock()

@@ -373,10 +496,20 @@ func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
	}
	i := &sfHistogram{name: name, opts: options}
	m.instruments[id] = i
	return i, nil
}

func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
	m.mtx.Lock()
	defer m.mtx.Unlock()

@@ -396,10 +529,20 @@ func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
	}
	i := &sfGauge{name: name, opts: options}
	m.instruments[id] = i
	return i, nil
}

func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
	m.mtx.Lock()
	defer m.mtx.Unlock()

@@ -419,10 +562,20 @@ func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
	}
	i := &afCounter{name: name, opts: options}
	m.instruments[id] = i
	return i, nil
}

func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
	m.mtx.Lock()
	defer m.mtx.Unlock()

@@ -442,10 +595,20 @@ func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
	}
	i := &afUpDownCounter{name: name, opts: options}
	m.instruments[id] = i
	return i, nil
}

func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
	m.mtx.Lock()
	defer m.mtx.Unlock()

@@ -465,11 +628,21 @@ func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
	}
	i := &afGauge{name: name, opts: options}
	m.instruments[id] = i
	return i, nil
}

// RegisterCallback captures the function that will be called during Collect.
func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) {
	m.mtx.Lock()
	defer m.mtx.Unlock()

@@ -477,6 +650,16 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) {
		return m.delegate.RegisterCallback(unwrapCallback(f), unwrapInstruments(insts)...)
	}

	reg := &registration{instruments: insts, function: f}
	e := m.registry.PushBack(reg)
	reg.unreg = func() error {
@@ -488,11 +671,22 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) {
	return reg, nil
}

func unwrapInstruments(instruments []metric.Observable) []metric.Observable {
	out := make([]metric.Observable, 0, len(instruments))

	for _, inst := range instruments {
		if in, ok := inst.(unwrapper); ok {
			out = append(out, in.unwrap())
		} else {
			out = append(out, inst)
		}
@@ -512,6 +706,7 @@ type registration struct {
	unregMu sync.Mutex
}

type unwrapObs struct {
	embedded.Observer
	obs metric.Observer
@@ -567,6 +762,11 @@ func unwrapCallback(f metric.Callback) metric.Callback {
}

func (c *registration) setDelegate(m metric.Meter) {
	c.unregMu.Lock()
	defer c.unregMu.Unlock()

@@ -575,10 +775,16 @@ func (c *registration) setDelegate(m metric.Meter) {
	reg, err := m.RegisterCallback(unwrapCallback(c.function), unwrapInstruments(c.instruments)...)
	if err != nil {
		GetErrorHandler().Handle(err)
		return
	}

	c.unreg = reg.Unregister
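The meter code above follows one pattern throughout: while no SDK delegate is installed, created instruments are recorded under a mutex; once `setDelegate` runs, everything is replayed onto the real meter. A stripped-down sketch of that lifecycle, with illustrative types rather than the vendored API:

```go
package main

import (
	"fmt"
	"sync"
)

type counter interface{ Add(int64) }

type realCounter struct{ total int64 }

func (c *realCounter) Add(v int64) { c.total += v }

// delegatingCounter buffers measurements until a delegate is installed,
// then forwards everything to it.
type delegatingCounter struct {
	mu       sync.Mutex
	delegate counter
	pending  int64
}

func (d *delegatingCounter) Add(v int64) {
	d.mu.Lock()
	defer d.mu.Unlock()
	if d.delegate != nil {
		d.delegate.Add(v)
		return
	}
	d.pending += v // held until setDelegate replays it
}

func (d *delegatingCounter) setDelegate(c counter) {
	d.mu.Lock()
	defer d.mu.Unlock()
	c.Add(d.pending)
	d.delegate = c
}

func main() {
	d := &delegatingCounter{}
	d.Add(2) // before the SDK exists
	r := &realCounter{}
	d.setDelegate(r)
	d.Add(3) // forwarded directly
	fmt.Println(r.total) // 5
}
```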
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
index 8982aa0dc5..2b0194ebab 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
@@ -25,7 +25,10 @@ import (
	"sync"
	"sync/atomic"

	"go.opentelemetry.io/auto/sdk"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/trace"
@@ -88,7 +91,10 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
		name:    name,
		version: c.InstrumentationVersion(),
		schema:  c.SchemaURL(),
		attrs:   c.InstrumentationAttributes(),
	}

	if p.tracers == nil {
@@ -104,12 +110,16 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
	return t
}

type il struct {
	name    string
	version string
	schema  string
	attrs   attribute.Set
}

// tracer is a placeholder for a trace.Tracer.
//
@@ -146,6 +156,7 @@ func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
		return delegate.(trace.Tracer).Start(ctx, name, opts...)
	}

	return t.newSpan(ctx, autoInstEnabled, name, opts)
}

@@ -170,6 +181,8 @@ func (t *tracer) newSpan(ctx context.Context, autoSpan *bool, name string, opts []trace.SpanStartOption) (context.Context, trace.Span) {
		return tracer.Start(ctx, name, opts...)
	}

	s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t}
	ctx = trace.ContextWithSpan(ctx, s)
	return ctx, s
diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
index b2fe3e41d3..b543d512a0 100644
--- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
+++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
@@ -20,8 +20,12 @@ func RawToBool(r uint64) bool {
}

func Int64ToRaw(i int64) uint64 {
	// Assumes original was a valid int64 (overflow not checked).
	return uint64(i) // nolint: gosec
}

func RawToInt64(r uint64) int64 {
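These raw helpers store an int64 measurement in a uint64 slot via two's-complement conversion; a tiny sketch showing the round trip is lossless, including for negative values:

```go
package main

import "fmt"

func int64ToRaw(i int64) uint64 { return uint64(i) }
func rawToInt64(r uint64) int64 { return int64(r) }

func main() {
	v := int64(-42)
	// The bit pattern is preserved, so the reverse conversion recovers v.
	fmt.Println(rawToInt64(int64ToRaw(v)) == v) // true
}
```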
diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
index f8435d8f28..a965f5d1ed 100644
--- a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
+++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
@@ -213,7 +213,11 @@ type Float64Observer interface {
}

// Float64Callback is a function registered with a Meter that makes
// observations for a Float64Observable instrument it is registered with.
// Calls to the Float64Observer record measurement values for the
// Float64Observable.
//
diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
index e079aaef16..a3778661a9 100644
--- a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
+++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
@@ -212,7 +212,11 @@ type Int64Observer interface {
}

// Int64Callback is a function registered with a Meter that makes observations
// for an Int64Observable instrument it is registered with. Calls to the
// Int64Observer record measurement values for the Int64Observable.
//
// The function needs to complete in a finite amount of time and the deadline
diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go
index a535782e1d..3e1a2ad63a 100644
--- a/vendor/go.opentelemetry.io/otel/metric/instrument.go
+++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go
@@ -351,7 +351,11 @@ func WithAttributeSet(attributes attribute.Set) MeasurementOption {
//
//	cp := make([]attribute.KeyValue, len(attributes))
//	copy(cp, attributes)
//	WithAttributeSet(attribute.NewSet(cp...))
//
// [attribute.NewSet] may modify the passed attributes so this will make a copy
// of attributes before creating a set in order to ensure this function is
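The doc comment above recommends copying before `attribute.NewSet`, since the constructor may reorder and deduplicate its argument in place. A runnable version of that advice:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	attrs := []attribute.KeyValue{
		attribute.String("k1", "v1"),
		attribute.Int("k2", 2),
	}
	// Copy first when the original slice must stay untouched.
	cp := make([]attribute.KeyValue, len(attrs))
	copy(cp, attrs)
	set := attribute.NewSet(cp...)
	fmt.Println(set.Len()) // 2
}
```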
diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json
index 4f80c898a1..ab0e0aa0e5 100644
--- a/vendor/go.opentelemetry.io/otel/renovate.json
+++ b/vendor/go.opentelemetry.io/otel/renovate.json
@@ -15,12 +15,19 @@
      "enabled": true
    },
    {
      "matchPackageNames": ["google.golang.org/genproto/googleapis/**"],
      "groupName": "googleapis"
    },
    {
      "matchPackageNames": ["golang.org/x/**"],
      "groupName": "golang.org/x"
    }
  ]
}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go
index 34852a47b2..f852f747b4 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go
@@ -3,8 +3,11 @@

package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"

import "go.opentelemetry.io/otel/attribute"

// Scope represents the instrumentation scope.
type Scope struct {
	// Name is the name of the instrumentation scope. This should be the
@@ -14,6 +17,9 @@ type Scope struct {
	Version string
	// SchemaURL of the telemetry emitted by the scope.
	SchemaURL string
	// Attributes of the telemetry emitted by the scope.
	Attributes attribute.Set
}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go
index 203cd9d650..2e7d9fc38a 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go
@@ -5,6 +5,7 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric"

import (
	"context"
	"errors"
	"os"
	"strings"
@@ -12,15 +13,26 @@ import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/sdk/metric/exemplar"
	"go.opentelemetry.io/otel/sdk/resource"
)

// config contains configuration options for a MeterProvider.
type config struct {
	res            *resource.Resource
	readers        []Reader
	views          []View
	exemplarFilter exemplar.Filter
}

// readerSignals returns a force-flush and shutdown function for a
@@ -44,6 +56,7 @@ func (c config) readerSignals() (forceFlush, shutdown func(context.Context) error) {
// value.
func unify(funcs []func(context.Context) error) func(context.Context) error {
	return func(ctx context.Context) error {
		var err error
		for _, f := range funcs {
			if e := f(ctx); e != nil {
@@ -51,6 +64,27 @@ func unify(funcs []func(context.Context) error) func(context.Context) error {
			}
		}
		return err
	}
}

@@ -68,6 +102,7 @@ func unifyShutdown(funcs []func(context.Context) error) func(context.Context) error {

// newConfig returns a config configured with options.
func newConfig(options []Option) config {
	conf := config{
		res:            resource.Default(),
		exemplarFilter: exemplar.TraceBasedFilter,
	}
	for _, o := range meterProviderOptionsFromEnv() {
		conf = o.apply(conf)
	}
	for _, o := range options {
		conf = o.apply(conf)
	}
@@ -102,11 +140,15 @@ func (o optionFunc) apply(conf config) config {
// go.opentelemetry.io/otel/sdk/resource package will be used.
func WithResource(res *resource.Resource) Option {
	return optionFunc(func(conf config) config {
		var err error
		conf.res, err = resource.Merge(resource.Environment(), res)
		if err != nil {
			otel.Handle(err)
		}
		return conf
	})
}
@@ -138,6 +180,7 @@ func WithView(views ...View) Option {
		return cfg
	})
}

// WithExemplarFilter configures the exemplar filter.
//
@@ -170,3 +213,5 @@ func meterProviderOptionsFromEnv() []Option {
	}
	return opts
}
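The `unify` helper above folds several shutdown-style functions into one; the combination step elided by the hunk can be expressed with `errors.Join` (a sketch of the pattern, not the exact vendored line):

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// unify runs each function and combines their errors into one.
func unify(funcs []func(context.Context) error) func(context.Context) error {
	return func(ctx context.Context) error {
		var err error
		for _, f := range funcs {
			if e := f(ctx); e != nil {
				err = errors.Join(err, e)
			}
		}
		return err
	}
}

func main() {
	f := unify([]func(context.Context) error{
		func(context.Context) error { return nil },
		func(context.Context) error { return errors.New("boom") },
	})
	fmt.Println(f(context.Background())) // boom
}
```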
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go
index 0335b8ae48..12feb98427 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go
@@ -4,6 +4,7 @@
package metric // import "go.opentelemetry.io/otel/sdk/metric"

import (
	"runtime"

	"go.opentelemetry.io/otel/attribute"
@@ -41,12 +42,57 @@ func reservoirFunc[N int64 | float64](provider exemplar.ReservoirProvider, filter
// guarantees are made on the shape or statistical properties of returned
// exemplars.
func DefaultExemplarReservoirProviderSelector(agg Aggregation) exemplar.ReservoirProvider {
	// https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/metrics/sdk.md#exemplar-defaults
	// Explicit bucket histogram aggregation with more than 1 bucket will
	// use AlignedHistogramBucketExemplarReservoir.
	a, ok := agg.(AggregationExplicitBucketHistogram)
	if ok && len(a.Boundaries) > 0 {
		return exemplar.HistogramReservoirProvider(a.Boundaries)
	}

	var n int
@@ -73,5 +119,11 @@
	}

	return exemplar.FixedSizeReservoirProvider(n)
}
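`FixedSizeReservoirProvider` caps how many exemplars are kept per data point. The classic fixed-size reservoir-sampling idea behind such a cap, as a standalone sketch (not the SDK's actual implementation):

```go
package main

import (
	"fmt"
	"math/rand"
)

// fixedReservoir keeps at most k of the values offered to it; after n offers,
// each value has probability k/n of being retained (Algorithm R).
type fixedReservoir struct {
	k     int
	seen  int
	store []int
}

func (r *fixedReservoir) Offer(v int) {
	r.seen++
	if len(r.store) < r.k {
		r.store = append(r.store, v)
		return
	}
	if j := rand.Intn(r.seen); j < r.k {
		r.store[j] = v // replace a random existing entry
	}
}

func main() {
	r := &fixedReservoir{k: 4}
	for i := 0; i < 100; i++ {
		r.Offer(i)
	}
	fmt.Println(r.store) // 4 values sampled uniformly from 0..99
}
```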
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go
index 48b723a7b3..6e75e67687 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go
@@ -144,12 +144,15 @@ type Stream struct {
	// Use NewAllowKeysFilter from "go.opentelemetry.io/otel/attribute" to
	// provide an allow-list of attribute keys here.
	AttributeFilter attribute.Filter
	// ExemplarReservoirProvider selects the
	// [go.opentelemetry.io/otel/sdk/metric/exemplar.ReservoirProvider] based
	// on the [Aggregation].
	//
	// If unspecified, [DefaultExemplarReservoirProviderSelector] is used.
	ExemplarReservoirProviderSelector ExemplarReservoirProviderSelector
}

// instID are the identifying properties of an instrument.
@@ -240,8 +243,13 @@ func (i *float64Inst) aggregate(ctx context.Context, val float64, s attribute.Set) {
	}
}

// observableID is a comparable unique identifier of an observable.
type observableID[N int64 | float64] struct {
	name        string
	description string
	kind        InstrumentKind
@@ -293,7 +301,11 @@ func newInt64Observable(m *meter, kind InstrumentKind, name, desc, u string) int64Observable {

type observable[N int64 | float64] struct {
	metric.Observable
	observableID[N]

	meter    *meter
	measures measures[N]
@@ -302,7 +314,11 @@ type observable[N int64 | float64] struct {

func newObservable[N int64 | float64](m *meter, kind InstrumentKind, name, desc, u string) *observable[N] {
	return &observable[N]{
		observableID: observableID[N]{
			name:        name,
			description: desc,
			kind:        kind,
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go
index fde2193338..deeec091e9 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go
@@ -8,6 +8,10 @@ import (
	"time"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

@@ -37,8 +41,13 @@ type Builder[N int64 | float64] struct {
	// create new exemplar reservoirs for a new seen attribute set.
	//
	// If this is not provided a default factory function that returns a
	// dropReservoir reservoir will be used.
	ReservoirFunc func(attribute.Set) FilteredExemplarReservoir[N]

	// AggregationLimit is the cardinality limit of measurement attributes. Any
	// measurement for new attributes once the limit has been reached will be
	// aggregated into a single aggregate for the "otel.metric.overflow"
@@ -49,12 +58,20 @@
	AggregationLimit int
}

func (b Builder[N]) resFunc() func(attribute.Set) FilteredExemplarReservoir[N] {
	if b.ReservoirFunc != nil {
		return b.ReservoirFunc
	}

	return dropReservoir
}

type fltrMeasure[N int64 | float64] func(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go
index 25d709948e..6397dbf47e 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go
@@ -6,7 +6,11 @@ package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"

import (
	"sync"

	"go.opentelemetry.io/otel/sdk/metric/exemplar"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

@@ -17,7 +21,10 @@ var exemplarPool = sync.Pool{
func collectExemplars[N int64 | float64](out *[]metricdata.Exemplar[N], f func(*[]exemplar.Exemplar)) {
	dest := exemplarPool.Get().(*[]exemplar.Exemplar)
	defer func() {
		clear(*dest) // Erase elements to let GC collect objects.
		*dest = (*dest)[:0]
		exemplarPool.Put(dest)
	}()
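`collectExemplars` recycles its scratch slice through a `sync.Pool`, clearing the elements before returning it so the garbage collector can reclaim whatever they reference. The same pattern in isolation:

```go
package main

import (
	"fmt"
	"sync"
)

var pool = sync.Pool{New: func() any {
	p := make([]int, 0, 8)
	return &p
}}

func use() int {
	dest := pool.Get().(*[]int)
	defer func() {
		clear(*dest)        // erase elements so they can be collected
		*dest = (*dest)[:0] // keep capacity, drop length
		pool.Put(dest)
	}()
	*dest = append(*dest, 1, 2, 3)
	return len(*dest)
}

func main() { fmt.Println(use()) } // 3
```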
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
index b7aa721651..f1729ac727 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
@@ -12,6 +12,10 @@ import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

@@ -30,7 +34,11 @@ const (
// expoHistogramDataPoint is a single data point in an exponential histogram.
type expoHistogramDataPoint[N int64 | float64] struct {
	attrs attribute.Set
	res   FilteredExemplarReservoir[N]

	count uint64
	min   N
@@ -283,7 +291,11 @@ func (b *expoBuckets) downscale(delta int32) {
// newExponentialHistogram returns an Aggregator that summarizes a set of
// measurements as an exponential histogram. Each histogram is scoped by attributes
// and the aggregation cycle the measurements were made in.
func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *expoHistogram[N] {
	return &expoHistogram[N]{
		noSum:    noSum,
		noMinMax: noMinMax,
@@ -306,7 +318,11 @@ type expoHistogram[N int64 | float64] struct {
	maxSize  int
	maxScale int32

	newRes   func(attribute.Set) FilteredExemplarReservoir[N]
	limit    limiter[*expoHistogramDataPoint[N]]
	values   map[attribute.Distinct]*expoHistogramDataPoint[N]
	valuesMu sync.Mutex
@@ -327,7 +343,11 @@ func (e *expoHistogram[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
	v, ok := e.values[attr.Equivalent()]
	if !ok {
		v = newExpoHistogramDataPoint[N](attr, e.maxSize, e.maxScale, e.noMinMax, e.noSum)
		v.res = e.newRes(attr)

		e.values[attr.Equivalent()] = v
	}
b.min, b.max = value, value @@ -108,7 +128,11 @@ func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute // newHistogram returns an Aggregator that summarizes a set of measurements as // an histogram. +<<<<<<< HEAD func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *histogram[N] { +======= +func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *histogram[N] { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return &histogram[N]{ histValues: newHistValues[N](boundaries, noSum, limit, r), noMinMax: noMinMax, diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go index d3a93f085c..d0f1ad6f18 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go @@ -9,6 +9,10 @@ import ( "time" "go.opentelemetry.io/otel/attribute" +<<<<<<< HEAD +======= + "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go.opentelemetry.io/otel/sdk/metric/metricdata" ) @@ -16,10 +20,17 @@ import ( type datapoint[N int64 | float64] struct { attrs attribute.Set value N +<<<<<<< HEAD res FilteredExemplarReservoir[N] } func newLastValue[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *lastValue[N] { +======= + res exemplar.FilteredReservoir[N] +} + +func newLastValue[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *lastValue[N] { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return &lastValue[N]{ newRes: r, limit: newLimiter[datapoint[N]](limit), @@ -32,7 +43,11 @@ func newLastValue[N int64 | float64](limit int, r func(attribute.Set) FilteredEx type lastValue[N int64 | float64] struct { sync.Mutex +<<<<<<< HEAD newRes func(attribute.Set) FilteredExemplarReservoir[N] +======= + newRes func() exemplar.FilteredReservoir[N] +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) limit limiter[datapoint[N]] values map[attribute.Distinct]datapoint[N] start time.Time @@ -45,7 +60,11 @@ func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute. attr := s.limit.Attributes(fltrAttr, s.values) d, ok := s.values[attr.Equivalent()] if !ok { +<<<<<<< HEAD d.res = s.newRes(attr) +======= + d.res = s.newRes() +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } d.attrs = attr @@ -114,7 +133,11 @@ func (s *lastValue[N]) copyDpts(dest *[]metricdata.DataPoint[N], t time.Time) in // newPrecomputedLastValue returns an aggregator that summarizes a set of // observations as the last one made. 
+<<<<<<< HEAD func newPrecomputedLastValue[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *precomputedLastValue[N] { +======= +func newPrecomputedLastValue[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *precomputedLastValue[N] { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return &precomputedLastValue[N]{lastValue: newLastValue[N](limit, r)} } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go index 8e132ad618..cd098b02ac 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go @@ -9,24 +9,40 @@ import ( "time" "go.opentelemetry.io/otel/attribute" +<<<<<<< HEAD +======= + "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go.opentelemetry.io/otel/sdk/metric/metricdata" ) type sumValue[N int64 | float64] struct { n N +<<<<<<< HEAD res FilteredExemplarReservoir[N] +======= + res exemplar.FilteredReservoir[N] +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) attrs attribute.Set } // valueMap is the storage for sums. type valueMap[N int64 | float64] struct { sync.Mutex +<<<<<<< HEAD newRes func(attribute.Set) FilteredExemplarReservoir[N] +======= + newRes func() exemplar.FilteredReservoir[N] +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) limit limiter[sumValue[N]] values map[attribute.Distinct]sumValue[N] } +<<<<<<< HEAD func newValueMap[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *valueMap[N] { +======= +func newValueMap[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *valueMap[N] { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return &valueMap[N]{ newRes: r, limit: newLimiter[sumValue[N]](limit), @@ -41,7 +57,11 @@ func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.S attr := s.limit.Attributes(fltrAttr, s.values) v, ok := s.values[attr.Equivalent()] if !ok { +<<<<<<< HEAD v.res = s.newRes(attr) +======= + v.res = s.newRes() +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } v.attrs = attr @@ -54,7 +74,11 @@ func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.S // newSum returns an aggregator that summarizes a set of measurements as their // arithmetic sum. Each sum is scoped by attributes and the aggregation cycle // the measurements were made in. +<<<<<<< HEAD func newSum[N int64 | float64](monotonic bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *sum[N] { +======= +func newSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.FilteredReservoir[N]) *sum[N] { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return &sum[N]{ valueMap: newValueMap[N](limit, r), monotonic: monotonic, @@ -141,9 +165,15 @@ func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int { } // newPrecomputedSum returns an aggregator that summarizes a set of +<<<<<<< HEAD // observations as their arithmetic sum. Each sum is scoped by attributes and // the aggregation cycle the measurements were made in. func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *precomputedSum[N] { +======= +// observatrions as their arithmetic sum. Each sum is scoped by attributes and +// the aggregation cycle the measurements were made in. 
+func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.FilteredReservoir[N]) *precomputedSum[N] { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return &precomputedSum[N]{ valueMap: newValueMap[N](limit, r), monotonic: monotonic, @@ -151,7 +181,11 @@ func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func(attr } } +<<<<<<< HEAD // precomputedSum summarizes a set of observations as their arithmetic sum. +======= +// precomputedSum summarizes a set of observatrions as their arithmetic sum. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type precomputedSum[N int64 | float64] struct { *valueMap[N] diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go new file mode 100644 index 0000000000..5394f48e0d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go @@ -0,0 +1,6 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package exemplar provides an implementation of the OpenTelemetry exemplar +// reservoir to be used in metric collection pipelines. +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go new file mode 100644 index 0000000000..5a0f39ae14 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go @@ -0,0 +1,23 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" +) + +// Drop returns a [FilteredReservoir] that drops all measurements it is offered. +func Drop[N int64 | float64]() FilteredReservoir[N] { return &dropRes[N]{} } + +type dropRes[N int64 | float64] struct{} + +// Offer does nothing, all measurements offered will be dropped. +func (r *dropRes[N]) Offer(context.Context, N, []attribute.KeyValue) {} + +// Collect resets dest. No exemplars will ever be returned. +func (r *dropRes[N]) Collect(dest *[]Exemplar) { + *dest = (*dest)[:0] +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go new file mode 100644 index 0000000000..fcaa6a4697 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go @@ -0,0 +1,29 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// Exemplar is a measurement sampled from a timeseries providing a typical +// example. +type Exemplar struct { + // FilteredAttributes are the attributes recorded with the measurement but + // filtered out of the timeseries' aggregated data. + FilteredAttributes []attribute.KeyValue + // Time is the time when the measurement was recorded. + Time time.Time + // Value is the measured value. + Value Value + // SpanID is the ID of the span that was active during the measurement. If + // no span was active or the span was not sampled this will be empty. + SpanID []byte `json:",omitempty"` + // TraceID is the ID of the trace the active span belonged to during the + // measurement. 
If no span was active or the span was not sampled this will + // be empty. + TraceID []byte `json:",omitempty"` +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go new file mode 100644 index 0000000000..152a069a09 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go @@ -0,0 +1,29 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + + "go.opentelemetry.io/otel/trace" +) + +// Filter determines if a measurement should be offered. +// +// The passed ctx needs to contain any baggage or span that were active +// when the measurement was made. This information may be used by the +// Reservoir in making a sampling decision. +type Filter func(context.Context) bool + +// SampledFilter is a [Filter] that will only offer measurements +// if the passed context associated with the measurement contains a sampled +// [go.opentelemetry.io/otel/trace.SpanContext]. +func SampledFilter(ctx context.Context) bool { + return trace.SpanContextFromContext(ctx).IsSampled() +} + +// AlwaysOnFilter is a [Filter] that always offers measurements. +func AlwaysOnFilter(ctx context.Context) bool { + return true +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go new file mode 100644 index 0000000000..9fedfa4be6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go @@ -0,0 +1,49 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// FilteredReservoir wraps a [Reservoir] with a filter. +type FilteredReservoir[N int64 | float64] interface { + // Offer accepts the parameters associated with a measurement. The + // parameters will be stored as an exemplar if the filter decides to + // sample the measurement. + // + // The passed ctx needs to contain any baggage or span that were active + // when the measurement was made. This information may be used by the + // Reservoir in making a sampling decision. + Offer(ctx context.Context, val N, attr []attribute.KeyValue) + // Collect returns all the held exemplars in the reservoir. + Collect(dest *[]Exemplar) +} + +// filteredReservoir handles the pre-sampled exemplar of measurements made. +type filteredReservoir[N int64 | float64] struct { + filter Filter + reservoir Reservoir +} + +// NewFilteredReservoir creates a [FilteredReservoir] which only offers values +// that are allowed by the filter. +func NewFilteredReservoir[N int64 | float64](f Filter, r Reservoir) FilteredReservoir[N] { + return &filteredReservoir[N]{ + filter: f, + reservoir: r, + } +} + +func (f *filteredReservoir[N]) Offer(ctx context.Context, val N, attr []attribute.KeyValue) { + if f.filter(ctx) { + // only record the current time if we are sampling this measurement.
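// Illustrative usage sketch, not part of the vendored file (fr, ctx, and
// dest are hypothetical): composing a filter with a reservoir so only
// measurements made inside a sampled span reach the underlying storage.
//
//	fr := NewFilteredReservoir[int64](SampledFilter, FixedSize(4))
//	fr.Offer(ctx, 42, nil) // stored only if ctx carries a sampled span
//	var dest []Exemplar
//	fr.Collect(&dest)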
+ f.reservoir.Offer(ctx, time.Now(), NewValue(val), attr) + } +} + +func (f *filteredReservoir[N]) Collect(dest *[]Exemplar) { f.reservoir.Collect(dest) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go new file mode 100644 index 0000000000..a6ff86d027 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go @@ -0,0 +1,46 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + "slices" + "sort" + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// Histogram returns a [Reservoir] that samples the last measurement that falls +// within a histogram bucket. The histogram bucket upper-boundaries are defined +// by bounds. +// +// The passed bounds will be sorted by this function. +func Histogram(bounds []float64) Reservoir { + slices.Sort(bounds) + return &histRes{ + bounds: bounds, + storage: newStorage(len(bounds) + 1), + } +} + +type histRes struct { + *storage + + // bounds are bucket bounds in ascending order. + bounds []float64 +} + +func (r *histRes) Offer(ctx context.Context, t time.Time, v Value, a []attribute.KeyValue) { + var x float64 + switch v.Type() { + case Int64ValueType: + x = float64(v.Int64()) + case Float64ValueType: + x = v.Float64() + default: + panic("unknown value type") + } + r.store[sort.SearchFloat64s(r.bounds, x)] = newMeasurement(ctx, t, v, a) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go new file mode 100644 index 0000000000..199a2608f7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go @@ -0,0 +1,191 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + "math" + "math/rand" + "sync" + "time" + + "go.opentelemetry.io/otel/attribute" +) + +var ( + // rng is used to make sampling decisions. + // + // Do not use crypto/rand. There is no reason for the decrease in performance + // given this is not a security sensitive decision. + rng = rand.New(rand.NewSource(time.Now().UnixNano())) + // Ensure concurrent safe access to rng and its underlying source. + rngMu sync.Mutex +) + +// random returns, as a float64, a uniform pseudo-random number in the open +// interval (0.0,1.0). +func random() float64 { + // TODO: This does not return a uniform number. rng.Float64 returns a + // uniformly random int in [0,2^53) that is divided by 2^53. Meaning it + // returns multiples of 2^-53, and not all floating point numbers between 0 + // and 1 (i.e. for values less than 2^-4 the 4 last bits of the significand + // are always going to be 0). + // + // An alternative algorithm should be considered that will actually return + // a uniform number in the interval (0,1). For example, since the default + // rand source provides a uniform distribution for Int63, this can be + // converted following the prototypical code of Mersenne Twister 64 (Takuji + // Nishimura and Makoto Matsumoto: + // http://www.math.sci.hiroshima-u.ac.jp/m-mat/MT/VERSIONS/C-LANG/mt19937-64.c) + // + // (float64(rng.Int63()>>11) + 0.5) * (1.0 / 4503599627370496.0) + // + // There are likely many other methods to explore here as well.
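// Hedged sketch of the alternative mapping referenced above (randomOpen is
// a hypothetical helper, not part of this file): Int63 yields 63 uniform
// bits; keeping the top 52, adding 0.5, and dividing by 2^52
// (4503599627370496) lands strictly inside (0,1).
//
//	func randomOpen() float64 {
//	    return (float64(rng.Int63()>>11) + 0.5) * (1.0 / 4503599627370496.0)
//	}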
+ + rngMu.Lock() + defer rngMu.Unlock() + + f := rng.Float64() + for f == 0 { + f = rng.Float64() + } + return f +} + +// FixedSize returns a [Reservoir] that samples at most k exemplars. If there +// are k or fewer measurements made, the Reservoir will sample each one. If +// there are more than k, the Reservoir will then randomly sample all +// additional measurements with a decreasing probability. +func FixedSize(k int) Reservoir { + r := &randRes{storage: newStorage(k)} + r.reset() + return r +} + +type randRes struct { + *storage + + // count is the number of measurements seen. + count int64 + // next is the next count that will store a measurement at a random index + // once the reservoir has been filled. + next int64 + // w is the largest random number in a distribution that is used to compute + // the next value of next. + w float64 +} + +func (r *randRes) Offer(ctx context.Context, t time.Time, n Value, a []attribute.KeyValue) { + // The following algorithm is "Algorithm L" from Li, Kim-Hung (4 December + // 1994). "Reservoir-Sampling Algorithms of Time Complexity + // O(n(1+log(N/n)))". ACM Transactions on Mathematical Software. 20 (4): + // 481–493 (https://dl.acm.org/doi/10.1145/198429.198435). + // + // A high-level overview of "Algorithm L": + // 0) Pre-calculate the random count greater than the storage size when + // an exemplar will be replaced. + // 1) Accept all measurements offered until the configured storage size is + // reached. + // 2) Loop: + // a) When the pre-calculated count is reached, replace a random + // existing exemplar with the offered measurement. + // b) Calculate the next random count greater than the existing one + // which will replace another exemplar + // + // The way a "replacement" count is computed is by looking at `n` number of + // independent random numbers each corresponding to an offered measurement. + // Of these numbers the smallest `k` (the same size as the storage + // capacity) of them are kept as a subset. The maximum value in this + // subset, called `w`, is used to weight another random number generation + // for the next count that will be considered. + // + // By weighting the next count computation as described, it is able to + // perform a uniformly-weighted sampling algorithm based on the number of + // samples the reservoir has seen so far. The sampling will "slow down" as + // more and more samples are offered so as to reduce a bias towards those + // offered just prior to the end of the collection. + // + // This algorithm is preferred because of its balance of simplicity and + // performance. It will compute three random numbers (the bulk of + // computation time) for each item that becomes part of the reservoir, but + // it does not spend any time on items that do not. In particular it has an + // asymptotic runtime of O(k(1 + log(n/k))) where n is the number of + // measurements offered and k is the reservoir size. + // + // See https://en.wikipedia.org/wiki/Reservoir_sampling for an overview of + // this and other reservoir sampling algorithms. See + // https://github.com/MrAlias/reservoir-sampling for a performance + // comparison of reservoir sampling algorithms. + + if int(r.count) < cap(r.store) { + r.store[r.count] = newMeasurement(ctx, t, n, a) + } else { + if r.count == r.next { + // Overwrite a random existing measurement with the one offered. + idx := int(rng.Int63n(int64(cap(r.store)))) + r.store[idx] = newMeasurement(ctx, t, n, a) + r.advance() + } + } + r.count++ +} + +// reset resets r to the initial state.
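// Illustrative usage sketch, not part of the vendored file (r, ctx, and
// dest are hypothetical): a FixedSize reservoir retains at most k exemplars
// regardless of how many measurements are offered; Algorithm L above
// decides which offers replace earlier samples.
//
//	r := FixedSize(4)
//	for i := 0; i < 1000; i++ {
//	    r.Offer(ctx, time.Now(), NewValue(int64(i)), nil)
//	}
//	var dest []Exemplar
//	r.Collect(&dest) // at most 4 exemplars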
+func (r *randRes) reset() { + // This resets the number of exemplars known. + r.count = 0 + // Random index inserts should only happen after the storage is full. + r.next = int64(cap(r.store)) + + // Initial random number in the series used to generate r.next. + // + // This is set before r.advance to reset or initialize the random number + // series. Without doing so it would always be 0 or never restart a new + // random number series. + // + // This maps the uniform random number in (0,1) to a geometric distribution + // over the same interval. The mean of the distribution is inversely + // proportional to the storage capacity. + r.w = math.Exp(math.Log(random()) / float64(cap(r.store))) + + r.advance() +} + +// advance updates the count at which the offered measurement will overwrite an +// existing exemplar. +func (r *randRes) advance() { + // Calculate the next value in the random number series. + // + // The current value of r.w is based on the max of a distribution of random + // numbers (i.e. `w = max(u_1,u_2,...,u_k)` for `k` equal to the capacity + // of the storage and each `u` in the interval (0,w)). To calculate the + // next r.w we use the fact that when the next exemplar is selected to be + // included in the storage an existing one will be dropped, and the + // corresponding random number in the set used to calculate r.w will also + // be replaced. The replacement random number will also be within (0,w), + // therefore the next r.w will be based on the same distribution (i.e. + // `max(u_1,u_2,...,u_k)`). Therefore, we can sample the next r.w by + // computing the next random number `u` and take r.w as `w * u^(1/k)`. + r.w *= math.Exp(math.Log(random()) / float64(cap(r.store))) + // Use the new random number in the series to calculate the count of the + // next measurement that will be stored. + // + // Given 0 < r.w < 1, each iteration will result in subsequent r.w being + // smaller. This translates here into the next next being selected against + // a distribution with a higher mean (i.e. the expected value will increase + // and replacements become less likely) + // + // Important to note, the new r.next will always be at least 1 more than + // the last r.next. + r.next += int64(math.Log(random())/math.Log(1-r.w)) + 1 +} + +func (r *randRes) Collect(dest *[]Exemplar) { + r.storage.Collect(dest) + // Call reset here even though it will reset r.count and restart the random + // number series. This will persist any old exemplars as long as no new + // measurements are offered, but it will also prioritize those new + // measurements that are made over the older collection cycle ones. + r.reset() +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go new file mode 100644 index 0000000000..80fa59554f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go @@ -0,0 +1,32 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// Reservoir holds the sampled exemplar of measurements made. +type Reservoir interface { + // Offer accepts the parameters associated with a measurement. The + // parameters will be stored as an exemplar if the Reservoir decides to + // sample the measurement. 
+ // + // The passed ctx needs to contain any baggage or span that were active + // when the measurement was made. This information may be used by the + // Reservoir in making a sampling decision. + // + // The time t is the time when the measurement was made. The val and attr + // parameters are the value and dropped (filtered) attributes of the + // measurement respectively. + Offer(ctx context.Context, t time.Time, val Value, attr []attribute.KeyValue) + + // Collect returns all the held exemplars. + // + // The Reservoir state is preserved after this call. + Collect(dest *[]Exemplar) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go new file mode 100644 index 0000000000..10b2976f79 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go @@ -0,0 +1,95 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// storage is an exemplar storage for [Reservoir] implementations. +type storage struct { + // store are the measurements sampled. + // + // This does not use []metricdata.Exemplar because it potentially would + // require an allocation for trace and span IDs in the hot path of Offer. + store []measurement +} + +func newStorage(n int) *storage { + return &storage{store: make([]measurement, n)} +} + +// Collect returns all the held exemplars. +// +// The Reservoir state is preserved after this call. +func (r *storage) Collect(dest *[]Exemplar) { + *dest = reset(*dest, len(r.store), len(r.store)) + var n int + for _, m := range r.store { + if !m.valid { + continue + } + + m.Exemplar(&(*dest)[n]) + n++ + } + *dest = (*dest)[:n] +} + +// measurement is a measurement made by a telemetry system. +type measurement struct { + // FilteredAttributes are the attributes dropped during the measurement. + FilteredAttributes []attribute.KeyValue + // Time is the time when the measurement was made. + Time time.Time + // Value is the value of the measurement. + Value Value + // SpanContext is the SpanContext active when a measurement was made. + SpanContext trace.SpanContext + + valid bool +} + +// newMeasurement returns a new non-empty Measurement. +func newMeasurement(ctx context.Context, ts time.Time, v Value, droppedAttr []attribute.KeyValue) measurement { + return measurement{ + FilteredAttributes: droppedAttr, + Time: ts, + Value: v, + SpanContext: trace.SpanContextFromContext(ctx), + valid: true, + } +} + +// Exemplar returns m as an [Exemplar]. 
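// Illustrative sketch, not part of the vendored file (m, ctx, and e are
// hypothetical): converting a stored measurement into the exported Exemplar
// form; trace and span IDs are copied only when the recording context
// carried a span context with those IDs.
//
//	m := newMeasurement(ctx, time.Now(), NewValue(int64(7)), nil)
//	var e Exemplar
//	m.Exemplar(&e) // e.Value.Int64() == 7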
+func (m measurement) Exemplar(dest *Exemplar) { + dest.FilteredAttributes = m.FilteredAttributes + dest.Time = m.Time + dest.Value = m.Value + + if m.SpanContext.HasTraceID() { + traceID := m.SpanContext.TraceID() + dest.TraceID = traceID[:] + } else { + dest.TraceID = dest.TraceID[:0] + } + + if m.SpanContext.HasSpanID() { + spanID := m.SpanContext.SpanID() + dest.SpanID = spanID[:] + } else { + dest.SpanID = dest.SpanID[:0] + } +} + +func reset[T any](s []T, length, capacity int) []T { + if cap(s) < capacity { + return make([]T, length, capacity) + } + return s[:length] +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go new file mode 100644 index 0000000000..1957d6b1e3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go @@ -0,0 +1,58 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import "math" + +// ValueType identifies the type of value used in exemplar data. +type ValueType uint8 + +const ( + // UnknownValueType should not be used. It represents a misconfigured + // Value. + UnknownValueType ValueType = 0 + // Int64ValueType represents a Value with int64 data. + Int64ValueType ValueType = 1 + // Float64ValueType represents a Value with float64 data. + Float64ValueType ValueType = 2 +) + +// Value is the value of data held by an exemplar. +type Value struct { + t ValueType + val uint64 +} + +// NewValue returns a new [Value] for the provided value. +func NewValue[N int64 | float64](value N) Value { + switch v := any(value).(type) { + case int64: + return Value{t: Int64ValueType, val: uint64(v)} + case float64: + return Value{t: Float64ValueType, val: math.Float64bits(v)} + } + return Value{} +} + +// Type returns the [ValueType] of data held by v. +func (v Value) Type() ValueType { return v.t } + +// Int64 returns the value of v as an int64. If the ValueType of v is not an +// Int64ValueType, 0 is returned. +func (v Value) Int64() int64 { + if v.t == Int64ValueType { + // Assumes the correct int64 was stored in v.val based on type. + return int64(v.val) // nolint: gosec + } + return 0 +} + +// Float64 returns the value of v as an float64. If the ValueType of v is not +// an Float64ValueType, 0 is returned. +func (v Value) Float64() float64 { + if v.t == Float64ValueType { + return math.Float64frombits(v.val) + } + return 0 +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go index 0891993706..76dd4a00c7 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go @@ -10,6 +10,7 @@ package x // import "go.opentelemetry.io/otel/sdk/metric/internal/x" import ( "os" "strconv" +<<<<<<< HEAD ) // CardinalityLimit is an experimental feature flag that defines if @@ -27,6 +28,41 @@ var CardinalityLimit = newFeature("CARDINALITY_LIMIT", func(v string) (int, bool } return n, true }) +======= + "strings" +) + +var ( + // Exemplars is an experimental feature flag that defines if exemplars + // should be recorded for metric data-points. + // + // To enable this feature set the OTEL_GO_X_EXEMPLAR environment variable + // to the case-insensitive string value of "true" (i.e. "True" and "TRUE" + // will also enable this). 
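// For example (illustrative only; the binary name is hypothetical), a
// deployment could enable both experimental features at startup:
//
//	OTEL_GO_X_EXEMPLAR=true OTEL_GO_X_CARDINALITY_LIMIT=2000 ./controller
//
// where 2000 caps the attribute sets tracked per instrument and, per the
// parsing below, a value less than or equal to 0 disables the limit.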
+ Exemplars = newFeature("EXEMPLAR", func(v string) (string, bool) { + if strings.ToLower(v) == "true" { + return v, true + } + return "", false + }) + + // CardinalityLimit is an experimental feature flag that defines if + // cardinality limits should be applied to the recorded metric data-points. + // + // To enable this feature set the OTEL_GO_X_CARDINALITY_LIMIT environment + // variable to the integer limit value you want to use. + // + // Setting OTEL_GO_X_CARDINALITY_LIMIT to a value less than or equal to 0 + // will disable the cardinality limits. + CardinalityLimit = newFeature("CARDINALITY_LIMIT", func(v string) (int, bool) { + n, err := strconv.Atoi(v) + if err != nil { + return 0, false + } + return n, true + }) +) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Feature is an experimental feature control flag. It provides a uniform way // to interact with these feature flags and parse their values. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go index c495985bc2..5a93e5e9a0 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go @@ -113,17 +113,29 @@ func (mr *ManualReader) Collect(ctx context.Context, rm *metricdata.ResourceMetr if err != nil { return err } +<<<<<<< HEAD for _, producer := range mr.externalProducers.Load().([]Producer) { externalMetrics, e := producer.Produce(ctx) if e != nil { err = errors.Join(err, e) +======= + var errs []error + for _, producer := range mr.externalProducers.Load().([]Producer) { + externalMetrics, err := producer.Produce(ctx) + if err != nil { + errs = append(errs, err) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...) } global.Debug("ManualReader collection", "Data", rm) +<<<<<<< HEAD return err +======= + return unifyErrors(errs) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // MarshalLog returns logging data about the ManualReader. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go index 823cdf2c62..e51f9f6981 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go @@ -150,11 +150,14 @@ func (m *meter) int64ObservableInstrument(id Instrument, callbacks []metric.Int6 continue } inst.appendMeasures(in) +<<<<<<< HEAD // Add the measures to the pipeline. It is required to maintain // measures per pipeline to avoid calling the measure that // is not part of the pipeline. insert.pipeline.addInt64Measure(inst.observableID, in) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, cback := range callbacks { inst := int64Observer{measures: in} fn := cback @@ -314,11 +317,14 @@ func (m *meter) float64ObservableInstrument(id Instrument, callbacks []metric.Fl continue } inst.appendMeasures(in) +<<<<<<< HEAD // Add the measures to the pipeline. It is required to maintain // measures per pipeline to avoid calling the measure that // is not part of the pipeline. 
insert.pipeline.addFloat64Measure(inst.observableID, in) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, cback := range callbacks { inst := float64Observer{measures: in} fn := cback @@ -451,6 +457,7 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) return noopRegister{}, nil } +<<<<<<< HEAD var err error validInstruments := make([]metric.Observable, 0, len(insts)) for _, inst := range insts { @@ -473,17 +480,52 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) } validInstruments = append(validInstruments, inst) +======= + reg := newObserver() + var errs multierror + for _, inst := range insts { + // Unwrap any global. + if u, ok := inst.(interface { + Unwrap() metric.Observable + }); ok { + inst = u.Unwrap() + } + + switch o := inst.(type) { + case int64Observable: + if err := o.registerable(m); err != nil { + if !errors.Is(err, errEmptyAgg) { + errs.append(err) + } + continue + } + reg.registerInt64(o.observablID) + case float64Observable: + if err := o.registerable(m); err != nil { + if !errors.Is(err, errEmptyAgg) { + errs.append(err) + } + continue + } + reg.registerFloat64(o.observablID) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: // Instrument external to the SDK. return nil, fmt.Errorf("invalid observable: from different implementation") } } +<<<<<<< HEAD if len(validInstruments) == 0 { +======= + err := errs.errorOrNil() + if reg.len() == 0 { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // All insts use drop aggregation or are invalid. return noopRegister{}, err } +<<<<<<< HEAD unregs := make([]func(), len(m.pipes)) for ix, pipe := range m.pipes { reg := newObserver(pipe) @@ -502,11 +544,17 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) } return unregisterFuncs{f: unregs}, err +======= + // Some or all instruments were valid. + cback := func(ctx context.Context) error { return f(ctx, reg) } + return m.pipes.registerMultiCallback(cback), err +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type observer struct { embedded.Observer +<<<<<<< HEAD pipe *pipeline float64 map[observableID[float64]]struct{} int64 map[observableID[int64]]struct{} @@ -525,6 +573,28 @@ func (r observer) registerFloat64(id observableID[float64]) { } func (r observer) registerInt64(id observableID[int64]) { +======= + float64 map[observablID[float64]]struct{} + int64 map[observablID[int64]]struct{} +} + +func newObserver() observer { + return observer{ + float64: make(map[observablID[float64]]struct{}), + int64: make(map[observablID[int64]]struct{}), + } +} + +func (r observer) len() int { + return len(r.float64) + len(r.int64) +} + +func (r observer) registerFloat64(id observablID[float64]) { + r.float64[id] = struct{}{} +} + +func (r observer) registerInt64(id observablID[int64]) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) r.int64[id] = struct{}{} } @@ -538,12 +608,29 @@ func (r observer) ObserveFloat64(o metric.Float64Observable, v float64, opts ... switch conv := o.(type) { case float64Observable: oImpl = conv +<<<<<<< HEAD +======= + case interface { + Unwrap() metric.Observable + }: + // Unwrap any global. 
+ async := conv.Unwrap() + var ok bool + if oImpl, ok = async.(float64Observable); !ok { + global.Error(errUnknownObserver, "failed to record asynchronous") + return + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: global.Error(errUnknownObserver, "failed to record") return } +<<<<<<< HEAD if _, registered := r.float64[oImpl.observableID]; !registered { +======= + if _, registered := r.float64[oImpl.observablID]; !registered { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !oImpl.dropAggregation { global.Error(errUnregObserver, "failed to record", "name", oImpl.name, @@ -555,12 +642,16 @@ func (r observer) ObserveFloat64(o metric.Float64Observable, v float64, opts ... return } c := metric.NewObserveConfig(opts) +<<<<<<< HEAD // Access to r.pipe.float64Measure is already guarded by a lock in pipeline.produce. // TODO (#5946): Refactor pipeline and observable measures. measures := r.pipe.float64Measures[oImpl.observableID] for _, m := range measures { m(context.Background(), v, c.Attributes()) } +======= + oImpl.observe(v, c.Attributes()) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric.ObserveOption) { @@ -568,12 +659,29 @@ func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric switch conv := o.(type) { case int64Observable: oImpl = conv +<<<<<<< HEAD +======= + case interface { + Unwrap() metric.Observable + }: + // Unwrap any global. + async := conv.Unwrap() + var ok bool + if oImpl, ok = async.(int64Observable); !ok { + global.Error(errUnknownObserver, "failed to record asynchronous") + return + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: global.Error(errUnknownObserver, "failed to record") return } +<<<<<<< HEAD if _, registered := r.int64[oImpl.observableID]; !registered { +======= + if _, registered := r.int64[oImpl.observablID]; !registered { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !oImpl.dropAggregation { global.Error(errUnregObserver, "failed to record", "name", oImpl.name, @@ -585,12 +693,16 @@ func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric return } c := metric.NewObserveConfig(opts) +<<<<<<< HEAD // Access to r.pipe.int64Measures is already guarded b a lock in pipeline.produce. // TODO (#5946): Refactor pipeline and observable measures. 
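// Illustrative caller-side sketch using the public API (meter and gauge are
// hypothetical; gauge is assumed to be a previously created
// Int64Observable): the observer above is what such a callback receives.
//
//	reg, err := meter.RegisterCallback(func(ctx context.Context, o metric.Observer) error {
//	    o.ObserveInt64(gauge, 42)
//	    return nil
//	}, gauge)
//	if err == nil {
//	    defer reg.Unregister()
//	}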
measures := r.pipe.int64Measures[oImpl.observableID] for _, m := range measures { m(context.Background(), v, c.Attributes()) } +======= + oImpl.observe(v, c.Attributes()) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type noopRegister struct{ embedded.Registration } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go index dcd2182d9a..9ff2da33fe 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go @@ -251,17 +251,29 @@ func (r *PeriodicReader) collect(ctx context.Context, p interface{}, rm *metricd if err != nil { return err } +<<<<<<< HEAD for _, producer := range r.externalProducers.Load().([]Producer) { externalMetrics, e := producer.Produce(ctx) if e != nil { err = errors.Join(err, e) +======= + var errs []error + for _, producer := range r.externalProducers.Load().([]Producer) { + externalMetrics, err := producer.Produce(ctx) + if err != nil { + errs = append(errs, err) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...) } global.Debug("PeriodicReader collection", "Data", rm) +<<<<<<< HEAD return err +======= + return unifyErrors(errs) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // export exports metric data m using r's exporter. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go index 775e245261..63a25580cc 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go @@ -8,13 +8,23 @@ import ( "context" "errors" "fmt" +<<<<<<< HEAD +======= + "strings" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "sync" "sync/atomic" "go.opentelemetry.io/otel/internal/global" +<<<<<<< HEAD "go.opentelemetry.io/otel/metric/embedded" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/metric/exemplar" +======= + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" + "go.opentelemetry.io/otel/sdk/instrumentation" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go.opentelemetry.io/otel/sdk/metric/internal" "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" "go.opentelemetry.io/otel/sdk/metric/internal/x" @@ -37,17 +47,27 @@ type instrumentSync struct { compAgg aggregate.ComputeAggregation } +<<<<<<< HEAD func newPipeline(res *resource.Resource, reader Reader, views []View, exemplarFilter exemplar.Filter) *pipeline { +======= +func newPipeline(res *resource.Resource, reader Reader, views []View) *pipeline { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if res == nil { res = resource.Empty() } return &pipeline{ +<<<<<<< HEAD resource: res, reader: reader, views: views, int64Measures: map[observableID[int64]][]aggregate.Measure[int64]{}, float64Measures: map[observableID[float64]][]aggregate.Measure[float64]{}, exemplarFilter: exemplarFilter, +======= + resource: res, + reader: reader, + views: views, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // aggregations is lazy allocated when needed. 
} } @@ -65,6 +85,7 @@ type pipeline struct { views []View sync.Mutex +<<<<<<< HEAD int64Measures map[observableID[int64]][]aggregate.Measure[int64] float64Measures map[observableID[float64]][]aggregate.Measure[float64] aggregations map[instrumentation.Scope][]instrumentSync @@ -85,6 +106,11 @@ func (p *pipeline) addFloat64Measure(id observableID[float64], m []aggregate.Mea p.Lock() defer p.Unlock() p.float64Measures[id] = m +======= + aggregations map[instrumentation.Scope][]instrumentSync + callbacks []func(context.Context) error + multiCallbacks list.List +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // addSync adds the instrumentSync to pipeline p with scope. This method is not @@ -124,6 +150,7 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) p.Lock() defer p.Unlock() +<<<<<<< HEAD var err error for _, c := range p.callbacks { // TODO make the callbacks parallel. ( #3034 ) @@ -133,6 +160,16 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) if err := ctx.Err(); err != nil { rm.Resource = nil clear(rm.ScopeMetrics) // Erase elements to let GC collect objects. +======= + var errs multierror + for _, c := range p.callbacks { + // TODO make the callbacks parallel. ( #3034 ) + if err := c(ctx); err != nil { + errs.append(err) + } + if err := ctx.Err(); err != nil { + rm.Resource = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) rm.ScopeMetrics = rm.ScopeMetrics[:0] return err } @@ -140,13 +177,21 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) for e := p.multiCallbacks.Front(); e != nil; e = e.Next() { // TODO make the callbacks parallel. ( #3034 ) f := e.Value.(multiCallback) +<<<<<<< HEAD if e := f(ctx); e != nil { err = errors.Join(err, e) +======= + if err := f(ctx); err != nil { + errs.append(err) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if err := ctx.Err(); err != nil { // This means the context expired before we finished running callbacks. rm.Resource = nil +<<<<<<< HEAD clear(rm.ScopeMetrics) // Erase elements to let GC collect objects. +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) rm.ScopeMetrics = rm.ScopeMetrics[:0] return err } @@ -178,7 +223,11 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) rm.ScopeMetrics = rm.ScopeMetrics[:i] +<<<<<<< HEAD return err +======= + return errs.errorOrNil() +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // inserter facilitates inserting of new instruments from a single scope into a @@ -240,7 +289,11 @@ func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation) measures []aggregate.Measure[N] ) +<<<<<<< HEAD var err error +======= + errs := &multierror{wrapped: errCreatingAggregators} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) seen := make(map[uint64]struct{}) for _, v := range i.pipeline.views { stream, match := v(inst) @@ -248,9 +301,15 @@ func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation) continue } matched = true +<<<<<<< HEAD in, id, e := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation) if e != nil { err = errors.Join(err, e) +======= + in, id, err := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation) + if err != nil { + errs.append(err) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if in == nil { // Drop aggregation. 
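// A nil measure here means the matched view selected the drop aggregation.
// Illustrative view using the public API (the instrument name is
// hypothetical):
//
//	v := NewView(Instrument{Name: "noisy.counter"}, Stream{Aggregation: AggregationDrop{}})
//
// Measurements from instruments matching such a view are discarded by this
// branch.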
continue @@ -263,12 +322,17 @@ func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation) measures = append(measures, in) } +<<<<<<< HEAD if err != nil { err = errors.Join(errCreatingAggregators, err) } if matched { return measures, err +======= + if matched { + return measures, errs.errorOrNil() +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Apply implicit default view if no explicit matched. @@ -277,18 +341,28 @@ func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation) Description: inst.Description, Unit: inst.Unit, } +<<<<<<< HEAD in, _, e := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation) if e != nil { if err == nil { err = errCreatingAggregators } err = errors.Join(err, e) +======= + in, _, err := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation) + if err != nil { + errs.append(err) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if in != nil { // Ensured to have not seen given matched was false. measures = append(measures, in) } +<<<<<<< HEAD return measures, err +======= + return measures, errs.errorOrNil() +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // addCallback registers a single instrument callback to be run when @@ -357,9 +431,12 @@ func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind Instrum // The view explicitly requested the default aggregation. stream.Aggregation = DefaultAggregationSelector(kind) } +<<<<<<< HEAD if stream.ExemplarReservoirProviderSelector == nil { stream.ExemplarReservoirProviderSelector = DefaultExemplarReservoirProviderSelector } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err := isAggregatorCompatible(kind, stream.Aggregation); err != nil { return nil, 0, fmt.Errorf( @@ -380,7 +457,11 @@ func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind Instrum cv := i.aggregators.Lookup(normID, func() aggVal[N] { b := aggregate.Builder[N]{ Temporality: i.pipeline.reader.temporality(kind), +<<<<<<< HEAD ReservoirFunc: reservoirFunc[N](stream.ExemplarReservoirProviderSelector(stream.Aggregation), i.pipeline.exemplarFilter), +======= + ReservoirFunc: reservoirFunc[N](stream.Aggregation), +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } b.Filter = stream.AttributeFilter // A value less than or equal to zero will disable the aggregation @@ -583,16 +664,34 @@ func isAggregatorCompatible(kind InstrumentKind, agg Aggregation) error { // measurement. 
type pipelines []*pipeline +<<<<<<< HEAD func newPipelines(res *resource.Resource, readers []Reader, views []View, exemplarFilter exemplar.Filter) pipelines { pipes := make([]*pipeline, 0, len(readers)) for _, r := range readers { p := newPipeline(res, r, views, exemplarFilter) +======= +func newPipelines(res *resource.Resource, readers []Reader, views []View) pipelines { + pipes := make([]*pipeline, 0, len(readers)) + for _, r := range readers { + p := newPipeline(res, r, views) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) r.register(p) pipes = append(pipes, p) } return pipes } +<<<<<<< HEAD +======= +func (p pipelines) registerMultiCallback(c multiCallback) metric.Registration { + unregs := make([]func(), len(p)) + for i, pipe := range p { + unregs[i] = pipe.addMultiCallback(c) + } + return unregisterFuncs{f: unregs} +} + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type unregisterFuncs struct { embedded.Registration f []func() @@ -625,6 +724,7 @@ func newResolver[N int64 | float64](p pipelines, vc *cache[string, instID]) reso func (r resolver[N]) Aggregators(id Instrument) ([]aggregate.Measure[N], error) { var measures []aggregate.Measure[N] +<<<<<<< HEAD var err error for _, i := range r.inserters { in, e := i.Instrument(id, i.readerDefaultAggregation(id.Kind)) @@ -634,6 +734,17 @@ func (r resolver[N]) Aggregators(id Instrument) ([]aggregate.Measure[N], error) measures = append(measures, in...) } return measures, err +======= + errs := &multierror{} + for _, i := range r.inserters { + in, err := i.Instrument(id, i.readerDefaultAggregation(id.Kind)) + if err != nil { + errs.append(err) + } + measures = append(measures, in...) + } + return measures, errs.errorOrNil() +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // HistogramAggregators returns the histogram Aggregators that must be updated by the instrument @@ -642,13 +753,18 @@ func (r resolver[N]) Aggregators(id Instrument) ([]aggregate.Measure[N], error) func (r resolver[N]) HistogramAggregators(id Instrument, boundaries []float64) ([]aggregate.Measure[N], error) { var measures []aggregate.Measure[N] +<<<<<<< HEAD var err error +======= + errs := &multierror{} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, i := range r.inserters { agg := i.readerDefaultAggregation(id.Kind) if histAgg, ok := agg.(AggregationExplicitBucketHistogram); ok && len(boundaries) > 0 { histAgg.Boundaries = boundaries agg = histAgg } +<<<<<<< HEAD in, e := i.Instrument(id, agg) if e != nil { err = errors.Join(err, e) @@ -656,4 +772,32 @@ func (r resolver[N]) HistogramAggregators(id Instrument, boundaries []float64) ( measures = append(measures, in...) } return measures, err +======= + in, err := i.Instrument(id, agg) + if err != nil { + errs.append(err) + } + measures = append(measures, in...) 
+ } + return measures, errs.errorOrNil() +} + +type multierror struct { + wrapped error + errors []string +} + +func (m *multierror) errorOrNil() error { + if len(m.errors) == 0 { + return nil + } + if m.wrapped == nil { + return errors.New(strings.Join(m.errors, "; ")) + } + return fmt.Errorf("%w: %s", m.wrapped, strings.Join(m.errors, "; ")) +} + +func (m *multierror) append(err error) { + m.errors = append(m.errors, err.Error()) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go b/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go index 2fca89e5a8..5598a7a6c8 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go @@ -42,7 +42,11 @@ func NewMeterProvider(options ...Option) *MeterProvider { flush, sdown := conf.readerSignals() mp := &MeterProvider{ +<<<<<<< HEAD pipes: newPipelines(conf.res, conf.readers, conf.views, conf.exemplarFilter), +======= + pipes: newPipelines(conf.res, conf.readers, conf.views), +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) forceFlush: flush, shutdown: sdown, } @@ -76,17 +80,26 @@ func (mp *MeterProvider) Meter(name string, options ...metric.MeterOption) metri c := metric.NewMeterConfig(options...) s := instrumentation.Scope{ +<<<<<<< HEAD Name: name, Version: c.InstrumentationVersion(), SchemaURL: c.SchemaURL(), Attributes: c.InstrumentationAttributes(), +======= + Name: name, + Version: c.InstrumentationVersion(), + SchemaURL: c.SchemaURL(), +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } global.Info("Meter created", "Name", s.Name, "Version", s.Version, "SchemaURL", s.SchemaURL, +<<<<<<< HEAD "Attributes", s.Attributes, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) return mp.meters.Lookup(s, func() *meter { diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go index 6347060bf4..d4f76c9121 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go @@ -5,5 +5,9 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric" // version is the current release version of the metric SDK in use. 
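// Illustrative sketch, not part of the vendored file, of the WIP-side
// multierror defined above (the appended error is hypothetical): messages
// are joined with "; " and prefixed by the wrapped sentinel when one is
// set.
//
//	errs := &multierror{wrapped: errCreatingAggregators}
//	errs.append(errors.New("bad view"))
//	err := errs.errorOrNil() // non-nil; errors.Is(err, errCreatingAggregators) == true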
func version() string { +<<<<<<< HEAD return "1.32.0" +======= + return "1.29.0" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/view.go b/vendor/go.opentelemetry.io/otel/sdk/metric/view.go index 630890f426..7df11083ae 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/view.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/view.go @@ -96,12 +96,20 @@ func NewView(criteria Instrument, mask Stream) View { return func(i Instrument) (Stream, bool) { if matchFunc(i) { return Stream{ +<<<<<<< HEAD Name: nonZero(mask.Name, i.Name), Description: nonZero(mask.Description, i.Description), Unit: nonZero(mask.Unit, i.Unit), Aggregation: agg, AttributeFilter: mask.AttributeFilter, ExemplarReservoirProviderSelector: mask.ExemplarReservoirProviderSelector, +======= + Name: nonZero(mask.Name, i.Name), + Description: nonZero(mask.Description, i.Description), + Unit: nonZero(mask.Unit, i.Unit), + Aggregation: agg, + AttributeFilter: mask.AttributeFilter, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }, true } return Stream{}, false diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go index c02aeefdde..9efba5c021 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go @@ -7,6 +7,10 @@ import ( "context" "errors" "fmt" +<<<<<<< HEAD +======= + "strings" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // ErrPartialResource is returned by a detector when complete source @@ -56,15 +60,22 @@ func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) { // these errors will be returned. Otherwise, nil is returned. func detect(ctx context.Context, res *Resource, detectors []Detector) error { var ( +<<<<<<< HEAD r *Resource err error e error +======= + r *Resource + errs detectErrs + err error +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) for _, detector := range detectors { if detector == nil { continue } +<<<<<<< HEAD r, e = detector.Detect(ctx) if e != nil { err = errors.Join(err, e) @@ -75,10 +86,23 @@ func detect(ctx context.Context, res *Resource, detectors []Detector) error { r, e = Merge(res, r) if e != nil { err = errors.Join(err, e) +======= + r, err = detector.Detect(ctx) + if err != nil { + errs = append(errs, err) + if !errors.Is(err, ErrPartialResource) { + continue + } + } + r, err = Merge(res, r) + if err != nil { + errs = append(errs, err) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } *res = *r } +<<<<<<< HEAD if err != nil { if errors.Is(err, ErrSchemaURLConflict) { // If there has been a merge conflict, ensure the resource has no @@ -89,4 +113,41 @@ func detect(ctx context.Context, res *Resource, detectors []Detector) error { err = fmt.Errorf("error detecting resource: %w", err) } return err +======= + if len(errs) == 0 { + return nil + } + if errors.Is(errs, ErrSchemaURLConflict) { + // If there has been a merge conflict, ensure the resource has no + // schema URL. 
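// Illustrative note on the WIP-side detectErrs type defined just below
// (errA and errB are hypothetical): errors.Is checks the first element
// directly and reaches later ones through Unwrap, which returns the
// remainder of the slice.
//
//	errs := detectErrs{errA, errB}
//	errors.Is(errs, errA) // true
//	errors.Is(errs, errB) // true, via Unwrap() -> detectErrs{errB}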
+ res.schemaURL = "" + } + return errs +} + +type detectErrs []error + +func (e detectErrs) Error() string { + errStr := make([]string, len(e)) + for i, err := range e { + errStr[i] = fmt.Sprintf("* %s", err) + } + + format := "%d errors occurred detecting resource:\n\t%s" + return fmt.Sprintf(format, len(e), strings.Join(errStr, "\n\t")) +} + +func (e detectErrs) Unwrap() error { + switch len(e) { + case 0: + return nil + case 1: + return e[0] + } + return e[1:] +} + +func (e detectErrs) Is(target error) bool { + return len(e) != 0 && errors.Is(e[0], target) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go index cf3c88e15c..1e8588fb17 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -20,13 +20,23 @@ type ( // telemetrySDK is a Detector that provides information about // the OpenTelemetry SDK used. This Detector is included as a // builtin. If these resource attributes are not wanted, use +<<<<<<< HEAD // resource.New() to explicitly disable them. +======= + // the WithTelemetrySDK(nil) or WithoutBuiltin() options to + // explicitly disable them. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) telemetrySDK struct{} // host is a Detector that provides information about the host // being run on. This Detector is included as a builtin. If // these resource attributes are not wanted, use the +<<<<<<< HEAD // resource.New() to explicitly disable them. +======= + // WithHost(nil) or WithoutBuiltin() options to explicitly + // disable them. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) host struct{} stringDetector struct { diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go index 3677c83d7d..f308630856 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go @@ -10,16 +10,28 @@ import ( "golang.org/x/sys/windows/registry" ) +<<<<<<< HEAD // implements hostIDReader. type hostIDReaderWindows struct{} // read reads MachineGuid from the Windows registry key: // SOFTWARE\Microsoft\Cryptography. 
+======= +// implements hostIDReader +type hostIDReaderWindows struct{} + +// read reads MachineGuid from the windows registry key: +// SOFTWARE\Microsoft\Cryptography +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (*hostIDReaderWindows) read() (string, error) { k, err := registry.OpenKey( registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Cryptography`, registry.QUERY_VALUE|registry.WOW64_64KEY, ) +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return "", err } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go index a6a5a53c0e..b74a4fe847 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go @@ -17,6 +17,10 @@ import ( func platformOSDescription() (string, error) { k, err := registry.OpenKey( registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) +<<<<<<< HEAD +======= + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return "", err } diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index 6b40385107..38a1fae4d1 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -5,5 +5,9 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { +<<<<<<< HEAD return "1.34.0" +======= + return "1.29.0" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go index 9c0b720a4d..a0ff0a1660 100644 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -213,7 +213,11 @@ var _ SpanStartEventOption = attributeOption{} // WithAttributes adds the attributes related to a span life-cycle event. // These attributes are used to describe the work a Span represents when this +<<<<<<< HEAD // option is provided to a Span's start event. Otherwise, these +======= +// option is provided to a Span's start or end events. Otherwise, these +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // attributes provide additional information about the event being recorded // (e.g. error, state change, processing progress, system event). // diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go index 8c45a7107f..c92f47d1ec 100644 --- a/vendor/go.opentelemetry.io/otel/trace/context.go +++ b/vendor/go.opentelemetry.io/otel/trace/context.go @@ -22,7 +22,11 @@ func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Cont return ContextWithSpan(parent, nonRecordingSpan{sc: sc}) } +<<<<<<< HEAD // ContextWithRemoteSpanContext returns a copy of parent with rsc set explicitly +======= +// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicly +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // as a remote SpanContext and as the current Span. The Span implementation // that wraps rsc is non-recording and performs no operations other than to // return rsc as the SpanContext from the SpanContext method. 
diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh
new file mode 100644
index 0000000000..e57bf57fce
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/verify_examples.sh
@@ -0,0 +1,74 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+# SPDX-License-Identifier: Apache-2.0
+
+set -euo pipefail
+
+cd $(dirname $0)
+TOOLS_DIR=$(pwd)/.tools
+
+if [ -z "${GOPATH}" ] ; then
+	printf "GOPATH is not defined.\n"
+	exit -1
+fi
+
+if [ ! -d "${GOPATH}" ] ; then
+	printf "GOPATH ${GOPATH} is invalid \n"
+	exit -1
+fi
+
+# Pre-requisites
+if ! git diff --quiet; then \
+	git status
+	printf "\n\nError: working tree is not clean\n"
+	exit -1
+fi
+
+if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then
+	printf "$(git log -1)"
+	printf "\n\nError: HEAD is not pointing to a tagged version"
+fi
+
+make ${TOOLS_DIR}/gojq
+
+DIR_TMP="${GOPATH}/src/oteltmp/"
+rm -rf $DIR_TMP
+mkdir -p $DIR_TMP
+
+printf "Copy examples to ${DIR_TMP}\n"
+cp -a ./example ${DIR_TMP}
+
+# Update go.mod files
+printf "Update go.mod: rename module and remove replace\n"
+
+PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort)
+
+for dir in $PACKAGE_DIRS; do
+	printf "  Update go.mod for $dir\n"
+	(cd "${DIR_TMP}/${dir}" && \
+	 # replaces is ("mod1" "mod2" …)
+	 replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \
+	 # strip double quotes
+	 replaces=("${replaces[@]%\"}") && \
+	 replaces=("${replaces[@]#\"}") && \
+	 # make an array (-dropreplace=mod1 -dropreplace=mod2 …)
+	 dropreplaces=("${replaces[@]/#/-dropreplace=}") && \
+	 go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \
+	 go mod tidy)
+done
+printf "Update done:\n\n"
+
+# Build directories that contain main package. These directories are different than
+# directories that contain go.mod files.
+printf "Build examples:\n"
+EXAMPLES=$(./get_main_pkgs.sh ./example)
+for ex in $EXAMPLES; do
+	printf "  Build $ex in ${DIR_TMP}/${ex}\n"
+	(cd "${DIR_TMP}/${ex}" && \
+	 go build .)
+done
+
+# Cleanup
+printf "Remove copied files.\n"
+rm -rf $DIR_TMP
diff --git a/vendor/go.step.sm/crypto/LICENSE b/vendor/go.step.sm/crypto/LICENSE
new file mode 100644
index 0000000000..261eeb9e9f
--- /dev/null
+++ b/vendor/go.step.sm/crypto/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
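The diff below vendors the go.step.sm/crypto/fingerprint package. For orientation, a minimal sketch of how its API is typically called; the input bytes and the surrounding main package are illustrative assumptions, not part of the vendored code:

    package main

    import (
    	"crypto"
    	_ "crypto/sha256" // register SHA-256 so crypto.SHA256.Available() reports true
    	"fmt"

    	"go.step.sm/crypto/fingerprint"
    )

    func main() {
    	// Hash some bytes (e.g. a DER-encoded certificate) and render the
    	// digest in the package's default X.509 encoding, lowercase hex.
    	fp, err := fingerprint.New([]byte("illustrative input"), crypto.SHA256, fingerprint.HexFingerprint)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(fp)
    }

Note that New returns an error both when the hash is not registered and when the encoding value is unknown, so the error check covers more than the hashing step.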
diff --git a/vendor/go.step.sm/crypto/fingerprint/fingerprint.go b/vendor/go.step.sm/crypto/fingerprint/fingerprint.go new file mode 100644 index 0000000000..6eb174bdb6 --- /dev/null +++ b/vendor/go.step.sm/crypto/fingerprint/fingerprint.go @@ -0,0 +1,78 @@ +package fingerprint + +import ( + "crypto" + "encoding/base64" + "encoding/hex" + "fmt" + "strings" + + "go.step.sm/crypto/internal/emoji" +) + +// Encoding defines the supported encodings for certificates and key +// fingerprints. +// +// This type is the base for sshutil.FingerprintEncoding and +// x509util.FingerprintEncoding types. +type Encoding int + +const ( + // HexFingerprint represents the hex encoding of the fingerprint. + // + // This is the default encoding for an X.509 certificate. + HexFingerprint Encoding = iota + 1 + // Base64Fingerprint represents the base64 encoding of the fingerprint. + // + // This is the default encoding for a public key. + Base64Fingerprint + // Base64URLFingerprint represents the base64URL encoding of the fingerprint. + Base64URLFingerprint + // Base64RawFingerprint represents the base64RawStd encoding of the + // fingerprint. + // + // This is the default encoding for an SSH key and certificate. + Base64RawFingerprint + // Base64RawURLFingerprint represents the base64RawURL encoding of the fingerprint. + Base64RawURLFingerprint + // EmojiFingerprint represents the emoji encoding of the fingerprint. + EmojiFingerprint +) + +// New creates a fingerprint of the given data by hashing it and returns it in +// the encoding format. +func New(data []byte, h crypto.Hash, encoding Encoding) (string, error) { + if !h.Available() { + return "", fmt.Errorf("hash function %q is not available", h.String()) + } + hash := h.New() + if _, err := hash.Write(data); err != nil { + return "", fmt.Errorf("error creating hash: %w", err) + } + fp := Fingerprint(hash.Sum(nil), encoding) + if fp == "" { + return "", fmt.Errorf("unknown encoding value %d", encoding) + } + return fp, nil +} + +// Fingerprint encodes the given digest using the encoding format. If an invalid +// encoding is passed, the return value will be an empty string. +func Fingerprint(digest []byte, encoding Encoding) string { + switch encoding { + case HexFingerprint: + return strings.ToLower(hex.EncodeToString(digest)) + case Base64Fingerprint: + return base64.StdEncoding.EncodeToString(digest) + case Base64URLFingerprint: + return base64.URLEncoding.EncodeToString(digest) + case Base64RawFingerprint: + return base64.RawStdEncoding.EncodeToString(digest) + case Base64RawURLFingerprint: + return base64.RawURLEncoding.EncodeToString(digest) + case EmojiFingerprint: + return emoji.Emoji(digest) + default: + return "" + } +} diff --git a/vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/LICENSE b/vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/LICENSE new file mode 100644 index 0000000000..b99c5e3b98 --- /dev/null +++ b/vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2014 Dmitry Chestnykh +Copyright (c) 2010 The Go Authors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials + provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/README b/vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/README new file mode 100644 index 0000000000..fb0fc8b70f --- /dev/null +++ b/vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/README @@ -0,0 +1,22 @@ +Go implementation of bcrypt_pbkdf(3) from OpenBSD +(a variant of PBKDF2 with bcrypt-based PRF). + + +USAGE + + func Key(password, salt []byte, rounds, keyLen int) ([]byte, error) + + + Key derives a key from the password, salt and rounds count, returning a + []byte of length keyLen that can be used as cryptographic key. + + Remember to get a good random salt of at least 16 bytes. Using a higher + rounds count will increase the cost of an exhaustive search but will also + make derivation proportionally slower. + + +REFERENCES + +* https://github.com/dchest/bcrypt_pbkdf +* http://www.tedunangst.com/flak/post/bcrypt-pbkdf +* http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/lib/libutil/bcrypt_pbkdf.c diff --git a/vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/bcrypt_pbkdf.go b/vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/bcrypt_pbkdf.go new file mode 100644 index 0000000000..be443c8788 --- /dev/null +++ b/vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/bcrypt_pbkdf.go @@ -0,0 +1,100 @@ +// Copyright 2014 Dmitry Chestnykh. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bcrypt_pbkdf implements password-based key derivation function based +// on bcrypt compatible with bcrypt_pbkdf(3) from OpenBSD. +// +//nolint:revive,stylecheck // ignore underscore in package +package bcrypt_pbkdf + +import ( + "crypto/sha512" + "errors" + + // NOTE! Requires blowfish package version from Aug 1, 2014 or later. + // Will produce incorrect results if the package is older. + // See commit message for details: http://goo.gl/wx6g8O + //nolint:staticcheck // needs insecure package + "golang.org/x/crypto/blowfish" +) + +// Key derives a key from the password, salt and rounds count, returning a +// []byte of length keyLen that can be used as cryptographic key. +// +// Remember to get a good random salt of at least 16 bytes. Using a higher +// rounds count will increase the cost of an exhaustive search but will also +// make derivation proportionally slower. 
+func Key(password, salt []byte, rounds, keyLen int) ([]byte, error) { + if rounds < 1 { + return nil, errors.New("bcrypt_pbkdf: number of rounds is too small") + } + if len(password) == 0 { + return nil, errors.New("bcrypt_pbkdf: empty password") + } + if len(salt) == 0 || len(salt) > 1<<20 { + return nil, errors.New("bcrypt_pbkdf: bad salt length") + } + if keyLen > 1024 { + return nil, errors.New("bcrypt_pbkdf: keyLen is too large") + } + var shapass, shasalt [sha512.Size]byte + var out, tmp [32]byte + var cnt [4]byte + + numBlocks := (keyLen + len(out) - 1) / len(out) + key := make([]byte, numBlocks*len(out)) + + h := sha512.New() + h.Write(password) + h.Sum(shapass[:0]) + + for block := 1; block <= numBlocks; block++ { + h.Reset() + h.Write(salt) + cnt[0] = byte(block >> 24) + cnt[1] = byte(block >> 16) + cnt[2] = byte(block >> 8) + cnt[3] = byte(block) + h.Write(cnt[:]) + bcryptHash(tmp[:], shapass[:], h.Sum(shasalt[:0])) + copy(out[:], tmp[:]) + + for i := 2; i <= rounds; i++ { + h.Reset() + h.Write(tmp[:]) + bcryptHash(tmp[:], shapass[:], h.Sum(shasalt[:0])) + for j := 0; j < len(out); j++ { + out[j] ^= tmp[j] + } + } + + for i, v := range out { + key[i*numBlocks+(block-1)] = v + } + } + return key[:keyLen], nil +} + +var magic = []byte("OxychromaticBlowfishSwatDynamite") + +func bcryptHash(out, shapass, shasalt []byte) { + c, err := blowfish.NewSaltedCipher(shapass, shasalt) + if err != nil { + panic(err) + } + for i := 0; i < 64; i++ { + blowfish.ExpandKey(shasalt, c) + blowfish.ExpandKey(shapass, c) + } + copy(out, magic) + for i := 0; i < 32; i += 8 { + for j := 0; j < 64; j++ { + c.Encrypt(out[i:i+8], out[i:i+8]) + } + } + // Swap bytes due to different endianness. + for i := 0; i < 32; i += 4 { + out[i+3], out[i+2], out[i+1], out[i] = out[i], out[i+1], out[i+2], out[i+3] + } +} diff --git a/vendor/go.step.sm/crypto/internal/emoji/emoji.go b/vendor/go.step.sm/crypto/internal/emoji/emoji.go new file mode 100644 index 0000000000..7235cff1f5 --- /dev/null +++ b/vendor/go.step.sm/crypto/internal/emoji/emoji.go @@ -0,0 +1,274 @@ +package emoji + +import "strings" + +func Emoji(input []byte) string { + var b strings.Builder + for _, r := range input { + b.WriteString(emojiCodeMap[r]) + } + return b.String() +} + +// emojiCodeMap is a mapping from byte to emoji. +// +// The mapping is based on draft+2 of https://github.com/emojisum/emojisum. 
+// (see: https://github.com/emojisum/emojisum/releases/tag/draft%2B2) +var emojiCodeMap = []string{ + "\U0001f44d", // 👍 :+1: + "\U0001f3b1", // 🎱 :8ball: + "\u2708\ufe0f", // ✈️ :airplane: + "\U0001f47d", // 👽 :alien: + "\u2693", // ⚓ :anchor: + "\U0001f47c", // 👼 :angel: + "\U0001f620", // 😠 :angry: + "\U0001f41c", // 🐜 :ant: + "\U0001f34e", // 🍎 :apple: + "\U0001f3a8", // 🎨 :art: + "\U0001f476", // 👶 :baby: + "\U0001f37c", // 🍼 :baby_bottle: + "\U0001f519", // 🔙 :back: + "\U0001f38d", // 🎍 :bamboo: + "\U0001f34c", // 🍌 :banana: + "\U0001f488", // 💈 :barber: + "\U0001f6c1", // 🛁 :bathtub: + "\U0001f37a", // 🍺 :beer: + "\U0001f514", // 🔔 :bell: + "\U0001f6b4\u200d\u2642\ufe0f", // 🚴‍♂️ :bicyclist: + "\U0001f426", // 🐦 :bird: + "\U0001f382", // 🎂 :birthday: + "\U0001f33c", // 🌼 :blossom: + "\U0001f699", // 🚙 :blue_car: + "\U0001f417", // 🐗 :boar: + "\U0001f4a3", // 💣 :bomb: + "\U0001f4a5", // 💥 :boom: + "\U0001f647\u200d\u2642\ufe0f", // 🙇‍♂️ :bow: + "\U0001f466", // 👦 :boy: + "\U0001f494", // 💔 :broken_heart: + "\U0001f4a1", // 💡 :bulb: + "\U0001f68c", // 🚌 :bus: + "\U0001f335", // 🌵 :cactus: + "\U0001f4c6", // 📆 :calendar: + "\U0001f4f7", // 📷 :camera: + "\U0001f36c", // 🍬 :candy: + "\U0001f431", // 🐱 :cat: + "\U0001f352", // 🍒 :cherries: + "\U0001f6b8", // 🚸 :children_crossing: + "\U0001f36b", // 🍫 :chocolate_bar: + "\U0001f44f", // 👏 :clap: + "\u2601\ufe0f", // ☁️ :cloud: + "\u2663\ufe0f", // ♣️ :clubs: + "\U0001f1e8\U0001f1f3", // 🇨🇳 :cn: + "\u2615", // ☕ :coffee: + "\U0001f6a7", // 🚧 :construction: + "\U0001f36a", // 🍪 :cookie: + "\u00a9\ufe0f", // ©️ :copyright: + "\U0001f33d", // 🌽 :corn: + "\U0001f42e", // 🐮 :cow: + "\U0001f319", // 🌙 :crescent_moon: + "\U0001f451", // 👑 :crown: + "\U0001f622", // 😢 :cry: + "\U0001f52e", // 🔮 :crystal_ball: + "\u27b0", // ➰ :curly_loop: + "\U0001f46f\u200d\u2640\ufe0f", // 👯‍♀️ :dancers: + "\U0001f4a8", // 💨 :dash: + "\U0001f1e9\U0001f1ea", // 🇩🇪 :de: + "\u2666\ufe0f", // ♦️ :diamonds: + "\U0001f436", // 🐶 :dog: + "\U0001f369", // 🍩 :doughnut: + "\U0001f409", // 🐉 :dragon: + "\U0001f4c0", // 📀 :dvd: + "\U0001f442", // 👂 :ear: + "\U0001f346", // 🍆 :eggplant: + "\U0001f418", // 🐘 :elephant: + "\U0001f51a", // 🔚 :end: + "\u2709", // ✉ :envelope: + "\U0001f1ea\U0001f1f8", // 🇪🇸 :es: + "\U0001f440", // 👀 :eyes: + "\U0001f44a", // 👊 :facepunch: + "\U0001f468\u200d\U0001f469\u200d\U0001f466", // 👨‍👩‍👦 :family: + "\U0001f3a1", // 🎡 :ferris_wheel: + "\U0001f630", // 😰 :cold_sweat: + "\U0001f525", // 🔥 :fire: + "\U0001f386", // 🎆 :fireworks: + "\U0001f4be", // 💾 :floppy_disk: + "\U0001f3c8", // 🏈 :football: + "\U0001f374", // 🍴 :fork_and_knife: + "\U0001f340", // 🍀 :four_leaf_clover: + "\U0001f1eb\U0001f1f7", // 🇫🇷 :fr: + "\U0001f35f", // 🍟 :fries: + "\U0001f95c", // 🥜 :peanuts: + "\U0001f595", // 🖕 :fu: + "\U0001f315", // 🌕 :full_moon: + "\U0001f3b2", // 🎲 :game_die: + "\U0001f1ea\U0001f1fa", // 🇪🇺 :eu: + "\U0001f48e", // 💎 :gem: + "\U0001f467", // 👧 :girl: + "\U0001f410", // 🐐 :goat: + "\U0001f62c", // 😬 :grimacing: + "\U0001f601", // 😁 :grin: + "\U0001f482\u200d\u2642\ufe0f", // 💂‍♂️ :guardsman: + "\U0001f3b8", // 🎸 :guitar: + "\U0001f52b", // 🔫 :gun: + "\U0001f354", // 🍔 :hamburger: + "\U0001f528", // 🔨 :hammer: + "\U0001f439", // 🐹 :hamster: + "\U0001f649", // 🙉 :hear_no_evil: + "\u2764\ufe0f", // ❤️ :heart: + "\U0001f63b", // 😻 :heart_eyes_cat: + "\u2763\ufe0f", // ❣️ :heavy_heart_exclamation: + "\u2714\ufe0f", // ✔️ :heavy_check_mark: + "\U0001f5ff", // 🗿 :moyai: + "\U0001f3ee", // 🏮 :izakaya_lantern: + "\U0001f681", // 🚁 :helicopter: + 
"\U0001f52a", // 🔪 :hocho: + "\U0001f41d", // 🐝 :honeybee: + "\U0001f434", // 🐴 :horse: + "\U0001f3c7", // 🏇 :horse_racing: + "\u231b", // ⌛ :hourglass: + "\U0001f3e0", // 🏠 :house: + "\U0001f575\ufe0f\u200d\u2640\ufe0f", // 🕵️‍♀️ :female_detective: + "\U0001f366", // 🍦 :icecream: + "\U0001f47f", // 👿 :imp: + "\U0001f1ee\U0001f1f9", // 🇮🇹 :it: + "\U0001f383", // 🎃 :jack_o_lantern: + "\U0001f47a", // 👺 :japanese_goblin: + "\U0001f1ef\U0001f1f5", // 🇯🇵 :jp: + "\U0001f511", // 🔑 :key: + "\U0001f48b", // 💋 :kiss: + "\U0001f63d", // 😽 :kissing_cat: + "\U0001f428", // 🐨 :koala: + "\U0001f1f0\U0001f1f7", // 🇰🇷 :kr: + "\U0001f34b", // 🍋 :lemon: + "\U0001f484", // 💄 :lipstick: + "\U0001f512", // 🔒 :lock: + "\U0001f36d", // 🍭 :lollipop: + "\U0001f468", // 👨 :man: + "\U0001f341", // 🍁 :maple_leaf: + "\U0001f637", // 😷 :mask: + "\U0001f918", // 🤘 :metal: + "\U0001f52c", // 🔬 :microscope: + "\U0001f4b0", // 💰 :moneybag: + "\U0001f412", // 🐒 :monkey: + "\U0001f5fb", // 🗻 :mount_fuji: + "\U0001f4aa", // 💪 :muscle: + "\U0001f344", // 🍄 :mushroom: + "\U0001f3b9", // 🎹 :musical_keyboard: + "\U0001f3bc", // 🎼 :musical_score: + "\U0001f485", // 💅 :nail_care: + "\U0001f311", // 🌑 :new_moon: + "\u26d4", // ⛔ :no_entry: + "\U0001f443", // 👃 :nose: + "\U0001f39b\ufe0f", // 🎛️ :control_knobs: + "\U0001f529", // 🔩 :nut_and_bolt: + "\u2b55", // ⭕ :o: + "\U0001f30a", // 🌊 :ocean: + "\U0001f44c", // 👌 :ok_hand: + "\U0001f51b", // 🔛 :on: + "\U0001f4e6", // 📦 :package: + "\U0001f334", // 🌴 :palm_tree: + "\U0001f43c", // 🐼 :panda_face: + "\U0001f4ce", // 📎 :paperclip: + "\u26c5", // ⛅ :partly_sunny: + "\U0001f6c2", // 🛂 :passport_control: + "\U0001f43e", // 🐾 :paw_prints: + "\U0001f351", // 🍑 :peach: + "\U0001f427", // 🐧 :penguin: + "\u260e\ufe0f", // ☎️ :phone: + "\U0001f437", // 🐷 :pig: + "\U0001f48a", // 💊 :pill: + "\U0001f34d", // 🍍 :pineapple: + "\U0001f355", // 🍕 :pizza: + "\U0001f448", // 👈 :point_left: + "\U0001f449", // 👉 :point_right: + "\U0001f4a9", // 💩 :poop: + "\U0001f357", // 🍗 :poultry_leg: + "\U0001f64f", // 🙏 :pray: + "\U0001f478", // 👸 :princess: + "\U0001f45b", // 👛 :purse: + "\U0001f4cc", // 📌 :pushpin: + "\U0001f430", // 🐰 :rabbit: + "\U0001f308", // 🌈 :rainbow: + "\u270b", // ✋ :raised_hand: + "\u267b\ufe0f", // ♻️ :recycle: + "\U0001f697", // 🚗 :red_car: + "\u00ae\ufe0f", // ®️ :registered: + "\U0001f380", // 🎀 :ribbon: + "\U0001f35a", // 🍚 :rice: + "\U0001f680", // 🚀 :rocket: + "\U0001f3a2", // 🎢 :roller_coaster: + "\U0001f413", // 🐓 :rooster: + "\U0001f1f7\U0001f1fa", // 🇷🇺 :ru: + "\u26f5", // ⛵ :sailboat: + "\U0001f385", // 🎅 :santa: + "\U0001f6f0\ufe0f", // 🛰️ :satellite: + "\U0001f606", // 😆 :satisfied: + "\U0001f3b7", // 🎷 :saxophone: + "\u2702\ufe0f", // ✂️ :scissors: + "\U0001f648", // 🙈 :see_no_evil: + "\U0001f411", // 🐑 :sheep: + "\U0001f41a", // 🐚 :shell: + "\U0001f45e", // 👞 :shoe: + "\U0001f3bf", // 🎿 :ski: + "\U0001f480", // 💀 :skull: + "\U0001f62a", // 😪 :sleepy: + "\U0001f604", // 😄 :smile: + "\U0001f63a", // 😺 :smiley_cat: + "\U0001f60f", // 😏 :smirk: + "\U0001f6ac", // 🚬 :smoking: + "\U0001f40c", // 🐌 :snail: + "\U0001f40d", // 🐍 :snake: + "\u2744\ufe0f", // ❄️ :snowflake: + "\u26bd", // ⚽ :soccer: + "\U0001f51c", // 🔜 :soon: + "\U0001f47e", // 👾 :space_invader: + "\u2660\ufe0f", // ♠️ :spades: + "\U0001f64a", // 🙊 :speak_no_evil: + "\u2b50", // ⭐ :star: + "\u26f2", // ⛲ :fountain: + "\U0001f5fd", // 🗽 :statue_of_liberty: + "\U0001f682", // 🚂 :steam_locomotive: + "\U0001f33b", // 🌻 :sunflower: + "\U0001f60e", // 😎 :sunglasses: + "\u2600\ufe0f", // ☀️ :sunny: + "\U0001f305", // 🌅 
:sunrise: + "\U0001f3c4\u200d\u2642\ufe0f", // 🏄‍♂️ :surfer: + "\U0001f3ca\u200d\u2642\ufe0f", // 🏊‍♂️ :swimmer: + "\U0001f489", // 💉 :syringe: + "\U0001f389", // 🎉 :tada: + "\U0001f34a", // 🍊 :tangerine: + "\U0001f695", // 🚕 :taxi: + "\U0001f3be", // 🎾 :tennis: + "\u26fa", // ⛺ :tent: + "\U0001f4ad", // 💭 :thought_balloon: + "\u2122\ufe0f", // ™️ :tm: + "\U0001f6bd", // 🚽 :toilet: + "\U0001f445", // 👅 :tongue: + "\U0001f3a9", // 🎩 :tophat: + "\U0001f69c", // 🚜 :tractor: + "\U0001f68e", // 🚎 :trolleybus: + "\U0001f922", // 🤢 :nauseated_face: + "\U0001f3c6", // 🏆 :trophy: + "\U0001f3ba", // 🎺 :trumpet: + "\U0001f422", // 🐢 :turtle: + "\U0001f3a0", // 🎠 :carousel_horse: + "\U0001f46d", // 👭 :two_women_holding_hands: + "\U0001f1ec\U0001f1e7", // 🇬🇧 :uk: + "\u2602\ufe0f", // ☂️ :umbrella: + "\U0001f513", // 🔓 :unlock: + "\U0001f1fa\U0001f1f8", // 🇺🇸 :us: + "\u270c\ufe0f", // ✌️ :v: + "\U0001f4fc", // 📼 :vhs: + "\U0001f3bb", // 🎻 :violin: + "\u26a0\ufe0f", // ⚠️ :warning: + "\U0001f349", // 🍉 :watermelon: + "\U0001f44b", // 👋 :wave: + "\u3030\ufe0f", // 〰️ :wavy_dash: + "\U0001f6be", // 🚾 :wc: + "\u267f", // ♿ :wheelchair: + "\U0001f469", // 👩 :woman: + "\u274c", // ❌ :x: + "\U0001f60b", // 😋 :yum: + "\u26a1", // ⚡ :zap: + "\U0001f4a4", // 💤 :zzz: +} diff --git a/vendor/go.step.sm/crypto/internal/utils/io.go b/vendor/go.step.sm/crypto/internal/utils/io.go new file mode 100644 index 0000000000..ccccf5f94f --- /dev/null +++ b/vendor/go.step.sm/crypto/internal/utils/io.go @@ -0,0 +1,70 @@ +package utils + +import ( + "bytes" + "io" + "os" + "unicode" + + "github.com/pkg/errors" + + "go.step.sm/crypto/internal/utils/utfbom" +) + +func maybeUnwrap(err error) error { + if wrapped := errors.Unwrap(err); wrapped != nil { + return wrapped + } + return err +} + +// stdinFilename is the name of the file that is used in many command +// line utilities to denote input is to be read from STDIN. +const stdinFilename = "-" + +// stdin points to STDIN through os.Stdin. +var stdin = os.Stdin + +// ReadFile reads the file identified by filename and returns +// the contents. If filename is equal to "-", it will read from +// STDIN. +func ReadFile(filename string) (b []byte, err error) { + if filename == stdinFilename { + filename = "/dev/stdin" + b, err = io.ReadAll(stdin) + } else { + var contents []byte + contents, err = os.ReadFile(filename) + if err != nil { + return nil, errors.Wrapf(maybeUnwrap(err), "error reading %q", filename) + } + b, err = io.ReadAll(utfbom.SkipOnly(bytes.NewReader(contents))) + } + if err != nil { + return nil, errors.Wrapf(maybeUnwrap(err), "error reading %q", filename) + } + return +} + +// ReadPasswordFromFile reads and returns the password from the given filename. +// The contents of the file will be trimmed at the right. +func ReadPasswordFromFile(filename string) ([]byte, error) { + password, err := ReadFile(filename) + if err != nil { + return nil, errors.Wrapf(err, "error reading %s", filename) + } + password = bytes.TrimRightFunc(password, unicode.IsSpace) + return password, nil +} + +// WriteFile writes data to a file named by filename. +// If the file does not exist, WriteFile creates it with permissions perm +// (before umask); otherwise WriteFile truncates it before writing. +// +// It wraps os.WriteFile wrapping the errors. 
+func WriteFile(filename string, data []byte, perm os.FileMode) error { + if err := os.WriteFile(filename, data, perm); err != nil { + return errors.Wrapf(maybeUnwrap(err), "error writing %s", filename) + } + return nil +} diff --git a/vendor/go.step.sm/crypto/internal/utils/utfbom/LICENSE b/vendor/go.step.sm/crypto/internal/utils/utfbom/LICENSE new file mode 100644 index 0000000000..6279cb87f4 --- /dev/null +++ b/vendor/go.step.sm/crypto/internal/utils/utfbom/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2018-2020, Dmitrij Koniajev (dimchansky@gmail.com) + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.step.sm/crypto/internal/utils/utfbom/README.md b/vendor/go.step.sm/crypto/internal/utils/utfbom/README.md new file mode 100644 index 0000000000..8ece280089 --- /dev/null +++ b/vendor/go.step.sm/crypto/internal/utils/utfbom/README.md @@ -0,0 +1,66 @@ +# utfbom [![Godoc](https://godoc.org/github.com/dimchansky/utfbom?status.png)](https://godoc.org/github.com/dimchansky/utfbom) [![License](https://img.shields.io/:license-apache-blue.svg)](https://opensource.org/licenses/Apache-2.0) [![Build Status](https://travis-ci.org/dimchansky/utfbom.svg?branch=master)](https://travis-ci.org/dimchansky/utfbom) [![Go Report Card](https://goreportcard.com/badge/github.com/dimchansky/utfbom)](https://goreportcard.com/report/github.com/dimchansky/utfbom) [![Coverage Status](https://coveralls.io/repos/github/dimchansky/utfbom/badge.svg?branch=master)](https://coveralls.io/github/dimchansky/utfbom?branch=master) + +The package utfbom implements the detection of the BOM (Unicode Byte Order Mark) and removing as necessary. It can also return the encoding detected by the BOM. + +## Installation + + go get -u github.com/dimchansky/utfbom + +## Example + +```go +package main + +import ( + "bytes" + "fmt" + "io/ioutil" + + "github.com/dimchansky/utfbom" +) + +func main() { + trySkip([]byte("\xEF\xBB\xBFhello")) + trySkip([]byte("hello")) +} + +func trySkip(byteData []byte) { + fmt.Println("Input:", byteData) + + // just skip BOM + output, err := ioutil.ReadAll(utfbom.SkipOnly(bytes.NewReader(byteData))) + if err != nil { + fmt.Println(err) + return + } + fmt.Println("ReadAll with BOM skipping", output) + + // skip BOM and detect encoding + sr, enc := utfbom.Skip(bytes.NewReader(byteData)) + fmt.Printf("Detected encoding: %s\n", enc) + output, err = ioutil.ReadAll(sr) + if err != nil { + fmt.Println(err) + return + } + fmt.Println("ReadAll with BOM detection and skipping", output) + fmt.Println() +} +``` + +Output: + +``` +$ go run main.go +Input: [239 187 191 104 101 108 108 111] +ReadAll with BOM skipping [104 101 108 108 111] +Detected encoding: UTF8 +ReadAll with BOM detection and skipping [104 101 108 108 111] + +Input: [104 101 108 108 111] +ReadAll with BOM skipping [104 101 108 108 111] +Detected encoding: Unknown +ReadAll with BOM detection and skipping [104 101 108 108 111] +``` + + diff --git a/vendor/go.step.sm/crypto/internal/utils/utfbom/utfbom.go b/vendor/go.step.sm/crypto/internal/utils/utfbom/utfbom.go new file mode 100644 index 0000000000..93a144fd2c --- /dev/null +++ b/vendor/go.step.sm/crypto/internal/utils/utfbom/utfbom.go @@ -0,0 +1,195 @@ +// Package utfbom implements the detection of the BOM (Unicode Byte Order Mark) and removing as necessary. 
+// It wraps an io.Reader object, creating another object (Reader) that also implements the io.Reader +// interface but provides automatic BOM checking and removing as necessary. +// +// This package was copied from https://github.com/dimchansky/utfbom. Only minor changes +// were made to not depend on the io/ioutil package and to make our linters pass. +package utfbom + +import ( + "errors" + "io" +) + +// Encoding is type alias for detected UTF encoding. +type Encoding int + +// Constants to identify detected UTF encodings. +const ( + // Unknown encoding, returned when no BOM was detected + Unknown Encoding = iota + + // UTF8, BOM bytes: EF BB BF + UTF8 + + // UTF-16, big-endian, BOM bytes: FE FF + UTF16BigEndian + + // UTF-16, little-endian, BOM bytes: FF FE + UTF16LittleEndian + + // UTF-32, big-endian, BOM bytes: 00 00 FE FF + UTF32BigEndian + + // UTF-32, little-endian, BOM bytes: FF FE 00 00 + UTF32LittleEndian +) + +// String returns a user-friendly string representation of the encoding. Satisfies fmt.Stringer interface. +func (e Encoding) String() string { + switch e { + case UTF8: + return "UTF8" + case UTF16BigEndian: + return "UTF16BigEndian" + case UTF16LittleEndian: + return "UTF16LittleEndian" + case UTF32BigEndian: + return "UTF32BigEndian" + case UTF32LittleEndian: + return "UTF32LittleEndian" + default: + return "Unknown" + } +} + +const maxConsecutiveEmptyReads = 100 + +// Skip creates Reader which automatically detects BOM (Unicode Byte Order Mark) and removes it as necessary. +// It also returns the encoding detected by the BOM. +// If the detected encoding is not needed, you can call the SkipOnly function. +func Skip(rd io.Reader) (*Reader, Encoding) { + // Is it already a Reader? + b, ok := rd.(*Reader) + if ok { + return b, Unknown + } + + enc, left, err := detectUtf(rd) + return &Reader{ + rd: rd, + buf: left, + err: err, + }, enc +} + +// SkipOnly creates Reader which automatically detects BOM (Unicode Byte Order Mark) and removes it as necessary. +func SkipOnly(rd io.Reader) *Reader { + r, _ := Skip(rd) + return r +} + +// Reader implements automatic BOM (Unicode Byte Order Mark) checking and +// removing as necessary for an io.Reader object. +type Reader struct { + rd io.Reader // reader provided by the client + buf []byte // buffered data + err error // last error +} + +// Read is an implementation of io.Reader interface. +// The bytes are taken from the underlying Reader, but it checks for BOMs, removing them as necessary. 
+func (r *Reader) Read(p []byte) (n int, err error) { + if len(p) == 0 { + return 0, nil + } + + if r.buf == nil { + if r.err != nil { + return 0, r.readErr() + } + + return r.rd.Read(p) + } + + // copy as much as we can + n = copy(p, r.buf) + r.buf = nilIfEmpty(r.buf[n:]) + return n, nil +} + +func (r *Reader) readErr() error { + err := r.err + r.err = nil + return err +} + +var errNegativeRead = errors.New("utfbom: reader returned negative count from Read") + +func detectUtf(rd io.Reader) (enc Encoding, buf []byte, err error) { + buf, err = readBOM(rd) + + if len(buf) >= 4 { + if isUTF32BigEndianBOM4(buf) { + return UTF32BigEndian, nilIfEmpty(buf[4:]), err + } + if isUTF32LittleEndianBOM4(buf) { + return UTF32LittleEndian, nilIfEmpty(buf[4:]), err + } + } + + if len(buf) > 2 && isUTF8BOM3(buf) { + return UTF8, nilIfEmpty(buf[3:]), err + } + + if (err != nil && !errors.Is(err, io.EOF)) || (len(buf) < 2) { + return Unknown, nilIfEmpty(buf), err + } + + if isUTF16BigEndianBOM2(buf) { + return UTF16BigEndian, nilIfEmpty(buf[2:]), err + } + if isUTF16LittleEndianBOM2(buf) { + return UTF16LittleEndian, nilIfEmpty(buf[2:]), err + } + + return Unknown, nilIfEmpty(buf), err +} + +func readBOM(rd io.Reader) (buf []byte, err error) { + const maxBOMSize = 4 + var bom [maxBOMSize]byte // used to read BOM + + // read as many bytes as possible + for nEmpty, n := 0, 0; err == nil && len(buf) < maxBOMSize; buf = bom[:len(buf)+n] { //nolint:wastedassign // copied code + if n, err = rd.Read(bom[len(buf):]); n < 0 { + return nil, errNegativeRead + } + if n > 0 { + nEmpty = 0 + } else { + nEmpty++ + if nEmpty >= maxConsecutiveEmptyReads { + err = io.ErrNoProgress + } + } + } + return +} + +func isUTF32BigEndianBOM4(buf []byte) bool { + return buf[0] == 0x00 && buf[1] == 0x00 && buf[2] == 0xFE && buf[3] == 0xFF +} + +func isUTF32LittleEndianBOM4(buf []byte) bool { + return buf[0] == 0xFF && buf[1] == 0xFE && buf[2] == 0x00 && buf[3] == 0x00 +} + +func isUTF8BOM3(buf []byte) bool { + return buf[0] == 0xEF && buf[1] == 0xBB && buf[2] == 0xBF +} + +func isUTF16BigEndianBOM2(buf []byte) bool { + return buf[0] == 0xFE && buf[1] == 0xFF +} + +func isUTF16LittleEndianBOM2(buf []byte) bool { + return buf[0] == 0xFF && buf[1] == 0xFE +} + +func nilIfEmpty(buf []byte) (res []byte) { + if len(buf) > 0 { + res = buf + } + return +} diff --git a/vendor/go.step.sm/crypto/jose/encrypt.go b/vendor/go.step.sm/crypto/jose/encrypt.go new file mode 100644 index 0000000000..9b61a5f448 --- /dev/null +++ b/vendor/go.step.sm/crypto/jose/encrypt.go @@ -0,0 +1,135 @@ +package jose + +import ( + "encoding/json" + + "github.com/pkg/errors" + "go.step.sm/crypto/randutil" +) + +// MaxDecryptTries is the maximum number of attempts to decrypt a file. +const MaxDecryptTries = 3 + +// PasswordPrompter defines the function signature for the PromptPassword +// callback. +type PasswordPrompter func(s string) ([]byte, error) + +// PromptPassword is a method used to prompt for a password to decode encrypted +// keys. If this method is not defined and the key or password are not passed, +// the parse of the key will fail. +var PromptPassword PasswordPrompter + +// Encrypt returns the given data encrypted with the default encryption +// algorithm (PBES2-HS256+A128KW). +func Encrypt(data []byte, opts ...Option) (*JSONWebEncryption, error) { + ctx, err := new(context).apply(opts...) 
+	if err != nil {
+		return nil, err
+	}
+
+	var passphrase []byte
+	switch {
+	case len(ctx.password) > 0:
+		passphrase = ctx.password
+	case ctx.passwordPrompter != nil:
+		if passphrase, err = ctx.passwordPrompter(ctx.passwordPrompt); err != nil {
+			return nil, err
+		}
+	case PromptPassword != nil:
+		if passphrase, err = PromptPassword("Please enter the password to encrypt the data"); err != nil {
+			return nil, err
+		}
+	default:
+		return nil, errors.New("failed to encrypt the data: missing password")
+	}
+
+	salt, err := randutil.Salt(PBKDF2SaltSize)
+	if err != nil {
+		return nil, err
+	}
+
+	// Encrypt the data using PBES2
+	recipient := Recipient{
+		Algorithm:  PBES2_HS256_A128KW,
+		Key:        passphrase,
+		PBES2Count: PBKDF2Iterations,
+		PBES2Salt:  salt,
+	}
+
+	encrypterOptions := new(EncrypterOptions)
+	if ctx.contentType != "" {
+		encrypterOptions.WithContentType(ContentType(ctx.contentType))
+	}
+
+	encrypter, err := NewEncrypter(DefaultEncAlgorithm, recipient, encrypterOptions)
+	if err != nil {
+		return nil, errors.Wrap(err, "error creating cipher")
+	}
+
+	jwe, err := encrypter.Encrypt(data)
+	if err != nil {
+		return nil, errors.Wrap(err, "error encrypting data")
+	}
+
+	return jwe, nil
+}
+
+// EncryptJWK returns the given JWK encrypted with the default encryption
+// algorithm (PBES2-HS256+A128KW).
+func EncryptJWK(jwk *JSONWebKey, passphrase []byte) (*JSONWebEncryption, error) {
+	b, err := json.Marshal(jwk)
+	if err != nil {
+		return nil, errors.Wrap(err, "error marshaling JWK")
+	}
+
+	return Encrypt(b, WithPassword(passphrase), WithContentType("jwk+json"))
+}
+
+// Decrypt returns the decrypted version of the given data if it's encrypted;
+// it returns the raw data unchanged if it's not encrypted or the format is
+// not valid.
+func Decrypt(data []byte, opts ...Option) ([]byte, error) {
+	ctx, err := new(context).apply(opts...)
+	if err != nil {
+		return nil, err
+	}
+
+	enc, err := ParseEncrypted(string(data))
+	if err != nil {
+		return data, nil //nolint:nilerr // Return the given data if we cannot parse it as encrypted.
+	}
+
+	// Try with the given password.
+	if len(ctx.password) > 0 {
+		if data, err = enc.Decrypt(ctx.password); err == nil {
+			return data, nil
+		}
+		return nil, errors.New("failed to decrypt JWE: invalid password")
+	}
+
+	// Try with a given password prompter.
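+	// Up to MaxDecryptTries attempts are made: a prompter passed via options
+	// takes precedence, then the package-level PromptPassword callback is
+	// used (with a filename-specific prompt when one is available).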
+ if ctx.passwordPrompter != nil || PromptPassword != nil { + var pass []byte + for i := 0; i < MaxDecryptTries; i++ { + switch { + case ctx.passwordPrompter != nil: + if pass, err = ctx.passwordPrompter(ctx.passwordPrompt); err != nil { + return nil, err + } + case ctx.filename != "": + if pass, err = PromptPassword("Please enter the password to decrypt " + ctx.filename); err != nil { + return nil, err + } + default: + if pass, err = PromptPassword("Please enter the password to decrypt the JWE"); err != nil { + return nil, err + } + } + if data, err = enc.Decrypt(pass); err == nil { + return data, nil + } + } + } + + return nil, errors.New("failed to decrypt JWE: invalid password") +} diff --git a/vendor/go.step.sm/crypto/jose/generate.go b/vendor/go.step.sm/crypto/jose/generate.go new file mode 100644 index 0000000000..4bdc6c44dc --- /dev/null +++ b/vendor/go.step.sm/crypto/jose/generate.go @@ -0,0 +1,204 @@ +package jose + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + + "github.com/pkg/errors" + "go.step.sm/crypto/keyutil" + "go.step.sm/crypto/pemutil" + "go.step.sm/crypto/x25519" +) + +const ( + jwksUsageSig = "sig" + jwksUsageEnc = "enc" + // defaultKeyType is the default type of the one-time token key. + defaultKeyType = EC + // defaultKeyCurve is the default curve of the one-time token key. + defaultKeyCurve = P256 + // defaultKeyAlg is the default algorithm of the one-time token key. + defaultKeyAlg = ES256 + // defaultKeySize is the default size of the one-time token key. + defaultKeySize = 0 +) + +var ( + errAmbiguousCertKeyUsage = errors.New("jose/generate: certificate's key usage is ambiguous, it should be for signature or encipherment, but not both (use --subtle to ignore usage field)") + errNoCertKeyUsage = errors.New("jose/generate: certificate doesn't contain any key usage (use --subtle to ignore usage field)") +) + +// Thumbprint computes the JWK Thumbprint of a key using SHA256 as the hash +// algorithm. It returns the hash encoded in the Base64 raw url encoding. +func Thumbprint(jwk *JSONWebKey) (string, error) { + var sum []byte + var err error + switch key := jwk.Key.(type) { + case x25519.PublicKey: + sum, err = x25519Thumbprint(key, crypto.SHA256) + case x25519.PrivateKey: + var pub x25519.PublicKey + if pub, err = key.PublicKey(); err == nil { + sum, err = x25519Thumbprint(pub, crypto.SHA256) + } + case OpaqueSigner: + sum, err = key.Public().Thumbprint(crypto.SHA256) + default: + sum, err = jwk.Thumbprint(crypto.SHA256) + } + if err != nil { + return "", errors.Wrap(err, "error generating JWK thumbprint") + } + return base64.RawURLEncoding.EncodeToString(sum), nil +} + +// GenerateDefaultKeyPair generates an asymmetric public/private key pair. +// Returns the public key as a JWK and the private key as an encrypted JWE. 
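+//
+// A minimal usage sketch (the passphrase value is only an example):
+//
+//	pub, encPriv, err := GenerateDefaultKeyPair([]byte("example-passphrase"))
+//
+// pub is the public part as a *JSONWebKey, and encPriv is the private key
+// wrapped in a password-protected *JSONWebEncryption.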
+func GenerateDefaultKeyPair(passphrase []byte) (*JSONWebKey, *JSONWebEncryption, error) {
+	if len(passphrase) == 0 {
+		return nil, nil, errors.New("step-jose: password cannot be empty when encrypting a JWK")
+	}
+
+	// Generate the OTT key
+	jwk, err := GenerateJWK(defaultKeyType, defaultKeyCurve, defaultKeyAlg, jwksUsageSig, "", defaultKeySize)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	jwk.KeyID, err = Thumbprint(jwk)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	jwe, err := EncryptJWK(jwk, passphrase)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	public := jwk.Public()
+	return &public, jwe, nil
+}
+
+// GenerateJWK generates a JWK given the key type, curve, alg, use, kid and
+// the size of the RSA or oct keys if necessary.
+func GenerateJWK(kty, crv, alg, use, kid string, size int) (jwk *JSONWebKey, err error) {
+	if kty == "OKP" && use == "enc" && (crv == "" || crv == "Ed25519") {
+		return nil, errors.New("invalid algorithm: Ed25519 cannot be used for encryption")
+	}
+
+	switch {
+	case kty == "EC" && crv == "":
+		crv = P256
+	case kty == "OKP" && crv == "":
+		crv = Ed25519
+	case kty == "RSA" && size == 0:
+		size = DefaultRSASize
+	case kty == "oct" && size == 0:
+		size = DefaultOctSize
+	}
+
+	key, err := keyutil.GenerateKey(kty, crv, size)
+	if err != nil {
+		return nil, err
+	}
+	jwk = &JSONWebKey{
+		Key:       key,
+		KeyID:     kid,
+		Use:       use,
+		Algorithm: alg,
+	}
+	guessJWKAlgorithm(&context{alg: alg}, jwk)
+	if jwk.KeyID == "" && kty != "oct" {
+		jwk.KeyID, err = Thumbprint(jwk)
+	}
+	return jwk, err
+}
+
+// GenerateJWKFromPEM returns an incomplete JSONWebKey using the key from a
+// PEM file.
+func GenerateJWKFromPEM(filename string, subtle bool) (*JSONWebKey, error) {
+	key, err := pemutil.Read(filename)
+	if err != nil {
+		return nil, err
+	}
+
+	switch key := key.(type) {
+	case *rsa.PrivateKey, *rsa.PublicKey:
+		return &JSONWebKey{
+			Key: key,
+		}, nil
+	case *ecdsa.PrivateKey, *ecdsa.PublicKey, ed25519.PrivateKey, ed25519.PublicKey:
+		return &JSONWebKey{
+			Key:       key,
+			Algorithm: algForKey(key),
+		}, nil
+	case *x509.Certificate:
+		var use string
+		if !subtle {
+			use, err = keyUsageForCert(key)
+			if err != nil {
+				return nil, err
+			}
+		}
+		return &JSONWebKey{
+			Key:          key.PublicKey,
+			Certificates: []*x509.Certificate{key},
+			Algorithm:    algForKey(key.PublicKey),
+			Use:          use,
+		}, nil
+	default:
+		return nil, errors.Errorf("error parsing %s: unsupported key type '%T'", filename, key)
+	}
+}
+
+func algForKey(key crypto.PublicKey) string {
+	switch key := key.(type) {
+	case *ecdsa.PrivateKey:
+		return getECAlgorithm(key.Curve)
+	case *ecdsa.PublicKey:
+		return getECAlgorithm(key.Curve)
+	case ed25519.PrivateKey, ed25519.PublicKey:
+		return EdDSA
+	default:
+		return ""
+	}
+}
+
+func keyUsageForCert(cert *x509.Certificate) (string, error) {
+	isDigitalSignature := containsUsage(cert.KeyUsage,
+		x509.KeyUsageDigitalSignature,
+		x509.KeyUsageContentCommitment,
+		x509.KeyUsageCertSign,
+		x509.KeyUsageCRLSign,
+	)
+	isEncipherment := containsUsage(cert.KeyUsage,
+		x509.KeyUsageKeyEncipherment,
+		x509.KeyUsageDataEncipherment,
+		x509.KeyUsageKeyAgreement,
+		x509.KeyUsageEncipherOnly,
+		x509.KeyUsageDecipherOnly,
+	)
+	if isDigitalSignature && isEncipherment {
+		return "", errAmbiguousCertKeyUsage
+	}
+	if isDigitalSignature {
+		return jwksUsageSig, nil
+	}
+	if isEncipherment {
+		return jwksUsageEnc, nil
+	}
+	return "", errNoCertKeyUsage
+}
+
+func containsUsage(usage x509.KeyUsage, queries ...x509.KeyUsage) bool {
+	for _, query := range queries {
+		if usage&query == query {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/go.step.sm/crypto/jose/options.go b/vendor/go.step.sm/crypto/jose/options.go
new file mode 100644
index 0000000000..e1e1393b48
--- /dev/null
+++ b/vendor/go.step.sm/crypto/jose/options.go
@@ -0,0 +1,125 @@
+package jose
+
+import (
+	"go.step.sm/crypto/internal/utils"
+)
+
+type context struct {
+	filename         string
+	use, alg, kid    string
+	subtle, insecure bool
+	noDefaults       bool
+	password         []byte
+	passwordPrompt   string
+	passwordPrompter PasswordPrompter
+	contentType      string
+}
+
+// apply applies the options to the context and returns an error if one of the
+// options fails.
+func (ctx *context) apply(opts ...Option) (*context, error) {
+	for _, opt := range opts {
+		if err := opt(ctx); err != nil {
+			return nil, err
+		}
+	}
+	return ctx, nil
+}
+
+// Option is the type used to add attributes to the context.
+type Option func(ctx *context) error
+
+// WithFilename adds the given filename to the context.
+func WithFilename(filename string) Option {
+	return func(ctx *context) error {
+		ctx.filename = filename
+		return nil
+	}
+}
+
+// WithUse adds the use claim to the context.
+func WithUse(use string) Option {
+	return func(ctx *context) error {
+		ctx.use = use
+		return nil
+	}
+}
+
+// WithAlg adds the alg claim to the context.
+func WithAlg(alg string) Option {
+	return func(ctx *context) error {
+		ctx.alg = alg
+		return nil
+	}
+}
+
+// WithKid adds the kid property to the context.
+func WithKid(kid string) Option {
+	return func(ctx *context) error {
+		ctx.kid = kid
+		return nil
+	}
+}
+
+// WithSubtle marks the context as subtle.
+func WithSubtle(subtle bool) Option {
+	return func(ctx *context) error {
+		ctx.subtle = subtle
+		return nil
+	}
+}
+
+// WithInsecure marks the context as insecure.
+func WithInsecure(insecure bool) Option {
+	return func(ctx *context) error {
+		ctx.insecure = insecure
+		return nil
+	}
+}
+
+// WithNoDefaults prevents the parser from loading default values, especially
+// the default algorithms.
+func WithNoDefaults(val bool) Option {
+	return func(ctx *context) error {
+		ctx.noDefaults = val
+		return nil
+	}
+}
+
+// WithPassword is a method that adds the given password to the context.
+func WithPassword(pass []byte) Option {
+	return func(ctx *context) error {
+		ctx.password = pass
+		return nil
+	}
+}
+
+// WithPasswordFile is a method that adds the password in a file to the context.
+func WithPasswordFile(filename string) Option {
+	return func(ctx *context) error {
+		b, err := utils.ReadPasswordFromFile(filename)
+		if err != nil {
+			return err
+		}
+		ctx.password = b
+		return nil
+	}
+}
+
+// WithPasswordPrompter defines a method that can be used to prompt for the
+// password to decrypt an encrypted JWE.
+func WithPasswordPrompter(prompt string, fn PasswordPrompter) Option {
+	return func(ctx *context) error {
+		ctx.passwordPrompt = prompt
+		ctx.passwordPrompter = fn
+		return nil
+	}
+}
+
+// WithContentType adds the content type when encrypting data.
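+// For example, EncryptJWK calls Encrypt with WithContentType("jwk+json") so
+// that the payload type is recorded in the resulting JWE (the cty header).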
+func WithContentType(cty string) Option { + return func(ctx *context) error { + ctx.contentType = cty + return nil + } +} diff --git a/vendor/go.step.sm/crypto/jose/parse.go b/vendor/go.step.sm/crypto/jose/parse.go new file mode 100644 index 0000000000..760c4f161f --- /dev/null +++ b/vendor/go.step.sm/crypto/jose/parse.go @@ -0,0 +1,411 @@ +package jose + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/json" + "io" + "net/http" + "os" + "strings" + "time" + + "github.com/pkg/errors" + "go.step.sm/crypto/pemutil" + "go.step.sm/crypto/x25519" +) + +type keyType int + +const ( + jwkKeyType keyType = iota + pemKeyType + octKeyType +) + +// read returns the bytes from reading a file, or from a url if the filename has +// the prefix https:// +func read(filename string) ([]byte, error) { + if strings.HasPrefix(filename, "https://") { + resp, err := http.Get(filename) //nolint:gosec // no SSRF + if err != nil { + return nil, errors.Wrapf(err, "error retrieving %s", filename) + } + defer resp.Body.Close() + + if resp.StatusCode >= 400 { + return nil, errors.Errorf("error retrieving %s: status code %d", filename, resp.StatusCode) + } + b, err := io.ReadAll(resp.Body) + return b, errors.Wrapf(err, "error retrieving %s", filename) + } + + b, err := os.ReadFile(filename) + if err != nil { + return nil, errors.Wrapf(err, "error reading %s", filename) + } + return b, nil +} + +// ReadKey returns a JSONWebKey from the given JWK or PEM file. If the file is +// password protected, and no password or prompt password function is given it +// will fail. +func ReadKey(filename string, opts ...Option) (*JSONWebKey, error) { + b, err := read(filename) + if err != nil { + return nil, err + } + opts = append(opts, WithFilename(filename)) + return ParseKey(b, opts...) +} + +// ParseKey returns a JSONWebKey from the given JWK file or a PEM file. If the +// file is password protected, and no password or prompt password function is +// given it will fail. +func ParseKey(b []byte, opts ...Option) (*JSONWebKey, error) { + ctx, err := new(context).apply(opts...) + if err != nil { + return nil, err + } + if ctx.filename == "" { + ctx.filename = "key" + } + + jwk := new(JSONWebKey) + switch guessKeyType(ctx, b) { + case jwkKeyType: + // Attempt to parse an encrypted file + if b, err = Decrypt(b, opts...); err != nil { + return nil, err + } + + // Unmarshal the plain (or decrypted JWK) + if err = json.Unmarshal(b, jwk); err != nil { + return nil, errors.Errorf("error reading %s: unsupported format", ctx.filename) + } + + // If KeyID not set by environment, then use the default. + // NOTE: we do not set this value by default in the case of jwkKeyType + // because it is assumed to have been left empty on purpose. + case pemKeyType: + pemOptions := []pemutil.Options{ + pemutil.WithFilename(ctx.filename), + } + if ctx.password != nil { + pemOptions = append(pemOptions, pemutil.WithPassword(ctx.password)) + } + if ctx.passwordPrompter != nil { + pemOptions = append(pemOptions, pemutil.WithPasswordPrompt(ctx.passwordPrompt, pemutil.PasswordPrompter(ctx.passwordPrompter))) + } + if pemutil.PromptPassword == nil && PromptPassword != nil { + pemutil.PromptPassword = pemutil.PasswordPrompter(PromptPassword) + } + + jwk.Key, err = pemutil.ParseKey(b, pemOptions...) 
+		if err != nil {
+			return nil, err
+		}
+		if ctx.kid == "" {
+			if jwk.KeyID, err = Thumbprint(jwk); err != nil {
+				return nil, err
+			}
+		}
+	case octKeyType:
+		jwk.Key = b
+	}
+
+	// Validate key id
+	if ctx.kid != "" && jwk.KeyID != "" && ctx.kid != jwk.KeyID {
+		return nil, errors.Errorf("kid %s does not match the kid on %s", ctx.kid, ctx.filename)
+	}
+	if jwk.KeyID == "" {
+		jwk.KeyID = ctx.kid
+	}
+	if jwk.Use == "" {
+		jwk.Use = ctx.use
+	}
+
+	// Set the algorithm if empty
+	guessJWKAlgorithm(ctx, jwk)
+
+	// Validate alg: if the flag '--subtle' is passed we will allow overwriting it
+	if !ctx.subtle && ctx.alg != "" && jwk.Algorithm != "" && ctx.alg != jwk.Algorithm {
+		return nil, errors.Errorf("alg %s does not match the alg on %s", ctx.alg, ctx.filename)
+	}
+	if ctx.subtle && ctx.alg != "" {
+		jwk.Algorithm = ctx.alg
+	}
+
+	return jwk, nil
+}
+
+// ReadKeySet reads a JWK Set from a URL or filename. URLs must start with
+// "https://".
+func ReadKeySet(filename string, opts ...Option) (*JSONWebKey, error) {
+	b, err := read(filename)
+	if err != nil {
+		return nil, err
+	}
+	opts = append(opts, WithFilename(filename))
+	return ParseKeySet(b, opts...)
+}
+
+// ParseKeySet returns the JWK with the given kid after parsing a JWKSet from
+// a given file.
+func ParseKeySet(b []byte, opts ...Option) (*JSONWebKey, error) {
+	ctx, err := new(context).apply(opts...)
+	if err != nil {
+		return nil, err
+	}
+
+	// Attempt to parse an encrypted file
+	if b, err = Decrypt(b, opts...); err != nil {
+		return nil, err
+	}
+
+	// Unmarshal the plain or decrypted JWKSet
+	jwkSet := new(JSONWebKeySet)
+	if err := json.Unmarshal(b, jwkSet); err != nil {
+		return nil, errors.Errorf("error reading %s: unsupported format", ctx.filename)
+	}
+
+	jwks := jwkSet.Key(ctx.kid)
+	switch len(jwks) {
+	case 0:
+		return nil, errors.Errorf("cannot find key with kid %s on %s", ctx.kid, ctx.filename)
+	case 1:
+		jwk := &jwks[0]
+
+		// Set the algorithm if empty
+		guessJWKAlgorithm(ctx, jwk)
+
+		// Validate alg: if the flag '--subtle' is passed we will allow
+		// overwriting the alg
+		if !ctx.subtle && ctx.alg != "" && jwk.Algorithm != "" && ctx.alg != jwk.Algorithm {
+			return nil, errors.Errorf("alg %s does not match the alg on %s", ctx.alg, ctx.filename)
+		}
+		if ctx.subtle && ctx.alg != "" {
+			jwk.Algorithm = ctx.alg
+		}
+		return jwk, nil
+	default:
+		return nil, errors.Errorf("multiple keys with kid %s have been found on %s", ctx.kid, ctx.filename)
+	}
+}
+
+func decodeCerts(l []interface{}) ([]*x509.Certificate, error) {
+	certs := make([]*x509.Certificate, len(l))
+	for i, j := range l {
+		certStr, ok := j.(string)
+		if !ok {
+			return nil, errors.Errorf("wrong type in x5c header list; expected string but got %T", j)
+		}
+		certB, err := base64.StdEncoding.DecodeString(certStr)
+		if err != nil {
+			return nil, errors.Wrap(err, "error decoding base64 encoded x5c cert")
+		}
+		cert, err := x509.ParseCertificate(certB)
+		if err != nil {
+			return nil, errors.Wrap(err, "error parsing x5c cert")
+		}
+		certs[i] = cert
+	}
+	return certs, nil
+}
+
+// X5cInsecureKey is the key used to store the x5cInsecure cert chain in the JWT header.
+var X5cInsecureKey = "x5cInsecure"
+
+// GetX5cInsecureHeader extracts the x5cInsecure certificate chain from the token.
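+// The header value is expected to be a list of base64-encoded DER
+// certificates (leaf first), the same encoding produced by ValidateX5C.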
+func GetX5cInsecureHeader(jwt *JSONWebToken) ([]*x509.Certificate, error) { + x5cVal, ok := jwt.Headers[0].ExtraHeaders[HeaderKey(X5cInsecureKey)] + if !ok { + return nil, errors.New("ssh check-host token missing x5cInsecure header") + } + interfaces, ok := x5cVal.([]interface{}) + if !ok { + return nil, errors.Errorf("ssh check-host token x5cInsecure header has wrong type; expected []string, but got %T", x5cVal) + } + chain, err := decodeCerts(interfaces) + if err != nil { + return nil, errors.Wrap(err, "error decoding x5cInsecure header certs") + } + return chain, nil +} + +// ParseX5cInsecure parses an x5cInsecure token, validates the certificate chain +// in the token, and returns the JWT struct along with all the verified chains. +func ParseX5cInsecure(tok string, roots []*x509.Certificate) (*JSONWebToken, [][]*x509.Certificate, error) { + jwt, err := ParseSigned(tok) + if err != nil { + return nil, nil, errors.Wrapf(err, "error parsing x5cInsecure token") + } + + chain, err := GetX5cInsecureHeader(jwt) + if err != nil { + return nil, nil, errors.Wrap(err, "error extracting x5cInsecure cert chain") + } + leaf := chain[0] + + interPool := x509.NewCertPool() + for _, crt := range chain[1:] { + interPool.AddCert(crt) + } + rootPool := x509.NewCertPool() + for _, crt := range roots { + rootPool.AddCert(crt) + } + // Correctly parse and validate the x5c certificate chain. + verifiedChains, err := leaf.Verify(x509.VerifyOptions{ + Roots: rootPool, + Intermediates: interPool, + // A hack so we skip validity period validation. + CurrentTime: leaf.NotAfter.Add(-1 * time.Minute), + KeyUsages: []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, + }, + }) + if err != nil { + return nil, nil, errors.Wrap(err, "error verifying x5cInsecure certificate chain") + } + leaf = verifiedChains[0][0] + + if leaf.KeyUsage&x509.KeyUsageDigitalSignature == 0 { + return nil, nil, errors.New("certificate used to sign x5cInsecure token cannot be used for digital signature") + } + + return jwt, verifiedChains, nil +} + +// guessKeyType returns the key type of the given data. Key types are JWK, PEM +// or oct. +func guessKeyType(ctx *context, data []byte) keyType { + switch ctx.alg { + // jwk or file with oct data + case "HS256", "HS384", "HS512": + // Encrypted JWK ? + if _, err := ParseEncrypted(string(data)); err == nil { + return jwkKeyType + } + // JSON JWK ? + if err := json.Unmarshal(data, &JSONWebKey{}); err == nil { + return jwkKeyType + } + // Default to oct + return octKeyType + default: + // PEM or default to JWK + if bytes.HasPrefix(data, []byte("-----BEGIN ")) { + return pemKeyType + } + return jwkKeyType + } +} + +// guessJWKAlgorithm set the algorithm if it's not set and we can guess it +func guessJWKAlgorithm(ctx *context, jwk *JSONWebKey) { + if jwk.Algorithm == "" { + // Force default algorithm if passed. 
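+		// An alg provided through the context (e.g. via WithAlg) takes
+		// precedence over any guess based on the key type below.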
+		if ctx.alg != "" {
+			jwk.Algorithm = ctx.alg
+			return
+		}
+
+		// Guess only fixed algorithms if noDefaults is enabled
+		if ctx.noDefaults {
+			guessKnownJWKAlgorithm(ctx, jwk)
+			return
+		}
+
+		// Use defaults for each key type
+		switch k := jwk.Key.(type) {
+		case []byte:
+			if jwk.Use == "enc" {
+				jwk.Algorithm = string(DefaultOctKeyAlgorithm)
+			} else {
+				jwk.Algorithm = string(DefaultOctSigAlgorithm)
+			}
+		case *ecdsa.PrivateKey:
+			if jwk.Use == "enc" {
+				jwk.Algorithm = string(DefaultECKeyAlgorithm)
+			} else {
+				jwk.Algorithm = getECAlgorithm(k.Curve)
+			}
+		case *ecdsa.PublicKey:
+			if jwk.Use == "enc" {
+				jwk.Algorithm = string(DefaultECKeyAlgorithm)
+			} else {
+				jwk.Algorithm = getECAlgorithm(k.Curve)
+			}
+		case *rsa.PrivateKey, *rsa.PublicKey:
+			if jwk.Use == "enc" {
+				jwk.Algorithm = string(DefaultRSAKeyAlgorithm)
+			} else {
+				jwk.Algorithm = string(DefaultRSASigAlgorithm)
+			}
+		// Ed25519 can only be used for signing operations
+		case ed25519.PrivateKey, ed25519.PublicKey:
+			jwk.Algorithm = EdDSA
+		case x25519.PrivateKey, x25519.PublicKey:
+			jwk.Algorithm = XEdDSA
+		}
+	}
+}
+
+// guessSignatureAlgorithm returns the signature algorithm for a given private key.
+func guessSignatureAlgorithm(key crypto.PrivateKey) SignatureAlgorithm {
+	switch k := key.(type) {
+	case []byte:
+		return DefaultOctSigAlgorithm
+	case *ecdsa.PrivateKey:
+		return SignatureAlgorithm(getECAlgorithm(k.Curve))
+	case *rsa.PrivateKey:
+		return DefaultRSASigAlgorithm
+	case ed25519.PrivateKey:
+		return EdDSA
+	case x25519.PrivateKey, X25519Signer:
+		return XEdDSA
+	default:
+		return ""
+	}
+}
+
+// guessKnownJWKAlgorithm sets the algorithm for keys that only have one
+// possible algorithm.
+func guessKnownJWKAlgorithm(_ *context, jwk *JSONWebKey) {
+	if jwk.Algorithm == "" && jwk.Use != "enc" {
+		switch k := jwk.Key.(type) {
+		case *ecdsa.PrivateKey:
+			jwk.Algorithm = getECAlgorithm(k.Curve)
+		case *ecdsa.PublicKey:
+			jwk.Algorithm = getECAlgorithm(k.Curve)
+		case ed25519.PrivateKey, ed25519.PublicKey:
+			jwk.Algorithm = EdDSA
+		case x25519.PrivateKey, x25519.PublicKey:
+			jwk.Algorithm = XEdDSA
+		}
+	}
+}
+
+// getECAlgorithm returns the JWA algorithm name for the given elliptic curve.
+// If the curve is not supported it will return an empty string.
+//
+// Supported curves are P-256, P-384, and P-521.
+func getECAlgorithm(crv elliptic.Curve) string {
+	switch crv.Params().Name {
+	case P256:
+		return ES256
+	case P384:
+		return ES384
+	case P521:
+		return ES512
+	default:
+		return ""
+	}
+}
diff --git a/vendor/go.step.sm/crypto/jose/types.go b/vendor/go.step.sm/crypto/jose/types.go
new file mode 100644
index 0000000000..f034763850
--- /dev/null
+++ b/vendor/go.step.sm/crypto/jose/types.go
@@ -0,0 +1,310 @@
+// Package jose is a wrapper for github.com/go-jose/go-jose/v3 and implements
+// utilities to parse and generate JWT, JWK and JWKSets.
+package jose
+
+import (
+	"crypto"
+	"errors"
+	"strings"
+	"time"
+
+	jose "github.com/go-jose/go-jose/v3"
+	"github.com/go-jose/go-jose/v3/cryptosigner"
+	"github.com/go-jose/go-jose/v3/jwt"
+	"go.step.sm/crypto/x25519"
+)
+
+// SupportsPBKDF2 indicates whether the underlying library supports
+// password-based cryptography algorithms.
+const SupportsPBKDF2 = true
+
+// PBKDF2SaltSize is the default size of the salt for PBKDF2, 128-bit salt.
+const PBKDF2SaltSize = 16
+
+// PBKDF2Iterations is the default number of iterations for PBKDF2.
+//
+// 600k is the current OWASP recommendation (Dec 2022)
+// https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html#pbkdf2
+//
+// NIST recommends at least 10k (SP 800-63B), and 1Password increased its
+// iteration count from 100k to 650k in 2023.
+const PBKDF2Iterations = 600000
+
+// JSONWebSignature represents a signed JWS object after parsing.
+type JSONWebSignature = jose.JSONWebSignature
+
+// JSONWebToken represents a JSON Web Token (as specified in RFC7519).
+type JSONWebToken = jwt.JSONWebToken
+
+// JSONWebKey represents a public or private key in JWK format.
+type JSONWebKey = jose.JSONWebKey
+
+// JSONWebKeySet represents a JWK Set object.
+type JSONWebKeySet = jose.JSONWebKeySet
+
+// JSONWebEncryption represents an encrypted JWE object after parsing.
+type JSONWebEncryption = jose.JSONWebEncryption
+
+// Recipient represents an algorithm/key to encrypt messages to.
+type Recipient = jose.Recipient
+
+// EncrypterOptions represents options that can be set on new encrypters.
+type EncrypterOptions = jose.EncrypterOptions
+
+// Encrypter represents an encrypter which produces an encrypted JWE object.
+type Encrypter = jose.Encrypter
+
+// ContentType represents the type of the contained data.
+type ContentType = jose.ContentType
+
+// KeyAlgorithm represents a key management algorithm.
+type KeyAlgorithm = jose.KeyAlgorithm
+
+// ContentEncryption represents a content encryption algorithm.
+type ContentEncryption = jose.ContentEncryption
+
+// SignatureAlgorithm represents a signature (or MAC) algorithm.
+type SignatureAlgorithm = jose.SignatureAlgorithm
+
+// Signature represents a signature.
+type Signature = jose.Signature
+
+// ErrCryptoFailure indicates an error in a cryptographic primitive.
+var ErrCryptoFailure = jose.ErrCryptoFailure
+
+// Claims represents public claim values (as specified in RFC 7519).
+type Claims = jwt.Claims
+
+// Builder is a utility for making JSON Web Tokens. Calls can be chained, and
+// errors are accumulated until the final call to CompactSerialize/FullSerialize.
+type Builder = jwt.Builder
+
+// NumericDate represents date and time as the number of seconds since the
+// epoch, including leap seconds. Non-integer values can be represented
+// in the serialized format, but we round to the nearest second.
+type NumericDate = jwt.NumericDate
+
+// Audience represents the recipients that the token is intended for.
+type Audience = jwt.Audience
+
+// Expected defines values used for protected claims validation.
+// If a field has a zero value, its validation is skipped.
+type Expected = jwt.Expected
+
+// Signer represents a signer which takes a payload and produces a signed JWS object.
+type Signer = jose.Signer
+
+// OpaqueSigner represents a jose.Signer that wraps a crypto.Signer.
+type OpaqueSigner = jose.OpaqueSigner
+
+// SigningKey represents an algorithm/key used to sign a message.
+type SigningKey = jose.SigningKey
+
+// SignerOptions represents options that can be set when creating signers.
+type SignerOptions = jose.SignerOptions
+
+// Header represents the read-only JOSE header for JWE/JWS objects.
+type Header = jose.Header
+
+// HeaderKey represents the type used as a key in the protected header of a JWS
+// object.
+type HeaderKey = jose.HeaderKey
+
+// ErrInvalidIssuer indicates an invalid iss claim.
+var ErrInvalidIssuer = jwt.ErrInvalidIssuer
+
+// ErrInvalidAudience indicates an invalid aud claim.
+var ErrInvalidAudience = jwt.ErrInvalidAudience + +// ErrNotValidYet indicates that token is used before time indicated in nbf claim. +var ErrNotValidYet = jwt.ErrNotValidYet + +// ErrExpired indicates that token is used after expiry time indicated in exp claim. +var ErrExpired = jwt.ErrExpired + +// ErrInvalidSubject indicates invalid sub claim. +var ErrInvalidSubject = jwt.ErrInvalidSubject + +// ErrInvalidID indicates invalid jti claim. +var ErrInvalidID = jwt.ErrInvalidID + +// ErrIssuedInTheFuture indicates that the iat field is in the future. +var ErrIssuedInTheFuture = jwt.ErrIssuedInTheFuture + +// Key management algorithms +// +//nolint:stylecheck,revive // use standard names in upper-case +const ( + RSA1_5 = KeyAlgorithm("RSA1_5") // RSA-PKCS1v1.5 + RSA_OAEP = KeyAlgorithm("RSA-OAEP") // RSA-OAEP-SHA1 + RSA_OAEP_256 = KeyAlgorithm("RSA-OAEP-256") // RSA-OAEP-SHA256 + A128KW = KeyAlgorithm("A128KW") // AES key wrap (128) + A192KW = KeyAlgorithm("A192KW") // AES key wrap (192) + A256KW = KeyAlgorithm("A256KW") // AES key wrap (256) + DIRECT = KeyAlgorithm("dir") // Direct encryption + ECDH_ES = KeyAlgorithm("ECDH-ES") // ECDH-ES + ECDH_ES_A128KW = KeyAlgorithm("ECDH-ES+A128KW") // ECDH-ES + AES key wrap (128) + ECDH_ES_A192KW = KeyAlgorithm("ECDH-ES+A192KW") // ECDH-ES + AES key wrap (192) + ECDH_ES_A256KW = KeyAlgorithm("ECDH-ES+A256KW") // ECDH-ES + AES key wrap (256) + A128GCMKW = KeyAlgorithm("A128GCMKW") // AES-GCM key wrap (128) + A192GCMKW = KeyAlgorithm("A192GCMKW") // AES-GCM key wrap (192) + A256GCMKW = KeyAlgorithm("A256GCMKW") // AES-GCM key wrap (256) + PBES2_HS256_A128KW = KeyAlgorithm("PBES2-HS256+A128KW") // PBES2 + HMAC-SHA256 + AES key wrap (128) + PBES2_HS384_A192KW = KeyAlgorithm("PBES2-HS384+A192KW") // PBES2 + HMAC-SHA384 + AES key wrap (192) + PBES2_HS512_A256KW = KeyAlgorithm("PBES2-HS512+A256KW") // PBES2 + HMAC-SHA512 + AES key wrap (256) +) + +// Signature algorithms +const ( + HS256 = "HS256" // HMAC using SHA-256 + HS384 = "HS384" // HMAC using SHA-384 + HS512 = "HS512" // HMAC using SHA-512 + RS256 = "RS256" // RSASSA-PKCS-v1.5 using SHA-256 + RS384 = "RS384" // RSASSA-PKCS-v1.5 using SHA-384 + RS512 = "RS512" // RSASSA-PKCS-v1.5 using SHA-512 + ES256 = "ES256" // ECDSA using P-256 and SHA-256 + ES384 = "ES384" // ECDSA using P-384 and SHA-384 + ES512 = "ES512" // ECDSA using P-521 and SHA-512 + PS256 = "PS256" // RSASSA-PSS using SHA256 and MGF1-SHA256 + PS384 = "PS384" // RSASSA-PSS using SHA384 and MGF1-SHA384 + PS512 = "PS512" // RSASSA-PSS using SHA512 and MGF1-SHA512 + EdDSA = "EdDSA" // Ed25519 with EdDSA signature schema + XEdDSA = "XEdDSA" // X25519 with XEdDSA signature schema +) + +// Content encryption algorithms +// +//nolint:revive,stylecheck // use standard names in upper-case +const ( + A128CBC_HS256 = ContentEncryption("A128CBC-HS256") // AES-CBC + HMAC-SHA256 (128) + A192CBC_HS384 = ContentEncryption("A192CBC-HS384") // AES-CBC + HMAC-SHA384 (192) + A256CBC_HS512 = ContentEncryption("A256CBC-HS512") // AES-CBC + HMAC-SHA512 (256) + A128GCM = ContentEncryption("A128GCM") // AES-GCM (128) + A192GCM = ContentEncryption("A192GCM") // AES-GCM (192) + A256GCM = ContentEncryption("A256GCM") // AES-GCM (256) +) + +// Elliptic curves +const ( + P256 = "P-256" // P-256 curve (FIPS 186-3) + P384 = "P-384" // P-384 curve (FIPS 186-3) + P521 = "P-521" // P-521 curve (FIPS 186-3) +) + +// Key types +const ( + EC = "EC" // Elliptic curves + RSA = "RSA" // RSA + OKP = "OKP" // Ed25519 + OCT = "oct" // Octet sequence +) + +// Ed25519 is the EdDSA 
signature scheme using SHA-512 and Curve25519.
+const Ed25519 = "Ed25519"
+
+// Default key management, signature, and content encryption algorithms to use if none is specified.
+const (
+	// Key management algorithms
+	DefaultECKeyAlgorithm  = ECDH_ES
+	DefaultRSAKeyAlgorithm = RSA_OAEP_256
+	DefaultOctKeyAlgorithm = A256GCMKW
+	// Signature algorithms
+	DefaultRSASigAlgorithm = RS256
+	DefaultOctSigAlgorithm = HS256
+	// Content encryption algorithm
+	DefaultEncAlgorithm = A256GCM
+)
+
+// Default sizes
+const (
+	DefaultRSASize = 2048
+	DefaultOctSize = 32
+)
+
+// ParseEncrypted parses an encrypted message in compact or full serialization format.
+func ParseEncrypted(input string) (*JSONWebEncryption, error) {
+	return jose.ParseEncrypted(input)
+}
+
+// NewEncrypter creates an appropriate encrypter based on the key type.
+func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) (Encrypter, error) {
+	return jose.NewEncrypter(enc, rcpt, opts)
+}
+
+// NewNumericDate constructs a NumericDate from a time.Time value.
+func NewNumericDate(t time.Time) *NumericDate {
+	return jwt.NewNumericDate(t)
+}
+
+// UnixNumericDate returns a NumericDate from the given seconds since the UNIX
+// Epoch time. For backward compatibility, if s is 0 a nil value will be
+// returned.
+func UnixNumericDate(s int64) *NumericDate {
+	if s == 0 {
+		return nil
+	}
+	out := NumericDate(s)
+	return &out
+}
+
+// NewSigner creates an appropriate signer based on the key type.
+func NewSigner(sig SigningKey, opts *SignerOptions) (Signer, error) {
+	if k, ok := sig.Key.(x25519.PrivateKey); ok {
+		sig.Key = X25519Signer(k)
+	}
+	if sig.Algorithm == "" {
+		sig.Algorithm = guessSignatureAlgorithm(sig.Key)
+	}
+	return jose.NewSigner(sig, opts)
+}
+
+// NewOpaqueSigner creates a new OpaqueSigner for JWT signing from a crypto.Signer.
+func NewOpaqueSigner(signer crypto.Signer) OpaqueSigner {
+	return cryptosigner.Opaque(signer)
+}
+
+// Verify validates the token payload with the given public key and deserializes
+// the token into the destination.
+func Verify(token *JSONWebToken, publicKey interface{}, dest ...interface{}) error {
+	if k, ok := publicKey.(x25519.PublicKey); ok {
+		publicKey = X25519Verifier(k)
+	}
+	return token.Claims(publicKey, dest...)
+}
+
+// ParseSigned parses a token from JWS form.
+func ParseSigned(s string) (*JSONWebToken, error) {
+	return jwt.ParseSigned(s)
+}
+
+// Signed creates a builder for signed tokens.
+func Signed(sig Signer) Builder {
+	return jwt.Signed(sig)
+}
+
+// ParseJWS parses a signed message in compact or full serialization format.
+func ParseJWS(s string) (*JSONWebSignature, error) {
+	return jose.ParseSigned(s)
+}
+
+// IsSymmetric reports whether a JSONWebKey is symmetric.
+func IsSymmetric(k *JSONWebKey) bool {
+	switch k.Key.(type) {
+	case []byte:
+		return true
+	default:
+		return false
+	}
+}
+
+// IsAsymmetric reports whether a JSONWebKey is asymmetric.
+func IsAsymmetric(k *JSONWebKey) bool {
+	return !IsSymmetric(k)
+}
+
+// TrimPrefix removes the "go-jose/go-jose: " prefix from error messages.
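+// For example, an error whose message is "go-jose/go-jose: <reason>" is
+// rewritten to just "<reason>".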
+func TrimPrefix(err error) error {
+	if err == nil {
+		return nil
+	}
+	return errors.New(strings.TrimPrefix(err.Error(), "go-jose/go-jose: "))
+}
diff --git a/vendor/go.step.sm/crypto/jose/validate.go b/vendor/go.step.sm/crypto/jose/validate.go
new file mode 100644
index 0000000000..6a904167e7
--- /dev/null
+++ b/vendor/go.step.sm/crypto/jose/validate.go
@@ -0,0 +1,221 @@
+package jose
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/ed25519"
+	"crypto/rsa"
+	"crypto/sha1" //nolint:gosec // RFC 7515 - X.509 Certificate SHA-1 Thumbprint
+	"crypto/x509"
+	"encoding/base64"
+	"fmt"
+	"os"
+
+	"github.com/pkg/errors"
+	"go.step.sm/crypto/keyutil"
+	"golang.org/x/crypto/ssh"
+)
+
+// ValidateSSHPOP validates the given SSH certificate and key for use in an
+// sshpop header.
+func ValidateSSHPOP(certFile string, key interface{}) (string, error) {
+	if certFile == "" {
+		return "", errors.New("ssh certfile cannot be empty")
+	}
+	certBytes, err := os.ReadFile(certFile)
+	if err != nil {
+		return "", errors.Wrapf(err, "error reading ssh certificate from %s", certFile)
+	}
+	sshpub, _, _, _, err := ssh.ParseAuthorizedKey(certBytes)
+	if err != nil {
+		return "", errors.Wrapf(err, "error parsing ssh public key from %s", certFile)
+	}
+	cert, ok := sshpub.(*ssh.Certificate)
+	if !ok {
+		return "", errors.New("error casting ssh public key to ssh certificate")
+	}
+	pubkey, err := keyutil.ExtractKey(cert)
+	if err != nil {
+		return "", errors.Wrap(err, "error extracting public key from ssh public key interface")
+	}
+	if err = validateKeyPair(pubkey, key); err != nil {
+		return "", errors.Wrap(err, "error verifying ssh key pair")
+	}
+
+	return base64.StdEncoding.EncodeToString(cert.Marshal()), nil
+}
+
+func validateKeyPair(pub crypto.PublicKey, priv crypto.PrivateKey) error {
+	switch key := priv.(type) {
+	case *JSONWebKey:
+		return keyutil.VerifyPair(pub, key.Key)
+	case OpaqueSigner:
+		if !keyutil.Equal(pub, key.Public().Key) {
+			return errors.New("private key does not match public key")
+		}
+		return nil
+	default:
+		return keyutil.VerifyPair(pub, priv)
+	}
+}
+
+func validateX5(certs []*x509.Certificate, key interface{}) error {
+	if len(certs) == 0 {
+		return errors.New("certs cannot be empty")
+	}
+
+	if err := validateKeyPair(certs[0].PublicKey, key); err != nil {
+		return errors.Wrap(err, "error verifying certificate and key")
+	}
+
+	if certs[0].KeyUsage&x509.KeyUsageDigitalSignature == 0 {
+		return errors.New("certificate/private-key pair used to sign " +
+			"token is not approved for digital signature")
+	}
+	return nil
+}
+
+// ValidateX5C validates the given certificate chain and key for use as a token
+// signer and x5c header.
+func ValidateX5C(certs []*x509.Certificate, key interface{}) ([]string, error) {
+	if err := validateX5(certs, key); err != nil {
+		return nil, errors.Wrap(err, "ValidateX5C")
+	}
+	strs := make([]string, len(certs))
+	for i, cert := range certs {
+		strs[i] = base64.StdEncoding.EncodeToString(cert.Raw)
+	}
+	return strs, nil
+}
+
+// ValidateX5T validates the given certificate and key for use as a token signer
+// and x5t header.
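+// The returned string is the base64 URL encoding of the SHA-1 thumbprint of
+// the first certificate, as specified by RFC 7515 section 4.1.7.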
+func ValidateX5T(certs []*x509.Certificate, key interface{}) (string, error) {
+	if err := validateX5(certs, key); err != nil {
+		return "", errors.Wrap(err, "ValidateX5T")
+	}
+	// x5t is the base64 URL encoded SHA1 thumbprint
+	// (see https://tools.ietf.org/html/rfc7515#section-4.1.7)
+	//nolint:gosec // RFC 7515 - X.509 Certificate SHA-1 Thumbprint
+	fingerprint := sha1.Sum(certs[0].Raw)
+	return base64.URLEncoding.EncodeToString(fingerprint[:]), nil
+}
+
+// ValidateJWK validates the given JWK.
+func ValidateJWK(jwk *JSONWebKey) error {
+	switch jwk.Use {
+	case "sig":
+		return validateSigJWK(jwk)
+	case "enc":
+		return validateEncJWK(jwk)
+	default:
+		return validateGeneric(jwk)
+	}
+}
+
+// validateSigJWK validates the given JWK for signature operations.
+func validateSigJWK(jwk *JSONWebKey) error {
+	if jwk.Algorithm == "" {
+		return errors.New("flag '--alg' is required with the given key")
+	}
+	errctx := "the given key"
+
+	switch k := jwk.Key.(type) {
+	case []byte:
+		switch jwk.Algorithm {
+		case HS256, HS384, HS512:
+			return nil
+		}
+		errctx = "kty 'oct'"
+	case *rsa.PrivateKey, *rsa.PublicKey:
+		switch jwk.Algorithm {
+		case RS256, RS384, RS512:
+			return nil
+		case PS256, PS384, PS512:
+			return nil
+		}
+		errctx = "kty 'RSA'"
+	case *ecdsa.PrivateKey:
+		curve := k.Params().Name
+		switch {
+		case jwk.Algorithm == ES256 && curve == P256:
+			return nil
+		case jwk.Algorithm == ES384 && curve == P384:
+			return nil
+		case jwk.Algorithm == ES512 && curve == P521:
+			return nil
+		}
+		errctx = fmt.Sprintf("kty 'EC' and crv '%s'", curve)
+	case *ecdsa.PublicKey:
+		curve := k.Params().Name
+		switch {
+		case jwk.Algorithm == ES256 && curve == P256:
+			return nil
+		case jwk.Algorithm == ES384 && curve == P384:
+			return nil
+		case jwk.Algorithm == ES512 && curve == P521:
+			return nil
+		}
+		errctx = fmt.Sprintf("kty 'EC' and crv '%s'", curve)
+	case ed25519.PrivateKey, ed25519.PublicKey:
+		if jwk.Algorithm == EdDSA {
+			return nil
+		}
+		errctx = "kty 'OKP' and crv 'Ed25519'"
+	case OpaqueSigner:
+		for _, alg := range k.Algs() {
+			if jwk.Algorithm == string(alg) {
+				return nil
+			}
+		}
+	}
+
+	return errors.Errorf("alg '%s' is not compatible with %s", jwk.Algorithm, errctx)
+}
+
+// validateEncJWK validates the given JWK for encryption operations.
+func validateEncJWK(jwk *JSONWebKey) error {
+	alg := KeyAlgorithm(jwk.Algorithm)
+	var kty string
+
+	switch jwk.Key.(type) {
+	case []byte:
+		switch alg {
+		case DIRECT, A128GCMKW, A192GCMKW, A256GCMKW, A128KW, A192KW, A256KW:
+			return nil
+		}
+		kty = "oct"
+	case *rsa.PrivateKey, *rsa.PublicKey:
+		switch alg {
+		case RSA1_5, RSA_OAEP, RSA_OAEP_256:
+			return nil
+		}
+		kty = "RSA"
+	case *ecdsa.PrivateKey, *ecdsa.PublicKey:
+		switch alg {
+		case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
+			return nil
+		}
+		kty = "EC"
+	case ed25519.PrivateKey, ed25519.PublicKey:
+		return errors.New("key Ed25519 cannot be used for encryption")
+	}
+
+	return errors.Errorf("alg '%s' is not compatible with kty '%s'", jwk.Algorithm, kty)
+}
+
+// validateGeneric validates just the supported key types.
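+// It accepts oct ([]byte), RSA, EC, and Ed25519 keys, regardless of use.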
+func validateGeneric(jwk *JSONWebKey) error { + switch jwk.Key.(type) { + case []byte: + return nil + case *rsa.PrivateKey, *rsa.PublicKey: + return nil + case *ecdsa.PrivateKey, *ecdsa.PublicKey: + return nil + case ed25519.PrivateKey, ed25519.PublicKey: + return nil + } + + return errors.Errorf("unsupported key type '%T'", jwk.Key) +} diff --git a/vendor/go.step.sm/crypto/jose/x25519.go b/vendor/go.step.sm/crypto/jose/x25519.go new file mode 100644 index 0000000000..25e90e8ad7 --- /dev/null +++ b/vendor/go.step.sm/crypto/jose/x25519.go @@ -0,0 +1,66 @@ +package jose + +import ( + "crypto" + "crypto/rand" + "encoding/base64" + "fmt" + + "github.com/pkg/errors" + "go.step.sm/crypto/x25519" +) + +const x25519ThumbprintTemplate = `{"crv":"X25519","kty":"OKP","x":%q}` + +func x25519Thumbprint(key x25519.PublicKey, hash crypto.Hash) ([]byte, error) { + if len(key) != 32 { + return nil, errors.New("invalid elliptic key") + } + h := hash.New() + fmt.Fprintf(h, x25519ThumbprintTemplate, base64.RawURLEncoding.EncodeToString(key)) + return h.Sum(nil), nil +} + +// X25519Signer implements the jose.OpaqueSigner using an X25519 key and XEdDSA +// as the signing algorithm. +type X25519Signer x25519.PrivateKey + +// Public returns the public key of the current signing key. +func (s X25519Signer) Public() *JSONWebKey { + return &JSONWebKey{ + Key: x25519.PrivateKey(s).Public(), + } +} + +// Algs returns a list of supported signing algorithms, in this case only +// XEdDSA. +func (s X25519Signer) Algs() []SignatureAlgorithm { + return []SignatureAlgorithm{ + XEdDSA, + } +} + +// SignPayload signs a payload with the current signing key using the given +// algorithm, it will fail if it's not XEdDSA. +func (s X25519Signer) SignPayload(payload []byte, alg SignatureAlgorithm) ([]byte, error) { + if alg != XEdDSA { + return nil, errors.Errorf("x25519 key does not support the signature algorithm %s", alg) + } + return x25519.PrivateKey(s).Sign(rand.Reader, payload, crypto.Hash(0)) +} + +// X25519Verifier implements the jose.OpaqueVerifier interface using an X25519 +// key and XEdDSA as a signing algorithm. +type X25519Verifier x25519.PublicKey + +// VerifyPayload verifies the given signature using the X25519 public key, it +// will fail if the signature algorithm is not XEdDSA. +func (v X25519Verifier) VerifyPayload(payload, signature []byte, alg SignatureAlgorithm) error { + if alg != XEdDSA { + return errors.Errorf("x25519 key does not support the signature algorithm %s", alg) + } + if !x25519.Verify(x25519.PublicKey(v), payload, signature) { + return errors.New("failed to verify XEdDSA signature") + } + return nil +} diff --git a/vendor/go.step.sm/crypto/keyutil/fingerprint.go b/vendor/go.step.sm/crypto/keyutil/fingerprint.go new file mode 100644 index 0000000000..4447ff31e7 --- /dev/null +++ b/vendor/go.step.sm/crypto/keyutil/fingerprint.go @@ -0,0 +1,74 @@ +package keyutil + +import ( + "crypto" + "crypto/sha256" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "fmt" + + "go.step.sm/crypto/fingerprint" +) + +// FingerprintEncoding defines the supported encodings in certificate +// fingerprints. +type FingerprintEncoding = fingerprint.Encoding + +// Supported fingerprint encodings. +const ( + // DefaultFingerprint represents the base64 encoding of the fingerprint. + DefaultFingerprint = FingerprintEncoding(0) + // HexFingerprint represents the hex encoding of the fingerprint. + HexFingerprint = fingerprint.HexFingerprint + // Base64Fingerprint represents the base64 encoding of the fingerprint. 
+	Base64Fingerprint = fingerprint.Base64Fingerprint
+	// Base64URLFingerprint represents the base64URL encoding of the fingerprint.
+	Base64URLFingerprint = fingerprint.Base64URLFingerprint
+	// Base64RawFingerprint represents the base64RawStd encoding of the fingerprint.
+	Base64RawFingerprint = fingerprint.Base64RawFingerprint
+	// Base64RawURLFingerprint represents the base64RawURL encoding of the fingerprint.
+	Base64RawURLFingerprint = fingerprint.Base64RawURLFingerprint
+	// EmojiFingerprint represents the emoji encoding of the fingerprint.
+	EmojiFingerprint = fingerprint.EmojiFingerprint
+)
+
+// subjectPublicKeyInfo is a PKIX public key structure defined in RFC 5280.
+type subjectPublicKeyInfo struct {
+	Algorithm        pkix.AlgorithmIdentifier
+	SubjectPublicKey asn1.BitString
+}
+
+// Fingerprint returns the SHA-256 fingerprint of a public key.
+//
+// The fingerprint is calculated from the encoding of the key according to RFC
+// 5280 section 4.2.1.2, but using SHA-256 instead of SHA-1.
+func Fingerprint(pub crypto.PublicKey) (string, error) {
+	return EncodedFingerprint(pub, DefaultFingerprint)
+}
+
+// EncodedFingerprint returns the SHA-256 fingerprint of a public key using the
+// specified encoding.
+//
+// The fingerprint is calculated from the encoding of the key according to RFC
+// 5280 section 4.2.1.2, but using SHA-256 instead of SHA-1.
+func EncodedFingerprint(pub crypto.PublicKey, encoding FingerprintEncoding) (string, error) {
+	b, err := x509.MarshalPKIXPublicKey(pub)
+	if err != nil {
+		return "", fmt.Errorf("error marshaling public key: %w", err)
+	}
+	var info subjectPublicKeyInfo
+	if _, err = asn1.Unmarshal(b, &info); err != nil {
+		return "", fmt.Errorf("error unmarshaling public key: %w", err)
+	}
+	if encoding == DefaultFingerprint {
+		encoding = Base64Fingerprint
+	}
+
+	sum := sha256.Sum256(info.SubjectPublicKey.Bytes)
+	fp := fingerprint.Fingerprint(sum[:], encoding)
+	if fp == "" {
+		return "", fmt.Errorf("error formatting fingerprint: unsupported encoding")
+	}
+	return "SHA256:" + fp, nil
+}
diff --git a/vendor/go.step.sm/crypto/keyutil/key.go b/vendor/go.step.sm/crypto/keyutil/key.go
new file mode 100644
index 0000000000..171cdf3f6e
--- /dev/null
+++ b/vendor/go.step.sm/crypto/keyutil/key.go
@@ -0,0 +1,265 @@
+// Package keyutil implements utilities to generate cryptographic keys.
+package keyutil
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/ed25519"
+	"crypto/elliptic"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"math/big"
+	"sync/atomic"
+
+	"github.com/pkg/errors"
+	"go.step.sm/crypto/x25519"
+	"golang.org/x/crypto/ssh"
+)
+
+var (
+	// DefaultKeyType is the default type of a private key.
+	DefaultKeyType = "EC"
+	// DefaultKeySize is the default size (in # of bits) of a private key.
+	DefaultKeySize = 2048
+	// DefaultKeyCurve is the default curve of a private key.
+	DefaultKeyCurve = "P-256"
+	// DefaultSignatureAlgorithm is the default signature algorithm used on a
+	// certificate with the default key type.
+	DefaultSignatureAlgorithm = x509.ECDSAWithSHA256
+	// MinRSAKeyBytes is the minimum acceptable size (in bytes) for RSA keys
+	// signed by the authority.
+ MinRSAKeyBytes = 256 +) + +type atomicBool int32 + +func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } +func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } +func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) } + +var insecureMode atomicBool + +// Insecure enables the insecure mode in this package and returns a function to +// revert the configuration. The insecure mode removes the minimum limits when +// generating RSA keys. +func Insecure() (revert func()) { + insecureMode.setTrue() + return func() { + insecureMode.setFalse() + } +} + +// PublicKey extracts a public key from a private key. +func PublicKey(priv interface{}) (crypto.PublicKey, error) { + switch k := priv.(type) { + case *rsa.PrivateKey: + return &k.PublicKey, nil + case *ecdsa.PrivateKey: + return &k.PublicKey, nil + case ed25519.PrivateKey: + return k.Public(), nil + case x25519.PrivateKey: + return k.Public(), nil + case *rsa.PublicKey, *ecdsa.PublicKey, ed25519.PublicKey, x25519.PublicKey: + return k, nil + case crypto.Signer: + return k.Public(), nil + default: + return nil, errors.Errorf("unrecognized key type: %T", priv) + } +} + +// GenerateDefaultKey generates a public/private key pair using sane defaults +// for key type, curve, and size. +func GenerateDefaultKey() (crypto.PrivateKey, error) { + return GenerateKey(DefaultKeyType, DefaultKeyCurve, DefaultKeySize) +} + +// GenerateDefaultKeyPair generates a public/private key pair using configured +// default values for key type, curve, and size. +func GenerateDefaultKeyPair() (crypto.PublicKey, crypto.PrivateKey, error) { + return GenerateKeyPair(DefaultKeyType, DefaultKeyCurve, DefaultKeySize) +} + +// GenerateKey generates a key of the given type (kty). +func GenerateKey(kty, crv string, size int) (crypto.PrivateKey, error) { + switch kty { + case "EC", "RSA", "OKP": + return GenerateSigner(kty, crv, size) + case "oct": + return generateOctKey(size) + default: + return nil, errors.Errorf("unrecognized key type: %s", kty) + } +} + +// GenerateKeyPair creates an asymmetric crypto keypair using input +// configuration. +func GenerateKeyPair(kty, crv string, size int) (crypto.PublicKey, crypto.PrivateKey, error) { + signer, err := GenerateSigner(kty, crv, size) + if err != nil { + return nil, nil, err + } + return signer.Public(), signer, nil +} + +// GenerateDefaultSigner returns an asymmetric crypto key that implements +// crypto.Signer using sane defaults. +func GenerateDefaultSigner() (crypto.Signer, error) { + return GenerateSigner(DefaultKeyType, DefaultKeyCurve, DefaultKeySize) +} + +// GenerateSigner creates an asymmetric crypto key that implements +// crypto.Signer. +func GenerateSigner(kty, crv string, size int) (crypto.Signer, error) { + switch kty { + case "EC": + return generateECKey(crv) + case "RSA": + return generateRSAKey(size) + case "OKP": + return generateOKPKey(crv) + default: + return nil, errors.Errorf("unrecognized key type: %s", kty) + } +} + +// ExtractKey returns the given public or private key or extracts the public key +// if a x509.Certificate or x509.CertificateRequest is given. 
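+// For example, for an *x509.Certificate it returns the certificate's
+// PublicKey field, and for an *ssh.Certificate it recurses into the
+// certificate's Key.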
+func ExtractKey(in interface{}) (interface{}, error) {
+	switch k := in.(type) {
+	case *rsa.PublicKey, *rsa.PrivateKey,
+		*ecdsa.PublicKey, *ecdsa.PrivateKey,
+		ed25519.PublicKey, ed25519.PrivateKey,
+		x25519.PublicKey, x25519.PrivateKey:
+		return in, nil
+	case []byte:
+		return in, nil
+	case *x509.Certificate:
+		return k.PublicKey, nil
+	case *x509.CertificateRequest:
+		return k.PublicKey, nil
+	case ssh.CryptoPublicKey:
+		return k.CryptoPublicKey(), nil
+	case *ssh.Certificate:
+		return ExtractKey(k.Key)
+	default:
+		return nil, errors.Errorf("cannot extract the key from type '%T'", k)
+	}
+}
+
+// VerifyPair verifies that the public key matches the given private key.
+func VerifyPair(pub crypto.PublicKey, priv crypto.PrivateKey) error {
+	signer, ok := priv.(crypto.Signer)
+	if !ok {
+		return errors.New("private key type does not implement crypto.Signer")
+	}
+	if !Equal(pub, signer.Public()) {
+		return errors.New("private key does not match public key")
+	}
+	return nil
+}
+
+// Equal reports whether x and y are the same key.
+func Equal(x, y any) bool {
+	switch xx := x.(type) {
+	case *ecdsa.PublicKey:
+		yy, ok := y.(*ecdsa.PublicKey)
+		return ok && xx.Equal(yy)
+	case *ecdsa.PrivateKey:
+		yy, ok := y.(*ecdsa.PrivateKey)
+		return ok && xx.Equal(yy)
+	case *rsa.PublicKey:
+		yy, ok := y.(*rsa.PublicKey)
+		return ok && xx.Equal(yy)
+	case *rsa.PrivateKey:
+		yy, ok := y.(*rsa.PrivateKey)
+		return ok && xx.Equal(yy)
+	case ed25519.PublicKey:
+		yy, ok := y.(ed25519.PublicKey)
+		return ok && xx.Equal(yy)
+	case ed25519.PrivateKey:
+		yy, ok := y.(ed25519.PrivateKey)
+		return ok && xx.Equal(yy)
+	case x25519.PublicKey:
+		yy, ok := y.(x25519.PublicKey)
+		return ok && xx.Equal(yy)
+	case x25519.PrivateKey:
+		yy, ok := y.(x25519.PrivateKey)
+		return ok && xx.Equal(yy)
+	case []byte: // special case for symmetric keys
+		yy, ok := y.([]byte)
+		return ok && bytes.Equal(xx, yy)
+	default:
+		return false
+	}
+}
+
+func generateECKey(crv string) (crypto.Signer, error) {
+	var c elliptic.Curve
+	switch crv {
+	case "P-256":
+		c = elliptic.P256()
+	case "P-384":
+		c = elliptic.P384()
+	case "P-521":
+		c = elliptic.P521()
+	default:
+		return nil, errors.Errorf("invalid value for argument crv (crv: '%s')", crv)
+	}
+
+	key, err := ecdsa.GenerateKey(c, rand.Reader)
+	if err != nil {
+		return nil, errors.Wrap(err, "error generating EC key")
+	}
+
+	return key, nil
+}
+
+func generateRSAKey(bits int) (crypto.Signer, error) {
+	if minBits := MinRSAKeyBytes * 8; !insecureMode.isSet() && bits < minBits {
+		return nil, errors.Errorf("the size of the RSA key should be at least %d bits", minBits)
+	}
+
+	key, err := rsa.GenerateKey(rand.Reader, bits)
+	if err != nil {
+		return nil, errors.Wrap(err, "error generating RSA key")
+	}
+
+	return key, nil
+}
+
+func generateOKPKey(crv string) (crypto.Signer, error) {
+	switch crv {
+	case "Ed25519":
+		_, key, err := ed25519.GenerateKey(rand.Reader)
+		if err != nil {
+			return nil, errors.Wrap(err, "error generating Ed25519 key")
+		}
+		return key, nil
+	case "X25519":
+		_, key, err := x25519.GenerateKey(rand.Reader)
+		if err != nil {
+			return nil, errors.Wrap(err, "error generating X25519 key")
+		}
+		return key, nil
+	default:
+		return nil, errors.Errorf("missing or invalid value for argument 'crv'. 
"+ + "expected 'Ed25519' or 'X25519', but got '%s'", crv) + } +} + +func generateOctKey(size int) (interface{}, error) { + const chars = "abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" + result := make([]byte, size) + for i := range result { + num, err := rand.Int(rand.Reader, big.NewInt(int64(len(chars)))) + if err != nil { + return nil, err + } + result[i] = chars[num.Int64()] + } + return result, nil +} diff --git a/vendor/go.step.sm/crypto/pemutil/cosign.go b/vendor/go.step.sm/crypto/pemutil/cosign.go new file mode 100644 index 0000000000..d28c9f7d70 --- /dev/null +++ b/vendor/go.step.sm/crypto/pemutil/cosign.go @@ -0,0 +1,79 @@ +package pemutil + +import ( + "crypto" + "crypto/x509" + "encoding/json" + + "github.com/pkg/errors" + "golang.org/x/crypto/nacl/secretbox" + "golang.org/x/crypto/scrypt" +) + +type cosignEnvelope struct { + KDF cosignKDF `json:"kdf"` + Cipher cosignCipher `json:"cipher"` + Ciphertext []byte `json:"ciphertext"` +} + +type cosignKDF struct { + Name string `json:"name"` + Params cosignScryptParams `json:"params"` + Salt []byte `json:"salt"` +} + +type cosignScryptParams struct { + N int `json:"N"` + R int `json:"r"` + P int `json:"p"` +} + +type cosignCipher struct { + Name string `json:"name"` + Nonce []byte `json:"nonce"` +} + +// ParseCosignPrivateKey returns the private key encoded using cosign envelope. +// If an incorrect password is detected an x509.IncorrectPasswordError is +// returned. +// +// Cosign keys are encrypted under a password using scrypt as a KDF and +// nacl/secretbox for encryption. +func ParseCosignPrivateKey(data, password []byte) (crypto.PrivateKey, error) { + var env cosignEnvelope + if err := json.Unmarshal(data, &env); err != nil { + return nil, errors.Wrap(err, "error unmarshaling key") + } + if env.KDF.Name != "scrypt" { + return nil, errors.Errorf("error parsing key: unsupported kdf %s", env.KDF.Name) + } + if env.Cipher.Name != "nacl/secretbox" { + return nil, errors.Errorf("error parsing key: unsupported cipher %s", env.Cipher.Name) + } + if len(env.Cipher.Nonce) != 24 { + return nil, errors.New("error parsing key: nonce must be 24 bytes long") + } + + params := env.KDF.Params + k, err := scrypt.Key(password, env.KDF.Salt, params.N, params.R, params.P, 32) + if err != nil { + return nil, errors.Wrap(err, "error generating key") + } + + var nonce [24]byte + var key [32]byte + copy(nonce[:], env.Cipher.Nonce) + copy(key[:], k) + + out, ok := secretbox.Open(nil, env.Ciphertext, &nonce, &key) + if !ok { + return nil, x509.IncorrectPasswordError + } + + priv, err := x509.ParsePKCS8PrivateKey(out) + if err != nil { + return nil, errors.Wrap(err, "error parsing pkcs8 key") + } + + return priv, nil +} diff --git a/vendor/go.step.sm/crypto/pemutil/pem.go b/vendor/go.step.sm/crypto/pemutil/pem.go new file mode 100644 index 0000000000..9202510d2d --- /dev/null +++ b/vendor/go.step.sm/crypto/pemutil/pem.go @@ -0,0 +1,856 @@ +// Package pemutil implements utilities to parse keys and certificates. It also +// includes a method to serialize keys, X.509 certificates and certificate +// requests to PEM. 
+package pemutil + +import ( + "bytes" + "crypto/ecdh" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/pem" + "fmt" + "math/big" + "os" + "strings" + + "github.com/pkg/errors" + "go.step.sm/crypto/internal/utils" + "go.step.sm/crypto/keyutil" + "go.step.sm/crypto/x25519" + "golang.org/x/crypto/ssh" +) + +// DefaultEncCipher is the default algorithm used when encrypting sensitive +// data in the PEM format. +var DefaultEncCipher = x509.PEMCipherAES256 + +// PasswordPrompter defines the function signature for the PromptPassword +// callback. +type PasswordPrompter func(s string) ([]byte, error) + +// FileWriter defines the function signature for the WriteFile callback. +type FileWriter func(filename string, data []byte, perm os.FileMode) error + +// PromptPassword is a method used to prompt for a password to decode encrypted +// keys. If this method is not defined and the key or password are not passed, +// the parse of the key will fail. +var PromptPassword PasswordPrompter + +// WriteFile is a method used to write a file, by default it uses a wrapper over +// ioutil.WriteFile, but it can be set to a custom method, that for example can +// check if a file exists and prompts the user if it should be overwritten. +var WriteFile FileWriter = utils.WriteFile + +// PEMBlockHeader is the expected header for any PEM formatted block. +var PEMBlockHeader = []byte("-----BEGIN ") + +// context add options to the pem methods. +type context struct { + filename string + perm os.FileMode + password []byte + pkcs8 bool + openSSH bool + comment string + firstBlock bool + passwordPrompt string + passwordPrompter PasswordPrompter +} + +// newContext initializes the context with a filename. +func newContext(name string) *context { + return &context{ + filename: name, + perm: 0600, + } +} + +// apply the context options and return the first error if exists. +func (c *context) apply(opts []Options) error { + for _, fn := range opts { + if err := fn(c); err != nil { + return err + } + } + return nil +} + +// promptPassword returns the password or prompts for one. +func (c *context) promptPassword() ([]byte, error) { + switch { + case len(c.password) > 0: + return c.password, nil + case c.passwordPrompter != nil: + return c.passwordPrompter(c.passwordPrompt) + case PromptPassword != nil: + return PromptPassword(fmt.Sprintf("Please enter the password to decrypt %s", c.filename)) + default: + return nil, errors.Errorf("error decoding %s: key is password protected", c.filename) + } +} + +// promptEncryptPassword returns the password or prompts for one if +// WithPassword, WithPasswordFile or WithPasswordPrompt have been used. This +// method is used to encrypt keys, and it will only use the options passed, it +// will not use the global PromptPassword. +func (c *context) promptEncryptPassword() ([]byte, error) { + switch { + case len(c.password) > 0: + return c.password, nil + case c.passwordPrompter != nil: + return c.passwordPrompter(c.passwordPrompt) + default: + return nil, nil + } +} + +// Options is the type to add attributes to the context. +type Options func(o *context) error + +// withContext replaces the context with the given one. +func withContext(c *context) Options { + return func(ctx *context) error { + *ctx = *c + return nil + } +} + +// WithFilename is a method that adds the given filename to the context. 
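+// The filename is mainly used in error messages and password prompts; a
+// hypothetical call could look like:
+//
+//	k, err := Parse(b, WithFilename("key.pem"))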
+func WithFilename(name string) Options { + return func(ctx *context) error { + ctx.filename = name + // Default perm mode if not set + if ctx.perm == 0 { + ctx.perm = 0600 + } + return nil + } +} + +// ToFile is a method that adds the given filename and permissions to the +// context. It is used in the Serialize to store PEM in disk. +func ToFile(name string, perm os.FileMode) Options { + return func(ctx *context) error { + ctx.filename = name + ctx.perm = perm + return nil + } +} + +// WithPassword is a method that adds the given password to the context. +func WithPassword(pass []byte) Options { + return func(ctx *context) error { + ctx.password = pass + return nil + } +} + +// WithPasswordFile is a method that adds the password in a file to the context. +func WithPasswordFile(filename string) Options { + return func(ctx *context) error { + b, err := utils.ReadPasswordFromFile(filename) + if err != nil { + return err + } + ctx.password = b + return nil + } +} + +// WithPasswordPrompt ask the user for a password and adds it to the context. +func WithPasswordPrompt(prompt string, fn PasswordPrompter) Options { + return func(ctx *context) error { + ctx.passwordPrompt = prompt + ctx.passwordPrompter = fn + return nil + } +} + +// WithPKCS8 with v set to true returns an option used in the Serialize method +// to use the PKCS#8 encoding form on the private keys. With v set to false +// default form will be used. +func WithPKCS8(v bool) Options { + return func(ctx *context) error { + ctx.pkcs8 = v + return nil + } +} + +// WithOpenSSH is an option used in the Serialize method to use OpenSSH encoding +// form on the private keys. With v set to false default form will be used. +func WithOpenSSH(v bool) Options { + return func(ctx *context) error { + ctx.openSSH = v + return nil + } +} + +// WithComment is an option used in the Serialize method to add a comment in the +// OpenSSH private keys. WithOpenSSH must be set to true too. +func WithComment(comment string) Options { + return func(ctx *context) error { + ctx.comment = comment + return nil + } +} + +// WithFirstBlock will avoid failing if a PEM contains more than one block or +// certificate and it will only look at the first. +func WithFirstBlock() Options { + return func(ctx *context) error { + ctx.firstBlock = true + return nil + } +} + +// ParseCertificate extracts the first certificate from the given pem. +func ParseCertificate(pemData []byte) (*x509.Certificate, error) { + var block *pem.Block + for len(pemData) > 0 { + block, pemData = pem.Decode(pemData) + if block == nil { + return nil, errors.New("error decoding pem block") + } + if block.Type != "CERTIFICATE" || len(block.Headers) != 0 { + continue + } + + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, errors.Wrap(err, "error parsing certificate") + } + return cert, nil + } + + return nil, errors.New("error parsing certificate: no certificate found") +} + +// ParseCertificateBundle returns a list of *x509.Certificate parsed from +// the given bytes. +// +// - supports PEM and DER certificate formats +// - If a DER-formatted file is given only one certificate will be returned. 
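+//
+// Illustrative sketch of the expected call pattern (pemData is assumed to
+// hold one or more PEM-encoded certificates):
+//
+//	certs, err := ParseCertificateBundle(pemData)
+//	if err != nil {
+//		// handle error
+//	}
+//	for _, crt := range certs {
+//		fmt.Println(crt.Subject)
+//	}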
+func ParseCertificateBundle(data []byte) ([]*x509.Certificate, error) { + var err error + + // PEM format + if bytes.Contains(data, PEMBlockHeader) { + var block *pem.Block + var bundle []*x509.Certificate + for len(data) > 0 { + block, data = pem.Decode(data) + if block == nil { + break + } + if block.Type != "CERTIFICATE" || len(block.Headers) != 0 { + continue + } + var crt *x509.Certificate + crt, err = x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, &InvalidPEMError{ + Err: err, + Type: PEMTypeCertificate, + } + } + bundle = append(bundle, crt) + } + if len(bundle) == 0 { + return nil, &InvalidPEMError{ + Type: PEMTypeCertificate, + } + } + return bundle, nil + } + + // DER format (binary) + crt, err := x509.ParseCertificate(data) + if err != nil { + return nil, &InvalidPEMError{ + Message: fmt.Sprintf("error parsing certificate as DER format: %v", err), + Type: PEMTypeCertificate, + } + } + return []*x509.Certificate{crt}, nil +} + +// ParseCertificateRequest extracts the first *x509.CertificateRequest +// from the given data. +// +// - supports PEM and DER certificate formats +// - If a DER-formatted file is given only one certificate will be returned. +func ParseCertificateRequest(data []byte) (*x509.CertificateRequest, error) { + // PEM format + if bytes.Contains(data, PEMBlockHeader) { + var block *pem.Block + for len(data) > 0 { + block, data = pem.Decode(data) + if block == nil { + break + } + if !strings.HasSuffix(block.Type, "CERTIFICATE REQUEST") { + continue + } + csr, err := x509.ParseCertificateRequest(block.Bytes) + if err != nil { + return nil, &InvalidPEMError{ + Type: PEMTypeCertificateRequest, + Err: err, + } + } + + return csr, nil + } + } + + // DER format (binary) + csr, err := x509.ParseCertificateRequest(data) + if err != nil { + return nil, &InvalidPEMError{ + Message: fmt.Sprintf("error parsing certificate request as DER format: %v", err), + Type: PEMTypeCertificateRequest, + } + } + return csr, nil +} + +// PEMType represents a PEM block type. (e.g., CERTIFICATE, CERTIFICATE REQUEST, etc.) +type PEMType int + +func (pt PEMType) String() string { + switch pt { + case PEMTypeCertificate: + return "certificate" + case PEMTypeCertificateRequest: + return "certificate request" + default: + return "undefined" + } +} + +const ( + // PEMTypeUndefined undefined + PEMTypeUndefined = iota + // PEMTypeCertificate CERTIFICATE + PEMTypeCertificate + // PEMTypeCertificateRequest CERTIFICATE REQUEST + PEMTypeCertificateRequest +) + +// InvalidPEMError represents an error that occurs when parsing a file with +// PEM encoded data. +type InvalidPEMError struct { + Type PEMType + File string + Message string + Err error +} + +func (e *InvalidPEMError) Error() string { + switch { + case e.Message != "": + return e.Message + case e.Err != nil: + return fmt.Sprintf("error decoding PEM data: %v", e.Err) + default: + if e.Type == PEMTypeUndefined { + return "does not contain valid PEM encoded data" + } + return fmt.Sprintf("does not contain a valid PEM encoded %s", e.Type) + } +} + +func (e *InvalidPEMError) Unwrap() error { + return e.Err +} + +// ReadCertificate returns a *x509.Certificate from the given filename. It +// supports certificates formats PEM and DER. 
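+//
+// A short sketch (the filename and the option choice are illustrative):
+//
+//	crt, err := ReadCertificate("bundle.pem", WithFirstBlock())
+//	if err != nil {
+//		// handle error
+//	}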
+func ReadCertificate(filename string, opts ...Options) (*x509.Certificate, error) { + // Populate options + ctx := newContext(filename) + if err := ctx.apply(opts); err != nil { + return nil, err + } + + bundle, err := ReadCertificateBundle(filename) + switch { + case err != nil: + return nil, err + case len(bundle) == 0: + return nil, errors.Errorf("file %s does not contain a valid PEM or DER formatted certificate", filename) + case len(bundle) > 1 && !ctx.firstBlock: + return nil, errors.Errorf("error decoding %s: contains more than one PEM encoded block", filename) + default: + return bundle[0], nil + } +} + +// ReadCertificateBundle reads the given filename and returns a list of +// *x509.Certificate. +// +// - supports PEM and DER certificate formats +// - If a DER-formatted file is given only one certificate will be returned. +func ReadCertificateBundle(filename string) ([]*x509.Certificate, error) { + b, err := utils.ReadFile(filename) + if err != nil { + return nil, err + } + + bundle, err := ParseCertificateBundle(b) + if err != nil { + return nil, fmt.Errorf("error parsing %s: %w", filename, err) + } + return bundle, nil +} + +// ReadCertificateRequest reads the given filename and returns a +// *x509.CertificateRequest. +// +// - supports PEM and DER Certificate formats. +// - supports reading from STDIN with filename `-`. +func ReadCertificateRequest(filename string) (*x509.CertificateRequest, error) { + b, err := utils.ReadFile(filename) + if err != nil { + return nil, err + } + + cr, err := ParseCertificateRequest(b) + if err != nil { + return nil, fmt.Errorf("error parsing %s: %w", filename, err) + } + return cr, nil +} + +// Parse returns the key or certificate PEM-encoded in the given bytes. +func Parse(b []byte, opts ...Options) (interface{}, error) { + // Populate options + ctx := newContext("PEM") + if err := ctx.apply(opts); err != nil { + return nil, err + } + + block, rest := pem.Decode(b) + switch { + case block == nil: + return nil, errors.Errorf("error decoding %s: not a valid PEM encoded block", ctx.filename) + case len(bytes.TrimSpace(rest)) > 0 && !ctx.firstBlock: + return nil, errors.Errorf("error decoding %s: contains more than one PEM encoded block", ctx.filename) + } + + // PEM is encrypted: ask for password + if block.Headers["Proc-Type"] == "4,ENCRYPTED" || block.Type == "ENCRYPTED PRIVATE KEY" { + pass, err := ctx.promptPassword() + if err != nil { + return nil, err + } + + block.Bytes, err = DecryptPEMBlock(block, pass) + if err != nil { + return nil, errors.Wrapf(err, "error decrypting %s", ctx.filename) + } + } + + switch block.Type { + case "PUBLIC KEY": + pub, err := x509.ParsePKIXPublicKey(block.Bytes) + return pub, errors.Wrapf(err, "error parsing %s", ctx.filename) + case "RSA PRIVATE KEY": + priv, err := x509.ParsePKCS1PrivateKey(block.Bytes) + return priv, errors.Wrapf(err, "error parsing %s", ctx.filename) + case "EC PRIVATE KEY": + priv, err := x509.ParseECPrivateKey(block.Bytes) + return priv, errors.Wrapf(err, "error parsing %s", ctx.filename) + case "PRIVATE KEY", "ENCRYPTED PRIVATE KEY": + priv, err := x509.ParsePKCS8PrivateKey(block.Bytes) + return priv, errors.Wrapf(err, "error parsing %s", ctx.filename) + case "OPENSSH PRIVATE KEY": + priv, err := ParseOpenSSHPrivateKey(b, withContext(ctx)) + return priv, errors.Wrapf(err, "error parsing %s", ctx.filename) + case "CERTIFICATE": + crt, err := x509.ParseCertificate(block.Bytes) + return crt, errors.Wrapf(err, "error parsing %s", ctx.filename) + case "CERTIFICATE REQUEST", "NEW 
CERTIFICATE REQUEST": + csr, err := x509.ParseCertificateRequest(block.Bytes) + return csr, errors.Wrapf(err, "error parsing %s", ctx.filename) + case "ENCRYPTED COSIGN PRIVATE KEY": + pass, err := ctx.promptPassword() + if err != nil { + return nil, err + } + priv, err := ParseCosignPrivateKey(block.Bytes, pass) + return priv, errors.Wrapf(err, "error parsing %s", ctx.filename) + case "NEBULA X25519 PUBLIC KEY": + if len(block.Bytes) != x25519.PublicKeySize { + return nil, errors.Errorf("error parsing %s: key is not 32 bytes", ctx.filename) + } + return x25519.PublicKey(block.Bytes), nil + case "NEBULA X25519 PRIVATE KEY": + if len(block.Bytes) != x25519.PrivateKeySize { + return nil, errors.Errorf("error parsing %s: key is not 32 bytes", ctx.filename) + } + return x25519.PrivateKey(block.Bytes), nil + default: + return nil, errors.Errorf("error decoding %s: contains an unexpected header '%s'", ctx.filename, block.Type) + } +} + +// ParseKey returns the key or the public key of a certificate or certificate +// signing request in the given PEM-encoded bytes. +func ParseKey(b []byte, opts ...Options) (interface{}, error) { + k, err := Parse(b, opts...) + if err != nil { + return nil, err + } + return keyutil.ExtractKey(k) +} + +// Read returns the key or certificate encoded in the given PEM file. +// If the file is encrypted it will ask for a password and it will try +// to decrypt it. +// +// Supported keys algorithms are RSA and EC. Supported standards for private +// keys are PKCS#1, PKCS#8, RFC5915 for EC, and base64-encoded DER for +// certificates and public keys. +func Read(filename string, opts ...Options) (interface{}, error) { + b, err := utils.ReadFile(filename) + if err != nil { + return nil, err + } + + // force given filename + opts = append(opts, WithFilename(filename)) + return Parse(b, opts...) +} + +// Serialize will serialize the input to a PEM formatted block and apply +// modifiers. 
+func Serialize(in interface{}, opts ...Options) (*pem.Block, error) {
+	ctx := new(context)
+	if err := ctx.apply(opts); err != nil {
+		return nil, err
+	}
+
+	var p *pem.Block
+	var isPrivateKey bool
+	switch k := in.(type) {
+	case *rsa.PublicKey, *ecdsa.PublicKey, ed25519.PublicKey:
+		b, err := x509.MarshalPKIXPublicKey(k)
+		if err != nil {
+			return nil, errors.WithStack(err)
+		}
+		p = &pem.Block{
+			Type:  "PUBLIC KEY",
+			Bytes: b,
+		}
+	case *rsa.PrivateKey:
+		isPrivateKey = true
+		switch {
+		case ctx.pkcs8:
+			b, err := x509.MarshalPKCS8PrivateKey(k)
+			if err != nil {
+				return nil, err
+			}
+			p = &pem.Block{
+				Type:  "PRIVATE KEY",
+				Bytes: b,
+			}
+		case ctx.openSSH:
+			return SerializeOpenSSHPrivateKey(k, withContext(ctx))
+		default:
+			p = &pem.Block{
+				Type:  "RSA PRIVATE KEY",
+				Bytes: x509.MarshalPKCS1PrivateKey(k),
+			}
+		}
+	case *ecdsa.PrivateKey:
+		isPrivateKey = true
+		switch {
+		case ctx.pkcs8:
+			b, err := x509.MarshalPKCS8PrivateKey(k)
+			if err != nil {
+				return nil, err
+			}
+			p = &pem.Block{
+				Type:  "PRIVATE KEY",
+				Bytes: b,
+			}
+		case ctx.openSSH:
+			return SerializeOpenSSHPrivateKey(k, withContext(ctx))
+		default:
+			b, err := x509.MarshalECPrivateKey(k)
+			if err != nil {
+				return nil, errors.Wrap(err, "failed to marshal private key")
+			}
+			p = &pem.Block{
+				Type:  "EC PRIVATE KEY",
+				Bytes: b,
+			}
+		}
+	case ed25519.PrivateKey:
+		isPrivateKey = true
+		switch {
+		case !ctx.pkcs8 && ctx.openSSH:
+			return SerializeOpenSSHPrivateKey(k, withContext(ctx))
+		default: // Ed25519 keys will use pkcs8 by default
+			ctx.pkcs8 = true
+			b, err := x509.MarshalPKCS8PrivateKey(k)
+			if err != nil {
+				return nil, err
+			}
+			p = &pem.Block{
+				Type:  "PRIVATE KEY",
+				Bytes: b,
+			}
+		}
+	case *x509.Certificate:
+		p = &pem.Block{
+			Type:  "CERTIFICATE",
+			Bytes: k.Raw,
+		}
+	case *x509.CertificateRequest:
+		p = &pem.Block{
+			Type:  "CERTIFICATE REQUEST",
+			Bytes: k.Raw,
+		}
+	default:
+		return nil, errors.Errorf("cannot serialize type '%T', value '%v'", k, k)
+	}
+
+	if isPrivateKey {
+		// Request password if needed.
+		password, err := ctx.promptEncryptPassword()
+		if err != nil {
+			return nil, err
+		}
+
+		// Apply options on the PEM blocks.
+		if password != nil {
+			if ctx.pkcs8 {
+				var err error
+				p, err = EncryptPKCS8PrivateKey(rand.Reader, p.Bytes, password, DefaultEncCipher)
+				if err != nil {
+					return nil, err
+				}
+			} else {
+				var err error
+				p, err = x509.EncryptPEMBlock(rand.Reader, p.Type, p.Bytes, password, DefaultEncCipher)
+				if err != nil {
+					return nil, errors.Wrap(err, "failed to serialize to PEM")
+				}
+			}
+		}
+	}
+
+	if ctx.filename != "" {
+		if err := WriteFile(ctx.filename, pem.EncodeToMemory(p), ctx.perm); err != nil {
+			return nil, err
+		}
+	}
+
+	return p, nil
+}
+
+// ParseDER parses the given DER-encoded bytes and returns the public or
+// private key encoded in them.
+func ParseDER(b []byte) (interface{}, error) {
+	// Try private keys
+	key, err := x509.ParsePKCS8PrivateKey(b)
+	if err != nil {
+		if key, err = x509.ParseECPrivateKey(b); err != nil {
+			key, err = x509.ParsePKCS1PrivateKey(b)
+		}
+	}
+
+	// Try public key
+	if err != nil {
+		if key, err = x509.ParsePKIXPublicKey(b); err != nil {
+			if key, err = x509.ParsePKCS1PublicKey(b); err != nil {
+				return nil, errors.New("error decoding DER; bad format")
+			}
+		}
+	}
+
+	return key, nil
+}
+
+// ParseSSH parses a public key from an authorized_keys file used in
+// OpenSSH according to the sshd(8) manual page.
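+//
+// For example (a sketch; the authorized_keys path is an assumption):
+//
+//	b, _ := os.ReadFile("/home/user/.ssh/authorized_keys")
+//	pub, err := ParseSSH(b)
+//	if err != nil {
+//		// handle error
+//	}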
+func ParseSSH(b []byte) (interface{}, error) { + key, _, _, _, err := ssh.ParseAuthorizedKey(b) + if err != nil { + return nil, errors.Wrap(err, "error parsing OpenSSH key") + } + + if cert, ok := key.(*ssh.Certificate); ok { + key = cert.Key + } + + switch key.Type() { + case ssh.KeyAlgoRSA: + var w struct { + Name string + E *big.Int + N *big.Int + } + if err := ssh.Unmarshal(key.Marshal(), &w); err != nil { + return nil, errors.Wrap(err, "error unmarshaling key") + } + + if w.E.BitLen() > 24 { + return nil, errors.New("error unmarshaling key: exponent too large") + } + e := w.E.Int64() + if e < 3 || e&1 == 0 { + return nil, errors.New("error unmarshaling key: incorrect exponent") + } + + key := new(rsa.PublicKey) + key.E = int(e) + key.N = w.N + return key, nil + + case ssh.KeyAlgoECDSA256, ssh.KeyAlgoECDSA384, ssh.KeyAlgoECDSA521: + var w struct { + Name string + ID string + KeyBytes []byte + } + if err := ssh.Unmarshal(key.Marshal(), &w); err != nil { + return nil, errors.Wrap(err, "error unmarshaling key") + } + + var c ecdh.Curve + switch w.Name { + case ssh.KeyAlgoECDSA256: + c = ecdh.P256() + case ssh.KeyAlgoECDSA384: + c = ecdh.P384() + case ssh.KeyAlgoECDSA521: + c = ecdh.P521() + default: + return nil, errors.Errorf("unsupported ecdsa curve %s", w.Name) + } + + var p *ecdh.PublicKey + if p, err = c.NewPublicKey(w.KeyBytes); err != nil { + return nil, errors.Wrapf(err, "failed decoding %s key", w.Name) + } + + // convert ECDH public key to ECDSA public key to keep + // the returned type backwards compatible. + rawKey := p.Bytes() + switch p.Curve() { + case ecdh.P256(): + return &ecdsa.PublicKey{ + Curve: elliptic.P256(), + X: big.NewInt(0).SetBytes(rawKey[1:33]), + Y: big.NewInt(0).SetBytes(rawKey[33:]), + }, nil + case ecdh.P384(): + return &ecdsa.PublicKey{ + Curve: elliptic.P384(), + X: big.NewInt(0).SetBytes(rawKey[1:49]), + Y: big.NewInt(0).SetBytes(rawKey[49:]), + }, nil + case ecdh.P521(): + return &ecdsa.PublicKey{ + Curve: elliptic.P521(), + X: big.NewInt(0).SetBytes(rawKey[1:67]), + Y: big.NewInt(0).SetBytes(rawKey[67:]), + }, nil + default: + return nil, errors.New("cannot convert non-NIST *ecdh.PublicKey to *ecdsa.PublicKey") + } + case ssh.KeyAlgoED25519: + var w struct { + Name string + KeyBytes []byte + } + if err := ssh.Unmarshal(key.Marshal(), &w); err != nil { + return nil, errors.Wrap(err, "error unmarshaling key") + } + return ed25519.PublicKey(w.KeyBytes), nil + case ssh.KeyAlgoDSA: + return nil, errors.Errorf("DSA keys not supported") + default: + return nil, errors.Errorf("unsupported key type %T", key) + } +} + +// BundleCertificate adds PEM-encoded certificates to a PEM-encoded certificate +// bundle if not already in the bundle. +func BundleCertificate(bundlePEM []byte, certsPEM ...[]byte) ([]byte, bool, error) { + bundle, err := ParseCertificateBundle(bundlePEM) + if err != nil { + return nil, false, fmt.Errorf("invalid bundle: %w", err) + } + + sums := make(map[[sha256.Size224]byte]bool, len(bundle)+len(certsPEM)) + for i := range bundle { + sums[sha256.Sum224(bundle[i].Raw)] = true + } + + modified := false + + for i := range certsPEM { + cert, err := ParseCertificate(certsPEM[i]) + if err != nil { + return nil, false, fmt.Errorf("invalid certificate %d: %w", i, err) + } + certSum := sha256.Sum224(cert.Raw) + if sums[certSum] { + continue + } + sums[certSum] = true + bundlePEM = append(bundlePEM, certsPEM[i]...) 
+		modified = true
+	}
+
+	return bundlePEM, modified, nil
+}
+
+// UnbundleCertificate removes PEM-encoded certificates from a PEM-encoded
+// certificate bundle.
+func UnbundleCertificate(bundlePEM []byte, certsPEM ...[]byte) ([]byte, bool, error) {
+	if len(certsPEM) == 0 {
+		return bundlePEM, false, nil
+	}
+	drop := make(map[[sha256.Size224]byte]bool, len(certsPEM))
+	for i := range certsPEM {
+		certs, err := ParseCertificateBundle(certsPEM[i])
+		if err != nil {
+			return nil, false, fmt.Errorf("invalid certificate %d: %w", i, err)
+		}
+		for _, cert := range certs {
+			drop[sha256.Sum224(cert.Raw)] = true
+		}
+	}
+
+	var modified bool
+	var keep []byte
+
+	bundle, err := ParseCertificateBundle(bundlePEM)
+	if err != nil {
+		return nil, false, fmt.Errorf("invalid bundle: %w", err)
+	}
+	for _, cert := range bundle {
+		sum := sha256.Sum224(cert.Raw)
+		if drop[sum] {
+			modified = true
+			continue
+		}
+		keep = append(keep, pem.EncodeToMemory(&pem.Block{
+			Type:  "CERTIFICATE",
+			Bytes: cert.Raw,
+		})...)
+	}
+
+	return keep, modified, nil
+}
diff --git a/vendor/go.step.sm/crypto/pemutil/pkcs8.go b/vendor/go.step.sm/crypto/pemutil/pkcs8.go
new file mode 100644
index 0000000000..fb6c96c295
--- /dev/null
+++ b/vendor/go.step.sm/crypto/pemutil/pkcs8.go
@@ -0,0 +1,353 @@
+package pemutil
+
+import (
+	"crypto/aes"
+	"crypto/cipher"
+	"crypto/des" //nolint:gosec // support for legacy keys
+	"crypto/sha1" //nolint:gosec // support for legacy keys
+	"crypto/sha256"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"encoding/pem"
+	"hash"
+	"io"
+
+	"github.com/pkg/errors"
+	"golang.org/x/crypto/pbkdf2"
+)
+
+// PBKDF2SaltSize is the default size of the salt for PBKDF2, 128-bit salt.
+const PBKDF2SaltSize = 16
+
+// PBKDF2Iterations is the default number of iterations for PBKDF2.
+//
+// 600k is the current OWASP recommendation (Dec 2022)
+// https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html#pbkdf2
+//
+// NIST recommends at least 10k iterations (SP 800-63B), and in 2023 1Password
+// increased its number of iterations from 100k to 650k.
+const PBKDF2Iterations = 600000
+
+// pkcs8 reflects an ASN.1, PKCS#8 PrivateKey. See
+// ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-8/pkcs-8v1_2.asn
+// and RFC 5208.
+type pkcs8 struct {
+	Version    int
+	Algo       pkix.AlgorithmIdentifier
+	PrivateKey []byte
+	// optional attributes omitted.
+} + +type publicKeyInfo struct { + Raw asn1.RawContent + Algo pkix.AlgorithmIdentifier + PublicKey asn1.BitString +} + +// Encrypted pkcs8 +// Based on https://github.com/youmark/pkcs8 +// MIT license +type prfParam struct { + Algo asn1.ObjectIdentifier + NullParam asn1.RawValue +} + +type pbkdf2Params struct { + Salt []byte + IterationCount int + PrfParam prfParam `asn1:"optional"` +} + +type pbkdf2Algorithms struct { + Algo asn1.ObjectIdentifier + PBKDF2Params pbkdf2Params +} + +type pbkdf2Encs struct { + EncryAlgo asn1.ObjectIdentifier + IV []byte +} + +type pbes2Params struct { + KeyDerivationFunc pbkdf2Algorithms + EncryptionScheme pbkdf2Encs +} + +type encryptedlAlgorithmIdentifier struct { + Algorithm asn1.ObjectIdentifier + Parameters pbes2Params +} + +type encryptedPrivateKeyInfo struct { + Algo encryptedlAlgorithmIdentifier + PrivateKey []byte +} + +var ( + // key derivation functions + oidPKCS5PBKDF2 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 12} + oidPBES2 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 13} + oidHMACWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 113549, 2, 9} + + // encryption + oidAES128CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 2} + oidAES192CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 22} + oidAES256CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42} + oidDESCBC = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 7} + oidD3DESCBC = asn1.ObjectIdentifier{1, 2, 840, 113549, 3, 7} +) + +// rfc1423Algo holds a method for enciphering a PEM block. +type rfc1423Algo struct { + cipher x509.PEMCipher + name string + cipherFunc func(key []byte) (cipher.Block, error) + keySize int + blockSize int + identifier asn1.ObjectIdentifier +} + +// rfc1423Algos holds a slice of the possible ways to encrypt a PEM +// block. The ivSize numbers were taken from the OpenSSL source. +var rfc1423Algos = []rfc1423Algo{{ + cipher: x509.PEMCipherDES, + name: "DES-CBC", + cipherFunc: des.NewCipher, + keySize: 8, + blockSize: des.BlockSize, + identifier: oidDESCBC, +}, { + cipher: x509.PEMCipher3DES, + name: "DES-EDE3-CBC", + cipherFunc: des.NewTripleDESCipher, + keySize: 24, + blockSize: des.BlockSize, + identifier: oidD3DESCBC, +}, { + cipher: x509.PEMCipherAES128, + name: "AES-128-CBC", + cipherFunc: aes.NewCipher, + keySize: 16, + blockSize: aes.BlockSize, + identifier: oidAES128CBC, +}, { + cipher: x509.PEMCipherAES192, + name: "AES-192-CBC", + cipherFunc: aes.NewCipher, + keySize: 24, + blockSize: aes.BlockSize, + identifier: oidAES192CBC, +}, { + cipher: x509.PEMCipherAES256, + name: "AES-256-CBC", + cipherFunc: aes.NewCipher, + keySize: 32, + blockSize: aes.BlockSize, + identifier: oidAES256CBC, +}, +} + +func cipherByKey(key x509.PEMCipher) *rfc1423Algo { + for i := range rfc1423Algos { + alg := &rfc1423Algos[i] + if alg.cipher == key { + return alg + } + } + return nil +} + +// deriveKey uses a key derivation function to stretch the password into a key +// with the number of bits our cipher requires. This algorithm was derived from +// the OpenSSL source. +func (c rfc1423Algo) deriveKey(password, salt []byte, h func() hash.Hash) []byte { + return pbkdf2.Key(password, salt, PBKDF2Iterations, c.keySize, h) +} + +// DecryptPEMBlock takes a password encrypted PEM block and the password used +// to encrypt it and returns a slice of decrypted DER encoded bytes. +// +// If the PEM blocks has the Proc-Type header set to "4,ENCRYPTED" it uses +// x509.DecryptPEMBlock to decrypt the block. 
If not it tries to decrypt the +// block using AES-128-CBC, AES-192-CBC, AES-256-CBC, DES, or 3DES using the +// key derived using PBKDF2 over the given password. +func DecryptPEMBlock(block *pem.Block, password []byte) ([]byte, error) { + if block.Headers["Proc-Type"] == "4,ENCRYPTED" { + return x509.DecryptPEMBlock(block, password) + } + + // PKCS#8 header defined in RFC7468 section 11 + if block.Type == "ENCRYPTED PRIVATE KEY" { + return DecryptPKCS8PrivateKey(block.Bytes, password) + } + + return nil, errors.New("unsupported encrypted PEM") +} + +// DecryptPKCS8PrivateKey takes a password encrypted private key using the +// PKCS#8 encoding and returns the decrypted data in PKCS#8 form. If an +// incorrect password is detected an x509.IncorrectPasswordError is returned. +// Because of deficiencies in the format, it's not always possible to detect an +// incorrect password. In these cases no error will be returned but the +// decrypted DER bytes will be random noise. +// +// It supports AES-128-CBC, AES-192-CBC, AES-256-CBC, DES, or 3DES encrypted +// data using the key derived with PBKDF2 over the given password. +func DecryptPKCS8PrivateKey(data, password []byte) ([]byte, error) { + var pki encryptedPrivateKeyInfo + if _, err := asn1.Unmarshal(data, &pki); err != nil { + return nil, errors.Wrap(err, "failed to unmarshal private key") + } + + if !pki.Algo.Algorithm.Equal(oidPBES2) { + return nil, errors.New("unsupported encrypted PEM: only PBES2 is supported") + } + + if !pki.Algo.Parameters.KeyDerivationFunc.Algo.Equal(oidPKCS5PBKDF2) { + return nil, errors.New("unsupported encrypted PEM: only PBKDF2 is supported") + } + + encParam := pki.Algo.Parameters.EncryptionScheme + kdfParam := pki.Algo.Parameters.KeyDerivationFunc.PBKDF2Params + + iv := encParam.IV + salt := kdfParam.Salt + iter := kdfParam.IterationCount + + // pbkdf2 hash function + keyHash := sha1.New + if kdfParam.PrfParam.Algo.Equal(oidHMACWithSHA256) { + keyHash = sha256.New + } + + var symkey []byte + var block cipher.Block + var err error + switch { + // AES-128-CBC, AES-192-CBC, AES-256-CBC + case encParam.EncryAlgo.Equal(oidAES128CBC): + symkey = pbkdf2.Key(password, salt, iter, 16, keyHash) + block, err = aes.NewCipher(symkey) + case encParam.EncryAlgo.Equal(oidAES192CBC): + symkey = pbkdf2.Key(password, salt, iter, 24, keyHash) + block, err = aes.NewCipher(symkey) + case encParam.EncryAlgo.Equal(oidAES256CBC): + symkey = pbkdf2.Key(password, salt, iter, 32, keyHash) + block, err = aes.NewCipher(symkey) + // DES, TripleDES + case encParam.EncryAlgo.Equal(oidDESCBC): + symkey = pbkdf2.Key(password, salt, iter, 8, keyHash) + block, err = des.NewCipher(symkey) //nolint:gosec // support for legacy keys + case encParam.EncryAlgo.Equal(oidD3DESCBC): + symkey = pbkdf2.Key(password, salt, iter, 24, keyHash) + block, err = des.NewTripleDESCipher(symkey) //nolint:gosec // support for legacy keys + default: + return nil, errors.Errorf("unsupported encrypted PEM: unknown algorithm %v", encParam.EncryAlgo) + } + if err != nil { + return nil, err + } + + data = pki.PrivateKey + mode := cipher.NewCBCDecrypter(block, iv) + mode.CryptBlocks(data, data) + + // Blocks are padded using a scheme where the last n bytes of padding are all + // equal to n. It can pad from 1 to blocksize bytes inclusive. See RFC 1423. + // For example: + // [x y z 2 2] + // [x y 7 7 7 7 7 7 7] + // If we detect a bad padding, we assume it is an invalid password. 
+	blockSize := block.BlockSize()
+	dlen := len(data)
+	if dlen == 0 || dlen%blockSize != 0 {
+		return nil, errors.New("error decrypting PEM: invalid padding")
+	}
+
+	last := int(data[dlen-1])
+	if dlen < last {
+		return nil, x509.IncorrectPasswordError
+	}
+	if last == 0 || last > blockSize {
+		return nil, x509.IncorrectPasswordError
+	}
+	for _, val := range data[dlen-last:] {
+		if int(val) != last {
+			return nil, x509.IncorrectPasswordError
+		}
+	}
+
+	return data[:dlen-last], nil
+}
+
+// EncryptPKCS8PrivateKey returns a PEM block holding the given PKCS#8 encoded
+// private key, encrypted with the specified algorithm and a PBKDF2 derived key
+// from the given password.
+func EncryptPKCS8PrivateKey(rand io.Reader, data, password []byte, alg x509.PEMCipher) (*pem.Block, error) {
+	ciph := cipherByKey(alg)
+	if ciph == nil {
+		return nil, errors.Errorf("failed to encrypt PEM: unknown algorithm %v", alg)
+	}
+
+	salt := make([]byte, PBKDF2SaltSize)
+	if _, err := io.ReadFull(rand, salt); err != nil {
+		return nil, errors.Wrap(err, "failed to generate salt")
+	}
+	iv := make([]byte, ciph.blockSize)
+	if _, err := io.ReadFull(rand, iv); err != nil {
+		return nil, errors.Wrap(err, "failed to generate IV")
+	}
+
+	key := ciph.deriveKey(password, salt, sha256.New)
+	block, err := ciph.cipherFunc(key)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to create cipher")
+	}
+	enc := cipher.NewCBCEncrypter(block, iv)
+	pad := ciph.blockSize - len(data)%ciph.blockSize
+	encrypted := make([]byte, len(data), len(data)+pad)
+	// We could save this copy by encrypting all the whole blocks in
+	// the data separately, but it doesn't seem worth the additional
+	// code.
+	copy(encrypted, data)
+	// See RFC 1423, section 1.1
+	for i := 0; i < pad; i++ {
+		encrypted = append(encrypted, byte(pad))
+	}
+	enc.CryptBlocks(encrypted, encrypted)
+
+	// Build encrypted asn1 data
+	pki := encryptedPrivateKeyInfo{
+		Algo: encryptedlAlgorithmIdentifier{
+			Algorithm: oidPBES2,
+			Parameters: pbes2Params{
+				KeyDerivationFunc: pbkdf2Algorithms{
+					Algo: oidPKCS5PBKDF2,
+					PBKDF2Params: pbkdf2Params{
+						Salt:           salt,
+						IterationCount: PBKDF2Iterations,
+						PrfParam: prfParam{
+							Algo:      oidHMACWithSHA256,
+							NullParam: asn1.NullRawValue,
+						},
+					},
+				},
+				EncryptionScheme: pbkdf2Encs{
+					EncryAlgo: ciph.identifier,
+					IV:        iv,
+				},
+			},
+		},
+		PrivateKey: encrypted,
+	}
+
+	b, err := asn1.Marshal(pki)
+	if err != nil {
+		return nil, errors.Wrap(err, "error marshaling encrypted key")
+	}
+	return &pem.Block{
+		Type:  "ENCRYPTED PRIVATE KEY",
+		Bytes: b,
+	}, nil
+}
diff --git a/vendor/go.step.sm/crypto/pemutil/ssh.go b/vendor/go.step.sm/crypto/pemutil/ssh.go
new file mode 100644
index 0000000000..00698dae19
--- /dev/null
+++ b/vendor/go.step.sm/crypto/pemutil/ssh.go
@@ -0,0 +1,299 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package pemutil + +import ( + "crypto" + "crypto/aes" + "crypto/cipher" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rand" + "crypto/rsa" + "encoding/binary" + "encoding/pem" + "math/big" + + "github.com/pkg/errors" + bcryptpbkdf "go.step.sm/crypto/internal/bcrypt_pbkdf" + "go.step.sm/crypto/randutil" + "golang.org/x/crypto/ssh" +) + +const ( + sshMagic = "openssh-key-v1\x00" + sshDefaultKdf = "bcrypt" + sshDefaultCiphername = "aes256-ctr" + sshDefaultKeyLength = 32 + sshDefaultSaltLength = 16 + sshDefaultRounds = 16 +) + +type openSSHPrivateKey struct { + CipherName string + KdfName string + KdfOpts string + NumKeys uint32 + PubKey []byte + PrivKeyBlock []byte +} + +type openSSHPrivateKeyBlock struct { + Check1 uint32 + Check2 uint32 + Keytype string + Rest []byte `ssh:"rest"` +} + +// ParseOpenSSHPrivateKey parses a private key in OpenSSH PEM format. +// +// Implemented based on the documentation at +// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key +// +// This method is based on the implementation at +// https://github.com/golang/crypto/blob/master/ssh/keys.go +func ParseOpenSSHPrivateKey(pemBytes []byte, opts ...Options) (crypto.PrivateKey, error) { + // Populate options + ctx := newContext("PEM") + if err := ctx.apply(opts); err != nil { + return nil, err + } + + block, _ := pem.Decode(pemBytes) + if block == nil { + return nil, errors.Errorf("error decoding %s: not a valid PEM encoded block", ctx.filename) + } + + if len(block.Bytes) < len(sshMagic) || string(block.Bytes[:len(sshMagic)]) != sshMagic { + return nil, errors.New("invalid openssh private key format") + } + remaining := block.Bytes[len(sshMagic):] + + var w openSSHPrivateKey + if err := ssh.Unmarshal(remaining, &w); err != nil { + return nil, errors.Wrap(err, "error unmarshaling private key") + } + + var err error + var key crypto.PrivateKey + if w.KdfName != "none" || w.CipherName != "none" { + password, err := ctx.promptPassword() + if err != nil { + return nil, err + } + key, err = ssh.ParseRawPrivateKeyWithPassphrase(pemBytes, password) + if err != nil { + return nil, errors.Wrap(err, "error parsing private key") + } + } else { + key, err = ssh.ParseRawPrivateKey(pemBytes) + if err != nil { + return nil, errors.Wrap(err, "error parsing private key") + } + } + + // Convert *ed25519.PrivateKey to ed25519.PrivateKey: + switch k := key.(type) { + case *ed25519.PrivateKey: + return *k, nil + default: + return k, nil + } +} + +// SerializeOpenSSHPrivateKey serialize a private key in the OpenSSH PEM format. +func SerializeOpenSSHPrivateKey(key crypto.PrivateKey, opts ...Options) (*pem.Block, error) { + ctx := new(context) + if err := ctx.apply(opts); err != nil { + return nil, err + } + + // Random check bytes. + var check uint32 + if err := binary.Read(rand.Reader, binary.BigEndian, &check); err != nil { + return nil, errors.Wrap(err, "error generating random check ") + } + + w := openSSHPrivateKey{ + NumKeys: 1, + } + pk1 := openSSHPrivateKeyBlock{ + Check1: check, + Check2: check, + } + + password, err := ctx.promptEncryptPassword() + if err != nil { + return nil, err + } + + var blockSize int + if password == nil { + w.CipherName = "none" + w.KdfName = "none" + blockSize = 8 + } else { + w.CipherName = sshDefaultCiphername + w.KdfName = sshDefaultKdf + blockSize = aes.BlockSize + } + + switch k := key.(type) { + case *rsa.PrivateKey: + e := new(big.Int).SetInt64(int64(k.PublicKey.E)) + // Marshal public key: + // E and N are in reversed order in the public and private key. 
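+		// (That is, the SSH wire format stores E before N in the public key
+		// blob, while the private key section below stores N before E.)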
+ pubKey := struct { + KeyType string + E *big.Int + N *big.Int + }{ + ssh.KeyAlgoRSA, + e, k.PublicKey.N, + } + w.PubKey = ssh.Marshal(pubKey) + + // Marshal private key. + key := struct { + N *big.Int + E *big.Int + D *big.Int + Iqmp *big.Int + P *big.Int + Q *big.Int + Comment string + }{ + k.PublicKey.N, e, + k.D, k.Precomputed.Qinv, k.Primes[0], k.Primes[1], + ctx.comment, + } + pk1.Keytype = ssh.KeyAlgoRSA + pk1.Rest = ssh.Marshal(key) + case *ecdsa.PrivateKey: + var curve, keyType string + switch k.Curve.Params().Name { + case "P-256": + curve = "nistp256" + keyType = ssh.KeyAlgoECDSA256 + case "P-384": + curve = "nistp384" + keyType = ssh.KeyAlgoECDSA384 + case "P-521": + curve = "nistp521" + keyType = ssh.KeyAlgoECDSA521 + default: + return nil, errors.Errorf("error serializing key: unsupported curve %s", k.Curve.Params().Name) + } + + p, err := k.PublicKey.ECDH() + if err != nil { + return nil, errors.Wrapf(err, "failed converting *ecdsa.PublicKey to *ecdh.PublicKey") + } + + // Marshal public key. + pubKey := struct { + KeyType string + Curve string + Pub []byte + }{ + keyType, curve, p.Bytes(), + } + w.PubKey = ssh.Marshal(pubKey) + + // Marshal private key. + key := struct { + Curve string + Pub []byte + D *big.Int + Comment string + }{ + curve, p.Bytes(), k.D, + ctx.comment, + } + pk1.Keytype = keyType + pk1.Rest = ssh.Marshal(key) + case ed25519.PrivateKey: + pub := make([]byte, ed25519.PublicKeySize) + priv := make([]byte, ed25519.PrivateKeySize) + copy(pub, k[ed25519.PublicKeySize:]) + copy(priv, k) + + // Marshal public key. + pubKey := struct { + KeyType string + Pub []byte + }{ + ssh.KeyAlgoED25519, pub, + } + w.PubKey = ssh.Marshal(pubKey) + + // Marshal private key. + key := struct { + Pub []byte + Priv []byte + Comment string + }{ + pub, priv, + ctx.comment, + } + pk1.Keytype = ssh.KeyAlgoED25519 + pk1.Rest = ssh.Marshal(key) + default: + return nil, errors.Errorf("unsupported key type %T", k) + } + + w.PrivKeyBlock = ssh.Marshal(pk1) + + // Add padding until the private key block matches the block size, + // 16 with AES encryption, 8 without. + for i, l := 0, len(w.PrivKeyBlock); (l+i)%blockSize != 0; i++ { + w.PrivKeyBlock = append(w.PrivKeyBlock, byte(i+1)) + } + + if password != nil { + // Create encryption key derivation the password. + salt, err := randutil.Salt(sshDefaultSaltLength) + if err != nil { + return nil, err + } + kdfOpts := struct { + Salt []byte + Rounds uint32 + }{salt, sshDefaultRounds} + w.KdfOpts = string(ssh.Marshal(kdfOpts)) + + // Derive key to encrypt the private key block. + k, err := bcryptpbkdf.Key(password, salt, sshDefaultRounds, sshDefaultKeyLength+aes.BlockSize) + if err != nil { + return nil, errors.Wrap(err, "error deriving decryption key") + } + + // Encrypt the private key using the derived secret. 
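+		// The derived k is sshDefaultKeyLength+aes.BlockSize bytes long: the
+		// first 32 bytes are used as the AES-256 key and the remaining 16
+		// bytes as the CTR IV.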
+		dst := make([]byte, len(w.PrivKeyBlock))
+		iv := k[sshDefaultKeyLength : sshDefaultKeyLength+aes.BlockSize]
+		block, err := aes.NewCipher(k[:sshDefaultKeyLength])
+		if err != nil {
+			return nil, errors.Wrap(err, "error creating cipher")
+		}
+
+		stream := cipher.NewCTR(block, iv)
+		stream.XORKeyStream(dst, w.PrivKeyBlock)
+		w.PrivKeyBlock = dst
+	}
+
+	b := ssh.Marshal(w)
+	block := &pem.Block{
+		Type:  "OPENSSH PRIVATE KEY",
+		Bytes: append([]byte(sshMagic), b...),
+	}
+
+	if ctx.filename != "" {
+		if err := WriteFile(ctx.filename, pem.EncodeToMemory(block), ctx.perm); err != nil {
+			return nil, err
+		}
+	}
+
+	return block, nil
+}
diff --git a/vendor/go.step.sm/crypto/randutil/random.go b/vendor/go.step.sm/crypto/randutil/random.go
new file mode 100644
index 0000000000..dce7931b18
--- /dev/null
+++ b/vendor/go.step.sm/crypto/randutil/random.go
@@ -0,0 +1,113 @@
+// Package randutil provides methods to generate random strings and salts.
+package randutil
+
+import (
+	"crypto/rand"
+	"encoding/hex"
+	"io"
+	"math/big"
+
+	"github.com/pkg/errors"
+)
+
+var ascii string
+
+func init() {
+	// initialize the characters in ascii
+	asciiBytes := make([]byte, 94)
+	for i := range asciiBytes {
+		asciiBytes[i] = byte(i + 33)
+	}
+	ascii = string(asciiBytes)
+}
+
+// Salt generates a new random salt of the given size.
+func Salt(size int) ([]byte, error) {
+	salt := make([]byte, size)
+	_, err := io.ReadFull(rand.Reader, salt)
+	if err != nil {
+		return nil, errors.Wrap(err, "error generating salt")
+	}
+	return salt, nil
+}
+
+// Bytes generates a new byte slice of the given size.
+func Bytes(size int) ([]byte, error) {
+	bytes := make([]byte, size)
+	_, err := io.ReadFull(rand.Reader, bytes)
+	if err != nil {
+		return nil, errors.Wrap(err, "error generating bytes")
+	}
+	return bytes, nil
+}
+
+// String returns a random string of a given length using the characters in
+// the given string. It splits the string on runes to support UTF-8
+// characters.
+func String(length int, chars string) (string, error) {
+	result := make([]rune, length)
+	runes := []rune(chars)
+	x := int64(len(runes))
+	for i := range result {
+		num, err := rand.Int(rand.Reader, big.NewInt(x))
+		if err != nil {
+			return "", errors.Wrap(err, "error creating random number")
+		}
+		result[i] = runes[num.Int64()]
+	}
+	return string(result), nil
+}
+
+// Hex returns a random string of the given length using the hexadecimal
+// characters in lower case (0-9+a-f).
+func Hex(length int) (string, error) {
+	return String(length, "0123456789abcdef")
+}
+
+// Alphanumeric returns a random string of the given length using the 62
+// alphanumeric characters in the POSIX/C locale (a-z+A-Z+0-9).
+func Alphanumeric(length int) (string, error) {
+	return String(length, "abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ")
+}
+
+// ASCII returns a securely generated random ASCII string. It reads random
+// numbers from crypto/rand and searches for printable characters. It will
+// return an error if the system's secure random number generator fails to
+// function correctly, in which case the caller must not continue.
+func ASCII(length int) (string, error) {
+	return String(length, ascii)
+}
+
+// Alphabet returns a random string of the given length using the 52
+// alphabetic characters in the POSIX/C locale (a-z+A-Z).
+func Alphabet(length int) (string, error) {
+	return String(length, "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
+}
+
+// UUIDv4 returns the string representation of a UUID version 4. Because 6 bits
+// are used to indicate the version 4 and the variant 10, the randomly generated
+// part has 122 bits.
+func UUIDv4() (string, error) {
+	var uuid [16]byte
+	_, err := io.ReadFull(rand.Reader, uuid[:])
+	if err != nil {
+		return "", errors.Wrap(err, "error generating uuid")
+	}
+	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+	return encodeUUID(uuid), nil
+}
+
+func encodeUUID(uuid [16]byte) string {
+	buf := make([]byte, 36)
+	hex.Encode(buf, uuid[:4])
+	buf[8] = '-'
+	hex.Encode(buf[9:13], uuid[4:6])
+	buf[13] = '-'
+	hex.Encode(buf[14:18], uuid[6:8])
+	buf[18] = '-'
+	hex.Encode(buf[19:23], uuid[8:10])
+	buf[23] = '-'
+	hex.Encode(buf[24:], uuid[10:])
+	return string(buf)
+}
diff --git a/vendor/go.step.sm/crypto/x25519/x25519.go b/vendor/go.step.sm/crypto/x25519/x25519.go
new file mode 100644
index 0000000000..c6d239bfe2
--- /dev/null
+++ b/vendor/go.step.sm/crypto/x25519/x25519.go
@@ -0,0 +1,321 @@
+package x25519
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/ed25519"
+	"crypto/sha512"
+	"crypto/subtle"
+	"errors"
+	"io"
+	"strconv"
+
+	"filippo.io/edwards25519"
+	"filippo.io/edwards25519/field"
+	"golang.org/x/crypto/curve25519"
+)
+
+const (
+	// PrivateKeySize is the size in bytes of a X25519 private key.
+	PrivateKeySize = 32
+
+	// PublicKeySize is the size in bytes of a X25519 public key.
+	PublicKeySize = 32
+
+	SignatureSize = 64
+)
+
+var one = (&field.Element{}).One()
+
+// PrivateKey is the type used to represent a X25519 private key.
+type PrivateKey []byte
+
+// PublicKey is the type used to represent a X25519 public key.
+type PublicKey []byte
+
+// GenerateKey generates a public/private key pair using entropy from rand.
+func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
+	priv := make([]byte, PrivateKeySize)
+	if _, err := io.ReadFull(rand, priv); err != nil {
+		return nil, nil, err
+	}
+
+	pub, err := curve25519.X25519(priv, curve25519.Basepoint)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return pub, priv, err
+}
+
+// ToEd25519 converts the public key p into a ed25519 key.
+//
+//	(x, y) = (sqrt(-486664)*u/v, (u-1)/(u+1))
+func (p PublicKey) ToEd25519() (ed25519.PublicKey, error) {
+	a, err := convertMont(p)
+	if err != nil {
+		return nil, err
+	}
+	return a.Bytes(), nil
+}
+
+// Equal reports whether p and x have the same value.
+func (p PublicKey) Equal(x crypto.PublicKey) bool {
+	xx, ok := x.(PublicKey)
+	if !ok {
+		return false
+	}
+	return bytes.Equal(p, xx)
+}
+
+// Public returns the public key using scalar multiplication (scalar * point)
+// using the Curve25519 basepoint. It will return nil if the private key is not
+// a valid one.
+func (p PrivateKey) Public() crypto.PublicKey {
+	pub, _ := p.PublicKey()
+	return pub
+}
+
+// Equal reports whether p and x have the same value.
+func (p PrivateKey) Equal(x crypto.PrivateKey) bool {
+	xx, ok := x.(PrivateKey)
+	if !ok {
+		return false
+	}
+	return bytes.Equal(p, xx)
+}
+
+// PublicKey returns the public key using scalar multiplication (scalar * point)
+// using the Curve25519 basepoint.
+func (p PrivateKey) PublicKey() (PublicKey, error) {
+	pub, err := curve25519.X25519(p, curve25519.Basepoint)
+	if err != nil {
+		return nil, err
+	}
+	return pub, nil
+}
+
+// SharedKey returns the result of the scalar multiplication (scalar * point),
+// using the PrivateKey as the scalar value and the given key as the point. Both
+// scalar and point must be slices of 32 bytes.
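+//
+// A minimal Diffie-Hellman sketch between two freshly generated key pairs
+// (illustrative only):
+//
+//	pubA, privA, _ := GenerateKey(rand.Reader)
+//	pubB, privB, _ := GenerateKey(rand.Reader)
+//	s1, _ := privA.SharedKey(pubB)
+//	s2, _ := privB.SharedKey(pubA)
+//	// s1 and s2 hold the same 32-byte shared secret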
+func (p PrivateKey) SharedKey(peerPublicKey []byte) ([]byte, error) { + sharedKey, err := curve25519.X25519(p, peerPublicKey) + if err != nil { + return nil, err + } + return sharedKey, nil +} + +// Sign signs the given message with the private key p and returns a signature. +// +// It implements the XEdDSA sign method defined in +// https://signal.org/docs/specifications/xeddsa/#xeddsa +// +// XEdDSA performs two passes over messages to be signed and therefore cannot +// handle pre-hashed messages. Thus opts.HashFunc() must return zero to indicate +// the message hasn't been hashed. This can be achieved by passing +// crypto.Hash(0) as the value for opts. +func (p PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) { + if opts.HashFunc() != crypto.Hash(0) { + return nil, errors.New("x25519: cannot sign hashed message") + } + + return Sign(rand, p, message) +} + +// Sign signs the message with privateKey and returns a signature. It will panic +// if len(privateKey) is not PrivateKeySize. +// +// It implements the XEdDSA sign method defined in +// https://signal.org/docs/specifications/xeddsa/#xeddsa +// +// xeddsa_sign(k, M, Z): +// A, a = calculate_key_pair(k) +// r = hash1(a || M || Z) (mod q) +// R = rB +// h = hash(R || A || M) (mod q) +// s = r + ha (mod q) +// return R || s +func Sign(rand io.Reader, p PrivateKey, message []byte) (signature []byte, err error) { + if l := len(p); l != PrivateKeySize { + panic("x25519: bad private key length: " + strconv.Itoa(l)) + } + + pub, priv, err := p.calculateKeyPair() + if err != nil { + return nil, err + } + + random := make([]byte, 64) + if _, err := io.ReadFull(rand, random); err != nil { + return nil, err + } + + // Using same prefix in libsignal-protocol-c implementation, but can be any + // 32 byte prefix. Golang's ed25519 implementation uses: + // + // ph := sha512.Sum512(a.Bytes()) + // prefix := ph[32:] + prefix := [32]byte{ + 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + } + + rh := sha512.New() + rh.Write(prefix[:]) + rh.Write(priv.Bytes()) + rh.Write(message) + rh.Write(random) + rDigest := make([]byte, 0, sha512.Size) + rDigest = rh.Sum(rDigest) + + r, err := edwards25519.NewScalar().SetUniformBytes(rDigest) + if err != nil { + return nil, err + } + + R := (&edwards25519.Point{}).ScalarBaseMult(r) //nolint:gocritic // variable names match crypto formulae docs + + hh := sha512.New() + hh.Write(R.Bytes()) + hh.Write(pub) + hh.Write(message) + hDigest := make([]byte, 0, sha512.Size) + hDigest = hh.Sum(hDigest) + h, err := edwards25519.NewScalar().SetUniformBytes(hDigest) + if err != nil { + return nil, err + } + + s := (&edwards25519.Scalar{}).Add(r, h.Multiply(h, priv)) + + sig := make([]byte, 64) + copy(sig[:32], R.Bytes()) + copy(sig[32:], s.Bytes()) + return sig, nil +} + +// Verify reports whether sig is a valid signature of message by publicKey. It +// will panic if len(publicKey) is not PublicKeySize. 
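+//
+// A round-trip sketch with Sign (illustrative; priv is an existing
+// PrivateKey):
+//
+//	pub, _ := priv.PublicKey()
+//	sig, _ := Sign(rand.Reader, priv, msg)
+//	ok := Verify(pub, msg, sig)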
+//
+// It implements the XEdDSA verify method defined in
+// https://signal.org/docs/specifications/xeddsa/#xeddsa
+//
+//	xeddsa_verify(u, M, (R || s)):
+//	    if u >= p or R.y >= 2|p| or s >= 2|q|:
+//	        return false
+//	    A = convert_mont(u)
+//	    if not on_curve(A):
+//	        return false
+//	    h = hash(R || A || M) (mod q)
+//	    Rcheck = sB - hA
+//	    if bytes_equal(R, Rcheck):
+//	        return true
+//	    return false
+func Verify(publicKey PublicKey, message, sig []byte) bool {
+	// The following code should be equivalent to:
+	//
+	//	pub, err := publicKey.ToEd25519()
+	//	if err != nil {
+	//		return false
+	//	}
+	//	return ed25519.Verify(pub, message, sig)
+
+	if l := len(publicKey); l != PublicKeySize {
+		panic("x25519: bad public key length: " + strconv.Itoa(l))
+	}
+
+	if len(sig) != SignatureSize || sig[63]&0xE0 != 0 {
+		return false
+	}
+
+	a, err := convertMont(publicKey)
+	if err != nil {
+		return false
+	}
+
+	hh := sha512.New()
+	hh.Write(sig[:32])
+	hh.Write(a.Bytes())
+	hh.Write(message)
+	hDigest := make([]byte, 0, sha512.Size)
+	hDigest = hh.Sum(hDigest)
+	h, err := edwards25519.NewScalar().SetUniformBytes(hDigest)
+	if err != nil {
+		return false
+	}
+
+	s, err := edwards25519.NewScalar().SetCanonicalBytes(sig[32:])
+	if err != nil {
+		return false
+	}
+
+	minusA := (&edwards25519.Point{}).Negate(a)
+	r := (&edwards25519.Point{}).VarTimeDoubleScalarBaseMult(h, minusA, s)
+	return subtle.ConstantTimeCompare(sig[:32], r.Bytes()) == 1
+}
+
+// calculateKeyPair converts a Montgomery private key k to a twisted Edwards
+// public key and private key (A, a) as defined in
+// https://signal.org/docs/specifications/xeddsa/#elliptic-curve-conversions
+//
+//	calculate_key_pair(k):
+//	    E = kB
+//	    A.y = E.y
+//	    A.s = 0
+//	    if E.s == 1:
+//	        a = -k (mod q)
+//	    else:
+//	        a = k (mod q)
+//	    return A, a
+func (p PrivateKey) calculateKeyPair() ([]byte, *edwards25519.Scalar, error) {
+	var pA edwards25519.Point
+	var sa edwards25519.Scalar
+
+	k, err := (&edwards25519.Scalar{}).SetBytesWithClamping(p)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	pub := pA.ScalarBaseMult(k).Bytes()
+	signBit := (pub[31] & 0x80) >> 7
+
+	if signBit == 1 {
+		sa.Negate(k)
+		// Set sig bit to 0
+		pub[31] &= 0x7F
+	} else {
+		sa.Set(k)
+	}
+
+	return pub, &sa, nil
+}
+
+// convertMont converts from a Montgomery u-coordinate to a twisted Edwards
+// point P, according to
+// https://signal.org/docs/specifications/xeddsa/#elliptic-curve-conversions
+//
+//	convert_mont(u):
+//	    umasked = u (mod 2|p|)
+//	    P.y = u_to_y(umasked)
+//	    P.s = 0
+//	    return P
+func convertMont(u PublicKey) (*edwards25519.Point, error) {
+	um, err := (&field.Element{}).SetBytes(u)
+	if err != nil {
+		return nil, err
+	}
+
+	// y = (u - 1)/(u + 1)
+	a := new(field.Element).Subtract(um, one)
+	b := new(field.Element).Add(um, one)
+	y := new(field.Element).Multiply(a, b.Invert(b)).Bytes()
+
+	// Set sign to 0
+	y[31] &= 0x7F
+
+	return (&edwards25519.Point{}).SetBytes(y)
+}
diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go
index fef687db0e..56cdc7c21c 100644
--- a/vendor/golang.org/x/crypto/ssh/handshake.go
+++ b/vendor/golang.org/x/crypto/ssh/handshake.go
@@ -80,7 +80,6 @@ type handshakeTransport struct {
 	pendingPackets [][]byte // Used when a key exchange is in progress.
 	writePacketsLeft uint32
 	writeBytesLeft   int64
-	userAuthComplete bool // whether the user authentication phase is complete
 
 	// If the read loop wants to schedule a kex, it pings this
 	// channel, and the write loop will send out a kex
@@ -553,25 +552,16 @@ func (t *handshakeTransport) sendKexInit() error {
 	return nil
 }
 
-var errSendBannerPhase = errors.New("ssh: SendAuthBanner outside of authentication phase")
-
 func (t *handshakeTransport) writePacket(p []byte) error {
-	t.mu.Lock()
-	defer t.mu.Unlock()
-
 	switch p[0] {
 	case msgKexInit:
 		return errors.New("ssh: only handshakeTransport can send kexInit")
 	case msgNewKeys:
 		return errors.New("ssh: only handshakeTransport can send newKeys")
-	case msgUserAuthBanner:
-		if t.userAuthComplete {
-			return errSendBannerPhase
-		}
-	case msgUserAuthSuccess:
-		t.userAuthComplete = true
 	}
+
+	t.mu.Lock()
+	defer t.mu.Unlock()
 	if t.writeError != nil {
 		return t.writeError
 	}
diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go
index 1839ddc6a4..5b5ccd96f4 100644
--- a/vendor/golang.org/x/crypto/ssh/server.go
+++ b/vendor/golang.org/x/crypto/ssh/server.go
@@ -59,27 +59,6 @@ type GSSAPIWithMICConfig struct {
 	Server GSSAPIServer
 }
 
-// SendAuthBanner implements [ServerPreAuthConn].
-func (s *connection) SendAuthBanner(msg string) error {
-	return s.transport.writePacket(Marshal(&userAuthBannerMsg{
-		Message: msg,
-	}))
-}
-
-func (*connection) unexportedMethodForFutureProofing() {}
-
-// ServerPreAuthConn is the interface available on an incoming server
-// connection before authentication has completed.
-type ServerPreAuthConn interface {
-	unexportedMethodForFutureProofing() // permits growing ServerPreAuthConn safely later, ala testing.TB
-
-	ConnMetadata
-
-	// SendAuthBanner sends a banner message to the client.
-	// It returns an error once the authentication phase has ended.
-	SendAuthBanner(string) error
-}
-
 // ServerConfig holds server specific configuration data.
 type ServerConfig struct {
 	// Config contains configuration shared between client and server.
@@ -139,12 +118,6 @@ type ServerConfig struct {
 	// attempts.
 	AuthLogCallback func(conn ConnMetadata, method string, err error)
 
-	// PreAuthConnCallback, if non-nil, is called upon receiving a new connection
-	// before any authentication has started. The provided ServerPreAuthConn
-	// can be used at any time before authentication is complete, including
-	// after this callback has returned.
-	PreAuthConnCallback func(ServerPreAuthConn)
-
 	// ServerVersion is the version identification string to announce in
 	// the public handshake.
 	// If empty, a reasonable default is used.
@@ -515,10 +488,6 @@ func (b *BannerError) Error() string { } func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) { - if config.PreAuthConnCallback != nil { - config.PreAuthConnCallback(s) - } - sessionID := s.transport.getSessionID() var cache pubKeyCache var perms *Permissions @@ -526,7 +495,7 @@ func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, err authFailures := 0 noneAuthCount := 0 var authErrs []error - var calledBannerCallback bool + var displayedBanner bool partialSuccessReturned := false // Set the initial authentication callbacks from the config. They can be // changed if a PartialSuccessError is returned. @@ -573,10 +542,14 @@ userAuthLoop: s.user = userAuthReq.User - if !calledBannerCallback && config.BannerCallback != nil { - calledBannerCallback = true - if msg := config.BannerCallback(s); msg != "" { - if err := s.SendAuthBanner(msg); err != nil { + if !displayedBanner && config.BannerCallback != nil { + displayedBanner = true + msg := config.BannerCallback(s) + if msg != "" { + bannerMsg := &userAuthBannerMsg{ + Message: msg, + } + if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil { return nil, err } } @@ -789,7 +762,10 @@ userAuthLoop: var bannerErr *BannerError if errors.As(authErr, &bannerErr) { if bannerErr.Message != "" { - if err := s.SendAuthBanner(bannerErr.Message); err != nil { + bannerMsg := &userAuthBannerMsg{ + Message: bannerErr.Message, + } + if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil { return nil, err } } diff --git a/vendor/golang.org/x/mod/sumdb/dirhash/hash.go b/vendor/golang.org/x/mod/sumdb/dirhash/hash.go new file mode 100644 index 0000000000..51ec4db873 --- /dev/null +++ b/vendor/golang.org/x/mod/sumdb/dirhash/hash.go @@ -0,0 +1,135 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package dirhash defines hashes over directory trees. +// These hashes are recorded in go.sum files and in the Go checksum database, +// to allow verifying that a newly-downloaded module has the expected content. +package dirhash + +import ( + "archive/zip" + "crypto/sha256" + "encoding/base64" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" +) + +// DefaultHash is the default hash function used in new go.sum entries. +var DefaultHash Hash = Hash1 + +// A Hash is a directory hash function. +// It accepts a list of files along with a function that opens the content of each file. +// It opens, reads, hashes, and closes each file and returns the overall directory hash. +type Hash func(files []string, open func(string) (io.ReadCloser, error)) (string, error) + +// Hash1 is the "h1:" directory hash function, using SHA-256. +// +// Hash1 is "h1:" followed by the base64-encoded SHA-256 hash of a summary +// prepared as if by the Unix command: +// +// sha256sum $(find . -type f | sort) | sha256sum +// +// More precisely, the hashed summary contains a single line for each file in the list, +// ordered by sort.Strings applied to the file names, where each line consists of +// the hexadecimal SHA-256 hash of the file content, +// two spaces (U+0020), the file name, and a newline (U+000A). +// +// File names with newlines (U+000A) are disallowed. +func Hash1(files []string, open func(string) (io.ReadCloser, error)) (string, error) { + h := sha256.New() + files = append([]string(nil), files...) 
+	sort.Strings(files)
+	for _, file := range files {
+		if strings.Contains(file, "\n") {
+			return "", errors.New("dirhash: filenames with newlines are not supported")
+		}
+		r, err := open(file)
+		if err != nil {
+			return "", err
+		}
+		hf := sha256.New()
+		_, err = io.Copy(hf, r)
+		r.Close()
+		if err != nil {
+			return "", err
+		}
+		fmt.Fprintf(h, "%x  %s\n", hf.Sum(nil), file)
+	}
+	return "h1:" + base64.StdEncoding.EncodeToString(h.Sum(nil)), nil
+}
+
+// HashDir returns the hash of the local file system directory dir,
+// replacing the directory name itself with prefix in the file names
+// used in the hash function.
+func HashDir(dir, prefix string, hash Hash) (string, error) {
+	files, err := DirFiles(dir, prefix)
+	if err != nil {
+		return "", err
+	}
+	osOpen := func(name string) (io.ReadCloser, error) {
+		return os.Open(filepath.Join(dir, strings.TrimPrefix(name, prefix)))
+	}
+	return hash(files, osOpen)
+}
+
+// DirFiles returns the list of files in the tree rooted at dir,
+// replacing the directory name dir with prefix in each name.
+// The resulting names always use forward slashes.
+func DirFiles(dir, prefix string) ([]string, error) {
+	var files []string
+	dir = filepath.Clean(dir)
+	err := filepath.Walk(dir, func(file string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		if info.IsDir() {
+			return nil
+		} else if file == dir {
+			return fmt.Errorf("%s is not a directory", dir)
+		}
+
+		rel := file
+		if dir != "." {
+			rel = file[len(dir)+1:]
+		}
+		f := filepath.Join(prefix, rel)
+		files = append(files, filepath.ToSlash(f))
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	return files, nil
+}
+
+// HashZip returns the hash of the file content in the named zip file.
+// Only the file names and their contents are included in the hash:
+// the exact zip file format encoding, compression method,
+// per-file modification times, and other metadata are ignored.
+func HashZip(zipfile string, hash Hash) (string, error) {
+	z, err := zip.OpenReader(zipfile)
+	if err != nil {
+		return "", err
+	}
+	defer z.Close()
+	var files []string
+	zfiles := make(map[string]*zip.File)
+	for _, file := range z.File {
+		files = append(files, file.Name)
+		zfiles[file.Name] = file
+	}
+	zipOpen := func(name string) (io.ReadCloser, error) {
+		f := zfiles[name]
+		if f == nil {
+			return nil, fmt.Errorf("file %q not found in zip", name) // should never happen
+		}
+		return f.Open()
+	}
+	return hash(files, zipOpen)
+}
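Since golang.org/x/mod/sumdb/dirhash is newly vendored here, a short sketch of how its API fits together may help review. This is illustrative only and not part of the patch; the directory, zip name, and module prefix are invented.

package main

import (
	"fmt"

	"golang.org/x/mod/sumdb/dirhash"
)

func main() {
	// Hash a local tree as if it were module foo@v1.0.0. The prefix replaces
	// the on-disk directory name, so identical content yields the same "h1:"
	// hash regardless of where the tree is checked out.
	h1, err := dirhash.HashDir("./foo", "foo@v1.0.0", dirhash.DefaultHash)
	if err != nil {
		panic(err)
	}

	// Hash a zip of the same module. Compression and timestamps are ignored;
	// only entry names and contents count.
	h2, err := dirhash.HashZip("foo.zip", dirhash.DefaultHash)
	if err != nil {
		panic(err)
	}

	// Equal iff the zip entries are named foo@v1.0.0/... with identical contents.
	fmt.Println(h1 == h2)
}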
diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go
index b8322598ae..948a3ee63d 100644
--- a/vendor/golang.org/x/sync/errgroup/errgroup.go
+++ b/vendor/golang.org/x/sync/errgroup/errgroup.go
@@ -118,7 +118,6 @@ func (g *Group) TryGo(f func() error) bool {
 
 // SetLimit limits the number of active goroutines in this group to at most n.
 // A negative value indicates no limit.
-// A limit of zero will prevent any new goroutines from being added.
 //
 // Any subsequent call to the Go method will block until it can add an active
 // goroutine without exceeding the configured limit.
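The errgroup hunk above only drops a doc sentence about SetLimit; for context, a minimal sketch of the behavior that comment documents (values are illustrative, not part of the patch):

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	var g errgroup.Group
	g.SetLimit(2) // at most two goroutines run at once; further Go calls block

	for i := 0; i < 5; i++ {
		i := i // capture loop variable (pre-Go 1.22 idiom)
		g.Go(func() error {
			fmt.Println("task", i)
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("error:", err)
	}
}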
diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index 9c105f23af..02609d5b21 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -72,9 +72,6 @@ var X86 struct { HasSSSE3 bool // Supplemental streaming SIMD extension 3 HasSSE41 bool // Streaming SIMD extension 4 and 4.1 HasSSE42 bool // Streaming SIMD extension 4 and 4.2 - HasAVXIFMA bool // Advanced vector extension Integer Fused Multiply Add - HasAVXVNNI bool // Advanced vector extension Vector Neural Network Instructions - HasAVXVNNIInt8 bool // Advanced vector extension Vector Neural Network Int8 instructions _ CacheLinePad } diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go index 1e642f3304..600a680786 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -53,9 +53,6 @@ func initOptions() { {Name: "sse41", Feature: &X86.HasSSE41}, {Name: "sse42", Feature: &X86.HasSSE42}, {Name: "ssse3", Feature: &X86.HasSSSE3}, - {Name: "avxifma", Feature: &X86.HasAVXIFMA}, - {Name: "avxvnni", Feature: &X86.HasAVXVNNI}, - {Name: "avxvnniint8", Feature: &X86.HasAVXVNNIInt8}, // These capabilities should always be enabled on amd64: {Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"}, @@ -109,7 +106,7 @@ func archInit() { return } - eax7, ebx7, ecx7, edx7 := cpuid(7, 0) + _, ebx7, ecx7, edx7 := cpuid(7, 0) X86.HasBMI1 = isSet(3, ebx7) X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX X86.HasBMI2 = isSet(8, ebx7) @@ -137,24 +134,14 @@ func archInit() { X86.HasAVX512VAES = isSet(9, ecx7) X86.HasAVX512VBMI2 = isSet(6, ecx7) X86.HasAVX512BITALG = isSet(12, ecx7) + + eax71, _, _, _ := cpuid(7, 1) + X86.HasAVX512BF16 = isSet(5, eax71) } X86.HasAMXTile = isSet(24, edx7) X86.HasAMXInt8 = isSet(25, edx7) X86.HasAMXBF16 = isSet(22, edx7) - - // These features depend on the second level of extended features. - if eax7 >= 1 { - eax71, _, _, edx71 := cpuid(7, 1) - if X86.HasAVX512 { - X86.HasAVX512BF16 = isSet(5, eax71) - } - if X86.HasAVX { - X86.HasAVXIFMA = isSet(23, eax71) - X86.HasAVXVNNI = isSet(4, eax71) - X86.HasAVXVNNIInt8 = isSet(4, edx71) - } - } } func isSet(bitpos uint, value uint32) bool { diff --git a/vendor/golang.org/x/sys/unix/auxv.go b/vendor/golang.org/x/sys/unix/auxv.go deleted file mode 100644 index 37a82528f5..0000000000 --- a/vendor/golang.org/x/sys/unix/auxv.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) - -package unix - -import ( - "syscall" - "unsafe" -) - -//go:linkname runtime_getAuxv runtime.getAuxv -func runtime_getAuxv() []uintptr - -// Auxv returns the ELF auxiliary vector as a sequence of key/value pairs. -// The returned slice is always a fresh copy, owned by the caller. -// It returns an error on non-ELF platforms, or if the auxiliary vector cannot be accessed, -// which happens in some locked-down environments and build modes. 
-func Auxv() ([][2]uintptr, error) { - vec := runtime_getAuxv() - vecLen := len(vec) - - if vecLen == 0 { - return nil, syscall.ENOENT - } - - if vecLen%2 != 0 { - return nil, syscall.EINVAL - } - - result := make([]uintptr, vecLen) - copy(result, vec) - return unsafe.Slice((*[2]uintptr)(unsafe.Pointer(&result[0])), vecLen/2), nil -} diff --git a/vendor/golang.org/x/sys/unix/auxv_unsupported.go b/vendor/golang.org/x/sys/unix/auxv_unsupported.go deleted file mode 100644 index 1200487f2e..0000000000 --- a/vendor/golang.org/x/sys/unix/auxv_unsupported.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) - -package unix - -import "syscall" - -func Auxv() ([][2]uintptr, error) { - return nil, syscall.ENOTSUP -} diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index abc3955477..21974af064 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -1102,90 +1102,3 @@ func (s *Strioctl) SetInt(i int) { func IoctlSetStrioctlRetInt(fd int, req int, s *Strioctl) (int, error) { return ioctlPtrRet(fd, req, unsafe.Pointer(s)) } - -// Ucred Helpers -// See ucred(3c) and getpeerucred(3c) - -//sys getpeerucred(fd uintptr, ucred *uintptr) (err error) -//sys ucredFree(ucred uintptr) = ucred_free -//sys ucredGet(pid int) (ucred uintptr, err error) = ucred_get -//sys ucredGeteuid(ucred uintptr) (uid int) = ucred_geteuid -//sys ucredGetegid(ucred uintptr) (gid int) = ucred_getegid -//sys ucredGetruid(ucred uintptr) (uid int) = ucred_getruid -//sys ucredGetrgid(ucred uintptr) (gid int) = ucred_getrgid -//sys ucredGetsuid(ucred uintptr) (uid int) = ucred_getsuid -//sys ucredGetsgid(ucred uintptr) (gid int) = ucred_getsgid -//sys ucredGetpid(ucred uintptr) (pid int) = ucred_getpid - -// Ucred is an opaque struct that holds user credentials. -type Ucred struct { - ucred uintptr -} - -// We need to ensure that ucredFree is called on the underlying ucred -// when the Ucred is garbage collected. 
-func ucredFinalizer(u *Ucred) { - ucredFree(u.ucred) -} - -func GetPeerUcred(fd uintptr) (*Ucred, error) { - var ucred uintptr - err := getpeerucred(fd, &ucred) - if err != nil { - return nil, err - } - result := &Ucred{ - ucred: ucred, - } - // set the finalizer on the result so that the ucred will be freed - runtime.SetFinalizer(result, ucredFinalizer) - return result, nil -} - -func UcredGet(pid int) (*Ucred, error) { - ucred, err := ucredGet(pid) - if err != nil { - return nil, err - } - result := &Ucred{ - ucred: ucred, - } - // set the finalizer on the result so that the ucred will be freed - runtime.SetFinalizer(result, ucredFinalizer) - return result, nil -} - -func (u *Ucred) Geteuid() int { - defer runtime.KeepAlive(u) - return ucredGeteuid(u.ucred) -} - -func (u *Ucred) Getruid() int { - defer runtime.KeepAlive(u) - return ucredGetruid(u.ucred) -} - -func (u *Ucred) Getsuid() int { - defer runtime.KeepAlive(u) - return ucredGetsuid(u.ucred) -} - -func (u *Ucred) Getegid() int { - defer runtime.KeepAlive(u) - return ucredGetegid(u.ucred) -} - -func (u *Ucred) Getrgid() int { - defer runtime.KeepAlive(u) - return ucredGetrgid(u.ucred) -} - -func (u *Ucred) Getsgid() int { - defer runtime.KeepAlive(u) - return ucredGetsgid(u.ucred) -} - -func (u *Ucred) Getpid() int { - defer runtime.KeepAlive(u) - return ucredGetpid(u.ucred) -} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 4f432bfe8f..6ebc48b3fe 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1245,7 +1245,6 @@ const ( FAN_REPORT_DFID_NAME = 0xc00 FAN_REPORT_DFID_NAME_TARGET = 0x1e00 FAN_REPORT_DIR_FID = 0x400 - FAN_REPORT_FD_ERROR = 0x2000 FAN_REPORT_FID = 0x200 FAN_REPORT_NAME = 0x800 FAN_REPORT_PIDFD = 0x80 @@ -1331,10 +1330,8 @@ const ( FUSE_SUPER_MAGIC = 0x65735546 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 - F_CREATED_QUERY = 0x404 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x406 - F_DUPFD_QUERY = 0x403 F_EXLCK = 0x4 F_GETFD = 0x1 F_GETFL = 0x3 @@ -1554,7 +1551,6 @@ const ( IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e IPPROTO_SCTP = 0x84 - IPPROTO_SMC = 0x100 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 @@ -1627,8 +1623,6 @@ const ( IPV6_UNICAST_IF = 0x4c IPV6_USER_FLOW = 0xe IPV6_V6ONLY = 0x1a - IPV6_VERSION = 0x60 - IPV6_VERSION_MASK = 0xf0 IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 IP_ADD_SOURCE_MEMBERSHIP = 0x27 @@ -1873,7 +1867,6 @@ const ( MADV_UNMERGEABLE = 0xd MADV_WILLNEED = 0x3 MADV_WIPEONFORK = 0x12 - MAP_DROPPABLE = 0x8 MAP_FILE = 0x0 MAP_FIXED = 0x10 MAP_FIXED_NOREPLACE = 0x100000 @@ -1974,7 +1967,6 @@ const ( MSG_PEEK = 0x2 MSG_PROXY = 0x10 MSG_RST = 0x1000 - MSG_SOCK_DEVMEM = 0x2000000 MSG_SYN = 0x400 MSG_TRUNC = 0x20 MSG_TRYHARD = 0x4 @@ -2091,7 +2083,6 @@ const ( NFC_ATR_REQ_MAXSIZE = 0x40 NFC_ATR_RES_GB_MAXSIZE = 0x2f NFC_ATR_RES_MAXSIZE = 0x40 - NFC_ATS_MAXSIZE = 0x14 NFC_COMM_ACTIVE = 0x0 NFC_COMM_PASSIVE = 0x1 NFC_DEVICE_NAME_MAXSIZE = 0x8 @@ -2172,7 +2163,6 @@ const ( NFNL_SUBSYS_QUEUE = 0x3 NFNL_SUBSYS_ULOG = 0x4 NFS_SUPER_MAGIC = 0x6969 - NFT_BITWISE_BOOL = 0x0 NFT_CHAIN_FLAGS = 0x7 NFT_CHAIN_MAXNAMELEN = 0x100 NFT_CT_MAX = 0x17 @@ -2501,7 +2491,6 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b - PR_GET_SHADOW_STACK_STATUS = 0x4a PR_GET_SPECULATION_CTRL = 0x34 PR_GET_TAGGED_ADDR_CTRL = 0x38 PR_GET_THP_DISABLE = 0x2a @@ -2510,7 +2499,6 @@ const ( PR_GET_TIMING = 0xd PR_GET_TSC = 0x19 PR_GET_UNALIGN = 0x5 - 
PR_LOCK_SHADOW_STACK_STATUS = 0x4c PR_MCE_KILL = 0x21 PR_MCE_KILL_CLEAR = 0x0 PR_MCE_KILL_DEFAULT = 0x2 @@ -2537,8 +2525,6 @@ const ( PR_PAC_GET_ENABLED_KEYS = 0x3d PR_PAC_RESET_KEYS = 0x36 PR_PAC_SET_ENABLED_KEYS = 0x3c - PR_PMLEN_MASK = 0x7f000000 - PR_PMLEN_SHIFT = 0x18 PR_PPC_DEXCR_CTRL_CLEAR = 0x4 PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC = 0x10 PR_PPC_DEXCR_CTRL_EDITABLE = 0x1 @@ -2606,7 +2592,6 @@ const ( PR_SET_PTRACER = 0x59616d61 PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c - PR_SET_SHADOW_STACK_STATUS = 0x4b PR_SET_SPECULATION_CTRL = 0x35 PR_SET_SYSCALL_USER_DISPATCH = 0x3b PR_SET_TAGGED_ADDR_CTRL = 0x37 @@ -2617,9 +2602,6 @@ const ( PR_SET_UNALIGN = 0x6 PR_SET_VMA = 0x53564d41 PR_SET_VMA_ANON_NAME = 0x0 - PR_SHADOW_STACK_ENABLE = 0x1 - PR_SHADOW_STACK_PUSH = 0x4 - PR_SHADOW_STACK_WRITE = 0x2 PR_SME_GET_VL = 0x40 PR_SME_SET_VL = 0x3f PR_SME_SET_VL_ONEXEC = 0x40000 @@ -2929,6 +2911,7 @@ const ( RTM_NEWNEXTHOP = 0x68 RTM_NEWNEXTHOPBUCKET = 0x74 RTM_NEWNSID = 0x58 + RTM_NEWNVLAN = 0x70 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 RTM_NEWROUTE = 0x18 @@ -2937,7 +2920,6 @@ const ( RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c RTM_NEWTUNNEL = 0x78 - RTM_NEWVLAN = 0x70 RTM_NR_FAMILIES = 0x1b RTM_NR_MSGTYPES = 0x6c RTM_SETDCB = 0x4f diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 75207613c7..c0d45e3205 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -116,8 +116,6 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 - IPV6_FLOWINFO_MASK = 0xffffff0f - IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -306,7 +304,6 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 - SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index c68acda535..c731d24f02 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -116,8 +116,6 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 - IPV6_FLOWINFO_MASK = 0xffffff0f - IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -307,7 +305,6 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 - SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index a8c607ab86..680018a4a7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -115,8 +115,6 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 - IPV6_FLOWINFO_MASK = 0xffffff0f - IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -312,7 +310,6 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 - SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 18563dd8d3..a63909f308 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -109,7 +109,6 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 - GCS_MAGIC = 0x47435300 HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 @@ -120,8 +119,6 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 - IPV6_FLOWINFO_MASK = 0xffffff0f - IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -305,7 +302,6 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 - SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 22912cdaa9..9b0a2573fe 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -116,8 +116,6 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 - IPV6_FLOWINFO_MASK = 0xffffff0f - IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -299,7 +297,6 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 - SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 29344eb37a..958e6e0645 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -115,8 +115,6 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 - IPV6_FLOWINFO_MASK = 0xfffffff - IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -305,7 +303,6 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 - SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 20d51fb96a..50c7f25bd1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -115,8 +115,6 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 - IPV6_FLOWINFO_MASK = 0xfffffff - IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -305,7 +303,6 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 - SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 321b60902a..ced21d66d9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -115,8 +115,6 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 - IPV6_FLOWINFO_MASK = 0xffffff0f - IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -305,7 +303,6 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 - SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 9bacdf1e27..226c044190 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -115,8 +115,6 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 - IPV6_FLOWINFO_MASK = 0xffffff0f - IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -305,7 +303,6 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 - SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index c224272615..3122737cd4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -115,8 +115,6 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 - IPV6_FLOWINFO_MASK = 0xfffffff - IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -360,7 +358,6 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 - SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 6270c8ee13..eb5d3467ed 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -115,8 +115,6 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 - IPV6_FLOWINFO_MASK = 0xfffffff - IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -364,7 +362,6 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 - SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 9966c1941f..e921ebc60b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -115,8 +115,6 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 - IPV6_FLOWINFO_MASK = 0xffffff0f - IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -364,7 +362,6 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 - SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 848e5fcc42..38ba81c55c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -115,8 +115,6 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 - IPV6_FLOWINFO_MASK = 0xffffff0f - IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -296,7 +294,6 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 - SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d 
SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 669b2adb80..71f0400977 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -115,8 +115,6 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 - IPV6_FLOWINFO_MASK = 0xfffffff - IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -368,7 +366,6 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 - SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 4834e57514..c44a313322 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -119,8 +119,6 @@ const ( IN_CLOEXEC = 0x400000 IN_NONBLOCK = 0x4000 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 - IPV6_FLOWINFO_MASK = 0xfffffff - IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -359,7 +357,6 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x38 SCM_TIMESTAMPING_PKTINFO = 0x3c SCM_TIMESTAMPNS = 0x21 - SCM_TS_OPT_ID = 0x5a SCM_TXTIME = 0x3f SCM_WIFI_STATUS = 0x25 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index c6545413c4..829b87feb8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -141,16 +141,6 @@ import ( //go:cgo_import_dynamic libc_getpeername getpeername "libsocket.so" //go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so" //go:cgo_import_dynamic libc_recvfrom recvfrom "libsocket.so" -//go:cgo_import_dynamic libc_getpeerucred getpeerucred "libc.so" -//go:cgo_import_dynamic libc_ucred_get ucred_get "libc.so" -//go:cgo_import_dynamic libc_ucred_geteuid ucred_geteuid "libc.so" -//go:cgo_import_dynamic libc_ucred_getegid ucred_getegid "libc.so" -//go:cgo_import_dynamic libc_ucred_getruid ucred_getruid "libc.so" -//go:cgo_import_dynamic libc_ucred_getrgid ucred_getrgid "libc.so" -//go:cgo_import_dynamic libc_ucred_getsuid ucred_getsuid "libc.so" -//go:cgo_import_dynamic libc_ucred_getsgid ucred_getsgid "libc.so" -//go:cgo_import_dynamic libc_ucred_getpid ucred_getpid "libc.so" -//go:cgo_import_dynamic libc_ucred_free ucred_free "libc.so" //go:cgo_import_dynamic libc_port_create port_create "libc.so" //go:cgo_import_dynamic libc_port_associate port_associate "libc.so" //go:cgo_import_dynamic libc_port_dissociate port_dissociate "libc.so" @@ -290,16 +280,6 @@ import ( //go:linkname procgetpeername libc_getpeername //go:linkname procsetsockopt libc_setsockopt //go:linkname procrecvfrom libc_recvfrom -//go:linkname procgetpeerucred libc_getpeerucred -//go:linkname procucred_get libc_ucred_get -//go:linkname procucred_geteuid libc_ucred_geteuid -//go:linkname procucred_getegid libc_ucred_getegid -//go:linkname procucred_getruid libc_ucred_getruid -//go:linkname procucred_getrgid libc_ucred_getrgid -//go:linkname procucred_getsuid libc_ucred_getsuid -//go:linkname procucred_getsgid libc_ucred_getsgid -//go:linkname procucred_getpid libc_ucred_getpid -//go:linkname procucred_free libc_ucred_free //go:linkname procport_create 
libc_port_create //go:linkname procport_associate libc_port_associate //go:linkname procport_dissociate libc_port_dissociate @@ -440,16 +420,6 @@ var ( procgetpeername, procsetsockopt, procrecvfrom, - procgetpeerucred, - procucred_get, - procucred_geteuid, - procucred_getegid, - procucred_getruid, - procucred_getrgid, - procucred_getsuid, - procucred_getsgid, - procucred_getpid, - procucred_free, procport_create, procport_associate, procport_dissociate, @@ -2059,90 +2029,6 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getpeerucred(fd uintptr, ucred *uintptr) (err error) { - _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetpeerucred)), 2, uintptr(fd), uintptr(unsafe.Pointer(ucred)), 0, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ucredGet(pid int) (ucred uintptr, err error) { - r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procucred_get)), 1, uintptr(pid), 0, 0, 0, 0, 0) - ucred = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ucredGeteuid(ucred uintptr) (uid int) { - r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_geteuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) - uid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ucredGetegid(ucred uintptr) (gid int) { - r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getegid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) - gid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ucredGetruid(ucred uintptr) (uid int) { - r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getruid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) - uid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ucredGetrgid(ucred uintptr) (gid int) { - r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getrgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) - gid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ucredGetsuid(ucred uintptr) (uid int) { - r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) - uid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ucredGetsgid(ucred uintptr) (gid int) { - r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) - gid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ucredGetpid(ucred uintptr) (pid int) { - r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getpid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ucredFree(ucred uintptr) { - sysvicall6(uintptr(unsafe.Pointer(&procucred_free)), 1, uintptr(ucred), 0, 0, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func port_create() (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_create)), 0, 0, 0, 0, 0, 0, 0) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index c79aaff306..524b0820cb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ 
b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -458,8 +458,4 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 - SYS_SETXATTRAT = 463 - SYS_GETXATTRAT = 464 - SYS_LISTXATTRAT = 465 - SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 5eb450695e..f485dbf456 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -381,8 +381,4 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 - SYS_SETXATTRAT = 463 - SYS_GETXATTRAT = 464 - SYS_LISTXATTRAT = 465 - SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 05e5029744..70b35bf3b0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -422,8 +422,4 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 - SYS_SETXATTRAT = 463 - SYS_GETXATTRAT = 464 - SYS_LISTXATTRAT = 465 - SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 38c53ec51b..1893e2fe88 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -325,8 +325,4 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 - SYS_SETXATTRAT = 463 - SYS_GETXATTRAT = 464 - SYS_LISTXATTRAT = 465 - SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 31d2e71a18..16a4017da0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -321,8 +321,4 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 - SYS_SETXATTRAT = 463 - SYS_GETXATTRAT = 464 - SYS_LISTXATTRAT = 465 - SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index f4184a336b..7e567f1eff 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -442,8 +442,4 @@ const ( SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 SYS_MSEAL = 4462 - SYS_SETXATTRAT = 4463 - SYS_GETXATTRAT = 4464 - SYS_LISTXATTRAT = 4465 - SYS_REMOVEXATTRAT = 4466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 05b9962278..38ae55e5ef 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -372,8 +372,4 @@ const ( SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 SYS_MSEAL = 5462 - SYS_SETXATTRAT = 5463 - SYS_GETXATTRAT = 5464 - SYS_LISTXATTRAT = 5465 - SYS_REMOVEXATTRAT = 5466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 43a256e9e6..55e92e60a8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -372,8 +372,4 @@ const ( SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 SYS_MSEAL = 5462 - SYS_SETXATTRAT = 5463 - SYS_GETXATTRAT = 5464 - SYS_LISTXATTRAT = 5465 - SYS_REMOVEXATTRAT = 
5466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index eea5ddfc22..60658d6a02 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -442,8 +442,4 @@ const ( SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 SYS_MSEAL = 4462 - SYS_SETXATTRAT = 4463 - SYS_GETXATTRAT = 4464 - SYS_LISTXATTRAT = 4465 - SYS_REMOVEXATTRAT = 4466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index 0d777bfbb1..e203e8a7ed 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -449,8 +449,4 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 - SYS_SETXATTRAT = 463 - SYS_GETXATTRAT = 464 - SYS_LISTXATTRAT = 465 - SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index b446365025..5944b97d54 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -421,8 +421,4 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 - SYS_SETXATTRAT = 463 - SYS_GETXATTRAT = 464 - SYS_LISTXATTRAT = 465 - SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 0c7d21c188..c66d416dad 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -421,8 +421,4 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 - SYS_SETXATTRAT = 463 - SYS_GETXATTRAT = 464 - SYS_LISTXATTRAT = 465 - SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 8405391698..a5459e766f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -326,8 +326,4 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 - SYS_SETXATTRAT = 463 - SYS_GETXATTRAT = 464 - SYS_LISTXATTRAT = 465 - SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index fcf1b790d6..01d86825bb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -387,8 +387,4 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 - SYS_SETXATTRAT = 463 - SYS_GETXATTRAT = 464 - SYS_LISTXATTRAT = 465 - SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 52d15b5f9d..7b703e77cd 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -400,8 +400,4 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 - SYS_SETXATTRAT = 463 - SYS_GETXATTRAT = 464 - SYS_LISTXATTRAT = 465 - SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index a46abe6472..5537148dcb 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go 
@@ -4747,7 +4747,7 @@ const (
 	NL80211_ATTR_MAC_HINT                                   = 0xc8
 	NL80211_ATTR_MAC_MASK                                   = 0xd7
 	NL80211_ATTR_MAX_AP_ASSOC_STA                           = 0xca
-	NL80211_ATTR_MAX                                        = 0x14d
+	NL80211_ATTR_MAX                                        = 0x14c
 	NL80211_ATTR_MAX_CRIT_PROT_DURATION                     = 0xb4
 	NL80211_ATTR_MAX_CSA_COUNTERS                           = 0xce
 	NL80211_ATTR_MAX_MATCH_SETS                             = 0x85
@@ -5519,7 +5519,7 @@ const (
 	NL80211_MNTR_FLAG_CONTROL                               = 0x3
 	NL80211_MNTR_FLAG_COOK_FRAMES                           = 0x5
 	NL80211_MNTR_FLAG_FCSFAIL                               = 0x1
-	NL80211_MNTR_FLAG_MAX                                   = 0x7
+	NL80211_MNTR_FLAG_MAX                                   = 0x6
 	NL80211_MNTR_FLAG_OTHER_BSS                             = 0x4
 	NL80211_MNTR_FLAG_PLCPFAIL                              = 0x2
 	NL80211_MPATH_FLAG_ACTIVE                               = 0x1
@@ -6174,5 +6174,3 @@ type SockDiagReq struct {
 	Family   uint8
 	Protocol uint8
 }
-
-const RTM_NEWNVLAN = 0x70
return diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go index cfda893433..4d6d13b15e 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -36,7 +36,10 @@ package inspector import ( "go/ast" +<<<<<<< HEAD _ "unsafe" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // An Inspector provides methods for inspecting @@ -45,9 +48,12 @@ type Inspector struct { events []event } +<<<<<<< HEAD //go:linkname events func events(in *Inspector) []event { return in.events } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // New returns an Inspector for the specified syntax trees. func New(files []*ast.File) *Inspector { return &Inspector{traverse(files)} @@ -56,10 +62,16 @@ func New(files []*ast.File) *Inspector { // An event represents a push or a pop // of an ast.Node during a traversal. type event struct { +<<<<<<< HEAD node ast.Node typ uint64 // typeOf(node) on push event, or union of typ strictly between push and pop events on pop events index int32 // index of corresponding push or pop event parent int32 // index of parent's push node (defined for push nodes only) +======= + node ast.Node + typ uint64 // typeOf(node) on push event, or union of typ strictly between push and pop events on pop events + index int // index of corresponding push or pop event +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // TODO: Experiment with storing only the second word of event.node (unsafe.Pointer). @@ -88,7 +100,11 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { // }) mask := maskOf(types) +<<<<<<< HEAD for i := int32(0); i < int32(len(in.events)); { +======= + for i := 0; i < len(in.events); { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ev := in.events[i] if ev.index > i { // push @@ -118,7 +134,11 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { // matches an element of the types slice. 
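For readers unfamiliar with this package, the Preorder/Nodes API whose docs are merged above is easiest to see in use. A short sketch, assuming only the standard parser plus this inspector package; the source string is a made-up example:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/inspector"
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "x.go", "package p; func f(a, b int) int { return a + b }", 0)
	if err != nil {
		panic(err)
	}
	in := inspector.New([]*ast.File{f})
	// Visit only *ast.Ident nodes, using the precomputed event list.
	count := 0
	in.Preorder([]ast.Node{(*ast.Ident)(nil)}, func(n ast.Node) {
		count++
	})
	fmt.Println("identifiers:", count)
}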
func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proceed bool)) { mask := maskOf(types) +<<<<<<< HEAD for i := int32(0); i < int32(len(in.events)); { +======= + for i := 0; i < len(in.events); { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ev := in.events[i] if ev.index > i { // push @@ -152,7 +172,11 @@ func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proc func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (proceed bool)) { mask := maskOf(types) var stack []ast.Node +<<<<<<< HEAD for i := int32(0); i < int32(len(in.events)); { +======= + for i := 0; i < len(in.events); { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ev := in.events[i] if ev.index > i { // push @@ -201,12 +225,17 @@ func traverse(files []*ast.File) []event { events := make([]event, 0, capacity) var stack []event +<<<<<<< HEAD stack = append(stack, event{index: -1}) // include an extra event so file nodes have a parent +======= + stack = append(stack, event{}) // include an extra event so file nodes have a parent +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, f := range files { ast.Inspect(f, func(n ast.Node) bool { if n != nil { // push ev := event{ +<<<<<<< HEAD node: n, typ: 0, // temporarily used to accumulate type bits of subtree index: int32(len(events)), // push event temporarily holds own index @@ -219,6 +248,14 @@ func traverse(files []*ast.File) []event { if int32(len(events)) < 0 { panic("event index exceeded int32") } +======= + node: n, + typ: 0, // temporarily used to accumulate type bits of subtree + index: len(events), // push event temporarily holds own index + } + stack = append(stack, ev) + events = append(events, ev) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } else { // pop top := len(stack) - 1 @@ -227,9 +264,15 @@ func traverse(files []*ast.File) []event { push := ev.index parent := top - 1 +<<<<<<< HEAD events[push].typ = typ // set type of push stack[parent].typ |= typ | ev.typ // parent's typ contains push and pop's typs. events[push].index = int32(len(events)) // make push refer to pop +======= + events[push].typ = typ // set type of push + stack[parent].typ |= typ | ev.typ // parent's typ contains push and pop's typs. 
+ events[push].index = len(events) // make push refer to pop +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) stack = stack[:top] events = append(events, ev) diff --git a/vendor/golang.org/x/tools/go/ast/inspector/iter.go b/vendor/golang.org/x/tools/go/ast/inspector/iter.go index c576dc70ac..1dd0b70743 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/iter.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/iter.go @@ -26,7 +26,11 @@ func (in *Inspector) PreorderSeq(types ...ast.Node) iter.Seq[ast.Node] { return func(yield func(ast.Node) bool) { mask := maskOf(types) +<<<<<<< HEAD for i := int32(0); i < int32(len(in.events)); { +======= + for i := 0; i < len(in.events); { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ev := in.events[i] if ev.index > i { // push @@ -63,7 +67,11 @@ func All[N interface { mask := typeOf((N)(nil)) return func(yield func(N) bool) { +<<<<<<< HEAD for i := int32(0); i < int32(len(in.events)); { +======= + for i := 0; i < len(in.events); { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ev := in.events[i] if ev.index > i { // push diff --git a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go index 40b1bfd7e6..6c1be29f5c 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go @@ -12,8 +12,11 @@ package inspector import ( "go/ast" "math" +<<<<<<< HEAD _ "unsafe" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const ( @@ -217,7 +220,10 @@ func typeOf(n ast.Node) uint64 { return 0 } +<<<<<<< HEAD //go:linkname maskOf +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func maskOf(nodes []ast.Node) uint64 { if nodes == nil { return math.MaxUint64 // match all node types diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index 65fe2628e9..1a721098b2 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -106,11 +106,16 @@ func Find(importPath, srcDir string) (filename, path string) { // additional trailing data beyond the end of the export data. func NewReader(r io.Reader) (io.Reader, error) { buf := bufio.NewReader(r) +<<<<<<< HEAD size, err := gcimporter.FindExportData(buf) +======= + _, size, err := gcimporter.FindExportData(buf) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } +<<<<<<< HEAD // We were given an archive and found the __.PKGDEF in it. // This tells us the size of the export data, and we don't // need to return the entire file. @@ -118,6 +123,21 @@ func NewReader(r io.Reader) (io.Reader, error) { R: buf, N: size, }, nil +======= + if size >= 0 { + // We were given an archive and found the __.PKGDEF in it. + // This tells us the size of the export data, and we don't + // need to return the entire file. + return &io.LimitedReader{ + R: buf, + N: size, + }, nil + } else { + // We were given an object file. As such, we don't know how large + // the export data is and must return the entire file. 
+ return buf, nil + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // readAll works the same way as io.ReadAll, but avoids allocations and copies diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index 91bd62e83b..b8f9e4ae2d 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -13,7 +13,10 @@ import ( "fmt" "os" "os/exec" +<<<<<<< HEAD "slices" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "strings" ) @@ -132,7 +135,11 @@ func findExternalDriver(cfg *Config) driver { // command. // // (See similar trick in Invocation.run in ../../internal/gocommand/invoke.go) +<<<<<<< HEAD cmd.Env = append(slices.Clip(cfg.Env), "PWD="+cfg.Dir) +======= + cmd.Env = append(slicesClip(cfg.Env), "PWD="+cfg.Dir) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) cmd.Stdin = bytes.NewReader(req) cmd.Stdout = buf cmd.Stderr = stderr @@ -151,3 +158,10 @@ func findExternalDriver(cfg *Config) driver { return &response, nil } } +<<<<<<< HEAD +======= + +// slicesClip removes unused capacity from the slice, returning s[:len(s):len(s)]. +// TODO(adonovan): use go1.21 slices.Clip. +func slicesClip[S ~[]E, E any](s S) S { return s[:len(s):len(s)] } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 0458b4f9c4..aeb6497cc0 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -322,7 +322,10 @@ type jsonPackage struct { ImportPath string Dir string Name string +<<<<<<< HEAD Target string +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Export string GoFiles []string CompiledGoFiles []string @@ -506,15 +509,22 @@ func (state *golistState) createDriverResponse(words ...string) (*DriverResponse pkg := &Package{ Name: p.Name, ID: p.ImportPath, +<<<<<<< HEAD Dir: p.Dir, Target: p.Target, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), OtherFiles: absJoin(p.Dir, otherFiles(p)...), EmbedFiles: absJoin(p.Dir, p.EmbedFiles), EmbedPatterns: absJoin(p.Dir, p.EmbedPatterns), IgnoredFiles: absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles), +<<<<<<< HEAD ForTest: p.ForTest, +======= + forTest: p.ForTest, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) depsErrors: p.DepsErrors, Module: p.Module, } @@ -798,7 +808,11 @@ func jsonFlag(cfg *Config, goVersion int) string { // Request Dir in the unlikely case Export is not absolute. 
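Why the WIP side introduces slicesClip above: cfg.Env may alias a caller's larger slice, and an unclipped append could overwrite the caller's elements in place. A small self-contained demonstration (clip here is a local copy of the same one-liner):

package main

import "fmt"

// clip removes spare capacity, so a later append cannot write into
// backing storage shared with another slice.
func clip[S ~[]E, E any](s S) S { return s[:len(s):len(s)] }

func main() {
	base := []string{"A=1", "B=2", "C=3"}
	env := base[:2] // len 2, cap 3: shares base's backing array

	_ = append(env, "PWD=/tmp") // unclipped: writes base[2] in place
	fmt.Println(base[2])        // prints PWD=/tmp: the shared array was clobbered

	base[2] = "C=3"
	_ = append(clip(env), "PWD=/tmp") // clipped: forces a fresh allocation
	fmt.Println(base[2])              // still C=3
}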
addFields("Dir", "Export") } +<<<<<<< HEAD if cfg.Mode&NeedForTest != 0 { +======= + if cfg.Mode&needInternalForTest != 0 { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) addFields("ForTest") } if cfg.Mode&needInternalDepsErrors != 0 { @@ -813,9 +827,12 @@ func jsonFlag(cfg *Config, goVersion int) string { if cfg.Mode&NeedEmbedPatterns != 0 { addFields("EmbedPatterns") } +<<<<<<< HEAD if cfg.Mode&NeedTarget != 0 { addFields("Target") } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return "-json=" + strings.Join(fields, ",") } diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go index 69eec9f44d..6aee3e686a 100644 --- a/vendor/golang.org/x/tools/go/packages/loadmode_string.go +++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go @@ -23,11 +23,17 @@ var modes = [...]struct { {NeedSyntax, "NeedSyntax"}, {NeedTypesInfo, "NeedTypesInfo"}, {NeedTypesSizes, "NeedTypesSizes"}, +<<<<<<< HEAD {NeedForTest, "NeedForTest"}, {NeedModule, "NeedModule"}, {NeedEmbedFiles, "NeedEmbedFiles"}, {NeedEmbedPatterns, "NeedEmbedPatterns"}, {NeedTarget, "NeedTarget"}, +======= + {NeedModule, "NeedModule"}, + {NeedEmbedFiles, "NeedEmbedFiles"}, + {NeedEmbedPatterns, "NeedEmbedPatterns"}, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (mode LoadMode) String() string { diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 0147d9080a..a3b8e538b5 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -43,6 +43,7 @@ import ( // ID and Errors (if present) will always be filled. // [Load] may return more information than requested. // +<<<<<<< HEAD // The Mode flag is a union of several bits named NeedName, // NeedFiles, and so on, each of which determines whether // a given field of Package (Name, Files, etc) should be @@ -57,6 +58,8 @@ import ( // [LoadSyntax] ... plus type-annotated syntax // [LoadAllSyntax] ... for all dependencies // +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Unfortunately there are a number of open bugs related to // interactions among the LoadMode bits: // - https://github.com/golang/go/issues/56633 @@ -69,7 +72,11 @@ const ( // NeedName adds Name and PkgPath. NeedName LoadMode = 1 << iota +<<<<<<< HEAD // NeedFiles adds Dir, GoFiles, OtherFiles, and IgnoredFiles +======= + // NeedFiles adds GoFiles, OtherFiles, and IgnoredFiles +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) NeedFiles // NeedCompiledGoFiles adds CompiledGoFiles. @@ -100,10 +107,16 @@ const ( // needInternalDepsErrors adds the internal deps errors field for use by gopls. needInternalDepsErrors +<<<<<<< HEAD // NeedForTest adds ForTest. // // Tests must also be set on the context for this field to be populated. NeedForTest +======= + // needInternalForTest adds the internal forTest field. + // Tests must also be set on the context for this field to be populated. + needInternalForTest +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // typecheckCgo enables full support for type checking cgo. Requires Go 1.15+. // Modifies CompiledGoFiles and Types, and has no effect on its own. @@ -118,14 +131,18 @@ const ( // NeedEmbedPatterns adds EmbedPatterns. NeedEmbedPatterns +<<<<<<< HEAD // NeedTarget adds Target. 
NeedTarget +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Be sure to update loadmode_string.go when adding new items! ) const ( // LoadFiles loads the name and file names for the initial packages. +<<<<<<< HEAD LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles // LoadImports loads the name, file names, and import mapping for the initial packages. @@ -138,6 +155,35 @@ const ( LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo // LoadAllSyntax loads typed syntax for the initial packages and all dependencies. +======= + // + // Deprecated: LoadFiles exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles + + // LoadImports loads the name, file names, and import mapping for the initial packages. + // + // Deprecated: LoadImports exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadImports = LoadFiles | NeedImports + + // LoadTypes loads exported type information for the initial packages. + // + // Deprecated: LoadTypes exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadTypes = LoadImports | NeedTypes | NeedTypesSizes + + // LoadSyntax loads typed syntax for the initial packages. + // + // Deprecated: LoadSyntax exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo + + // LoadAllSyntax loads typed syntax for the initial packages and all dependencies. + // + // Deprecated: LoadAllSyntax exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) LoadAllSyntax = LoadSyntax | NeedDeps // Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile. @@ -437,12 +483,15 @@ type Package struct { // PkgPath is the package path as used by the go/types package. PkgPath string +<<<<<<< HEAD // Dir is the directory associated with the package, if it exists. // // For packages listed by the go command, this is the directory containing // the package files. Dir string +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Errors contains any errors encountered querying the metadata // of the package, or while parsing or type-checking its files. Errors []Error @@ -482,10 +531,13 @@ type Package struct { // information for the package as provided by the build system. ExportFile string +<<<<<<< HEAD // Target is the absolute install path of the .a file, for libraries, // and of the executable file, for binaries. Target string +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Imports maps import paths appearing in the package's Go source files // to corresponding loaded Packages. Imports map[string]*Package @@ -534,8 +586,13 @@ type Package struct { // -- internal -- +<<<<<<< HEAD // ForTest is the package under test, if any. ForTest string +======= + // forTest is the package under test, if any. + forTest string +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // depsErrors is the DepsErrors field from the go list response, if any. 
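Since the hunks above redocument the LoadMode bits, and the WIP side marks the aggregate Load* constants as deprecated aliases, a brief usage sketch may help; the pattern "fmt" is just an illustrative target:

package main

import (
	"fmt"

	"golang.org/x/tools/go/packages"
)

func main() {
	// Spell out the Need* bits rather than using the deprecated
	// aggregate constants such as LoadFiles.
	cfg := &packages.Config{
		Mode: packages.NeedName | packages.NeedFiles | packages.NeedImports,
	}
	pkgs, err := packages.Load(cfg, "fmt")
	if err != nil {
		panic(err)
	}
	for _, p := range pkgs {
		fmt.Println(p.PkgPath, len(p.GoFiles), "Go files")
	}
}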
depsErrors []*packagesinternal.PackageError @@ -564,6 +621,12 @@ type ModuleError struct { } func init() { +<<<<<<< HEAD +======= + packagesinternal.GetForTest = func(p interface{}) string { + return p.(*Package).forTest + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError { return p.(*Package).depsErrors } @@ -575,6 +638,10 @@ func init() { } packagesinternal.TypecheckCgo = int(typecheckCgo) packagesinternal.DepsErrors = int(needInternalDepsErrors) +<<<<<<< HEAD +======= + packagesinternal.ForTest = int(needInternalForTest) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // An Error describes a problem with a package's metadata, syntax, or types. diff --git a/vendor/golang.org/x/tools/go/ssa/const.go b/vendor/golang.org/x/tools/go/ssa/const.go index 764b73529e..5595e9cb32 100644 --- a/vendor/golang.org/x/tools/go/ssa/const.go +++ b/vendor/golang.org/x/tools/go/ssa/const.go @@ -12,9 +12,15 @@ import ( "go/token" "go/types" "strconv" +<<<<<<< HEAD "golang.org/x/tools/internal/typeparams" "golang.org/x/tools/internal/typesinternal" +======= + "strings" + + "golang.org/x/tools/internal/typeparams" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // NewConst returns a new constant of the specified value and type. @@ -78,7 +84,11 @@ func zeroConst(t types.Type) *Const { func (c *Const) RelString(from *types.Package) string { var s string if c.Value == nil { +<<<<<<< HEAD s, _ = typesinternal.ZeroString(c.typ, types.RelativeTo(from)) +======= + s = zeroString(c.typ, from) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } else if c.Value.Kind() == constant.String { s = constant.StringVal(c.Value) const max = 20 @@ -93,6 +103,47 @@ func (c *Const) RelString(from *types.Package) string { return s + ":" + relType(c.Type(), from) } +<<<<<<< HEAD +======= +// zeroString returns the string representation of the "zero" value of the type t. +func zeroString(t types.Type, from *types.Package) string { + switch t := t.(type) { + case *types.Basic: + switch { + case t.Info()&types.IsBoolean != 0: + return "false" + case t.Info()&types.IsNumeric != 0: + return "0" + case t.Info()&types.IsString != 0: + return `""` + case t.Kind() == types.UnsafePointer: + fallthrough + case t.Kind() == types.UntypedNil: + return "nil" + default: + panic(fmt.Sprint("zeroString for unexpected type:", t)) + } + case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature: + return "nil" + case *types.Named, *types.Alias: + return zeroString(t.Underlying(), from) + case *types.Array, *types.Struct: + return relType(t, from) + "{}" + case *types.Tuple: + // Tuples are not normal values. + // We currently format as "(t[0], ..., t[n])". Could be something else.
+ components := make([]string, t.Len()) + for i := 0; i < t.Len(); i++ { + components[i] = zeroString(t.At(i).Type(), from) + } + return "(" + strings.Join(components, ", ") + ")" + case *types.TypeParam: + return "*new(" + relType(t, from) + ")" + } + panic(fmt.Sprint("zeroString: unexpected ", t)) +} + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (c *Const) Name() string { return c.RelString(nil) } diff --git a/vendor/golang.org/x/tools/go/ssa/dom.go b/vendor/golang.org/x/tools/go/ssa/dom.go index f490986140..30c8bd9c40 100644 --- a/vendor/golang.org/x/tools/go/ssa/dom.go +++ b/vendor/golang.org/x/tools/go/ssa/dom.go @@ -318,7 +318,10 @@ func printDomTreeText(buf *bytes.Buffer, v *BasicBlock, indent int) { // printDomTreeDot prints the dominator tree of f in AT&T GraphViz // (.dot) format. +<<<<<<< HEAD // (unused; retained for debugging) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func printDomTreeDot(buf *bytes.Buffer, f *Function) { fmt.Fprintln(buf, "//", f) fmt.Fprintln(buf, "digraph domtree {") diff --git a/vendor/golang.org/x/tools/go/ssa/util.go b/vendor/golang.org/x/tools/go/ssa/util.go index aa070eacdc..7e52f1dbc6 100644 --- a/vendor/golang.org/x/tools/go/ssa/util.go +++ b/vendor/golang.org/x/tools/go/ssa/util.go @@ -14,7 +14,10 @@ import ( "io" "os" "sync" +<<<<<<< HEAD _ "unsafe" // for go:linkname hack +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "golang.org/x/tools/go/types/typeutil" "golang.org/x/tools/internal/typeparams" @@ -409,6 +412,20 @@ func (canon *canonizer) instantiateMethod(m *types.Func, targs []types.Type, ctx } // Exposed to ssautil using the linkname hack. +<<<<<<< HEAD // //go:linkname isSyntactic golang.org/x/tools/go/ssa.isSyntactic func isSyntactic(pkg *Package) bool { return pkg.syntax } +======= +func isSyntactic(pkg *Package) bool { return pkg.syntax } + +// mapValues returns a new unordered array of map values. +func mapValues[K comparable, V any](m map[K]V) []V { + vals := make([]V, 0, len(m)) + for _, fn := range m { + vals = append(vals, fn) + } + return vals + +} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go index 93b3090c68..5e9538885a 100644 --- a/vendor/golang.org/x/tools/go/types/typeutil/map.go +++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go @@ -2,28 +2,43 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +<<<<<<< HEAD // Package typeutil defines various utilities for types, such as [Map], // a hash table that maps [types.Type] to any value. package typeutil +======= +// Package typeutil defines various utilities for types, such as Map, +// a mapping from types.Type to any values. +package typeutil // import "golang.org/x/tools/go/types/typeutil" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) import ( "bytes" "fmt" "go/types" +<<<<<<< HEAD "hash/maphash" "unsafe" +======= + "reflect" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "golang.org/x/tools/internal/typeparams" ) // Map is a hash-table-based mapping from types (types.Type) to +<<<<<<< HEAD // arbitrary values. The concrete types that implement +======= +// arbitrary any values. The concrete types that implement +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // the Type interface are pointers. 
Since they are not canonicalized, // == cannot be used to check for equivalence, and thus we cannot // simply use a Go map. // // Just as with map[K]V, a nil *Map is a valid empty map. // +<<<<<<< HEAD // Read-only map operations ([Map.At], [Map.Len], and so on) may // safely be called concurrently. // @@ -31,6 +46,11 @@ import ( // and 69559, if the latter proposals for a generic hash-map type and // a types.Hash function are accepted. type Map struct { +======= +// Not thread-safe. +type Map struct { + hasher Hasher // shared by many Maps +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused length int // number of map entries } @@ -41,17 +61,47 @@ type entry struct { value any } +<<<<<<< HEAD // SetHasher has no effect. // // It is a relic of an optimization that is no longer profitable. Do // not use [Hasher], [MakeHasher], or [SetHasher] in new code. func (m *Map) SetHasher(Hasher) {} +======= +// SetHasher sets the hasher used by Map. +// +// All Hashers are functionally equivalent but contain internal state +// used to cache the results of hashing previously seen types. +// +// A single Hasher created by MakeHasher() may be shared among many +// Maps. This is recommended if the instances have many keys in +// common, as it will amortize the cost of hash computation. +// +// A Hasher may grow without bound as new types are seen. Even when a +// type is deleted from the map, the Hasher never shrinks, since other +// types in the map may reference the deleted type indirectly. +// +// Hashers are not thread-safe, and read-only operations such as +// Map.Lookup require updates to the hasher, so a full Mutex lock (not a +// read-lock) is required around all Map operations if a shared +// hasher is accessed from multiple threads. +// +// If SetHasher is not called, the Map will create a private hasher at +// the first call to Insert. +func (m *Map) SetHasher(hasher Hasher) { + m.hasher = hasher +} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Delete removes the entry with the given key, if any. // It returns true if the entry was found. func (m *Map) Delete(key types.Type) bool { if m != nil && m.table != nil { +<<<<<<< HEAD hash := hash(key) +======= + hash := m.hasher.Hash(key) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) bucket := m.table[hash] for i, e := range bucket { if e.key != nil && types.Identical(key, e.key) { @@ -70,7 +120,11 @@ func (m *Map) Delete(key types.Type) bool { // The result is nil if the entry is not present. func (m *Map) At(key types.Type) any { if m != nil && m.table != nil { +<<<<<<< HEAD for _, e := range m.table[hash(key)] { +======= + for _, e := range m.table[m.hasher.Hash(key)] { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if e.key != nil && types.Identical(key, e.key) { return e.value } @@ -83,7 +137,11 @@ // and returns the previous entry, if any.
func (m *Map) Set(key types.Type, value any) (prev any) { if m.table != nil { +<<<<<<< HEAD hash := hash(key) +======= + hash := m.hasher.Hash(key) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) bucket := m.table[hash] var hole *entry for i, e := range bucket { @@ -102,7 +160,14 @@ func (m *Map) Set(key types.Type, value any) (prev any) { m.table[hash] = append(bucket, entry{key, value}) } } else { +<<<<<<< HEAD hash := hash(key) +======= + if m.hasher.memo == nil { + m.hasher = MakeHasher() + } + hash := m.hasher.Hash(key) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) m.table = map[uint32][]entry{hash: {entry{key, value}}} } @@ -179,6 +244,7 @@ func (m *Map) KeysString() string { return m.toString(false) } +<<<<<<< HEAD // -- Hasher -- // hash returns the hash of type t. @@ -196,10 +262,48 @@ var theHasher Hasher // MakeHasher returns Hasher{}. // Hashers are stateless; all are equivalent. func MakeHasher() Hasher { return theHasher } +======= +//////////////////////////////////////////////////////////////////////// +// Hasher + +// A Hasher maps each type to its hash value. +// For efficiency, a hasher uses memoization; thus its memory +// footprint grows monotonically over time. +// Hashers are not thread-safe. +// Hashers have reference semantics. +// Call MakeHasher to create a Hasher. +type Hasher struct { + memo map[types.Type]uint32 + + // ptrMap records pointer identity. + ptrMap map[any]uint32 + + // sigTParams holds type parameters from the signature being hashed. + // Signatures are considered identical modulo renaming of type parameters, so + // within the scope of a signature type the identity of the signature's type + // parameters is just their index. + // + // Since the language does not currently support referring to uninstantiated + // generic types or functions, and instantiated signatures do not have type + // parameter lists, we should never encounter a second non-empty type + // parameter list when hashing a generic signature. + sigTParams *types.TypeParamList +} + +// MakeHasher returns a new Hasher instance. +func MakeHasher() Hasher { + return Hasher{ + memo: make(map[types.Type]uint32), + ptrMap: make(map[any]uint32), + sigTParams: nil, + } +} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Hash computes a hash value for the given type t such that // Identical(t, t') => Hash(t) == Hash(t'). func (h Hasher) Hash(t types.Type) uint32 { +<<<<<<< HEAD return hasher{inGenericSig: false}.hash(t) } @@ -208,6 +312,16 @@ func (h Hasher) Hash(t types.Type) uint32 { // optimize [hasher.hashTypeParam]. type hasher struct{ inGenericSig bool } +======= + hash, ok := h.memo[t] + if !ok { + hash = h.hashFor(t) + h.memo[t] = hash + } + return hash +} + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // hashString computes the Fowler–Noll–Vo hash of s. func hashString(s string) uint32 { var h uint32 @@ -218,14 +332,20 @@ func hashString(s string) uint32 { return h } +<<<<<<< HEAD // hash computes the hash of t. func (h hasher) hash(t types.Type) uint32 { +======= +// hashFor computes the hash of t. +func (h Hasher) hashFor(t types.Type) uint32 { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // See Identical for rationale. 
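Both sides of the map.go changes above preserve the same contract: keys equal under types.Identical must hash alike, so structurally identical but distinct types.Type values address a single entry. A sketch of the behavior that contract enables (standard library plus this package only):

package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	// Two distinct but structurally identical types.Type values:
	// == reports them unequal, yet typeutil.Map treats them as one key.
	t1 := types.NewSlice(types.Typ[types.Int])
	t2 := types.NewSlice(types.Typ[types.Int])

	var m typeutil.Map // zero value is a valid empty map
	m.Set(t1, "a slice of int")
	fmt.Println(m.At(t2)) // "a slice of int"
	fmt.Println(m.Len())  // 1
}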
switch t := t.(type) { case *types.Basic: return uint32(t.Kind()) case *types.Alias: +<<<<<<< HEAD return h.hash(types.Unalias(t)) case *types.Array: @@ -233,6 +353,15 @@ func (h hasher) hash(t types.Type) uint32 { case *types.Slice: return 9049 + 2*h.hash(t.Elem()) +======= + return h.Hash(types.Unalias(t)) + + case *types.Array: + return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem()) + + case *types.Slice: + return 9049 + 2*h.Hash(t.Elem()) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) case *types.Struct: var hash uint32 = 9059 @@ -243,12 +372,20 @@ func (h hasher) hash(t types.Type) uint32 { } hash += hashString(t.Tag(i)) hash += hashString(f.Name()) // (ignore f.Pkg) +<<<<<<< HEAD hash += h.hash(f.Type()) +======= + hash += h.Hash(f.Type()) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return hash case *types.Pointer: +<<<<<<< HEAD return 9067 + 2*h.hash(t.Elem()) +======= + return 9067 + 2*h.Hash(t.Elem()) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) case *types.Signature: var hash uint32 = 9091 @@ -256,11 +393,41 @@ func (h hasher) hash(t types.Type) uint32 { hash *= 8863 } +<<<<<<< HEAD tparams := t.TypeParams() for i := range tparams.Len() { h.inGenericSig = true tparam := tparams.At(i) hash += 7 * h.hash(tparam.Constraint()) +======= + // Use a separate hasher for types inside of the signature, where type + // parameter identity is modified to be (index, constraint). We must use a + // new memo for this hasher as type identity may be affected by this + // masking. For example, in func[T any](*T), the identity of *T depends on + // whether we are mapping the argument in isolation, or recursively as part + // of hashing the signature. + // + // We should never encounter a generic signature while hashing another + // generic signature, but defensively set sigTParams only if h.mask is + // unset. + tparams := t.TypeParams() + if h.sigTParams == nil && tparams.Len() != 0 { + h = Hasher{ + // There may be something more efficient than discarding the existing + // memo, but it would require detecting whether types are 'tainted' by + // references to type parameters. + memo: make(map[types.Type]uint32), + // Re-using ptrMap ensures that pointer identity is preserved in this + // hasher. + ptrMap: h.ptrMap, + sigTParams: tparams, + } + } + + for i := 0; i < tparams.Len(); i++ { + tparam := tparams.At(i) + hash += 7 * h.Hash(tparam.Constraint()) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results()) @@ -294,6 +461,7 @@ func (h hasher) hash(t types.Type) uint32 { return hash case *types.Map: +<<<<<<< HEAD return 9109 + 2*h.hash(t.Key()) + 3*h.hash(t.Elem()) case *types.Chan: @@ -305,6 +473,19 @@ func (h hasher) hash(t types.Type) uint32 { for i := 0; i < targs.Len(); i++ { targ := targs.At(i) hash += 2 * h.hash(targ) +======= + return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem()) + + case *types.Chan: + return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem()) + + case *types.Named: + hash := h.hashPtr(t.Obj()) + targs := t.TypeArgs() + for i := 0; i < targs.Len(); i++ { + targ := targs.At(i) + hash += 2 * h.Hash(targ) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return hash @@ -318,17 +499,30 @@ func (h hasher) hash(t types.Type) uint32 { panic(fmt.Sprintf("%T: %v", t, t)) } +<<<<<<< HEAD func (h hasher) hashTuple(tuple *types.Tuple) uint32 { // See go/types.identicalTypes for rationale. 
n := tuple.Len() hash := 9137 + 2*uint32(n) for i := range n { hash += 3 * h.hash(tuple.At(i).Type()) +======= +func (h Hasher) hashTuple(tuple *types.Tuple) uint32 { + // See go/types.identicalTypes for rationale. + n := tuple.Len() + hash := 9137 + 2*uint32(n) + for i := 0; i < n; i++ { + hash += 3 * h.Hash(tuple.At(i).Type()) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return hash } +<<<<<<< HEAD func (h hasher) hashUnion(t *types.Union) uint32 { +======= +func (h Hasher) hashUnion(t *types.Union) uint32 { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Hash type restrictions. terms, err := typeparams.UnionTermSet(t) // if err != nil t has invalid type restrictions. Fall back on a non-zero @@ -339,11 +533,19 @@ func (h hasher) hashUnion(t *types.Union) uint32 { return h.hashTermSet(terms) } +<<<<<<< HEAD func (h hasher) hashTermSet(terms []*types.Term) uint32 { hash := 9157 + 2*uint32(len(terms)) for _, term := range terms { // term order is not significant. termHash := h.hash(term.Type()) +======= +func (h Hasher) hashTermSet(terms []*types.Term) uint32 { + hash := 9157 + 2*uint32(len(terms)) + for _, term := range terms { + // term order is not significant. + termHash := h.Hash(term.Type()) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if term.Tilde() { termHash *= 9161 } @@ -352,6 +554,7 @@ func (h hasher) hashTermSet(terms []*types.Term) uint32 { return hash } +<<<<<<< HEAD // hashTypeParam returns the hash of a type parameter. func (h hasher) hashTypeParam(t *types.TypeParam) uint32 { // Within the signature of a generic function, TypeParams are @@ -388,6 +591,38 @@ func (hasher) hashTypeName(tname *types.TypeName) uint32 { // the same name.) hash := uintptr(unsafe.Pointer(tname)) return uint32(hash ^ (hash >> 32)) +======= +// hashTypeParam returns a hash of the type parameter t, with a hash value +// depending on whether t is contained in h.sigTParams. +// +// If h.sigTParams is set and contains t, then we are in the process of hashing +// a signature, and the hash value of t must depend only on t's index and +// constraint: signatures are considered identical modulo type parameter +// renaming. To avoid infinite recursion, we only hash the type parameter +// index, and rely on types.Identical to handle signatures where constraints +// are not identical. +// +// Otherwise the hash of t depends only on t's pointer identity. +func (h Hasher) hashTypeParam(t *types.TypeParam) uint32 { + if h.sigTParams != nil { + i := t.Index() + if i >= 0 && i < h.sigTParams.Len() && t == h.sigTParams.At(i) { + return 9173 + 3*uint32(i) + } + } + return h.hashPtr(t.Obj()) +} + +// hashPtr hashes the pointer identity of ptr. It uses h.ptrMap to ensure that +// pointer values are not dependent on the GC. +func (h Hasher) hashPtr(ptr any) uint32 { + if hash, ok := h.ptrMap[ptr]; ok { + return hash + } + hash := uint32(reflect.ValueOf(ptr).Pointer()) + h.ptrMap[ptr] = hash + return hash +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // shallowHash computes a hash of t without looking at any of its @@ -404,7 +639,11 @@ func (hasher) hashTypeName(tname *types.TypeName) uint32 { // include m itself; there is no mention of the named type X that // might help us break the cycle. // (See comment in go/types.identical, case *Interface, for more.)
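The tuple and type-parameter hashing above, in both variants, upholds Hash's documented invariant: Identical(t, t') implies Hash(t) == Hash(t'), including for signatures that differ only in parameter names. A sketch exercising that invariant (the two signatures are invented examples):

package main

import (
	"fmt"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	h := typeutil.MakeHasher()
	// func(x int) and func(y int): identical modulo parameter naming.
	sig1 := types.NewSignatureType(nil, nil, nil,
		types.NewTuple(types.NewVar(token.NoPos, nil, "x", types.Typ[types.Int])),
		nil, false)
	sig2 := types.NewSignatureType(nil, nil, nil,
		types.NewTuple(types.NewVar(token.NoPos, nil, "y", types.Typ[types.Int])),
		nil, false)
	// Both true: tuple hashing looks only at element types, never names.
	fmt.Println(types.Identical(sig1, sig2), h.Hash(sig1) == h.Hash(sig2))
}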
+<<<<<<< HEAD func (h hasher) shallowHash(t types.Type) uint32 { +======= +func (h Hasher) shallowHash(t types.Type) uint32 { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // t is the type of an interface method (Signature), // its params or results (Tuples), or their immediate // elements (mostly Slice, Pointer, Basic, Named), @@ -425,7 +664,11 @@ func (h hasher) shallowHash(t types.Type) uint32 { case *types.Tuple: n := t.Len() hash := 9137 + 2*uint32(n) +<<<<<<< HEAD for i := range n { +======= + for i := 0; i < n; i++ { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) hash += 53471161 * h.shallowHash(t.At(i).Type()) } return hash @@ -458,10 +701,17 @@ func (h hasher) shallowHash(t types.Type) uint32 { return 9127 case *types.Named: +<<<<<<< HEAD return h.hashTypeName(t.Obj()) case *types.TypeParam: return h.hashTypeParam(t) +======= + return h.hashPtr(t.Obj()) + + case *types.TypeParam: + return h.hashPtr(t.Obj()) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } panic(fmt.Sprintf("shallowHash: %T: %v", t, t)) } diff --git a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go index 58615232ff..2fb8d18b5b 100644 --- a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go +++ b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go @@ -15,6 +15,10 @@ import ( "go/types" "os" pathpkg "path" +<<<<<<< HEAD +======= + "strconv" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "golang.org/x/tools/go/analysis" ) @@ -65,6 +69,267 @@ func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos return end } +<<<<<<< HEAD +======= +func ZeroValue(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { + // TODO(adonovan): think about generics, and also generic aliases. + under := types.Unalias(typ) + // Don't call Underlying unconditionally: although it removes + // Named and Alias, it also removes TypeParam. + if n, ok := under.(*types.Named); ok { + under = n.Underlying() + } + switch under := under.(type) { + case *types.Basic: + switch { + case under.Info()&types.IsNumeric != 0: + return &ast.BasicLit{Kind: token.INT, Value: "0"} + case under.Info()&types.IsBoolean != 0: + return &ast.Ident{Name: "false"} + case under.Info()&types.IsString != 0: + return &ast.BasicLit{Kind: token.STRING, Value: `""`} + default: + panic(fmt.Sprintf("unknown basic type %v", under)) + } + case *types.Chan, *types.Interface, *types.Map, *types.Pointer, *types.Signature, *types.Slice, *types.Array: + return ast.NewIdent("nil") + case *types.Struct: + texpr := TypeExpr(f, pkg, typ) // typ because we want the name here. + if texpr == nil { + return nil + } + return &ast.CompositeLit{ + Type: texpr, + } + } + return nil +} + +// IsZeroValue checks whether the given expression is a 'zero value' (as determined by output of +// analysisinternal.ZeroValue) +func IsZeroValue(expr ast.Expr) bool { + switch e := expr.(type) { + case *ast.BasicLit: + return e.Value == "0" || e.Value == `""` + case *ast.Ident: + return e.Name == "nil" || e.Name == "false" + default: + return false + } +} + +// TypeExpr returns syntax for the specified type. References to +// named types from packages other than pkg are qualified by an appropriate +// package name, as defined by the import environment of file. 
+func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { + switch t := typ.(type) { + case *types.Basic: + switch t.Kind() { + case types.UnsafePointer: + return &ast.SelectorExpr{X: ast.NewIdent("unsafe"), Sel: ast.NewIdent("Pointer")} + default: + return ast.NewIdent(t.Name()) + } + case *types.Pointer: + x := TypeExpr(f, pkg, t.Elem()) + if x == nil { + return nil + } + return &ast.UnaryExpr{ + Op: token.MUL, + X: x, + } + case *types.Array: + elt := TypeExpr(f, pkg, t.Elem()) + if elt == nil { + return nil + } + return &ast.ArrayType{ + Len: &ast.BasicLit{ + Kind: token.INT, + Value: fmt.Sprintf("%d", t.Len()), + }, + Elt: elt, + } + case *types.Slice: + elt := TypeExpr(f, pkg, t.Elem()) + if elt == nil { + return nil + } + return &ast.ArrayType{ + Elt: elt, + } + case *types.Map: + key := TypeExpr(f, pkg, t.Key()) + value := TypeExpr(f, pkg, t.Elem()) + if key == nil || value == nil { + return nil + } + return &ast.MapType{ + Key: key, + Value: value, + } + case *types.Chan: + dir := ast.ChanDir(t.Dir()) + if t.Dir() == types.SendRecv { + dir = ast.SEND | ast.RECV + } + value := TypeExpr(f, pkg, t.Elem()) + if value == nil { + return nil + } + return &ast.ChanType{ + Dir: dir, + Value: value, + } + case *types.Signature: + var params []*ast.Field + for i := 0; i < t.Params().Len(); i++ { + p := TypeExpr(f, pkg, t.Params().At(i).Type()) + if p == nil { + return nil + } + params = append(params, &ast.Field{ + Type: p, + Names: []*ast.Ident{ + { + Name: t.Params().At(i).Name(), + }, + }, + }) + } + if t.Variadic() { + last := params[len(params)-1] + last.Type = &ast.Ellipsis{Elt: last.Type.(*ast.ArrayType).Elt} + } + var returns []*ast.Field + for i := 0; i < t.Results().Len(); i++ { + r := TypeExpr(f, pkg, t.Results().At(i).Type()) + if r == nil { + return nil + } + returns = append(returns, &ast.Field{ + Type: r, + }) + } + return &ast.FuncType{ + Params: &ast.FieldList{ + List: params, + }, + Results: &ast.FieldList{ + List: returns, + }, + } + case interface{ Obj() *types.TypeName }: // *types.{Alias,Named,TypeParam} + if t.Obj().Pkg() == nil { + return ast.NewIdent(t.Obj().Name()) + } + if t.Obj().Pkg() == pkg { + return ast.NewIdent(t.Obj().Name()) + } + pkgName := t.Obj().Pkg().Name() + + // If the file already imports the package under another name, use that. + for _, cand := range f.Imports { + if path, _ := strconv.Unquote(cand.Path.Value); path == t.Obj().Pkg().Path() { + if cand.Name != nil && cand.Name.Name != "" { + pkgName = cand.Name.Name + } + } + } + if pkgName == "." { + return ast.NewIdent(t.Obj().Name()) + } + return &ast.SelectorExpr{ + X: ast.NewIdent(pkgName), + Sel: ast.NewIdent(t.Obj().Name()), + } + case *types.Struct: + return ast.NewIdent(t.String()) + case *types.Interface: + return ast.NewIdent(t.String()) + default: + return nil + } +} + +// StmtToInsertVarBefore returns the ast.Stmt before which we can safely insert a new variable. +// Some examples: +// +// Basic Example: +// z := 1 +// y := z + x +// If x is undeclared, then this function would return `y := z + x`, so that we +// can insert `x := ` on the line before `y := z + x`. +// +// If stmt example: +// if z == 1 { +// } else if z == y {} +// If y is undeclared, then this function would return `if z == 1 {`, because we cannot +// insert a statement between an if and an else if statement. As a result, we need to find +// the top of the if chain to insert `y := ` before. 
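TypeExpr above renders a types.Type back into syntax; note that its pointer case builds a unary * expression rather than an ast.StarExpr. A sketch of the kind of AST it constructs, printed with go/printer (the map[string]*int shape is an invented example, not taken from this patch):

package main

import (
	"fmt"
	"go/ast"
	"go/printer"
	"go/token"
	"os"
)

func main() {
	// Roughly what TypeExpr produces for map[string]*int.
	expr := &ast.MapType{
		Key: ast.NewIdent("string"),
		Value: &ast.UnaryExpr{ // unary *, mirroring the helper's pointer case
			Op: token.MUL,
			X:  ast.NewIdent("int"),
		},
	}
	printer.Fprint(os.Stdout, token.NewFileSet(), expr) // map[string]*int
	fmt.Println()
}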
+func StmtToInsertVarBefore(path []ast.Node) ast.Stmt { + enclosingIndex := -1 + for i, p := range path { + if _, ok := p.(ast.Stmt); ok { + enclosingIndex = i + break + } + } + if enclosingIndex == -1 { + return nil + } + enclosingStmt := path[enclosingIndex] + switch enclosingStmt.(type) { + case *ast.IfStmt: + // The enclosingStmt is inside of the if declaration, + // We need to check if we are in an else-if stmt and + // get the base if statement. + return baseIfStmt(path, enclosingIndex) + case *ast.CaseClause: + // Get the enclosing switch stmt if the enclosingStmt is + // inside of the case statement. + for i := enclosingIndex + 1; i < len(path); i++ { + if node, ok := path[i].(*ast.SwitchStmt); ok { + return node + } else if node, ok := path[i].(*ast.TypeSwitchStmt); ok { + return node + } + } + } + if len(path) <= enclosingIndex+1 { + return enclosingStmt.(ast.Stmt) + } + // Check if the enclosing statement is inside another node. + switch expr := path[enclosingIndex+1].(type) { + case *ast.IfStmt: + // Get the base if statement. + return baseIfStmt(path, enclosingIndex+1) + case *ast.ForStmt: + if expr.Init == enclosingStmt || expr.Post == enclosingStmt { + return expr + } + case *ast.SwitchStmt, *ast.TypeSwitchStmt: + return expr.(ast.Stmt) + } + return enclosingStmt.(ast.Stmt) +} + +// baseIfStmt walks up the if/else-if chain until we get to +// the top of the current if chain. +func baseIfStmt(path []ast.Node, index int) ast.Stmt { + stmt := path[index] + for i := index + 1; i < len(path); i++ { + if node, ok := path[i].(*ast.IfStmt); ok && node.Else == stmt { + stmt = node + continue + } + break + } + return stmt.(ast.Stmt) +} + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // WalkASTWithParent walks the AST rooted at n. The semantics are // similar to ast.Inspect except it does not call f(nil). func WalkASTWithParent(n ast.Node, f func(n ast.Node, parent ast.Node) bool) { diff --git a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go index 5662a311da..375d5c0d41 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go @@ -2,13 +2,20 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +<<<<<<< HEAD // This file should be kept in sync with $GOROOT/src/internal/exportdata/exportdata.go. // This file also additionally implements FindExportData for gcexportdata.NewReader. +======= +// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go. + +// This file implements FindExportData. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) package gcimporter import ( "bufio" +<<<<<<< HEAD "bytes" "errors" "fmt" @@ -179,6 +186,43 @@ func ReadUnified(r *bufio.Reader) (data []byte, err error) { func FindPackageDefinition(r *bufio.Reader) (size int, err error) { // Uses ReadSlice to limit risk of malformed inputs. +======= + "fmt" + "io" + "strconv" + "strings" +) + +func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) { + // See $GOROOT/include/ar.h. 
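The WIP-side readGopackHeader just below decodes a classic Unix ar entry header: 60 bytes of fixed-width text fields ending in a backquote and newline, with the decimal size field at bytes 48 through 58. A sketch that builds and re-parses such a header (all field values are placeholders):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// name(16) date(12) uid(6) gid(6) mode(8) size(10) terminator(2) = 60.
	hdr := fmt.Sprintf("%-16s%-12s%-6s%-6s%-8s%-10s`\n",
		"__.PKGDEF", "0", "0", "0", "644", "1234")
	name := strings.TrimSpace(hdr[:16])
	size, _ := strconv.Atoi(strings.TrimSpace(hdr[48:58]))
	fmt.Println(name, size) // __.PKGDEF 1234
}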
+ hdr := make([]byte, 16+12+6+6+8+10+2) + _, err = io.ReadFull(r, hdr) + if err != nil { + return + } + // leave for debugging + if false { + fmt.Printf("header: %s", hdr) + } + s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10])) + length, err := strconv.Atoi(s) + size = int64(length) + if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' { + err = fmt.Errorf("invalid archive header") + return + } + name = strings.TrimSpace(string(hdr[:16])) + return +} + +// FindExportData positions the reader r at the beginning of the +// export data section of an underlying GC-created object/archive +// file by reading from it. The reader must be positioned at the +// start of the file before calling this function. The hdr result +// is the string before the export data, either "$$" or "$$B". +// The size result is the length of the export data in bytes, or -1 if not known. +func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Read first line to make sure this is an object file. line, err := r.ReadSlice('\n') if err != nil { @@ -186,6 +230,7 @@ func FindPackageDefinition(r *bufio.Reader) (size int, err error) { return } +<<<<<<< HEAD // Is the first line an archive file signature? if string(line) != "!\n" { err = fmt.Errorf("not the start of an archive file (%q)", line) @@ -197,10 +242,54 @@ func FindPackageDefinition(r *bufio.Reader) (size int, err error) { if size <= 0 { err = fmt.Errorf("not a package file") return +======= + if string(line) == "!\n" { + // Archive file. Scan to __.PKGDEF. + var name string + if name, size, err = readGopackHeader(r); err != nil { + return + } + + // First entry should be __.PKGDEF. + if name != "__.PKGDEF" { + err = fmt.Errorf("go archive is missing __.PKGDEF") + return + } + + // Read first line of __.PKGDEF data, so that line + // is once again the first line of the input. + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + size -= int64(len(line)) + } + + // Now at __.PKGDEF in archive or still at beginning of file. + // Either way, line should begin with "go object ". + if !strings.HasPrefix(string(line), "go object ") { + err = fmt.Errorf("not a Go object file") + return + } + + // Skip over object header to export data. + // Begins after first line starting with $$. + for line[0] != '$' { + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + size -= int64(len(line)) + } + hdr = string(line) + if size < 0 { + size = -1 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return } +<<<<<<< HEAD // ReadObjectHeaders reads object headers from the reader. Object headers are // lines that do not start with an end-of-section marker "$$". 
The first header @@ -419,3 +508,5 @@ func lookupGorootExport(pkgDir string) (string, error) { return f.(func() (string, error))() } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index 3dbd21d1b9..ffe8ab0d71 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -23,11 +23,24 @@ package gcimporter // import "golang.org/x/tools/internal/gcimporter" import ( "bufio" +<<<<<<< HEAD "fmt" +======= + "bytes" + "fmt" + "go/build" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go/token" "go/types" "io" "os" +<<<<<<< HEAD +======= + "os/exec" + "path/filepath" + "strings" + "sync" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const ( @@ -39,6 +52,7 @@ const ( trace = false ) +<<<<<<< HEAD // Import imports a gc-generated package given its import path and srcDir, adds // the corresponding package object to the packages map, and returns the object. // The packages map must contain all packages already imported. @@ -47,6 +61,127 @@ const ( func Import(fset *token.FileSet, packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { var rc io.ReadCloser var id string +======= +var exportMap sync.Map // package dir → func() (string, bool) + +// lookupGorootExport returns the location of the export data +// (normally found in the build cache, but located in GOROOT/pkg +// in prior Go releases) for the package located in pkgDir. +// +// (We use the package's directory instead of its import path +// mainly to simplify handling of the packages in src/vendor +// and cmd/vendor.) +func lookupGorootExport(pkgDir string) (string, bool) { + f, ok := exportMap.Load(pkgDir) + if !ok { + var ( + listOnce sync.Once + exportPath string + ) + f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) { + listOnce.Do(func() { + cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir) + cmd.Dir = build.Default.GOROOT + var output []byte + output, err := cmd.Output() + if err != nil { + return + } + + exports := strings.Split(string(bytes.TrimSpace(output)), "\n") + if len(exports) != 1 { + return + } + + exportPath = exports[0] + }) + + return exportPath, exportPath != "" + }) + } + + return f.(func() (string, bool))() +} + +var pkgExts = [...]string{".a", ".o"} + +// FindPkg returns the filename and unique package id for an import +// path based on package information provided by build.Import (using +// the build.Default build.Context). A relative srcDir is interpreted +// relative to the current working directory. +// If no file was found, an empty filename is returned. +func FindPkg(path, srcDir string) (filename, id string) { + if path == "" { + return + } + + var noext string + switch { + default: + // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" + // Don't require the source files to be present. 
+ if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 + srcDir = abs + } + bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary) + if bp.PkgObj == "" { + var ok bool + if bp.Goroot && bp.Dir != "" { + filename, ok = lookupGorootExport(bp.Dir) + } + if !ok { + id = path // make sure we have an id to print in error message + return + } + } else { + noext = strings.TrimSuffix(bp.PkgObj, ".a") + id = bp.ImportPath + } + + case build.IsLocalImport(path): + // "./x" -> "/this/directory/x.ext", "/this/directory/x" + noext = filepath.Join(srcDir, path) + id = noext + + case filepath.IsAbs(path): + // for completeness only - go/build.Import + // does not support absolute imports + // "/x" -> "/x.ext", "/x" + noext = path + id = path + } + + if false { // for debugging + if path != id { + fmt.Printf("%s -> %s\n", path, id) + } + } + + if filename != "" { + if f, err := os.Stat(filename); err == nil && !f.IsDir() { + return + } + } + + // try extensions + for _, ext := range pkgExts { + filename = noext + ext + if f, err := os.Stat(filename); err == nil && !f.IsDir() { + return + } + } + + filename = "" // not found + return +} + +// Import imports a gc-generated package given its import path and srcDir, adds +// the corresponding package object to the packages map, and returns the object. +// The packages map must contain all packages already imported. +func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { + var rc io.ReadCloser + var filename, id string +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if lookup != nil { // With custom lookup specified, assume that caller has // converted path to a canonical import path for use in the map. @@ -65,13 +200,21 @@ func Import(fset *token.FileSet, packages map[string]*types.Package, path, srcDi } rc = f } else { +<<<<<<< HEAD var filename string filename, id, err = FindPkg(path, srcDir) +======= + filename, id = FindPkg(path, srcDir) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if filename == "" { if path == "unsafe" { return types.Unsafe, nil } +<<<<<<< HEAD return nil, err +======= + return nil, fmt.Errorf("can't find import: %q", id) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // no need to re-import if the package was imported completely before @@ -94,6 +237,7 @@ func Import(fset *token.FileSet, packages map[string]*types.Package, path, srcDi } defer rc.Close() +<<<<<<< HEAD buf := bufio.NewReader(rc) data, err := ReadUnified(buf) if err != nil { @@ -106,3 +250,64 @@ func Import(fset *token.FileSet, packages map[string]*types.Package, path, srcDi return } +======= + var hdr string + var size int64 + buf := bufio.NewReader(rc) + if hdr, size, err = FindExportData(buf); err != nil { + return + } + + switch hdr { + case "$$B\n": + var data []byte + data, err = io.ReadAll(buf) + if err != nil { + break + } + + // TODO(gri): allow clients of go/importer to provide a FileSet. + // Or, define a new standard go/types/gcexportdata package. + fset := token.NewFileSet() + + // Select appropriate importer. + if len(data) > 0 { + switch data[0] { + case 'v', 'c', 'd': + // binary: emitted by cmd/compile till go1.10; obsolete. + return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) + + case 'i': + // indexed: emitted by cmd/compile till go1.19; + // now used only for serializing go/types. + // See https://github.com/golang/go/issues/69491. 
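Stepping back from the export-data dispatch being merged here: outside x/tools, the same machinery is normally reached through go/importer. A sketch, assuming a toolchain that can supply gc export data for "fmt":

package main

import (
	"fmt"
	"go/importer"
	"go/token"
)

func main() {
	// importer.ForCompiler("gc") locates and reads export data much as
	// the Import function above does; a nil lookup means "find it on disk".
	fset := token.NewFileSet()
	pkg, err := importer.ForCompiler(fset, "gc", nil).Import("fmt")
	if err != nil {
		fmt.Println("no export data available:", err)
		return
	}
	fmt.Println(pkg.Path(), "loaded;", pkg.Scope().Len(), "objects")
}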
+ _, pkg, err := IImportData(fset, packages, data[1:], id) + return pkg, err + + case 'u': + // unified: emitted by cmd/compile since go1.20. + _, pkg, err := UImportData(fset, packages, data[1:size], id) + return pkg, err + + default: + l := len(data) + if l > 10 { + l = 10 + } + return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id) + } + } + + default: + err = fmt.Errorf("unknown export data header: %q", hdr) + } + + return +} + +type byPath []*types.Package + +func (a byPath) Len() int { return len(a) } +func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 69b1d697cb..2f1c6ed11a 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -5,6 +5,11 @@ // Indexed package import. // See iexport.go for the export data format. +<<<<<<< HEAD +======= +// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) package gcimporter import ( @@ -1109,9 +1114,12 @@ func (r *importReader) byte() byte { } return x } +<<<<<<< HEAD type byPath []*types.Package func (a byPath) Len() int { return len(a) } func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index 6cdab448ec..3e54e4d2a3 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -11,6 +11,10 @@ import ( "go/token" "go/types" "sort" +<<<<<<< HEAD +======= + "strings" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/pkgbits" @@ -70,6 +74,10 @@ func UImportData(fset *token.FileSet, imports map[string]*types.Package, data [] } s := string(data) +<<<<<<< HEAD +======= + s = s[:strings.LastIndex(s, "\n$$\n")] +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) input := pkgbits.NewPkgDecoder(path, s) pkg = readUnifiedPackage(fset, nil, imports, input) return @@ -264,12 +272,16 @@ func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package { func (r *reader) doPkg() *types.Package { path := r.String() switch path { +<<<<<<< HEAD // cmd/compile emits path="main" for main packages because // that's the linker symbol prefix it used; but we need // the package's path as it would be reported by go list, // hence "main" below. // See test at go/packages.TestMainPackagePathInModeTypes. case "", "main": +======= + case "": +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) path = r.p.PkgPath() case "builtin": return nil // universe diff --git a/vendor/golang.org/x/tools/internal/imports/source.go b/vendor/golang.org/x/tools/internal/imports/source.go index cbe4f3c5ba..bdcfdc8db6 100644 --- a/vendor/golang.org/x/tools/internal/imports/source.go +++ b/vendor/golang.org/x/tools/internal/imports/source.go @@ -59,5 +59,9 @@ type Source interface { // candidates satisfy all missing references for that package name. 
It is up // to each data source to select the best result for each entry in the // missing map. +<<<<<<< HEAD ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error) +======= + ResolveReferences(ctx context.Context, filename string, missing References) (map[PackageName]*Result, error) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/golang.org/x/tools/internal/imports/source_env.go b/vendor/golang.org/x/tools/internal/imports/source_env.go index d14abaa319..ab1c005794 100644 --- a/vendor/golang.org/x/tools/internal/imports/source_env.go +++ b/vendor/golang.org/x/tools/internal/imports/source_env.go @@ -48,7 +48,11 @@ func (s *ProcessEnvSource) LoadPackageNames(ctx context.Context, srcDir string, return r.loadPackageNames(unknown, srcDir) } +<<<<<<< HEAD func (s *ProcessEnvSource) ResolveReferences(ctx context.Context, filename string, refs map[string]map[string]bool) ([]*Result, error) { +======= +func (s *ProcessEnvSource) ResolveReferences(ctx context.Context, filename string, refs map[string]map[string]bool) (map[string]*Result, error) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var mu sync.Mutex found := make(map[string][]pkgDistance) callback := &scanCallback{ @@ -121,9 +125,13 @@ func (s *ProcessEnvSource) ResolveReferences(ctx context.Context, filename strin if err := g.Wait(); err != nil { return nil, err } +<<<<<<< HEAD var ans []*Result for _, x := range results { ans = append(ans, x) } return ans, nil +======= + return results, nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go index 66e69b4389..b9a89704ab 100644 --- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go +++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -5,6 +5,10 @@ // Package packagesinternal exposes internal-only fields from go/packages. 
package packagesinternal +<<<<<<< HEAD +======= +var GetForTest = func(p interface{}) string { return "" } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var GetDepsErrors = func(p interface{}) []*PackageError { return nil } type PackageError struct { @@ -15,6 +19,10 @@ type PackageError struct { var TypecheckCgo int var DepsErrors int // must be set as a LoadMode to call GetDepsErrors +<<<<<<< HEAD +======= +var ForTest int // must be set as a LoadMode to call GetForTest +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var SetModFlag = func(config interface{}, value string) {} var SetModFile = func(config interface{}, value string) {} diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go index 9f0b871ff6..3e1f421943 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go +++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go @@ -268,8 +268,11 @@ var PackageSymbols = map[string][]Symbol{ {"ErrTooLarge", Var, 0}, {"Fields", Func, 0}, {"FieldsFunc", Func, 0}, +<<<<<<< HEAD {"FieldsFuncSeq", Func, 24}, {"FieldsSeq", Func, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"HasPrefix", Func, 0}, {"HasSuffix", Func, 0}, {"Index", Func, 0}, @@ -282,7 +285,10 @@ var PackageSymbols = map[string][]Symbol{ {"LastIndexAny", Func, 0}, {"LastIndexByte", Func, 5}, {"LastIndexFunc", Func, 0}, +<<<<<<< HEAD {"Lines", Func, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"Map", Func, 0}, {"MinRead", Const, 0}, {"NewBuffer", Func, 0}, @@ -296,9 +302,13 @@ var PackageSymbols = map[string][]Symbol{ {"Split", Func, 0}, {"SplitAfter", Func, 0}, {"SplitAfterN", Func, 0}, +<<<<<<< HEAD {"SplitAfterSeq", Func, 24}, {"SplitN", Func, 0}, {"SplitSeq", Func, 24}, +======= + {"SplitN", Func, 0}, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"Title", Func, 0}, {"ToLower", Func, 0}, {"ToLowerSpecial", Func, 0}, @@ -540,7 +550,10 @@ var PackageSymbols = map[string][]Symbol{ {"NewCTR", Func, 0}, {"NewGCM", Func, 2}, {"NewGCMWithNonceSize", Func, 5}, +<<<<<<< HEAD {"NewGCMWithRandomNonce", Func, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"NewGCMWithTagSize", Func, 11}, {"NewOFB", Func, 0}, {"Stream", Type, 0}, @@ -679,6 +692,7 @@ var PackageSymbols = map[string][]Symbol{ {"Unmarshal", Func, 0}, {"UnmarshalCompressed", Func, 15}, }, +<<<<<<< HEAD "crypto/fips140": { {"Enabled", Func, 24}, }, @@ -687,6 +701,8 @@ var PackageSymbols = map[string][]Symbol{ {"Extract", Func, 24}, {"Key", Func, 24}, }, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "crypto/hmac": { {"Equal", Func, 1}, {"New", Func, 0}, @@ -697,6 +713,7 @@ var PackageSymbols = map[string][]Symbol{ {"Size", Const, 0}, {"Sum", Func, 2}, }, +<<<<<<< HEAD "crypto/mlkem": { {"(*DecapsulationKey1024).Bytes", Method, 24}, {"(*DecapsulationKey1024).Decapsulate", Method, 24}, @@ -728,12 +745,17 @@ var PackageSymbols = map[string][]Symbol{ "crypto/pbkdf2": { {"Key", Func, 24}, }, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "crypto/rand": { {"Int", Func, 0}, {"Prime", Func, 0}, {"Read", Func, 0}, {"Reader", Var, 0}, +<<<<<<< HEAD {"Text", Func, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }, "crypto/rc4": { {"(*Cipher).Reset", Method, 0}, @@ -812,6 +834,7 @@ var PackageSymbols = map[string][]Symbol{ {"Sum224", Func, 2}, {"Sum256", Func, 2}, }, +<<<<<<< HEAD "crypto/sha3": { {"(*SHA3).AppendBinary", Method, 24}, 
{"(*SHA3).BlockSize", Method, 24}, @@ -845,6 +868,8 @@ var PackageSymbols = map[string][]Symbol{ {"SumSHAKE128", Func, 24}, {"SumSHAKE256", Func, 24}, }, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "crypto/sha512": { {"BlockSize", Const, 0}, {"New", Func, 0}, @@ -867,7 +892,10 @@ var PackageSymbols = map[string][]Symbol{ {"ConstantTimeEq", Func, 0}, {"ConstantTimeLessOrEq", Func, 2}, {"ConstantTimeSelect", Func, 0}, +<<<<<<< HEAD {"WithDataIndependentTiming", Func, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"XORBytes", Func, 20}, }, "crypto/tls": { @@ -944,7 +972,10 @@ var PackageSymbols = map[string][]Symbol{ {"ClientHelloInfo", Type, 4}, {"ClientHelloInfo.CipherSuites", Field, 4}, {"ClientHelloInfo.Conn", Field, 8}, +<<<<<<< HEAD {"ClientHelloInfo.Extensions", Field, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"ClientHelloInfo.ServerName", Field, 4}, {"ClientHelloInfo.SignatureSchemes", Field, 8}, {"ClientHelloInfo.SupportedCurves", Field, 4}, @@ -962,7 +993,10 @@ var PackageSymbols = map[string][]Symbol{ {"Config.CurvePreferences", Field, 3}, {"Config.DynamicRecordSizingDisabled", Field, 7}, {"Config.EncryptedClientHelloConfigList", Field, 23}, +<<<<<<< HEAD {"Config.EncryptedClientHelloKeys", Field, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"Config.EncryptedClientHelloRejectionVerify", Field, 23}, {"Config.GetCertificate", Field, 4}, {"Config.GetClientCertificate", Field, 8}, @@ -1016,10 +1050,13 @@ var PackageSymbols = map[string][]Symbol{ {"ECHRejectionError", Type, 23}, {"ECHRejectionError.RetryConfigList", Field, 23}, {"Ed25519", Const, 13}, +<<<<<<< HEAD {"EncryptedClientHelloKey", Type, 24}, {"EncryptedClientHelloKey.Config", Field, 24}, {"EncryptedClientHelloKey.PrivateKey", Field, 24}, {"EncryptedClientHelloKey.SendAsRetry", Field, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"InsecureCipherSuites", Func, 14}, {"Listen", Func, 0}, {"LoadX509KeyPair", Func, 0}, @@ -1118,7 +1155,10 @@ var PackageSymbols = map[string][]Symbol{ {"VersionTLS12", Const, 2}, {"VersionTLS13", Const, 12}, {"X25519", Const, 8}, +<<<<<<< HEAD {"X25519MLKEM768", Const, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"X509KeyPair", Func, 0}, }, "crypto/x509": { @@ -1143,8 +1183,11 @@ var PackageSymbols = map[string][]Symbol{ {"(ConstraintViolationError).Error", Method, 0}, {"(HostnameError).Error", Method, 0}, {"(InsecureAlgorithmError).Error", Method, 6}, +<<<<<<< HEAD {"(OID).AppendBinary", Method, 24}, {"(OID).AppendText", Method, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(OID).Equal", Method, 22}, {"(OID).EqualASN1OID", Method, 22}, {"(OID).MarshalBinary", Method, 23}, @@ -1173,10 +1216,13 @@ var PackageSymbols = map[string][]Symbol{ {"Certificate.Extensions", Field, 2}, {"Certificate.ExtraExtensions", Field, 2}, {"Certificate.IPAddresses", Field, 1}, +<<<<<<< HEAD {"Certificate.InhibitAnyPolicy", Field, 24}, {"Certificate.InhibitAnyPolicyZero", Field, 24}, {"Certificate.InhibitPolicyMapping", Field, 24}, {"Certificate.InhibitPolicyMappingZero", Field, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"Certificate.IsCA", Field, 0}, {"Certificate.Issuer", Field, 0}, {"Certificate.IssuingCertificateURL", Field, 2}, @@ -1193,7 +1239,10 @@ var PackageSymbols = map[string][]Symbol{ {"Certificate.PermittedURIDomains", Field, 10}, {"Certificate.Policies", Field, 22}, 
{"Certificate.PolicyIdentifiers", Field, 0}, +<<<<<<< HEAD {"Certificate.PolicyMappings", Field, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"Certificate.PublicKey", Field, 0}, {"Certificate.PublicKeyAlgorithm", Field, 0}, {"Certificate.Raw", Field, 0}, @@ -1201,8 +1250,11 @@ var PackageSymbols = map[string][]Symbol{ {"Certificate.RawSubject", Field, 0}, {"Certificate.RawSubjectPublicKeyInfo", Field, 0}, {"Certificate.RawTBSCertificate", Field, 0}, +<<<<<<< HEAD {"Certificate.RequireExplicitPolicy", Field, 24}, {"Certificate.RequireExplicitPolicyZero", Field, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"Certificate.SerialNumber", Field, 0}, {"Certificate.Signature", Field, 0}, {"Certificate.SignatureAlgorithm", Field, 0}, @@ -1294,7 +1346,10 @@ var PackageSymbols = map[string][]Symbol{ {"NameConstraintsWithoutSANs", Const, 10}, {"NameMismatch", Const, 8}, {"NewCertPool", Func, 0}, +<<<<<<< HEAD {"NoValidChains", Const, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"NotAuthorizedToSign", Const, 0}, {"OID", Type, 22}, {"OIDFromInts", Func, 22}, @@ -1316,9 +1371,12 @@ var PackageSymbols = map[string][]Symbol{ {"ParsePKCS8PrivateKey", Func, 0}, {"ParsePKIXPublicKey", Func, 0}, {"ParseRevocationList", Func, 19}, +<<<<<<< HEAD {"PolicyMapping", Type, 24}, {"PolicyMapping.IssuerDomainPolicy", Field, 24}, {"PolicyMapping.SubjectDomainPolicy", Field, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"PublicKeyAlgorithm", Type, 0}, {"PureEd25519", Const, 13}, {"RSA", Const, 0}, @@ -1365,7 +1423,10 @@ var PackageSymbols = map[string][]Symbol{ {"UnknownPublicKeyAlgorithm", Const, 0}, {"UnknownSignatureAlgorithm", Const, 0}, {"VerifyOptions", Type, 0}, +<<<<<<< HEAD {"VerifyOptions.CertificatePolicies", Field, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"VerifyOptions.CurrentTime", Field, 0}, {"VerifyOptions.DNSName", Field, 0}, {"VerifyOptions.Intermediates", Field, 0}, @@ -2076,8 +2137,11 @@ var PackageSymbols = map[string][]Symbol{ {"(*File).DynString", Method, 1}, {"(*File).DynValue", Method, 21}, {"(*File).DynamicSymbols", Method, 4}, +<<<<<<< HEAD {"(*File).DynamicVersionNeeds", Method, 24}, {"(*File).DynamicVersions", Method, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*File).ImportedLibraries", Method, 0}, {"(*File).ImportedSymbols", Method, 0}, {"(*File).Section", Method, 0}, @@ -2343,6 +2407,7 @@ var PackageSymbols = map[string][]Symbol{ {"DynFlag", Type, 0}, {"DynFlag1", Type, 21}, {"DynTag", Type, 0}, +<<<<<<< HEAD {"DynamicVersion", Type, 24}, {"DynamicVersion.Deps", Field, 24}, {"DynamicVersion.Flags", Field, 24}, @@ -2356,6 +2421,8 @@ var PackageSymbols = map[string][]Symbol{ {"DynamicVersionNeed", Type, 24}, {"DynamicVersionNeed.Name", Field, 24}, {"DynamicVersionNeed.Needs", Field, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"EI_ABIVERSION", Const, 0}, {"EI_CLASS", Const, 0}, {"EI_DATA", Const, 0}, @@ -3842,6 +3909,7 @@ var PackageSymbols = map[string][]Symbol{ {"Symbol.Size", Field, 0}, {"Symbol.Value", Field, 0}, {"Symbol.Version", Field, 13}, +<<<<<<< HEAD {"Symbol.VersionIndex", Field, 24}, {"Symbol.VersionScope", Field, 24}, {"SymbolVersionScope", Type, 24}, @@ -3855,6 +3923,10 @@ var PackageSymbols = map[string][]Symbol{ {"VersionScopeLocal", Const, 24}, {"VersionScopeNone", Const, 24}, {"VersionScopeSpecific", Const, 24}, +======= + {"Type", Type, 0}, + {"Version", Type, 0}, +>>>>>>> 
70e0318b1 ([WIP] add archivista storage backend) }, "debug/gosym": { {"(*DecodingError).Error", Method, 0}, @@ -4580,10 +4652,15 @@ var PackageSymbols = map[string][]Symbol{ {"FS", Type, 16}, }, "encoding": { +<<<<<<< HEAD {"BinaryAppender", Type, 24}, {"BinaryMarshaler", Type, 2}, {"BinaryUnmarshaler", Type, 2}, {"TextAppender", Type, 24}, +======= + {"BinaryMarshaler", Type, 2}, + {"BinaryUnmarshaler", Type, 2}, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"TextMarshaler", Type, 2}, {"TextUnmarshaler", Type, 2}, }, @@ -6113,16 +6190,24 @@ var PackageSymbols = map[string][]Symbol{ {"(*Interface).Complete", Method, 5}, {"(*Interface).Embedded", Method, 5}, {"(*Interface).EmbeddedType", Method, 11}, +<<<<<<< HEAD {"(*Interface).EmbeddedTypes", Method, 24}, {"(*Interface).Empty", Method, 5}, {"(*Interface).ExplicitMethod", Method, 5}, {"(*Interface).ExplicitMethods", Method, 24}, +======= + {"(*Interface).Empty", Method, 5}, + {"(*Interface).ExplicitMethod", Method, 5}, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*Interface).IsComparable", Method, 18}, {"(*Interface).IsImplicit", Method, 18}, {"(*Interface).IsMethodSet", Method, 18}, {"(*Interface).MarkImplicit", Method, 18}, {"(*Interface).Method", Method, 5}, +<<<<<<< HEAD {"(*Interface).Methods", Method, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*Interface).NumEmbeddeds", Method, 5}, {"(*Interface).NumExplicitMethods", Method, 5}, {"(*Interface).NumMethods", Method, 5}, @@ -6143,11 +6228,17 @@ var PackageSymbols = map[string][]Symbol{ {"(*MethodSet).At", Method, 5}, {"(*MethodSet).Len", Method, 5}, {"(*MethodSet).Lookup", Method, 5}, +<<<<<<< HEAD {"(*MethodSet).Methods", Method, 24}, {"(*MethodSet).String", Method, 5}, {"(*Named).AddMethod", Method, 5}, {"(*Named).Method", Method, 5}, {"(*Named).Methods", Method, 24}, +======= + {"(*MethodSet).String", Method, 5}, + {"(*Named).AddMethod", Method, 5}, + {"(*Named).Method", Method, 5}, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*Named).NumMethods", Method, 5}, {"(*Named).Obj", Method, 5}, {"(*Named).Origin", Method, 18}, @@ -6188,7 +6279,10 @@ var PackageSymbols = map[string][]Symbol{ {"(*Pointer).String", Method, 5}, {"(*Pointer).Underlying", Method, 5}, {"(*Scope).Child", Method, 5}, +<<<<<<< HEAD {"(*Scope).Children", Method, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*Scope).Contains", Method, 5}, {"(*Scope).End", Method, 5}, {"(*Scope).Innermost", Method, 5}, @@ -6224,7 +6318,10 @@ var PackageSymbols = map[string][]Symbol{ {"(*StdSizes).Offsetsof", Method, 5}, {"(*StdSizes).Sizeof", Method, 5}, {"(*Struct).Field", Method, 5}, +<<<<<<< HEAD {"(*Struct).Fields", Method, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*Struct).NumFields", Method, 5}, {"(*Struct).String", Method, 5}, {"(*Struct).Tag", Method, 5}, @@ -6236,10 +6333,15 @@ var PackageSymbols = map[string][]Symbol{ {"(*Tuple).Len", Method, 5}, {"(*Tuple).String", Method, 5}, {"(*Tuple).Underlying", Method, 5}, +<<<<<<< HEAD {"(*Tuple).Variables", Method, 24}, {"(*TypeList).At", Method, 18}, {"(*TypeList).Len", Method, 18}, {"(*TypeList).Types", Method, 24}, +======= + {"(*TypeList).At", Method, 18}, + {"(*TypeList).Len", Method, 18}, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*TypeName).Exported", Method, 5}, {"(*TypeName).Id", Method, 5}, {"(*TypeName).IsAlias", Method, 9}, @@ -6257,11 +6359,17 @@ var PackageSymbols = map[string][]Symbol{ {"(*TypeParam).Underlying", 
Method, 18}, {"(*TypeParamList).At", Method, 18}, {"(*TypeParamList).Len", Method, 18}, +<<<<<<< HEAD {"(*TypeParamList).TypeParams", Method, 24}, {"(*Union).Len", Method, 18}, {"(*Union).String", Method, 18}, {"(*Union).Term", Method, 18}, {"(*Union).Terms", Method, 24}, +======= + {"(*Union).Len", Method, 18}, + {"(*Union).String", Method, 18}, + {"(*Union).Term", Method, 18}, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*Union).Underlying", Method, 18}, {"(*Var).Anonymous", Method, 5}, {"(*Var).Embedded", Method, 11}, @@ -6532,12 +6640,18 @@ var PackageSymbols = map[string][]Symbol{ {"(*Hash).WriteByte", Method, 14}, {"(*Hash).WriteString", Method, 14}, {"Bytes", Func, 19}, +<<<<<<< HEAD {"Comparable", Func, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"Hash", Type, 14}, {"MakeSeed", Func, 14}, {"Seed", Type, 14}, {"String", Func, 19}, +<<<<<<< HEAD {"WriteComparable", Func, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }, "html": { {"EscapeString", Func, 0}, @@ -7224,7 +7338,10 @@ var PackageSymbols = map[string][]Symbol{ {"(*JSONHandler).WithGroup", Method, 21}, {"(*Level).UnmarshalJSON", Method, 21}, {"(*Level).UnmarshalText", Method, 21}, +<<<<<<< HEAD {"(*LevelVar).AppendText", Method, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*LevelVar).Level", Method, 21}, {"(*LevelVar).MarshalText", Method, 21}, {"(*LevelVar).Set", Method, 21}, @@ -7253,7 +7370,10 @@ var PackageSymbols = map[string][]Symbol{ {"(Attr).Equal", Method, 21}, {"(Attr).String", Method, 21}, {"(Kind).String", Method, 21}, +<<<<<<< HEAD {"(Level).AppendText", Method, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(Level).Level", Method, 21}, {"(Level).MarshalJSON", Method, 21}, {"(Level).MarshalText", Method, 21}, @@ -7284,7 +7404,10 @@ var PackageSymbols = map[string][]Symbol{ {"Debug", Func, 21}, {"DebugContext", Func, 21}, {"Default", Func, 21}, +<<<<<<< HEAD {"DiscardHandler", Var, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"Duration", Func, 21}, {"DurationValue", Func, 21}, {"Error", Func, 21}, @@ -7520,7 +7643,10 @@ var PackageSymbols = map[string][]Symbol{ {"(*Float).Acc", Method, 5}, {"(*Float).Add", Method, 5}, {"(*Float).Append", Method, 5}, +<<<<<<< HEAD {"(*Float).AppendText", Method, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*Float).Cmp", Method, 5}, {"(*Float).Copy", Method, 5}, {"(*Float).Float32", Method, 5}, @@ -7567,7 +7693,10 @@ var PackageSymbols = map[string][]Symbol{ {"(*Int).And", Method, 0}, {"(*Int).AndNot", Method, 0}, {"(*Int).Append", Method, 6}, +<<<<<<< HEAD {"(*Int).AppendText", Method, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*Int).Binomial", Method, 0}, {"(*Int).Bit", Method, 0}, {"(*Int).BitLen", Method, 0}, @@ -7624,7 +7753,10 @@ var PackageSymbols = map[string][]Symbol{ {"(*Int).Xor", Method, 0}, {"(*Rat).Abs", Method, 0}, {"(*Rat).Add", Method, 0}, +<<<<<<< HEAD {"(*Rat).AppendText", Method, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*Rat).Cmp", Method, 0}, {"(*Rat).Denom", Method, 0}, {"(*Rat).Float32", Method, 4}, @@ -7807,13 +7939,19 @@ var PackageSymbols = map[string][]Symbol{ {"Zipf", Type, 0}, }, "math/rand/v2": { +<<<<<<< HEAD {"(*ChaCha8).AppendBinary", Method, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*ChaCha8).MarshalBinary", Method, 22}, {"(*ChaCha8).Read", Method, 23}, 
{"(*ChaCha8).Seed", Method, 22}, {"(*ChaCha8).Uint64", Method, 22}, {"(*ChaCha8).UnmarshalBinary", Method, 22}, +<<<<<<< HEAD {"(*PCG).AppendBinary", Method, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*PCG).MarshalBinary", Method, 22}, {"(*PCG).Seed", Method, 22}, {"(*PCG).Uint64", Method, 22}, @@ -8081,7 +8219,10 @@ var PackageSymbols = map[string][]Symbol{ {"(*UnixListener).SyscallConn", Method, 10}, {"(Flags).String", Method, 0}, {"(HardwareAddr).String", Method, 0}, +<<<<<<< HEAD {"(IP).AppendText", Method, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(IP).DefaultMask", Method, 0}, {"(IP).Equal", Method, 0}, {"(IP).IsGlobalUnicast", Method, 0}, @@ -8282,9 +8423,12 @@ var PackageSymbols = map[string][]Symbol{ {"(*MaxBytesError).Error", Method, 19}, {"(*ProtocolError).Error", Method, 0}, {"(*ProtocolError).Is", Method, 21}, +<<<<<<< HEAD {"(*Protocols).SetHTTP1", Method, 24}, {"(*Protocols).SetHTTP2", Method, 24}, {"(*Protocols).SetUnencryptedHTTP2", Method, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*Request).AddCookie", Method, 0}, {"(*Request).BasicAuth", Method, 4}, {"(*Request).Clone", Method, 13}, @@ -8344,10 +8488,13 @@ var PackageSymbols = map[string][]Symbol{ {"(Header).Values", Method, 14}, {"(Header).Write", Method, 0}, {"(Header).WriteSubset", Method, 0}, +<<<<<<< HEAD {"(Protocols).HTTP1", Method, 24}, {"(Protocols).HTTP2", Method, 24}, {"(Protocols).String", Method, 24}, {"(Protocols).UnencryptedHTTP2", Method, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"AllowQuerySemicolons", Func, 17}, {"CanonicalHeaderKey", Func, 0}, {"Client", Type, 0}, @@ -8410,6 +8557,7 @@ var PackageSymbols = map[string][]Symbol{ {"FileSystem", Type, 0}, {"Flusher", Type, 0}, {"Get", Func, 0}, +<<<<<<< HEAD {"HTTP2Config", Type, 24}, {"HTTP2Config.CountError", Field, 24}, {"HTTP2Config.MaxConcurrentStreams", Field, 24}, @@ -8422,6 +8570,8 @@ var PackageSymbols = map[string][]Symbol{ {"HTTP2Config.PingTimeout", Field, 24}, {"HTTP2Config.SendPingTimeout", Field, 24}, {"HTTP2Config.WriteByteTimeout", Field, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"Handle", Func, 0}, {"HandleFunc", Func, 0}, {"Handler", Type, 0}, @@ -8462,7 +8612,10 @@ var PackageSymbols = map[string][]Symbol{ {"PostForm", Func, 0}, {"ProtocolError", Type, 0}, {"ProtocolError.ErrorString", Field, 0}, +<<<<<<< HEAD {"Protocols", Type, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"ProxyFromEnvironment", Func, 0}, {"ProxyURL", Func, 0}, {"PushOptions", Type, 8}, @@ -8532,11 +8685,17 @@ var PackageSymbols = map[string][]Symbol{ {"Server.ConnState", Field, 3}, {"Server.DisableGeneralOptionsHandler", Field, 20}, {"Server.ErrorLog", Field, 3}, +<<<<<<< HEAD {"Server.HTTP2", Field, 24}, {"Server.Handler", Field, 0}, {"Server.IdleTimeout", Field, 8}, {"Server.MaxHeaderBytes", Field, 0}, {"Server.Protocols", Field, 24}, +======= + {"Server.Handler", Field, 0}, + {"Server.IdleTimeout", Field, 8}, + {"Server.MaxHeaderBytes", Field, 0}, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"Server.ReadHeaderTimeout", Field, 8}, {"Server.ReadTimeout", Field, 0}, {"Server.TLSConfig", Field, 0}, @@ -8626,14 +8785,20 @@ var PackageSymbols = map[string][]Symbol{ {"Transport.ExpectContinueTimeout", Field, 6}, {"Transport.ForceAttemptHTTP2", Field, 13}, {"Transport.GetProxyConnectHeader", Field, 16}, +<<<<<<< HEAD {"Transport.HTTP2", Field, 24}, +======= +>>>>>>> 
70e0318b1 ([WIP] add archivista storage backend) {"Transport.IdleConnTimeout", Field, 7}, {"Transport.MaxConnsPerHost", Field, 11}, {"Transport.MaxIdleConns", Field, 7}, {"Transport.MaxIdleConnsPerHost", Field, 0}, {"Transport.MaxResponseHeaderBytes", Field, 7}, {"Transport.OnProxyConnectResponse", Field, 20}, +<<<<<<< HEAD {"Transport.Protocols", Field, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"Transport.Proxy", Field, 0}, {"Transport.ProxyConnectHeader", Field, 8}, {"Transport.ReadBufferSize", Field, 13}, @@ -8821,8 +8986,11 @@ var PackageSymbols = map[string][]Symbol{ {"(*AddrPort).UnmarshalText", Method, 18}, {"(*Prefix).UnmarshalBinary", Method, 18}, {"(*Prefix).UnmarshalText", Method, 18}, +<<<<<<< HEAD {"(Addr).AppendBinary", Method, 24}, {"(Addr).AppendText", Method, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(Addr).AppendTo", Method, 18}, {"(Addr).As16", Method, 18}, {"(Addr).As4", Method, 18}, @@ -8853,8 +9021,11 @@ var PackageSymbols = map[string][]Symbol{ {"(Addr).WithZone", Method, 18}, {"(Addr).Zone", Method, 18}, {"(AddrPort).Addr", Method, 18}, +<<<<<<< HEAD {"(AddrPort).AppendBinary", Method, 24}, {"(AddrPort).AppendText", Method, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(AddrPort).AppendTo", Method, 18}, {"(AddrPort).Compare", Method, 22}, {"(AddrPort).IsValid", Method, 18}, @@ -8863,8 +9034,11 @@ var PackageSymbols = map[string][]Symbol{ {"(AddrPort).Port", Method, 18}, {"(AddrPort).String", Method, 18}, {"(Prefix).Addr", Method, 18}, +<<<<<<< HEAD {"(Prefix).AppendBinary", Method, 24}, {"(Prefix).AppendText", Method, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(Prefix).AppendTo", Method, 18}, {"(Prefix).Bits", Method, 18}, {"(Prefix).Contains", Method, 18}, @@ -9049,7 +9223,10 @@ var PackageSymbols = map[string][]Symbol{ {"(*Error).Temporary", Method, 6}, {"(*Error).Timeout", Method, 6}, {"(*Error).Unwrap", Method, 13}, +<<<<<<< HEAD {"(*URL).AppendBinary", Method, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*URL).EscapedFragment", Method, 15}, {"(*URL).EscapedPath", Method, 5}, {"(*URL).Hostname", Method, 8}, @@ -9149,6 +9326,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*ProcessState).SysUsage", Method, 0}, {"(*ProcessState).SystemTime", Method, 0}, {"(*ProcessState).UserTime", Method, 0}, +<<<<<<< HEAD {"(*Root).Close", Method, 24}, {"(*Root).Create", Method, 24}, {"(*Root).FS", Method, 24}, @@ -9160,6 +9338,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*Root).OpenRoot", Method, 24}, {"(*Root).Remove", Method, 24}, {"(*Root).Stat", Method, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*SyscallError).Error", Method, 0}, {"(*SyscallError).Timeout", Method, 10}, {"(*SyscallError).Unwrap", Method, 13}, @@ -9253,8 +9433,11 @@ var PackageSymbols = map[string][]Symbol{ {"O_WRONLY", Const, 0}, {"Open", Func, 0}, {"OpenFile", Func, 0}, +<<<<<<< HEAD {"OpenInRoot", Func, 24}, {"OpenRoot", Func, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"PathError", Type, 0}, {"PathError.Err", Field, 0}, {"PathError.Op", Field, 0}, @@ -9276,7 +9459,10 @@ var PackageSymbols = map[string][]Symbol{ {"Remove", Func, 0}, {"RemoveAll", Func, 0}, {"Rename", Func, 0}, +<<<<<<< HEAD {"Root", Type, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"SEEK_CUR", Const, 0}, {"SEEK_END", Const, 0}, {"SEEK_SET", Const, 0}, @@ -9618,7 +9804,10 @@ var PackageSymbols = 
map[string][]Symbol{ {"Zero", Func, 0}, }, "regexp": { +<<<<<<< HEAD {"(*Regexp).AppendText", Method, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*Regexp).Copy", Method, 6}, {"(*Regexp).Expand", Method, 0}, {"(*Regexp).ExpandString", Method, 0}, @@ -9799,8 +9988,11 @@ var PackageSymbols = map[string][]Symbol{ {"(*StackRecord).Stack", Method, 0}, {"(*TypeAssertionError).Error", Method, 0}, {"(*TypeAssertionError).RuntimeError", Method, 0}, +<<<<<<< HEAD {"(Cleanup).Stop", Method, 24}, {"AddCleanup", Func, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"BlockProfile", Func, 1}, {"BlockProfileRecord", Type, 1}, {"BlockProfileRecord.Count", Field, 1}, @@ -9811,7 +10003,10 @@ var PackageSymbols = map[string][]Symbol{ {"Caller", Func, 0}, {"Callers", Func, 0}, {"CallersFrames", Func, 7}, +<<<<<<< HEAD {"Cleanup", Type, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"Compiler", Const, 0}, {"Error", Type, 0}, {"Frame", Type, 7}, @@ -10174,8 +10369,11 @@ var PackageSymbols = map[string][]Symbol{ {"EqualFold", Func, 0}, {"Fields", Func, 0}, {"FieldsFunc", Func, 0}, +<<<<<<< HEAD {"FieldsFuncSeq", Func, 24}, {"FieldsSeq", Func, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"HasPrefix", Func, 0}, {"HasSuffix", Func, 0}, {"Index", Func, 0}, @@ -10188,7 +10386,10 @@ var PackageSymbols = map[string][]Symbol{ {"LastIndexAny", Func, 0}, {"LastIndexByte", Func, 5}, {"LastIndexFunc", Func, 0}, +<<<<<<< HEAD {"Lines", Func, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"Map", Func, 0}, {"NewReader", Func, 0}, {"NewReplacer", Func, 0}, @@ -10200,9 +10401,13 @@ var PackageSymbols = map[string][]Symbol{ {"Split", Func, 0}, {"SplitAfter", Func, 0}, {"SplitAfterN", Func, 0}, +<<<<<<< HEAD {"SplitAfterSeq", Func, 24}, {"SplitN", Func, 0}, {"SplitSeq", Func, 24}, +======= + {"SplitN", Func, 0}, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"Title", Func, 0}, {"ToLower", Func, 0}, {"ToLowerSpecial", Func, 0}, @@ -16618,9 +16823,13 @@ var PackageSymbols = map[string][]Symbol{ {"ValueOf", Func, 0}, }, "testing": { +<<<<<<< HEAD {"(*B).Chdir", Method, 24}, {"(*B).Cleanup", Method, 14}, {"(*B).Context", Method, 24}, +======= + {"(*B).Cleanup", Method, 14}, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*B).Elapsed", Method, 20}, {"(*B).Error", Method, 0}, {"(*B).Errorf", Method, 0}, @@ -16632,7 +16841,10 @@ var PackageSymbols = map[string][]Symbol{ {"(*B).Helper", Method, 9}, {"(*B).Log", Method, 0}, {"(*B).Logf", Method, 0}, +<<<<<<< HEAD {"(*B).Loop", Method, 24}, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*B).Name", Method, 8}, {"(*B).ReportAllocs", Method, 1}, {"(*B).ReportMetric", Method, 13}, @@ -16650,9 +16862,13 @@ var PackageSymbols = map[string][]Symbol{ {"(*B).StopTimer", Method, 0}, {"(*B).TempDir", Method, 15}, {"(*F).Add", Method, 18}, +<<<<<<< HEAD {"(*F).Chdir", Method, 24}, {"(*F).Cleanup", Method, 18}, {"(*F).Context", Method, 24}, +======= + {"(*F).Cleanup", Method, 18}, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*F).Error", Method, 18}, {"(*F).Errorf", Method, 18}, {"(*F).Fail", Method, 18}, @@ -16673,9 +16889,13 @@ var PackageSymbols = map[string][]Symbol{ {"(*F).TempDir", Method, 18}, {"(*M).Run", Method, 4}, {"(*PB).Next", Method, 3}, +<<<<<<< HEAD {"(*T).Chdir", Method, 24}, {"(*T).Cleanup", Method, 14}, {"(*T).Context", Method, 24}, +======= + {"(*T).Cleanup", Method, 14}, +>>>>>>> 70e0318b1 
([WIP] add archivista storage backend) {"(*T).Deadline", Method, 15}, {"(*T).Error", Method, 0}, {"(*T).Errorf", Method, 0}, @@ -17166,9 +17386,13 @@ var PackageSymbols = map[string][]Symbol{ {"(Time).Add", Method, 0}, {"(Time).AddDate", Method, 0}, {"(Time).After", Method, 0}, +<<<<<<< HEAD {"(Time).AppendBinary", Method, 24}, {"(Time).AppendFormat", Method, 5}, {"(Time).AppendText", Method, 24}, +======= + {"(Time).AppendFormat", Method, 5}, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(Time).Before", Method, 0}, {"(Time).Clock", Method, 0}, {"(Time).Compare", Method, 20}, @@ -17642,9 +17866,12 @@ var PackageSymbols = map[string][]Symbol{ {"String", Func, 0}, {"StringData", Func, 0}, }, +<<<<<<< HEAD "weak": { {"(Pointer).Value", Method, 24}, {"Make", Func, 24}, {"Pointer", Type, 24}, }, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go index cdae2b8e81..52424273e0 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/common.go +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -66,3 +66,78 @@ func IsTypeParam(t types.Type) bool { _, ok := types.Unalias(t).(*types.TypeParam) return ok } +<<<<<<< HEAD +======= + +// GenericAssignableTo is a generalization of types.AssignableTo that +// implements the following rule for uninstantiated generic types: +// +// If V and T are generic named types, then V is considered assignable to T if, +// for every possible instantiation of V[A_1, ..., A_N], the instantiation +// T[A_1, ..., A_N] is valid and V[A_1, ..., A_N] implements T[A_1, ..., A_N]. +// +// If T has structural constraints, they must be satisfied by V. +// +// For example, consider the following type declarations: +// +// type Interface[T any] interface { +// Accept(T) +// } +// +// type Container[T any] struct { +// Element T +// } +// +// func (c Container[T]) Accept(t T) { c.Element = t } +// +// In this case, GenericAssignableTo reports that instantiations of Container +// are assignable to the corresponding instantiation of Interface. +func GenericAssignableTo(ctxt *types.Context, V, T types.Type) bool { + V = types.Unalias(V) + T = types.Unalias(T) + + // If V and T are not both named, or do not have matching non-empty type + // parameter lists, fall back on types.AssignableTo. + + VN, Vnamed := V.(*types.Named) + TN, Tnamed := T.(*types.Named) + if !Vnamed || !Tnamed { + return types.AssignableTo(V, T) + } + + vtparams := VN.TypeParams() + ttparams := TN.TypeParams() + if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || VN.TypeArgs().Len() != 0 || TN.TypeArgs().Len() != 0 { + return types.AssignableTo(V, T) + } + + // V and T have the same (non-zero) number of type params. Instantiate both + // with the type parameters of V. This must always succeed for V, and will + // succeed for T if and only if the type set of each type parameter of V is a + // subset of the type set of the corresponding type parameter of T, meaning + // that every instantiation of V corresponds to a valid instantiation of T. + + // Minor optimization: ensure we share a context across the two + // instantiations below. 
diff --git a/vendor/golang.org/x/tools/internal/versions/constraint.go b/vendor/golang.org/x/tools/internal/versions/constraint.go
new file mode 100644
index 0000000000..179063d484
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/constraint.go
@@ -0,0 +1,13 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package versions
+
+import "go/build/constraint"
+
+// ConstraintGoVersion is constraint.GoVersion (if built with go1.21+).
+// Otherwise nil.
+//
+// Deprecate once x/tools is after go1.21.
+var ConstraintGoVersion func(x constraint.Expr) string
diff --git a/vendor/golang.org/x/tools/internal/versions/constraint_go121.go b/vendor/golang.org/x/tools/internal/versions/constraint_go121.go
new file mode 100644
index 0000000000..38011407d5
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/constraint_go121.go
@@ -0,0 +1,14 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+// +build go1.21
+
+package versions
+
+import "go/build/constraint"
+
+func init() {
+	ConstraintGoVersion = constraint.GoVersion
+}
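The two constraint files above follow a common Go pattern for toolchain-gated features: a package-level function variable that stays nil on older toolchains, assigned by an init() in a file that is compiled only under a //go:build constraint, so callers feature-detect with a nil check instead of a build tag of their own. A minimal sketch of the same pattern, using hypothetical names (featuredetect, Reverse) that are not part of x/tools:

    // file: featuredetect/featuredetect.go
    package featuredetect

    // Reverse is non-nil only when built with go1.21 or newer;
    // callers must check for nil before use.
    var Reverse func(s string) string

    // file: featuredetect/reverse_go121.go
    //go:build go1.21

    package featuredetect

    import "slices"

    func init() {
    	// Installed only on go1.21+, where the slices package exists.
    	Reverse = func(s string) string {
    		b := []byte(s) // byte-wise reverse; fine for a sketch
    		slices.Reverse(b)
    		return string(b)
    	}
    }

A caller built with go1.20 sees Reverse == nil and takes its fallback path; on go1.21+ the init runs and installs the implementation — the same shape as ConstraintGoVersion above.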
@@ -57,13 +61,19 @@ import ( "errors" "fmt" "io" +<<<<<<< HEAD "log/slog" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "net/http" "net/url" "strconv" "strings" +<<<<<<< HEAD "github.com/googleapis/gax-go/v2/internallog" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) googleapi "google.golang.org/api/googleapi" internal "google.golang.org/api/internal" gensupport "google.golang.org/api/internal/gensupport" @@ -87,7 +97,10 @@ var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint var _ = internal.Version +<<<<<<< HEAD var _ = internallog.New +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) const apiId = "iamcredentials:v1" const apiName = "iamcredentials" @@ -118,8 +131,12 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err if err != nil { return nil, err } +<<<<<<< HEAD s := &Service{client: client, BasePath: basePath, logger: internaloption.GetLogger(opts)} s.Projects = NewProjectsService(s) +======= + s, err := New(client) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -138,12 +155,21 @@ func New(client *http.Client) (*Service, error) { if client == nil { return nil, errors.New("client is nil") } +<<<<<<< HEAD return NewService(context.Background(), option.WithHTTPClient(client)) +======= + s := &Service{client: client, BasePath: basePath} + s.Projects = NewProjectsService(s) + return s, nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type Service struct { client *http.Client +<<<<<<< HEAD logger *slog.Logger +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) BasePath string // API endpoint base URL UserAgent string // optional additional User-Agent fragment @@ -527,7 +553,12 @@ func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Header() http.Header { func (c *ProjectsServiceAccountsGenerateAccessTokenCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) +<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.generateaccesstokenrequest) +======= + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.generateaccesstokenrequest) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -543,7 +574,10 @@ func (c *ProjectsServiceAccountsGenerateAccessTokenCall) doRequest(alt string) ( googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.generateAccessToken", "request", internallog.HTTPRequest(req, body.Bytes())) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -579,11 +613,17 @@ func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Do(opts ...googleapi.Ca }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.generateAccessToken", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -634,7 +674,12 @@ func (c 
*ProjectsServiceAccountsGenerateIdTokenCall) Header() http.Header { func (c *ProjectsServiceAccountsGenerateIdTokenCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) +<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.generateidtokenrequest) +======= + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.generateidtokenrequest) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -650,7 +695,10 @@ func (c *ProjectsServiceAccountsGenerateIdTokenCall) doRequest(alt string) (*htt googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.generateIdToken", "request", internallog.HTTPRequest(req, body.Bytes())) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -686,11 +734,17 @@ func (c *ProjectsServiceAccountsGenerateIdTokenCall) Do(opts ...googleapi.CallOp }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.generateIdToken", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -749,11 +803,19 @@ func (c *ProjectsServiceAccountsGetAllowedLocationsCall) doRequest(alt string) ( if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}/allowedLocations") urls += "?" 
+ c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) +======= + req, err := http.NewRequest("GET", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -761,7 +823,10 @@ func (c *ProjectsServiceAccountsGetAllowedLocationsCall) doRequest(alt string) ( googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.getAllowedLocations", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -797,11 +862,17 @@ func (c *ProjectsServiceAccountsGetAllowedLocationsCall) Do(opts ...googleapi.Ca }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.getAllowedLocations", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -852,7 +923,12 @@ func (c *ProjectsServiceAccountsSignBlobCall) Header() http.Header { func (c *ProjectsServiceAccountsSignBlobCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) +<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.signblobrequest) +======= + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.signblobrequest) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -868,7 +944,10 @@ func (c *ProjectsServiceAccountsSignBlobCall) doRequest(alt string) (*http.Respo googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.signBlob", "request", internallog.HTTPRequest(req, body.Bytes())) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -904,11 +983,17 @@ func (c *ProjectsServiceAccountsSignBlobCall) Do(opts ...googleapi.CallOption) ( }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.signBlob", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -959,7 +1044,12 @@ func (c *ProjectsServiceAccountsSignJwtCall) Header() http.Header { func (c *ProjectsServiceAccountsSignJwtCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) +<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.signjwtrequest) +======= + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.signjwtrequest) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -975,7 +1065,10 @@ func (c 
*ProjectsServiceAccountsSignJwtCall) doRequest(alt string) (*http.Respon googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.signJwt", "request", internallog.HTTPRequest(req, body.Bytes())) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -1011,10 +1104,16 @@ func (c *ProjectsServiceAccountsSignJwtCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.signJwt", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } diff --git a/vendor/google.golang.org/api/idtoken/idtoken.go b/vendor/google.golang.org/api/idtoken/idtoken.go index c8bf7c9b1f..057a874b09 100644 --- a/vendor/google.golang.org/api/idtoken/idtoken.go +++ b/vendor/google.golang.org/api/idtoken/idtoken.go @@ -113,7 +113,10 @@ func newTokenSourceNewAuth(ctx context.Context, audience string, ds *internal.Di CredentialsFile: ds.CredentialsFile, CredentialsJSON: ds.CredentialsJSON, Client: oauth2.NewClient(ctx, nil), +<<<<<<< HEAD Logger: ds.Logger, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }) if err != nil { return nil, err diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go index 86861e2438..e842b631ab 100644 --- a/vendor/google.golang.org/api/internal/creds.go +++ b/vendor/google.golang.org/api/internal/creds.go @@ -139,7 +139,10 @@ func detectDefaultFromDialSettings(settings *DialSettings) (*auth.Credentials, e CredentialsFile: settings.CredentialsFile, CredentialsJSON: settings.CredentialsJSON, UseSelfSignedJWT: useSelfSignedJWT, +<<<<<<< HEAD Logger: settings.Logger, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }) } diff --git a/vendor/google.golang.org/api/internal/gensupport/media.go b/vendor/google.golang.org/api/internal/gensupport/media.go index 8c7435de3e..37f4859dc1 100644 --- a/vendor/google.golang.org/api/internal/gensupport/media.go +++ b/vendor/google.golang.org/api/internal/gensupport/media.go @@ -200,9 +200,12 @@ func (mi *MediaInfo) UploadType() string { // UploadRequest sets up an HTTP request for media upload. It adds headers // as necessary, and returns a replacement for the body and a function for http.Request.GetBody. 
func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newBody io.Reader, getBody func() (io.ReadCloser, error), cleanup func()) { +<<<<<<< HEAD if body == nil { body = new(bytes.Buffer) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) cleanup = func() {} if mi == nil { return body, nil, cleanup diff --git a/vendor/google.golang.org/api/internal/gensupport/resumable.go b/vendor/google.golang.org/api/internal/gensupport/resumable.go index d74fe2a299..9e7d3f044c 100644 --- a/vendor/google.golang.org/api/internal/gensupport/resumable.go +++ b/vendor/google.golang.org/api/internal/gensupport/resumable.go @@ -164,8 +164,11 @@ func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, e // and calls the returned functions after the request returns (see send.go). // rx is private to the auto-generated API code. // Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close. +<<<<<<< HEAD // Upload does not parse the response into the error on a non 200 response; // it is the caller's responsibility to call resp.Body.Close. +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) { // There are a couple of cases where it's possible for err and resp to both @@ -258,6 +261,7 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err rCtx, cancel = context.WithTimeout(ctx, rx.ChunkTransferTimeout) } +<<<<<<< HEAD // We close the response's body here, since we definitely will not // return `resp` now. If we close it before the select case above, a // timer may fire and cause us to return a response with a closed body @@ -270,6 +274,8 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err io.Copy(io.Discard, resp.Body) resp.Body.Close() } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) resp, err = rx.transferChunk(rCtx) var status int @@ -296,11 +302,21 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err rx.attempts++ pause = bo.Pause() +<<<<<<< HEAD +======= + if resp != nil && resp.Body != nil { + resp.Body.Close() + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // If the chunk was uploaded successfully, but there's still // more to go, upload the next chunk without any delay. if statusResumeIncomplete(resp) { +<<<<<<< HEAD +======= + resp.Body.Close() +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) continue } diff --git a/vendor/google.golang.org/api/internal/gensupport/send.go b/vendor/google.golang.org/api/internal/gensupport/send.go index 1c91f147ab..79da7b37dd 100644 --- a/vendor/google.golang.org/api/internal/gensupport/send.go +++ b/vendor/google.golang.org/api/internal/gensupport/send.go @@ -9,7 +9,10 @@ import ( "encoding/json" "errors" "fmt" +<<<<<<< HEAD "io" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "net/http" "strings" "time" @@ -223,6 +226,7 @@ func DecodeResponse(target interface{}, res *http.Response) error { } return json.NewDecoder(res.Body).Decode(target) } +<<<<<<< HEAD // DecodeResponseBytes decodes the body of res into target and returns bytes read // from the body. If there is no body, target is unchanged. 
@@ -239,3 +243,5 @@ func DecodeResponseBytes(target interface{}, res *http.Response) ([]byte, error) } return b, nil } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index fad0d7dbf9..d95ca43c60 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,8 @@ package internal // Version is the current tagged release of the library. +<<<<<<< HEAD const Version = "0.219.0" +======= +const Version = "0.210.0" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go index e3321ca4a6..4bb1fd4b7d 100644 --- a/vendor/google.golang.org/api/option/option.go +++ b/vendor/google.golang.org/api/option/option.go @@ -44,6 +44,7 @@ func (w withCredFile) Apply(o *internal.DialSettings) { // WithCredentialsFile returns a ClientOption that authenticates // API calls with the given service account or refresh token JSON // credentials file. +<<<<<<< HEAD // // Important: If you accept a credential configuration (credential // JSON/File/Stream) from an external source for authentication to Google @@ -52,6 +53,8 @@ func (w withCredFile) Apply(o *internal.DialSettings) { // Google APIs can compromise the security of your systems and data. For // more information, refer to [Validate credential configurations from // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials). +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func WithCredentialsFile(filename string) ClientOption { return withCredFile(filename) } @@ -59,6 +62,7 @@ func WithCredentialsFile(filename string) ClientOption { // WithServiceAccountFile returns a ClientOption that uses a Google service // account credentials file to authenticate. // +<<<<<<< HEAD // Important: If you accept a credential configuration (credential // JSON/File/Stream) from an external source for authentication to Google // Cloud Platform, you must validate it before providing it to any Google @@ -67,6 +71,8 @@ func WithCredentialsFile(filename string) ClientOption { // more information, refer to [Validate credential configurations from // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials). // +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Deprecated: Use WithCredentialsFile instead. func WithServiceAccountFile(filename string) ClientOption { return WithCredentialsFile(filename) @@ -75,6 +81,7 @@ func WithServiceAccountFile(filename string) ClientOption { // WithCredentialsJSON returns a ClientOption that authenticates // API calls with the given service account or refresh token JSON // credentials. +<<<<<<< HEAD // // Important: If you accept a credential configuration (credential // JSON/File/Stream) from an external source for authentication to Google @@ -83,6 +90,8 @@ func WithServiceAccountFile(filename string) ClientOption { // Google APIs can compromise the security of your systems and data. For // more information, refer to [Validate credential configurations from // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials). 
 func WithCredentialsJSON(p []byte) ClientOption {
 	return withCredentialsJSON(p)
 }
diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json
index 992c4c0145..a9813bb6c0 100644
--- a/vendor/google.golang.org/api/storage/v1/storage-api.json
+++ b/vendor/google.golang.org/api/storage/v1/storage-api.json
@@ -93,7 +93,11 @@
       "location": "us-west4"
     }
   ],
-  "etag": "\"3133343838373034343130353038353234313337\"",
+  "etag": "\"3134393437363236373436353839383934323639\"",
   "icons": {
     "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png",
     "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png"
@@ -3224,6 +3228,7 @@
           ],
           "supportsSubscription": true
         },
-        "move": {
-          "description": "Moves the source object to the destination object in the same bucket.",
-          "httpMethod": "POST",
@@ -3316,6 +3321,8 @@
-            "https://www.googleapis.com/auth/devstorage.read_write"
-          ]
-        },
         "patch": {
           "description": "Patches an object's metadata.",
           "httpMethod": "PATCH",
@@ -4364,7 +4371,11 @@
       }
     }
   },
-  "revision": "20241206",
+  "revision": "20241113",
   "rootUrl": "https://storage.googleapis.com/",
   "schemas": {
     "AdvanceRelocateBucketOperationRequest": {
diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go
index 89f08a8d98..0a5a3e6bf5 100644
--- a/vendor/google.golang.org/api/storage/v1/storage-gen.go
+++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go
@@ -1,4 +1,8 @@
-// Copyright 2025 Google LLC.
+// Copyright 2024 Google LLC.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
@@ -64,14 +68,20 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"log/slog"
 	"net/http"
 	"net/url"
 	"strconv"
 	"strings"

 	"github.com/googleapis/gax-go/v2"
-	"github.com/googleapis/gax-go/v2/internallog"
 	googleapi "google.golang.org/api/googleapi"
 	internal "google.golang.org/api/internal"
 	gensupport "google.golang.org/api/internal/gensupport"
@@ -95,7 +105,10 @@ var _ = strings.Replace
 var _ = context.Canceled
 var _ = internaloption.WithDefaultEndpoint
 var _ = internal.Version
-var _ = internallog.New
 var _ = gax.Version

 const apiId = "storage:v1"
@@ -142,6 +155,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err
 	if err != nil {
 		return nil, err
 	}
-	s := &Service{client: client, BasePath: basePath, logger: internaloption.GetLogger(opts)}
-	s.AnywhereCaches = NewAnywhereCachesService(s)
-	s.BucketAccessControls = NewBucketAccessControlsService(s)
@@ -155,6 +169,9 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err
-	s.Objects = NewObjectsService(s)
-	s.Operations = NewOperationsService(s)
-	s.Projects = NewProjectsService(s)
+	s, err := New(client)
 	if err != nil {
 		return nil, err
 	}
@@ -173,12 +190,32 @@ func New(client *http.Client) (*Service, error) {
 	if client == nil {
 		return nil, errors.New("client is nil")
 	}
-	return NewService(context.Background(), option.WithHTTPClient(client))
+	s := &Service{client: client, BasePath: basePath}
+	s.AnywhereCaches = NewAnywhereCachesService(s)
+	s.BucketAccessControls = NewBucketAccessControlsService(s)
+	s.Buckets = NewBucketsService(s)
+	s.Channels = NewChannelsService(s)
+	s.DefaultObjectAccessControls = NewDefaultObjectAccessControlsService(s)
+	s.Folders = NewFoldersService(s)
+	s.ManagedFolders = NewManagedFoldersService(s)
+	s.Notifications = NewNotificationsService(s)
+	s.ObjectAccessControls = NewObjectAccessControlsService(s)
+	s.Objects = NewObjectsService(s)
+	s.Operations = NewOperationsService(s)
+	s.Projects = NewProjectsService(s)
+	return s, nil
 }

 type Service struct {
 	client *http.Client
-	logger *slog.Logger
 	BasePath  string // API endpoint base URL
 	UserAgent string // optional additional User-Agent fragment
@@ -2895,11 +2932,19 @@ func (c *AnywhereCachesDisableCall) Header() http.Header {
 func (c *AnywhereCachesDisableCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
+	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches/{anywhereCacheId}/disable")
 	urls += "?" + c.urlParams_.Encode()
-	req, err := http.NewRequest("POST", urls, nil)
+	req, err := http.NewRequest("POST", urls, body)
 	if err != nil {
 		return nil, err
 	}
@@ -2908,7 +2953,10 @@ func (c *AnywhereCachesDisableCall) doRequest(alt string) (*http.Response, error
 		"bucket":          c.bucket,
 		"anywhereCacheId": c.anywhereCacheId,
 	})
-	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.anywhereCaches.disable", "request", internallog.HTTPRequest(req, nil))
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -2943,11 +2991,17 @@ func (c *AnywhereCachesDisableCall) Do(opts ...googleapi.CallOption) (*AnywhereC
 		},
 	}
 	target := &ret
-	b, err := gensupport.DecodeResponseBytes(target, res)
-	if err != nil {
-		return nil, err
-	}
-	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.anywhereCaches.disable", "response", internallog.HTTPResponse(res, b))
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
 	return ret, nil
 }
@@ -3008,11 +3062,19 @@ func (c *AnywhereCachesGetCall) doRequest(alt string) (*http.Response, error) {
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
+	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches/{anywhereCacheId}")
 	urls += "?" + c.urlParams_.Encode()
-	req, err := http.NewRequest("GET", urls, nil)
+	req, err := http.NewRequest("GET", urls, body)
 	if err != nil {
 		return nil, err
 	}
@@ -3021,7 +3083,10 @@ func (c *AnywhereCachesGetCall) doRequest(alt string) (*http.Response, error) {
 		"bucket":          c.bucket,
 		"anywhereCacheId": c.anywhereCacheId,
 	})
-	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.anywhereCaches.get", "request", internallog.HTTPRequest(req, nil))
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -3056,11 +3121,17 @@ func (c *AnywhereCachesGetCall) Do(opts ...googleapi.CallOption) (*AnywhereCache
 		},
 	}
 	target := &ret
-	b, err := gensupport.DecodeResponseBytes(target, res)
-	if err != nil {
-		return nil, err
-	}
-	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.anywhereCaches.get", "response", internallog.HTTPResponse(res, b))
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
 	return ret, nil
 }
@@ -3108,7 +3179,12 @@ func (c *AnywhereCachesInsertCall) Header() http.Header {
 func (c *AnywhereCachesInsertCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
-	body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.anywherecache)
+	var body io.Reader = nil
+	body, err := googleapi.WithoutDataWrapper.JSONReader(c.anywherecache)
 	if err != nil {
 		return nil, err
 	}
@@ -3124,7 +3200,10 @@ func (c *AnywhereCachesInsertCall) doRequest(alt string) (*http.Response, error)
 	googleapi.Expand(req.URL, map[string]string{
 		"bucket": c.bucket,
 	})
-	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.anywhereCaches.insert", "request", internallog.HTTPRequest(req, body.Bytes()))
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -3160,11 +3239,17 @@ func (c *AnywhereCachesInsertCall) Do(opts ...googleapi.CallOption) (*GoogleLong
 		},
 	}
 	target := &ret
-	b, err := gensupport.DecodeResponseBytes(target, res)
-	if err != nil {
-		return nil, err
-	}
-	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.anywhereCaches.insert", "response", internallog.HTTPResponse(res, b))
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
 	return ret, nil
 }
@@ -3237,11 +3322,19 @@ func (c *AnywhereCachesListCall) doRequest(alt string) (*http.Response, error) {
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
+	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches")
 	urls += "?" + c.urlParams_.Encode()
-	req, err := http.NewRequest("GET", urls, nil)
+	req, err := http.NewRequest("GET", urls, body)
 	if err != nil {
 		return nil, err
 	}
@@ -3249,7 +3342,10 @@ func (c *AnywhereCachesListCall) doRequest(alt string) (*http.Response, error) {
 	googleapi.Expand(req.URL, map[string]string{
 		"bucket": c.bucket,
 	})
-	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.anywhereCaches.list", "request", internallog.HTTPRequest(req, nil))
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -3284,11 +3380,17 @@ func (c *AnywhereCachesListCall) Do(opts ...googleapi.CallOption) (*AnywhereCach
 		},
 	}
 	target := &ret
-	b, err := gensupport.DecodeResponseBytes(target, res)
-	if err != nil {
-		return nil, err
-	}
-	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.anywhereCaches.list", "response", internallog.HTTPResponse(res, b))
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
 	return ret, nil
 }
@@ -3358,11 +3460,19 @@ func (c *AnywhereCachesPauseCall) Header() http.Header {
 func (c *AnywhereCachesPauseCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
+	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches/{anywhereCacheId}/pause")
 	urls += "?" + c.urlParams_.Encode()
-	req, err := http.NewRequest("POST", urls, nil)
+	req, err := http.NewRequest("POST", urls, body)
 	if err != nil {
 		return nil, err
 	}
@@ -3371,7 +3481,10 @@ func (c *AnywhereCachesPauseCall) doRequest(alt string) (*http.Response, error)
 		"bucket":          c.bucket,
 		"anywhereCacheId": c.anywhereCacheId,
 	})
-	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.anywhereCaches.pause", "request", internallog.HTTPRequest(req, nil))
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -3406,11 +3519,17 @@ func (c *AnywhereCachesPauseCall) Do(opts ...googleapi.CallOption) (*AnywhereCac
 		},
 	}
 	target := &ret
-	b, err := gensupport.DecodeResponseBytes(target, res)
-	if err != nil {
-		return nil, err
-	}
-	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.anywhereCaches.pause", "response", internallog.HTTPResponse(res, b))
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
 	return ret, nil
 }
@@ -3459,11 +3578,19 @@ func (c *AnywhereCachesResumeCall) Header() http.Header {
 func (c *AnywhereCachesResumeCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
+	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches/{anywhereCacheId}/resume")
 	urls += "?" + c.urlParams_.Encode()
-	req, err := http.NewRequest("POST", urls, nil)
+	req, err := http.NewRequest("POST", urls, body)
 	if err != nil {
 		return nil, err
 	}
@@ -3472,7 +3599,10 @@ func (c *AnywhereCachesResumeCall) doRequest(alt string) (*http.Response, error)
 		"bucket":          c.bucket,
 		"anywhereCacheId": c.anywhereCacheId,
 	})
-	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.anywhereCaches.resume", "request", internallog.HTTPRequest(req, nil))
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -3507,11 +3637,17 @@ func (c *AnywhereCachesResumeCall) Do(opts ...googleapi.CallOption) (*AnywhereCa
 		},
 	}
 	target := &ret
-	b, err := gensupport.DecodeResponseBytes(target, res)
-	if err != nil {
-		return nil, err
-	}
-	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.anywhereCaches.resume", "response", internallog.HTTPResponse(res, b))
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
 	return ret, nil
 }
@@ -3563,7 +3699,12 @@ func (c *AnywhereCachesUpdateCall) Header() http.Header {
 func (c *AnywhereCachesUpdateCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
-	body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.anywherecache)
+	var body io.Reader = nil
+	body, err := googleapi.WithoutDataWrapper.JSONReader(c.anywherecache)
 	if err != nil {
 		return nil, err
 	}
@@ -3580,7 +3721,10 @@ func (c *AnywhereCachesUpdateCall) doRequest(alt string) (*http.Response, error)
 		"bucket":          c.bucket,
 		"anywhereCacheId": c.anywhereCacheId,
 	})
-	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.anywhereCaches.update", "request", internallog.HTTPRequest(req, body.Bytes()))
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -3616,11 +3760,17 @@ func (c *AnywhereCachesUpdateCall) Do(opts ...googleapi.CallOption) (*GoogleLong
 		},
 	}
 	target := &ret
-	b, err := gensupport.DecodeResponseBytes(target, res)
-	if err != nil {
-		return nil, err
-	}
-	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.anywhereCaches.update", "response", internallog.HTTPResponse(res, b))
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
 	return ret, nil
 }
@@ -3679,11 +3829,19 @@ func (c *BucketAccessControlsDeleteCall) Header() http.Header {
 func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
+	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}")
 	urls += "?" + c.urlParams_.Encode()
-	req, err := http.NewRequest("DELETE", urls, nil)
+	req, err := http.NewRequest("DELETE", urls, body)
 	if err != nil {
 		return nil, err
 	}
@@ -3692,7 +3850,10 @@ func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response,
 		"bucket": c.bucket,
 		"entity": c.entity,
 	})
-	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.delete", "request", internallog.HTTPRequest(req, nil))
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -3707,7 +3868,10 @@ func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error
 	if err := googleapi.CheckResponse(res); err != nil {
 		return gensupport.WrapError(err)
 	}
-	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.delete", "response", internallog.HTTPResponse(res, nil))
 	return nil
 }
@@ -3777,11 +3941,19 @@ func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, err
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
+	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}")
 	urls += "?" + c.urlParams_.Encode()
-	req, err := http.NewRequest("GET", urls, nil)
+	req, err := http.NewRequest("GET", urls, body)
 	if err != nil {
 		return nil, err
 	}
@@ -3790,7 +3962,10 @@ func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, err
 		"bucket": c.bucket,
 		"entity": c.entity,
 	})
-	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.get", "request", internallog.HTTPRequest(req, nil))
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -3826,11 +4001,17 @@ func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketA
 		},
 	}
 	target := &ret
-	b, err := gensupport.DecodeResponseBytes(target, res)
-	if err != nil {
-		return nil, err
-	}
-	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.get", "response", internallog.HTTPResponse(res, b))
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
 	return ret, nil
 }
@@ -3885,7 +4066,12 @@ func (c *BucketAccessControlsInsertCall) Header() http.Header {
 func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
-	body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.bucketaccesscontrol)
+	var body io.Reader = nil
+	body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol)
 	if err != nil {
 		return nil, err
 	}
@@ -3901,7 +4087,10 @@ func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response,
 	googleapi.Expand(req.URL, map[string]string{
 		"bucket": c.bucket,
 	})
-	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.insert", "request", internallog.HTTPRequest(req, body.Bytes()))
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -3937,11 +4126,17 @@ func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Buck
 		},
 	}
 	target := &ret
-	b, err := gensupport.DecodeResponseBytes(target, res)
-	if err != nil {
-		return nil, err
-	}
-	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.insert", "response", internallog.HTTPResponse(res, b))
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
 	return ret, nil
 }
@@ -4006,11 +4201,19 @@ func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, er
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
+	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl")
 	urls += "?" + c.urlParams_.Encode()
-	req, err := http.NewRequest("GET", urls, nil)
+	req, err := http.NewRequest("GET", urls, body)
 	if err != nil {
 		return nil, err
 	}
@@ -4018,7 +4221,10 @@ func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, er
 	googleapi.Expand(req.URL, map[string]string{
 		"bucket": c.bucket,
 	})
-	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.list", "request", internallog.HTTPRequest(req, nil))
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -4054,11 +4260,17 @@ func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Bucket
 		},
 	}
 	target := &ret
-	b, err := gensupport.DecodeResponseBytes(target, res)
-	if err != nil {
-		return nil, err
-	}
-	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.list", "response", internallog.HTTPResponse(res, b))
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
 	return ret, nil
 }
@@ -4118,7 +4330,12 @@ func (c *BucketAccessControlsPatchCall) Header() http.Header {
 func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
-	body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.bucketaccesscontrol)
+	var body io.Reader = nil
+	body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol)
 	if err != nil {
 		return nil, err
 	}
@@ -4135,7 +4352,10 @@ func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, e
 		"bucket": c.bucket,
 		"entity": c.entity,
 	})
-	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.patch", "request", internallog.HTTPRequest(req, body.Bytes()))
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -4171,11 +4391,17 @@ func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Bucke
 		},
 	}
 	target := &ret
-	b, err := gensupport.DecodeResponseBytes(target, res)
-	if err != nil {
-		return nil, err
-	}
-	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.patch", "response", internallog.HTTPResponse(res, b))
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
 	return ret, nil
 }
@@ -4235,7 +4461,12 @@ func (c *BucketAccessControlsUpdateCall) Header() http.Header {
 func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
-	body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.bucketaccesscontrol)
+	var body io.Reader = nil
+	body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol)
 	if err != nil {
 		return nil, err
 	}
@@ -4252,7 +4483,10 @@ func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response,
 		"bucket": c.bucket,
 		"entity": c.entity,
 	})
-	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.update", "request", internallog.HTTPRequest(req, body.Bytes()))
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -4288,11 +4522,17 @@ func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Buck
 		},
 	}
 	target := &ret
-	b, err := gensupport.DecodeResponseBytes(target, res)
-	if err != nil {
-		return nil, err
-	}
-	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.update", "response", internallog.HTTPResponse(res, b))
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
 	return ret, nil
 }
@@ -4361,11 +4601,19 @@ func (c *BucketsDeleteCall) Header() http.Header {
 func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
+	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
 	urls += "?" + c.urlParams_.Encode()
-	req, err := http.NewRequest("DELETE", urls, nil)
+	req, err := http.NewRequest("DELETE", urls, body)
 	if err != nil {
 		return nil, err
 	}
@@ -4373,7 +4621,10 @@ func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	googleapi.Expand(req.URL, map[string]string{
 		"bucket": c.bucket,
 	})
-	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.delete", "request", internallog.HTTPRequest(req, nil))
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -4388,7 +4639,10 @@ func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error {
 	if err := googleapi.CheckResponse(res); err != nil {
 		return gensupport.WrapError(err)
 	}
-	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.delete", "response", internallog.HTTPResponse(res, nil))
 	return nil
 }
@@ -4498,11 +4752,19 @@ func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) {
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
+	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
 	urls += "?" + c.urlParams_.Encode()
-	req, err := http.NewRequest("GET", urls, nil)
+	req, err := http.NewRequest("GET", urls, body)
 	if err != nil {
 		return nil, err
 	}
@@ -4510,7 +4772,10 @@ func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) {
 	googleapi.Expand(req.URL, map[string]string{
 		"bucket": c.bucket,
 	})
-	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.get", "request", internallog.HTTPRequest(req, nil))
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -4545,11 +4810,17 @@ func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
 		},
 	}
 	target := &ret
-	b, err := gensupport.DecodeResponseBytes(target, res)
-	if err != nil {
-		return nil, err
-	}
-	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.get", "response", internallog.HTTPResponse(res, b))
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
 	return ret, nil
 }
@@ -4623,11 +4894,19 @@ func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error)
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
+	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam")
 	urls += "?" + c.urlParams_.Encode()
-	req, err := http.NewRequest("GET", urls, nil)
+	req, err := http.NewRequest("GET", urls, body)
 	if err != nil {
 		return nil, err
 	}
@@ -4635,7 +4914,10 @@ func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error)
 	googleapi.Expand(req.URL, map[string]string{
 		"bucket": c.bucket,
 	})
-	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.getIamPolicy", "request", internallog.HTTPRequest(req, nil))
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -4670,11 +4952,17 @@ func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err
 		},
 	}
 	target := &ret
-	b, err := gensupport.DecodeResponseBytes(target, res)
-	if err != nil {
-		return nil, err
-	}
-	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.getIamPolicy", "response", internallog.HTTPResponse(res, b))
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
 	return ret, nil
 }
@@ -4741,11 +5029,19 @@ func (c *BucketsGetStorageLayoutCall) doRequest(alt string) (*http.Response, err
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
+	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/storageLayout")
 	urls += "?" + c.urlParams_.Encode()
-	req, err := http.NewRequest("GET", urls, nil)
+	req, err := http.NewRequest("GET", urls, body)
 	if err != nil {
 		return nil, err
 	}
@@ -4753,7 +5049,10 @@ func (c *BucketsGetStorageLayoutCall) doRequest(alt string) (*http.Response, err
 	googleapi.Expand(req.URL, map[string]string{
 		"bucket": c.bucket,
 	})
-	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.getStorageLayout", "request", internallog.HTTPRequest(req, nil))
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -4789,11 +5088,17 @@ func (c *BucketsGetStorageLayoutCall) Do(opts ...googleapi.CallOption) (*BucketS
 		},
 	}
 	target := &ret
-	b, err := gensupport.DecodeResponseBytes(target, res)
-	if err != nil {
-		return nil, err
-	}
-	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.getStorageLayout", "response", internallog.HTTPResponse(res, b))
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
 	return ret, nil
 }
@@ -4924,7 +5229,12 @@ func (c *BucketsInsertCall) Header() http.Header {
 func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
-	body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.bucket)
+	var body io.Reader = nil
+	body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket)
 	if err != nil {
 		return nil, err
 	}
@@ -4937,7 +5247,10 @@ func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) {
 		return nil, err
 	}
 	req.Header = reqHeaders
-	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.insert", "request", internallog.HTTPRequest(req, body.Bytes()))
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -4972,11 +5285,17 @@ func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
 		},
 	}
 	target := &ret
-	b, err := gensupport.DecodeResponseBytes(target, res)
-	if err != nil {
-		return nil, err
-	}
-	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.insert", "response", internallog.HTTPResponse(res, b))
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
 	return ret, nil
 }
@@ -5083,16 +5402,27 @@ func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) {
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
+	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b")
 	urls += "?" + c.urlParams_.Encode()
-	req, err := http.NewRequest("GET", urls, nil)
+	req, err := http.NewRequest("GET", urls, body)
 	if err != nil {
 		return nil, err
 	}
 	req.Header = reqHeaders
-	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.list", "request", internallog.HTTPRequest(req, nil))
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -5127,11 +5457,17 @@ func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) {
 		},
 	}
 	target := &ret
-	b, err := gensupport.DecodeResponseBytes(target, res)
-	if err != nil {
-		return nil, err
-	}
-	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.list", "response", internallog.HTTPResponse(res, b))
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
 	return ret, nil
 }
@@ -5208,11 +5544,19 @@ func (c *BucketsLockRetentionPolicyCall) Header() http.Header {
 func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
+	var body io.Reader = nil
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/lockRetentionPolicy")
 	urls += "?" + c.urlParams_.Encode()
-	req, err := http.NewRequest("POST", urls, nil)
+	req, err := http.NewRequest("POST", urls, body)
 	if err != nil {
 		return nil, err
 	}
@@ -5220,7 +5564,10 @@ func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response,
 	googleapi.Expand(req.URL, map[string]string{
 		"bucket": c.bucket,
 	})
-	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.lockRetentionPolicy", "request", internallog.HTTPRequest(req, nil))
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -5255,11 +5602,17 @@ func (c *BucketsLockRetentionPolicyCall) Do(opts ...googleapi.CallOption) (*Buck
 		},
 	}
 	target := &ret
-	b, err := gensupport.DecodeResponseBytes(target, res)
-	if err != nil {
-		return nil, err
-	}
-	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.lockRetentionPolicy", "response", internallog.HTTPResponse(res, b))
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
 	return ret, nil
 }
@@ -5401,7 +5754,12 @@ func (c *BucketsPatchCall) Header() http.Header {
 func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
-	body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.bucket2)
+	var body io.Reader = nil
+	body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2)
 	if err != nil {
 		return nil, err
 	}
@@ -5417,7 +5775,10 @@ func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) {
 	googleapi.Expand(req.URL, map[string]string{
 		"bucket": c.bucket,
 	})
-	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.patch", "request", internallog.HTTPRequest(req, body.Bytes()))
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -5452,11 +5813,17 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
 		},
 	}
 	target := &ret
-	b, err := gensupport.DecodeResponseBytes(target, res)
-	if err != nil {
-		return nil, err
-	}
-	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.patch", "response", internallog.HTTPResponse(res, b))
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
 	return ret, nil
 }
@@ -5505,7 +5872,12 @@ func (c *BucketsRelocateCall) Header() http.Header {
 func (c *BucketsRelocateCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
-	body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.relocatebucketrequest)
+	var body io.Reader = nil
+	body, err := googleapi.WithoutDataWrapper.JSONReader(c.relocatebucketrequest)
 	if err != nil {
 		return nil, err
 	}
@@ -5521,7 +5893,10 @@ func (c *BucketsRelocateCall) doRequest(alt string) (*http.Response, error) {
 	googleapi.Expand(req.URL, map[string]string{
 		"bucket": c.bucket,
 	})
-	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.relocate", "request", internallog.HTTPRequest(req, body.Bytes()))
"api request", "serviceName", apiName, "rpcName", "storage.buckets.relocate", "request", internallog.HTTPRequest(req, body.Bytes())) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -5557,11 +5932,17 @@ func (c *BucketsRelocateCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunni }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.relocate", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -5628,11 +6009,19 @@ func (c *BucketsRestoreCall) Header() http.Header { func (c *BucketsRestoreCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/restore") urls += "?" + c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("POST", urls, nil) +======= + req, err := http.NewRequest("POST", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -5640,7 +6029,10 @@ func (c *BucketsRestoreCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.restore", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -5675,11 +6067,17 @@ func (c *BucketsRestoreCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.restore", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -5734,7 +6132,12 @@ func (c *BucketsSetIamPolicyCall) Header() http.Header { func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) +<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.policy) +======= + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -5750,7 +6153,10 @@ func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.setIamPolicy", "request", internallog.HTTPRequest(req, body.Bytes())) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -5785,11 
+6191,17 @@ func (c *BucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.setIamPolicy", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -5857,11 +6269,19 @@ func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, e if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam/testPermissions") urls += "?" + c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) +======= + req, err := http.NewRequest("GET", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -5869,7 +6289,10 @@ func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, e googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.testIamPermissions", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -5905,11 +6328,17 @@ func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.testIamPermissions", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -6051,7 +6480,12 @@ func (c *BucketsUpdateCall) Header() http.Header { func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) +<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.bucket2) +======= + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -6067,7 +6501,10 @@ func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.update", "request", internallog.HTTPRequest(req, body.Bytes())) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -6102,11 +6539,17 @@ func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, 
"rpcName", "storage.buckets.update", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -6150,7 +6593,12 @@ func (c *ChannelsStopCall) Header() http.Header { func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) +<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.channel) +======= + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -6163,7 +6611,10 @@ func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { return nil, err } req.Header = reqHeaders +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.channels.stop", "request", internallog.HTTPRequest(req, body.Bytes())) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -6178,7 +6629,10 @@ func (c *ChannelsStopCall) Do(opts ...googleapi.CallOption) error { if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.channels.stop", "response", internallog.HTTPResponse(res, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } @@ -6237,11 +6691,19 @@ func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header { func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") urls += "?" 
+ c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("DELETE", urls, nil) +======= + req, err := http.NewRequest("DELETE", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -6250,7 +6712,10 @@ func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Res "bucket": c.bucket, "entity": c.entity, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.delete", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -6265,7 +6730,10 @@ func (c *DefaultObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.delete", "response", internallog.HTTPResponse(res, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } @@ -6336,11 +6804,19 @@ func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Respon if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") urls += "?" + c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) +======= + req, err := http.NewRequest("GET", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -6349,7 +6825,10 @@ func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Respon "bucket": c.bucket, "entity": c.entity, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.get", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -6385,11 +6864,17 @@ func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.get", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -6444,7 +6929,12 @@ func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header { func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) +<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.objectaccesscontrol) +======= + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -6460,7 +6950,10 @@ func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) 
(*http.Res googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.insert", "request", internallog.HTTPRequest(req, body.Bytes())) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -6496,11 +6989,17 @@ func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.insert", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -6581,11 +7080,19 @@ func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Respo if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") urls += "?" + c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) +======= + req, err := http.NewRequest("GET", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -6593,7 +7100,10 @@ func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Respo googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.list", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -6629,11 +7139,17 @@ func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) ( }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.list", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -6693,7 +7209,12 @@ func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header { func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) +<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.objectaccesscontrol) +======= + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -6710,7 +7231,10 @@ func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Resp "bucket": c.bucket, "entity": c.entity, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", 
"storage.defaultObjectAccessControls.patch", "request", internallog.HTTPRequest(req, body.Bytes())) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -6746,11 +7270,17 @@ func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.patch", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -6810,7 +7340,12 @@ func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header { func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) +<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.objectaccesscontrol) +======= + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -6827,7 +7362,10 @@ func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Res "bucket": c.bucket, "entity": c.entity, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.update", "request", internallog.HTTPRequest(req, body.Bytes())) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -6863,11 +7401,17 @@ func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.update", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -6932,11 +7476,19 @@ func (c *FoldersDeleteCall) Header() http.Header { func (c *FoldersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/folders/{folder}") urls += "?" 
+ c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("DELETE", urls, nil) +======= + req, err := http.NewRequest("DELETE", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -6945,7 +7497,10 @@ func (c *FoldersDeleteCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "folder": c.folder, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.folders.delete", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -6960,7 +7515,10 @@ func (c *FoldersDeleteCall) Do(opts ...googleapi.CallOption) error { if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.folders.delete", "response", internallog.HTTPResponse(res, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } @@ -7039,11 +7597,19 @@ func (c *FoldersGetCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/folders/{folder}") urls += "?" + c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) +======= + req, err := http.NewRequest("GET", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -7052,7 +7618,10 @@ func (c *FoldersGetCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "folder": c.folder, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.folders.get", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -7087,11 +7656,17 @@ func (c *FoldersGetCall) Do(opts ...googleapi.CallOption) (*Folder, error) { }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.folders.get", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -7147,7 +7722,12 @@ func (c *FoldersInsertCall) Header() http.Header { func (c *FoldersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) +<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.folder) +======= + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.folder) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -7163,7 +7743,10 @@ func (c *FoldersInsertCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.folders.insert", 
"request", internallog.HTTPRequest(req, body.Bytes())) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -7198,11 +7781,17 @@ func (c *FoldersInsertCall) Do(opts ...googleapi.CallOption) (*Folder, error) { }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.folders.insert", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -7310,11 +7899,19 @@ func (c *FoldersListCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/folders") urls += "?" + c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) +======= + req, err := http.NewRequest("GET", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -7322,7 +7919,10 @@ func (c *FoldersListCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.folders.list", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -7357,11 +7957,17 @@ func (c *FoldersListCall) Do(opts ...googleapi.CallOption) (*Folders, error) { }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.folders.list", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -7451,11 +8057,19 @@ func (c *FoldersRenameCall) Header() http.Header { func (c *FoldersRenameCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/folders/{sourceFolder}/renameTo/folders/{destinationFolder}") urls += "?" 
+ c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("POST", urls, nil) +======= + req, err := http.NewRequest("POST", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -7465,7 +8079,10 @@ func (c *FoldersRenameCall) doRequest(alt string) (*http.Response, error) { "sourceFolder": c.sourceFolder, "destinationFolder": c.destinationFolder, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.folders.rename", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -7501,11 +8118,17 @@ func (c *FoldersRenameCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunning }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.folders.rename", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -7579,11 +8202,19 @@ func (c *ManagedFoldersDeleteCall) Header() http.Header { func (c *ManagedFoldersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}") urls += "?" + c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("DELETE", urls, nil) +======= + req, err := http.NewRequest("DELETE", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -7592,7 +8223,10 @@ func (c *ManagedFoldersDeleteCall) doRequest(alt string) (*http.Response, error) "bucket": c.bucket, "managedFolder": c.managedFolder, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.managedFolders.delete", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -7607,7 +8241,10 @@ func (c *ManagedFoldersDeleteCall) Do(opts ...googleapi.CallOption) error { if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.managedFolders.delete", "response", internallog.HTTPResponse(res, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } @@ -7685,11 +8322,19 @@ func (c *ManagedFoldersGetCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}") urls += "?" 
+ c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) +======= + req, err := http.NewRequest("GET", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -7698,7 +8343,10 @@ func (c *ManagedFoldersGetCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "managedFolder": c.managedFolder, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.managedFolders.get", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -7733,11 +8381,17 @@ func (c *ManagedFoldersGetCall) Do(opts ...googleapi.CallOption) (*ManagedFolder }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.managedFolders.get", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -7814,11 +8468,19 @@ func (c *ManagedFoldersGetIamPolicyCall) doRequest(alt string) (*http.Response, if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}/iam") urls += "?" + c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) +======= + req, err := http.NewRequest("GET", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -7827,7 +8489,10 @@ func (c *ManagedFoldersGetIamPolicyCall) doRequest(alt string) (*http.Response, "bucket": c.bucket, "managedFolder": c.managedFolder, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.managedFolders.getIamPolicy", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -7862,11 +8527,17 @@ func (c *ManagedFoldersGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Poli }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.managedFolders.getIamPolicy", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -7914,7 +8585,12 @@ func (c *ManagedFoldersInsertCall) Header() http.Header { func (c *ManagedFoldersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) +<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.managedfolder) +======= + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.managedfolder) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -7930,7 +8606,10 @@ func (c 
*ManagedFoldersInsertCall) doRequest(alt string) (*http.Response, error) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.managedFolders.insert", "request", internallog.HTTPRequest(req, body.Bytes())) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -7965,11 +8644,17 @@ func (c *ManagedFoldersInsertCall) Do(opts ...googleapi.CallOption) (*ManagedFol }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.managedFolders.insert", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -8048,11 +8733,19 @@ func (c *ManagedFoldersListCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders") urls += "?" + c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) +======= + req, err := http.NewRequest("GET", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -8060,7 +8753,10 @@ func (c *ManagedFoldersListCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.managedFolders.list", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -8095,11 +8791,17 @@ func (c *ManagedFoldersListCall) Do(opts ...googleapi.CallOption) (*ManagedFolde }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.managedFolders.list", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -8178,7 +8880,12 @@ func (c *ManagedFoldersSetIamPolicyCall) Header() http.Header { func (c *ManagedFoldersSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) +<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.policy) +======= + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -8195,7 +8902,10 @@ func (c *ManagedFoldersSetIamPolicyCall) doRequest(alt string) (*http.Response, "bucket": c.bucket, "managedFolder": c.managedFolder, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.managedFolders.setIamPolicy", 
"request", internallog.HTTPRequest(req, body.Bytes())) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -8230,11 +8940,17 @@ func (c *ManagedFoldersSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Poli }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.managedFolders.setIamPolicy", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -8305,11 +9021,19 @@ func (c *ManagedFoldersTestIamPermissionsCall) doRequest(alt string) (*http.Resp if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}/iam/testPermissions") urls += "?" + c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) +======= + req, err := http.NewRequest("GET", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -8318,7 +9042,10 @@ func (c *ManagedFoldersTestIamPermissionsCall) doRequest(alt string) (*http.Resp "bucket": c.bucket, "managedFolder": c.managedFolder, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.managedFolders.testIamPermissions", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -8354,11 +9081,17 @@ func (c *ManagedFoldersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.managedFolders.testIamPermissions", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -8414,11 +9147,19 @@ func (c *NotificationsDeleteCall) Header() http.Header { func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}") urls += "?" 
+ c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("DELETE", urls, nil) +======= + req, err := http.NewRequest("DELETE", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -8427,7 +9168,10 @@ func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) "bucket": c.bucket, "notification": c.notification, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.notifications.delete", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -8442,7 +9186,10 @@ func (c *NotificationsDeleteCall) Do(opts ...googleapi.CallOption) error { if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.notifications.delete", "response", internallog.HTTPResponse(res, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } @@ -8510,11 +9257,19 @@ func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}") urls += "?" + c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) +======= + req, err := http.NewRequest("GET", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -8523,7 +9278,10 @@ func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "notification": c.notification, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.notifications.get", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -8558,11 +9316,17 @@ func (c *NotificationsGetCall) Do(opts ...googleapi.CallOption) (*Notification, }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.notifications.get", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -8617,7 +9381,12 @@ func (c *NotificationsInsertCall) Header() http.Header { func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) +<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.notification) +======= + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.notification) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -8633,7 +9402,10 @@ func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) +<<<<<<< 
HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.notifications.insert", "request", internallog.HTTPRequest(req, body.Bytes())) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -8668,11 +9440,17 @@ func (c *NotificationsInsertCall) Do(opts ...googleapi.CallOption) (*Notificatio }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.notifications.insert", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -8737,11 +9515,19 @@ func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs") urls += "?" + c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) +======= + req, err := http.NewRequest("GET", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -8749,7 +9535,10 @@ func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.notifications.list", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -8784,11 +9573,17 @@ func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.notifications.list", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -8860,11 +9655,19 @@ func (c *ObjectAccessControlsDeleteCall) Header() http.Header { func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") urls += "?" 
+ c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("DELETE", urls, nil) +======= + req, err := http.NewRequest("DELETE", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -8874,7 +9677,10 @@ func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, "object": c.object, "entity": c.entity, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objectAccessControls.delete", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -8889,7 +9695,10 @@ func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objectAccessControls.delete", "response", internallog.HTTPResponse(res, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } @@ -8972,11 +9781,19 @@ func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, err if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") urls += "?" + c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) +======= + req, err := http.NewRequest("GET", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -8986,7 +9803,10 @@ func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, err "object": c.object, "entity": c.entity, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objectAccessControls.get", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -9022,11 +9842,17 @@ func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectA }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objectAccessControls.get", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -9094,7 +9920,12 @@ func (c *ObjectAccessControlsInsertCall) Header() http.Header { func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) +<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.objectaccesscontrol) +======= + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -9111,7 +9942,10 @@ func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, "bucket": c.bucket, "object": c.object, }) 
+<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objectAccessControls.insert", "request", internallog.HTTPRequest(req, body.Bytes())) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -9147,11 +9981,17 @@ func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Obje }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objectAccessControls.insert", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -9229,11 +10069,19 @@ func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, er if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") urls += "?" + c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) +======= + req, err := http.NewRequest("GET", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -9242,7 +10090,10 @@ func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, er "bucket": c.bucket, "object": c.object, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objectAccessControls.list", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -9278,11 +10129,17 @@ func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Object }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objectAccessControls.list", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -9355,7 +10212,12 @@ func (c *ObjectAccessControlsPatchCall) Header() http.Header { func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) +<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.objectaccesscontrol) +======= + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -9373,7 +10235,10 @@ func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, e "object": c.object, "entity": c.entity, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objectAccessControls.patch", "request", internallog.HTTPRequest(req, body.Bytes())) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return 
gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -9409,11 +10274,17 @@ func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Objec }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objectAccessControls.patch", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -9486,7 +10357,12 @@ func (c *ObjectAccessControlsUpdateCall) Header() http.Header { func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) +<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.objectaccesscontrol) +======= + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -9504,7 +10380,10 @@ func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, "object": c.object, "entity": c.entity, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objectAccessControls.update", "request", internallog.HTTPRequest(req, body.Bytes())) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -9540,11 +10419,17 @@ func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Obje }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objectAccessControls.update", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -9593,7 +10478,12 @@ func (c *ObjectsBulkRestoreCall) Header() http.Header { func (c *ObjectsBulkRestoreCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) +<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.bulkrestoreobjectsrequest) +======= + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bulkrestoreobjectsrequest) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -9609,7 +10499,10 @@ func (c *ObjectsBulkRestoreCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.bulkRestore", "request", internallog.HTTPRequest(req, body.Bytes())) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -9645,11 +10538,17 @@ func (c *ObjectsBulkRestoreCall) Do(opts ...googleapi.CallOption) (*GoogleLongru }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", 
"storage.objects.bulkRestore", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -9769,7 +10668,12 @@ func (c *ObjectsComposeCall) Header() http.Header { func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) +<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.composerequest) +======= + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.composerequest) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -9786,7 +10690,10 @@ func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { "destinationBucket": c.destinationBucket, "destinationObject": c.destinationObject, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.compose", "request", internallog.HTTPRequest(req, body.Bytes())) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -9821,11 +10728,17 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.compose", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -10027,7 +10940,12 @@ func (c *ObjectsCopyCall) Header() http.Header { func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) +<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.object) +======= + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -10046,7 +10964,10 @@ func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { "destinationBucket": c.destinationBucket, "destinationObject": c.destinationObject, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.copy", "request", internallog.HTTPRequest(req, body.Bytes())) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -10081,11 +11002,17 @@ func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.copy", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -10188,11 +11115,19 @@ func (c *ObjectsDeleteCall) Header() http.Header { func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := 
gensupport.SetHeaders(c.s.userAgent(), "", c.header_) +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") urls += "?" + c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("DELETE", urls, nil) +======= + req, err := http.NewRequest("DELETE", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -10201,7 +11136,10 @@ func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "object": c.object, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.delete", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -10216,7 +11154,10 @@ func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error { if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.delete", "response", internallog.HTTPResponse(res, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } @@ -10361,11 +11302,19 @@ func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") urls += "?" + c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) +======= + req, err := http.NewRequest("GET", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -10374,7 +11323,10 @@ func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "object": c.object, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.get", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -10425,11 +11377,17 @@ func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.get", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -10507,11 +11465,19 @@ func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam") urls += "?" 
+ c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) +======= + req, err := http.NewRequest("GET", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -10520,7 +11486,10 @@ func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) "bucket": c.bucket, "object": c.object, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.getIamPolicy", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -10555,11 +11524,17 @@ func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.getIamPolicy", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -10791,7 +11766,12 @@ func (c *ObjectsInsertCall) Header() http.Header { func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) +<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.object) +======= + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -10802,10 +11782,21 @@ func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { urls = googleapi.ResolveRelative(c.s.BasePath, "/upload/storage/v1/b/{bucket}/o") c.urlParams_.Set("uploadType", c.mediaInfo_.UploadType()) } +<<<<<<< HEAD newBody, getBody, cleanup := c.mediaInfo_.UploadRequest(reqHeaders, body) defer cleanup() urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, newBody) +======= + if body == nil { + body = new(bytes.Buffer) + reqHeaders.Set("Content-Type", "application/json") + } + body, getBody, cleanup := c.mediaInfo_.UploadRequest(reqHeaders, body) + defer cleanup() + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -10814,7 +11805,10 @@ func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.insert", "request", internallog.HTTPRequest(req, body.Bytes())) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if c.retry != nil { return gensupport.SendRequestWithRetry(c.ctx_, c.s.client, req, c.retry) } @@ -10870,11 +11864,17 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.insert", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -11045,11 +12045,19 @@ func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") urls += "?" + c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) +======= + req, err := http.NewRequest("GET", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -11057,7 +12065,10 @@ func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.list", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -11092,11 +12103,17 @@ func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.list", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -11121,6 +12138,7 @@ func (c *ObjectsListCall) Pages(ctx context.Context, f func(*Objects) error) err } } +<<<<<<< HEAD type ObjectsMoveCall struct { s *Service bucket string @@ -11322,6 +12340,8 @@ func (c *ObjectsMoveCall) Do(opts ...googleapi.CallOption) (*Object, error) { return ret, nil } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type ObjectsPatchCall struct { s *Service bucket string @@ -11472,7 +12492,12 @@ func (c *ObjectsPatchCall) Header() http.Header { func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", 
c.header_) +<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.object2) +======= + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -11489,7 +12514,10 @@ func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "object": c.object, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.patch", "request", internallog.HTTPRequest(req, body.Bytes())) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -11524,11 +12552,17 @@ func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.patch", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -11653,11 +12687,19 @@ func (c *ObjectsRestoreCall) Header() http.Header { func (c *ObjectsRestoreCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/restore") urls += "?" + c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("POST", urls, nil) +======= + req, err := http.NewRequest("POST", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -11666,7 +12708,10 @@ func (c *ObjectsRestoreCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "object": c.object, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.restore", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -11701,11 +12746,17 @@ func (c *ObjectsRestoreCall) Do(opts ...googleapi.CallOption) (*Object, error) { }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.restore", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -11929,7 +12980,12 @@ func (c *ObjectsRewriteCall) Header() http.Header { func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) +<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.object) +======= + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -11948,7 +13004,10 @@ func (c 
*ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { "destinationBucket": c.destinationBucket, "destinationObject": c.destinationObject, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.rewrite", "request", internallog.HTTPRequest(req, body.Bytes())) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -11984,11 +13043,17 @@ func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.rewrite", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -12056,7 +13121,12 @@ func (c *ObjectsSetIamPolicyCall) Header() http.Header { func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) +<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.policy) +======= + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -12073,7 +13143,10 @@ func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) "bucket": c.bucket, "object": c.object, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.setIamPolicy", "request", internallog.HTTPRequest(req, body.Bytes())) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -12108,11 +13181,17 @@ func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.setIamPolicy", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -12193,11 +13272,19 @@ func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, e if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam/testPermissions") urls += "?" 
+ c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) +======= + req, err := http.NewRequest("GET", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -12206,7 +13293,10 @@ func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, e "bucket": c.bucket, "object": c.object, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.testIamPermissions", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -12242,11 +13332,17 @@ func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.testIamPermissions", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -12400,7 +13496,12 @@ func (c *ObjectsUpdateCall) Header() http.Header { func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) +<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.object2) +======= + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -12417,7 +13518,10 @@ func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "object": c.object, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.update", "request", internallog.HTTPRequest(req, body.Bytes())) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -12452,11 +13556,17 @@ func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.update", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -12592,7 +13702,12 @@ func (c *ObjectsWatchAllCall) Header() http.Header { func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) +<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.channel) +======= + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -12608,7 +13723,10 @@ func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", 
"storage.objects.watchAll", "request", internallog.HTTPRequest(req, body.Bytes())) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -12643,11 +13761,17 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.watchAll", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -12702,7 +13826,12 @@ func (c *OperationsAdvanceRelocateBucketCall) Header() http.Header { func (c *OperationsAdvanceRelocateBucketCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) +<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.advancerelocatebucketoperationrequest) +======= + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.advancerelocatebucketoperationrequest) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -12719,7 +13848,10 @@ func (c *OperationsAdvanceRelocateBucketCall) doRequest(alt string) (*http.Respo "bucket": c.bucket, "operationId": c.operationId, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.operations.advanceRelocateBucket", "request", internallog.HTTPRequest(req, body.Bytes())) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -12734,7 +13866,10 @@ func (c *OperationsAdvanceRelocateBucketCall) Do(opts ...googleapi.CallOption) e if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.operations.advanceRelocateBucket", "response", internallog.HTTPResponse(res, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } @@ -12785,11 +13920,19 @@ func (c *OperationsCancelCall) Header() http.Header { func (c *OperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/operations/{operationId}/cancel") urls += "?" 
+ c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("POST", urls, nil) +======= + req, err := http.NewRequest("POST", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -12798,7 +13941,10 @@ func (c *OperationsCancelCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "operationId": c.operationId, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.operations.cancel", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -12813,7 +13959,10 @@ func (c *OperationsCancelCall) Do(opts ...googleapi.CallOption) error { if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.operations.cancel", "response", internallog.HTTPResponse(res, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } @@ -12874,11 +14023,19 @@ func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/operations/{operationId}") urls += "?" + c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) +======= + req, err := http.NewRequest("GET", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -12887,7 +14044,10 @@ func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "operationId": c.operationId, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.operations.get", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -12923,11 +14083,17 @@ func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunning }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.operations.get", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -13009,11 +14175,19 @@ func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/operations") urls += "?" 
+ c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) +======= + req, err := http.NewRequest("GET", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -13021,7 +14195,10 @@ func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.operations.list", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -13057,11 +14234,17 @@ func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunnin }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.operations.list", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -13137,11 +14320,19 @@ func (c *ProjectsHmacKeysCreateCall) Header() http.Header { func (c *ProjectsHmacKeysCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys") urls += "?" 
+ c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("POST", urls, nil) +======= + req, err := http.NewRequest("POST", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -13149,7 +14340,10 @@ func (c *ProjectsHmacKeysCreateCall) doRequest(alt string) (*http.Response, erro googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.create", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -13184,11 +14378,17 @@ func (c *ProjectsHmacKeysCreateCall) Do(opts ...googleapi.CallOption) (*HmacKey, }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.create", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -13244,11 +14444,19 @@ func (c *ProjectsHmacKeysDeleteCall) Header() http.Header { func (c *ProjectsHmacKeysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys/{accessId}") urls += "?" + c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("DELETE", urls, nil) +======= + req, err := http.NewRequest("DELETE", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -13257,7 +14465,10 @@ func (c *ProjectsHmacKeysDeleteCall) doRequest(alt string) (*http.Response, erro "projectId": c.projectId, "accessId": c.accessId, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.delete", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -13272,7 +14483,10 @@ func (c *ProjectsHmacKeysDeleteCall) Do(opts ...googleapi.CallOption) error { if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.delete", "response", internallog.HTTPResponse(res, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } @@ -13340,11 +14554,19 @@ func (c *ProjectsHmacKeysGetCall) doRequest(alt string) (*http.Response, error) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys/{accessId}") urls += "?" 
+ c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) +======= + req, err := http.NewRequest("GET", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -13353,7 +14575,10 @@ func (c *ProjectsHmacKeysGetCall) doRequest(alt string) (*http.Response, error) "projectId": c.projectId, "accessId": c.accessId, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.get", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -13389,11 +14614,17 @@ func (c *ProjectsHmacKeysGetCall) Do(opts ...googleapi.CallOption) (*HmacKeyMeta }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.get", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -13490,11 +14721,19 @@ func (c *ProjectsHmacKeysListCall) doRequest(alt string) (*http.Response, error) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys") urls += "?" + c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) +======= + req, err := http.NewRequest("GET", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -13502,7 +14741,10 @@ func (c *ProjectsHmacKeysListCall) doRequest(alt string) (*http.Response, error) googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.list", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -13538,11 +14780,17 @@ func (c *ProjectsHmacKeysListCall) Do(opts ...googleapi.CallOption) (*HmacKeysMe }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.list", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -13624,7 +14872,12 @@ func (c *ProjectsHmacKeysUpdateCall) Header() http.Header { func (c *ProjectsHmacKeysUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) +<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.hmackeymetadata) +======= + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.hmackeymetadata) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -13641,7 +14894,10 @@ 
func (c *ProjectsHmacKeysUpdateCall) doRequest(alt string) (*http.Response, erro "projectId": c.projectId, "accessId": c.accessId, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.update", "request", internallog.HTTPRequest(req, body.Bytes())) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -13677,11 +14933,17 @@ func (c *ProjectsHmacKeysUpdateCall) Do(opts ...googleapi.CallOption) (*HmacKeyM }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.update", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -13747,11 +15009,19 @@ func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, e if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } +<<<<<<< HEAD +======= + var body io.Reader = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/serviceAccount") urls += "?" + c.urlParams_.Encode() +<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) +======= + req, err := http.NewRequest("GET", urls, body) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -13759,7 +15029,10 @@ func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, e googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, }) +<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.projects.serviceAccount.get", "request", internallog.HTTPRequest(req, nil)) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -13794,10 +15067,16 @@ func (c *ProjectsServiceAccountGetCall) Do(opts ...googleapi.CallOption) (*Servi }, } target := &ret +<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.projects.serviceAccount.get", "response", internallog.HTTPResponse(res, b)) +======= + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go index a354d223d3..db57bc8a31 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -22,6 +22,10 @@ import ( "cloud.google.com/go/auth/grpctransport" "cloud.google.com/go/auth/oauth2adapt" "cloud.google.com/go/compute/metadata" +<<<<<<< HEAD +======= + "go.opencensus.io/plugin/ocgrpc" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "golang.org/x/oauth2" "golang.org/x/time/rate" @@ -235,7 +239,10 @@ func dialPoolNewAuth(ctx context.Context, secure bool, poolSize int, ds *interna Audience: aud, CredentialsFile: 
ds.CredentialsFile, CredentialsJSON: ds.CredentialsJSON, +<<<<<<< HEAD Logger: ds.Logger, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }, InternalOptions: &grpctransport.InternalOptions{ EnableNonDefaultSAForDirectPath: ds.AllowNonDefaultServiceAccount, @@ -249,7 +256,10 @@ func dialPoolNewAuth(ctx context.Context, secure bool, poolSize int, ds *interna SkipValidation: skipValidation, }, UniverseDomain: ds.UniverseDomain, +<<<<<<< HEAD Logger: ds.Logger, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }) return pool, err } @@ -386,6 +396,10 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C // Add tracing, but before the other options, so that clients can override the // gRPC stats handler. // This assumes that gRPC options are processed in order, left to right. +<<<<<<< HEAD +======= + grpcOpts = addOCStatsHandler(grpcOpts, o) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) grpcOpts = addOpenTelemetryStatsHandler(grpcOpts, o) grpcOpts = append(grpcOpts, o.GRPCDialOpts...) if o.UserAgent != "" { @@ -395,6 +409,16 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C return dialContext(ctx, endpoint, grpcOpts...) } +<<<<<<< HEAD +======= +func addOCStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings) []grpc.DialOption { + if settings.TelemetryDisabled { + return opts + } + return append(opts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) +} + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func addOpenTelemetryStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings) []grpc.DialOption { if settings.TelemetryDisabled { return opts diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index 6b7ea74ba4..480c8fa48b 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -19,6 +19,10 @@ import ( "cloud.google.com/go/auth/credentials" "cloud.google.com/go/auth/httptransport" "cloud.google.com/go/auth/oauth2adapt" +<<<<<<< HEAD +======= + "go.opencensus.io/plugin/ochttp" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "golang.org/x/net/http2" "golang.org/x/oauth2" @@ -26,6 +30,10 @@ import ( "google.golang.org/api/internal" "google.golang.org/api/internal/cert" "google.golang.org/api/option" +<<<<<<< HEAD +======= + "google.golang.org/api/transport/http/internal/propagation" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // NewClient returns an HTTP client for use communicating with a Google cloud @@ -119,7 +127,10 @@ func newClientNewAuth(ctx context.Context, base http.RoundTripper, ds *internal. Audience: aud, CredentialsFile: ds.CredentialsFile, CredentialsJSON: ds.CredentialsJSON, +<<<<<<< HEAD Logger: ds.Logger, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }, InternalOptions: &httptransport.InternalOptions{ EnableJWTWithScope: ds.EnableJwtWithScope, @@ -130,7 +141,10 @@ func newClientNewAuth(ctx context.Context, base http.RoundTripper, ds *internal. 
SkipValidation: skipValidation, }, UniverseDomain: ds.UniverseDomain, +<<<<<<< HEAD Logger: ds.Logger, +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }) if err != nil { return nil, err @@ -165,7 +179,14 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna requestReason: settings.RequestReason, } var trans http.RoundTripper = paramTransport +<<<<<<< HEAD trans = addOpenTelemetryTransport(trans, settings) +======= + // Give OpenTelemetry precedence over OpenCensus in case user configuration + // causes both to write the same header (`X-Cloud-Trace-Context`). + trans = addOpenTelemetryTransport(trans, settings) + trans = addOCTransport(trans, settings) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) switch { case settings.NoAuth: // Do nothing. @@ -306,6 +327,19 @@ func addOpenTelemetryTransport(trans http.RoundTripper, settings *internal.DialS return otelhttp.NewTransport(trans) } +<<<<<<< HEAD +======= +func addOCTransport(trans http.RoundTripper, settings *internal.DialSettings) http.RoundTripper { + if settings.TelemetryDisabled { + return trans + } + return &ochttp.Transport{ + Base: trans, + Propagation: &propagation.HTTPFormat{}, + } +} + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // clonedTransport returns the given RoundTripper as a cloned *http.Transport. // It returns nil if the RoundTripper can't be cloned or coerced to // *http.Transport. diff --git a/vendor/google.golang.org/api/transport/http/internal/propagation/http.go b/vendor/google.golang.org/api/transport/http/internal/propagation/http.go new file mode 100644 index 0000000000..ba7512aa26 --- /dev/null +++ b/vendor/google.golang.org/api/transport/http/internal/propagation/http.go @@ -0,0 +1,87 @@ +// Copyright 2018 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.8 +// +build go1.8 + +// Package propagation implements X-Cloud-Trace-Context header propagation used +// by Google Cloud products. +package propagation + +import ( + "encoding/binary" + "encoding/hex" + "fmt" + "net/http" + "strconv" + "strings" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +const ( + httpHeaderMaxSize = 200 + httpHeader = `X-Cloud-Trace-Context` +) + +var _ propagation.HTTPFormat = (*HTTPFormat)(nil) + +// HTTPFormat implements propagation.HTTPFormat to propagate +// traces in HTTP headers for Google Cloud Platform and Stackdriver Trace. +type HTTPFormat struct{} + +// SpanContextFromRequest extracts a Stackdriver Trace span context from incoming requests. +func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { + h := req.Header.Get(httpHeader) + // See https://cloud.google.com/trace/docs/faq for the header HTTPFormat. + // Return if the header is empty or missing, or if the header is unreasonably + // large, to avoid making unnecessary copies of a large string. + if h == "" || len(h) > httpHeaderMaxSize { + return trace.SpanContext{}, false + } + + // Parse the trace id field. + slash := strings.Index(h, `/`) + if slash == -1 { + return trace.SpanContext{}, false + } + tid, h := h[:slash], h[slash+1:] + + buf, err := hex.DecodeString(tid) + if err != nil { + return trace.SpanContext{}, false + } + copy(sc.TraceID[:], buf) + + // Parse the span id field. 
+ spanstr := h + semicolon := strings.Index(h, `;`) + if semicolon != -1 { + spanstr, h = h[:semicolon], h[semicolon+1:] + } + sid, err := strconv.ParseUint(spanstr, 10, 64) + if err != nil { + return trace.SpanContext{}, false + } + binary.BigEndian.PutUint64(sc.SpanID[:], sid) + + // Parse the options field, options field is optional. + if !strings.HasPrefix(h, "o=") { + return sc, true + } + o, err := strconv.ParseUint(h[2:], 10, 64) + if err != nil { + return trace.SpanContext{}, false + } + sc.TraceOptions = trace.TraceOptions(o) + return sc, true +} + +// SpanContextToRequest modifies the given request to include a Stackdriver Trace header. +func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { + sid := binary.BigEndian.Uint64(sc.SpanID[:]) + header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions)) + req.Header.Set(httpHeader, header) +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index 4a9fce53c4..48aa96659b 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -180,8 +180,11 @@ type CommonLanguageSettings struct { ReferenceDocsUri string `protobuf:"bytes,1,opt,name=reference_docs_uri,json=referenceDocsUri,proto3" json:"reference_docs_uri,omitempty"` // The destination where API teams want this client library to be published. Destinations []ClientLibraryDestination `protobuf:"varint,2,rep,packed,name=destinations,proto3,enum=google.api.ClientLibraryDestination" json:"destinations,omitempty"` +<<<<<<< HEAD // Configuration for which RPCs should be generated in the GAPIC client. SelectiveGapicGeneration *SelectiveGapicGeneration `protobuf:"bytes,3,opt,name=selective_gapic_generation,json=selectiveGapicGeneration,proto3" json:"selective_gapic_generation,omitempty"` +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *CommonLanguageSettings) Reset() { @@ -231,6 +234,7 @@ func (x *CommonLanguageSettings) GetDestinations() []ClientLibraryDestination { return nil } +<<<<<<< HEAD func (x *CommonLanguageSettings) GetSelectiveGapicGeneration() *SelectiveGapicGeneration { if x != nil { return x.SelectiveGapicGeneration @@ -238,6 +242,8 @@ func (x *CommonLanguageSettings) GetSelectiveGapicGeneration() *SelectiveGapicGe return nil } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Details about how and where to publish client libraries. type ClientLibrarySettings struct { state protoimpl.MessageState @@ -993,6 +999,7 @@ type GoSettings struct { // Some settings. Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +<<<<<<< HEAD // Map of service names to renamed services. Keys are the package relative // service names and values are the name to be used for the service client // and call options. 
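The ordering note in newTransport above is easy to miss: each telemetry transport wraps the previous one, so the transport added first ends up innermost and writes its headers last, which is what gives OpenTelemetry precedence over OpenCensus for `X-Cloud-Trace-Context`. A minimal sketch of that wrapper ordering follows; tag, stub, and the example URL are illustrative stand-ins, not part of the vendored code.

package main

import (
	"fmt"
	"net/http"
)

// stub terminates the chain without touching the network.
type stub struct{}

func (stub) RoundTrip(*http.Request) (*http.Response, error) {
	return &http.Response{StatusCode: http.StatusOK, Body: http.NoBody}, nil
}

// tag records the order in which each wrapper's RoundTrip runs.
type tag struct {
	name string
	next http.RoundTripper
	log  *[]string
}

func (t tag) RoundTrip(req *http.Request) (*http.Response, error) {
	*t.log = append(*t.log, t.name)
	return t.next.RoundTrip(req)
}

func main() {
	var order []string
	// Mirror newTransport: wrap "otel" first (innermost), then "oc".
	var trans http.RoundTripper = tag{"otel", stub{}, &order}
	trans = tag{"oc", trans, &order}

	req, _ := http.NewRequest("GET", "https://example.com", nil)
	trans.RoundTrip(req)

	// The outer wrapper runs first and the inner one last, so the inner
	// ("otel") would overwrite any header both of them set.
	fmt.Println(order) // [oc otel]
}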
@@ -1003,6 +1010,8 @@ type GoSettings struct { // renamed_services: // Publisher: TopicAdmin RenamedServices map[string]string `protobuf:"bytes,2,rep,name=renamed_services,json=renamedServices,proto3" json:"renamed_services,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *GoSettings) Reset() { @@ -1044,6 +1053,7 @@ func (x *GoSettings) GetCommon() *CommonLanguageSettings { return nil } +<<<<<<< HEAD func (x *GoSettings) GetRenamedServices() map[string]string { if x != nil { return x.RenamedServices @@ -1051,6 +1061,8 @@ func (x *GoSettings) GetRenamedServices() map[string]string { return nil } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Describes the generator configuration for a method. type MethodSettings struct { state protoimpl.MessageState @@ -1149,6 +1161,7 @@ func (x *MethodSettings) GetAutoPopulatedFields() []string { return nil } +<<<<<<< HEAD // This message is used to configure the generation of a subset of the RPCs in // a service for client libraries. type SelectiveGapicGeneration struct { @@ -1200,6 +1213,8 @@ func (x *SelectiveGapicGeneration) GetMethods() []string { return nil } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Experimental features to be included during client library generation. // These fields will be deprecated once the feature graduates and is enabled // by default. @@ -1213,17 +1228,24 @@ type PythonSettings_ExperimentalFeatures struct { // This feature will be enabled by default 1 month after launching the // feature in preview packages. RestAsyncIoEnabled bool `protobuf:"varint,1,opt,name=rest_async_io_enabled,json=restAsyncIoEnabled,proto3" json:"rest_async_io_enabled,omitempty"` +<<<<<<< HEAD // Enables generation of protobuf code using new types that are more // Pythonic which are included in `protobuf>=5.29.x`. This feature will be // enabled by default 1 month after launching the feature in preview // packages. 
ProtobufPythonicTypesEnabled bool `protobuf:"varint,2,opt,name=protobuf_pythonic_types_enabled,json=protobufPythonicTypesEnabled,proto3" json:"protobuf_pythonic_types_enabled,omitempty"` +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *PythonSettings_ExperimentalFeatures) Reset() { *x = PythonSettings_ExperimentalFeatures{} if protoimpl.UnsafeEnabled { +<<<<<<< HEAD mi := &file_google_api_client_proto_msgTypes[14] +======= + mi := &file_google_api_client_proto_msgTypes[13] +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1236,7 +1258,11 @@ func (x *PythonSettings_ExperimentalFeatures) String() string { func (*PythonSettings_ExperimentalFeatures) ProtoMessage() {} func (x *PythonSettings_ExperimentalFeatures) ProtoReflect() protoreflect.Message { +<<<<<<< HEAD mi := &file_google_api_client_proto_msgTypes[14] +======= + mi := &file_google_api_client_proto_msgTypes[13] +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1259,6 +1285,7 @@ func (x *PythonSettings_ExperimentalFeatures) GetRestAsyncIoEnabled() bool { return false } +<<<<<<< HEAD func (x *PythonSettings_ExperimentalFeatures) GetProtobufPythonicTypesEnabled() bool { if x != nil { return x.ProtobufPythonicTypesEnabled @@ -1266,6 +1293,8 @@ func (x *PythonSettings_ExperimentalFeatures) GetProtobufPythonicTypesEnabled() return false } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Describes settings to use when generating API methods that use the // long-running operation pattern. // All default values below are from those used in the client library @@ -1294,7 +1323,11 @@ type MethodSettings_LongRunning struct { func (x *MethodSettings_LongRunning) Reset() { *x = MethodSettings_LongRunning{} if protoimpl.UnsafeEnabled { +<<<<<<< HEAD mi := &file_google_api_client_proto_msgTypes[18] +======= + mi := &file_google_api_client_proto_msgTypes[16] +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1307,7 +1340,11 @@ func (x *MethodSettings_LongRunning) String() string { func (*MethodSettings_LongRunning) ProtoMessage() {} func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message { +<<<<<<< HEAD mi := &file_google_api_client_proto_msgTypes[18] +======= + mi := &file_google_api_client_proto_msgTypes[16] +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1495,7 +1532,11 @@ var file_google_api_client_proto_rawDesc = []byte{ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, +<<<<<<< HEAD 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf8, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, +======= + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x30, 0x0a, 0x12, 0x72, 0x65, 0x66, 0x65, 
0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, @@ -1504,6 +1545,7 @@ var file_google_api_client_proto_rawDesc = []byte{ 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, +<<<<<<< HEAD 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x62, 0x0a, 0x1a, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x67, 0x61, 0x70, 0x69, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, @@ -1680,10 +1722,113 @@ var file_google_api_client_proto_rawDesc = []byte{ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, +======= + 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x93, 0x05, + 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, + 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x2c, 0x0a, + 0x12, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x65, 0x6e, + 0x75, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x73, 0x74, 0x4e, + 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6a, + 0x61, 0x76, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x15, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6a, 0x61, + 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x63, 0x70, + 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x70, + 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x63, 0x70, 0x70, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x68, 0x70, 0x5f, 0x73, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x70, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 
0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6e, 0x6f, 0x64, 0x65, 0x5f, + 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4e, 0x6f, 0x64, 0x65, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74, + 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, + 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x64, 0x6f, 0x74, + 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x72, + 0x75, 0x62, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x72, 0x75, + 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x67, 0x6f, + 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x22, 0xf4, 0x04, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x69, + 0x6e, 0x67, 0x12, 0x43, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e, 0x65, 0x77, 0x5f, 0x69, + 0x73, 0x73, 0x75, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x65, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x6e, 0x65, 0x77, 0x49, 0x73, 0x73, 0x75, 0x65, 0x55, 0x72, 0x69, 0x12, 0x2b, 0x0a, 0x11, 0x64, + 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, + 0x18, 0x66, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x69, 0x5f, + 0x73, 0x68, 0x6f, 0x72, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x67, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x61, 0x70, 0x69, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x68, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x4c, 0x61, 0x62, 0x65, + 0x6c, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x69, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x14, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x47, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x54, 0x65, 0x61, 0x6d, 0x73, 0x12, 
0x24, 0x0a, 0x0e, 0x64, 0x6f, 0x63, 0x5f, 0x74, + 0x61, 0x67, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x6a, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x64, 0x6f, 0x63, 0x54, 0x61, 0x67, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x49, 0x0a, + 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x6b, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, + 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61, + 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, 0x10, 0x6c, 0x69, 0x62, 0x72, + 0x61, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x6d, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x49, 0x0a, 0x21, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5f, + 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6e, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x1e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, + 0x69, 0x12, 0x47, 0x0a, 0x20, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x72, 0x65, 0x73, + 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x22, 0x9a, 0x02, 0x0a, 0x0c, 0x4a, + 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6c, + 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x50, 0x61, 0x63, + 0x6b, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4a, + 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, + 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x1a, 0x44, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, + 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 
0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x0b, 0x43, 0x70, 0x70, 0x53, 0x65, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, +<<<<<<< HEAD 0x6f, 0x6e, 0x22, 0xe4, 0x01, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, @@ -1773,6 +1918,149 @@ var file_google_api_client_proto_rawDesc = []byte{ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +======= + 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xfd, 0x01, + 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x64, 0x0a, 0x15, + 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x14, 0x65, 0x78, + 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x1a, 0x49, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x15, 0x72, 0x65, + 0x73, 0x74, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x6f, 0x5f, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x72, 0x65, 0x73, 0x74, 0x41, + 0x73, 0x79, 0x6e, 0x63, 0x49, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x4a, 0x0a, + 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, + 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, + 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, + 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x12, 0x38, 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, + 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, + 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, + 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, + 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x22, 0xc2, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, + 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, + 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, + 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, + 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, + 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, + 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, + 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, + 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, + 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, + 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, + 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, + 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 
0x6f, + 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, + 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, + 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, + 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, + 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, + 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, + 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, + 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, + 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, + 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, + 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, + 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, + 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, + 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, + 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 
0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x42, 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var ( @@ -1788,7 +2076,11 @@ func file_google_api_client_proto_rawDescGZIP() []byte { } var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +<<<<<<< HEAD var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 19) +======= +var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var file_google_api_client_proto_goTypes = []interface{}{ (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination @@ -1804,6 +2096,7 @@ var file_google_api_client_proto_goTypes = []interface{}{ (*RubySettings)(nil), // 11: google.api.RubySettings (*GoSettings)(nil), // 12: google.api.GoSettings (*MethodSettings)(nil), // 13: google.api.MethodSettings +<<<<<<< HEAD (*SelectiveGapicGeneration)(nil), // 14: google.api.SelectiveGapicGeneration nil, // 15: google.api.JavaSettings.ServiceClassNamesEntry (*PythonSettings_ExperimentalFeatures)(nil), // 16: google.api.PythonSettings.ExperimentalFeatures @@ -1857,6 +2150,57 @@ var file_google_api_client_proto_depIdxs = []int32{ 35, // [35:35] is the sub-list for extension type_name 31, // [31:35] is the sub-list for extension extendee 0, // [0:31] is the sub-list for field type_name +======= + nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry + (*PythonSettings_ExperimentalFeatures)(nil), // 15: google.api.PythonSettings.ExperimentalFeatures + nil, // 16: google.api.DotnetSettings.RenamedServicesEntry + nil, // 17: google.api.DotnetSettings.RenamedResourcesEntry + (*MethodSettings_LongRunning)(nil), // 18: google.api.MethodSettings.LongRunning + (api.LaunchStage)(0), // 19: google.api.LaunchStage + (*durationpb.Duration)(nil), // 20: google.protobuf.Duration + (*descriptorpb.MethodOptions)(nil), // 21: google.protobuf.MethodOptions + (*descriptorpb.ServiceOptions)(nil), // 22: google.protobuf.ServiceOptions +} +var file_google_api_client_proto_depIdxs = []int32{ + 1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination + 19, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage + 5, // 2: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings + 6, // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings + 7, // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings + 8, // 5: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings + 
9, // 6: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings + 10, // 7: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings + 11, // 8: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings + 12, // 9: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings + 13, // 10: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings + 0, // 11: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization + 3, // 12: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings + 14, // 13: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry + 2, // 14: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 15: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 16: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 17: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings + 15, // 18: google.api.PythonSettings.experimental_features:type_name -> google.api.PythonSettings.ExperimentalFeatures + 2, // 19: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 20: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings + 16, // 21: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry + 17, // 22: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry + 2, // 23: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 24: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings + 18, // 25: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning + 20, // 26: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration + 20, // 27: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration + 20, // 28: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration + 21, // 29: google.api.method_signature:extendee -> google.protobuf.MethodOptions + 22, // 30: google.api.default_host:extendee -> google.protobuf.ServiceOptions + 22, // 31: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions + 22, // 32: google.api.api_version:extendee -> google.protobuf.ServiceOptions + 33, // [33:33] is the sub-list for method output_type + 33, // [33:33] is the sub-list for method input_type + 33, // [33:33] is the sub-list for extension type_name + 29, // [29:33] is the sub-list for extension extendee + 0, // [0:29] is the sub-list for field type_name +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func init() { file_google_api_client_proto_init() } @@ -2009,6 +2353,7 @@ func file_google_api_client_proto_init() { return nil } } +<<<<<<< HEAD file_google_api_client_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SelectiveGapicGeneration); i { case 0: @@ -2022,6 +2367,9 @@ func file_google_api_client_proto_init() { } } file_google_api_client_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { +======= + file_google_api_client_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { +>>>>>>> 70e0318b1 ([WIP] add archivista storage 
backend) switch v := v.(*PythonSettings_ExperimentalFeatures); i { case 0: return &v.state @@ -2033,7 +2381,11 @@ func file_google_api_client_proto_init() { return nil } } +<<<<<<< HEAD file_google_api_client_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { +======= + file_google_api_client_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) switch v := v.(*MethodSettings_LongRunning); i { case 0: return &v.state @@ -2052,7 +2404,11 @@ func file_google_api_client_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_api_client_proto_rawDesc, NumEnums: 2, +<<<<<<< HEAD NumMessages: 19, +======= + NumMessages: 17, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) NumExtensions: 4, NumServices: 0, }, diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go index f388426b08..1ea48cd118 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go @@ -159,14 +159,23 @@ var file_google_api_httpbody_proto_rawDesc = []byte{ 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, +<<<<<<< HEAD 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x65, 0x0a, 0x0e, 0x63, +======= + 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x68, 0x0a, 0x0e, 0x63, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x48, 0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, +<<<<<<< HEAD 0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +======= + 0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xf8, 0x01, 0x01, 0xa2, 0x02, + 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var ( diff --git a/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go index 7f6e006cde..5da71ecec8 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go @@ -172,6 +172,7 @@ func (MetricDescriptor_ValueType) EnumDescriptor() ([]byte, []int) { return file_google_api_metric_proto_rawDescGZIP(), []int{0, 1} } +<<<<<<< HEAD // The resource hierarchy level of the timeseries data of a metric. 
type MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel int32 @@ -229,6 +230,8 @@ func (MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel return file_google_api_metric_proto_rawDescGZIP(), []int{0, 0, 0} } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Defines a metric type and its schema. Once a metric descriptor is created, // deleting or altering it stops data collection and makes the metric type's // existing data unusable. @@ -576,8 +579,11 @@ type MetricDescriptor_MetricDescriptorMetadata struct { // age are guaranteed to be ingested and available to be read, excluding // data loss due to errors. IngestDelay *durationpb.Duration `protobuf:"bytes,3,opt,name=ingest_delay,json=ingestDelay,proto3" json:"ingest_delay,omitempty"` +<<<<<<< HEAD // The scope of the timeseries data of the metric. TimeSeriesResourceHierarchyLevel []MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel `protobuf:"varint,4,rep,packed,name=time_series_resource_hierarchy_level,json=timeSeriesResourceHierarchyLevel,proto3,enum=google.api.MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel" json:"time_series_resource_hierarchy_level,omitempty"` +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *MetricDescriptor_MetricDescriptorMetadata) Reset() { @@ -634,6 +640,7 @@ func (x *MetricDescriptor_MetricDescriptorMetadata) GetIngestDelay() *durationpb return nil } +<<<<<<< HEAD func (x *MetricDescriptor_MetricDescriptorMetadata) GetTimeSeriesResourceHierarchyLevel() []MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel { if x != nil { return x.TimeSeriesResourceHierarchyLevel @@ -641,6 +648,8 @@ func (x *MetricDescriptor_MetricDescriptorMetadata) GetTimeSeriesResourceHierarc return nil } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var File_google_api_metric_proto protoreflect.FileDescriptor var file_google_api_metric_proto_rawDesc = []byte{ @@ -651,7 +660,11 @@ var file_google_api_metric_proto_rawDesc = []byte{ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, +<<<<<<< HEAD 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf0, 0x09, 0x0a, +======= + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc1, 0x07, 0x0a, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) 0x10, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, @@ -686,7 +699,11 @@ var file_google_api_metric_proto_rawDesc = []byte{ 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x16, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, +<<<<<<< HEAD 0x73, 0x1a, 0x87, 0x04, 0x0a, 0x18, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, +======= + 0x73, 0x1a, 0xd8, 0x01, 0x0a, 0x18, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, +>>>>>>> 70e0318b1 ([WIP] 
add archivista storage backend) 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3e, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, @@ -699,6 +716,7 @@ var file_google_api_metric_proto_rawDesc = []byte{ 0x0a, 0x0c, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, +<<<<<<< HEAD 0x0b, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0xa6, 0x01, 0x0a, 0x24, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x79, 0x5f, @@ -747,6 +765,37 @@ var file_google_api_metric_proto_rawDesc = []byte{ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x3b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +======= + 0x0b, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x22, 0x4f, 0x0a, 0x0a, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x1b, 0x0a, 0x17, 0x4d, 0x45, + 0x54, 0x52, 0x49, 0x43, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, 0x45, + 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x44, 0x45, 0x4c, 0x54, 0x41, 0x10, 0x02, 0x12, 0x0e, 0x0a, + 0x0a, 0x43, 0x55, 0x4d, 0x55, 0x4c, 0x41, 0x54, 0x49, 0x56, 0x45, 0x10, 0x03, 0x22, 0x71, 0x0a, + 0x09, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x56, 0x41, + 0x4c, 0x55, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x01, + 0x12, 0x09, 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x44, + 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, + 0x47, 0x10, 0x04, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x49, 0x53, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, + 0x49, 0x4f, 0x4e, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x4f, 0x4e, 0x45, 0x59, 0x10, 0x06, + 0x22, 0x8f, 0x01, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x36, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x42, 0x5f, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, + 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x3b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0xa2, 0x02, 0x04, 0x47, + 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var ( @@ -761,6 +810,7 @@ func file_google_api_metric_proto_rawDescGZIP() []byte { return file_google_api_metric_proto_rawDescData } +<<<<<<< HEAD var file_google_api_metric_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_google_api_metric_proto_msgTypes = make([]protoimpl.MessageInfo, 4) var file_google_api_metric_proto_goTypes = []interface{}{ @@ -791,6 +841,36 @@ var file_google_api_metric_proto_depIdxs = []int32{ 10, // [10:10] is the sub-list for extension type_name 10, // [10:10] is the sub-list for extension extendee 0, // [0:10] is the sub-list for field type_name +======= +var file_google_api_metric_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_google_api_metric_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_google_api_metric_proto_goTypes = []interface{}{ + (MetricDescriptor_MetricKind)(0), // 0: google.api.MetricDescriptor.MetricKind + (MetricDescriptor_ValueType)(0), // 1: google.api.MetricDescriptor.ValueType + (*MetricDescriptor)(nil), // 2: google.api.MetricDescriptor + (*Metric)(nil), // 3: google.api.Metric + (*MetricDescriptor_MetricDescriptorMetadata)(nil), // 4: google.api.MetricDescriptor.MetricDescriptorMetadata + nil, // 5: google.api.Metric.LabelsEntry + (*label.LabelDescriptor)(nil), // 6: google.api.LabelDescriptor + (api.LaunchStage)(0), // 7: google.api.LaunchStage + (*durationpb.Duration)(nil), // 8: google.protobuf.Duration +} +var file_google_api_metric_proto_depIdxs = []int32{ + 6, // 0: google.api.MetricDescriptor.labels:type_name -> google.api.LabelDescriptor + 0, // 1: google.api.MetricDescriptor.metric_kind:type_name -> google.api.MetricDescriptor.MetricKind + 1, // 2: google.api.MetricDescriptor.value_type:type_name -> google.api.MetricDescriptor.ValueType + 4, // 3: google.api.MetricDescriptor.metadata:type_name -> google.api.MetricDescriptor.MetricDescriptorMetadata + 7, // 4: google.api.MetricDescriptor.launch_stage:type_name -> google.api.LaunchStage + 5, // 5: google.api.Metric.labels:type_name -> google.api.Metric.LabelsEntry + 7, // 6: google.api.MetricDescriptor.MetricDescriptorMetadata.launch_stage:type_name -> google.api.LaunchStage + 8, // 7: google.api.MetricDescriptor.MetricDescriptorMetadata.sample_period:type_name -> google.protobuf.Duration + 8, // 8: google.api.MetricDescriptor.MetricDescriptorMetadata.ingest_delay:type_name -> google.protobuf.Duration + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func init() { file_google_api_metric_proto_init() } @@ -841,7 +921,11 @@ func file_google_api_metric_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: 
file_google_api_metric_proto_rawDesc, +<<<<<<< HEAD NumEnums: 3, +======= + NumEnums: 2, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) NumMessages: 4, NumExtensions: 0, NumServices: 0, diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go index 3cd9a5bb8e..fb8540b663 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go @@ -80,12 +80,20 @@ type ErrorInfo struct { Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"` // Additional structured details about this error. // +<<<<<<< HEAD // Keys must match a regular expression of `[a-z][a-zA-Z0-9-_]+` but should // ideally be lowerCamelCase. Also, they must be limited to 64 characters in // length. When identifying the current value of an exceeded limit, the units // should be contained in the key, not the value. For example, rather than // `{"instanceLimit": "100/request"}`, should be returned as, // `{"instanceLimitPerRequest": "100"}`, if the client exceeds the number of +======= + // Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in + // length. When identifying the current value of an exceeded limit, the units + // should be contained in the key, not the value. For example, rather than + // {"instanceLimit": "100/request"}, should be returned as, + // {"instanceLimitPerRequest": "100"}, if the client exceeds the number of +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // instances that can be created in a single (batch) request. Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } @@ -871,6 +879,7 @@ type BadRequest_FieldViolation struct { Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"` // A description of why the request element is bad. Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` +<<<<<<< HEAD // The reason of the field-level error. This is a constant value that // identifies the proximate cause of the field-level error. It should // uniquely identify the type of the FieldViolation within the scope of the @@ -881,6 +890,8 @@ type BadRequest_FieldViolation struct { // Provides a localized error message for field-level errors that is safe to // return to the API consumer. LocalizedMessage *LocalizedMessage `protobuf:"bytes,4,opt,name=localized_message,json=localizedMessage,proto3" json:"localized_message,omitempty"` +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *BadRequest_FieldViolation) Reset() { @@ -929,6 +940,7 @@ func (x *BadRequest_FieldViolation) GetDescription() string { return "" } +<<<<<<< HEAD func (x *BadRequest_FieldViolation) GetReason() string { if x != nil { return x.Reason @@ -943,6 +955,8 @@ func (x *BadRequest_FieldViolation) GetLocalizedMessage() *LocalizedMessage { return nil } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Describes a URL link. 
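The ErrorInfo doc comment above prescribes that the unit of an exceeded limit live in the metadata key, not the value. As a reading aid, here is a hypothetical illustration of returning such an error from a gRPC server; the reason, domain, and limit shown are invented for the example and are not part of this patch:

    package quota

    import (
    	"google.golang.org/genproto/googleapis/rpc/errdetails"
    	"google.golang.org/grpc/codes"
    	"google.golang.org/grpc/status"
    )

    // limitExceededErr builds a ResourceExhausted status whose ErrorInfo
    // metadata follows the key convention quoted above.
    func limitExceededErr() error {
    	st := status.New(codes.ResourceExhausted, "too many instances requested")
    	st, err := st.WithDetails(&errdetails.ErrorInfo{
    		Reason: "INSTANCE_LIMIT_EXCEEDED",
    		Domain: "example.googleapis.com",
    		Metadata: map[string]string{
    			// Unit lives in the key: not {"instanceLimit": "100/request"}.
    			"instanceLimitPerRequest": "100",
    		},
    	})
    	if err != nil {
    		return status.Error(codes.Internal, "attaching error details failed")
    	}
    	return st.Err()
    }
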
type Help_Link struct { state protoimpl.MessageState @@ -1051,12 +1065,17 @@ var file_google_rpc_error_details_proto_rawDesc = []byte{ 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, +<<<<<<< HEAD 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x02, 0x0a, 0x0a, 0x42, 0x61, +======= + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa8, 0x01, 0x0a, 0x0a, 0x42, 0x61, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, +<<<<<<< HEAD 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xab, 0x01, 0x0a, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, @@ -1102,6 +1121,47 @@ var file_google_rpc_error_details_proto_rawDesc = []byte{ 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +======= + 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x48, 0x0a, 0x0e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, + 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, + 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 
0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04, 0x48, 0x65, 0x6c, 0x70, + 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x6c, + 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3a, 0x0a, + 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a, 0x10, 0x4c, 0x6f, 0x63, + 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, + 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, + 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, + 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72, + 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var ( @@ -1142,12 +1202,20 @@ var file_google_rpc_error_details_proto_depIdxs = []int32{ 12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation 13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation 14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link +<<<<<<< HEAD 9, // 6: google.rpc.BadRequest.FieldViolation.localized_message:type_name -> google.rpc.LocalizedMessage 7, // [7:7] is the sub-list for method output_type 7, // [7:7] is the sub-list for method input_type 7, // [7:7] is the sub-list for extension type_name 7, // [7:7] is the sub-list for extension extendee 0, // [0:7] is the sub-list for field type_name +======= + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func init() { file_google_rpc_error_details_proto_init() } diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index 382ad69411..13b0558f27 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -73,6 +73,20 @@ func unregisterForTesting(name string) { delete(m, name) } 
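The hunk below restores, on the [WIP] side of the conflict, the SubConn contract that newer gRPC releases moved out of balancer.go: all SubConns start in IDLE, never dial on their own, and must be driven by Connect. For orientation, a minimal, hypothetical sketch of a custom LB policy consuming that contract follows. It assumes only the balancer API vendored in this patch; the helper name trackSubConn and the logging are illustrative, not part of the change:

    package lbsketch

    import (
    	"log"

    	"google.golang.org/grpc/balancer"
    	"google.golang.org/grpc/connectivity"
    	"google.golang.org/grpc/resolver"
    )

    // trackSubConn creates a SubConn for addr and reacts to its state
    // updates through the StateListener, per the contract described below.
    func trackSubConn(cc balancer.ClientConn, addr resolver.Address) (balancer.SubConn, error) {
    	var sc balancer.SubConn
    	sc, err := cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{
    		StateListener: func(s balancer.SubConnState) {
    			switch s.ConnectivityState {
    			case connectivity.Ready:
    				// Connection established; a picker may now route RPCs to sc.
    			case connectivity.TransientFailure:
    				// ConnectionError records why the connect attempt failed.
    				log.Printf("subconn to %s failed: %v", addr.Addr, s.ConnectionError)
    			case connectivity.Idle:
    				// A SubConn that re-enters IDLE stays there until the
    				// balancer calls Connect again.
    				sc.Connect()
    			}
    		},
    	})
    	if err != nil {
    		return nil, err
    	}
    	sc.Connect() // SubConns start in IDLE and never dial on their own.
    	return sc, nil
    }
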
+<<<<<<< HEAD +======= +// connectedAddress returns the connected address for a SubConnState. The +// address is only valid if the state is READY. +func connectedAddress(scs SubConnState) resolver.Address { + return scs.connectedAddress +} + +// setConnectedAddress sets the connected address for a SubConnState. +func setConnectedAddress(scs *SubConnState, addr resolver.Address) { + scs.connectedAddress = addr +} + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func init() { internal.BalancerUnregister = unregisterForTesting internal.ConnectedAddress = connectedAddress @@ -95,6 +109,60 @@ func Get(name string) Builder { return nil } +<<<<<<< HEAD +======= +// A SubConn represents a single connection to a gRPC backend service. +// +// Each SubConn contains a list of addresses. +// +// All SubConns start in IDLE, and will not try to connect. To trigger the +// connecting, Balancers must call Connect. If a connection re-enters IDLE, +// Balancers must call Connect again to trigger a new connection attempt. +// +// gRPC will try to connect to the addresses in sequence, and stop trying the +// remainder once the first connection is successful. If an attempt to connect +// to all addresses encounters an error, the SubConn will enter +// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE. +// +// Once established, if a connection is lost, the SubConn will transition +// directly to IDLE. +// +// This interface is to be implemented by gRPC. Users should not need their own +// implementation of this interface. For situations like testing, any +// implementations should embed this interface. This allows gRPC to add new +// methods to this interface. +type SubConn interface { + // UpdateAddresses updates the addresses used in this SubConn. + // gRPC checks if currently-connected address is still in the new list. + // If it's in the list, the connection will be kept. + // If it's not in the list, the connection will gracefully close, and + // a new connection will be created. + // + // This will trigger a state transition for the SubConn. + // + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. + UpdateAddresses([]resolver.Address) + // Connect starts the connecting for this SubConn. + Connect() + // GetOrBuildProducer returns a reference to the existing Producer for this + // ProducerBuilder in this SubConn, or, if one does not currently exist, + // creates a new one and returns it. Returns a close function which may be + // called when the Producer is no longer needed. Otherwise the producer + // will automatically be closed upon connection loss or subchannel close. + // Should only be called on a SubConn in state Ready. Otherwise the + // producer will be unable to create streams. + GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) + // Shutdown shuts down the SubConn gracefully. Any started RPCs will be + // allowed to complete. No future calls should be made on the SubConn. + // One final state update will be delivered to the StateListener (or + // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to + // indicate the shutdown operation. This may be delivered before + // in-progress RPCs are complete and the actual connection is closed. + Shutdown() +} + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // NewSubConnOptions contains options to create new SubConn. 
type NewSubConnOptions struct { // CredsBundle is the credentials bundle that will be used in the created @@ -362,6 +430,21 @@ type ExitIdler interface { ExitIdle() } +<<<<<<< HEAD +======= +// SubConnState describes the state of a SubConn. +type SubConnState struct { + // ConnectivityState is the connectivity state of the SubConn. + ConnectivityState connectivity.State + // ConnectionError is set if the ConnectivityState is TransientFailure, + // describing the reason the SubConn failed. Otherwise, it is nil. + ConnectionError error + // connectedAddr contains the connected address when ConnectivityState is + // Ready. Otherwise, it is indeterminate. + connectedAddress resolver.Address +} + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // ClientConnState describes the state of a ClientConn relevant to the // balancer. type ClientConnState struct { @@ -374,3 +457,25 @@ type ClientConnState struct { // ErrBadResolverState may be returned by UpdateClientConnState to indicate a // problem with the provided name resolver data. var ErrBadResolverState = errors.New("bad resolver state") +<<<<<<< HEAD +======= + +// A ProducerBuilder is a simple constructor for a Producer. It is used by the +// SubConn to create producers when needed. +type ProducerBuilder interface { + // Build creates a Producer. The first parameter is always a + // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the + // associated SubConn), but is declared as `any` to avoid a dependency + // cycle. Build also returns a close function that will be called when all + // references to the Producer have been given up for a SubConn, or when a + // connectivity state change occurs on the SubConn. The close function + // should always block until all asynchronous cleanup work is completed. + Build(grpcClientConnInterface any) (p Producer, close func()) +} + +// A Producer is a type shared among potentially many consumers. It is +// associated with a SubConn, and an implementation will typically contain +// other methods to provide additional functionality, e.g. configuration or +// subscription registration. +type Producer any +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go index 86d495bb62..cae53b4b64 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -19,7 +19,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: +<<<<<<< HEAD // protoc-gen-go v1.35.2 +======= +// protoc-gen-go v1.34.2 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // protoc v5.27.1 // source: grpc/lb/v1/load_balancer.proto @@ -55,9 +59,17 @@ type LoadBalanceRequest struct { func (x *LoadBalanceRequest) Reset() { *x = LoadBalanceRequest{} +<<<<<<< HEAD mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *LoadBalanceRequest) String() string { @@ -68,7 +80,11 @@ func (*LoadBalanceRequest) ProtoMessage() {} func (x *LoadBalanceRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[0] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -137,9 +153,17 @@ type InitialLoadBalanceRequest struct { func (x *InitialLoadBalanceRequest) Reset() { *x = InitialLoadBalanceRequest{} +<<<<<<< HEAD mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *InitialLoadBalanceRequest) String() string { @@ -150,7 +174,11 @@ func (*InitialLoadBalanceRequest) ProtoMessage() {} func (x *InitialLoadBalanceRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[1] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -186,9 +214,17 @@ type ClientStatsPerToken struct { func (x *ClientStatsPerToken) Reset() { *x = ClientStatsPerToken{} +<<<<<<< HEAD mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ClientStatsPerToken) String() string { @@ -199,7 +235,11 @@ func (*ClientStatsPerToken) ProtoMessage() {} func (x *ClientStatsPerToken) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[2] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -252,9 +292,17 @@ type ClientStats struct { func (x *ClientStats) Reset() { *x = ClientStats{} +<<<<<<< HEAD mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := 
&file_grpc_lb_v1_load_balancer_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ClientStats) String() string { @@ -265,7 +313,11 @@ func (*ClientStats) ProtoMessage() {} func (x *ClientStats) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[3] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -337,9 +389,17 @@ type LoadBalanceResponse struct { func (x *LoadBalanceResponse) Reset() { *x = LoadBalanceResponse{} +<<<<<<< HEAD mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *LoadBalanceResponse) String() string { @@ -350,7 +410,11 @@ func (*LoadBalanceResponse) ProtoMessage() {} func (x *LoadBalanceResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[4] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -428,9 +492,17 @@ type FallbackResponse struct { func (x *FallbackResponse) Reset() { *x = FallbackResponse{} +<<<<<<< HEAD mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *FallbackResponse) String() string { @@ -441,7 +513,11 @@ func (*FallbackResponse) ProtoMessage() {} func (x *FallbackResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[5] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -469,9 +545,17 @@ type InitialLoadBalanceResponse struct { func (x *InitialLoadBalanceResponse) Reset() { *x = InitialLoadBalanceResponse{} +<<<<<<< HEAD mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *InitialLoadBalanceResponse) String() string { @@ -482,7 +566,11 @@ func (*InitialLoadBalanceResponse) ProtoMessage() {} func (x *InitialLoadBalanceResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[6] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add 
archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -518,9 +606,17 @@ type ServerList struct { func (x *ServerList) Reset() { *x = ServerList{} +<<<<<<< HEAD mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ServerList) String() string { @@ -531,7 +627,11 @@ func (*ServerList) ProtoMessage() {} func (x *ServerList) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[7] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -581,9 +681,17 @@ type Server struct { func (x *Server) Reset() { *x = Server{} +<<<<<<< HEAD mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Server) String() string { @@ -594,7 +702,11 @@ func (*Server) ProtoMessage() {} func (x *Server) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[8] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -799,6 +911,119 @@ func file_grpc_lb_v1_load_balancer_proto_init() { if File_grpc_lb_v1_load_balancer_proto != nil { return } +<<<<<<< HEAD +======= + if !protoimpl.UnsafeEnabled { + file_grpc_lb_v1_load_balancer_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*LoadBalanceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lb_v1_load_balancer_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*InitialLoadBalanceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lb_v1_load_balancer_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*ClientStatsPerToken); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lb_v1_load_balancer_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*ClientStats); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lb_v1_load_balancer_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*LoadBalanceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lb_v1_load_balancer_proto_msgTypes[5].Exporter = func(v 
any, i int) any { + switch v := v.(*FallbackResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lb_v1_load_balancer_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*InitialLoadBalanceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lb_v1_load_balancer_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*ServerList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lb_v1_load_balancer_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*Server); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) file_grpc_lb_v1_load_balancer_proto_msgTypes[0].OneofWrappers = []any{ (*LoadBalanceRequest_InitialRequest)(nil), (*LoadBalanceRequest_ClientStats)(nil), diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go index 9ff07522d7..3771f8c93e 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go @@ -19,7 +19,11 @@ package grpclb import ( +<<<<<<< HEAD rand "math/rand/v2" +======= + "math/rand" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "sync" "sync/atomic" @@ -112,7 +116,11 @@ type rrPicker struct { func newRRPicker(readySCs []balancer.SubConn) *rrPicker { return &rrPicker{ subConns: readySCs, +<<<<<<< HEAD subConnsNext: rand.IntN(len(readySCs)), +======= + subConnsNext: rand.Intn(len(readySCs)), +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -147,7 +155,11 @@ func newLBPicker(serverList []*lbpb.Server, readySCs []balancer.SubConn, stats * return &lbPicker{ serverList: serverList, subConns: readySCs, +<<<<<<< HEAD subConnsNext: rand.IntN(len(readySCs)), +======= + subConnsNext: rand.Intn(len(readySCs)), +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) stats: stats, } } diff --git a/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go b/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go index 6dede1a40b..85bf145a48 100644 --- a/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go +++ b/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go @@ -22,7 +22,11 @@ package leastrequest import ( "encoding/json" "fmt" +<<<<<<< HEAD rand "math/rand/v2" +======= + "math/rand" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "sync/atomic" "google.golang.org/grpc/balancer" diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go index 7d66cb491c..24d5af76eb 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go @@ -18,6 +18,7 @@ // Package internal contains code internal to the pickfirst package. package internal +<<<<<<< HEAD import ( rand "math/rand/v2" "time" @@ -33,3 +34,9 @@ var ( return func() { timer.Stop() } } ) +======= +import "math/rand" + +// RandShuffle pseudo-randomizes the order of addresses. 
+var RandShuffle = rand.Shuffle +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index ea8899818c..3add356c78 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -23,7 +23,11 @@ import ( "encoding/json" "errors" "fmt" +<<<<<<< HEAD rand "math/rand/v2" +======= + "math/rand" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/pickfirst/internal" diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go index 76fa5fea95..0d359193e4 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go @@ -29,15 +29,22 @@ import ( "encoding/json" "errors" "fmt" +<<<<<<< HEAD "net" "net/netip" "sync" "time" +======= + "sync" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/pickfirst/internal" "google.golang.org/grpc/connectivity" +<<<<<<< HEAD expstats "google.golang.org/grpc/experimental/stats" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/envconfig" internalgrpclog "google.golang.org/grpc/internal/grpclog" @@ -54,6 +61,7 @@ func init() { balancer.Register(pickfirstBuilder{}) } +<<<<<<< HEAD type ( // enableHealthListenerKeyType is a unique key type used in resolver // attributes to indicate whether the health listener usage is enabled. @@ -67,11 +75,14 @@ type ( managedByPickfirstKeyType struct{} ) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var ( logger = grpclog.Component("pick-first-leaf-lb") // Name is the name of the pick_first_leaf balancer. // It is changed to "pick_first" in init() if this balancer is to be // registered as the default pickfirst. +<<<<<<< HEAD Name = "pick_first_leaf" disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ Name: "grpc.lb.pick_first.disconnections", @@ -125,6 +136,23 @@ func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) subConns: resolver.NewAddressMap(), state: connectivity.Connecting, cancelConnectionTimer: func() {}, +======= + Name = "pick_first_leaf" +) + +// TODO: change to pick-first when this becomes the default pick_first policy. +const logPrefix = "[pick-first-leaf-lb %p] " + +type pickfirstBuilder struct{} + +func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { + b := &pickfirstBalancer{ + cc: cc, + addressList: addressList{}, + subConns: resolver.NewAddressMap(), + state: connectivity.Connecting, + mu: sync.Mutex{}, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) return b @@ -142,6 +170,7 @@ func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalan return cfg, nil } +<<<<<<< HEAD // EnableHealthListener updates the state to configure pickfirst for using a // generic health listener. 
func EnableHealthListener(state resolver.State) resolver.State { @@ -160,6 +189,8 @@ func IsManagedByPickfirst(addr resolver.Address) bool { return addr.BalancerAttributes.Value(managedByPickfirstKeyType{}) != nil } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type pfConfig struct { serviceconfig.LoadBalancingConfig `json:"-"` @@ -177,6 +208,7 @@ type scData struct { subConn balancer.SubConn addr resolver.Address +<<<<<<< HEAD rawConnectivityState connectivity.State // The effective connectivity state based on raw connectivity, health state // and after following sticky TransientFailure behaviour defined in A62. @@ -191,6 +223,16 @@ func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { rawConnectivityState: connectivity.Idle, effectiveState: connectivity.Idle, addr: addr, +======= + state connectivity.State + lastErr error +} + +func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { + sd := &scData{ + state: connectivity.Idle, + addr: addr, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{ StateListener: func(state balancer.SubConnState) { @@ -207,14 +249,20 @@ func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { type pickfirstBalancer struct { // The following fields are initialized at build time and read-only after // that and therefore do not need to be guarded by a mutex. +<<<<<<< HEAD logger *internalgrpclog.PrefixLogger cc balancer.ClientConn target string metricsRecorder expstats.MetricsRecorder // guaranteed to be non nil +======= + logger *internalgrpclog.PrefixLogger + cc balancer.ClientConn +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The mutex is used to ensure synchronization of updates triggered // from the idle picker and the already serialized resolver, // SubConn state updates. +<<<<<<< HEAD mu sync.Mutex // State reported to the channel based on SubConn states and resolver // updates. @@ -226,6 +274,15 @@ type pickfirstBalancer struct { numTF int cancelConnectionTimer func() healthCheckingEnabled bool +======= + mu sync.Mutex + state connectivity.State + // scData for active subonns mapped by address. + subConns *resolver.AddressMap + addressList addressList + firstPass bool + numTF int +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // ResolverError is called by the ClientConn when the name resolver produces @@ -251,7 +308,11 @@ func (b *pickfirstBalancer) resolverErrorLocked(err error) { return } +<<<<<<< HEAD b.updateBalancerState(balancer.State{ +======= + b.cc.UpdateState(balancer.State{ +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ConnectivityState: connectivity.TransientFailure, Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, }) @@ -260,16 +321,26 @@ func (b *pickfirstBalancer) resolverErrorLocked(err error) { func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { b.mu.Lock() defer b.mu.Unlock() +<<<<<<< HEAD b.cancelConnectionTimer() if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { // Cleanup state pertaining to the previous resolver state. // Treat an empty address list like an error by calling b.ResolverError. +======= + if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { + // Cleanup state pertaining to the previous resolver state. + // Treat an empty address list like an error by calling b.ResolverError. 
+ b.state = connectivity.TransientFailure +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) b.closeSubConnsLocked() b.addressList.updateAddrs(nil) b.resolverErrorLocked(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } +<<<<<<< HEAD b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) cfg, ok := state.BalancerConfig.(pfConfig) if state.BalancerConfig != nil && !ok { return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState) @@ -292,6 +363,12 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // "Flatten the list by concatenating the ordered list of addresses for // each of the endpoints, in order." - A61 for _, endpoint := range endpoints { +<<<<<<< HEAD +======= + // "In the flattened list, interleave addresses from the two address + // families, as per RFC-8305 section 4." - A61 + // TODO: support the above language. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) newAddrs = append(newAddrs, endpoint.Addresses...) } } else { @@ -314,6 +391,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // Not de-duplicating would result in attempting to connect to the same // SubConn multiple times in the same pass. We don't want this. newAddrs = deDupAddresses(newAddrs) +<<<<<<< HEAD newAddrs = interleaveAddresses(newAddrs) prevAddr := b.addressList.currentAddress() @@ -325,6 +403,18 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // If the previous ready SubConn exists in new address list, // keep this connection and don't create new SubConns. if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) { +======= + + // Since we have a new set of addresses, we are again at first pass. + b.firstPass = true + + // If the previous ready SubConn exists in new address list, + // keep this connection and don't create new SubConns. + prevAddr := b.addressList.currentAddress() + prevAddrsCount := b.addressList.size() + b.addressList.updateAddrs(newAddrs) + if b.state == connectivity.Ready && b.addressList.seekTo(prevAddr) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } @@ -336,6 +426,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // we should still enter CONNECTING because the sticky TF behaviour // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported // due to connectivity failures. +<<<<<<< HEAD if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 { // Start connection attempt at first address. b.forceUpdateConcludedStateLocked(balancer.State{ @@ -347,6 +438,20 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until // we're READY. See A62. b.startFirstPassLocked() +======= + if b.state == connectivity.Ready || b.state == connectivity.Connecting || prevAddrsCount == 0 { + // Start connection attempt at first address. 
+ b.state = connectivity.Connecting + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + b.requestConnectionLocked() + } else if b.state == connectivity.TransientFailure { + // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until + // we're READY. See A62. + b.requestConnectionLocked() +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return nil } @@ -361,7 +466,10 @@ func (b *pickfirstBalancer) Close() { b.mu.Lock() defer b.mu.Unlock() b.closeSubConnsLocked() +<<<<<<< HEAD b.cancelConnectionTimer() +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) b.state = connectivity.Shutdown } @@ -371,6 +479,7 @@ func (b *pickfirstBalancer) Close() { func (b *pickfirstBalancer) ExitIdle() { b.mu.Lock() defer b.mu.Unlock() +<<<<<<< HEAD if b.state == connectivity.Idle { b.startFirstPassLocked() } @@ -386,6 +495,14 @@ func (b *pickfirstBalancer) startFirstPassLocked() { b.requestConnectionLocked() } +======= + if b.state == connectivity.Idle && b.addressList.currentAddress() == b.addressList.first() { + b.firstPass = true + b.requestConnectionLocked() + } +} + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (b *pickfirstBalancer) closeSubConnsLocked() { for _, sd := range b.subConns.Values() { sd.(*scData).subConn.Shutdown() @@ -407,6 +524,7 @@ func deDupAddresses(addrs []resolver.Address) []resolver.Address { return retAddrs } +<<<<<<< HEAD // interleaveAddresses interleaves addresses of both families (IPv4 and IPv6) // as per RFC-8305 section 4. // Whichever address family is first in the list is followed by an address of @@ -471,6 +589,8 @@ func addressFamily(address string) ipAddrFamily { } } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // reconcileSubConnsLocked updates the active subchannels based on a new address // list from the resolver. It does this by: // - closing subchannels: any existing subchannels associated with addresses @@ -499,7 +619,10 @@ func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) // shutdownRemainingLocked shuts down remaining subConns. Called when a subConn // becomes ready, which means that all other subConn must be shutdown. func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) { +<<<<<<< HEAD b.cancelConnectionTimer() +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, v := range b.subConns.Values() { sd := v.(*scData) if sd.subConn != selected.subConn { @@ -540,6 +663,7 @@ func (b *pickfirstBalancer) requestConnectionLocked() { } scd := sd.(*scData) +<<<<<<< HEAD switch scd.rawConnectivityState { case connectivity.Idle: scd.subConn.Connect() @@ -596,17 +720,46 @@ func (b *pickfirstBalancer) scheduleNextConnectionLocked() { cancelled = true closeFn() }) +======= + switch scd.state { + case connectivity.Idle: + scd.subConn.Connect() + case connectivity.TransientFailure: + // Try the next address. + lastErr = scd.lastErr + continue + case connectivity.Ready: + // Should never happen. + b.logger.Errorf("Requesting a connection even though we have a READY SubConn") + case connectivity.Shutdown: + // Should never happen. + b.logger.Errorf("SubConn with state SHUTDOWN present in SubConns map") + case connectivity.Connecting: + // Wait for the SubConn to report success or failure. + } + return + } + // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the + // first pass. 
+ b.endFirstPassLocked(lastErr) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) { b.mu.Lock() defer b.mu.Unlock() +<<<<<<< HEAD oldState := sd.rawConnectivityState sd.rawConnectivityState = newState.ConnectivityState +======= + oldState := sd.state + sd.state = newState.ConnectivityState +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Previously relevant SubConns can still callback with state updates. // To prevent pickers from returning these obsolete SubConns, this logic // is included to check if the current list of active SubConns includes this // SubConn. +<<<<<<< HEAD if !b.isActiveSCData(sd) { return } @@ -623,6 +776,16 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub if newState.ConnectivityState == connectivity.Ready { connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) +======= + if activeSD, found := b.subConns.Get(sd.addr); !found || activeSD != sd { + return + } + if newState.ConnectivityState == connectivity.Shutdown { + return + } + + if newState.ConnectivityState == connectivity.Ready { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) b.shutdownRemainingLocked(sd) if !b.addressList.seekTo(sd.addr) { // This should not fail as we should have only one SubConn after @@ -630,6 +793,7 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses) return } +<<<<<<< HEAD if !b.healthCheckingEnabled { if b.logger.V(2) { b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn) @@ -654,6 +818,12 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub }) sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) { b.updateSubConnHealthState(sd, scs) +======= + b.state = connectivity.Ready + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }) return } @@ -664,6 +834,7 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub // a transport is successfully created, but the connection fails // before the SubConn can send the notification for READY. We treat // this as a successful connection and transition to IDLE. +<<<<<<< HEAD // TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second // part of the if condition below once the issue is fixed. if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) { @@ -682,6 +853,15 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub disconnectionsMetric.Record(b.metricsRecorder, 1, b.target) b.addressList.reset() b.updateBalancerState(balancer.State{ +======= + if (b.state == connectivity.Ready && newState.ConnectivityState != connectivity.Ready) || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) { + // Once a transport fails, the balancer enters IDLE and starts from + // the first address when the picker is used. 
+ b.shutdownRemainingLocked(sd) + b.state = connectivity.Idle + b.addressList.reset() + b.cc.UpdateState(balancer.State{ +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ConnectivityState: connectivity.Idle, Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)}, }) @@ -691,18 +871,29 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub if b.firstPass { switch newState.ConnectivityState { case connectivity.Connecting: +<<<<<<< HEAD // The effective state can be in either IDLE, CONNECTING or // TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in // TRANSIENT_FAILURE until it's READY. See A62. if sd.effectiveState != connectivity.TransientFailure { sd.effectiveState = connectivity.Connecting b.updateBalancerState(balancer.State{ +======= + // The balancer can be in either IDLE, CONNECTING or + // TRANSIENT_FAILURE. If it's in TRANSIENT_FAILURE, stay in + // TRANSIENT_FAILURE until it's READY. See A62. + // If the balancer is already in CONNECTING, no update is needed. + if b.state == connectivity.Idle { + b.state = connectivity.Connecting + b.cc.UpdateState(balancer.State{ +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ConnectivityState: connectivity.Connecting, Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) } case connectivity.TransientFailure: sd.lastErr = newState.ConnectionError +<<<<<<< HEAD sd.effectiveState = connectivity.TransientFailure // Since we're re-using common SubConns while handling resolver // updates, we could receive an out of turn TRANSIENT_FAILURE from @@ -720,6 +911,21 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub // End the first pass if we've seen a TRANSIENT_FAILURE from all // SubConns once. b.endFirstPassIfPossibleLocked(newState.ConnectionError) +======= + // Since we're re-using common SubConns while handling resolver + // updates, we could receive an out of turn TRANSIENT_FAILURE from + // a pass over the previous address list. We ignore such updates. + + if curAddr := b.addressList.currentAddress(); !equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { + return + } + if b.addressList.increment() { + b.requestConnectionLocked() + return + } + // End of the first pass. + b.endFirstPassLocked(newState.ConnectionError) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return } @@ -730,7 +936,11 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub b.numTF = (b.numTF + 1) % b.subConns.Len() sd.lastErr = newState.ConnectionError if b.numTF%b.subConns.Len() == 0 { +<<<<<<< HEAD b.updateBalancerState(balancer.State{ +======= + b.cc.UpdateState(balancer.State{ +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ConnectivityState: connectivity.TransientFailure, Picker: &picker{err: newState.ConnectionError}, }) @@ -744,6 +954,7 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub } } +<<<<<<< HEAD // endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the // addresses are tried and their SubConns have reported a failure. 
func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { @@ -761,18 +972,31 @@ func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { } b.firstPass = false b.updateBalancerState(balancer.State{ +======= +func (b *pickfirstBalancer) endFirstPassLocked(lastErr error) { + b.firstPass = false + b.numTF = 0 + b.state = connectivity.TransientFailure + + b.cc.UpdateState(balancer.State{ +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ConnectivityState: connectivity.TransientFailure, Picker: &picker{err: lastErr}, }) // Start re-connecting all the SubConns that are already in IDLE. for _, v := range b.subConns.Values() { sd := v.(*scData) +<<<<<<< HEAD if sd.rawConnectivityState == connectivity.Idle { +======= + if sd.state == connectivity.Idle { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) sd.subConn.Connect() } } } +<<<<<<< HEAD func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool { activeSD, found := b.subConns.Get(sd.addr) return found && activeSD == sd @@ -833,6 +1057,8 @@ func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.St b.cc.UpdateState(newState) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type picker struct { result balancer.PickResult err error @@ -889,6 +1115,18 @@ func (al *addressList) currentAddress() resolver.Address { return al.addresses[al.idx] } +<<<<<<< HEAD +======= +// first returns the first address in the list. If the list is empty, it returns +// an empty address instead. +func (al *addressList) first() resolver.Address { + if len(al.addresses) == 0 { + return resolver.Address{} + } + return al.addresses[0] +} + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (al *addressList) reset() { al.idx = 0 } @@ -911,6 +1149,7 @@ func (al *addressList) seekTo(needle resolver.Address) bool { return false } +<<<<<<< HEAD // hasNext returns whether incrementing the addressList will result in moving // past the end of the list. If the list has already moved past the end, it // returns false. @@ -921,6 +1160,8 @@ func (al *addressList) hasNext() bool { return al.idx+1 < len(al.addresses) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // equalAddressIgnoringBalAttributes returns true is a and b are considered // equal. This is different from the Equal method on the resolver.Address type // which considers all fields to determine equality. 
Here, we only consider diff --git a/vendor/google.golang.org/grpc/balancer/rls/config.go b/vendor/google.golang.org/grpc/balancer/rls/config.go index ff540aa058..c4e59d41b9 100644 --- a/vendor/google.golang.org/grpc/balancer/rls/config.go +++ b/vendor/google.golang.org/grpc/balancer/rls/config.go @@ -143,10 +143,14 @@ type lbConfigJSON struct { // - childPolicyConfigTargetFieldName: // - must be set and non-empty func (rlsBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { +<<<<<<< HEAD if logger.V(2) { logger.Infof("Received JSON service config: %v", pretty.ToJSON(c)) } +======= + logger.Infof("Received JSON service config: %v", pretty.ToJSON(c)) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) cfgJSON := &lbConfigJSON{} if err := json.Unmarshal(c, cfgJSON); err != nil { return nil, fmt.Errorf("rls: json unmarshal failed for service config %+v: %v", string(c), err) diff --git a/vendor/google.golang.org/grpc/balancer/rls/control_channel.go b/vendor/google.golang.org/grpc/balancer/rls/control_channel.go index f2ad8bc720..06ec3671c3 100644 --- a/vendor/google.golang.org/grpc/balancer/rls/control_channel.go +++ b/vendor/google.golang.org/grpc/balancer/rls/control_channel.go @@ -209,9 +209,13 @@ func (cc *controlChannel) lookup(reqKeys map[string]string, reason rlspb.RouteLo Reason: reason, StaleHeaderData: staleHeaders, } +<<<<<<< HEAD if cc.logger.V(2) { cc.logger.Infof("Sending RLS request %+v", pretty.ToJSON(req)) } +======= + cc.logger.Infof("Sending RLS request %+v", pretty.ToJSON(req)) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ctx, cancel := context.WithTimeout(context.Background(), cc.rpcTimeout) defer cancel() diff --git a/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/adaptive.go b/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/adaptive.go index 6249948ede..9cd88f2ffa 100644 --- a/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/adaptive.go +++ b/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/adaptive.go @@ -20,15 +20,24 @@ package adaptive import ( +<<<<<<< HEAD rand "math/rand/v2" +======= + "math/rand" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "sync" "time" ) // For overriding in unittests. var ( +<<<<<<< HEAD timeNowFunc = time.Now randFunc = rand.Float64 +======= + timeNowFunc = func() time.Time { return time.Now() } + randFunc = func() float64 { return rand.Float64() } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const ( diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 80a42d2251..0aa8977286 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -22,7 +22,11 @@ package roundrobin import ( +<<<<<<< HEAD rand "math/rand/v2" +======= + "math/rand" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "sync/atomic" "google.golang.org/grpc/balancer" @@ -60,7 +64,11 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { // Start at a random index, as the same RR balancer rebuilds a new // picker when SubConn states change, and we don't want to apply excess // load to the first server in the list. 
+<<<<<<< HEAD next: uint32(rand.IntN(len(scs))), +======= + next: uint32(rand.Intn(len(scs))), +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } diff --git a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go index d7b9dc4666..1cb533f7b4 100644 --- a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go @@ -19,23 +19,38 @@ package weightedroundrobin import ( +<<<<<<< HEAD "encoding/json" "fmt" rand "math/rand/v2" +======= + "context" + "encoding/json" + "errors" + "fmt" + "math/rand" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "sync" "sync/atomic" "time" "unsafe" "google.golang.org/grpc/balancer" +<<<<<<< HEAD "google.golang.org/grpc/balancer/endpointsharding" "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" +======= + "google.golang.org/grpc/balancer/base" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "google.golang.org/grpc/balancer/weightedroundrobin/internal" "google.golang.org/grpc/balancer/weightedtarget" "google.golang.org/grpc/connectivity" estats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/internal/grpclog" +<<<<<<< HEAD "google.golang.org/grpc/internal/grpcsync" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) iserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/orca" "google.golang.org/grpc/resolver" @@ -84,6 +99,7 @@ var ( }) ) +<<<<<<< HEAD // endpointSharding which specifies pick first children. var endpointShardingLBConfig serviceconfig.LoadBalancingConfig @@ -94,12 +110,17 @@ func init() { if err != nil { logger.Fatal(err) } +======= +func init() { + balancer.Register(bb{}) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type bb struct{} func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { b := &wrrBalancer{ +<<<<<<< HEAD ClientConn: cc, target: bOpts.Target.String(), metricsRecorder: bOpts.MetricsRecorder, @@ -109,6 +130,17 @@ func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Ba } b.child = endpointsharding.NewBalancer(b, bOpts) +======= + cc: cc, + subConns: resolver.NewAddressMap(), + csEvltr: &balancer.ConnectivityStateEvaluator{}, + scMap: make(map[balancer.SubConn]*weightedSubConn), + connectivityState: connectivity.Connecting, + target: bOpts.Target.String(), + metricsRecorder: bOpts.MetricsRecorder, + } + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) b.logger = prefixLogger(b) b.logger.Infof("Created") return b @@ -149,6 +181,7 @@ func (bb) Name() string { return Name } +<<<<<<< HEAD // updateEndpointsLocked updates endpoint weight state based off new update, by // starting and clearing any endpoint weights needed. // @@ -222,11 +255,38 @@ func (b *wrrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error if b.logger.V(2) { b.logger.Infof("UpdateCCS: %v", ccs) } +======= +// wrrBalancer implements the weighted round robin LB policy. +type wrrBalancer struct { + // The following fields are immutable. + cc balancer.ClientConn + logger *grpclog.PrefixLogger + target string + metricsRecorder estats.MetricsRecorder + + // The following fields are only accessed on calls into the LB policy, and + // do not need a mutex. 
+ cfg *lbConfig // active config + subConns *resolver.AddressMap // active weightedSubConns mapped by address + scMap map[balancer.SubConn]*weightedSubConn + connectivityState connectivity.State // aggregate state + csEvltr *balancer.ConnectivityStateEvaluator + resolverErr error // the last error reported by the resolver; cleared on successful resolution + connErr error // the last connection error; cleared upon leaving TransientFailure + stopPicker func() + locality string +} + +func (b *wrrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { + b.logger.Infof("UpdateCCS: %v", ccs) + b.resolverErr = nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) cfg, ok := ccs.BalancerConfig.(*lbConfig) if !ok { return fmt.Errorf("wrr: received nil or illegal BalancerConfig (type %T): %v", ccs.BalancerConfig, ccs.BalancerConfig) } +<<<<<<< HEAD // Note: empty endpoints and duplicate addresses across endpoints won't // explicitly error but will have undefined behavior. b.mu.Lock() @@ -337,6 +397,96 @@ func (b *wrrBalancer) NewSubConn(addrs []resolver.Address, opts balancer.NewSubC func (b *wrrBalancer) ResolverError(err error) { // Will cause inline picker update from endpoint sharding. b.child.ResolverError(err) +======= + b.cfg = cfg + b.locality = weightedtarget.LocalityFromResolverState(ccs.ResolverState) + b.updateAddresses(ccs.ResolverState.Addresses) + + if len(ccs.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("resolver produced zero addresses")) // will call regeneratePicker + return balancer.ErrBadResolverState + } + + b.regeneratePicker() + + return nil +} + +func (b *wrrBalancer) updateAddresses(addrs []resolver.Address) { + addrsSet := resolver.NewAddressMap() + + // Loop through new address list and create subconns for any new addresses. + for _, addr := range addrs { + if _, ok := addrsSet.Get(addr); ok { + // Redundant address; skip. + continue + } + addrsSet.Set(addr, nil) + + var wsc *weightedSubConn + wsci, ok := b.subConns.Get(addr) + if ok { + wsc = wsci.(*weightedSubConn) + } else { + // addr is a new address (not existing in b.subConns). + var sc balancer.SubConn + sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{ + StateListener: func(state balancer.SubConnState) { + b.updateSubConnState(sc, state) + }, + }) + if err != nil { + b.logger.Warningf("Failed to create new SubConn for address %v: %v", addr, err) + continue + } + wsc = &weightedSubConn{ + SubConn: sc, + logger: b.logger, + connectivityState: connectivity.Idle, + // Initially, we set load reports to off, because they are not + // running upon initial weightedSubConn creation. + cfg: &lbConfig{EnableOOBLoadReport: false}, + + metricsRecorder: b.metricsRecorder, + target: b.target, + locality: b.locality, + } + b.subConns.Set(addr, wsc) + b.scMap[sc] = wsc + b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle) + sc.Connect() + } + // Update config for existing weightedSubConn or send update for first + // time to new one. Ensures an OOB listener is running if needed + // (and stops the existing one if applicable). + wsc.updateConfig(b.cfg) + } + + // Loop through existing subconns and remove ones that are not in addrs. + for _, addr := range b.subConns.Keys() { + if _, ok := addrsSet.Get(addr); ok { + // Existing address also in new address list; skip. + continue + } + // addr was removed by resolver. Remove. 
+ wsci, _ := b.subConns.Get(addr) + wsc := wsci.(*weightedSubConn) + wsc.SubConn.Shutdown() + b.subConns.Delete(addr) + } +} + +func (b *wrrBalancer) ResolverError(err error) { + b.resolverErr = err + if b.subConns.Len() == 0 { + b.connectivityState = connectivity.TransientFailure + } + if b.connectivityState != connectivity.TransientFailure { + // No need to update the picker since no error is being returned. + return + } + b.regeneratePicker() +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (b *wrrBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { @@ -344,6 +494,7 @@ func (b *wrrBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Sub } func (b *wrrBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { +<<<<<<< HEAD b.mu.Lock() ew := b.scToWeight[sc] // updates from a no longer relevant SubConn update, nothing to do here but @@ -385,12 +536,47 @@ func (b *wrrBalancer) updateSubConnState(sc balancer.SubConn, state balancer.Sub ew.stopORCAListener() } ew.pickedSC = nil +======= + wsc := b.scMap[sc] + if wsc == nil { + b.logger.Errorf("UpdateSubConnState called with an unknown SubConn: %p, %v", sc, state) + return + } + if b.logger.V(2) { + logger.Infof("UpdateSubConnState(%+v, %+v)", sc, state) + } + + cs := state.ConnectivityState + + if cs == connectivity.TransientFailure { + // Save error to be reported via picker. + b.connErr = state.ConnectionError + } + + if cs == connectivity.Shutdown { + delete(b.scMap, sc) + // The subconn was removed from b.subConns when the address was removed + // in updateAddresses. + } + + oldCS := wsc.updateConnectivityState(cs) + b.connectivityState = b.csEvltr.RecordTransition(oldCS, cs) + + // Regenerate picker when one of the following happens: + // - this sc entered or left ready + // - the aggregated state of balancer is TransientFailure + // (may need to update error message) + if (cs == connectivity.Ready) != (oldCS == connectivity.Ready) || + b.connectivityState == connectivity.TransientFailure { + b.regeneratePicker() +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } // Close stops the balancer. It cancels any ongoing scheduler updates and // stops any ORCA listeners. func (b *wrrBalancer) Close() { +<<<<<<< HEAD b.mu.Lock() if b.stopPicker != nil { b.stopPicker.Fire() @@ -411,17 +597,106 @@ func (b *wrrBalancer) ExitIdle() { if ei, ok := b.child.(balancer.ExitIdler); ok { // Should always be ok, as child is endpoint sharding. ei.ExitIdle() } +======= + if b.stopPicker != nil { + b.stopPicker() + b.stopPicker = nil + } + for _, wsc := range b.scMap { + // Ensure any lingering OOB watchers are stopped. + wsc.updateConnectivityState(connectivity.Shutdown) + } +} + +// ExitIdle is ignored; we always connect to all backends. +func (b *wrrBalancer) ExitIdle() {} + +func (b *wrrBalancer) readySubConns() []*weightedSubConn { + var ret []*weightedSubConn + for _, v := range b.subConns.Values() { + wsc := v.(*weightedSubConn) + if wsc.connectivityState == connectivity.Ready { + ret = append(ret, wsc) + } + } + return ret +} + +// mergeErrors builds an error from the last connection error and the last +// resolver error. Must only be called if b.connectivityState is +// TransientFailure. +func (b *wrrBalancer) mergeErrors() error { + // connErr must always be non-nil unless there are no SubConns, in which + // case resolverErr must be non-nil. 
+ if b.connErr == nil { + return fmt.Errorf("last resolver error: %v", b.resolverErr) + } + if b.resolverErr == nil { + return fmt.Errorf("last connection error: %v", b.connErr) + } + return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr) +} + +func (b *wrrBalancer) regeneratePicker() { + if b.stopPicker != nil { + b.stopPicker() + b.stopPicker = nil + } + + switch b.connectivityState { + case connectivity.TransientFailure: + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(b.mergeErrors()), + }) + return + case connectivity.Connecting, connectivity.Idle: + // Idle could happen very briefly if all subconns are Idle and we've + // asked them to connect but they haven't reported Connecting yet. + // Report the same as Connecting since this is temporary. + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }) + return + case connectivity.Ready: + b.connErr = nil + } + + p := &picker{ + v: rand.Uint32(), // start the scheduler at a random point + cfg: b.cfg, + subConns: b.readySubConns(), + metricsRecorder: b.metricsRecorder, + locality: b.locality, + target: b.target, + } + var ctx context.Context + ctx, b.stopPicker = context.WithCancel(context.Background()) + p.start(ctx) + b.cc.UpdateState(balancer.State{ + ConnectivityState: b.connectivityState, + Picker: p, + }) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // picker is the WRR policy's picker. It uses live-updating backend weights to // update the scheduler periodically and ensure picks are routed proportional // to those weights. type picker struct { +<<<<<<< HEAD scheduler unsafe.Pointer // *scheduler; accessed atomically v uint32 // incrementing value used by the scheduler; accessed atomically cfg *lbConfig // active config when picker created weightedPickers []pickerWeightedEndpoint // all READY pickers +======= + scheduler unsafe.Pointer // *scheduler; accessed atomically + v uint32 // incrementing value used by the scheduler; accessed atomically + cfg *lbConfig // active config when picker created + subConns []*weightedSubConn // all READY subconns +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The following fields are immutable. 
target string @@ -429,6 +704,7 @@ type picker struct { metricsRecorder estats.MetricsRecorder } +<<<<<<< HEAD func (p *picker) endpointWeights(recordMetrics bool) []float64 { wp := make([]float64, len(p.weightedPickers)) now := internal.TimeNow() @@ -462,6 +738,16 @@ func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { } } return pr, nil +======= +func (p *picker) scWeights(recordMetrics bool) []float64 { + ws := make([]float64, len(p.subConns)) + now := internal.TimeNow() + for i, wsc := range p.subConns { + ws[i] = wsc.weight(now, time.Duration(p.cfg.WeightExpirationPeriod), time.Duration(p.cfg.BlackoutPeriod), recordMetrics) + } + + return ws +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (p *picker) inc() uint32 { @@ -473,9 +759,15 @@ func (p *picker) regenerateScheduler() { atomic.StorePointer(&p.scheduler, unsafe.Pointer(&s)) } +<<<<<<< HEAD func (p *picker) start(stopPicker *grpcsync.Event) { p.regenerateScheduler() if len(p.weightedPickers) == 1 { +======= +func (p *picker) start(ctx context.Context) { + p.regenerateScheduler() + if len(p.subConns) == 1 { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // No need to regenerate weights with only one backend. return } @@ -485,7 +777,11 @@ func (p *picker) start(stopPicker *grpcsync.Event) { defer ticker.Stop() for { select { +<<<<<<< HEAD case <-stopPicker.Done(): +======= + case <-ctx.Done(): +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return case <-ticker.C: p.regenerateScheduler() @@ -494,12 +790,38 @@ func (p *picker) start(stopPicker *grpcsync.Event) { }() } +<<<<<<< HEAD // endpointWeight is the weight for an endpoint. It tracks the SubConn that will // be picked for the endpoint, and other parameters relevant to computing the // effective weight. When needed, it also tracks connectivity state, listens for // metrics updates by implementing the orca.OOBListener interface and manages // that listener. type endpointWeight struct { +======= +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + // Read the scheduler atomically. All scheduler operations are threadsafe, + // and if the scheduler is replaced during this usage, we want to use the + // scheduler that was live when the pick started. + sched := *(*scheduler)(atomic.LoadPointer(&p.scheduler)) + + pickedSC := p.subConns[sched.nextIndex()] + pr := balancer.PickResult{SubConn: pickedSC.SubConn} + if !p.cfg.EnableOOBLoadReport { + pr.Done = func(info balancer.DoneInfo) { + if load, ok := info.ServerLoad.(*v3orcapb.OrcaLoadReport); ok && load != nil { + pickedSC.OnLoadReport(load) + } + } + } + return pr, nil +} + +// weightedSubConn is the wrapper of a subconn that holds the subconn and its +// weight (and other parameters relevant to computing the effective weight). +// When needed, it also tracks connectivity state, listens for metrics updates +// by implementing the orca.OOBListener interface and manages that listener. +type weightedSubConn struct { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The following fields are immutable. balancer.SubConn logger *grpclog.PrefixLogger @@ -511,11 +833,14 @@ type endpointWeight struct { // do not need a mutex. connectivityState connectivity.State stopORCAListener func() +<<<<<<< HEAD // The first SubConn for the endpoint that goes READY when endpoint has no // READY SubConns yet, cleared on that sc disconnecting (i.e. going out of // READY). Represents what pick first will use as it's picked SubConn for // this endpoint. 
pickedSC balancer.SubConn +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The following fields are accessed asynchronously and are protected by // mu. Note that mu may not be held when calling into the stopORCAListener @@ -529,11 +854,19 @@ type endpointWeight struct { cfg *lbConfig } +<<<<<<< HEAD func (w *endpointWeight) OnLoadReport(load *v3orcapb.OrcaLoadReport) { if w.logger.V(2) { w.logger.Infof("Received load report for subchannel %v: %v", w.SubConn, load) } // Update weights of this endpoint according to the reported load. +======= +func (w *weightedSubConn) OnLoadReport(load *v3orcapb.OrcaLoadReport) { + if w.logger.V(2) { + w.logger.Infof("Received load report for subchannel %v: %v", w.SubConn, load) + } + // Update weights of this subchannel according to the reported load +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) utilization := load.ApplicationUtilization if utilization == 0 { utilization = load.CpuUtilization @@ -562,7 +895,11 @@ func (w *endpointWeight) OnLoadReport(load *v3orcapb.OrcaLoadReport) { // updateConfig updates the parameters of the WRR policy and // stops/starts/restarts the ORCA OOB listener. +<<<<<<< HEAD func (w *endpointWeight) updateConfig(cfg *lbConfig) { +======= +func (w *weightedSubConn) updateConfig(cfg *lbConfig) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) w.mu.Lock() oldCfg := w.cfg w.cfg = cfg @@ -575,12 +912,23 @@ func (w *endpointWeight) updateConfig(cfg *lbConfig) { // load reporting disabled, OOBReportingPeriod is always 0.) return } +<<<<<<< HEAD // (Re)start the listener to use the new config's settings for OOB // reporting. w.updateORCAListener(cfg) } func (w *endpointWeight) updateORCAListener(cfg *lbConfig) { +======= + if w.connectivityState == connectivity.Ready { + // (Re)start the listener to use the new config's settings for OOB + // reporting. + w.updateORCAListener(cfg) + } +} + +func (w *weightedSubConn) updateORCAListener(cfg *lbConfig) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if w.stopORCAListener != nil { w.stopORCAListener() } @@ -588,6 +936,7 @@ func (w *endpointWeight) updateORCAListener(cfg *lbConfig) { w.stopORCAListener = nil return } +<<<<<<< HEAD if w.pickedSC == nil { // No picked SC for this endpoint yet, nothing to listen on. return } @@ -599,11 +948,63 @@ func (w *endpointWeight) updateORCAListener(cfg *lbConfig) { } // weight returns the current effective weight of the endpoint, taking into +======= + if w.logger.V(2) { + w.logger.Infof("Registering ORCA listener for %v with interval %v", w.SubConn, cfg.OOBReportingPeriod) + } + opts := orca.OOBListenerOptions{ReportInterval: time.Duration(cfg.OOBReportingPeriod)} + w.stopORCAListener = orca.RegisterOOBListener(w.SubConn, w, opts) +} + +func (w *weightedSubConn) updateConnectivityState(cs connectivity.State) connectivity.State { + switch cs { + case connectivity.Idle: + // Always reconnect when idle. + w.SubConn.Connect() + case connectivity.Ready: + // If we transition back to READY state, reset nonEmptySince so that we + // apply the blackout period after we start receiving load data. Also + // reset lastUpdated to trigger endpoint weight not yet usable in the + // case endpoint gets asked what weight it is before receiving a new + // load report. 
Note that we cannot guarantee that we will never receive + // lingering callbacks for backend metric reports from the previous + // connection after the new connection has been established, but they + // should be masked by new backend metric reports from the new + // connection by the time the blackout period ends. + w.mu.Lock() + w.nonEmptySince = time.Time{} + w.lastUpdated = time.Time{} + cfg := w.cfg + w.mu.Unlock() + w.updateORCAListener(cfg) + } + + oldCS := w.connectivityState + + if oldCS == connectivity.TransientFailure && + (cs == connectivity.Connecting || cs == connectivity.Idle) { + // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent IDLE or + // CONNECTING transitions to prevent the aggregated state from being + // always CONNECTING when many backends exist but are all down. + return oldCS + } + + w.connectivityState = cs + + return oldCS +} + +// weight returns the current effective weight of the subconn, taking into +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // account the parameters. Returns 0 for blacked out or expired data, which // will cause the backend weight to be treated as the mean of the weights of the // other backends. If forScheduler is set to true, this function will emit // metrics through the metrics registry. +<<<<<<< HEAD func (w *endpointWeight) weight(now time.Time, weightExpirationPeriod, blackoutPeriod time.Duration, recordMetrics bool) (weight float64) { +======= +func (w *weightedSubConn) weight(now time.Time, weightExpirationPeriod, blackoutPeriod time.Duration, recordMetrics bool) (weight float64) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) w.mu.Lock() defer w.mu.Unlock() @@ -613,7 +1014,11 @@ func (w *endpointWeight) weight(now time.Time, weightExpirationPeriod, blackoutP }() } +<<<<<<< HEAD // The endpoint has not received a load report (i.e. just turned READY with +======= + // The SubConn has not received a load report (i.e. just turned READY with +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // no load report). if w.lastUpdated.Equal(time.Time{}) { endpointWeightNotYetUsableMetric.Record(w.metricsRecorder, 1, w.target, w.locality) diff --git a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go index 7d3d6815eb..ecf378d61f 100644 --- a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go +++ b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go @@ -26,6 +26,7 @@ type scheduler interface { nextIndex() int } +<<<<<<< HEAD // newScheduler uses scWeights to create a new scheduler for selecting endpoints // in a picker. It will return a round robin implementation if at least // len(scWeights)-1 are zero or there is only a single endpoint, otherwise it @@ -34,6 +35,16 @@ type scheduler interface { func (p *picker) newScheduler(recordMetrics bool) scheduler { epWeights := p.endpointWeights(recordMetrics) n := len(epWeights) +======= +// newScheduler uses scWeights to create a new scheduler for selecting subconns +// in a picker. It will return a round robin implementation if at least +// len(scWeights)-1 are zero or there is only a single subconn, otherwise it +// will return an Earliest Deadline First (EDF) scheduler implementation that +// selects the subchannels according to their weights. 
+func (p *picker) newScheduler(recordMetrics bool) scheduler { + scWeights := p.scWeights(recordMetrics) + n := len(scWeights) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if n == 0 { return nil } @@ -46,7 +57,11 @@ func (p *picker) newScheduler(recordMetrics bool) scheduler { sum := float64(0) numZero := 0 max := float64(0) +<<<<<<< HEAD for _, w := range epWeights { +======= + for _, w := range scWeights { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) sum += w if w > max { max = w @@ -68,7 +83,11 @@ func (p *picker) newScheduler(recordMetrics bool) scheduler { weights := make([]uint16, n) allEqual := true +<<<<<<< HEAD for i, w := range epWeights { +======= + for i, w := range scWeights { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if w == 0 { // Backends with weight = 0 use the mean. weights[i] = mean diff --git a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/weightedroundrobin.go b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/weightedroundrobin.go index 258cdd5db2..1d947f6a43 100644 --- a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/weightedroundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/weightedroundrobin.go @@ -56,6 +56,7 @@ func SetAddrInfo(addr resolver.Address, addrInfo AddrInfo) resolver.Address { return addr } +<<<<<<< HEAD // SetAddrInfoInEndpoint returns a copy of endpoint in which the Attributes // field is updated with addrInfo. func SetAddrInfoInEndpoint(endpoint resolver.Endpoint, addrInfo AddrInfo) resolver.Endpoint { @@ -63,6 +64,8 @@ func SetAddrInfoInEndpoint(endpoint resolver.Endpoint, addrInfo AddrInfo) resolv return endpoint } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // GetAddrInfo returns the AddrInfo stored in the BalancerAttributes field of // addr. func GetAddrInfo(addr resolver.Address) AddrInfo { diff --git a/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go index a617f6a63a..c1ca4a39d2 100644 --- a/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go +++ b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go @@ -99,16 +99,23 @@ func LocalityFromResolverState(state resolver.State) string { // creates/deletes sub-balancers and sends them update. addresses are split into // groups based on hierarchy path. 
func (b *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnState) error { +<<<<<<< HEAD if b.logger.V(2) { b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) } +======= + b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) newConfig, ok := s.BalancerConfig.(*LBConfig) if !ok { return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) } addressesSplit := hierarchy.Group(s.ResolverState.Addresses) +<<<<<<< HEAD endpointsSplit := hierarchy.GroupEndpoints(s.ResolverState.Endpoints) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) b.stateAggregator.PauseStateUpdates() defer b.stateAggregator.ResumeStateUpdates() @@ -156,7 +163,10 @@ func (b *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnStat _ = b.bg.UpdateClientConnState(name, balancer.ClientConnState{ ResolverState: resolver.State{ Addresses: addressesSplit[name], +<<<<<<< HEAD Endpoints: endpointsSplit[name], +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ServiceConfig: s.ResolverState.ServiceConfig, Attributes: s.ResolverState.Attributes.WithValue(localityKey, name), }, diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go index c2688376ae..b034539fa9 100644 --- a/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -34,6 +34,7 @@ import ( "google.golang.org/grpc/status" ) +<<<<<<< HEAD var ( setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address)) // noOpRegisterHealthListenerFn is used when client side health checking is @@ -43,6 +44,9 @@ var ( return func() {} } ) +======= +var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address)) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // ccBalancerWrapper sits between the ClientConn and the Balancer. // @@ -197,7 +201,10 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer), stateListener: opts.StateListener, +<<<<<<< HEAD healthData: newHealthData(connectivity.Idle), +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } ac.acbw = acbw return acbw, nil @@ -263,13 +270,17 @@ func (ccb *ccBalancerWrapper) Target() string { // acBalancerWrapper is a wrapper on top of ac for balancers. // It implements balancer.SubConn interface. type acBalancerWrapper struct { +<<<<<<< HEAD internal.EnforceSubConnEmbedding +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ac *addrConn // read-only ccb *ccBalancerWrapper // read-only stateListener func(balancer.SubConnState) producersMu sync.Mutex producers map[balancer.ProducerBuilder]*refCountedProducer +<<<<<<< HEAD // Access to healthData is protected by healthMu. 
healthMu sync.Mutex @@ -296,6 +307,8 @@ func newHealthData(s connectivity.State) *healthData { connectivityState: s, closeHealthProducer: func() {}, } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // updateState is invoked by grpc to push a subConn state update to the @@ -315,6 +328,7 @@ func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolve if s == connectivity.Ready { setConnectedAddress(&scs, curAddr) } +<<<<<<< HEAD // Invalidate the health listener by updating the healthData. acbw.healthMu.Lock() // A race may occur if a health listener is registered soon after the @@ -333,6 +347,8 @@ func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolve acbw.healthData = newHealthData(scs.ConnectivityState) acbw.healthMu.Unlock() +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) acbw.stateListener(scs) }) } @@ -427,6 +443,7 @@ func (acbw *acBalancerWrapper) closeProducers() { delete(acbw.producers, pb) } } +<<<<<<< HEAD // healthProducerRegisterFn is a type alias for the health producer's function // for registering listeners. @@ -513,3 +530,5 @@ func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.Sub hd.closeHealthProducer = registerFn(ctx, listenerWrapper) }) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 21dd72969a..01fc013464 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: +<<<<<<< HEAD // protoc-gen-go v1.35.2 +======= +// protoc-gen-go v1.34.2 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto @@ -274,9 +278,17 @@ type GrpcLogEntry struct { func (x *GrpcLogEntry) Reset() { *x = GrpcLogEntry{} +<<<<<<< HEAD mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *GrpcLogEntry) String() string { @@ -287,7 +299,11 @@ func (*GrpcLogEntry) ProtoMessage() {} func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -438,9 +454,17 @@ type ClientHeader struct { func (x *ClientHeader) Reset() { *x = ClientHeader{} +<<<<<<< HEAD mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ClientHeader) String() string { @@ -451,7 +475,11 @@ func (*ClientHeader) ProtoMessage() {} func (x *ClientHeader) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -505,9 +533,17 @@ type ServerHeader struct { func (x *ServerHeader) Reset() { *x = ServerHeader{} +<<<<<<< HEAD mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ServerHeader) String() string { @@ -518,7 +554,11 @@ func (*ServerHeader) ProtoMessage() {} func (x *ServerHeader) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -559,9 +599,17 @@ type Trailer struct { func (x *Trailer) Reset() { *x = Trailer{} +<<<<<<< HEAD mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage 
backend) } func (x *Trailer) String() string { @@ -572,7 +620,11 @@ func (*Trailer) ProtoMessage() {} func (x *Trailer) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -630,9 +682,17 @@ type Message struct { func (x *Message) Reset() { *x = Message{} +<<<<<<< HEAD mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Message) String() string { @@ -643,7 +703,11 @@ func (*Message) ProtoMessage() {} func (x *Message) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -703,9 +767,17 @@ type Metadata struct { func (x *Metadata) Reset() { *x = Metadata{} +<<<<<<< HEAD mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Metadata) String() string { @@ -716,7 +788,11 @@ func (*Metadata) ProtoMessage() {} func (x *Metadata) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -750,9 +826,17 @@ type MetadataEntry struct { func (x *MetadataEntry) Reset() { *x = MetadataEntry{} +<<<<<<< HEAD mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *MetadataEntry) String() string { @@ -763,7 +847,11 @@ func (*MetadataEntry) ProtoMessage() {} func (x *MetadataEntry) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -806,9 +894,17 @@ type Address struct { func (x *Address) Reset() { *x = Address{} +<<<<<<< HEAD mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if 
protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Address) String() string { @@ -819,7 +915,11 @@ func (*Address) ProtoMessage() {} func (x *Address) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1041,6 +1141,107 @@ func file_grpc_binlog_v1_binarylog_proto_init() { if File_grpc_binlog_v1_binarylog_proto != nil { return } +<<<<<<< HEAD +======= + if !protoimpl.UnsafeEnabled { + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*GrpcLogEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*ClientHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*ServerHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*Trailer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*Message); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*Metadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*MetadataEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*Address); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{ (*GrpcLogEntry_ClientHeader)(nil), (*GrpcLogEntry_ServerHeader)(nil), diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 4f57b55434..4c9ae4fb1b 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -775,7 +775,14 @@ func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) } } +<<<<<<< HEAD balCfg := cc.sc.lbConfig +======= + var balCfg serviceconfig.LoadBalancingConfig + if cc.sc != nil && cc.sc.lbConfig != nil { + balCfg 
= cc.sc.lbConfig + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) bw := cc.balancerWrapper cc.mu.Unlock() @@ -1371,7 +1378,11 @@ func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, defer cancel() copts.ChannelzParent = ac.channelz +<<<<<<< HEAD newTr, err := transport.NewHTTP2Client(connectCtx, ac.cc.ctx, addr, copts, onClose) +======= + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { if logger.V(2) { logger.Infof("Creating new client transport to %q: %v", addr, err) @@ -1445,7 +1456,11 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { if !ac.scopts.HealthCheckEnabled { return } +<<<<<<< HEAD healthCheckFunc := internal.HealthCheckFunc +======= + healthCheckFunc := ac.cc.dopts.healthCheckFunc +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if healthCheckFunc == nil { // The health package is not imported to set health check function. // @@ -1477,7 +1492,11 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { } // Start the health checking stream. go func() { +<<<<<<< HEAD err := healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) +======= + err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { if status.Code(err) == codes.Unimplemented { channelz.Error(logger, ac.channelz, "Subchannel health check is unimplemented at server side, thus health check is disabled") diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go index 959c2f99d4..e3c3dd529a 100644 --- a/vendor/google.golang.org/grpc/codec.go +++ b/vendor/google.golang.org/grpc/codec.go @@ -71,7 +71,11 @@ func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) { if err != nil { return nil, err } +<<<<<<< HEAD return mem.BufferSlice{mem.SliceBuffer(data)}, nil +======= + return mem.BufferSlice{mem.NewBuffer(&data, nil)}, nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) { diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go index 40e42b6ae5..2403b6e775 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go @@ -17,7 +17,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: +<<<<<<< HEAD // protoc-gen-go v1.35.2 +======= +// protoc-gen-go v1.34.2 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // protoc v5.27.1 // source: grpc/gcp/altscontext.proto @@ -60,9 +64,17 @@ type AltsContext struct { func (x *AltsContext) Reset() { *x = AltsContext{} +<<<<<<< HEAD mi := &file_grpc_gcp_altscontext_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_altscontext_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *AltsContext) String() string { @@ -73,7 +85,11 @@ func (*AltsContext) ProtoMessage() {} func (x *AltsContext) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_altscontext_proto_msgTypes[0] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -222,6 +238,23 @@ func file_grpc_gcp_altscontext_proto_init() { return } file_grpc_gcp_transport_security_common_proto_init() +<<<<<<< HEAD +======= + if !protoimpl.UnsafeEnabled { + file_grpc_gcp_altscontext_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*AltsContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go index 2993bbfab1..f515fa0f5e 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -17,7 +17,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: +<<<<<<< HEAD // protoc-gen-go v1.35.2 +======= +// protoc-gen-go v1.34.2 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // protoc v5.27.1 // source: grpc/gcp/handshaker.proto @@ -154,9 +158,17 @@ type Endpoint struct { func (x *Endpoint) Reset() { *x = Endpoint{} +<<<<<<< HEAD mi := &file_grpc_gcp_handshaker_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Endpoint) String() string { @@ -167,7 +179,11 @@ func (*Endpoint) ProtoMessage() {} func (x *Endpoint) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_handshaker_proto_msgTypes[0] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -219,9 +235,17 @@ type Identity struct { func (x *Identity) Reset() { *x = Identity{} +<<<<<<< HEAD mi := &file_grpc_gcp_handshaker_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Identity) String() string { @@ -232,7 +256,11 @@ func (*Identity) ProtoMessage() {} func (x *Identity) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_handshaker_proto_msgTypes[1] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -336,9 +364,17 @@ type StartClientHandshakeReq struct { func (x *StartClientHandshakeReq) Reset() { *x = StartClientHandshakeReq{} +<<<<<<< HEAD mi := &file_grpc_gcp_handshaker_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *StartClientHandshakeReq) String() string { @@ -349,7 +385,11 @@ func (*StartClientHandshakeReq) ProtoMessage() {} func (x *StartClientHandshakeReq) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_handshaker_proto_msgTypes[2] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -461,9 +501,17 @@ type ServerHandshakeParameters struct { func (x *ServerHandshakeParameters) Reset() { *x = ServerHandshakeParameters{} +<<<<<<< HEAD mi := &file_grpc_gcp_handshaker_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add 
archivista storage backend) } func (x *ServerHandshakeParameters) String() string { @@ -474,7 +522,11 @@ func (*ServerHandshakeParameters) ProtoMessage() {} func (x *ServerHandshakeParameters) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_handshaker_proto_msgTypes[3] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -541,9 +593,17 @@ type StartServerHandshakeReq struct { func (x *StartServerHandshakeReq) Reset() { *x = StartServerHandshakeReq{} +<<<<<<< HEAD mi := &file_grpc_gcp_handshaker_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *StartServerHandshakeReq) String() string { @@ -554,7 +614,11 @@ func (*StartServerHandshakeReq) ProtoMessage() {} func (x *StartServerHandshakeReq) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_handshaker_proto_msgTypes[4] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -635,9 +699,17 @@ type NextHandshakeMessageReq struct { func (x *NextHandshakeMessageReq) Reset() { *x = NextHandshakeMessageReq{} +<<<<<<< HEAD mi := &file_grpc_gcp_handshaker_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *NextHandshakeMessageReq) String() string { @@ -648,7 +720,11 @@ func (*NextHandshakeMessageReq) ProtoMessage() {} func (x *NextHandshakeMessageReq) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_handshaker_proto_msgTypes[5] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -692,9 +768,17 @@ type HandshakerReq struct { func (x *HandshakerReq) Reset() { *x = HandshakerReq{} +<<<<<<< HEAD mi := &file_grpc_gcp_handshaker_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *HandshakerReq) String() string { @@ -705,7 +789,11 @@ func (*HandshakerReq) ProtoMessage() {} func (x *HandshakerReq) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_handshaker_proto_msgTypes[6] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -802,9 +890,17 @@ type HandshakerResult struct 
{ func (x *HandshakerResult) Reset() { *x = HandshakerResult{} +<<<<<<< HEAD mi := &file_grpc_gcp_handshaker_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *HandshakerResult) String() string { @@ -815,7 +911,11 @@ func (*HandshakerResult) ProtoMessage() {} func (x *HandshakerResult) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_handshaker_proto_msgTypes[7] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -899,9 +999,17 @@ type HandshakerStatus struct { func (x *HandshakerStatus) Reset() { *x = HandshakerStatus{} +<<<<<<< HEAD mi := &file_grpc_gcp_handshaker_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *HandshakerStatus) String() string { @@ -912,7 +1020,11 @@ func (*HandshakerStatus) ProtoMessage() {} func (x *HandshakerStatus) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_handshaker_proto_msgTypes[8] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -965,9 +1077,17 @@ type HandshakerResp struct { func (x *HandshakerResp) Reset() { *x = HandshakerResp{} +<<<<<<< HEAD mi := &file_grpc_gcp_handshaker_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_handshaker_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *HandshakerResp) String() string { @@ -978,7 +1098,11 @@ func (*HandshakerResp) ProtoMessage() {} func (x *HandshakerResp) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_handshaker_proto_msgTypes[9] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1293,6 +1417,131 @@ func file_grpc_gcp_handshaker_proto_init() { return } file_grpc_gcp_transport_security_common_proto_init() +<<<<<<< HEAD +======= + if !protoimpl.UnsafeEnabled { + file_grpc_gcp_handshaker_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Endpoint); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_gcp_handshaker_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*Identity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_grpc_gcp_handshaker_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*StartClientHandshakeReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_gcp_handshaker_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*ServerHandshakeParameters); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_gcp_handshaker_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*StartServerHandshakeReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_gcp_handshaker_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*NextHandshakeMessageReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_gcp_handshaker_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*HandshakerReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_gcp_handshaker_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*HandshakerResult); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_gcp_handshaker_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*HandshakerStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_gcp_handshaker_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*HandshakerResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) file_grpc_gcp_handshaker_proto_msgTypes[1].OneofWrappers = []any{ (*Identity_ServiceAccount)(nil), (*Identity_Hostname)(nil), diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go index a8d5c4857b..1547dce067 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go @@ -17,7 +17,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: +<<<<<<< HEAD // protoc-gen-go v1.35.2 +======= +// protoc-gen-go v1.34.2 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // protoc v5.27.1 // source: grpc/gcp/transport_security_common.proto @@ -102,9 +106,17 @@ type RpcProtocolVersions struct { func (x *RpcProtocolVersions) Reset() { *x = RpcProtocolVersions{} +<<<<<<< HEAD mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *RpcProtocolVersions) String() string { @@ -115,7 +127,11 @@ func (*RpcProtocolVersions) ProtoMessage() {} func (x *RpcProtocolVersions) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[0] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -156,9 +172,17 @@ type RpcProtocolVersions_Version struct { func (x *RpcProtocolVersions_Version) Reset() { *x = RpcProtocolVersions_Version{} +<<<<<<< HEAD mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *RpcProtocolVersions_Version) String() string { @@ -169,7 +193,11 @@ func (*RpcProtocolVersions_Version) ProtoMessage() {} func (x *RpcProtocolVersions_Version) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[1] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -269,6 +297,35 @@ func file_grpc_gcp_transport_security_common_proto_init() { if File_grpc_gcp_transport_security_common_proto != nil { return } +<<<<<<< HEAD +======= + if !protoimpl.UnsafeEnabled { + file_grpc_gcp_transport_security_common_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*RpcProtocolVersions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_gcp_transport_security_common_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*RpcProtocolVersions_Version); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/grpc/credentials/google/google.go b/vendor/google.golang.org/grpc/credentials/google/google.go index 5a9c9461f0..572cb8c4de 100644 --- a/vendor/google.golang.org/grpc/credentials/google/google.go +++ b/vendor/google.golang.org/grpc/credentials/google/google.go @@ -22,6 +22,10 @@ package google import ( 
"context" "fmt" +<<<<<<< HEAD +======= + "time" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/alts" @@ -30,7 +34,11 @@ import ( "google.golang.org/grpc/internal" ) +<<<<<<< HEAD const defaultCloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" +======= +const tokenRequestTimeout = 30 * time.Second +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var logger = grpclog.Component("credentials") @@ -38,9 +46,12 @@ var logger = grpclog.Component("credentials") type DefaultCredentialsOptions struct { // PerRPCCreds is a per RPC credentials that is passed to a bundle. PerRPCCreds credentials.PerRPCCredentials +<<<<<<< HEAD // ALTSPerRPCCreds is a per RPC credentials that, if specified, will // supercede PerRPCCreds above for and only for ALTS connections. ALTSPerRPCCreds credentials.PerRPCCredentials +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // NewDefaultCredentialsWithOptions returns a credentials bundle that is @@ -49,21 +60,31 @@ type DefaultCredentialsOptions struct { // This API is experimental. func NewDefaultCredentialsWithOptions(opts DefaultCredentialsOptions) credentials.Bundle { if opts.PerRPCCreds == nil { +<<<<<<< HEAD var err error // If the ADC ends up being Compute Engine Credentials, this context // won't be used. Otherwise, the context dictates all the subsequent // token requests via HTTP. So we cannot have any deadline or timeout. opts.PerRPCCreds, err = newADC(context.TODO()) +======= + ctx, cancel := context.WithTimeout(context.Background(), tokenRequestTimeout) + defer cancel() + var err error + opts.PerRPCCreds, err = newADC(ctx) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { logger.Warningf("NewDefaultCredentialsWithOptions: failed to create application oauth: %v", err) } } +<<<<<<< HEAD if opts.ALTSPerRPCCreds != nil { opts.PerRPCCreds = &dualPerRPCCreds{ perRPCCreds: opts.PerRPCCreds, altsPerRPCCreds: opts.ALTSPerRPCCreds, } } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c := &creds{opts: opts} bundle, err := c.NewWithMode(internal.CredsBundleModeFallback) if err != nil { @@ -122,7 +143,11 @@ var ( return alts.NewClientCreds(alts.DefaultClientOptions()) } newADC = func(ctx context.Context) (credentials.PerRPCCredentials, error) { +<<<<<<< HEAD return oauth.NewApplicationDefault(ctx, defaultCloudPlatformScope) +======= + return oauth.NewApplicationDefault(ctx) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } ) @@ -152,6 +177,7 @@ func (c *creds) NewWithMode(mode string) (credentials.Bundle, error) { return newCreds, nil } +<<<<<<< HEAD // dualPerRPCCreds implements credentials.PerRPCCredentials by embedding the // fallback PerRPCCredentials and the ALTS one. 
It pickes one of them based on @@ -176,3 +202,5 @@ func (d *dualPerRPCCreds) GetRequestMetadata(ctx context.Context, uri ...string) func (d *dualPerRPCCreds) RequireTransportSecurity() bool { return d.altsPerRPCCreds.RequireTransportSecurity() || d.perRPCCreds.RequireTransportSecurity() } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index bd5fe22b6a..c19d5f0a8d 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -32,8 +32,11 @@ import ( "google.golang.org/grpc/internal/envconfig" ) +<<<<<<< HEAD const alpnFailureHelpMessage = "If you upgraded from a grpc-go version earlier than 1.67, your TLS connections may have stopped working due to ALPN enforcement. For more details, see: https://github.com/grpc/grpc-go/issues/434" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var logger = grpclog.Component("credentials") // TLSInfo contains the auth information for a TLS authenticated connection. @@ -130,7 +133,11 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon if np == "" { if envconfig.EnforceALPNEnabled { conn.Close() +<<<<<<< HEAD return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property. %s", alpnFailureHelpMessage) +======= + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } logger.Warningf("Allowing TLS connection to server %q with ALPN disabled. TLS connections to servers with ALPN disabled will be disallowed in future grpc-go releases", cfg.ServerName) } @@ -160,7 +167,11 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) if cs.NegotiatedProtocol == "" { if envconfig.EnforceALPNEnabled { conn.Close() +<<<<<<< HEAD return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property. %s", alpnFailureHelpMessage) +======= + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } else if logger.V(2) { logger.Info("Allowing TLS connection from client with ALPN disabled. TLS connections with ALPN disabled will be disallowed in future grpc-go releases") } diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index f3a045296a..088864d328 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -87,6 +87,10 @@ type dialOptions struct { disableServiceConfig bool disableRetry bool disableHealthCheck bool +<<<<<<< HEAD +======= + healthCheckFunc internal.HealthChecker +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) minConnectTimeout func() time.Duration defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. defaultServiceConfigRawJSON *string @@ -428,11 +432,14 @@ func WithTimeout(d time.Duration) DialOption { // returned by f, gRPC checks the error's Temporary() method to decide if it // should try to reconnect to the network address. // +<<<<<<< HEAD // Note that gRPC by default performs name resolution on the target passed to // NewClient. 
To bypass name resolution and cause the target string to be // passed directly to the dialer here instead, use the "passthrough" resolver // by specifying it in the target string, e.g. "passthrough:target". // +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Note: All supported releases of Go (as of December 2023) override the OS // defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive // with OS defaults for keepalive time and interval, use a net.Dialer that sets @@ -449,6 +456,13 @@ func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOp }) } +<<<<<<< HEAD +======= +func init() { + internal.WithHealthCheckFunc = withHealthCheckFunc +} + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // WithDialer returns a DialOption that specifies a function to use for dialing // network addresses. If FailOnNonTempDialError() is set to true, and an error // is returned by f, gRPC checks the error's Temporary() method to decide if it @@ -662,6 +676,19 @@ func WithDisableHealthCheck() DialOption { }) } +<<<<<<< HEAD +======= +// withHealthCheckFunc replaces the default health check function with the +// provided one. It makes tests easier to change the health check function. +// +// For testing purpose only. +func withHealthCheckFunc(f internal.HealthChecker) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.healthCheckFunc = f + }) +} + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func defaultDialOptions() dialOptions { return dialOptions{ copts: transport.ConnectOptions{ @@ -672,6 +699,10 @@ func defaultDialOptions() dialOptions { BufferPool: mem.DefaultBufferPool(), }, bs: internalbackoff.DefaultExponential, +<<<<<<< HEAD +======= + healthCheckFunc: internal.HealthCheckFunc, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) idleTimeout: 30 * time.Minute, defaultScheme: "dns", maxCallAttempts: defaultMaxCallAttempts, diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go index ad75313a18..6de1e7ce0c 100644 --- a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go +++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go @@ -23,7 +23,10 @@ import ( "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" +<<<<<<< HEAD "google.golang.org/grpc/stats" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) func init() { @@ -35,7 +38,11 @@ var logger = grpclog.Component("metrics-registry") // DefaultMetrics are the default metrics registered through global metrics // registry. This is written to at initialization time only, and is read only // after initialization. +<<<<<<< HEAD var DefaultMetrics = stats.NewMetricSet() +======= +var DefaultMetrics = NewMetrics() +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // MetricDescriptor is the data for a registered metric. type MetricDescriptor struct { @@ -43,7 +50,11 @@ type MetricDescriptor struct { // (including any per call metrics). See // https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions // for metric naming conventions. +<<<<<<< HEAD Name string +======= + Name Metric +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The description of this metric. Description string // The unit (e.g. entries, seconds) of this metric. 
@@ -155,16 +166,25 @@ func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels . } // registeredMetrics are the registered metric descriptor names. +<<<<<<< HEAD var registeredMetrics = make(map[string]bool) +======= +var registeredMetrics = make(map[Metric]bool) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // metricsRegistry contains all of the registered metrics. // // This is written to only at init time, and read only after that. +<<<<<<< HEAD var metricsRegistry = make(map[string]*MetricDescriptor) +======= +var metricsRegistry = make(map[Metric]*MetricDescriptor) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // DescriptorForMetric returns the MetricDescriptor from the global registry. // // Returns nil if MetricDescriptor not present. +<<<<<<< HEAD func DescriptorForMetric(metricName string) *MetricDescriptor { return metricsRegistry[metricName] } @@ -176,6 +196,19 @@ func registerMetric(metricName string, def bool) { registeredMetrics[metricName] = true if def { DefaultMetrics = DefaultMetrics.Add(metricName) +======= +func DescriptorForMetric(metric Metric) *MetricDescriptor { + return metricsRegistry[metric] +} + +func registerMetric(name Metric, def bool) { + if registeredMetrics[name] { + logger.Fatalf("metric %v already registered", name) + } + registeredMetrics[name] = true + if def { + DefaultMetrics = DefaultMetrics.Add(name) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -257,8 +290,13 @@ func snapshotMetricsRegistryForTesting() func() { oldRegisteredMetrics := registeredMetrics oldMetricsRegistry := metricsRegistry +<<<<<<< HEAD registeredMetrics = make(map[string]bool) metricsRegistry = make(map[string]*MetricDescriptor) +======= + registeredMetrics = make(map[Metric]bool) + metricsRegistry = make(map[Metric]*MetricDescriptor) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) maps.Copy(registeredMetrics, registeredMetrics) maps.Copy(metricsRegistry, metricsRegistry) diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go index ee1423605a..895a8eb9ef 100644 --- a/vendor/google.golang.org/grpc/experimental/stats/metrics.go +++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go @@ -19,7 +19,11 @@ // Package stats contains experimental metrics/stats API's. package stats +<<<<<<< HEAD import "google.golang.org/grpc/stats" +======= +import "maps" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // MetricsRecorder records on metrics derived from metric registry. type MetricsRecorder interface { @@ -40,6 +44,7 @@ type MetricsRecorder interface { RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string) } +<<<<<<< HEAD // Metrics is an experimental legacy alias of the now-stable stats.MetricSet. // Metrics will be deleted in a future release. type Metrics = stats.MetricSet @@ -51,4 +56,77 @@ type Metric = string // stats.NewMetricSet. NewMetrics will be deleted in a future release. func NewMetrics(metrics ...Metric) *Metrics { return stats.NewMetricSet(metrics...) +======= +// Metric is an identifier for a metric. +type Metric string + +// Metrics is a set of metrics to record. Once created, Metrics is immutable, +// however Add and Remove can make copies with specific metrics added or +// removed, respectively. +// +// Do not construct directly; use NewMetrics instead. +type Metrics struct { + // metrics are the set of metrics to initialize. 
+ metrics map[Metric]bool +} + +// NewMetrics returns a Metrics containing Metrics. +func NewMetrics(metrics ...Metric) *Metrics { + newMetrics := make(map[Metric]bool) + for _, metric := range metrics { + newMetrics[metric] = true + } + return &Metrics{ + metrics: newMetrics, + } +} + +// Metrics returns the metrics set. The returned map is read-only and must not +// be modified. +func (m *Metrics) Metrics() map[Metric]bool { + return m.metrics +} + +// Add adds the metrics to the metrics set and returns a new copy with the +// additional metrics. +func (m *Metrics) Add(metrics ...Metric) *Metrics { + newMetrics := make(map[Metric]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metrics { + newMetrics[metric] = true + } + return &Metrics{ + metrics: newMetrics, + } +} + +// Join joins the metrics passed in with the metrics set, and returns a new copy +// with the merged metrics. +func (m *Metrics) Join(metrics *Metrics) *Metrics { + newMetrics := make(map[Metric]bool) + maps.Copy(newMetrics, m.metrics) + maps.Copy(newMetrics, metrics.metrics) + return &Metrics{ + metrics: newMetrics, + } +} + +// Remove removes the metrics from the metrics set and returns a new copy with +// the metrics removed. +func (m *Metrics) Remove(metrics ...Metric) *Metrics { + newMetrics := make(map[Metric]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metrics { + delete(newMetrics, metric) + } + return &Metrics{ + metrics: newMetrics, + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go index ed90060c3c..eb16dd3f41 100644 --- a/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go @@ -101,6 +101,7 @@ var severityName = []string{ fatalLog: "FATAL", } +<<<<<<< HEAD // sprintf is fmt.Sprintf. // These vars exist to make it possible to test that expensive format calls aren't made unnecessarily. var sprintf = fmt.Sprintf @@ -117,6 +118,8 @@ var sprintln = fmt.Sprintln // This var exists to make it possible to test functions calling os.Exit. var exit = os.Exit +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // loggerT is the default logger used by grpclog. type loggerT struct { m []*log.Logger @@ -127,7 +130,11 @@ type loggerT struct { func (g *loggerT) output(severity int, s string) { sevStr := severityName[severity] if !g.jsonFormat { +<<<<<<< HEAD g.m[severity].Output(2, sevStr+": "+s) +======= + g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return } // TODO: we can also include the logging component, but that needs more @@ -139,6 +146,7 @@ func (g *loggerT) output(severity int, s string) { g.m[severity].Output(2, string(b)) } +<<<<<<< HEAD func (g *loggerT) printf(severity int, format string, args ...any) { // Note the discard check is duplicated in each print func, rather than in // output, to avoid the expensive Sprint calls. @@ -212,6 +220,57 @@ func (g *loggerT) Fatalln(args ...any) { func (g *loggerT) Fatalf(format string, args ...any) { g.printf(fatalLog, format, args...) 
exit(1) +======= +func (g *loggerT) Info(args ...any) { + g.output(infoLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Infoln(args ...any) { + g.output(infoLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Infof(format string, args ...any) { + g.output(infoLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Warning(args ...any) { + g.output(warningLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Warningln(args ...any) { + g.output(warningLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Warningf(format string, args ...any) { + g.output(warningLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Error(args ...any) { + g.output(errorLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Errorln(args ...any) { + g.output(errorLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Errorf(format string, args ...any) { + g.output(errorLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Fatal(args ...any) { + g.output(fatalLog, fmt.Sprint(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalln(args ...any) { + g.output(fatalLog, fmt.Sprintln(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalf(format string, args ...any) { + g.output(fatalLog, fmt.Sprintf(format, args...)) + os.Exit(1) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (g *loggerT) V(l int) bool { @@ -226,6 +285,7 @@ type LoggerV2Config struct { FormatJSON bool } +<<<<<<< HEAD // combineLoggers returns a combined logger for both higher & lower severity logs, // or only one if the other is io.Discard. // @@ -243,14 +303,21 @@ func combineLoggers(lower, higher io.Writer) io.Writer { return io.MultiWriter(lower, higher) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // NewLoggerV2 creates a new LoggerV2 instance with the provided configuration. // The infoW, warningW, and errorW writers are used to write log messages of // different severity levels. func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 { +<<<<<<< HEAD +======= + var m []*log.Logger +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) flag := log.LstdFlags if c.FormatJSON { flag = 0 } +<<<<<<< HEAD warningW = combineLoggers(infoW, warningW) errorW = combineLoggers(errorW, warningW) @@ -263,5 +330,12 @@ func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 { log.New(errorW, "", flag), log.New(fatalW, "", flag), } +======= + m = append(m, log.New(infoW, "", flag)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. + m = append(m, log.New(ew, "", flag)) + m = append(m, log.New(ew, "", flag)) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON} } diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 467de16bdb..c8c9d41812 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,7 +17,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: +<<<<<<< HEAD // protoc-gen-go v1.35.2 +======= +// protoc-gen-go v1.34.2 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // protoc v5.27.1 // source: grpc/health/v1/health.proto @@ -99,9 +103,17 @@ type HealthCheckRequest struct { func (x *HealthCheckRequest) Reset() { *x = HealthCheckRequest{} +<<<<<<< HEAD mi := &file_grpc_health_v1_health_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_health_v1_health_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *HealthCheckRequest) String() string { @@ -112,7 +124,11 @@ func (*HealthCheckRequest) ProtoMessage() {} func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_health_v1_health_proto_msgTypes[0] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -144,9 +160,17 @@ type HealthCheckResponse struct { func (x *HealthCheckResponse) Reset() { *x = HealthCheckResponse{} +<<<<<<< HEAD mi := &file_grpc_health_v1_health_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_health_v1_health_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *HealthCheckResponse) String() string { @@ -157,7 +181,11 @@ func (*HealthCheckResponse) ProtoMessage() {} func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_health_v1_health_proto_msgTypes[1] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -256,6 +284,35 @@ func file_grpc_health_v1_health_proto_init() { if File_grpc_health_v1_health_proto != nil { return } +<<<<<<< HEAD +======= + if !protoimpl.UnsafeEnabled { + file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*HealthCheckRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*HealthCheckResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go index b6ae7f2585..e04dedd8ce 100644 --- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -25,7 +25,11 @@ package backoff import ( "context" "errors" +<<<<<<< HEAD rand "math/rand/v2" +======= + "math/rand" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "time" grpcbackoff "google.golang.org/grpc/backoff" diff --git 
a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 1e42b6fdc8..889e565646 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -49,7 +49,11 @@ var ( // XDSFallbackSupport is the env variable that controls whether support for // xDS fallback is turned on. If this is unset or is false, only the first // xDS server in the list of server configs will be used. +<<<<<<< HEAD XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", true) +======= + XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used // instead of the exiting pickfirst implementation. This can be enabled by // setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST" diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 9afeb444d4..1e28cd888d 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -53,10 +53,13 @@ var ( // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") +<<<<<<< HEAD // XDSDualstackEndpointsEnabled is true if gRPC should read the // "additional addresses" in the xDS endpoint resource. // TODO: https://github.com/grpc/grpc-go/issues/7866 - Control this using // an env variable when all LB policies handle endpoints. XDSDualstackEndpointsEnabled = false +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) diff --git a/vendor/google.golang.org/grpc/internal/hierarchy/hierarchy.go b/vendor/google.golang.org/grpc/internal/hierarchy/hierarchy.go index 362c05fa2a..489e6d59f5 100644 --- a/vendor/google.golang.org/grpc/internal/hierarchy/hierarchy.go +++ b/vendor/google.golang.org/grpc/internal/hierarchy/hierarchy.go @@ -48,6 +48,7 @@ func (p pathValue) Equal(o any) bool { return true } +<<<<<<< HEAD // FromEndpoint returns the hierarchical path of endpoint. func FromEndpoint(endpoint resolver.Endpoint) []string { path, _ := endpoint.Attributes.Value(pathKey).(pathValue) @@ -60,6 +61,8 @@ func SetInEndpoint(endpoint resolver.Endpoint, path []string) resolver.Endpoint return endpoint } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Get returns the hierarchical path of addr. func Get(addr resolver.Address) []string { attrs := addr.BalancerAttributes @@ -122,6 +125,7 @@ func Group(addrs []resolver.Address) map[string][]resolver.Address { } return ret } +<<<<<<< HEAD // GroupEndpoints splits a slice of endpoints into groups based on // the first hierarchy path. 
The first hierarchy path will be removed from the @@ -169,3 +173,5 @@ func GroupEndpoints(endpoints []resolver.Endpoint) map[string][]resolver.Endpoin } return ret } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index c17b98194b..a27c4808ab 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -29,12 +29,19 @@ import ( ) var ( +<<<<<<< HEAD // HealthCheckFunc is used to provide client-side LB channel health checking HealthCheckFunc HealthChecker // RegisterClientHealthCheckListener is used to provide a listener for // updates from the client-side health checking service. It returns a // function that can be called to stop the health producer. RegisterClientHealthCheckListener any // func(ctx context.Context, sc balancer.SubConn, serviceName string, listener func(balancer.SubConnState)) func() +======= + // WithHealthCheckFunc is set by dialoptions.go + WithHealthCheckFunc any // func (HealthChecker) DialOption + // HealthCheckFunc is used to provide client-side LB channel health checking + HealthCheckFunc HealthChecker +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // BalancerUnregister is exported by package balancer to unregister a balancer. BalancerUnregister func(name string) // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by @@ -151,6 +158,7 @@ var ( // other features, including the CSDS service. NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error) +<<<<<<< HEAD // NewXDSResolverWithClientForTesting creates a new xDS resolver builder // using the provided xDS client instead of creating a new one using the // bootstrap configuration specified by the supported environment variables. @@ -165,6 +173,8 @@ var ( // other features, including the CSDS service. NewXDSResolverWithClientForTesting any // func(xdsclient.XDSClient) (resolver.Builder, error) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster // Specifier Plugin for testing purposes, regardless of the XDSRLS environment // variable. @@ -271,9 +281,12 @@ const ( // It currently has an experimental suffix which would be removed once // end-to-end testing of the policy is completed. const RLSLoadBalancingPolicyName = "rls_experimental" +<<<<<<< HEAD // EnforceSubConnEmbedding is used to enforce proper SubConn implementation // embedding. type EnforceSubConnEmbedding interface { enforceSubConnEmbedding() } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go index 22731029f5..9456d69b57 100644 --- a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go +++ b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go @@ -14,7 +14,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: +<<<<<<< HEAD // protoc-gen-go v1.35.2 +======= +// protoc-gen-go v1.34.2 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // protoc v5.27.1 // source: grpc/lookup/v1/rls.proto @@ -105,9 +109,17 @@ type RouteLookupRequest struct { func (x *RouteLookupRequest) Reset() { *x = RouteLookupRequest{} +<<<<<<< HEAD mi := &file_grpc_lookup_v1_rls_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *RouteLookupRequest) String() string { @@ -118,7 +130,11 @@ func (*RouteLookupRequest) ProtoMessage() {} func (x *RouteLookupRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_lookup_v1_rls_proto_msgTypes[0] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -187,9 +203,17 @@ type RouteLookupResponse struct { func (x *RouteLookupResponse) Reset() { *x = RouteLookupResponse{} +<<<<<<< HEAD mi := &file_grpc_lookup_v1_rls_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *RouteLookupResponse) String() string { @@ -200,7 +224,11 @@ func (*RouteLookupResponse) ProtoMessage() {} func (x *RouteLookupResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_lookup_v1_rls_proto_msgTypes[1] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -335,6 +363,35 @@ func file_grpc_lookup_v1_rls_proto_init() { if File_grpc_lookup_v1_rls_proto != nil { return } +<<<<<<< HEAD +======= + if !protoimpl.UnsafeEnabled { + file_grpc_lookup_v1_rls_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*RouteLookupRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lookup_v1_rls_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*RouteLookupResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go index 73b70c25ea..a689fafdf3 100644 --- a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go +++ b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go @@ -14,7 +14,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: +<<<<<<< HEAD // protoc-gen-go v1.35.2 +======= +// protoc-gen-go v1.34.2 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // protoc v5.27.1 // source: grpc/lookup/v1/rls_config.proto @@ -59,9 +63,17 @@ type NameMatcher struct { func (x *NameMatcher) Reset() { *x = NameMatcher{} +<<<<<<< HEAD mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *NameMatcher) String() string { @@ -72,7 +84,11 @@ func (*NameMatcher) ProtoMessage() {} func (x *NameMatcher) ProtoReflect() protoreflect.Message { mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[0] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -129,9 +145,17 @@ type GrpcKeyBuilder struct { func (x *GrpcKeyBuilder) Reset() { *x = GrpcKeyBuilder{} +<<<<<<< HEAD mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *GrpcKeyBuilder) String() string { @@ -142,7 +166,11 @@ func (*GrpcKeyBuilder) ProtoMessage() {} func (x *GrpcKeyBuilder) ProtoReflect() protoreflect.Message { mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[1] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -273,9 +301,17 @@ type HttpKeyBuilder struct { func (x *HttpKeyBuilder) Reset() { *x = HttpKeyBuilder{} +<<<<<<< HEAD mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *HttpKeyBuilder) String() string { @@ -286,7 +322,11 @@ func (*HttpKeyBuilder) ProtoMessage() {} func (x *HttpKeyBuilder) ProtoReflect() protoreflect.Message { mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[2] +<<<<<<< HEAD if x != nil { +======= + if protoimpl.UnsafeEnabled && x != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -394,9 +434,17 @@ type RouteLookupConfig struct { func (x *RouteLookupConfig) Reset() { *x = RouteLookupConfig{} +<<<<<<< HEAD mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) +======= + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) 
 }
 
 func (x *RouteLookupConfig) String() string {
@@ -407,7 +455,11 @@ func (*RouteLookupConfig) ProtoMessage() {}
 
 func (x *RouteLookupConfig) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[3]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -498,9 +550,17 @@ type RouteLookupClusterSpecifier struct {
 
 func (x *RouteLookupClusterSpecifier) Reset() {
 	*x = RouteLookupClusterSpecifier{}
-	mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[4]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *RouteLookupClusterSpecifier) String() string {
@@ -511,7 +571,11 @@ func (*RouteLookupClusterSpecifier) ProtoMessage() {}
 
 func (x *RouteLookupClusterSpecifier) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[4]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -548,9 +612,17 @@ type GrpcKeyBuilder_Name struct {
 
 func (x *GrpcKeyBuilder_Name) Reset() {
 	*x = GrpcKeyBuilder_Name{}
-	mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[5]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[5]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *GrpcKeyBuilder_Name) String() string {
@@ -561,7 +633,11 @@ func (*GrpcKeyBuilder_Name) ProtoMessage() {}
 
 func (x *GrpcKeyBuilder_Name) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[5]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -608,9 +684,17 @@ type GrpcKeyBuilder_ExtraKeys struct {
 
 func (x *GrpcKeyBuilder_ExtraKeys) Reset() {
 	*x = GrpcKeyBuilder_ExtraKeys{}
-	mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[6]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[6]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *GrpcKeyBuilder_ExtraKeys) String() string {
@@ -621,7 +705,11 @@ func (*GrpcKeyBuilder_ExtraKeys) ProtoMessage() {}
 
 func (x *GrpcKeyBuilder_ExtraKeys) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[6]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -828,6 +916,95 @@ func file_grpc_lookup_v1_rls_config_proto_init() {
 	if File_grpc_lookup_v1_rls_config_proto != nil {
 		return
 	}
+	if !protoimpl.UnsafeEnabled {
+		file_grpc_lookup_v1_rls_config_proto_msgTypes[0].Exporter = func(v any, i int) any {
+			switch v := v.(*NameMatcher); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_grpc_lookup_v1_rls_config_proto_msgTypes[1].Exporter = func(v any, i int) any {
+			switch v := v.(*GrpcKeyBuilder); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_grpc_lookup_v1_rls_config_proto_msgTypes[2].Exporter = func(v any, i int) any {
+			switch v := v.(*HttpKeyBuilder); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_grpc_lookup_v1_rls_config_proto_msgTypes[3].Exporter = func(v any, i int) any {
+			switch v := v.(*RouteLookupConfig); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_grpc_lookup_v1_rls_config_proto_msgTypes[4].Exporter = func(v any, i int) any {
+			switch v := v.(*RouteLookupClusterSpecifier); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_grpc_lookup_v1_rls_config_proto_msgTypes[5].Exporter = func(v any, i int) any {
+			switch v := v.(*GrpcKeyBuilder_Name); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_grpc_lookup_v1_rls_config_proto_msgTypes[6].Exporter = func(v any, i int) any {
+			switch v := v.(*GrpcKeyBuilder_ExtraKeys); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
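The rls_config.pb.go hunks above differ only in the protoimpl.UnsafeEnabled guard and the reflection Exporter shims that older protoc-gen-go output (v1.34.x here) emitted; v1.35 dropped them. A minimal, runnable sketch of the guard the kept generation branches on — the import is the real protobuf runtime, the printed strings are purely illustrative:

package main

import (
	"fmt"

	"google.golang.org/protobuf/runtime/protoimpl"
)

func main() {
	// Pre-v1.35 generated code only caches message state through unsafe
	// pointer tricks when the runtime says it may; otherwise the
	// reflection-based Exporter funcs installed in proto_init() are used.
	if protoimpl.UnsafeEnabled {
		fmt.Println("fast path: message state cached via unsafe")
	} else {
		fmt.Println("slow path: Exporter shims drive reflection")
	}
}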
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
index ba5c5a95d0..1bab1d432d 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
@@ -24,9 +24,14 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	rand "math/rand/v2"
-	"net"
-	"net/netip"
+	"math/rand"
+	"net"
 	"os"
 	"strconv"
 	"strings"
@@ -123,7 +128,11 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
 	}
 
 	// IP address.
-	if ipAddr, err := formatIP(host); err == nil {
+	if ipAddr, ok := formatIP(host); ok {
 		addr := []resolver.Address{{Addr: ipAddr + ":" + port}}
 		cc.UpdateState(resolver.State{Addresses: addr})
 		return deadResolver{}, nil
@@ -261,9 +270,15 @@ func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error)
 		return nil, err
 	}
 	for _, a := range lbAddrs {
-		ip, err := formatIP(a)
-		if err != nil {
-			return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err)
+		ip, ok := formatIP(a)
+		if !ok {
+			return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
 		}
 		addr := ip + ":" + strconv.Itoa(int(s.Port))
 		newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target})
@@ -323,9 +338,15 @@ func (d *dnsResolver) lookupHost(ctx context.Context) ([]resolver.Address, error
 	}
 	newAddrs := make([]resolver.Address, 0, len(addrs))
 	for _, a := range addrs {
-		ip, err := formatIP(a)
-		if err != nil {
-			return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err)
+		ip, ok := formatIP(a)
+		if !ok {
+			return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
 		}
 		addr := ip + ":" + d.port
 		newAddrs = append(newAddrs, resolver.Address{Addr: addr})
@@ -352,6 +373,7 @@ func (d *dnsResolver) lookup() (*resolver.State, error) {
 	return &state, nil
 }
 
-// formatIP returns an error if addr is not a valid textual representation of
-// an IP address. If addr is an IPv4 address, return the addr and error = nil.
-// If addr is an IPv6 address, return the addr enclosed in square brackets and
@@ -365,6 +387,21 @@ func formatIP(addr string) (string, error) {
-		return addr, nil
-	}
-	return "[" + addr + "]", nil
+// formatIP returns ok = false if addr is not a valid textual representation of
+// an IP address. If addr is an IPv4 address, return the addr and ok = true.
+// If addr is an IPv6 address, return the addr enclosed in square brackets and
+// ok = true.
+func formatIP(addr string) (addrIP string, ok bool) {
+	ip := net.ParseIP(addr)
+	if ip == nil {
+		return "", false
+	}
+	if ip.To4() != nil {
+		return addr, true
+	}
+	return "[" + addr + "]", true
 }
 
@@ -380,7 +417,11 @@ func parseTarget(target, defaultPort string) (host, port string, err error) {
 	if target == "" {
 		return "", "", internal.ErrMissingAddr
 	}
-	if _, err := netip.ParseAddr(target); err == nil {
+	if ip := net.ParseIP(target); ip != nil {
 		// target is an IPv4 or IPv6(without brackets) address
 		return target, defaultPort, nil
 	}
@@ -428,7 +469,11 @@ func chosenByPercentage(a *int) bool {
 	if a == nil {
 		return true
 	}
-	return rand.IntN(100)+1 <= *a
+	return rand.Intn(100)+1 <= *a
 }
 
 func canaryingSC(js string) string {
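The resolver hunks swap formatIP's error return for an ok bool. A self-contained copy of the kept helper with a small driver (the sample addresses are made up):

package main

import (
	"fmt"
	"net"
)

// formatIP mirrors the vendored helper above: IPv4 comes back unchanged,
// IPv6 is wrapped in brackets so it can be joined with ":port", and ok
// reports whether addr parsed at all.
func formatIP(addr string) (addrIP string, ok bool) {
	ip := net.ParseIP(addr)
	if ip == nil {
		return "", false
	}
	if ip.To4() != nil {
		return addr, true
	}
	return "[" + addr + "]", true
}

func main() {
	for _, a := range []string{"10.0.0.1", "2001:db8::1", "not-an-ip"} {
		ip, ok := formatIP(a)
		fmt.Printf("%q -> %q ok=%v\n", a, ip, ok)
	}
}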
diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
index dfc0f224ec..61516b2ad9 100644
--- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
+++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
@@ -92,11 +92,22 @@ func (f *trInFlow) newLimit(n uint32) uint32 {
 
 func (f *trInFlow) onData(n uint32) uint32 {
 	f.unacked += n
-	if f.unacked < f.limit/4 {
-		f.updateEffectiveWindowSize()
-		return 0
-	}
-	return f.reset()
+	if f.unacked >= f.limit/4 {
+		w := f.unacked
+		f.unacked = 0
+		f.updateEffectiveWindowSize()
+		return w
+	}
+	f.updateEffectiveWindowSize()
+	return 0
 }
 
 func (f *trInFlow) reset() uint32 {
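The flow-control change is easier to read outside the diff: the transport batches unacked bytes and only emits a window update once at least a quarter of the window limit is pending. A stripped-down sketch of that thresholding — field and method names follow the vendored type, the sizes are illustrative:

package main

import "fmt"

type trInFlow struct {
	limit   uint32
	unacked uint32
}

// onData returns the size of the window update to send now, or 0 to keep
// batching until a quarter of the limit has accumulated.
func (f *trInFlow) onData(n uint32) uint32 {
	f.unacked += n
	if f.unacked >= f.limit/4 {
		w := f.unacked
		f.unacked = 0
		return w
	}
	return 0
}

func main() {
	f := &trInFlow{limit: 64 * 1024}
	for _, n := range []uint32{4096, 4096, 8192} {
		fmt.Println(f.onData(n)) // 0, 0, 16384: acked once >= limit/4 pending
	}
}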
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
index 3dea235735..5ae7813909 100644
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -225,7 +225,11 @@ func (ht *serverHandlerTransport) do(fn func()) error {
 	}
 }
 
-func (ht *serverHandlerTransport) writeStatus(s *ServerStream, st *status.Status) error {
+func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error {
 	ht.writeStatusMu.Lock()
 	defer ht.writeStatusMu.Unlock()
 
@@ -289,14 +293,22 @@ func (ht *serverHandlerTransport) writeStatus(s *ServerStream, st *status.Status
 
 // writePendingHeaders sets common and custom headers on the first
 // write call (Write, WriteHeader, or WriteStatus)
-func (ht *serverHandlerTransport) writePendingHeaders(s *ServerStream) {
+func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) {
 	ht.writeCommonHeaders(s)
 	ht.writeCustomHeaders(s)
 }
 
 // writeCommonHeaders sets common headers on the first write
 // call (Write, WriteHeader, or WriteStatus).
-func (ht *serverHandlerTransport) writeCommonHeaders(s *ServerStream) {
+func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
 	h := ht.rw.Header()
 	h["Date"] = nil // suppress Date to make tests happy; TODO: restore
 	h.Set("Content-Type", ht.contentType)
@@ -317,7 +329,11 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
 
 // writeCustomHeaders sets custom headers set on the stream via SetHeader
 // on the first write call (Write, WriteHeader, or WriteStatus)
-func (ht *serverHandlerTransport) writeCustomHeaders(s *ServerStream) {
+func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) {
 	h := ht.rw.Header()
 
 	s.hdrMu.Lock()
@@ -333,7 +349,11 @@ func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) {
 	s.hdrMu.Unlock()
 }
 
-func (ht *serverHandlerTransport) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error {
+func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error {
 	// Always take a reference because otherwise there is no guarantee the data will
 	// be available after this function returns. This is what callers to Write
 	// expect.
@@ -357,7 +377,11 @@ func (ht *serverHandlerTransport) write(s *ServerStream, hdr []byte, data mem.Bu
 	return nil
 }
 
-func (ht *serverHandlerTransport) writeHeader(s *ServerStream, md metadata.MD) error {
+func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
 	if err := s.SetHeader(md); err != nil {
 		return err
 	}
@@ -385,7 +409,11 @@ func (ht *serverHandlerTransport) writeHeader(s *ServerStream, md metadata.MD) e
 	return err
 }
 
-func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*ServerStream)) {
+func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) {
 	// With this transport type there will be exactly 1 stream: this HTTP request.
 	var cancel context.CancelFunc
 	if ht.timeoutSet {
@@ -408,6 +436,7 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream
 	ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
 	req := ht.req
-	s := &ServerStream{
-		Stream: &Stream{
-			id: 0, // irrelevant
@@ -420,6 +449,18 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream
-		},
-		cancel: cancel,
-		st:     ht,
+	s := &Stream{
+		id:               0, // irrelevant
+		ctx:              ctx,
+		requestRead:      func(int) {},
+		cancel:           cancel,
+		buf:              newRecvBuffer(),
+		st:               ht,
+		method:           req.URL.Path,
+		recvCompress:     req.Header.Get("grpc-encoding"),
+		contentSubtype:   ht.contentSubtype,
 		headerWireLength: 0, // won't have access to header wire length until golang/go#18997.
 	}
 	s.trReader = &transportReader{
@@ -473,7 +514,13 @@ func (ht *serverHandlerTransport) runStream() {
 	}
 }
 
-func (ht *serverHandlerTransport) incrMsgRecv() {}
+func (ht *serverHandlerTransport) IncrMsgSent() {}
+
+func (ht *serverHandlerTransport) IncrMsgRecv() {}
 
 func (ht *serverHandlerTransport) Drain(string) {
 	panic("Drain() is not implemented")
@@ -498,5 +545,9 @@ func mapRecvMsgError(err error) error {
 	if strings.Contains(err.Error(), "body closed by handler") {
 		return status.Error(codes.Canceled, err.Error())
 	}
-	return connectionErrorf(true, err, "%s", err.Error())
+	return connectionErrorf(true, err, err.Error())
 }
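WriteStatus and mapRecvMsgError above move errors across the transport via the status package. A small sketch of that round trip using the same status.Error/status.Convert calls; the message text is made up:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Build the kind of error mapRecvMsgError produces for a closed body...
	err := status.Error(codes.Canceled, "body closed by handler")

	// ...and recover the *status.Status the transport serializes into the
	// grpc-status / grpc-message trailers.
	st := status.Convert(err)
	fmt.Println(st.Code(), st.Message()) // Canceled body closed by handler
}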
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index f323ab7f45..6cd6a3d9fa 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -123,7 +123,11 @@ type http2Client struct {
 	mu            sync.Mutex // guard the following variables
 	nextID        uint32
 	state         transportState
-	activeStreams map[uint32]*ClientStream
+	activeStreams map[uint32]*Stream
 	// prevGoAway ID records the Last-Stream-ID in the previous GOAway frame.
 	prevGoAwayID uint32
 	// goAwayReason records the http2.ErrCode and debug data received with the
@@ -199,10 +203,17 @@ func isTemporary(err error) bool {
 	return true
 }
 
-// NewHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
-// and starts to receive messages on it. Non-nil error returns if construction
-// fails.
-func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ ClientTransport, err error) {
+// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
+// and starts to receive messages on it. Non-nil error returns if construction
+// fails.
+func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) {
 	scheme := "http"
 	ctx, cancel := context.WithCancel(ctx)
 	defer func() {
@@ -339,7 +350,11 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
 		framer:        newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize),
 		fc:            &trInFlow{limit: uint32(icwz)},
 		scheme:        scheme,
-		activeStreams: make(map[uint32]*ClientStream),
+		activeStreams: make(map[uint32]*Stream),
 		isSecure:      isSecure,
 		perRPCCreds:   perRPCCreds,
 		kp:            kp,
@@ -480,6 +495,7 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
 	return t, nil
 }
 
-func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientStream {
-	// TODO(zhaoq): Handle uint32 overflow of Stream.id.
-	s := &ClientStream{
@@ -493,6 +509,19 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientSt
-		done:       make(chan struct{}),
-		headerChan: make(chan struct{}),
-		doneFunc:   callHdr.DoneFunc,
+func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
+	// TODO(zhaoq): Handle uint32 overflow of Stream.id.
+	s := &Stream{
+		ct:             t,
+		done:           make(chan struct{}),
+		method:         callHdr.Method,
+		sendCompress:   callHdr.SendCompress,
+		buf:            newRecvBuffer(),
+		headerChan:     make(chan struct{}),
+		contentSubtype: callHdr.ContentSubtype,
+		doneFunc:       callHdr.DoneFunc,
 	}
 	s.wq = newWriteQuota(defaultWriteQuota, s.done)
 	s.requestRead = func(n int) {
@@ -508,7 +537,11 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientSt
 			ctxDone: s.ctx.Done(),
 			recv:    s.buf,
 			closeStream: func(err error) {
-				s.Close(err)
+				t.CloseStream(s, err)
 			},
 		},
 		windowHandler: func(n int) {
@@ -599,6 +632,15 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
 	for k, v := range callAuthData {
 		headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
 	}
+	if b := stats.OutgoingTags(ctx); b != nil {
+		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)})
+	}
+	if b := stats.OutgoingTrace(ctx); b != nil {
+		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)})
+	}
 	if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok {
 		var k string
@@ -734,7 +776,11 @@ func (e NewStreamError) Error() string {
 
 // NewStream creates a stream and registers it into the transport as "active"
 // streams. All non-nil errors returned will be *NewStreamError.
-func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) {
+func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) {
 	ctx = peer.NewContext(ctx, t.getPeer())
 
 	// ServerName field of the resolver returned address takes precedence over
@@ -759,7 +805,11 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS
 			return
 		}
 		// The stream was unprocessed by the server.
-		s.unprocessed.Store(true)
+		atomic.StoreUint32(&s.unprocessed, 1)
 		s.write(recvMsg{err: err})
 		close(s.done)
 		// If headerChan isn't closed, then close it.
@@ -904,7 +954,25 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS
 	return s, nil
 }
 
-func (t *http2Client) closeStream(s *ClientStream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) {
+// CloseStream clears the footprint of a stream when the stream is not needed any more.
+// This must not be executed in reader's goroutine.
+func (t *http2Client) CloseStream(s *Stream, err error) {
+	var (
+		rst     bool
+		rstCode http2.ErrCode
+	)
+	if err != nil {
+		rst = true
+		rstCode = http2.ErrCodeCancel
+	}
+	t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false)
+}
+
+func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) {
 	// Set stream status to done.
 	if s.swapState(streamDone) == streamDone {
 		// If it was already done, return. If multiple closeStream calls
@@ -1067,7 +1135,11 @@ func (t *http2Client) GracefulClose() {
 
 // Write formats the data into HTTP2 data frame(s) and sends it out. The caller
 // should proceed only if Write returns nil.
-func (t *http2Client) write(s *ClientStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error {
+func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error {
 	reader := data.Reader()
 
 	if opts.Last {
@@ -1096,11 +1168,18 @@ func (t *http2Client) write(s *ClientStream, hdr []byte, data mem.BufferSlice, o
 		_ = reader.Close()
 		return err
 	}
-	t.incrMsgSent()
-	return nil
-}
-
-func (t *http2Client) getStream(f http2.Frame) *ClientStream {
+	return nil
+}
+
+func (t *http2Client) getStream(f http2.Frame) *Stream {
 	t.mu.Lock()
 	s := t.activeStreams[f.Header().StreamID]
 	t.mu.Unlock()
@@ -1110,7 +1189,11 @@ func (t *http2Client) getStream(f http2.Frame) *ClientStream {
 // adjustWindow sends out extra window update over the initial window size
 // of stream if the application is requesting data larger in size than
 // the window.
-func (t *http2Client) adjustWindow(s *ClientStream, n uint32) {
+func (t *http2Client) adjustWindow(s *Stream, n uint32) {
 	if w := s.fc.maybeAdjust(n); w > 0 {
 		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
 	}
@@ -1119,7 +1202,11 @@ func (t *http2Client) adjustWindow(s *ClientStream, n uint32) {
 // updateWindow adjusts the inbound quota for the stream.
 // Window updates will be sent out when the cumulative quota
 // exceeds the corresponding threshold.
-func (t *http2Client) updateWindow(s *ClientStream, n uint32) {
+func (t *http2Client) updateWindow(s *Stream, n uint32) {
 	if w := s.fc.onRead(n); w > 0 {
 		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
 	}
@@ -1225,7 +1312,11 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
 	}
 	if f.ErrCode == http2.ErrCodeRefusedStream {
 		// The stream was unprocessed by the server.
-		s.unprocessed.Store(true)
+		atomic.StoreUint32(&s.unprocessed, 1)
 	}
 	statusCode, ok := http2ErrConvTab[f.ErrCode]
 	if !ok {
@@ -1366,11 +1457,19 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error {
 		return connectionErrorf(true, nil, "received goaway and there are no active streams")
 	}
 
-	streamsToClose := make([]*ClientStream, 0)
-	for streamID, stream := range t.activeStreams {
-		if streamID > id && streamID <= upperLimit {
-			// The stream was unprocessed by the server.
-			stream.unprocessed.Store(true)
+	streamsToClose := make([]*Stream, 0)
+	for streamID, stream := range t.activeStreams {
+		if streamID > id && streamID <= upperLimit {
+			// The stream was unprocessed by the server.
+			atomic.StoreUint32(&stream.unprocessed, 1)
 			streamsToClose = append(streamsToClose, stream)
 		}
 	}
@@ -1422,7 +1521,11 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
 		return
 	}
 	endStream := frame.StreamEnded()
-	s.bytesReceived.Store(true)
+	atomic.StoreUint32(&s.bytesReceived, 1)
 	initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0
 
 	if !initialHeader && !endStream {
@@ -1792,6 +1895,7 @@ func (t *http2Client) socketMetrics() *channelz.EphemeralSocketMetrics {
 
 func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr }
 
-func (t *http2Client) incrMsgSent() {
-	if channelz.IsOn() {
-		t.channelz.SocketMetrics.MessagesSent.Add(1)
@@ -1804,6 +1908,16 @@ func (t *http2Client) incrMsgRecv() {
-		t.channelz.SocketMetrics.MessagesReceived.Add(1)
-		t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano())
-	}
+func (t *http2Client) IncrMsgSent() {
+	t.channelz.SocketMetrics.MessagesSent.Add(1)
+	t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano())
+}
+
+func (t *http2Client) IncrMsgRecv() {
+	t.channelz.SocketMetrics.MessagesReceived.Add(1)
+	t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano())
 }
 
 func (t *http2Client) getOutFlowWindow() int64 {
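The client hunks keep the older per-stream flags: plain uint32 fields such as unprocessed and bytesReceived, flipped and read with sync/atomic rather than atomic.Bool. A minimal sketch of that pattern:

package main

import (
	"fmt"
	"sync/atomic"
)

type stream struct {
	unprocessed uint32 // set if the server refused the stream or sent a GOAWAY covering it
}

func (s *stream) markUnprocessed() { atomic.StoreUint32(&s.unprocessed, 1) }

func (s *stream) Unprocessed() bool { return atomic.LoadUint32(&s.unprocessed) == 1 }

func main() {
	s := &stream{}
	fmt.Println(s.Unprocessed()) // false
	s.markUnprocessed()          // what handleGoAway does per affected stream
	fmt.Println(s.Unprocessed()) // true
}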
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index 997b0a59b5..49fa2d316d 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -25,7 +25,11 @@ import (
 	"fmt"
 	"io"
 	"math"
-	rand "math/rand/v2"
+	"math/rand"
 	"net"
 	"net/http"
 	"strconv"
@@ -111,7 +115,11 @@ type http2Server struct {
 	// already initialized since draining is already underway.
 	drainEvent    *grpcsync.Event
 	state         transportState
-	activeStreams map[uint32]*ServerStream
+	activeStreams map[uint32]*Stream
 	// idle is the time instant when the connection went idle.
 	// This is either the beginning of the connection or when the number of
 	// RPCs go down to 0.
@@ -256,7 +264,11 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
 		inTapHandle:       config.InTapHandle,
 		fc:                &trInFlow{limit: uint32(icwz)},
 		state:             reachable,
-		activeStreams:     make(map[uint32]*ServerStream),
+		activeStreams:     make(map[uint32]*Stream),
 		stats:             config.StatsHandlers,
 		kp:                kp,
 		idle:              time.Now(),
@@ -359,7 +371,11 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
 
 // operateHeaders takes action on the decoded headers. Returns an error if fatal
 // error encountered and transport needs to close, otherwise returns nil.
-func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*ServerStream)) error {
+func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error {
 	// Acquire max stream ID lock for entire duration
 	t.maxStreamMu.Lock()
 	defer t.maxStreamMu.Unlock()
@@ -385,6 +401,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
 	t.maxStreamID = streamID
 
 	buf := newRecvBuffer()
-	s := &ServerStream{
-		Stream: &Stream{
-			id:  streamID,
-			buf: buf,
-			fc:  &inFlow{limit: uint32(t.initialWindowSize)},
-		},
-		st: t,
+	s := &Stream{
+		id:               streamID,
+		st:               t,
+		buf:              buf,
+		fc:               &inFlow{limit: uint32(t.initialWindowSize)},
 		headerWireLength: int(frame.Header().Length),
 	}
 	var (
@@ -539,6 +563,15 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
 	// Attach the received metadata to the context.
 	if len(mdata) > 0 {
 		s.ctx = metadata.NewIncomingContext(s.ctx, mdata)
+		if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 {
+			s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1]))
+		}
+		if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 {
+			s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1]))
+		}
 	}
 	t.mu.Lock()
 	if t.state != reachable {
@@ -564,7 +597,11 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
 		t.logger.Infof("Aborting the stream early: %v", errMsg)
 	}
 	t.controlBuf.put(&earlyAbortStream{
-		httpStatus:     http.StatusMethodNotAllowed,
+		httpStatus:     405,
 		streamID:       streamID,
 		contentSubtype: s.contentSubtype,
 		status:         status.New(codes.Internal, errMsg),
@@ -585,7 +622,11 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
 		stat = status.New(codes.PermissionDenied, err.Error())
 	}
 	t.controlBuf.put(&earlyAbortStream{
-		httpStatus:     http.StatusOK,
+		httpStatus:     200,
 		streamID:       s.id,
 		contentSubtype: s.contentSubtype,
 		status:         stat,
@@ -630,7 +671,11 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
 // HandleStreams receives incoming streams using the given handler. This is
 // typically run in a separate goroutine.
 // traceCtx attaches trace to ctx and returns the new context.
-func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStream)) {
+func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) {
 	defer func() {
 		close(t.readerDone)
 		<-t.loopyWriterDone
@@ -694,7 +739,11 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStre
 	}
 }
 
-func (t *http2Server) getStream(f http2.Frame) (*ServerStream, bool) {
+func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) {
 	t.mu.Lock()
 	defer t.mu.Unlock()
 	if t.activeStreams == nil {
@@ -712,7 +761,11 @@ func (t *http2Server) getStream(f http2.Frame) (*ServerStream, bool) {
 // adjustWindow sends out extra window update over the initial window size
 // of stream if the application is requesting data larger in size than
 // the window.
-func (t *http2Server) adjustWindow(s *ServerStream, n uint32) {
+func (t *http2Server) adjustWindow(s *Stream, n uint32) {
 	if w := s.fc.maybeAdjust(n); w > 0 {
 		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
 	}
@@ -722,7 +775,11 @@ func (t *http2Server) adjustWindow(s *ServerStream, n uint32) {
 // updateWindow adjusts the inbound quota for the stream and the transport.
 // Window updates will deliver to the controller for sending when
 // the cumulative quota exceeds the corresponding threshold.
-func (t *http2Server) updateWindow(s *ServerStream, n uint32) {
+func (t *http2Server) updateWindow(s *Stream, n uint32) {
 	if w := s.fc.onRead(n); w > 0 {
 		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id,
 			increment: w,
@@ -959,7 +1016,11 @@ func (t *http2Server) checkForHeaderListSize(it any) bool {
 	return true
 }
 
-func (t *http2Server) streamContextErr(s *ServerStream) error {
+func (t *http2Server) streamContextErr(s *Stream) error {
 	select {
 	case <-t.done:
 		return ErrConnClosing
@@ -969,7 +1030,11 @@ func (t *http2Server) streamContextErr(s *ServerStream) error {
 }
 
 // WriteHeader sends the header metadata md back to the client.
-func (t *http2Server) writeHeader(s *ServerStream, md metadata.MD) error {
+func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
 	s.hdrMu.Lock()
 	defer s.hdrMu.Unlock()
 	if s.getState() == streamDone {
@@ -1002,7 +1067,11 @@ func (t *http2Server) setResetPingStrikes() {
 	atomic.StoreUint32(&t.resetPingStrikes, 1)
 }
 
-func (t *http2Server) writeHeaderLocked(s *ServerStream) error {
+func (t *http2Server) writeHeaderLocked(s *Stream) error {
 	// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
 	// first and create a slice of that exact size.
 	headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
@@ -1042,7 +1111,11 @@ func (t *http2Server) writeHeaderLocked(s *ServerStream) error {
 // There is no further I/O operations being able to perform on this stream.
 // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
 // OK is adopted.
-func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error {
+func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
 	s.hdrMu.Lock()
 	defer s.hdrMu.Unlock()
 
@@ -1113,11 +1186,19 @@ func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error {
 
 // Write converts the data into HTTP2 data frame and sends it out. Non-nil error
 // is returns if it fails (e.g., framing error, transport error).
-func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error {
-	reader := data.Reader()
-
-	if !s.isHeaderSent() { // Headers haven't been written yet.
-		if err := t.writeHeader(s, nil); err != nil {
+func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error {
+	reader := data.Reader()
+
+	if !s.isHeaderSent() { // Headers haven't been written yet.
+		if err := t.WriteHeader(s, nil); err != nil {
 			_ = reader.Close()
 			return err
 		}
@@ -1143,7 +1224,10 @@ func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _
 		_ = reader.Close()
 		return err
 	}
-	t.incrMsgSent()
 	return nil
 }
 
@@ -1273,7 +1357,11 @@ func (t *http2Server) Close(err error) {
 }
 
 // deleteStream deletes the stream s from transport's active streams.
-func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) {
+func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
 	t.mu.Lock()
 	if _, ok := t.activeStreams[s.id]; ok {
@@ -1294,7 +1382,11 @@ func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) {
 }
 
 // finishStream closes the stream and puts the trailing headerFrame into controlbuf.
-func (t *http2Server) finishStream(s *ServerStream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
+func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
 	// In case stream sending and receiving are invoked in separate
 	// goroutines (e.g., bi-directional streaming), cancel needs to be
 	// called to interrupt the potential blocking on other goroutines.
@@ -1318,7 +1410,11 @@ func (t *http2Server) finishStream(s *ServerStream, rst bool, rstCode http2.ErrC
 }
 
 // closeStream clears the footprint of a stream when the stream is not needed any more.
-func (t *http2Server) closeStream(s *ServerStream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
+func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
 	// In case stream sending and receiving are invoked in separate
 	// goroutines (e.g., bi-directional streaming), cancel needs to be
 	// called to interrupt the potential blocking on other goroutines.
@@ -1412,6 +1508,7 @@ func (t *http2Server) socketMetrics() *channelz.EphemeralSocketMetrics {
 	}
 }
 
-func (t *http2Server) incrMsgSent() {
-	if channelz.IsOn() {
-		t.channelz.SocketMetrics.MessagesSent.Add(1)
@@ -1424,6 +1521,16 @@ func (t *http2Server) incrMsgRecv() {
-		t.channelz.SocketMetrics.MessagesReceived.Add(1)
-		t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1)
-	}
+func (t *http2Server) IncrMsgSent() {
+	t.channelz.SocketMetrics.MessagesSent.Add(1)
+	t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1)
+}
+
+func (t *http2Server) IncrMsgRecv() {
+	t.channelz.SocketMetrics.MessagesReceived.Add(1)
+	t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1)
 }
 
 func (t *http2Server) getOutFlowWindow() int64 {
@@ -1456,7 +1563,11 @@ func getJitter(v time.Duration) time.Duration {
 	}
 	// Generate a jitter between +/- 10% of the value.
 	r := int64(v / 10)
-	j := rand.Int64N(2*r) - r
+	j := rand.Int63n(2*r) - r
 	return time.Duration(j)
 }
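getJitter above spreads keepalive-related timers by +/-10% so connections do not fire in lockstep; the kept side uses the pre-math/rand/v2 API. A standalone sketch with an illustrative base duration:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// getJitter returns a random duration in (-v/10, +v/10), matching the
// vendored helper.
func getJitter(v time.Duration) time.Duration {
	r := int64(v / 10)
	j := rand.Int63n(2*r) - r
	return time.Duration(j)
}

func main() {
	base := 2 * time.Hour // illustrative timer value
	fmt.Println(base + getJitter(base))
}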
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index 2859b87755..7f7599b303 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -27,6 +27,10 @@ import (
 	"fmt"
 	"io"
 	"net"
+	"strings"
 	"sync"
 	"sync/atomic"
 	"time"
@@ -38,6 +42,10 @@ import (
 	"google.golang.org/grpc/mem"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/peer"
+	"google.golang.org/grpc/resolver"
 	"google.golang.org/grpc/stats"
 	"google.golang.org/grpc/status"
 	"google.golang.org/grpc/tap"
@@ -131,7 +139,11 @@ type recvBufferReader struct {
 	err error
 }
 
-func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) {
+func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) {
 	if r.err != nil {
 		return 0, r.err
 	}
@@ -140,9 +152,15 @@ func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) {
 		return n, nil
 	}
 	if r.closeStream != nil {
-		n, r.err = r.readMessageHeaderClient(header)
-	} else {
-		n, r.err = r.readMessageHeader(header)
+		n, r.err = r.readHeaderClient(header)
+	} else {
+		n, r.err = r.readHeader(header)
 	}
 	return n, r.err
 }
@@ -172,12 +190,20 @@ func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) {
 	return buf, r.err
 }
 
-func (r *recvBufferReader) readMessageHeader(header []byte) (n int, err error) {
+func (r *recvBufferReader) readHeader(header []byte) (n int, err error) {
 	select {
 	case <-r.ctxDone:
 		return 0, ContextErr(r.ctx.Err())
 	case m := <-r.recv.get():
-		return r.readMessageHeaderAdditional(m, header)
+		return r.readHeaderAdditional(m, header)
 	}
 }
 
@@ -190,7 +216,11 @@ func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) {
 	}
 }
 
-func (r *recvBufferReader) readMessageHeaderClient(header []byte) (n int, err error) {
+func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) {
 	// If the context is canceled, then closes the stream with nil metadata.
 	// closeStream writes its error parameter to r.recv as a recvMsg.
 	// r.readAdditional acts on that message and returns the necessary error.
@@ -211,9 +241,15 @@ func (r *recvBufferReader) readMessageHeaderClient(header []byte) (n int, err er
 	// faster.
 	r.closeStream(ContextErr(r.ctx.Err()))
 	m := <-r.recv.get()
-		return r.readMessageHeaderAdditional(m, header)
-	case m := <-r.recv.get():
-		return r.readMessageHeaderAdditional(m, header)
+		return r.readHeaderAdditional(m, header)
+	case m := <-r.recv.get():
+		return r.readHeaderAdditional(m, header)
 	}
 }
 
@@ -244,7 +280,11 @@ func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) {
 	}
 }
 
-func (r *recvBufferReader) readMessageHeaderAdditional(m recvMsg, header []byte) (n int, err error) {
+func (r *recvBufferReader) readHeaderAdditional(m recvMsg, header []byte) (n int, err error) {
 	r.recv.load()
 	if m.err != nil {
 		if m.buffer != nil {
@@ -286,8 +326,19 @@ const (
 
 // Stream represents an RPC in the transport layer.
 type Stream struct {
 	id uint32
-	ctx    context.Context // the associated context of the stream
-	method string          // the associated RPC method of the stream
+	st           ServerTransport    // nil for client side Stream
+	ct           ClientTransport    // nil for server side Stream
+	ctx          context.Context    // the associated context of the stream
+	cancel       context.CancelFunc // always nil for client side Stream
+	done         chan struct{}      // closed at the end of stream to unblock writers. On the client side.
+	doneFunc     func()             // invoked at the end of stream on client side.
+	ctxDone      <-chan struct{}    // same as done chan but for server side. Cache of ctx.Done() (for performance)
+	method       string             // the associated RPC method of the stream
 	recvCompress string
 	sendCompress string
 	buf          *recvBuffer
@@ -295,10 +346,17 @@ type Stream struct {
 	fc *inFlow
 	wq *writeQuota
 
+	// Holds compressor names passed in grpc-accept-encoding metadata from the
+	// client. This is empty for the client side stream.
+	clientAdvertisedCompressors string
 	// Callback to state application's intentions to read data. This
 	// is used to adjust flow control, if needed.
 	requestRead func(int)
 
-	state streamState
-
-	// contentSubtype is the content-subtype for requests.
@@ -306,6 +364,53 @@ type Stream struct {
-	contentSubtype string
-
-	trailer metadata.MD // the key-value map of trailer metadata.
+	headerChan       chan struct{} // closed to indicate the end of header metadata.
+	headerChanClosed uint32        // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+	// headerValid indicates whether a valid header was received. Only
+	// meaningful after headerChan is closed (always call waitOnHeader() before
+	// reading its value). Not valid on server side.
+	headerValid      bool
+	headerWireLength int // Only set on server side.
+
+	// hdrMu protects header and trailer metadata on the server-side.
+	hdrMu sync.Mutex
+	// On client side, header keeps the received header metadata.
+	//
+	// On server side, header keeps the header set by SetHeader(). The complete
+	// header will be merged into this after t.WriteHeader() is called.
+	header  metadata.MD
+	trailer metadata.MD // the key-value map of trailer metadata.
+
+	noHeaders bool // set if the client never received headers (set only after the stream is done).
+
+	// On the server-side, headerSent is atomically set to 1 when the headers are sent out.
+	headerSent uint32
+
+	state streamState
+
+	// On client-side it is the status error received from the server.
+	// On server-side it is unused.
+	status *status.Status
+
+	bytesReceived uint32 // indicates whether any bytes have been received on this stream
+	unprocessed   uint32 // set if the server sends a refused stream or GOAWAY including this stream
+
+	// contentSubtype is the content-subtype for requests.
+	// this must be lowercase or the behavior is undefined.
+	contentSubtype string
+}
+
+// isHeaderSent is only valid on the server-side.
+func (s *Stream) isHeaderSent() bool {
+	return atomic.LoadUint32(&s.headerSent) == 1
+}
+
+// updateHeaderSent updates headerSent and returns true
+// if it was already set. It is valid only on server-side.
+func (s *Stream) updateHeaderSent() bool {
+	return atomic.SwapUint32(&s.headerSent, 1) == 1
 }
 
 func (s *Stream) swapState(st streamState) streamState {
@@ -320,12 +425,119 @@ func (s *Stream) getState() streamState {
 	return streamState(atomic.LoadUint32((*uint32)(&s.state)))
 }
 
-// Trailer returns the cached trailer metadata. Note that if it is not called
-// after the entire stream is done, it could return an empty MD.
-// It can be safely read only after stream has ended that is either read
-// or write have returned io.EOF.
-func (s *Stream) Trailer() metadata.MD {
-	return s.trailer.Copy()
+func (s *Stream) waitOnHeader() {
+	if s.headerChan == nil {
+		// On the server headerChan is always nil since a stream originates
+		// only after having received headers.
+		return
+	}
+	select {
+	case <-s.ctx.Done():
+		// Close the stream to prevent headers/trailers from changing after
+		// this function returns.
+		s.ct.CloseStream(s, ContextErr(s.ctx.Err()))
+		// headerChan could possibly not be closed yet if closeStream raced
+		// with operateHeaders; wait until it is closed explicitly here.
+		<-s.headerChan
+	case <-s.headerChan:
+	}
+}
+
+// RecvCompress returns the compression algorithm applied to the inbound
+// message. It is empty string if there is no compression applied.
+func (s *Stream) RecvCompress() string {
+	s.waitOnHeader()
+	return s.recvCompress
+}
+
+// SetSendCompress sets the compression algorithm to the stream.
+func (s *Stream) SetSendCompress(name string) error {
+	if s.isHeaderSent() || s.getState() == streamDone {
+		return errors.New("transport: set send compressor called after headers sent or stream done")
+	}
+
+	s.sendCompress = name
+	return nil
+}
+
+// SendCompress returns the send compressor name.
+func (s *Stream) SendCompress() string {
+	return s.sendCompress
+}
+
+// ClientAdvertisedCompressors returns the compressor names advertised by the
+// client via grpc-accept-encoding header.
+func (s *Stream) ClientAdvertisedCompressors() []string {
+	values := strings.Split(s.clientAdvertisedCompressors, ",")
+	for i, v := range values {
+		values[i] = strings.TrimSpace(v)
+	}
+	return values
+}
+
+// Done returns a channel which is closed when it receives the final status
+// from the server.
+func (s *Stream) Done() <-chan struct{} {
+	return s.done
+}
+
+// Header returns the header metadata of the stream.
+//
+// On client side, it acquires the key-value pairs of header metadata once it is
+// available. It blocks until i) the metadata is ready or ii) there is no header
+// metadata or iii) the stream is canceled/expired.
+//
+// On server side, it returns the out header after t.WriteHeader is called. It
+// does not block and must not be called until after WriteHeader.
+func (s *Stream) Header() (metadata.MD, error) {
+	if s.headerChan == nil {
+		// On server side, return the header in stream. It will be the out
+		// header after t.WriteHeader is called.
+		return s.header.Copy(), nil
+	}
+	s.waitOnHeader()
+
+	if !s.headerValid || s.noHeaders {
+		return nil, s.status.Err()
+	}
+
+	return s.header.Copy(), nil
+}
+
+// TrailersOnly blocks until a header or trailers-only frame is received and
+// then returns true if the stream was trailers-only. If the stream ends
+// before headers are received, returns true, nil. Client-side only.
+func (s *Stream) TrailersOnly() bool {
+	s.waitOnHeader()
+	return s.noHeaders
+}
+
+// Trailer returns the cached trailer metadata. Note that if it is not called
+// after the entire stream is done, it could return an empty MD. Client
+// side only.
+// It can be safely read only after stream has ended that is either read
+// or write have returned io.EOF.
+func (s *Stream) Trailer() metadata.MD {
+	c := s.trailer.Copy()
+	return c
+}
+
+// ContentSubtype returns the content-subtype for a request. For example, a
+// content-subtype of "proto" will result in a content-type of
+// "application/grpc+proto". This will always be lowercase. See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+func (s *Stream) ContentSubtype() string {
+	return s.contentSubtype
 }
 
 // Context returns the context of the stream.
@@ -333,15 +545,80 @@ func (s *Stream) Context() context.Context {
 	return s.ctx
 }
 
+// SetContext sets the context of the stream. This will be deleted once the
+// stats handler callouts all move to gRPC layer.
+func (s *Stream) SetContext(ctx context.Context) {
+	s.ctx = ctx
+}
+
 // Method returns the method for the stream.
 func (s *Stream) Method() string {
 	return s.method
 }
 
+// Status returns the status received from the server.
+// Status can be read safely only after the stream has ended,
+// that is, after Done() is closed.
+func (s *Stream) Status() *status.Status {
+	return s.status
+}
+
+// HeaderWireLength returns the size of the headers of the stream as received
+// from the wire. Valid only on the server.
+func (s *Stream) HeaderWireLength() int {
+	return s.headerWireLength
+}
+
+// SetHeader sets the header metadata. This can be called multiple times.
+// Server side only.
+// This should not be called in parallel to other data writes.
+func (s *Stream) SetHeader(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	if s.isHeaderSent() || s.getState() == streamDone {
+		return ErrIllegalHeaderWrite
+	}
+	s.hdrMu.Lock()
+	s.header = metadata.Join(s.header, md)
+	s.hdrMu.Unlock()
+	return nil
+}
+
+// SendHeader sends the given header metadata. The given metadata is
+// combined with any metadata set by previous calls to SetHeader and
+// then written to the transport stream.
+func (s *Stream) SendHeader(md metadata.MD) error {
+	return s.st.WriteHeader(s, md)
+}
+
+// SetTrailer sets the trailer metadata which will be sent with the RPC status
+// by the server. This can be called multiple times. Server side only.
+// This should not be called parallel to other data writes.
+func (s *Stream) SetTrailer(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	if s.getState() == streamDone {
+		return ErrIllegalHeaderWrite
+	}
+	s.hdrMu.Lock()
+	s.trailer = metadata.Join(s.trailer, md)
+	s.hdrMu.Unlock()
+	return nil
+}
+
 func (s *Stream) write(m recvMsg) {
 	s.buf.put(m)
 }
 
-// ReadMessageHeader reads data into the provided header slice from the stream.
-// It first checks if there was an error during a previous read operation and
-// returns it if present. It then requests a read operation for the length of
@@ -351,13 +628,29 @@ func (s *Stream) write(m recvMsg) {
-// unexpected end of the stream. The method returns any error encountered during
-// the read process or nil if the header was successfully read.
-func (s *Stream) ReadMessageHeader(header []byte) (err error) {
+// ReadHeader reads data into the provided header slice from the stream. It
+// first checks if there was an error during a previous read operation and
+// returns it if present. It then requests a read operation for the length of
+// the header. It continues to read from the stream until the entire header
+// slice is filled or an error occurs. If an `io.EOF` error is encountered
+// with partially read data, it is converted to `io.ErrUnexpectedEOF` to
+// indicate an unexpected end of the stream. The method returns any error
+// encountered during the read process or nil if the header was successfully
+// read.
+func (s *Stream) ReadHeader(header []byte) (err error) {
 	// Don't request a read if there was an error earlier
 	if er := s.trReader.er; er != nil {
 		return er
 	}
 	s.requestRead(len(header))
 	for len(header) != 0 {
-		n, err := s.trReader.ReadMessageHeader(header)
+		n, err := s.trReader.ReadHeader(header)
 		header = header[n:]
 		if len(header) == 0 {
 			err = nil
@@ -373,7 +666,11 @@ func (s *Stream) ReadHeader(header []byte) (err error) {
 }
 
 // Read reads n bytes from the wire for this stream.
-func (s *Stream) read(n int) (data mem.BufferSlice, err error) {
+func (s *Stream) Read(n int) (data mem.BufferSlice, err error) {
 	// Don't request a read if there was an error earlier
 	if er := s.trReader.er; er != nil {
 		return nil, er
@@ -413,8 +710,13 @@ type transportReader struct {
 	er error
 }
 
-func (t *transportReader) ReadMessageHeader(header []byte) (int, error) {
-	n, err := t.reader.ReadMessageHeader(header)
+func (t *transportReader) ReadHeader(header []byte) (int, error) {
+	n, err := t.reader.ReadHeader(header)
 	if err != nil {
 		t.er = err
 		return 0, err
@@ -433,6 +735,20 @@ func (t *transportReader) Read(n int) (mem.Buffer, error) {
 	return buf, nil
 }
 
+// BytesReceived indicates whether any bytes have been received on this stream.
+func (s *Stream) BytesReceived() bool {
+	return atomic.LoadUint32(&s.bytesReceived) == 1
+}
+
+// Unprocessed indicates whether the server did not process this stream --
+// i.e. it sent a refused stream or GOAWAY including this stream ID.
+func (s *Stream) Unprocessed() bool {
+	return atomic.LoadUint32(&s.unprocessed) == 1
+}
+
 // GoString is implemented by Stream so context.String() won't
 // race when printing %#v.
 func (s *Stream) GoString() string {
@@ -508,9 +824,21 @@ type ConnectOptions struct {
 	BufferPool mem.BufferPool
 }
 
-// WriteOptions provides additional hints and information for message
-// transmission.
-type WriteOptions struct {
+// NewClientTransport establishes the transport with the required ConnectOptions
+// and returns it to the caller.
+func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) {
+	return newHTTP2Client(connectCtx, ctx, addr, opts, onClose)
+}
+
+// Options provides additional hints and information for message
+// transmission.
+type Options struct {
 	// Last indicates whether this write is the last piece for
 	// this stream.
 	Last bool
@@ -559,8 +887,23 @@ type ClientTransport interface {
 	// It does not block.
 	GracefulClose()
 
-	// NewStream creates a Stream for an RPC.
-	NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error)
+	// Write sends the data for the given stream. A nil stream indicates
+	// the write is to be performed on the transport as a whole.
+	Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error
+
+	// NewStream creates a Stream for an RPC.
+	NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)
+
+	// CloseStream clears the footprint of a stream when the stream is
+	// not needed any more. The err indicates the error incurred when
+	// CloseStream is called. Must be called when a stream is finished
+	// unless the associated transport is closing.
+	CloseStream(stream *Stream, err error)
 
 	// Error returns a channel that is closed when some I/O error
 	// happens. Typically the caller should have a goroutine to monitor
@@ -580,6 +923,15 @@ type ClientTransport interface {
 
 	// RemoteAddr returns the remote network address.
 	RemoteAddr() net.Addr
+
+	// IncrMsgSent increments the number of message sent through this transport.
+	IncrMsgSent()
+
+	// IncrMsgRecv increments the number of message received through this transport.
+	IncrMsgRecv()
 }
 
 // ServerTransport is the common interface for all gRPC server-side transport
@@ -589,7 +941,23 @@ type ClientTransport interface {
 // Write methods for a given Stream will be called serially.
 type ServerTransport interface {
 	// HandleStreams receives incoming streams using the given handler.
-	HandleStreams(context.Context, func(*ServerStream))
+	HandleStreams(context.Context, func(*Stream))
+
+	// WriteHeader sends the header metadata for the given stream.
+	// WriteHeader may not be called on all streams.
+	WriteHeader(s *Stream, md metadata.MD) error
+
+	// Write sends the data for the given stream.
+	// Write may not be called on all streams.
+	Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error
+
+	// WriteStatus sends the status of a stream to the client. WriteStatus is
+	// the final call made on a stream and always occurs.
+	WriteStatus(s *Stream, st *status.Status) error
 
 	// Close tears down the transport. Once it is called, the transport
 	// should not be accessed any more. All the pending streams and their
@@ -601,6 +969,7 @@ type ServerTransport interface {
 
 	// Drain notifies the client this ServerTransport stops accepting new RPCs.
 	Drain(debugData string)
-}
-
-type internalServerTransport interface {
@@ -609,6 +978,14 @@ type internalServerTransport interface {
-	write(s *ServerStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error
-	writeStatus(s *ServerStream, st *status.Status) error
-	incrMsgRecv()
+
+	// IncrMsgSent increments the number of message sent through this transport.
+	IncrMsgSent()
+
+	// IncrMsgRecv increments the number of message received through this transport.
+	IncrMsgRecv()
 }
 
 // connectionErrorf creates an ConnectionError with the specified error description.
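Stream.SetHeader and SetTrailer above accumulate metadata across calls with metadata.Join instead of overwriting. A short sketch of that merge semantics; the keys and values are made up:

package main

import (
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	// Two SetHeader calls on the same stream accumulate values per key,
	// which is what metadata.Join does under the hood.
	first := metadata.Pairs("x-request-id", "abc")
	second := metadata.Pairs("x-request-id", "def", "x-extra", "1")
	fmt.Println(metadata.Join(first, second))
	// map[x-extra:[1] x-request-id:[abc def]]
}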
+ Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error + + // WriteStatus sends the status of a stream to the client. WriteStatus is + // the final call made on a stream and always occurs. + WriteStatus(s *Stream, st *status.Status) error +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their @@ -601,6 +969,7 @@ type ServerTransport interface { // Drain notifies the client this ServerTransport stops accepting new RPCs. Drain(debugData string) +<<<<<<< HEAD } type internalServerTransport interface { @@ -609,6 +978,14 @@ type internalServerTransport interface { write(s *ServerStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error writeStatus(s *ServerStream, st *status.Status) error incrMsgRecv() +======= + + // IncrMsgSent increments the number of message sent through this transport. + IncrMsgSent() + + // IncrMsgRecv increments the number of message received through this transport. + IncrMsgRecv() +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // connectionErrorf creates an ConnectionError with the specified error description. diff --git a/vendor/google.golang.org/grpc/internal/wrr/random.go b/vendor/google.golang.org/grpc/internal/wrr/random.go index 0913ed6764..f0c4e060c6 100644 --- a/vendor/google.golang.org/grpc/internal/wrr/random.go +++ b/vendor/google.golang.org/grpc/internal/wrr/random.go @@ -19,7 +19,11 @@ package wrr import ( "fmt" +<<<<<<< HEAD rand "math/rand/v2" +======= + "math/rand" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "sort" ) @@ -46,19 +50,31 @@ func NewRandom() WRR { return &randomWRR{} } +<<<<<<< HEAD var randInt64n = rand.Int64N +======= +var randInt63n = rand.Int63n +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (rw *randomWRR) Next() (item any) { if len(rw.items) == 0 { return nil } if rw.equalWeights { +<<<<<<< HEAD return rw.items[randInt64n(int64(len(rw.items)))].item +======= + return rw.items[randInt63n(int64(len(rw.items)))].item +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } sumOfWeights := rw.items[len(rw.items)-1].accumulatedWeight // Random number in [0, sumOfWeights). +<<<<<<< HEAD randomWeight := randInt64n(sumOfWeights) +======= + randomWeight := randInt63n(sumOfWeights) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Item's accumulated weights are in ascending order, because item's weight >= 0. // Binary search rw.items to find first item whose accumulatedWeight > randomWeight // The return i is guaranteed to be in range [0, len(rw.items)) because randomWeight < last item's accumulatedWeight diff --git a/vendor/google.golang.org/grpc/internal/xds/matcher/matcher_header.go b/vendor/google.golang.org/grpc/internal/xds/matcher/matcher_header.go index 780257ec33..1c288cec99 100644 --- a/vendor/google.golang.org/grpc/internal/xds/matcher/matcher_header.go +++ b/vendor/google.golang.org/grpc/internal/xds/matcher/matcher_header.go @@ -36,11 +36,19 @@ type HeaderMatcher interface { String() string } +<<<<<<< HEAD // valueFromMD retrieves metadata from context. If there are // multiple values, the values are concatenated with "," (comma and no space). // // All header matchers only match against the comma-concatenated string. func valueFromMD(md metadata.MD, key string) (string, bool) { +======= +// mdValuesFromOutgoingCtx retrieves metadata from context. 
If there are +// multiple values, the values are concatenated with "," (comma and no space). +// +// All header matchers only match against the comma-concatenated string. +func mdValuesFromOutgoingCtx(md metadata.MD, key string) (string, bool) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) vs, ok := md[key] if !ok { return "", false @@ -63,7 +71,11 @@ func NewHeaderExactMatcher(key, exact string, invert bool) *HeaderExactMatcher { // Match returns whether the passed in HTTP Headers match according to the // HeaderExactMatcher. func (hem *HeaderExactMatcher) Match(md metadata.MD) bool { +<<<<<<< HEAD v, ok := valueFromMD(md, hem.key) +======= + v, ok := mdValuesFromOutgoingCtx(md, hem.key) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !ok { return false } @@ -90,7 +102,11 @@ func NewHeaderRegexMatcher(key string, re *regexp.Regexp, invert bool) *HeaderRe // Match returns whether the passed in HTTP Headers match according to the // HeaderRegexMatcher. func (hrm *HeaderRegexMatcher) Match(md metadata.MD) bool { +<<<<<<< HEAD v, ok := valueFromMD(md, hrm.key) +======= + v, ok := mdValuesFromOutgoingCtx(md, hrm.key) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !ok { return false } @@ -117,7 +133,11 @@ func NewHeaderRangeMatcher(key string, start, end int64, invert bool) *HeaderRan // Match returns whether the passed in HTTP Headers match according to the // HeaderRangeMatcher. func (hrm *HeaderRangeMatcher) Match(md metadata.MD) bool { +<<<<<<< HEAD v, ok := valueFromMD(md, hrm.key) +======= + v, ok := mdValuesFromOutgoingCtx(md, hrm.key) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !ok { return false } @@ -149,7 +169,11 @@ func NewHeaderPresentMatcher(key string, present bool, invert bool) *HeaderPrese // Match returns whether the passed in HTTP Headers match according to the // HeaderPresentMatcher. func (hpm *HeaderPresentMatcher) Match(md metadata.MD) bool { +<<<<<<< HEAD vs, ok := valueFromMD(md, hpm.key) +======= + vs, ok := mdValuesFromOutgoingCtx(md, hpm.key) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) present := ok && len(vs) > 0 // TODO: Are we sure we need this len(vs) > 0? return present == hpm.present } @@ -174,7 +198,11 @@ func NewHeaderPrefixMatcher(key string, prefix string, invert bool) *HeaderPrefi // Match returns whether the passed in HTTP Headers match according to the // HeaderPrefixMatcher. func (hpm *HeaderPrefixMatcher) Match(md metadata.MD) bool { +<<<<<<< HEAD v, ok := valueFromMD(md, hpm.key) +======= + v, ok := mdValuesFromOutgoingCtx(md, hpm.key) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !ok { return false } @@ -201,7 +229,11 @@ func NewHeaderSuffixMatcher(key string, suffix string, invert bool) *HeaderSuffi // Match returns whether the passed in HTTP Headers match according to the // HeaderSuffixMatcher. func (hsm *HeaderSuffixMatcher) Match(md metadata.MD) bool { +<<<<<<< HEAD v, ok := valueFromMD(md, hsm.key) +======= + v, ok := mdValuesFromOutgoingCtx(md, hsm.key) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !ok { return false } @@ -231,7 +263,11 @@ func NewHeaderContainsMatcher(key string, contains string, invert bool) *HeaderC // Match returns whether the passed in HTTP Headers match according to the // HeaderContainsMatcher. 
func (hcm *HeaderContainsMatcher) Match(md metadata.MD) bool { +<<<<<<< HEAD v, ok := valueFromMD(md, hcm.key) +======= + v, ok := mdValuesFromOutgoingCtx(md, hcm.key) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !ok { return false } @@ -262,7 +298,11 @@ func NewHeaderStringMatcher(key string, sm StringMatcher, invert bool) *HeaderSt // Match returns whether the passed in HTTP Headers match according to the // specified StringMatcher. func (hsm *HeaderStringMatcher) Match(md metadata.MD) bool { +<<<<<<< HEAD v, ok := valueFromMD(md, hsm.key) +======= + v, ok := mdValuesFromOutgoingCtx(md, hsm.key) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !ok { return false } diff --git a/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go b/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go index 07bb59cee5..1416a5ff7d 100644 --- a/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go +++ b/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go @@ -20,7 +20,10 @@ import ( "errors" "fmt" "net" +<<<<<<< HEAD "net/netip" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "regexp" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" @@ -345,8 +348,12 @@ func newRemoteIPMatcher(cidrRange *v3corepb.CidrRange) (*remoteIPMatcher, error) } func (sim *remoteIPMatcher) match(data *rpcData) bool { +<<<<<<< HEAD ip, _ := netip.ParseAddr(data.peerInfo.Addr.String()) return sim.ipNet.Contains(net.IP(ip.AsSlice())) +======= + return sim.ipNet.Contains(net.IP(net.ParseIP(data.peerInfo.Addr.String()))) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type localIPMatcher struct { @@ -363,8 +370,12 @@ func newLocalIPMatcher(cidrRange *v3corepb.CidrRange) (*localIPMatcher, error) { } func (dim *localIPMatcher) match(data *rpcData) bool { +<<<<<<< HEAD ip, _ := netip.ParseAddr(data.localAddr.String()) return dim.ipNet.Contains(net.IP(ip.AsSlice())) +======= + return dim.ipNet.Contains(net.IP(net.ParseIP(data.localAddr.String()))) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // portMatcher matches on whether the destination port of the RPC matches the diff --git a/vendor/google.golang.org/grpc/internal/xds/rbac/rbac_engine.go b/vendor/google.golang.org/grpc/internal/xds/rbac/rbac_engine.go index 8f376c4003..fb33537791 100644 --- a/vendor/google.golang.org/grpc/internal/xds/rbac/rbac_engine.go +++ b/vendor/google.golang.org/grpc/internal/xds/rbac/rbac_engine.go @@ -219,9 +219,12 @@ func newRPCData(ctx context.Context) (*rpcData, error) { if !ok { return nil, errors.New("missing method in incoming context") } +<<<<<<< HEAD // gRPC-Go strips :path from the headers given to the application, but RBAC should be // able to match against it. md[":path"] = []string{mn} +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The connection is needed in order to find the destination address and // port of the incoming RPC Call. diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go index 65002e2cc8..ab92111273 100644 --- a/vendor/google.golang.org/grpc/mem/buffer_slice.go +++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go @@ -22,11 +22,14 @@ import ( "io" ) +<<<<<<< HEAD const ( // 32 KiB is what io.Copy uses. readAllBufSize = 32 * 1024 ) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // BufferSlice offers a means to represent data that spans one or more Buffer // instances. 
A BufferSlice is meant to be immutable after creation, and methods // like Ref create and return copies of the slice. This is why all methods have @@ -224,6 +227,7 @@ func (w *writer) Write(p []byte) (n int, err error) { // NewWriter wraps the given BufferSlice and BufferPool to implement the // io.Writer interface. Every call to Write copies the contents of the given +<<<<<<< HEAD // buffer into a new Buffer pulled from the given pool and the Buffer is // added to the given BufferSlice. func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer { @@ -279,3 +283,10 @@ nextBuffer: } } } +======= +// buffer into a new Buffer pulled from the given pool and the Buffer is added to +// the given BufferSlice. +func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer { + return &writer{buffers: buffers, pool: pool} +} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index ee0ff969af..02de738099 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -62,7 +62,11 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { materializedData := data.Materialize() data.Free() +<<<<<<< HEAD p.encodedData = mem.BufferSlice{mem.SliceBuffer(materializedData)} +======= + p.encodedData = mem.BufferSlice{mem.NewBuffer(&materializedData, nil)} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // TODO: it should be possible to grab the bufferPool from the underlying // stream implementation with a type cast to its actual type (such as @@ -76,7 +80,11 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { if p.pf.isCompressed() { materializedCompData := compData.Materialize() compData.Free() +<<<<<<< HEAD compData = mem.BufferSlice{mem.SliceBuffer(materializedCompData)} +======= + compData = mem.BufferSlice{mem.NewBuffer(&materializedCompData, nil)} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf) diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 8eb1cf3bcf..ecede0885b 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -22,7 +22,10 @@ package resolver import ( "context" +<<<<<<< HEAD "errors" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "fmt" "net" "net/url" @@ -238,8 +241,13 @@ type ClientConn interface { // UpdateState can be omitted. UpdateState(State) error // ReportError notifies the ClientConn that the Resolver encountered an +<<<<<<< HEAD // error. The ClientConn then forwards this error to the load balancing // policy. +======= + // error. The ClientConn will notify the load balancer and begin calling + // ResolveNow on the Resolver with exponential backoff. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ReportError(error) // NewAddress is called by resolver to notify ClientConn a new list // of resolved addresses. @@ -331,6 +339,7 @@ type AuthorityOverrider interface { // typically in line, and must keep it unchanged. OverrideAuthority(Target) string } +<<<<<<< HEAD // ValidateEndpoints validates endpoints from a petiole policy's perspective. // Petiole policies should call this before calling into their children. 
See @@ -348,3 +357,5 @@ func ValidateEndpoints(endpoints []Endpoint) error { } return errors.New("endpoints list contains no addresses") } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 9fac2b08b4..8701787699 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -622,7 +622,11 @@ func (pf payloadFormat) isCompressed() bool { } type streamReader interface { +<<<<<<< HEAD ReadMessageHeader(header []byte) error +======= + ReadHeader(header []byte) error +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Read(n int) (mem.BufferSlice, error) } @@ -656,7 +660,11 @@ type parser struct { // that the underlying streamReader must not return an incompatible // error. func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) { +<<<<<<< HEAD err := p.r.ReadMessageHeader(p.header[:]) +======= + err := p.r.ReadHeader(p.header[:]) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return 0, nil, err } @@ -664,6 +672,12 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSl pf := payloadFormat(p.header[0]) length := binary.BigEndian.Uint32(p.header[1:]) +<<<<<<< HEAD +======= + if length == 0 { + return pf, nil, nil + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if int64(length) > int64(maxInt) { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt) } @@ -814,7 +828,11 @@ func (p *payloadInfo) free() { // the buffer is no longer needed. // TODO: Refactor this function to reduce the number of arguments. // See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists +<<<<<<< HEAD func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, +======= +func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) (out mem.BufferSlice, err error) { pf, compressed, err := p.recvMsg(maxReceiveMessageSize) if err != nil { @@ -838,7 +856,11 @@ func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveM var uncompressedBuf []byte uncompressedBuf, err = dc.Do(compressed.Reader()) if err == nil { +<<<<<<< HEAD out = mem.BufferSlice{mem.SliceBuffer(uncompressedBuf)} +======= + out = mem.BufferSlice{mem.NewBuffer(&uncompressedBuf, nil)} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } size = len(uncompressedBuf) } else { @@ -874,7 +896,34 @@ func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMes return nil, 0, err } +<<<<<<< HEAD out, err := mem.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1), pool) +======= + // TODO: Can/should this still be preserved with the new BufferSlice API? Are + // there any actual benefits to allocating a single large buffer instead of + // multiple smaller ones? 
+ //if sizer, ok := compressor.(interface { + // DecompressedSize(compressedBytes []byte) int + //}); ok { + // if size := sizer.DecompressedSize(d); size >= 0 { + // if size > maxReceiveMessageSize { + // return nil, size, nil + // } + // // size is used as an estimate to size the buffer, but we + // // will read more data if available. + // // +MinRead so ReadFrom will not reallocate if size is correct. + // // + // // TODO: If we ensure that the buffer size is the same as the DecompressedSize, + // // we can also utilize the recv buffer pool here. + // buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) + // bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + // return buf.Bytes(), int(bytesRead), err + // } + //} + + var out mem.BufferSlice + _, err = io.Copy(mem.NewWriter(&out, pool), io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { out.Free() return nil, 0, err @@ -882,6 +931,7 @@ func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMes return out, out.Len(), nil } +<<<<<<< HEAD type recvCompressor interface { RecvCompress() string } @@ -890,6 +940,12 @@ type recvCompressor interface { // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? func recv(p *parser, c baseCodec, s recvCompressor, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { +======= +// For the two compressor parameters, both should not be set, but if they are, +// dc takes precedence over compressor. +// TODO(dfawley): wrap the old compressor/decompressor using the new API? +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer) if err != nil { return err diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 9d5b2884d1..d56822a7b5 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -87,13 +87,21 @@ func init() { var statusOK = status.New(codes.OK, "") var logger = grpclog.Component("core") +<<<<<<< HEAD // MethodHandler is a function type that processes a unary RPC method call. type MethodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) +======= +type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // MethodDesc represents an RPC service's method specification. type MethodDesc struct { MethodName string +<<<<<<< HEAD Handler MethodHandler +======= + Handler methodHandler +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // ServiceDesc represents an RPC service's specification. @@ -622,8 +630,13 @@ func bufferPool(bufferPool mem.BufferPool) ServerOption { // workload (assuming a QPS of a few thousand requests/sec). const serverWorkerResetThreshold = 1 << 16 +<<<<<<< HEAD // serverWorker blocks on a *transport.ServerStream channel forever and waits // for data to be fed by serveStreams. 
This allows multiple requests to be +======= +// serverWorker blocks on a *transport.Stream channel forever and waits for +// data to be fed by serveStreams. This allows multiple requests to be +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // processed by the same goroutine, removing the need for expensive stack // re-allocations (see the runtime.morestack problem [1]). // @@ -1021,7 +1034,11 @@ func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, }() streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) +<<<<<<< HEAD st.HandleStreams(ctx, func(stream *transport.ServerStream) { +======= + st.HandleStreams(ctx, func(stream *transport.Stream) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) s.handlersWG.Add(1) streamQuota.acquire() f := func() { @@ -1137,7 +1154,11 @@ func (s *Server) incrCallsFailed() { s.channelz.ServerMetrics.CallsFailed.Add(1) } +<<<<<<< HEAD func (s *Server) sendResponse(ctx context.Context, stream *transport.ServerStream, msg any, cp Compressor, opts *transport.WriteOptions, comp encoding.Compressor) error { +======= +func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err) @@ -1166,7 +1187,11 @@ func (s *Server) sendResponse(ctx context.Context, stream *transport.ServerStrea if payloadLen > s.opts.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize) } +<<<<<<< HEAD err = stream.Write(hdr, payload, opts) +======= + err = t.Write(stream, hdr, payload, opts) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err == nil { if len(s.opts.statsHandlers) != 0 { for _, sh := range s.opts.statsHandlers { @@ -1213,7 +1238,11 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info } } +<<<<<<< HEAD func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { +======= +func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) shs := s.opts.statsHandlers if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { @@ -1321,7 +1350,11 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt decomp = encoding.GetCompressor(rc) if decomp == nil { st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) +<<<<<<< HEAD stream.WriteStatus(st) +======= + t.WriteStatus(stream, st) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return st.Err() } } @@ -1355,11 +1388,16 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true) if err != nil { +<<<<<<< HEAD if e := stream.WriteStatus(status.Convert(err)); e != nil { +======= + if e := t.WriteStatus(stream, status.Convert(err)); e != nil { +>>>>>>> 70e0318b1 ([WIP] add 
archivista storage backend) channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } +<<<<<<< HEAD freed := false dataFree := func() { if !freed { @@ -1370,6 +1408,13 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt defer dataFree() df := func(v any) error { defer dataFree() +======= + defer d.Free() + if channelz.IsOn() { + t.IncrMsgRecv() + } + df := func(v any) error { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } @@ -1410,7 +1455,11 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt trInfo.tr.LazyLog(stringer(appStatus.Message()), true) trInfo.tr.SetError() } +<<<<<<< HEAD if e := stream.WriteStatus(appStatus); e != nil { +======= + if e := t.WriteStatus(stream, appStatus); e != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } if len(binlogs) != 0 { @@ -1437,20 +1486,32 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt if trInfo != nil { trInfo.tr.LazyLog(stringer("OK"), false) } +<<<<<<< HEAD opts := &transport.WriteOptions{Last: true} +======= + opts := &transport.Options{Last: true} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Server handler could have set new compressor by calling SetSendCompressor. // In case it is set, we need to use it for compressing outbound message. if stream.SendCompress() != sendCompressorName { comp = encoding.GetCompressor(stream.SendCompress()) } +<<<<<<< HEAD if err := s.sendResponse(ctx, stream, reply, cp, opts, comp); err != nil { +======= + if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err == io.EOF { // The entire stream is done (for unary RPC only). return err } if sts, ok := status.FromError(err); ok { +<<<<<<< HEAD if e := stream.WriteStatus(sts); e != nil { +======= + if e := t.WriteStatus(stream, sts); e != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } } else { @@ -1490,6 +1551,12 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt binlog.Log(ctx, sm) } } +<<<<<<< HEAD +======= + if channelz.IsOn() { + t.IncrMsgSent() + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if trInfo != nil { trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) } @@ -1505,7 +1572,11 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt binlog.Log(ctx, st) } } +<<<<<<< HEAD return stream.WriteStatus(statusOK) +======= + return t.WriteStatus(stream, statusOK) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // chainStreamServerInterceptors chains all stream server interceptors into one. 
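For context, a minimal sketch of the wire framing that parser.recvMsg decodes in the rpc_util.go hunk above: every gRPC message is preceded by a 5-byte prefix, a 1-byte compressed flag followed by a 4-byte big-endian payload length, and io.ReadFull gives the same EOF-to-ErrUnexpectedEOF conversion described for ReadMessageHeader earlier in this file. The package and identifier names here (framing, readMessagePrefix, maxRecvSize) are hypothetical, not part of this patch or of grpc-go.

    package framing

    import (
    	"encoding/binary"
    	"fmt"
    	"io"
    )

    // maxRecvSize is an assumed receive limit for this sketch; grpc-go takes
    // it from the configured maxReceiveMessageSize instead.
    const maxRecvSize = 4 * 1024 * 1024

    // readMessagePrefix reads the 5-byte gRPC message prefix: one byte that
    // marks the payload as compressed (1) or not (0), then a big-endian
    // uint32 payload length.
    func readMessagePrefix(r io.Reader) (compressed bool, length uint32, err error) {
    	var hdr [5]byte
    	// io.ReadFull returns io.ErrUnexpectedEOF on a partial read, matching
    	// the behavior documented for Stream.ReadMessageHeader above.
    	if _, err = io.ReadFull(r, hdr[:]); err != nil {
    		return false, 0, err
    	}
    	length = binary.BigEndian.Uint32(hdr[1:])
    	if length > maxRecvSize {
    		return false, 0, fmt.Errorf("grpc: message of %d bytes exceeds limit %d", length, maxRecvSize)
    	}
    	return hdr[0] == 1, length, nil
    }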
@@ -1544,7 +1615,11 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf } } +<<<<<<< HEAD func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { +======= +func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if channelz.IsOn() { s.incrCallsStarted() } @@ -1564,6 +1639,10 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv ctx = NewContextWithServerTransportStream(ctx, stream) ss := &serverStream{ ctx: ctx, +<<<<<<< HEAD +======= + t: t, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) s: stream, p: &parser{r: stream, bufferPool: s.opts.bufferPool}, codec: s.getCodec(stream.ContentSubtype()), @@ -1650,7 +1729,11 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv ss.decomp = encoding.GetCompressor(rc) if ss.decomp == nil { st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) +<<<<<<< HEAD ss.s.WriteStatus(st) +======= + t.WriteStatus(ss.s, st) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return st.Err() } } @@ -1719,7 +1802,11 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv binlog.Log(ctx, st) } } +<<<<<<< HEAD ss.s.WriteStatus(appStatus) +======= + t.WriteStatus(ss.s, appStatus) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // TODO: Should we log an error from WriteStatus here and below? return appErr } @@ -1737,10 +1824,17 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv binlog.Log(ctx, st) } } +<<<<<<< HEAD return ss.s.WriteStatus(statusOK) } func (s *Server) handleStream(t transport.ServerTransport, stream *transport.ServerStream) { +======= + return t.WriteStatus(ss.s, statusOK) +} + +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ctx := stream.Context() ctx = contextWithServer(ctx, s) var ti *traceInfo @@ -1770,7 +1864,11 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Ser ti.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) +<<<<<<< HEAD if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil { +======= + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if ti != nil { ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ti.tr.SetError() @@ -1785,6 +1883,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Ser service := sm[:pos] method := sm[pos+1:] +<<<<<<< HEAD // FromIncomingContext is expensive: skip if there are no statsHandlers if len(s.opts.statsHandlers) > 0 { md, _ := metadata.FromIncomingContext(ctx) @@ -1799,6 +1898,19 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Ser Header: md, }) } +======= + md, _ := metadata.FromIncomingContext(ctx) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) + sh.HandleRPC(ctx, &stats.InHeader{ + FullMethod: stream.Method(), + RemoteAddr: t.Peer().Addr, + LocalAddr: t.Peer().LocalAddr, + Compression: 
stream.RecvCompress(), + WireLength: stream.HeaderWireLength(), + Header: md, + }) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // To have calls in stream callouts work. Will delete once all stats handler // calls come from the gRPC layer. @@ -1807,17 +1919,29 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Ser srv, knownService := s.services[service] if knownService { if md, ok := srv.methods[method]; ok { +<<<<<<< HEAD s.processUnaryRPC(ctx, stream, srv, md, ti) return } if sd, ok := srv.streams[method]; ok { s.processStreamingRPC(ctx, stream, srv, sd, ti) +======= + s.processUnaryRPC(ctx, t, stream, srv, md, ti) + return + } + if sd, ok := srv.streams[method]; ok { + s.processStreamingRPC(ctx, t, stream, srv, sd, ti) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return } } // Unknown service, or known server unknown method. if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { +<<<<<<< HEAD s.processStreamingRPC(ctx, stream, nil, unknownDesc, ti) +======= + s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return } var errDesc string @@ -1830,7 +1954,11 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Ser ti.tr.LazyPrintf("%s", errDesc) ti.tr.SetError() } +<<<<<<< HEAD if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil { +======= + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if ti != nil { ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ti.tr.SetError() @@ -2105,7 +2233,11 @@ func SendHeader(ctx context.Context, md metadata.MD) error { // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. func SetSendCompressor(ctx context.Context, name string) error { +<<<<<<< HEAD stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream) +======= + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !ok || stream == nil { return fmt.Errorf("failed to fetch the stream from the given context") } @@ -2127,7 +2259,11 @@ func SetSendCompressor(ctx context.Context, name string) error { // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. 
func ClientSupportedCompressors(ctx context.Context) ([]string, error) { +<<<<<<< HEAD stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream) +======= + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !ok || stream == nil { return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) } diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 8d451e07c7..7b6ff9c85c 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -168,7 +168,10 @@ func init() { return parseServiceConfig(js, defaultMaxCallAttempts) } } +<<<<<<< HEAD +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult { if len(js) == 0 { return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} @@ -268,6 +271,7 @@ func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult { return &serviceconfig.ParseResult{Config: &sc} } +<<<<<<< HEAD func isValidRetryPolicy(jrp *jsonRetryPolicy) bool { return jrp.MaxAttempts > 1 && jrp.InitialBackoff > 0 && @@ -276,13 +280,25 @@ func isValidRetryPolicy(jrp *jsonRetryPolicy) bool { len(jrp.RetryableStatusCodes) > 0 } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalserviceconfig.RetryPolicy, err error) { if jrp == nil { return nil, nil } +<<<<<<< HEAD if !isValidRetryPolicy(jrp) { return nil, fmt.Errorf("invalid retry policy (%+v): ", jrp) +======= + if jrp.MaxAttempts <= 1 || + jrp.InitialBackoff <= 0 || + jrp.MaxBackoff <= 0 || + jrp.BackoffMultiplier <= 0 || + len(jrp.RetryableStatusCodes) == 0 { + logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) + return nil, nil +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if jrp.MaxAttempts < maxAttempts { @@ -301,7 +317,11 @@ func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalservi return rp, nil } +<<<<<<< HEAD func minPointers(a, b *int) *int { +======= +func min(a, b *int) *int { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if *a < *b { return a } @@ -313,7 +333,11 @@ func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { return &defaultVal } if mcMax != nil && doptMax != nil { +<<<<<<< HEAD return minPointers(mcMax, doptMax) +======= + return min(mcMax, doptMax) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if mcMax != nil { return mcMax diff --git a/vendor/google.golang.org/grpc/stats/opentelemetry/LICENSE b/vendor/google.golang.org/grpc/stats/opentelemetry/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/opentelemetry/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
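For reference, the retry-policy validation in the service_config.go hunk earlier in this patch reduces to the predicate below: a policy must allow more than one attempt, use strictly positive backoff parameters, and list at least one retryable status code. This is a sketch only; the jsonRetryPolicy shape is abbreviated (grpc-go uses its own duration type and codes.Code values for these fields).

    package retrypolicy

    import "time"

    // jsonRetryPolicy is a trimmed stand-in for the type parsed out of a
    // JSON service config.
    type jsonRetryPolicy struct {
    	MaxAttempts          int
    	InitialBackoff       time.Duration
    	MaxBackoff           time.Duration
    	BackoffMultiplier    float64
    	RetryableStatusCodes []string
    }

    // isValidRetryPolicy mirrors the checks applied before a retry policy is
    // converted: an invalid policy is rejected rather than converted.
    func isValidRetryPolicy(jrp *jsonRetryPolicy) bool {
    	return jrp.MaxAttempts > 1 &&
    		jrp.InitialBackoff > 0 &&
    		jrp.MaxBackoff > 0 &&
    		jrp.BackoffMultiplier > 0 &&
    		len(jrp.RetryableStatusCodes) > 0
    }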
diff --git a/vendor/google.golang.org/grpc/stats/opentelemetry/client_metrics.go b/vendor/google.golang.org/grpc/stats/opentelemetry/client_metrics.go index 265791e5a2..7c58f534e0 100644 --- a/vendor/google.golang.org/grpc/stats/opentelemetry/client_metrics.go +++ b/vendor/google.golang.org/grpc/stats/opentelemetry/client_metrics.go @@ -260,6 +260,7 @@ func (h *clientStatsHandler) processRPCEnd(ctx context.Context, ai *attemptInfo, } const ( +<<<<<<< HEAD // ClientAttemptStartedMetricName is the number of client call attempts // started. ClientAttemptStartedMetricName string = "grpc.client.attempt.started" @@ -275,4 +276,20 @@ const ( // ClientCallDurationMetricName is the time taken by gRPC to complete an RPC // from application's perspective. ClientCallDurationMetricName string = "grpc.client.call.duration" +======= + // ClientAttemptStarted is the number of client call attempts started. + ClientAttemptStarted estats.Metric = "grpc.client.attempt.started" + // ClientAttemptDuration is the end-to-end time taken to complete a client + // call attempt. + ClientAttemptDuration estats.Metric = "grpc.client.attempt.duration" + // ClientAttemptSentCompressedTotalMessageSize is the compressed message + // bytes sent per client call attempt. + ClientAttemptSentCompressedTotalMessageSize estats.Metric = "grpc.client.attempt.sent_total_compressed_message_size" + // ClientAttemptRcvdCompressedTotalMessageSize is the compressed message + // bytes received per call attempt. + ClientAttemptRcvdCompressedTotalMessageSize estats.Metric = "grpc.client.attempt.rcvd_total_compressed_message_size" + // ClientCallDuration is the time taken by gRPC to complete an RPC from + // application's perspective. + ClientCallDuration estats.Metric = "grpc.client.call.duration" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) diff --git a/vendor/google.golang.org/grpc/stats/opentelemetry/opentelemetry.go b/vendor/google.golang.org/grpc/stats/opentelemetry/opentelemetry.go index dcc424775f..c78c1f958a 100644 --- a/vendor/google.golang.org/grpc/stats/opentelemetry/opentelemetry.go +++ b/vendor/google.golang.org/grpc/stats/opentelemetry/opentelemetry.go @@ -16,10 +16,13 @@ // Package opentelemetry implements opentelemetry instrumentation code for // gRPC-Go clients and servers. +<<<<<<< HEAD // // For details on configuring opentelemetry and various instruments that this // package creates, see // [gRPC OpenTelemetry Metrics](https://grpc.io/docs/guides/opentelemetry-metrics/). +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) package opentelemetry import ( @@ -32,7 +35,10 @@ import ( estats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" +<<<<<<< HEAD "google.golang.org/grpc/stats" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) otelinternal "google.golang.org/grpc/stats/opentelemetry/internal" otelattribute "go.opentelemetry.io/otel/attribute" @@ -62,13 +68,20 @@ type Options struct { type MetricsOptions struct { // MeterProvider is the MeterProvider instance that will be used to create // instruments. To enable metrics collection, set a meter provider. If +<<<<<<< HEAD // unset, no metrics will be recorded. +======= + // unset, no metrics will be recorded. Any implementation knobs (i.e. views, + // bounds) set in the MeterProvider take precedence over the API calls from + // this interface. (i.e. it will create default views for unset views). 
 	MeterProvider otelmetric.MeterProvider
 	// Metrics are the metrics to instrument. Will create instrument and record telemetry
 	// for corresponding metric supported by the client and server
 	// instrumentation components if applicable. If not set, the default metrics
 	// will be recorded.
 	Metrics *stats.MetricSet

 	// MethodAttributeFilter is a function that determines whether to record the
@@ -84,6 +97,20 @@ type MetricsOptions struct {
 	// OptionalLabels specifies a list of optional labels to enable on any
 	// metrics that support them.
 	OptionalLabels []string

 	// pluginOption is used to get labels to attach to certain metrics, if set.
@@ -213,7 +240,11 @@ type serverMetrics struct {
 	callDuration otelmetric.Float64Histogram
 }

 func createInt64Counter(setOfMetrics map[string]bool, metricName string, meter otelmetric.Meter, options ...otelmetric.Int64CounterOption) otelmetric.Int64Counter {
 	if _, ok := setOfMetrics[metricName]; !ok {
 		return noop.Int64Counter{}
 	}
@@ -225,7 +256,11 @@ func createInt64Counter(setOfMetrics map[string]bool, metricName string, meter o
 	return ret
 }

 func createFloat64Counter(setOfMetrics map[string]bool, metricName string, meter otelmetric.Meter, options ...otelmetric.Float64CounterOption) otelmetric.Float64Counter {
 	if _, ok := setOfMetrics[metricName]; !ok {
 		return noop.Float64Counter{}
 	}
@@ -237,7 +272,11 @@ func createFloat64Counter(setOfMetrics map[string]bool, metricName string, meter
 	return ret
 }

 func createInt64Histogram(setOfMetrics map[string]bool, metricName string, meter otelmetric.Meter, options ...otelmetric.Int64HistogramOption) otelmetric.Int64Histogram {
 	if _, ok := setOfMetrics[metricName]; !ok {
 		return noop.Int64Histogram{}
 	}
@@ -249,7 +288,11 @@ func createInt64Histogram(setOfMetrics map[string]bool, metricName string, meter
 	return ret
 }

 func createFloat64Histogram(setOfMetrics map[string]bool, metricName string, meter otelmetric.Meter, options ...otelmetric.Float64HistogramOption) otelmetric.Float64Histogram {
 	if _, ok := setOfMetrics[metricName]; !ok {
 		return noop.Float64Histogram{}
 	}
@@ -261,7 +304,11 @@ func createFloat64Histogram(setOfMetrics map[string]bool, metricName string, met
 	return ret
 }

 func createInt64Gauge(setOfMetrics map[string]bool, metricName string, meter otelmetric.Meter, options ...otelmetric.Int64GaugeOption) otelmetric.Int64Gauge {
 	if _, ok := setOfMetrics[metricName]; !ok {
 		return noop.Int64Gauge{}
 	}
@@ -304,7 +351,11 @@ type registryMetrics struct {
 	optionalLabels []string
 }

 func (rm *registryMetrics) registerMetrics(metrics *stats.MetricSet, meter otelmetric.Meter) {
 	rm.intCounts = make(map[*estats.MetricDescriptor]otelmetric.Int64Counter)
 	rm.floatCounts = make(map[*estats.MetricDescriptor]otelmetric.Float64Counter)
 	rm.intHistos = make(map[*estats.MetricDescriptor]otelmetric.Int64Histogram)
@@ -385,12 +436,20 @@ var (
 	// DefaultSizeBounds are the default bounds for metrics which record size.
 	DefaultSizeBounds = []float64{0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296}
 	// defaultPerCallMetrics are the default metrics provided by this module.
 	defaultPerCallMetrics = stats.NewMetricSet(ClientAttemptStartedMetricName, ClientAttemptDurationMetricName, ClientAttemptSentCompressedTotalMessageSizeMetricName, ClientAttemptRcvdCompressedTotalMessageSizeMetricName, ClientCallDurationMetricName, ServerCallStartedMetricName, ServerCallSentCompressedTotalMessageSizeMetricName, ServerCallRcvdCompressedTotalMessageSizeMetricName, ServerCallDurationMetricName)
 )

 // DefaultMetrics returns a set of default OpenTelemetry metrics.
 //
 // This should only be invoked after init time.
 func DefaultMetrics() *stats.MetricSet {
 	return defaultPerCallMetrics.Join(estats.DefaultMetrics)
 }
diff --git a/vendor/google.golang.org/grpc/stats/opentelemetry/server_metrics.go b/vendor/google.golang.org/grpc/stats/opentelemetry/server_metrics.go
index 4765afa8ed..05e3958101 100644
--- a/vendor/google.golang.org/grpc/stats/opentelemetry/server_metrics.go
+++ b/vendor/google.golang.org/grpc/stats/opentelemetry/server_metrics.go
@@ -264,6 +264,7 @@ func (h *serverStatsHandler) processRPCEnd(ctx context.Context, ai *attemptInfo,
 }

 const (
 	// ServerCallStartedMetricName is the number of server calls started.
 	ServerCallStartedMetricName string = "grpc.server.call.started"
 	// ServerCallSentCompressedTotalMessageSizeMetricName is the compressed
@@ -275,4 +276,17 @@ (
 	// ServerCallDurationMetricName is the end-to-end time taken to complete a
 	// call from server transport's perspective.
 	ServerCallDurationMetricName string = "grpc.server.call.duration"
 )
diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go
index 6f20d2d548..f5ee6595c5 100644
--- a/vendor/google.golang.org/grpc/stats/stats.go
+++ b/vendor/google.golang.org/grpc/stats/stats.go
@@ -260,17 +260,33 @@ func (s *ConnEnd) IsClient() bool { return s.Client }

 func (s *ConnEnd) isConnStats() {}

 // SetTags attaches stats tagging data to the context, which will be sent in
 // the outgoing RPC with the header grpc-tags-bin. Subsequent calls to
 // SetTags will overwrite the values from earlier calls.
 //
 // Deprecated: set the `grpc-tags-bin` header in the metadata instead.
 func SetTags(ctx context.Context, b []byte) context.Context {
 	return metadata.AppendToOutgoingContext(ctx, "grpc-tags-bin", string(b))
 }

 // Tags returns the tags from the context for the inbound RPC.
 //
 // Deprecated: obtain the `grpc-tags-bin` header from metadata instead.
 func Tags(ctx context.Context) []byte {
 	traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-tags-bin")
@@ -280,17 +296,57 @@
 	return []byte(traceValues[len(traceValues)-1])
 }
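The createXxx helpers above all follow one pattern: look the metric name up in the enabled set and, when it is absent or registration fails, hand back a no-op instrument so call sites never nil-check. A minimal standalone sketch of that pattern using the public OpenTelemetry metric and noop packages (names here are illustrative, not the vendored implementation):

package main

import (
	"context"
	"fmt"

	otelmetric "go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/noop"
)

// createInt64Counter returns a real counter only when metricName is enabled;
// otherwise it returns a no-op instrument, so disabled metrics cost nothing
// at record time and callers never have to nil-check.
func createInt64Counter(enabled map[string]bool, metricName string, meter otelmetric.Meter) otelmetric.Int64Counter {
	if !enabled[metricName] {
		return noop.Int64Counter{}
	}
	c, err := meter.Int64Counter(metricName)
	if err != nil {
		// Mirror the log-and-degrade behavior rather than failing the caller.
		fmt.Printf("failed to register metric %q: %v\n", metricName, err)
		return noop.Int64Counter{}
	}
	return c
}

func main() {
	meter := noop.NewMeterProvider().Meter("example")
	c := createInt64Counter(map[string]bool{"grpc.client.attempt.started": true}, "grpc.client.attempt.started", meter)
	c.Add(context.Background(), 1) // safe whether or not the metric is enabled
}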
 // SetTrace attaches stats tagging data to the context, which will be sent in
 // the outgoing RPC with the header grpc-trace-bin. Subsequent calls to
 // SetTrace will overwrite the values from earlier calls.
 //
 // Deprecated: set the `grpc-trace-bin` header in the metadata instead.
 func SetTrace(ctx context.Context, b []byte) context.Context {
 	return metadata.AppendToOutgoingContext(ctx, "grpc-trace-bin", string(b))
 }

 // Trace returns the trace from the context for the inbound RPC.
 //
 // Deprecated: obtain the `grpc-trace-bin` header from metadata instead.
 func Trace(ctx context.Context) []byte {
 	traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-trace-bin")
@@ -298,4 +354,27 @@
 		return nil
 	}
 	return []byte(traceValues[len(traceValues)-1])
 }
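The deprecated SetTags/SetTrace helpers above are now thin wrappers over metadata headers, so the same round-trip can be reproduced with the public metadata package alone. A small sketch (the server side is simulated in-process; illustrative only):

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

// setTags mirrors the deprecated stats.SetTags: it appends the raw bytes as
// the grpc-tags-bin header on the outgoing metadata.
func setTags(ctx context.Context, b []byte) context.Context {
	return metadata.AppendToOutgoingContext(ctx, "grpc-tags-bin", string(b))
}

// tagsFromIncoming mirrors stats.Tags: the last grpc-tags-bin value wins.
func tagsFromIncoming(ctx context.Context) []byte {
	vals := metadata.ValueFromIncomingContext(ctx, "grpc-tags-bin")
	if len(vals) == 0 {
		return nil
	}
	return []byte(vals[len(vals)-1])
}

func main() {
	// Simulate transport: move the client's outgoing metadata to a server-side
	// incoming context.
	ctx := setTags(context.Background(), []byte("tag-bytes"))
	md, _ := metadata.FromOutgoingContext(ctx)
	srvCtx := metadata.NewIncomingContext(context.Background(), md)
	fmt.Printf("%s\n", tagsFromIncoming(srvCtx)) // tag-bytes
}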
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index 54adbbced7..0bdadc3e1c 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -23,7 +23,11 @@ import (
 	"errors"
 	"io"
 	"math"
 	rand "math/rand/v2"
 	"strconv"
 	"sync"
 	"time"
@@ -113,9 +117,13 @@ type ClientStream interface {
 	// SendMsg is generally called by generated code. On error, SendMsg aborts
 	// the stream. If the error was generated by the client, the status is
 	// returned directly; otherwise, io.EOF is returned and the status of
 	// the stream may be discovered using RecvMsg. For unary or server-streaming
 	// RPCs (StreamDesc.ClientStreams is false), a nil error is returned
 	// unconditionally.
 	//
 	// SendMsg blocks until:
 	//   - There is sufficient flow control to schedule m with the transport, or
@@ -218,7 +226,11 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
 	var mc serviceconfig.MethodConfig
 	var onCommit func()
 	newStream := func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
 		return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...)
 	}
@@ -586,7 +598,11 @@ type csAttempt struct {
 	ctx        context.Context
 	cs         *clientStream
 	t          transport.ClientTransport
 	s          *transport.ClientStream
 	p          *parser
 	pickResult balancer.PickResult
@@ -708,10 +724,18 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) {
 		cs.numRetriesSincePushback = 0
 	} else {
 		fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback))
 		cur := min(float64(rp.InitialBackoff)*fact, float64(rp.MaxBackoff))
 		// Apply jitter by multiplying with a random factor between 0.8 and 1.2
 		cur *= 0.8 + 0.4*rand.Float64()
 		dur = time.Duration(int64(cur))
 		cs.numRetriesSincePushback++
 	}
@@ -992,7 +1016,11 @@ func (cs *clientStream) CloseSend() error {
 	}
 	cs.sentLast = true
 	op := func(a *csAttempt) error {
 		a.s.Write(nil, nil, &transport.WriteOptions{Last: true})
 		// Always return nil; io.EOF is the only error that might make sense
 		// instead, but there is no need to signal the client to call RecvMsg
 		// as the only use left for the stream after CloseSend is to call
@@ -1084,7 +1112,11 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength
 		}
 		a.mu.Unlock()
 	}
 	if err := a.s.Write(hdr, payld, &transport.WriteOptions{Last: !cs.desc.ClientStreams}); err != nil {
 		if !cs.desc.ClientStreams {
 			// For non-client-streaming RPCs, we return nil instead of EOF on error
 			// because the generated code requires it. finish is not called; RecvMsg()
@@ -1098,6 +1130,12 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength
 			sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now()))
 		}
 	}
 	return nil
 }
@@ -1151,6 +1189,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
 			Length: payInfo.uncompressedBytes.Len(),
 		})
 	}
 	if cs.desc.ServerStreams {
 		// Subsequent messages should be received by subsequent RecvMsg calls.
 		return nil
@@ -1178,7 +1222,11 @@ func (a *csAttempt) finish(err error) {
 	}
 	var tr metadata.MD
 	if a.s != nil {
 		a.s.Close(err)
 		tr = a.s.Trailer()
 	}
@@ -1335,7 +1383,11 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
 }

 type addrConnStream struct {
 	s       *transport.ClientStream
 	ac      *addrConn
 	callHdr *transport.CallHdr
 	cancel  context.CancelFunc
@@ -1375,7 +1427,11 @@ func (as *addrConnStream) CloseSend() error {
 	}
 	as.sentLast = true
 	as.s.Write(nil, nil, &transport.WriteOptions{Last: true})
 	// Always return nil; io.EOF is the only error that might make sense
 	// instead, but there is no need to signal the client to call RecvMsg
 	// as the only use left for the stream after CloseSend is to call
@@ -1425,7 +1481,11 @@ func (as *addrConnStream) SendMsg(m any) (err error) {
 		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize)
 	}
 	if err := as.s.Write(hdr, payload, &transport.WriteOptions{Last: !as.desc.ClientStreams}); err != nil {
 		if !as.desc.ClientStreams {
 			// For non-client-streaming RPCs, we return nil instead of EOF on error
 			// because the generated code requires it. finish is not called; RecvMsg()
@@ -1435,6 +1495,12 @@ func (as *addrConnStream) SendMsg(m any) (err error) {
 		return io.EOF
 	}
 	return nil
 }
@@ -1472,6 +1538,12 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
 		return toRPCErr(err)
 	}
 	if as.desc.ServerStreams {
 		// Subsequent messages should be received by subsequent RecvMsg calls.
 		return nil
@@ -1499,7 +1571,11 @@ func (as *addrConnStream) finish(err error) {
 		err = nil
 	}
 	if as.s != nil {
 		as.s.Close(err)
 	}
 	if err != nil {
@@ -1566,7 +1642,12 @@ type ServerStream interface {
 // serverStream implements a server side Stream.
 type serverStream struct {
 	ctx   context.Context
 	s     *transport.ServerStream
 	p     *parser
 	codec baseCodec
@@ -1616,7 +1697,11 @@ func (ss *serverStream) SendHeader(md metadata.MD) error {
 		return status.Error(codes.Internal, err.Error())
 	}
 	err = ss.s.SendHeader(md)
 	if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged {
 		h, _ := ss.s.Header()
 		sh := &binarylog.ServerHeader{
@@ -1656,7 +1741,11 @@ func (ss *serverStream) SendMsg(m any) (err error) {
 		}
 		if err != nil && err != io.EOF {
 			st, _ := status.FromError(toRPCErr(err))
 			ss.s.WriteStatus(st)
 			// Non-user specified status was sent out. This should be an error
 			// case (as a server side Cancel maybe).
 			//
@@ -1664,6 +1753,12 @@ func (ss *serverStream) SendMsg(m any) (err error) {
 			// status from the service handler, we will log that error instead.
 			// This behavior is similar to an interceptor.
 		}
 	}()

 	// Server handler could have set new compressor by calling SetSendCompressor.
@@ -1695,7 +1790,11 @@ func (ss *serverStream) SendMsg(m any) (err error) {
 	if payloadLen > ss.maxSendMessageSize {
 		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, ss.maxSendMessageSize)
 	}
 	if err := ss.s.Write(hdr, payload, &transport.WriteOptions{Last: false}); err != nil {
 		return toRPCErr(err)
 	}
@@ -1741,7 +1840,11 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
 	}
 	if err != nil && err != io.EOF {
 		st, _ := status.FromError(toRPCErr(err))
 		ss.s.WriteStatus(st)
 		// Non-user specified status was sent out. This should be an error
 		// case (as a server side Cancel maybe).
 		//
@@ -1749,6 +1852,12 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
 		// status from the service handler, we will log that error instead.
 		// This behavior is similar to an interceptor.
 	}
 	}()
 	var payInfo *payloadInfo
 	if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 {
@@ -1766,7 +1875,11 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
 		return err
 	}
 	if err == io.ErrUnexpectedEOF {
 		err = status.Error(codes.Internal, io.ErrUnexpectedEOF.Error())
 	}
 	return toRPCErr(err)
 }
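The shouldRetry hunk above computes retry pushback as a capped exponential backoff with bounded multiplicative jitter: cur = min(initial*multiplier^n, max), then scaled by a random factor in [0.8, 1.2). A self-contained sketch of that formula (constants mirror the hunk; this is not the vendored implementation):

package main

import (
	"fmt"
	"math"
	rand "math/rand/v2"
	"time"
)

// retryBackoff returns the nth retry delay: a capped exponential backoff
// jittered by a uniform factor between 0.8 and 1.2, as in the hunk above.
func retryBackoff(initial, maxBackoff time.Duration, multiplier float64, retries int) time.Duration {
	cur := math.Min(float64(initial)*math.Pow(multiplier, float64(retries)), float64(maxBackoff))
	cur *= 0.8 + 0.4*rand.Float64() // apply +/-20% jitter
	return time.Duration(int64(cur))
}

func main() {
	for n := 0; n < 5; n++ {
		fmt.Println(retryBackoff(100*time.Millisecond, 10*time.Second, 2, n))
	}
}

Bounded jitter keeps successive delays close to the nominal schedule; the older vendored code instead drew uniformly from [0, cur) ("full jitter"), which spreads retries more aggressively at the cost of occasionally retrying almost immediately.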
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index 0e03fa4d4f..1af9328caf 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,8 @@ package grpc

 // Version is the current grpc version.
 const Version = "1.70.0"
diff --git a/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go b/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go
index 0f9cb52b7e..5f249cc0e3 100644
--- a/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go
+++ b/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go
@@ -28,9 +28,14 @@ package googledirectpath
 import (
 	"encoding/json"
 	"fmt"
 	rand "math/rand/v2"
 	"net/url"
 	"sync"
 	"time"

 	"google.golang.org/grpc/grpclog"
@@ -47,7 +52,11 @@ const (
 	c2pScheme             = "google-c2p"
 	c2pAuthority          = "traffic-director-c2p.xds.googleapis.com"
 	defaultUniverseDomain = "googleapis.com"
 	zoneURL                 = "http://metadata.google.internal/computeMetadata/v1/instance/zone"
 	ipv6URL                 = "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ipv6s"
 	ipv6CapableMetadataName = "TRAFFICDIRECTOR_DIRECTPATH_C2P_IPV6_CAPABLE"
@@ -57,6 +66,7 @@ const (
 	dnsName, xdsName = "dns", "xds"
 )

 var (
 	logger           = internalgrpclog.NewPrefixLogger(grpclog.Component("directpath"), logPrefix)
 	universeDomainMu sync.Mutex
 	// For overriding in unittests.
 	onGCE   = googlecloud.OnGCE
 	randInt = rand.Int
 )

 func init() {
 	resolver.Register(c2pResolverBuilder{})
 }

 // SetUniverseDomain informs the gRPC library of the universe domain
 // in which the process is running (for example, "googleapis.com").
 // It is the caller's responsibility to ensure that the domain is correct.
@@ -117,6 +135,8 @@ func getXdsServerURI() string {
 	return fmt.Sprintf("dns:///directpath-pa.%s", universeDomain)
 }

 type c2pResolverBuilder struct{}

 func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
@@ -140,7 +160,15 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts
 	go func() { zoneCh <- getZone(httpReqTimeout) }()
 	go func() { ipv6CapableCh <- getIPv6Capable(httpReqTimeout) }()

 	xdsServerURI := getXdsServerURI()
 	nodeCfg := newNodeConfig(<-zoneCh, <-ipv6CapableCh)
 	xdsServerCfg := newXdsServerConfig(xdsServerURI)
 	authoritiesCfg := newAuthoritiesConfig(xdsServerCfg)
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go
index 71a4c9c9da..5790ba13ae 100644
--- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go
@@ -24,6 +24,10 @@ package clusterimpl

 import (
 	"encoding/json"
 	"fmt"
 	"sync"
@@ -34,6 +38,10 @@ import (
 	"google.golang.org/grpc/internal"
 	"google.golang.org/grpc/internal/balancer/gracefulswitch"
 	"google.golang.org/grpc/internal/grpclog"
 	"google.golang.org/grpc/internal/pretty"
 	"google.golang.org/grpc/internal/xds"
 	"google.golang.org/grpc/internal/xds/bootstrap"
@@ -52,11 +60,16 @@ const (
 )

 var (
 	connectedAddress = internal.ConnectedAddress.(func(balancer.SubConnState) resolver.Address)
 	// Below function is no-op in actual code, but can be overridden in
 	// tests to give tests visibility into exactly when certain events happen.
 	clientConnUpdateHook = func() {}
 	pickerUpdateHook     = func() {}
 )

 func init() {
@@ -66,10 +79,21 @@ func init() {

 type bb struct{}

 func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer {
 	b := &clusterImplBalancer{
 		ClientConn:      cc,
 		loadWrapper:     loadstore.NewWrapper(),
 		requestCountMax: defaultRequestCountMax,
 	}
 	b.logger = prefixLogger(b)
 	b.child = gracefulswitch.NewBalancer(b, bOpts)
@@ -88,6 +112,7 @@ func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, err

 type clusterImplBalancer struct {
 	balancer.ClientConn

 	// The following fields are set at creation time, and are read-only after
 	// that, and therefore need not be protected by a mutex.
 	logger *grpclog.PrefixLogger
@@ -158,6 +183,35 @@ func (b *clusterImplBalancer) newPickerLocked() *picker {
 		countMax:        b.requestCountMax,
 		telemetryLabels: b.telemetryLabels,
 	}
 }

 // updateLoadStore checks the config for load store, and decides whether it
@@ -238,12 +292,16 @@ func (b *clusterImplBalancer) updateLoadStore(newConfig *LBConfig) error {
 	return nil
 }

 func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
 	defer clientConnUpdateHook()

 	b.mu.Lock()
 	b.inhibitPickerUpdates = true
 	b.mu.Unlock()
 	if b.logger.V(2) {
 		b.logger.Infof("Received configuration: %s", pretty.ToJSON(s.BalancerConfig))
 	}
@@ -286,6 +344,7 @@ func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState)
 		return err
 	}

 	// Addresses and sub-balancer config are sent to sub-balancer.
 	err = b.child.UpdateClientConnState(balancer.ClientConnState{
 		ResolverState: s.ResolverState,
@@ -316,6 +375,45 @@ func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState)

 func (b *clusterImplBalancer) ResolverError(err error) {
 	b.child.ResolverError(err)
 }

 func (b *clusterImplBalancer) updateSubConnState(_ balancer.SubConn, s balancer.SubConnState, cb func(balancer.SubConnState)) {
@@ -340,6 +438,7 @@ func (b *clusterImplBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer
 }

 func (b *clusterImplBalancer) Close() {
 	b.child.Close()
 	b.childState = balancer.State{}
@@ -352,11 +451,32 @@ func (b *clusterImplBalancer) Close() {

 func (b *clusterImplBalancer) ExitIdle() {
 	b.child.ExitIdle()
 }

 // Override methods to accept updates from the child LB.

 func (b *clusterImplBalancer) UpdateState(state balancer.State) {
 	b.mu.Lock()
 	defer b.mu.Unlock()
@@ -382,12 +502,35 @@ func (b *clusterImplBalancer) UpdateState(state balancer.State) {

 func (b *clusterImplBalancer) setClusterName(n string) {
 	b.mu.Lock()
 	defer b.mu.Unlock()
 	b.clusterName = n
 }

 func (b *clusterImplBalancer) getClusterName() string {
 	b.mu.Lock()
 	defer b.mu.Unlock()
 	return b.clusterName
 }
@@ -428,6 +571,7 @@ func (b *clusterImplBalancer) NewSubConn(addrs []resolver.Address, opts balancer
 	scw := &scWrapper{}
 	oldListener := opts.StateListener
 	opts.StateListener = func(state balancer.SubConnState) {
 		b.updateSubConnState(sc, state, oldListener)
 		if state.ConnectivityState != connectivity.Ready {
 			return
 		}
@@ -443,6 +587,25 @@ func (b *clusterImplBalancer) NewSubConn(addrs []resolver.Address, opts balancer
 			return
 		}
 		scw.updateLocalityID(lID)
 	}
 	sc, err := b.ClientConn.NewSubConn(newAddrs, opts)
 	if err != nil {
@@ -472,3 +635,56 @@ func (b *clusterImplBalancer) UpdateAddresses(sc balancer.SubConn, addrs []resol
 	}
 	b.ClientConn.UpdateAddresses(sc, newAddrs)
 }
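The clusterimpl balancer above exposes package-level no-op hooks (clientConnUpdateHook, pickerUpdateHook) that tests override to observe exactly when an event fires. A generic sketch of that test-hook pattern, detached from any gRPC types (names are illustrative):

package main

import "fmt"

// clientConnUpdateHook is a no-op in production; tests swap it out to get a
// deterministic signal that an update has been fully processed.
var clientConnUpdateHook = func() {}

func updateClientConnState() error {
	defer clientConnUpdateHook() // fires after the update completes
	// ... handle the update ...
	return nil
}

func main() {
	done := make(chan struct{}, 1)
	old := clientConnUpdateHook
	clientConnUpdateHook = func() { done <- struct{}{} } // test override
	defer func() { clientConnUpdateHook = old }()

	_ = updateClientConnState()
	<-done
	fmt.Println("observed client conn update")
}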
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go
index cd94182fa7..05c8711d1b 100644
--- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go
@@ -87,6 +87,20 @@ type picker struct {
 	telemetryLabels map[string]string
 }

 func telemetryLabels(ctx context.Context) map[string]string {
 	if ctx == nil {
 		return nil
@@ -129,7 +143,11 @@ func (d *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
 			if d.loadStore != nil {
 				d.loadStore.CallDropped("")
 			}
 			return balancer.PickResult{}, status.Error(codes.Unavailable, err.Error())
 		}
 	}
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/clustermanager.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/clustermanager.go
index 24ad2399dd..149b081706 100644
--- a/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/clustermanager.go
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/clustermanager.go
@@ -87,7 +87,10 @@ func (b *bal) updateChildren(s balancer.ClientConnState, newConfig *lbConfig) er
 	// TODO: Get rid of handling hierarchy in addresses. This LB policy never
 	// gets addresses from the resolver.
 	addressesSplit := hierarchy.Group(s.ResolverState.Addresses)
 	endpointsSplit := hierarchy.GroupEndpoints(s.ResolverState.Endpoints)

 	// Remove sub-balancers that are not in the new list from the aggregator and
 	// balancergroup.
@@ -140,7 +143,10 @@ func (b *bal) updateChildren(s balancer.ClientConnState, newConfig *lbConfig) er
 		if err := b.bg.UpdateClientConnState(childName, balancer.ClientConnState{
 			ResolverState: resolver.State{
 				Addresses:     addressesSplit[childName],
 				Endpoints:     endpointsSplit[childName],
 				ServiceConfig: s.ResolverState.ServiceConfig,
 				Attributes:    s.ResolverState.Attributes,
 			},
@@ -169,14 +175,21 @@ func (b *bal) updateChildren(s balancer.ClientConnState, newConfig *lbConfig) er
 }

 func (b *bal) UpdateClientConnState(s balancer.ClientConnState) error {
 	if b.logger.V(2) {
 		b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig))
 	}
 	newConfig, ok := s.BalancerConfig.(*lbConfig)
 	if !ok {
 		return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig)
 	}

 	b.stateAggregator.pauseStateUpdates()
 	defer b.stateAggregator.resumeStateUpdates()
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go
index f0a8905d37..3f8222c996 100644
--- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go
@@ -184,10 +184,14 @@ func (b *clusterResolverBalancer) handleClientConnUpdate(update *ccUpdate) {
 		return
 	}

 	if b.logger.V(2) {
 		b.logger.Infof("Received new balancer config: %v", pretty.ToJSON(update.state.BalancerConfig))
 	}
 	cfg, _ := update.state.BalancerConfig.(*LBConfig)
 	if cfg == nil {
 		b.logger.Warningf("Ignoring unsupported balancer configuration of type: %T", update.state.BalancerConfig)
@@ -234,7 +238,11 @@ func (b *clusterResolverBalancer) updateChildConfig() {
 		b.child = newChildBalancer(b.priorityBuilder, b.cc, b.bOpts)
 	}

 	childCfgBytes, endpoints, err := buildPriorityConfigJSON(b.priorities, &b.config.xdsLBPolicy)
 	if err != nil {
 		b.logger.Warningf("Failed to build child policy config: %v", err)
 		return
@@ -248,6 +256,7 @@ func (b *clusterResolverBalancer) updateChildConfig() {
 		b.logger.Infof("Built child policy config: %s", pretty.ToJSON(childCfg))
 	}

 	flattenedAddrs := make([]resolver.Address, len(endpoints))
 	for i := range endpoints {
 		for j := range endpoints[i].Addresses {
@@ -270,11 +279,22 @@ func (b *clusterResolverBalancer) updateChildConfig() {
 			// See https://github.com/grpc/grpc-go/issues/7339
 			endpoints[i].Addresses[j] = addr
 		}
 	}
 	if err := b.child.UpdateClientConnState(balancer.ClientConnState{
 		ResolverState: resolver.State{
 			Endpoints:     endpoints,
 			Addresses:     flattenedAddrs,
 			ServiceConfig: b.configRaw,
 			Attributes:    b.attrsWithClient,
 		},
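The cluster resolver above populates both representations of the resolver state: the endpoint list and a flattened address list for children that still consume flat addresses. A minimal sketch of the flattening direction, using only the public resolver package (the vendored code also copies locality attributes per address, which is omitted here):

package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

// flattenEndpoints produces one Address per endpoint address, hoisting the
// endpoint's attributes onto each flattened address so either representation
// carries the same balancer metadata.
func flattenEndpoints(endpoints []resolver.Endpoint) []resolver.Address {
	var flat []resolver.Address
	for _, ep := range endpoints {
		for _, addr := range ep.Addresses {
			addr.BalancerAttributes = ep.Attributes
			flat = append(flat, addr)
		}
	}
	return flat
}

func main() {
	eps := []resolver.Endpoint{
		{Addresses: []resolver.Address{{Addr: "10.0.0.1:443"}, {Addr: "10.0.0.2:443"}}},
	}
	fmt.Println(len(flattenEndpoints(eps))) // 2
}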
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go
index 9a3a71c2e5..2aab56addf 100644
--- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go
@@ -48,8 +48,13 @@ type priorityConfig struct {
 	mechanism DiscoveryMechanism
 	// edsResp is set only if type is EDS.
 	edsResp xdsresource.EndpointsUpdate
 	// endpoints is set only if type is DNS.
 	endpoints []resolver.Endpoint
 	// Each discovery mechanism has a name generator so that the child policies
 	// can reuse names between updates (EDS updates for example).
 	childNameGen *nameGenerator
@@ -71,8 +76,13 @@ type priorityConfig struct {
 // ┌──────▼─────┐ ┌─────▼──────┐
 // │xDSLBPolicy │ │xDSLBPolicy │ (Locality and Endpoint picking layer)
 // └────────────┘ └────────────┘
 func buildPriorityConfigJSON(priorities []priorityConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]byte, []resolver.Endpoint, error) {
 	pc, endpoints, err := buildPriorityConfig(priorities, xdsLBPolicy)
 	if err != nil {
 		return nil, nil, fmt.Errorf("failed to build priority config: %v", err)
 	}
@@ -80,6 +90,7 @@ func buildPriorityConfigJSON(priorities []priorityConfig, xdsLBPolicy *internals
 	if err != nil {
 		return nil, nil, fmt.Errorf("failed to marshal built priority config struct into json: %v", err)
 	}
 	return ret, endpoints, nil
 }

@@ -87,16 +98,33 @@ func buildPriorityConfig(priorities []priorityConfig, xdsLBPolicy *internalservi
 	var (
 		retConfig    = &priority.LBConfig{Children: make(map[string]*priority.Child)}
 		retEndpoints []resolver.Endpoint
 	)
 	for _, p := range priorities {
 		switch p.mechanism.Type {
 		case DiscoveryMechanismTypeEDS:
 			names, configs, endpoints, err := buildClusterImplConfigForEDS(p.childNameGen, p.edsResp, p.mechanism, xdsLBPolicy)
 			if err != nil {
 				return nil, nil, err
 			}
 			retConfig.Priorities = append(retConfig.Priorities, names...)
 			retEndpoints = append(retEndpoints, endpoints...)
 			odCfgs := convertClusterImplMapToOutlierDetection(configs, p.mechanism.outlierDetection)
 			for n, c := range odCfgs {
 				retConfig.Children[n] = &priority.Child{
@@ -107,9 +135,15 @@ func buildPriorityConfig(priorities []priorityConfig, xdsLBPolicy *internalservi
 			}
 			continue
 		case DiscoveryMechanismTypeLogicalDNS:
 			name, config, endpoints := buildClusterImplConfigForDNS(p.childNameGen, p.endpoints, p.mechanism)
 			retConfig.Priorities = append(retConfig.Priorities, name)
 			retEndpoints = append(retEndpoints, endpoints...)
 			odCfg := makeClusterImplOutlierDetectionChild(config, p.mechanism.outlierDetection)
 			retConfig.Children[name] = &priority.Child{
 				Config: &internalserviceconfig.BalancerConfig{Name: outlierdetection.Name, Config: odCfg},
@@ -120,7 +154,11 @@ func buildPriorityConfig(priorities []priorityConfig, xdsLBPolicy *internalservi
 			continue
 		}
 	}
 	return retConfig, retEndpoints, nil
 }

 func convertClusterImplMapToOutlierDetection(ciCfgs map[string]*clusterimpl.LBConfig, odCfg outlierdetection.LBConfig) map[string]*outlierdetection.LBConfig {
@@ -137,6 +175,7 @@ func makeClusterImplOutlierDetectionChild(ciCfg *clusterimpl.LBConfig, odCfg out
 	return &odCfgRet
 }

 func buildClusterImplConfigForDNS(g *nameGenerator, endpoints []resolver.Endpoint, mechanism DiscoveryMechanism) (string, *clusterimpl.LBConfig, []resolver.Endpoint) {
 	// Endpoint picking policy for DNS is hardcoded to pick_first.
 	const childPolicy = "pick_first"
@@ -147,12 +186,25 @@ func buildClusterImplConfigForDNS(g *nameGenerator, endpoints []resolver.Endpoin
 		// Copy the nested address field as slice fields are shared by the
 		// iteration variable and the original slice.
 		retEndpoints[i].Addresses = append([]resolver.Address{}, e.Addresses...)
 	}
 	return pName, &clusterimpl.LBConfig{
 		Cluster:         mechanism.Cluster,
 		TelemetryLabels: mechanism.TelemetryLabels,
 		ChildPolicy:     &internalserviceconfig.BalancerConfig{Name: childPolicy},
 	}, retEndpoints
 }

 // buildClusterImplConfigForEDS returns a list of cluster_impl configs, one for
@@ -164,7 +216,11 @@ func buildClusterImplConfigForDNS(g *nameGenerator, endpoints []resolver.Endpoin
 //   - map{"p0":p0_config, "p1":p1_config}
 //   - [p0_address_0, p0_address_1, p1_address_0, p1_address_1]
 //   - p0 addresses' hierarchy attributes are set to p0
 func buildClusterImplConfigForEDS(g *nameGenerator, edsResp xdsresource.EndpointsUpdate, mechanism DiscoveryMechanism, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]string, map[string]*clusterimpl.LBConfig, []resolver.Endpoint, error) {
 	drops := make([]clusterimpl.DropConfig, 0, len(edsResp.Drops))
 	for _, d := range edsResp.Drops {
 		drops = append(drops, clusterimpl.DropConfig{
@@ -186,17 +242,30 @@ func buildClusterImplConfigForEDS(g *nameGenerator, edsResp xdsresource.Endpoint
 	}
 	retNames := g.generate(priorities)
 	retConfigs := make(map[string]*clusterimpl.LBConfig, len(retNames))
 	var retEndpoints []resolver.Endpoint
 	for i, pName := range retNames {
 		priorityLocalities := priorities[i]
 		cfg, endpoints, err := priorityLocalitiesToClusterImpl(priorityLocalities, pName, mechanism, drops, xdsLBPolicy)
 		if err != nil {
 			return nil, nil, nil, err
 		}
 		retConfigs[pName] = cfg
 		retEndpoints = append(retEndpoints, endpoints...)
 	}
 	return retNames, retConfigs, retEndpoints, nil
 }

 // groupLocalitiesByPriority returns the localities grouped by priority.
@@ -247,8 +316,13 @@ func dedupSortedIntSlice(a []int) []int {
 // priority), and generates a cluster impl policy config, and a list of
 // addresses with their path hierarchy set to [priority-name, locality-name], so
 // priority and the xDS LB Policy know which child policy each address is for.
 func priorityLocalitiesToClusterImpl(localities []xdsresource.Locality, priorityName string, mechanism DiscoveryMechanism, drops []clusterimpl.DropConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) (*clusterimpl.LBConfig, []resolver.Endpoint, error) {
 	var retEndpoints []resolver.Endpoint
 	for _, locality := range localities {
 		var lw uint32 = 1
 		if locality.Weight != 0 {
@@ -265,24 +339,39 @@ func priorityLocalitiesToClusterImpl(localities []xdsresource.Locality, priority
 			if endpoint.HealthStatus != xdsresource.EndpointHealthStatusHealthy && endpoint.HealthStatus != xdsresource.EndpointHealthStatusUnknown {
 				continue
 			}
 			resolverEndpoint := resolver.Endpoint{}
 			for _, as := range endpoint.Addresses {
 				resolverEndpoint.Addresses = append(resolverEndpoint.Addresses, resolver.Address{Addr: as})
 			}
 			resolverEndpoint = hierarchy.SetInEndpoint(resolverEndpoint, []string{priorityName, localityStr})
 			resolverEndpoint = internal.SetLocalityIDInEndpoint(resolverEndpoint, locality.ID)
 			// "To provide the xds_wrr_locality load balancer information about
 			// locality weights received from EDS, the cluster resolver will
 			// populate a new locality weight attribute for each address The
 			// attribute will have the weight (as an integer) of the locality
 			// the address is part of." - A52
 			resolverEndpoint = wrrlocality.SetAddrInfoInEndpoint(resolverEndpoint, wrrlocality.AddrInfo{LocalityWeight: lw})
 			var ew uint32 = 1
 			if endpoint.Weight != 0 {
 				ew = endpoint.Weight
 			}
 			resolverEndpoint = weightedroundrobin.SetAddrInfoInEndpoint(resolverEndpoint, weightedroundrobin.AddrInfo{Weight: lw * ew})
 			retEndpoints = append(retEndpoints, resolverEndpoint)
 		}
 	}
 	return &clusterimpl.LBConfig{
@@ -293,5 +382,9 @@ func priorityLocalitiesToClusterImpl(localities []xdsresource.Locality, priority
 		TelemetryLabels: mechanism.TelemetryLabels,
 		DropCategories:  drops,
 		ChildPolicy:     xdsLBPolicy,
 	}, retEndpoints, nil
 }
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go
index d9315c3ace..bb8a3a2ab4 100644
--- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go
@@ -24,7 +24,10 @@ import (
 	"google.golang.org/grpc/internal/grpclog"
 	"google.golang.org/grpc/internal/grpcsync"
 	"google.golang.org/grpc/resolver"
 	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
 )
@@ -295,8 +298,13 @@ func (rr *resourceResolver) generateLocked(onDone xdsresource.OnDoneFunc) {
 		switch uu := u.(type) {
 		case xdsresource.EndpointsUpdate:
 			ret = append(ret, priorityConfig{mechanism: rDM.dm, edsResp: uu, childNameGen: rDM.childNameGen})
 		case []resolver.Endpoint:
 			ret = append(ret, priorityConfig{mechanism: rDM.dm, endpoints: uu, childNameGen: rDM.childNameGen})
 		}
 	}
 	select {
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go
index 5f7a211530..554f1a32f7 100644
--- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go
@@ -47,7 +47,11 @@ type dnsDiscoveryMechanism struct {
 	logger *grpclog.PrefixLogger

 	mu             sync.Mutex
 	endpoints      []resolver.Endpoint
 	updateReceived bool
 }
@@ -103,7 +107,11 @@ func (dr *dnsDiscoveryMechanism) lastUpdate() (any, bool) {
 	if !dr.updateReceived {
 		return nil, false
 	}
 	return dr.endpoints, true
 }

 func (dr *dnsDiscoveryMechanism) resolveNow() {
@@ -133,6 +141,7 @@ func (dr *dnsDiscoveryMechanism) UpdateState(state resolver.State) error {
 	}

 	dr.mu.Lock()
 	var endpoints = state.Endpoints
 	if len(endpoints) == 0 {
 		endpoints = make([]resolver.Endpoint, len(state.Addresses))
@@ -142,6 +151,25 @@ func (dr *dnsDiscoveryMechanism) UpdateState(state resolver.State) error {
 		}
 	}
 	dr.endpoints = endpoints
 	dr.updateReceived = true
 	dr.mu.Unlock()

@@ -164,7 +192,11 @@ func (dr *dnsDiscoveryMechanism) ReportError(err error) {
 		dr.mu.Unlock()
 		return
 	}
 	dr.endpoints = nil
 	dr.updateReceived = true
 	dr.mu.Unlock()
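The DNS discovery mechanism's UpdateState above prefers the resolver's endpoint list and falls back to wrapping the legacy flat address list when no endpoints are present. A minimal standalone version of that fallback, using only the public resolver package (illustrative, not the vendored code):

package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

// endpointsOrFallback returns the state's endpoints if populated; otherwise
// it wraps each legacy flat address in a one-address endpoint, which is how
// DNS results are expected to look anyway.
func endpointsOrFallback(state resolver.State) []resolver.Endpoint {
	if len(state.Endpoints) > 0 {
		return state.Endpoints
	}
	endpoints := make([]resolver.Endpoint, len(state.Addresses))
	for i, a := range state.Addresses {
		endpoints[i] = resolver.Endpoint{Addresses: []resolver.Address{a}}
	}
	return endpoints
}

func main() {
	st := resolver.State{Addresses: []resolver.Address{{Addr: "10.0.0.3:443"}}}
	fmt.Println(len(endpointsOrFallback(st))) // 1
}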
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go
index 8f58c00303..159ff51192 100644
--- a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go
@@ -25,7 +25,11 @@ import (
 	"encoding/json"
 	"fmt"
 	"math"
 	rand "math/rand/v2"
 	"strings"
 	"sync"
 	"sync/atomic"
@@ -33,7 +37,10 @@ import (
 	"unsafe"

 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/balancer/pickfirst/pickfirstleaf"
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/internal/balancer/gracefulswitch"
 	"google.golang.org/grpc/internal/buffer"
@@ -73,7 +80,11 @@ func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Ba
 	}
 	b.logger = prefixLogger(b)
 	b.logger.Infof("Created")
 	b.child = synchronizingBalancerWrapper{lb: gracefulswitch.NewBalancer(b, bOpts)}
 	go b.run()
 	return b
 }
@@ -153,11 +164,14 @@ type lbCfgUpdate struct {
 	done chan struct{}
 }

 type scHealthUpdate struct {
 	scw   *subConnWrapper
 	state balancer.SubConnState
 }

 type outlierDetectionBalancer struct {
 	// These fields are safe to be accessed without holding any mutex because
 	// they are synchronized in run(), which makes these field accesses happen
@@ -176,7 +190,14 @@ type outlierDetectionBalancer struct {
 	logger         *grpclog.PrefixLogger
 	channelzParent channelz.Identifier

 	child synchronizingBalancerWrapper

 	// mu guards access to the following fields. It also helps to synchronize
 	// behaviors of the following events: config updates, firing of the interval
@@ -193,8 +214,13 @@ type outlierDetectionBalancer struct {
 	// which uses addrs. This balancer waits for the interval timer algorithm to
 	// finish before making the update to the addrs map.
 	//
 	// This mutex is never held when calling methods on the child policy
 	// (within the context of a single goroutine).
 	mu    sync.Mutex
 	addrs map[string]*addressInfo
 	cfg   *LBConfig
@@ -279,9 +305,19 @@ func (b *outlierDetectionBalancer) UpdateClientConnState(s balancer.ClientConnSt
 	// the balancer.Balancer API, so it is guaranteed to be called in a
 	// synchronous manner, so it cannot race with this read.
 	if b.cfg == nil || b.cfg.ChildPolicy.Name != lbCfg.ChildPolicy.Name {
 		if err := b.child.switchTo(bb); err != nil {
 			return fmt.Errorf("outlier detection: error switching to child of type %q: %v", lbCfg.ChildPolicy.Name, err)
 		}
 	}

 	b.mu.Lock()
@@ -318,10 +354,19 @@ func (b *outlierDetectionBalancer) UpdateClientConnState(s balancer.ClientConnSt
 	}
 	b.mu.Unlock()

 	err := b.child.updateClientConnState(balancer.ClientConnState{
 		ResolverState:  s.ResolverState,
 		BalancerConfig: b.cfg.ChildPolicy.Config,
 	})

 	done := make(chan struct{})
 	b.pickerUpdateCh.Put(lbCfgUpdate{
@@ -334,7 +379,13 @@ func (b *outlierDetectionBalancer) UpdateClientConnState(s balancer.ClientConnSt
 }

 func (b *outlierDetectionBalancer) ResolverError(err error) {
 	b.child.resolverError(err)
 }

 func (b *outlierDetectionBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
@@ -350,7 +401,10 @@ func (b *outlierDetectionBalancer) updateSubConnState(sc balancer.SubConn, state
 	if state.ConnectivityState == connectivity.Shutdown {
 		delete(b.scWrappers, scw.SubConn)
 	}
 	scw.setLatestConnectivityState(state.ConnectivityState)
 	b.scUpdateCh.Put(&scUpdate{
 		scw:   scw,
 		state: state,
@@ -364,7 +418,13 @@ func (b *outlierDetectionBalancer) UpdateSubConnState(sc balancer.SubConn, state
 func (b *outlierDetectionBalancer) Close() {
 	b.closed.Fire()
 	<-b.done.Done()
 	b.child.closeLB()
 	b.scUpdateCh.Close()
 	b.pickerUpdateCh.Close()
@@ -377,7 +437,13 @@ func (b *outlierDetectionBalancer) Close() {
 }

 func (b *outlierDetectionBalancer) ExitIdle() {
 	b.child.exitIdle()
 }

 // wrappedPicker delegates to the child policy's picker, and when the request
@@ -467,6 +533,7 @@ func (b *outlierDetectionBalancer) NewSubConn(addrs []resolver.Address, opts bal
 		return nil, err
 	}
 	scw := &subConnWrapper{
 		SubConn:                    sc,
 		addresses:                  addrs,
 		scUpdateCh:                 b.scUpdateCh,
@@ -474,6 +541,12 @@ func (b *outlierDetectionBalancer) NewSubConn(addrs []resolver.Address, opts bal
 		latestRawConnectivityState: balancer.SubConnState{ConnectivityState: connectivity.Idle},
 		latestHealthState:          balancer.SubConnState{ConnectivityState: connectivity.Connecting},
 		healthListenerEnabled:      len(addrs) == 1 && pickfirstleaf.IsManagedByPickfirst(addrs[0]),
 	}
 	b.mu.Lock()
 	defer b.mu.Unlock()
@@ -591,18 +664,47 @@ func (b *outlierDetectionBalancer) Target() string {
 // if the SubConn is not ejected.
 func (b *outlierDetectionBalancer) handleSubConnUpdate(u *scUpdate) {
 	scw := u.scw
 	scw.clearHealthListener()
 	b.child.updateSubConnState(scw, u.state)
 }

 func (b *outlierDetectionBalancer) handleSubConnHealthUpdate(u *scHealthUpdate) {
 	b.child.updateSubConnHealthState(u.scw, u.state)
 }

 // handleEjectedUpdate handles any SubConns that get ejected/unejected, and
 // forwards the appropriate corresponding subConnState to the child policy.
 func (b *outlierDetectionBalancer) handleEjectedUpdate(u *ejectionUpdate) {
 	b.child.handleEjectionUpdate(u)
 }

 // handleChildStateUpdate forwards the picker update wrapped in a wrapped picker
@@ -675,8 +777,11 @@ func (b *outlierDetectionBalancer) run() {
 				b.handleSubConnUpdate(u)
 			case *ejectionUpdate:
 				b.handleEjectedUpdate(u)
 			case *scHealthUpdate:
 				b.handleSubConnHealthUpdate(u)
 			}
 		case update, ok := <-b.pickerUpdateCh.Get():
 			if !ok {
@@ -805,7 +910,11 @@ func (b *outlierDetectionBalancer) successRateAlgorithm() {
 		requiredSuccessRate := mean - stddev*(float64(ejectionCfg.StdevFactor)/1000)
 		if successRate < requiredSuccessRate {
 			channelz.Infof(logger, b.channelzParent, "SuccessRate algorithm detected outlier: %s. Parameters: successRate=%f, mean=%f, stddev=%f, requiredSuccessRate=%f", addrInfo, successRate, mean, stddev, requiredSuccessRate)
 			if uint32(rand.Int32N(100)) < ejectionCfg.EnforcementPercentage {
 				b.ejectAddress(addrInfo)
 			}
 		}
@@ -832,7 +941,11 @@ func (b *outlierDetectionBalancer) failurePercentageAlgorithm() {
 		failurePercentage := (float64(bucket.numFailures) / float64(bucket.numSuccesses+bucket.numFailures)) * 100
 		if failurePercentage > float64(b.cfg.FailurePercentageEjection.Threshold) {
 			channelz.Infof(logger, b.channelzParent, "FailurePercentage algorithm detected outlier: %s, failurePercentage=%f", addrInfo, failurePercentage)
 			if uint32(rand.Int32N(100)) < ejectionCfg.EnforcementPercentage {
 				b.ejectAddress(addrInfo)
 			}
 		}
@@ -861,6 +974,7 @@ func (b *outlierDetectionBalancer) unejectAddress(addrInfo *addressInfo) {
 	}
 }
Parameters: successRate=%f, mean=%f, stddev=%f, requiredSuccessRate=%f", addrInfo, successRate, mean, stddev, requiredSuccessRate) +<<<<<<< HEAD if uint32(rand.Int32N(100)) < ejectionCfg.EnforcementPercentage { +======= + if uint32(rand.Int31n(100)) < ejectionCfg.EnforcementPercentage { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) b.ejectAddress(addrInfo) } } @@ -832,7 +941,11 @@ func (b *outlierDetectionBalancer) failurePercentageAlgorithm() { failurePercentage := (float64(bucket.numFailures) / float64(bucket.numSuccesses+bucket.numFailures)) * 100 if failurePercentage > float64(b.cfg.FailurePercentageEjection.Threshold) { channelz.Infof(logger, b.channelzParent, "FailurePercentage algorithm detected outlier: %s, failurePercentage=%f", addrInfo, failurePercentage) +<<<<<<< HEAD if uint32(rand.Int32N(100)) < ejectionCfg.EnforcementPercentage { +======= + if uint32(rand.Int31n(100)) < ejectionCfg.EnforcementPercentage { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) b.ejectAddress(addrInfo) } } @@ -861,6 +974,7 @@ func (b *outlierDetectionBalancer) unejectAddress(addrInfo *addressInfo) { } } +<<<<<<< HEAD // synchronizingBalancerWrapper serializes calls into balancer (to uphold the // balancer.Balancer API guarantee of synchronous calls). It also ensures a // consistent order of locking mutexes when using SubConn listeners to avoid @@ -924,6 +1038,8 @@ func (sbw *synchronizingBalancerWrapper) handleEjectionUpdate(u *ejectionUpdate) } } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // addressInfo contains the runtime information about an address that pertains // to Outlier Detection. This struct and all of its fields is protected by // outlierDetectionBalancer.mu in the case where it is accessed through the diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/subconn_wrapper.go b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/subconn_wrapper.go index 7d710fde1b..3ba2583790 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/subconn_wrapper.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/subconn_wrapper.go @@ -19,11 +19,17 @@ package outlierdetection import ( "fmt" +<<<<<<< HEAD "sync" "unsafe" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" +======= + "unsafe" + + "google.golang.org/grpc/balancer" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/resolver" ) @@ -33,6 +39,7 @@ import ( // whether or not this SubConn is ejected. type subConnWrapper struct { balancer.SubConn +<<<<<<< HEAD // addressInfo is a pointer to the subConnWrapper's corresponding address // map entry, if the map entry exists. It is accessed atomically. addressInfo unsafe.Pointer // *addressInfo @@ -60,10 +67,26 @@ type subConnWrapper struct { // run(), and child will always have the correctly updated SubConnState. ejected bool +======= + listener func(balancer.SubConnState) + + // addressInfo is a pointer to the subConnWrapper's corresponding address + // map entry, if the map entry exists. + addressInfo unsafe.Pointer // *addressInfo + // These two pieces of state will reach eventual consistency due to sync in + // run(), and child will always have the correctly updated SubConnState. + // latestState is the latest state update from the underlying SubConn. This + // is used whenever a SubConn gets unejected. 
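+ // While the SubConn is ejected, the child is shown a synthetic
+ // TRANSIENT_FAILURE update instead, regardless of what the underlying
+ // SubConn reports.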
+ latestState balancer.SubConnState + ejected bool + + scUpdateCh *buffer.Unbounded +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // addresses is the list of address(es) this SubConn was created with to // help support any change in address(es) addresses []resolver.Address +<<<<<<< HEAD // latestHealthState is tracked to update the child policy during // unejection. latestHealthState balancer.SubConnState @@ -81,6 +104,8 @@ type subConnWrapper struct { // used to ensure a health listener is registered with the SubConn only when // the SubConn is READY. latestReceivedConnectivityState connectivity.State +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // eject causes the wrapper to report a state update with the TRANSIENT_FAILURE @@ -105,6 +130,7 @@ func (scw *subConnWrapper) uneject() { func (scw *subConnWrapper) String() string { return fmt.Sprintf("%+v", scw.addresses) } +<<<<<<< HEAD func (scw *subConnWrapper) RegisterHealthListener(listener func(balancer.SubConnState)) { // gRPC currently supports two mechanisms that provide a health signal for @@ -210,3 +236,5 @@ func (scw *subConnWrapper) setLatestConnectivityState(state connectivity.State) defer scw.mu.Unlock() scw.latestReceivedConnectivityState = state } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go index ba3fe52e5c..5cfd764e53 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go @@ -123,7 +123,10 @@ func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) err return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) } addressesSplit := hierarchy.Group(s.ResolverState.Addresses) +<<<<<<< HEAD endpointsSplit := hierarchy.GroupEndpoints(s.ResolverState.Endpoints) +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) b.mu.Lock() // Create and remove children, since we know all children from the config @@ -143,7 +146,10 @@ func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) err cb := newChildBalancer(name, b, bb.Name(), b.cc) cb.updateConfig(newSubConfig, resolver.State{ Addresses: addressesSplit[name], +<<<<<<< HEAD Endpoints: endpointsSplit[name], +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ServiceConfig: s.ResolverState.ServiceConfig, Attributes: s.ResolverState.Attributes, }) @@ -165,7 +171,10 @@ func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) err // be built, if it's a low priority). 
currentChild.updateConfig(newSubConfig, resolver.State{ Addresses: addressesSplit[name], +<<<<<<< HEAD Endpoints: endpointsSplit[name], +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ServiceConfig: s.ResolverState.ServiceConfig, Attributes: s.ResolverState.Attributes, }) diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go index 8c44f19c3b..923e3f8c0a 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go @@ -285,10 +285,14 @@ func (b *ringhashBalancer) updateAddresses(addrs []resolver.Address) bool { } func (b *ringhashBalancer) UpdateClientConnState(s balancer.ClientConnState) error { +<<<<<<< HEAD if b.logger.V(2) { b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) } +======= + b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) newConfig, ok := s.BalancerConfig.(*LBConfig) if !ok { return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/balancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/balancer.go index 2b289a8114..c016ca77ba 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/balancer.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/balancer.go @@ -120,6 +120,7 @@ func SetAddrInfo(addr resolver.Address, addrInfo AddrInfo) resolver.Address { return addr } +<<<<<<< HEAD // SetAddrInfoInEndpoint returns a copy of endpoint in which the Attributes // field is updated with AddrInfo. 
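// Only the endpoint's Attributes are modified; the endpoint's address list
// is left unchanged.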
func SetAddrInfoInEndpoint(endpoint resolver.Endpoint, addrInfo AddrInfo) resolver.Endpoint { @@ -127,6 +128,8 @@ func SetAddrInfoInEndpoint(endpoint resolver.Endpoint, addrInfo AddrInfo) resolv return endpoint } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (a AddrInfo) String() string { return fmt.Sprintf("Locality Weight: %d", a.LocalityWeight) } diff --git a/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go b/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go index 0ffa9c8272..986d1e96fb 100644 --- a/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go +++ b/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go @@ -24,7 +24,11 @@ import ( "errors" "fmt" "io" +<<<<<<< HEAD rand "math/rand/v2" +======= + "math/rand" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "strconv" "sync/atomic" "time" @@ -162,7 +166,11 @@ func (i *interceptor) NewStream(ctx context.Context, _ iresolver.RPCInfo, done f } // For overriding in tests +<<<<<<< HEAD var randIntn = rand.IntN +======= +var randIntn = rand.Intn +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var newTimer = time.NewTimer func injectDelay(ctx context.Context, delayCfg *cpb.FaultDelay) error { diff --git a/vendor/google.golang.org/grpc/xds/internal/internal.go b/vendor/google.golang.org/grpc/xds/internal/internal.go index 74c9195215..6bffe2eecd 100644 --- a/vendor/google.golang.org/grpc/xds/internal/internal.go +++ b/vendor/google.golang.org/grpc/xds/internal/internal.go @@ -86,12 +86,15 @@ func SetLocalityID(addr resolver.Address, l LocalityID) resolver.Address { return addr } +<<<<<<< HEAD // SetLocalityIDInEndpoint sets locality ID in endpoint to l. func SetLocalityIDInEndpoint(endpoint resolver.Endpoint, l LocalityID) resolver.Endpoint { endpoint.Attributes = endpoint.Attributes.WithValue(localityKey, l) return endpoint } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // ResourceTypeMapForTesting maps TypeUrl to corresponding ResourceType. var ResourceTypeMapForTesting map[string]any diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go b/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go index 7df75465ac..19def17a7c 100644 --- a/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go @@ -23,7 +23,11 @@ import ( "encoding/json" "fmt" "math/bits" +<<<<<<< HEAD rand "math/rand/v2" +======= + "math/rand" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "strings" "sync/atomic" "time" diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go b/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go index 1ba6c001d9..087184622e 100644 --- a/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go @@ -22,7 +22,11 @@ package resolver import ( "context" "fmt" +<<<<<<< HEAD rand "math/rand/v2" +======= + "math/rand" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "sync/atomic" "google.golang.org/grpc/internal" @@ -44,10 +48,17 @@ import ( // xdsresolver.Scheme const Scheme = "xds" +<<<<<<< HEAD // newBuilderWithConfigForTesting creates a new xds resolver builder using a // specific xds bootstrap config, so tests can use multiple xds clients in // different ClientConns at the same time. 
func newBuilderWithConfigForTesting(config []byte) (resolver.Builder, error) { +======= +// newBuilderForTesting creates a new xds resolver builder using a specific xds +// bootstrap config, so tests can use multiple xds clients in different +// ClientConns at the same time. +func newBuilderForTesting(config []byte) (resolver.Builder, error) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return &xdsResolverBuilder{ newXDSClient: func(name string) (xdsclient.XDSClient, func(), error) { return xdsclient.NewForTesting(xdsclient.OptionsForTesting{Name: name, Contents: config}) @@ -55,6 +66,7 @@ func newBuilderWithConfigForTesting(config []byte) (resolver.Builder, error) { }, nil } +<<<<<<< HEAD // newBuilderWithClientForTesting creates a new xds resolver builder using the // specific xDS client, so that tests have complete control over the exact // specific xDS client being used. @@ -72,6 +84,11 @@ func init() { resolver.Register(&xdsResolverBuilder{}) internal.NewXDSResolverWithConfigForTesting = newBuilderWithConfigForTesting internal.NewXDSResolverWithClientForTesting = newBuilderWithClientForTesting +======= +func init() { + resolver.Register(&xdsResolverBuilder{}) + internal.NewXDSResolverWithConfigForTesting = newBuilderForTesting +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) rinternal.NewWRR = wrr.NewRandom rinternal.NewXDSClient = xdsclient.New diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go index f81685a45e..0f40119160 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go @@ -19,6 +19,7 @@ package xdsclient import ( "context" +<<<<<<< HEAD "fmt" "sync/atomic" @@ -27,6 +28,20 @@ import ( "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/xds/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/transport/ads" +======= + "errors" + "fmt" + "strings" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/load" + "google.golang.org/grpc/xds/internal/xdsclient/transport" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/types/known/anypb" "google.golang.org/protobuf/types/known/timestamppb" @@ -35,6 +50,7 @@ import ( v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" ) +<<<<<<< HEAD type resourceState struct { watchers map[xdsresource.ResourceWatcher]bool // Set of watchers for this resource. cache xdsresource.ResourceData // Most recent ACKed update for this resource. @@ -89,6 +105,46 @@ type authority struct { // The below defined fields must only be accessed in the context of the // serializer callback, owned by this authority. +======= +type watchState int + +const ( + watchStateStarted watchState = iota // Watch started, request not yet set. + watchStateRequested // Request sent for resource being watched. + watchStateReceived // Response received for resource being watched. + watchStateTimeout // Watch timer expired, no response. + watchStateCanceled // Watch cancelled. 
+) + +type resourceState struct { + watchers map[xdsresource.ResourceWatcher]bool // Set of watchers for this resource + cache xdsresource.ResourceData // Most recent ACKed update for this resource + md xdsresource.UpdateMetadata // Metadata for the most recent update + deletionIgnored bool // True if resource deletion was ignored for a prior update + + // Common watch state for all watchers of this resource. + wTimer *time.Timer // Expiry timer + wState watchState // State of the watch +} + +// authority wraps all state associated with a single management server. It +// contains the transport used to communicate with the management server and a +// cache of resource state for resources requested from the management server. +// +// Bootstrap configuration could contain multiple entries in the authorities map +// that share the same server config (server address and credentials to use). We +// share the same authority instance amongst these entries, and the reference +// counting is taken care of by the `clientImpl` type. +type authority struct { + serverCfg *bootstrap.ServerConfig // Server config for this authority + bootstrapCfg *bootstrap.Config // Full bootstrap configuration + refCount int // Reference count of watches referring to this authority + serializer *grpcsync.CallbackSerializer // Callback serializer for invoking watch callbacks + resourceTypeGetter func(string) xdsresource.Type // ResourceType registry lookup + transport *transport.Transport // Underlying xDS transport to the management server + watchExpiryTimeout time.Duration // Resource watch expiry timeout + logger *grpclog.PrefixLogger +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // A two level map containing the state of all the resources being watched. // @@ -98,6 +154,7 @@ type authority struct { // // The second level map key is the resource name, with the value being the // actual state of the resource. +<<<<<<< HEAD resources map[xdsresource.Type]map[string]*resourceState // An ordered list of xdsChannels corresponding to the list of server @@ -320,6 +377,91 @@ func (a *authority) adsResourceUpdate(serverConfig *bootstrap.ServerConfig, rTyp // Only executed in the context of a serializer callback. func (a *authority) handleADSResourceUpdate(serverConfig *bootstrap.ServerConfig, rType xdsresource.Type, updates map[string]ads.DataAndErrTuple, md xdsresource.UpdateMetadata, onDone func()) { a.handleRevertingToPrimaryOnUpdate(serverConfig) +======= + resourcesMu sync.Mutex + resources map[xdsresource.Type]map[string]*resourceState + closed bool +} + +// authorityArgs is a convenience struct to wrap arguments required to create a +// new authority. All fields here correspond directly to appropriate fields +// stored in the authority struct. +type authorityArgs struct { + // The reason for passing server config and bootstrap config separately + // (although the former is part of the latter) is because authorities in the + // bootstrap config might contain an empty server config, and in this case, + // the top-level server config is to be used. + serverCfg *bootstrap.ServerConfig + bootstrapCfg *bootstrap.Config + serializer *grpcsync.CallbackSerializer + resourceTypeGetter func(string) xdsresource.Type + watchExpiryTimeout time.Duration + backoff func(int) time.Duration // Backoff for ADS and LRS stream failures. 
+ logger *grpclog.PrefixLogger +} + +func newAuthority(args authorityArgs) (*authority, error) { + ret := &authority{ + serverCfg: args.serverCfg, + bootstrapCfg: args.bootstrapCfg, + serializer: args.serializer, + resourceTypeGetter: args.resourceTypeGetter, + watchExpiryTimeout: args.watchExpiryTimeout, + logger: args.logger, + resources: make(map[xdsresource.Type]map[string]*resourceState), + } + + tr, err := transport.New(transport.Options{ + ServerCfg: args.serverCfg, + OnRecvHandler: ret.handleResourceUpdate, + OnErrorHandler: ret.newConnectionError, + OnSendHandler: ret.transportOnSendHandler, + Backoff: args.backoff, + Logger: args.logger, + NodeProto: args.bootstrapCfg.Node(), + }) + if err != nil { + return nil, fmt.Errorf("creating new transport to %q: %v", args.serverCfg, err) + } + ret.transport = tr + return ret, nil +} + +// transportOnSendHandler is called by the underlying transport when it sends a +// resource request successfully. Timers are activated for resources waiting for +// a response. +func (a *authority) transportOnSendHandler(u *transport.ResourceSendInfo) { + rType := a.resourceTypeGetter(u.URL) + // Resource type not found is not expected under normal circumstances, since + // the resource type url passed to the transport is determined by the authority. + if rType == nil { + a.logger.Warningf("Unknown resource type url: %s.", u.URL) + return + } + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + a.startWatchTimersLocked(rType, u.ResourceNames) +} + +func (a *authority) handleResourceUpdate(resourceUpdate transport.ResourceUpdate, onDone func()) error { + rType := a.resourceTypeGetter(resourceUpdate.URL) + if rType == nil { + return xdsresource.NewErrorf(xdsresource.ErrorTypeResourceTypeUnsupported, "Resource URL %v unknown in response from server", resourceUpdate.URL) + } + + opts := &xdsresource.DecodeOptions{ + BootstrapConfig: a.bootstrapCfg, + ServerConfig: a.serverCfg, + } + updates, md, err := decodeAllResources(opts, rType, resourceUpdate) + a.updateResourceStateAndScheduleCallbacks(rType, updates, md, onDone) + return err +} + +func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Type, updates map[string]resourceDataErrTuple, md xdsresource.UpdateMetadata, onDone func()) { + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // We build a list of callback funcs to invoke, and invoke them at the end // of this method instead of inline (when handling the update for a @@ -329,7 +471,12 @@ func (a *authority) handleADSResourceUpdate(serverConfig *bootstrap.ServerConfig // callbacks are invoked, and the watchers have processed the update. watcherCnt := new(atomic.Int64) done := func() { +<<<<<<< HEAD if watcherCnt.Add(-1) == 0 { +======= + watcherCnt.Add(-1) + if watcherCnt.Load() == 0 { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) onDone() } } @@ -340,15 +487,22 @@ func (a *authority) handleADSResourceUpdate(serverConfig *bootstrap.ServerConfig // this update, invoke onDone explicitly to unblock the next read on // the ADS stream. 
onDone() +<<<<<<< HEAD return } for _, f := range funcsToSchedule { a.watcherCallbackSerializer.ScheduleOr(f, onDone) +======= + } + for _, f := range funcsToSchedule { + a.serializer.ScheduleOr(f, onDone) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } }() resourceStates := a.resources[rType] for name, uErr := range updates { +<<<<<<< HEAD state, ok := resourceStates[name] if !ok { continue @@ -401,6 +555,87 @@ func (a *authority) handleADSResourceUpdate(serverConfig *bootstrap.ServerConfig if md.ErrState != nil { state.md.Version = md.ErrState.Version } +======= + if state, ok := resourceStates[name]; ok { + // Cancel the expiry timer associated with the resource once a + // response is received, irrespective of whether the update is a + // good one or not. + // + // We check for watch states `started` and `requested` here to + // accommodate for a race which can happen in the following + // scenario: + // - When a watch is registered, it is possible that the ADS stream + // is not yet created. In this case, the request for the resource + // is not sent out immediately. An entry in the `resourceStates` + // map is created with a watch state of `started`. + // - Once the stream is created, it is possible that the management + // server might respond with the requested resource before we send + // out request for the same. If we don't check for `started` here, + // and move the state to `received`, we will end up starting the + // timer when the request gets sent out. And since the management + // server already sent us the resource, there is a good chance + // that it will not send it again. This would eventually lead to + // the timer firing, even though we have the resource in the + // cache. + if state.wState == watchStateStarted || state.wState == watchStateRequested { + // It is OK to ignore the return value from Stop() here because + // if the timer has already fired, it means that the timer watch + // expiry callback is blocked on the same lock that we currently + // hold. Since we move the state to `received` here, the timer + // callback will be a no-op. + if state.wTimer != nil { + state.wTimer.Stop() + } + state.wState = watchStateReceived + } + + if uErr.err != nil { + // On error, keep previous version of the resource. But update + // status and error. + state.md.ErrState = md.ErrState + state.md.Status = md.Status + for watcher := range state.watchers { + watcher := watcher + err := uErr.err + watcherCnt.Add(1) + funcsToSchedule = append(funcsToSchedule, func(context.Context) { watcher.OnError(err, done) }) + } + continue + } + + if state.deletionIgnored { + state.deletionIgnored = false + a.logger.Infof("A valid update was received for resource %q of type %q after previously ignoring a deletion", name, rType.TypeName()) + } + // Notify watchers if any of these conditions are met: + // - this is the first update for this resource + // - this update is different from the one currently cached + // - the previous update for this resource was NACKed, but the update + // before that was the same as this update. + if state.cache == nil || !state.cache.Equal(uErr.resource) || state.md.ErrState != nil { + for watcher := range state.watchers { + watcher := watcher + resource := uErr.resource + watcherCnt.Add(1) + funcsToSchedule = append(funcsToSchedule, func(context.Context) { watcher.OnUpdate(resource, done) }) + } + } + // Sync cache. 
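+ // Overwrite the cached copy with the newly validated resource.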
+ if a.logger.V(2) { + a.logger.Infof("Resource type %q with name %q added to cache", rType.TypeName(), name) + } + state.cache = uErr.resource + // Set status to ACK, and clear error state. The metadata might be a + // NACK metadata because some other resources in the same response + // are invalid. + state.md = md + state.md.ErrState = nil + state.md.Status = xdsresource.ServiceStatusACKed + if md.ErrState != nil { + state.md.Version = md.ErrState.Version + } + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // If this resource type requires that all resources be present in every @@ -430,15 +665,20 @@ func (a *authority) handleADSResourceUpdate(serverConfig *bootstrap.ServerConfig // resource. So, there is no need to generate another one. continue } +<<<<<<< HEAD if _, ok := updates[name]; ok { // If the resource was present in the response, move on. continue } if state.md.Status == xdsresource.ServiceStatusNotExist { +======= + if _, ok := updates[name]; !ok { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The metadata status is set to "ServiceStatusNotExist" if a // previous update deleted this resource, in which case we do not // want to repeatedly call the watch callbacks with a // "resource-not-found" error. +<<<<<<< HEAD continue } if serverConfig.ServerFeaturesIgnoreResourceDeletion() { @@ -503,10 +743,281 @@ func (a *authority) handleADSResourceDoesNotExist(rType xdsresource.Type, resour return } +======= + if state.md.Status == xdsresource.ServiceStatusNotExist { + continue + } + // Per A53, resource deletions are ignored if the `ignore_resource_deletion` + // server feature is enabled through the bootstrap configuration. If the + // resource deletion is to be ignored, the resource is not removed from + // the cache and the corresponding OnResourceDoesNotExist() callback is + // not invoked on the watchers. + if a.serverCfg.ServerFeaturesIgnoreResourceDeletion() { + if !state.deletionIgnored { + state.deletionIgnored = true + a.logger.Warningf("Ignoring resource deletion for resource %q of type %q", name, rType.TypeName()) + } + continue + } + // If resource exists in cache, but not in the new update, delete + // the resource from cache, and also send a resource not found error + // to indicate resource removed. Metadata for the resource is still + // maintained, as this is required by CSDS. + state.cache = nil + state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} + for watcher := range state.watchers { + watcher := watcher + watcherCnt.Add(1) + funcsToSchedule = append(funcsToSchedule, func(context.Context) { watcher.OnResourceDoesNotExist(done) }) + } + } + } +} + +type resourceDataErrTuple struct { + resource xdsresource.ResourceData + err error +} + +func decodeAllResources(opts *xdsresource.DecodeOptions, rType xdsresource.Type, update transport.ResourceUpdate) (map[string]resourceDataErrTuple, xdsresource.UpdateMetadata, error) { + timestamp := time.Now() + md := xdsresource.UpdateMetadata{ + Version: update.Version, + Timestamp: timestamp, + } + + topLevelErrors := make([]error, 0) // Tracks deserialization errors, where we don't have a resource name. + perResourceErrors := make(map[string]error) // Tracks resource validation errors, where we have a resource name. + ret := make(map[string]resourceDataErrTuple) // Return result, a map from resource name to either resource data or error. 
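+
+ // Decode every resource in the response: failures without a recoverable
+ // resource name are collected as top-level errors, while named failures
+ // are recorded per resource.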
+ for _, r := range update.Resources {
+ result, err := rType.Decode(opts, r)
+
+ // Name field of the result is left unpopulated only when resource
+ // deserialization fails.
+ name := ""
+ if result != nil {
+ name = xdsresource.ParseName(result.Name).String()
+ }
+ if err == nil {
+ ret[name] = resourceDataErrTuple{resource: result.Resource}
+ continue
+ }
+ if name == "" {
+ topLevelErrors = append(topLevelErrors, err)
+ continue
+ }
+ perResourceErrors[name] = err
+ // Add a placeholder in the map so we know this resource name was in
+ // the response.
+ ret[name] = resourceDataErrTuple{err: err}
+ }
+
+ if len(topLevelErrors) == 0 && len(perResourceErrors) == 0 {
+ md.Status = xdsresource.ServiceStatusACKed
+ return ret, md, nil
+ }
+
+ md.Status = xdsresource.ServiceStatusNACKed
+ errRet := combineErrors(rType.TypeName(), topLevelErrors, perResourceErrors)
+ md.ErrState = &xdsresource.UpdateErrorMetadata{
+ Version: update.Version,
+ Err: errRet,
+ Timestamp: timestamp,
+ }
+ return ret, md, errRet
+}
+
+// startWatchTimersLocked is invoked from the transport.OnSend() callback with the
+// resources requested on the underlying ADS stream. This satisfies the conditions to start
+// watch timers per A57 [https://github.com/grpc/proposal/blob/master/A57-xds-client-failure-mode-behavior.md#handling-resources-that-do-not-exist].
+//
+// Caller must hold a.resourcesMu.
+func (a *authority) startWatchTimersLocked(rType xdsresource.Type, resourceNames []string) {
+ resourceStates := a.resources[rType]
+ for _, resourceName := range resourceNames {
+ if state, ok := resourceStates[resourceName]; ok {
+ if state.wState != watchStateStarted {
+ continue
+ }
+ state.wTimer = time.AfterFunc(a.watchExpiryTimeout, func() {
+ a.resourcesMu.Lock()
+ a.handleWatchTimerExpiryLocked(rType, resourceName, state)
+ a.resourcesMu.Unlock()
+ })
+ state.wState = watchStateRequested
+ }
+ }
+}
+
+// stopWatchTimersLocked is invoked upon connection errors to stop watch timers
+// for resources that have been requested, but not yet responded to by the management
+// server.
+//
+// Caller must hold a.resourcesMu.
+func (a *authority) stopWatchTimersLocked() {
+ for _, rType := range a.resources {
+ for resourceName, state := range rType {
+ if state.wState != watchStateRequested {
+ continue
+ }
+ if !state.wTimer.Stop() {
+ // If the timer has already fired, it means that the timer watch expiry
+ // callback is blocked on the same lock that we currently hold. Don't change
+ // the watch state and instead let the watch expiry callback handle it.
+ a.logger.Warningf("Watch timer for resource %v already fired. Ignoring here.", resourceName)
+ continue
+ }
+ state.wTimer = nil
+ state.wState = watchStateStarted
+ }
+ }
+}
+
+// newConnectionError is called by the underlying transport when it receives a
+// connection error. The error will be forwarded to all the resource watchers.
+func (a *authority) newConnectionError(err error) {
+ a.resourcesMu.Lock()
+ defer a.resourcesMu.Unlock()
+
+ a.stopWatchTimersLocked()
+
+ // We do not consider it an error if the ADS stream was closed after having received
+ // a response on the stream. This is because there are legitimate reasons why the server
+ // may need to close the stream during normal operations, such as needing to rebalance
+ // load or the underlying connection hitting its max connection age limit.
+ // See gRFC A57 for more details.
+ if xdsresource.ErrType(err) == xdsresource.ErrTypeStreamFailedAfterRecv { + a.logger.Warningf("Watchers not notified since ADS stream failed after having received at least one response: %v", err) + return + } + + for _, rType := range a.resources { + for _, state := range rType { + // Propagate the connection error from the transport layer to all watchers. + for watcher := range state.watchers { + watcher := watcher + a.serializer.TrySchedule(func(context.Context) { + watcher.OnError(xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, "xds: error received from xDS stream: %v", err), func() {}) + }) + } + } + } +} + +// Increments the reference count. Caller must hold parent's authorityMu. +func (a *authority) refLocked() { + a.refCount++ +} + +// Decrements the reference count. Caller must hold parent's authorityMu. +func (a *authority) unrefLocked() int { + a.refCount-- + return a.refCount +} + +func (a *authority) close() { + a.transport.Close() + + a.resourcesMu.Lock() + a.closed = true + a.resourcesMu.Unlock() +} + +func (a *authority) watchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) func() { + if a.logger.V(2) { + a.logger.Infof("New watch for type %q, resource name %q", rType.TypeName(), resourceName) + } + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + + // Lookup the ResourceType specific resources from the top-level map. If + // there is no entry for this ResourceType, create one. + resources := a.resources[rType] + if resources == nil { + resources = make(map[string]*resourceState) + a.resources[rType] = resources + } + + // Lookup the resourceState for the particular resource that the watch is + // being registered for. If this is the first watch for this resource, + // instruct the transport layer to send a DiscoveryRequest for the same. + state := resources[resourceName] + if state == nil { + if a.logger.V(2) { + a.logger.Infof("First watch for type %q, resource name %q", rType.TypeName(), resourceName) + } + state = &resourceState{ + watchers: make(map[xdsresource.ResourceWatcher]bool), + md: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}, + wState: watchStateStarted, + } + resources[resourceName] = state + a.sendDiscoveryRequestLocked(rType, resources) + } + // Always add the new watcher to the set of watchers. + state.watchers[watcher] = true + + // If we have a cached copy of the resource, notify the new watcher. + if state.cache != nil { + if a.logger.V(2) { + a.logger.Infof("Resource type %q with resource name %q found in cache: %s", rType.TypeName(), resourceName, state.cache.ToJSON()) + } + resource := state.cache + a.serializer.TrySchedule(func(context.Context) { watcher.OnUpdate(resource, func() {}) }) + } + + return func() { + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + + // We already have a reference to the resourceState for this particular + // resource. Avoid indexing into the two-level map to figure this out. + + // Delete this particular watcher from the list of watchers, so that its + // callback will not be invoked in the future. + state.wState = watchStateCanceled + delete(state.watchers, watcher) + if len(state.watchers) > 0 { + return + } + + // There are no more watchers for this resource, delete the state + // associated with it, and instruct the transport to send a request + // which does not include this resource name. 
+ if a.logger.V(2) {
+ a.logger.Infof("Removing last watch for type %q, resource name %q", rType.TypeName(), resourceName)
+ }
+ delete(resources, resourceName)
+ a.sendDiscoveryRequestLocked(rType, resources)
+ }
+}
+
+func (a *authority) handleWatchTimerExpiryLocked(rType xdsresource.Type, resourceName string, state *resourceState) {
+ if a.closed {
+ return
+ }
+ a.logger.Warningf("Watch for resource %q of type %s timed out", resourceName, rType.TypeName())
+
+ switch state.wState {
+ case watchStateRequested:
+ // This is the only state where we need to handle the timer expiry by
+ // invoking appropriate watch callbacks. This is handled outside the switch.
+ case watchStateCanceled:
+ return
+ default:
+ a.logger.Warningf("Unexpected watch state %q for resource %q.", state.wState, resourceName)
+ return
+ }
+
+ state.wState = watchStateTimeout
+ // With the watch timer firing, it is safe to assume that the resource does
+ // not exist on the management server.
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 state.cache = nil
 state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}
 for watcher := range state.watchers {
 watcher := watcher
+<<<<<<< HEAD
 a.watcherCallbackSerializer.TrySchedule(func(context.Context) { watcher.OnResourceDoesNotExist(func() {}) })
 }
}

@@ -804,6 +1315,61 @@ func (a *authority) dumpResources() []*v3statuspb.ClientConfig_GenericXdsConfig
 //
 // Only executed in the context of a serializer callback.
 func (a *authority) resourceConfig() []*v3statuspb.ClientConfig_GenericXdsConfig {
+=======
+ a.serializer.TrySchedule(func(context.Context) { watcher.OnResourceDoesNotExist(func() {}) })
+ }
+}
+
+func (a *authority) triggerResourceNotFoundForTesting(rType xdsresource.Type, resourceName string) {
+ a.resourcesMu.Lock()
+ defer a.resourcesMu.Unlock()
+
+ if a.closed {
+ return
+ }
+ resourceStates := a.resources[rType]
+ state, ok := resourceStates[resourceName]
+ if !ok {
+ return
+ }
+ // Skip if the watch was already canceled, or if watchStateTimeout already
+ // triggered resource-not-found via normal watch expiry above.
+ if state.wState == watchStateCanceled || state.wState == watchStateTimeout {
+ return
+ }
+ state.wState = watchStateTimeout
+ state.cache = nil
+ state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}
+ for watcher := range state.watchers {
+ watcher := watcher
+ a.serializer.TrySchedule(func(context.Context) { watcher.OnResourceDoesNotExist(func() {}) })
+ }
+}
+
+// sendDiscoveryRequestLocked sends a discovery request for the specified
+// resource type and resource names. Even though this method does not directly
+// access the resource cache, it is important that `resourcesMu` be held when
+// calling this method to ensure that a consistent snapshot of resource names is
+// being requested.
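+// Every request carries the complete set of resource names for the type, as
+// required by the state-of-the-world xDS protocol variant.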
+func (a *authority) sendDiscoveryRequestLocked(rType xdsresource.Type, resources map[string]*resourceState) { + resourcesToRequest := make([]string, len(resources)) + i := 0 + for name := range resources { + resourcesToRequest[i] = name + i++ + } + a.transport.SendRequest(rType.TypeURL(), resourcesToRequest) +} + +func (a *authority) reportLoad() (*load.Store, func()) { + return a.transport.ReportLoad() +} + +func (a *authority) dumpResources() []*v3statuspb.ClientConfig_GenericXdsConfig { + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var ret []*v3statuspb.ClientConfig_GenericXdsConfig for rType, resourceStates := range a.resources { typeURL := rType.TypeURL() @@ -833,6 +1399,7 @@ func (a *authority) resourceConfig() []*v3statuspb.ClientConfig_GenericXdsConfig return ret } +<<<<<<< HEAD func (a *authority) close() { a.xdsClientSerializerClose() <-a.xdsClientSerializer.Done() @@ -841,6 +1408,8 @@ func (a *authority) close() { } } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func serviceStatusToProto(serviceStatus xdsresource.ServiceStatus) v3adminpb.ClientResourceStatus { switch serviceStatus { case xdsresource.ServiceStatusUnknown: @@ -857,3 +1426,31 @@ func serviceStatusToProto(serviceStatus xdsresource.ServiceStatus) v3adminpb.Cli return v3adminpb.ClientResourceStatus_UNKNOWN } } +<<<<<<< HEAD +======= + +func combineErrors(rType string, topLevelErrors []error, perResourceErrors map[string]error) error { + var errStrB strings.Builder + errStrB.WriteString(fmt.Sprintf("error parsing %q response: ", rType)) + if len(topLevelErrors) > 0 { + errStrB.WriteString("top level errors: ") + for i, err := range topLevelErrors { + if i != 0 { + errStrB.WriteString(";\n") + } + errStrB.WriteString(err.Error()) + } + } + if len(perResourceErrors) > 0 { + var i int + for name, err := range perResourceErrors { + if i != 0 { + errStrB.WriteString(";\n") + } + i++ + errStrB.WriteString(fmt.Sprintf("resource %q: %v", name, err.Error())) + } + } + return errors.New(errStrB.String()) +} +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go index eba00907dc..b95dc74f16 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go @@ -32,9 +32,15 @@ import ( type XDSClient interface { // WatchResource uses xDS to discover the resource associated with the // provided resource name. The resource type implementation determines how +<<<<<<< HEAD // xDS responses are are deserialized and validated, as received from the // xDS management server. Upon receipt of a response from the management // server, an appropriate callback on the watcher is invoked. +======= + // xDS requests are sent out and how responses are deserialized and + // validated. Upon receipt of a response from the management server, an + // appropriate callback on the watcher is invoked. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // Most callers will not have a need to use this API directly. 
They will // instead use a resource-type-specific wrapper API provided by the relevant diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go index 55299c457b..526e2070e4 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go @@ -26,11 +26,17 @@ import ( "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/backoff" +<<<<<<< HEAD "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/xds/bootstrap" xdsclientinternal "google.golang.org/grpc/xds/internal/xdsclient/internal" "google.golang.org/grpc/xds/internal/xdsclient/transport/ads" "google.golang.org/grpc/xds/internal/xdsclient/transport/grpctransport" +======= + "google.golang.org/grpc/internal/cache" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/xds/bootstrap" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -56,6 +62,7 @@ const NameForServer = "#server" // only when all references are released, and it is safe for the caller to // invoke this close function multiple times. func New(name string) (XDSClient, func(), error) { +<<<<<<< HEAD config, err := bootstrap.GetConfiguration() if err != nil { return nil, nil, fmt.Errorf("xds: failed to get xDS bootstrap config: %v", err) @@ -69,11 +76,22 @@ func newClientImpl(config *bootstrap.Config, watchExpiryTimeout time.Duration, s c := &clientImpl{ done: grpcsync.NewEvent(), authorities: make(map[string]*authority), +======= + return newRefCounted(name, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout, backoff.DefaultExponential.Backoff) +} + +// newClientImpl returns a new xdsClient with the given config. +func newClientImpl(config *bootstrap.Config, watchExpiryTimeout time.Duration, idleAuthorityDeleteTimeout time.Duration, streamBackoff func(int) time.Duration) (*clientImpl, error) { + ctx, cancel := context.WithCancel(context.Background()) + c := &clientImpl{ + done: grpcsync.NewEvent(), +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) config: config, watchExpiryTimeout: watchExpiryTimeout, backoff: streamBackoff, serializer: grpcsync.NewCallbackSerializer(ctx), serializerClose: cancel, +<<<<<<< HEAD transportBuilder: &grpctransport.Builder{}, resourceTypes: newResourceTypeRegistry(), xdsActiveChannels: make(map[string]*channelState), @@ -101,6 +119,13 @@ func newClientImpl(config *bootstrap.Config, watchExpiryTimeout time.Duration, s getChannelForADS: c.getChannelForADS, logPrefix: clientPrefix(c), }) +======= + resourceTypes: newResourceTypeRegistry(), + authorities: make(map[string]*authority), + idleAuthorities: cache.NewTimeoutCache(idleAuthorityDeleteTimeout), + } + +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.logger = prefixLogger(c) return c, nil } @@ -110,7 +135,10 @@ func newClientImpl(config *bootstrap.Config, watchExpiryTimeout time.Duration, s type OptionsForTesting struct { // Name is a unique name for this xDS client. Name string +<<<<<<< HEAD +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Contents contain a JSON representation of the bootstrap configuration to // be used when creating the xDS client. Contents []byte @@ -119,9 +147,19 @@ type OptionsForTesting struct { // unspecified, uses the default value used in non-test code. 
WatchExpiryTimeout time.Duration +<<<<<<< HEAD // StreamBackoffAfterFailure is the backoff function used to determine the // backoff duration after stream failures. // If unspecified, uses the default value used in non-test code. +======= + // AuthorityIdleTimeout is the timeout before idle authorities are deleted. + // If unspecified, uses the default value used in non-test code. + AuthorityIdleTimeout time.Duration + + // StreamBackoffAfterFailure is the backoff function used to determine the + // backoff duration after stream failures. If unspecified, uses the default + // value used in non-test code. +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) StreamBackoffAfterFailure func(int) time.Duration } @@ -141,15 +179,29 @@ func NewForTesting(opts OptionsForTesting) (XDSClient, func(), error) { if opts.WatchExpiryTimeout == 0 { opts.WatchExpiryTimeout = defaultWatchExpiryTimeout } +<<<<<<< HEAD +======= + if opts.AuthorityIdleTimeout == 0 { + opts.AuthorityIdleTimeout = defaultIdleAuthorityDeleteTimeout + } +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if opts.StreamBackoffAfterFailure == nil { opts.StreamBackoffAfterFailure = defaultStreamBackoffFunc } +<<<<<<< HEAD config, err := bootstrap.NewConfigForTesting(opts.Contents) if err != nil { return nil, nil, err } return newRefCounted(opts.Name, config, opts.WatchExpiryTimeout, opts.StreamBackoffAfterFailure) +======= + if err := bootstrap.SetFallbackBootstrapConfig(opts.Contents); err != nil { + return nil, nil, err + } + client, cancel, err := newRefCounted(opts.Name, opts.WatchExpiryTimeout, opts.AuthorityIdleTimeout, opts.StreamBackoffAfterFailure) + return client, func() { bootstrap.UnsetFallbackBootstrapConfigForTesting(); cancel() }, err +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // GetForTesting returns an xDS client created earlier using the given name. 
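Either side of this conflict preserves the same test-only constructor, so a minimal usage sketch still applies (the client name and bootstrap JSON below are illustrative, not part of this patch):

```go
// Hypothetical test snippet (inside a test with t *testing.T): build an
// isolated xDS client from in-memory bootstrap configuration, and always
// release it via the returned cleanup func.
contents := []byte(`{
  "xds_servers": [{"server_uri": "localhost:9999", "channel_creds": [{"type": "insecure"}]}],
  "node": {"id": "test-node"}
}`)
client, cleanup, err := xdsclient.NewForTesting(xdsclient.OptionsForTesting{
	Name:     "archivista-test-client",
	Contents: contents,
})
if err != nil {
	t.Fatalf("NewForTesting() failed: %v", err)
}
defer cleanup()
_ = client // watches are registered via resource-type-specific wrapper APIs
```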
@@ -175,7 +227,10 @@ func GetForTesting(name string) (XDSClient, func(), error) { func init() { internal.TriggerXDSResourceNotFoundForTesting = triggerXDSResourceNotFoundForTesting +<<<<<<< HEAD xdsclientinternal.ResourceWatchStateForTesting = resourceWatchStateForTesting +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func triggerXDSResourceNotFoundForTesting(client XDSClient, typ xdsresource.Type, name string) error { @@ -186,6 +241,7 @@ func triggerXDSResourceNotFoundForTesting(client XDSClient, typ xdsresource.Type return crc.clientImpl.triggerResourceNotFoundForTesting(typ, name) } +<<<<<<< HEAD func resourceWatchStateForTesting(client XDSClient, typ xdsresource.Type, name string) (ads.ResourceWatchState, error) { crc, ok := client.(*clientRefCounted) if !ok { @@ -194,6 +250,8 @@ func resourceWatchStateForTesting(client XDSClient, typ xdsresource.Type, name s return crc.clientImpl.resourceWatchStateForTesting(typ, name) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var ( clients = map[string]*clientRefCounted{} clientsMu sync.Mutex diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go index f5fc76d8a7..fdcef4a1c1 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go @@ -19,6 +19,10 @@ package xdsclient import ( +<<<<<<< HEAD +======= + "fmt" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "sync/atomic" "time" @@ -27,7 +31,14 @@ import ( "google.golang.org/grpc/internal/xds/bootstrap" ) +<<<<<<< HEAD const defaultWatchExpiryTimeout = 15 * time.Second +======= +const ( + defaultWatchExpiryTimeout = 15 * time.Second + defaultIdleAuthorityDeleteTimeout = 5 * time.Minute +) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var ( // The following functions are no-ops in the actual code, but can be @@ -40,6 +51,7 @@ var ( func clientRefCountedClose(name string) { clientsMu.Lock() +<<<<<<< HEAD client, ok := clients[name] if !ok { logger.Errorf("Attempt to close a non-existent xDS client with name %s", name) @@ -58,13 +70,32 @@ func clientRefCountedClose(name string) { // Hence, this needs to be called without holding the lock. client.clientImpl.close() xdsClientImplCloseHook(name) +======= + defer clientsMu.Unlock() + + client, ok := clients[name] + if !ok { + logger.Errorf("Attempt to close a non-existent xDS client with name %s", name) + return + } + if client.decrRef() != 0 { + return + } + client.clientImpl.close() + xdsClientImplCloseHook(name) + delete(clients, name) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // newRefCounted creates a new reference counted xDS client implementation for // name, if one does not exist already. If an xDS client for the given name // exists, it gets a reference to it and returns it. +<<<<<<< HEAD func newRefCounted(name string, config *bootstrap.Config, watchExpiryTimeout time.Duration, streamBackoff func(int) time.Duration) (XDSClient, func(), error) { +======= +func newRefCounted(name string, watchExpiryTimeout, idleAuthorityTimeout time.Duration, streamBackoff func(int) time.Duration) (XDSClient, func(), error) { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) clientsMu.Lock() defer clientsMu.Unlock() @@ -74,7 +105,15 @@ func newRefCounted(name string, config *bootstrap.Config, watchExpiryTimeout tim } // Create the new client implementation. 
+<<<<<<< HEAD c, err := newClientImpl(config, watchExpiryTimeout, streamBackoff) +======= + config, err := bootstrap.GetConfiguration() + if err != nil { + return nil, nil, fmt.Errorf("xds: failed to get xDS bootstrap config: %v", err) + } + c, err := newClientImpl(config, watchExpiryTimeout, idleAuthorityTimeout, streamBackoff) +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, nil, err } diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go index bb8d904002..f0509d210e 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go @@ -19,6 +19,7 @@ package xdsclient import ( +<<<<<<< HEAD "errors" "fmt" "sync" @@ -31,10 +32,20 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient/transport" "google.golang.org/grpc/xds/internal/xdsclient/transport/ads" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +======= + "sync" + "time" + + "google.golang.org/grpc/internal/cache" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/xds/bootstrap" +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) var _ XDSClient = &clientImpl{} +<<<<<<< HEAD // ErrClientClosed is returned when the xDS client is closed. var ErrClientClosed = errors.New("xds: the xDS client is closed") @@ -131,6 +142,39 @@ func (cs *channelState) adsResourceDoesNotExist(typ xdsresource.Type, resourceNa for authority := range cs.interestedAuthorities { authority.adsResourceDoesNotExist(typ, resourceName) } +======= +// clientImpl is the real implementation of the xds client. The exported Client +// is a wrapper of this struct with a ref count. +type clientImpl struct { + done *grpcsync.Event + config *bootstrap.Config + logger *grpclog.PrefixLogger + watchExpiryTimeout time.Duration + backoff func(int) time.Duration // Backoff for ADS and LRS stream failures. + serializer *grpcsync.CallbackSerializer + serializerClose func() + resourceTypes *resourceTypeRegistry + + // authorityMu protects the authority fields. It's necessary because an + // authority is created when it's used. + authorityMu sync.Mutex + // authorities is a map from ServerConfig to authority. So that + // different authorities sharing the same ServerConfig can share the + // authority. + // + // The key is **ServerConfig.String()**, not the authority name. + // + // An authority is either in authorities, or idleAuthorities, + // never both. + authorities map[string]*authority + // idleAuthorities keeps the authorities that are not used (the last + // watch on it was canceled). They are kept in the cache and will be deleted + // after a timeout. The key is ServerConfig.String(). + // + // An authority is either in authorities, or idleAuthorities, + // never both. + idleAuthorities *cache.TimeoutCache +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // BootstrapConfig returns the configuration read from the bootstrap file. @@ -139,12 +183,17 @@ func (c *clientImpl) BootstrapConfig() *bootstrap.Config { return c.config } +<<<<<<< HEAD // close closes the xDS client and releases all resources. +======= +// close closes the gRPC connection to the management server. 
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (c *clientImpl) close() { if c.done.HasFired() { return } c.done.Fire() +<<<<<<< HEAD c.topLevelAuthority.close() for _, a := range c.authorities { @@ -169,6 +218,18 @@ func (c *clientImpl) close() { c.serializerClose() <-c.serializer.Done() +======= + // TODO: Should we invoke the registered callbacks here with an error that + // the client is closed? + + c.authorityMu.Lock() + for _, a := range c.authorities { + a.close() + } + c.idleAuthorities.Clear(true) + c.authorityMu.Unlock() + c.serializerClose() +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, s := range c.config.XDSServers() { for _, f := range s.Cleanups() { @@ -184,6 +245,7 @@ func (c *clientImpl) close() { } c.logger.Infof("Shutdown") } +<<<<<<< HEAD // getChannelForADS returns an xdsChannel for the given server configuration. // @@ -350,3 +412,5 @@ func (c *clientImpl) releaseChannel(serverConfig *bootstrap.ServerConfig, state channelToClose.close() }) } +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go new file mode 100644 index 0000000000..56c26b8175 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go @@ -0,0 +1,146 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsclient + +import ( + "errors" + "fmt" + + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// findAuthority returns the authority for this name. If it doesn't already +// exist, one will be created. +// +// Note that this doesn't always create new authority. authorities with the same +// config but different names are shared. +// +// The returned unref function must be called when the caller is done using this +// authority, without holding c.authorityMu. +// +// Caller must not hold c.authorityMu. 
+func (c *clientImpl) findAuthority(n *xdsresource.Name) (*authority, func(), error) {
+	scheme, authority := n.Scheme, n.Authority
+
+	c.authorityMu.Lock()
+	defer c.authorityMu.Unlock()
+	if c.done.HasFired() {
+		return nil, nil, errors.New("the xds-client is closed")
+	}
+
+	config := c.config.XDSServers()[0]
+	if scheme == xdsresource.FederationScheme {
+		authorities := c.config.Authorities()
+		if authorities == nil {
+			return nil, nil, fmt.Errorf("xds: failed to find authority %q", authority)
+		}
+		cfg, ok := authorities[authority]
+		if !ok {
+			return nil, nil, fmt.Errorf("xds: failed to find authority %q", authority)
+		}
+		if len(cfg.XDSServers) >= 1 {
+			config = cfg.XDSServers[0]
+		}
+	}
+
+	a, err := c.newAuthorityLocked(config)
+	if err != nil {
+		return nil, nil, fmt.Errorf("xds: failed to connect to the control plane for authority %q: %v", authority, err)
+	}
+	// Any authority returned from this function will be used by a watch, so
+	// hold the ref here.
+	//
+	// Note that this must be done while c.authorityMu is held, to avoid the
+	// race that an authority is returned, but before the watch starts, the
+	// old last watch is canceled (in another goroutine), causing this
+	// authority to be removed, and then a watch will start on a removed
+	// authority.
+	//
+	// unref() will be done when the watch is canceled.
+	a.refLocked()
+	return a, func() { c.unrefAuthority(a) }, nil
+}
+
+// newAuthorityLocked creates a new authority for the given config. If an
+// authority for the given config exists in the cache, it is returned instead of
+// creating a new one.
+//
+// The caller must take a reference of the returned authority before using it,
+// and unref it afterwards.
+//
+// Caller must hold c.authorityMu.
+func (c *clientImpl) newAuthorityLocked(config *bootstrap.ServerConfig) (_ *authority, retErr error) {
+	// First, check if there's already an authority for this config. If found,
+	// it means this authority is used by other watches (could be the same
+	// authority name, or a different authority name but the same server
+	// config). Return it.
+	configStr := config.String()
+	if a, ok := c.authorities[configStr]; ok {
+		return a, nil
+	}
+	// Second, check if there's an authority in the idle cache. If found, it
+	// means this authority was created, but moved to the idle cache because the
+	// watch was canceled. Move it from the idle cache to the authority cache,
+	// and return it.
+	if old, ok := c.idleAuthorities.Remove(configStr); ok {
+		oldA, _ := old.(*authority)
+		if oldA != nil {
+			c.authorities[configStr] = oldA
+			return oldA, nil
+		}
+	}
+
+	// Make a new authority since there's no existing authority for this config.
+	ret, err := newAuthority(authorityArgs{
+		serverCfg:          config,
+		bootstrapCfg:       c.config,
+		serializer:         c.serializer,
+		resourceTypeGetter: c.resourceTypes.get,
+		watchExpiryTimeout: c.watchExpiryTimeout,
+		backoff:            c.backoff,
+		logger:             grpclog.NewPrefixLogger(logger, authorityPrefix(c, config.ServerURI())),
+	})
+	if err != nil {
+		return nil, fmt.Errorf("creating new authority for config %q: %v", config.String(), err)
+	}
+	// Add it to the cache, so it will be reused.
+	c.authorities[configStr] = ret
+	return ret, nil
+}
+
+// unrefAuthority unrefs the authority. It also moves the authority to the idle
+// cache if its ref count is 0.
+//
+// This function doesn't need to be called explicitly. It's called by the
+// unref function returned from findAuthority().
+//
+// Caller must not hold c.authorityMu.
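unrefAuthority, defined next, parks an idle authority in c.idleAuthorities rather than closing it immediately, so a quick re-watch can revive it cheaply. A rough sketch of such a delayed-close cache, assuming nothing about the real cache.TimeoutCache API beyond the Add/Remove behavior the comments above describe:

package main

import (
	"fmt"
	"sync"
	"time"
)

// timeoutCache parks values and runs onEvict after d, unless the value is
// removed (revived) first. Loosely modeled on the Add/Remove calls above.
type timeoutCache struct {
	mu     sync.Mutex
	d      time.Duration
	timers map[string]*time.Timer
	values map[string]any
}

func newTimeoutCache(d time.Duration) *timeoutCache {
	return &timeoutCache{d: d, timers: map[string]*time.Timer{}, values: map[string]any{}}
}

func (c *timeoutCache) add(key string, v any, onEvict func()) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.values[key] = v
	c.timers[key] = time.AfterFunc(c.d, func() {
		c.mu.Lock()
		_, ok := c.values[key]
		if ok {
			delete(c.values, key)
			delete(c.timers, key)
		}
		c.mu.Unlock()
		if ok {
			onEvict() // e.g. authority.close()
		}
	})
}

// remove revives a parked value before its timer fires.
func (c *timeoutCache) remove(key string) (any, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	v, ok := c.values[key]
	if !ok {
		return nil, false
	}
	c.timers[key].Stop()
	delete(c.values, key)
	delete(c.timers, key)
	return v, true
}

func main() {
	c := newTimeoutCache(50 * time.Millisecond)
	c.add("srv", "authority-A", func() { fmt.Println("evicted: closing") })
	if v, ok := c.remove("srv"); ok {
		fmt.Println("revived:", v) // removed before the timeout; no close
	}
	time.Sleep(100 * time.Millisecond)
}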
+func (c *clientImpl) unrefAuthority(a *authority) {
+	c.authorityMu.Lock()
+	defer c.authorityMu.Unlock()
+	if a.unrefLocked() > 0 {
+		return
+	}
+	configStr := a.serverCfg.String()
+	delete(c.authorities, configStr)
+	c.idleAuthorities.Add(configStr, a, func() {
+		a.close()
+	})
+}
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go
index 9d75867730..abf0c1a14f 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go
@@ -24,7 +24,14 @@ import (

 // dumpResources returns the status and contents of all xDS resources.
 func (c *clientImpl) dumpResources() *v3statuspb.ClientConfig {
+<<<<<<< HEAD
 	retCfg := c.topLevelAuthority.dumpResources()
+=======
+	c.authorityMu.Lock()
+	defer c.authorityMu.Unlock()
+
+	var retCfg []*v3statuspb.ClientConfig_GenericXdsConfig
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	for _, a := range c.authorities {
 		retCfg = append(retCfg, a.dumpResources()...)
 	}
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go
index efb41b87db..b7f836b8c9 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go
@@ -28,6 +28,7 @@ import (
 // It returns a Store for the user to report loads, a function to cancel the
 // load reporting stream.
 func (c *clientImpl) ReportLoad(server *bootstrap.ServerConfig) (*load.Store, func()) {
+<<<<<<< HEAD
 	xc, releaseChannelRef, err := c.getChannelForLRS(server)
 	if err != nil {
 		c.logger.Warningf("Failed to create a channel to the management server %q to report load: %v", server, err)
@@ -37,5 +38,22 @@ func (c *clientImpl) ReportLoad(server *bootstrap.ServerConfig) (*load.Store, fu
 	return load, func() {
 		stopLoadReporting()
 		releaseChannelRef()
+=======
+	c.authorityMu.Lock()
+	a, err := c.newAuthorityLocked(server)
+	if err != nil {
+		c.authorityMu.Unlock()
+		c.logger.Warningf("Failed to connect to the management server to report load for authority %q: %v", server, err)
+		return nil, func() {}
+	}
+	// Hold the ref before starting load reporting.
+	a.refLocked()
+	c.authorityMu.Unlock()
+
+	store, cancelF := a.reportLoad()
+	return store, func() {
+		cancelF()
+		c.unrefAuthority(a)
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 }
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go
index ed4ee360fb..f8a2b5ee34 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go
@@ -22,15 +22,25 @@ import (
 	"fmt"
 	"sync"

+<<<<<<< HEAD
 	"google.golang.org/grpc/xds/internal/xdsclient/transport/ads"
+=======
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
 )

 // WatchResource uses xDS to discover the resource associated with the provided
+<<<<<<< HEAD
 // resource name. The resource type implementation determines how xDS responses
 // are deserialized and validated, as received from the xDS management
 // server.
 // Upon receipt of a response from the management server, an
 // appropriate callback on the watcher is invoked.
+=======
+// resource name. The resource type implementation determines how xDS requests
+// are sent out and how responses are deserialized and validated. Upon receipt
+// of a response from the management server, an appropriate callback on the
+// watcher is invoked.
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (c *clientImpl) WatchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) (cancel func()) {
 	// Return early if the client is already closed.
 	//
@@ -49,6 +59,7 @@ func (c *clientImpl) WatchResource(rType xdsresource.Type, resourceName string,
 		return func() {}
 	}

+<<<<<<< HEAD
 	n := xdsresource.ParseName(resourceName)
 	a := c.getAuthorityForResource(n)
 	if a == nil {
@@ -84,6 +95,22 @@ func (c *clientImpl) getAuthorityForResource(name *xdsresource.Name) *authority
 		return c.topLevelAuthority
 	}
 	return c.authorities[name.Authority]
+=======
+	// TODO: Make ParseName return an error if parsing fails, and
+	// schedule the OnError callback in that case.
+	n := xdsresource.ParseName(resourceName)
+	a, unref, err := c.findAuthority(n)
+	if err != nil {
+		logger.Warningf("Watch registered for name %q of type %q, authority %q is not found", resourceName, rType.TypeName(), n.Authority)
+		c.serializer.TrySchedule(func(context.Context) { watcher.OnError(err, func() {}) })
+		return func() {}
+	}
+	cancelF := a.watchResource(rType, n.String(), watcher)
+	return func() {
+		cancelF()
+		unref()
+	}
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 // A registry of xdsresource.Type implementations indexed by their corresponding
@@ -118,6 +145,7 @@ func (r *resourceTypeRegistry) maybeRegister(rType xdsresource.Type) error {
 }

 func (c *clientImpl) triggerResourceNotFoundForTesting(rType xdsresource.Type, resourceName string) error {
+<<<<<<< HEAD
 	c.channelsMu.Lock()
 	defer c.channelsMu.Unlock()

@@ -144,3 +172,18 @@ func (c *clientImpl) resourceWatchStateForTesting(rType xdsresource.Type, resour
 	}
 	return ads.ResourceWatchState{}, fmt.Errorf("unable to find watch state for resource type %q and name %q", rType.TypeName(), resourceName)
 }
+=======
+	if c == nil || c.done.HasFired() {
+		return fmt.Errorf("attempt to trigger resource-not-found-error for resource %q of type %q, but client is closed", resourceName, rType.TypeName())
+	}
+
+	n := xdsresource.ParseName(resourceName)
+	a, unref, err := c.findAuthority(n)
+	if err != nil {
+		return fmt.Errorf("attempt to trigger resource-not-found-error for resource %q of type %q, but authority %q is not found", resourceName, rType.TypeName(), n.Authority)
+	}
+	defer unref()
+	a.triggerResourceNotFoundForTesting(rType, n.String())
+	return nil
+}
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/internal/internal.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/internal/internal.go
index cdbb86db82..f4edfc612f 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/internal/internal.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/internal/internal.go
@@ -20,6 +20,7 @@ package internal

 // The following vars can be overridden by tests.
 var (
+<<<<<<< HEAD
 	// GRPCNewClient returns a new gRPC Client.
 	GRPCNewClient any // func(string, ...grpc.DialOption) (*grpc.ClientConn, error)

@@ -30,4 +31,8 @@ var (
 	// identified by the given resource type and resource name.
 	// Returns a
 	// non-nil error if there is no such resource being watched.
 	ResourceWatchStateForTesting any // func(xdsclient.XDSClient, xdsresource.Type, string) error
+=======
+	// NewADSStream is a function that returns a new ADS stream.
+	NewADSStream any // func(context.Context, *grpc.ClientConn) (v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient, error)
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 )
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/logging.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/logging.go
index 00b6392d6a..978b9f8e4b 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/logging.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/logging.go
@@ -34,3 +34,10 @@ func prefixLogger(p *clientImpl) *internalgrpclog.PrefixLogger {
 func clientPrefix(p *clientImpl) string {
 	return fmt.Sprintf("[xds-client %p] ", p)
 }
+<<<<<<< HEAD
+=======
+
+func authorityPrefix(p *clientImpl, serverURI string) string {
+	return fmt.Sprintf("%s[%s] ", clientPrefix(p), serverURI)
+}
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/internal/internal.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/internal/internal.go
new file mode 100644
index 0000000000..9acc33cbbf
--- /dev/null
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/internal/internal.go
@@ -0,0 +1,25 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package internal contains functionality internal to the transport package.
+package internal
+
+// The following vars can be overridden by tests.
+var (
+	// GRPCNewClient creates a new gRPC Client.
+	GRPCNewClient any // func(string, ...grpc.DialOption) (*grpc.ClientConn, error)
+)
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go
new file mode 100644
index 0000000000..e47fdd9846
--- /dev/null
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go
@@ -0,0 +1,259 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package transport
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"time"
+
+	"google.golang.org/grpc/internal/backoff"
+	"google.golang.org/grpc/internal/grpcsync"
+	"google.golang.org/grpc/internal/pretty"
+	"google.golang.org/grpc/xds/internal"
+	"google.golang.org/grpc/xds/internal/xdsclient/load"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/types/known/durationpb"
+
+	v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
+	v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
+	v3lrsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3"
+	v3lrspb "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3"
+)
+
+type lrsStream = v3lrsgrpc.LoadReportingService_StreamLoadStatsClient
+
+// ReportLoad starts reporting loads to the management server the transport is
+// configured to use.
+//
+// It returns a Store for the user to report loads and a function to cancel the
+// load reporting.
+func (t *Transport) ReportLoad() (*load.Store, func()) {
+	t.lrsStartStream()
+	return t.lrsStore, grpcsync.OnceFunc(func() { t.lrsStopStream() })
+}
+
+// lrsStartStream starts an LRS stream to the server, if none exists.
+func (t *Transport) lrsStartStream() {
+	t.lrsMu.Lock()
+	defer t.lrsMu.Unlock()
+
+	t.lrsRefCount++
+	if t.lrsRefCount != 1 {
+		// Return early if the stream has already been started.
+		return
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+	t.lrsCancelStream = cancel
+
+	// Create a new done channel every time a new stream is created. This
+	// ensures that we don't close the same channel multiple times (from the
+	// lrsRunner() goroutine) when multiple streams are created and closed.
+	t.lrsRunnerDoneCh = make(chan struct{})
+	go t.lrsRunner(ctx)
+}
+
+// lrsStopStream closes the LRS stream, if this is the last user of the stream.
+func (t *Transport) lrsStopStream() {
+	t.lrsMu.Lock()
+	defer t.lrsMu.Unlock()

+	t.lrsRefCount--
+	if t.lrsRefCount != 0 {
+		// Return early if the stream has other references.
+		return
+	}
+
+	t.lrsCancelStream()
+	t.logger.Infof("Stopping LRS stream")
+
+	// Wait for the runner goroutine to exit. The done channel will be
+	// recreated when a new stream is created.
+	<-t.lrsRunnerDoneCh
+}
+
+// lrsRunner starts an LRS stream to report load data to the management server.
+// It reports load at constant intervals (as configured by the management
+// server) until the context is cancelled.
+func (t *Transport) lrsRunner(ctx context.Context) {
+	defer close(t.lrsRunnerDoneCh)
+
+	// This feature indicates that the client supports the
+	// LoadStatsResponse.send_all_clusters field in the LRS response.
+	node := proto.Clone(t.nodeProto).(*v3corepb.Node)
+	node.ClientFeatures = append(node.ClientFeatures, "envoy.lrs.supports_send_all_clusters")
+
+	runLoadReportStream := func() error {
+		// streamCtx is created and canceled in case we terminate the stream
+		// early for any reason, to avoid gRPC-Go leaking the RPC's monitoring
+		// goroutine.
+		streamCtx, cancel := context.WithCancel(ctx)
+		defer cancel()
+		stream, err := v3lrsgrpc.NewLoadReportingServiceClient(t.cc).StreamLoadStats(streamCtx)
+		if err != nil {
+			t.logger.Warningf("Creating LRS stream to server %q failed: %v", t.serverURI, err)
+			return nil
+		}
+		t.logger.Infof("Created LRS stream to server %q", t.serverURI)
+
+		if err := t.sendFirstLoadStatsRequest(stream, node); err != nil {
+			t.logger.Warningf("Sending first LRS request failed: %v", err)
+			return nil
+		}
+
+		clusters, interval, err := t.recvFirstLoadStatsResponse(stream)
+		if err != nil {
+			t.logger.Warningf("Reading from LRS stream failed: %v", err)
+			return nil
+		}
+
+		// We reset backoff state when we successfully receive at least one
+		// message from the server.
+		t.sendLoads(streamCtx, stream, clusters, interval)
+		return backoff.ErrResetBackoff
+	}
+	backoff.RunF(ctx, runLoadReportStream, t.backoff)
+}
+
+func (t *Transport) sendLoads(ctx context.Context, stream lrsStream, clusterNames []string, interval time.Duration) {
+	tick := time.NewTicker(interval)
+	defer tick.Stop()
+	for {
+		select {
+		case <-tick.C:
+		case <-ctx.Done():
+			return
+		}
+		if err := t.sendLoadStatsRequest(stream, t.lrsStore.Stats(clusterNames)); err != nil {
+			t.logger.Warningf("Writing to LRS stream failed: %v", err)
+			return
+		}
+	}
+}
+
+func (t *Transport) sendFirstLoadStatsRequest(stream lrsStream, node *v3corepb.Node) error {
+	req := &v3lrspb.LoadStatsRequest{Node: node}
+	if t.logger.V(perRPCVerbosityLevel) {
+		t.logger.Infof("Sending initial LoadStatsRequest: %s", pretty.ToJSON(req))
+	}
+	err := stream.Send(req)
+	if err == io.EOF {
+		return getStreamError(stream)
+	}
+	return err
+}
+
+func (t *Transport) recvFirstLoadStatsResponse(stream lrsStream) ([]string, time.Duration, error) {
+	resp, err := stream.Recv()
+	if err != nil {
+		return nil, 0, fmt.Errorf("failed to receive first LoadStatsResponse: %v", err)
+	}
+	if t.logger.V(perRPCVerbosityLevel) {
+		t.logger.Infof("Received first LoadStatsResponse: %s", pretty.ToJSON(resp))
+	}
+
+	rInterval := resp.GetLoadReportingInterval()
+	if err := rInterval.CheckValid(); err != nil {
+		return nil, 0, fmt.Errorf("invalid load_reporting_interval: %v", err)
+	}
+	interval := rInterval.AsDuration()
+
+	if resp.ReportEndpointGranularity {
+		// TODO(easwars): Support per endpoint loads.
+		return nil, 0, errors.New("lrs: endpoint loads requested, but not supported by current implementation")
+	}
+
+	clusters := resp.Clusters
+	if resp.SendAllClusters {
+		// Return nil to send stats for all clusters.
+ clusters = nil + } + + return clusters, interval, nil +} + +func (t *Transport) sendLoadStatsRequest(stream lrsStream, loads []*load.Data) error { + clusterStats := make([]*v3endpointpb.ClusterStats, 0, len(loads)) + for _, sd := range loads { + droppedReqs := make([]*v3endpointpb.ClusterStats_DroppedRequests, 0, len(sd.Drops)) + for category, count := range sd.Drops { + droppedReqs = append(droppedReqs, &v3endpointpb.ClusterStats_DroppedRequests{ + Category: category, + DroppedCount: count, + }) + } + localityStats := make([]*v3endpointpb.UpstreamLocalityStats, 0, len(sd.LocalityStats)) + for l, localityData := range sd.LocalityStats { + lid, err := internal.LocalityIDFromString(l) + if err != nil { + return err + } + loadMetricStats := make([]*v3endpointpb.EndpointLoadMetricStats, 0, len(localityData.LoadStats)) + for name, loadData := range localityData.LoadStats { + loadMetricStats = append(loadMetricStats, &v3endpointpb.EndpointLoadMetricStats{ + MetricName: name, + NumRequestsFinishedWithMetric: loadData.Count, + TotalMetricValue: loadData.Sum, + }) + } + localityStats = append(localityStats, &v3endpointpb.UpstreamLocalityStats{ + Locality: &v3corepb.Locality{ + Region: lid.Region, + Zone: lid.Zone, + SubZone: lid.SubZone, + }, + TotalSuccessfulRequests: localityData.RequestStats.Succeeded, + TotalRequestsInProgress: localityData.RequestStats.InProgress, + TotalErrorRequests: localityData.RequestStats.Errored, + TotalIssuedRequests: localityData.RequestStats.Issued, + LoadMetricStats: loadMetricStats, + UpstreamEndpointStats: nil, // TODO: populate for per endpoint loads. + }) + } + + clusterStats = append(clusterStats, &v3endpointpb.ClusterStats{ + ClusterName: sd.Cluster, + ClusterServiceName: sd.Service, + UpstreamLocalityStats: localityStats, + TotalDroppedRequests: sd.TotalDrops, + DroppedRequests: droppedReqs, + LoadReportInterval: durationpb.New(sd.ReportInterval), + }) + } + + req := &v3lrspb.LoadStatsRequest{ClusterStats: clusterStats} + if t.logger.V(perRPCVerbosityLevel) { + t.logger.Infof("Sending LRS loads: %s", pretty.ToJSON(req)) + } + err := stream.Send(req) + if err == io.EOF { + return getStreamError(stream) + } + return err +} + +func getStreamError(stream lrsStream) error { + for { + if _, err := stream.Recv(); err != nil { + return err + } + } +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go new file mode 100644 index 0000000000..59b221727a --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go @@ -0,0 +1,702 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package transport implements the xDS transport protocol functionality +// required by the xdsclient. 
+package transport + +import ( + "context" + "errors" + "fmt" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/keepalive" + xdsclientinternal "google.golang.org/grpc/xds/internal/xdsclient/internal" + "google.golang.org/grpc/xds/internal/xdsclient/load" + transportinternal "google.golang.org/grpc/xds/internal/xdsclient/transport/internal" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/protobuf/types/known/anypb" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3adsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + statuspb "google.golang.org/genproto/googleapis/rpc/status" +) + +type adsStream = v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient + +func init() { + transportinternal.GRPCNewClient = grpc.NewClient + xdsclientinternal.NewADSStream = func(ctx context.Context, cc *grpc.ClientConn) (adsStream, error) { + return v3adsgrpc.NewAggregatedDiscoveryServiceClient(cc).StreamAggregatedResources(ctx) + } +} + +// Any per-RPC level logs which print complete request or response messages +// should be gated at this verbosity level. Other per-RPC level logs which print +// terse output should be at `INFO` and verbosity 2. +const perRPCVerbosityLevel = 9 + +// Transport provides a resource-type agnostic implementation of the xDS +// transport protocol. At this layer, resource contents are supposed to be +// opaque blobs which should be meaningful only to the xDS data model layer +// which is implemented by the `xdsresource` package. +// +// Under the hood, it owns the gRPC connection to a single management server and +// manages the lifecycle of ADS/LRS streams. It uses the xDS v3 transport +// protocol version. +type Transport struct { + // These fields are initialized at creation time and are read-only afterwards. + cc *grpc.ClientConn // ClientConn to the management server. + serverURI string // URI of the management server. + onRecvHandler OnRecvHandlerFunc // Resource update handler. xDS data model layer. + onErrorHandler func(error) // To report underlying stream errors. + onSendHandler OnSendHandlerFunc // To report resources requested on ADS stream. + lrsStore *load.Store // Store returned to user for pushing loads. + backoff func(int) time.Duration // Backoff after stream failures. + nodeProto *v3corepb.Node // Identifies the gRPC application. + logger *grpclog.PrefixLogger // Prefix logger for transport logs. + adsRunnerCancel context.CancelFunc // CancelFunc for the ADS goroutine. + adsRunnerDoneCh chan struct{} // To notify exit of ADS goroutine. + lrsRunnerDoneCh chan struct{} // To notify exit of LRS goroutine. + + // These channels enable synchronization amongst the different goroutines + // spawned by the transport, and between asynchronous events resulting from + // receipt of responses from the management server. + adsStreamCh chan adsStream // New ADS streams are pushed here. + adsRequestCh *buffer.Unbounded // Resource and ack requests are pushed here. + + // mu guards the following runtime state maintained by the transport. 
+ mu sync.Mutex + // resources is map from resource type URL to the set of resource names + // being requested for that type. When the ADS stream is restarted, the + // transport requests all these resources again from the management server. + resources map[string]map[string]bool + // versions is a map from resource type URL to the most recently ACKed + // version for that resource. Resource versions are a property of the + // resource type and not the stream, and will not be reset upon stream + // restarts. + versions map[string]string + // nonces is a map from resource type URL to the most recently received + // nonce for that resource type. Nonces are a property of the ADS stream and + // will be reset upon stream restarts. + nonces map[string]string + + lrsMu sync.Mutex // Protects all LRS state. + lrsCancelStream context.CancelFunc // CancelFunc for the LRS stream. + lrsRefCount int // Reference count on the load store. +} + +// OnRecvHandlerFunc is the implementation at the xDS data model layer, which +// determines if the configuration received from the management server can be +// applied locally or not. +// +// A nil error is returned from this function when the data model layer believes +// that the received configuration is good and can be applied locally. This will +// cause the transport layer to send an ACK to the management server. A non-nil +// error is returned from this function when the data model layer believes +// otherwise, and this will cause the transport layer to send a NACK. +// +// The implementation is expected to invoke onDone when local processing of the +// update is complete, i.e. it is consumed by all watchers. +type OnRecvHandlerFunc func(update ResourceUpdate, onDone func()) error + +// OnSendHandlerFunc is the implementation at the authority, which handles state +// changes for the resource watch and stop watch timers accordingly. +type OnSendHandlerFunc func(update *ResourceSendInfo) + +// ResourceUpdate is a representation of the configuration update received from +// the management server. It only contains fields which are useful to the data +// model layer, and layers above it. +type ResourceUpdate struct { + // Resources is the list of resources received from the management server. + Resources []*anypb.Any + // URL is the resource type URL for the above resources. + URL string + // Version is the resource version, for the above resources, as specified by + // the management server. + Version string +} + +// Options specifies configuration knobs used when creating a new Transport. +type Options struct { + // ServerCfg contains all the configuration required to connect to the xDS + // management server. + ServerCfg *bootstrap.ServerConfig + // OnRecvHandler is the component which makes ACK/NACK decisions based on + // the received resources. + // + // Invoked inline and implementations must not block. + OnRecvHandler OnRecvHandlerFunc + // OnErrorHandler provides a way for the transport layer to report + // underlying stream errors. These can be bubbled all the way up to the user + // of the xdsClient. + // + // Invoked inline and implementations must not block. + OnErrorHandler func(error) + // OnSendHandler provides a way for the transport layer to report underlying + // resource requests sent on the stream. However, Send() on the ADS stream will + // return successfully as long as: + // 1. there is enough flow control quota to send the message. + // 2. the message is added to the send buffer. 
+ // However, the connection may fail after the callback is invoked and before + // the message is actually sent on the wire. This is accepted. + // + // Invoked inline and implementations must not block. + OnSendHandler func(*ResourceSendInfo) + // Backoff controls the amount of time to backoff before recreating failed + // ADS streams. If unspecified, a default exponential backoff implementation + // is used. For more details, see: + // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. + Backoff func(retries int) time.Duration + // Logger does logging with a prefix. + Logger *grpclog.PrefixLogger + // NodeProto contains the Node proto to be used in xDS requests. This will be + // of type *v3corepb.Node. + NodeProto *v3corepb.Node +} + +// New creates a new Transport. +func New(opts Options) (*Transport, error) { + switch { + case opts.OnRecvHandler == nil: + return nil, errors.New("missing OnRecv callback handler when creating a new transport") + case opts.OnErrorHandler == nil: + return nil, errors.New("missing OnError callback handler when creating a new transport") + case opts.OnSendHandler == nil: + return nil, errors.New("missing OnSend callback handler when creating a new transport") + } + + // Dial the xDS management server with dial options specified by the server + // configuration and a static keepalive configuration that is common across + // gRPC language implementations. + kpCfg := grpc.WithKeepaliveParams(keepalive.ClientParameters{ + Time: 5 * time.Minute, + Timeout: 20 * time.Second, + }) + dopts := append([]grpc.DialOption{kpCfg}, opts.ServerCfg.DialOptions()...) + grpcNewClient := transportinternal.GRPCNewClient.(func(string, ...grpc.DialOption) (*grpc.ClientConn, error)) + cc, err := grpcNewClient(opts.ServerCfg.ServerURI(), dopts...) + if err != nil { + // An error from a non-blocking dial indicates something serious. + return nil, fmt.Errorf("failed to create a transport to the management server %q: %v", opts.ServerCfg.ServerURI(), err) + } + cc.Connect() + + boff := opts.Backoff + if boff == nil { + boff = backoff.DefaultExponential.Backoff + } + ret := &Transport{ + cc: cc, + serverURI: opts.ServerCfg.ServerURI(), + onRecvHandler: opts.OnRecvHandler, + onErrorHandler: opts.OnErrorHandler, + onSendHandler: opts.OnSendHandler, + lrsStore: load.NewStore(), + backoff: boff, + nodeProto: opts.NodeProto, + logger: opts.Logger, + + adsStreamCh: make(chan adsStream, 1), + adsRequestCh: buffer.NewUnbounded(), + resources: make(map[string]map[string]bool), + versions: make(map[string]string), + nonces: make(map[string]string), + adsRunnerDoneCh: make(chan struct{}), + } + + // This context is used for sending and receiving RPC requests and + // responses. It is also used by all the goroutines spawned by this + // Transport. Therefore, cancelling this context when the transport is + // closed will essentially cancel any pending RPCs, and cause the goroutines + // to terminate. + ctx, cancel := context.WithCancel(context.Background()) + ret.adsRunnerCancel = cancel + go ret.adsRunner(ctx) + + ret.logger.Infof("Created transport to server %q", ret.serverURI) + return ret, nil +} + +// resourceRequest wraps the resource type url and the resource names requested +// by the user of this transport. +type resourceRequest struct { + resources []string + url string +} + +// SendRequest sends out an ADS request for the provided resources of the +// specified resource type. +// +// The request is sent out asynchronously. 
+// If no valid stream exists at the time
+// of processing this request, it is queued and will be sent out once a valid
+// stream exists.
+//
+// If a successful response is received, the update handler callback provided at
+// creation time is invoked. If an error is encountered, the stream error
+// handler callback provided at creation time is invoked.
+func (t *Transport) SendRequest(url string, resources []string) {
+	t.adsRequestCh.Put(&resourceRequest{
+		url:       url,
+		resources: resources,
+	})
+}
+
+// ResourceSendInfo wraps the names and url of resources sent to the management
+// server. This is used by the `authority` type to start/stop the watch timer
+// associated with every resource in the update.
+type ResourceSendInfo struct {
+	ResourceNames []string
+	URL           string
+}
+
+func (t *Transport) sendAggregatedDiscoveryServiceRequest(stream adsStream, sendNodeProto bool, resourceNames []string, resourceURL, version, nonce string, nackErr error) error {
+	req := &v3discoverypb.DiscoveryRequest{
+		TypeUrl:       resourceURL,
+		ResourceNames: resourceNames,
+		VersionInfo:   version,
+		ResponseNonce: nonce,
+	}
+	if sendNodeProto {
+		req.Node = t.nodeProto
+	}
+	if nackErr != nil {
+		req.ErrorDetail = &statuspb.Status{
+			Code: int32(codes.InvalidArgument), Message: nackErr.Error(),
+		}
+	}
+	if err := stream.Send(req); err != nil {
+		return err
+	}
+	if t.logger.V(perRPCVerbosityLevel) {
+		t.logger.Infof("ADS request sent: %v", pretty.ToJSON(req))
+	} else {
+		if t.logger.V(2) {
+			t.logger.Infof("ADS request sent for type %q, resources: %v, version %q, nonce %q", resourceURL, resourceNames, version, nonce)
+		}
+	}
+	t.onSendHandler(&ResourceSendInfo{URL: resourceURL, ResourceNames: resourceNames})
+	return nil
+}
+
+func (t *Transport) recvAggregatedDiscoveryServiceResponse(stream adsStream) (resources []*anypb.Any, resourceURL, version, nonce string, err error) {
+	resp, err := stream.Recv()
+	if err != nil {
+		return nil, "", "", "", err
+	}
+	if t.logger.V(perRPCVerbosityLevel) {
+		t.logger.Infof("ADS response received: %v", pretty.ToJSON(resp))
+	} else if t.logger.V(2) {
+		t.logger.Infof("ADS response received for type %q, version %q, nonce %q", resp.GetTypeUrl(), resp.GetVersionInfo(), resp.GetNonce())
+	}
+	return resp.GetResources(), resp.GetTypeUrl(), resp.GetVersionInfo(), resp.GetNonce(), nil
+}
+
+// adsRunner starts an ADS stream (and backs off exponentially, if the previous
+// stream failed without receiving a single reply) and runs the sender and
+// receiver routines to send and receive data from the stream respectively.
+func (t *Transport) adsRunner(ctx context.Context) {
+	defer close(t.adsRunnerDoneCh)
+
+	go t.send(ctx)
+
+	// We reset backoff state when we successfully receive at least one
+	// message from the server.
+	runStreamWithBackoff := func() error {
+		newStream := xdsclientinternal.NewADSStream.(func(context.Context, *grpc.ClientConn) (adsStream, error))
+		stream, err := newStream(ctx, t.cc)
+		if err != nil {
+			t.onErrorHandler(err)
+			t.logger.Warningf("Creating new ADS stream failed: %v", err)
+			return nil
+		}
+		t.logger.Infof("ADS stream created")
+
+		select {
+		case <-t.adsStreamCh:
+		default:
+		}
+		t.adsStreamCh <- stream
+		msgReceived := t.recv(ctx, stream)
+		if msgReceived {
+			return backoff.ErrResetBackoff
+		}
+		return nil
+	}
+	backoff.RunF(ctx, runStreamWithBackoff, t.backoff)
+}
+
+// send is a separate goroutine for sending resource requests on the ADS stream.
+// +// For every new stream received on the stream channel, all existing resources +// are re-requested from the management server. +// +// For every new resource request received on the resources channel, the +// resources map is updated (this ensures that resend will pick them up when +// there are new streams) and the appropriate request is sent out. +func (t *Transport) send(ctx context.Context) { + var stream adsStream + // The xDS protocol only requires that we send the node proto in the first + // discovery request on every stream. Sending the node proto in every + // request message wastes CPU resources on the client and the server. + sentNodeProto := false + for { + select { + case <-ctx.Done(): + return + case stream = <-t.adsStreamCh: + // We have a new stream and we've to ensure that the node proto gets + // sent out in the first request on the stream. + var err error + if sentNodeProto, err = t.sendExisting(stream); err != nil { + // Send failed, clear the current stream. Attempt to resend will + // only be made after a new stream is created. + stream = nil + continue + } + case u, ok := <-t.adsRequestCh.Get(): + if !ok { + // No requests will be sent after the adsRequestCh buffer is closed. + return + } + t.adsRequestCh.Load() + + var ( + resources []string + url, version, nonce string + send bool + nackErr error + ) + switch update := u.(type) { + case *resourceRequest: + resources, url, version, nonce = t.processResourceRequest(update) + case *ackRequest: + resources, url, version, nonce, send = t.processAckRequest(update, stream) + if !send { + continue + } + nackErr = update.nackErr + } + if stream == nil { + // There's no stream yet. Skip the request. This request + // will be resent to the new streams. If no stream is + // created, the watcher will timeout (same as server not + // sending response back). + continue + } + if err := t.sendAggregatedDiscoveryServiceRequest(stream, !sentNodeProto, resources, url, version, nonce, nackErr); err != nil { + t.logger.Warningf("Sending ADS request for resources: %q, url: %q, version: %q, nonce: %q failed: %v", resources, url, version, nonce, err) + // Send failed, clear the current stream. + stream = nil + } + sentNodeProto = true + } + } +} + +// sendExisting sends out xDS requests for existing resources when recovering +// from a broken stream. +// +// We call stream.Send() here with the lock being held. It should be OK to do +// that here because the stream has just started and Send() usually returns +// quickly (once it pushes the message onto the transport layer) and is only +// ever blocked if we don't have enough flow control quota. +// +// Returns true if the node proto was sent. +func (t *Transport) sendExisting(stream adsStream) (sentNodeProto bool, err error) { + t.mu.Lock() + defer t.mu.Unlock() + + // Reset only the nonces map when the stream restarts. + // + // xDS spec says the following. See section: + // https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol#ack-nack-and-resource-type-instance-version + // + // Note that the version for a resource type is not a property of an + // individual xDS stream but rather a property of the resources themselves. If + // the stream becomes broken and the client creates a new stream, the client’s + // initial request on the new stream should indicate the most recent version + // seen by the client on the previous stream + t.nonces = make(map[string]string) + + // Send node proto only in the first request on the stream. 
+ for url, resources := range t.resources { + if len(resources) == 0 { + continue + } + if err := t.sendAggregatedDiscoveryServiceRequest(stream, !sentNodeProto, mapToSlice(resources), url, t.versions[url], "", nil); err != nil { + t.logger.Warningf("Sending ADS request for resources: %q, url: %q, version: %q, nonce: %q failed: %v", resources, url, t.versions[url], "", err) + return false, err + } + sentNodeProto = true + } + + return sentNodeProto, nil +} + +// recv receives xDS responses on the provided ADS stream and branches out to +// message specific handlers. Returns true if at least one message was +// successfully received. +func (t *Transport) recv(ctx context.Context, stream adsStream) bool { + // Initialize the flow control quota for the stream. This helps to block the + // next read until the previous one is consumed by all watchers. + fc := newADSFlowControl() + + msgReceived := false + for { + // Wait for ADS stream level flow control to be available. + if !fc.wait(ctx) { + if t.logger.V(2) { + t.logger.Infof("ADS stream context canceled") + } + return msgReceived + } + + resources, url, rVersion, nonce, err := t.recvAggregatedDiscoveryServiceResponse(stream) + if err != nil { + // Note that we do not consider it an error if the ADS stream was closed + // after having received a response on the stream. This is because there + // are legitimate reasons why the server may need to close the stream during + // normal operations, such as needing to rebalance load or the underlying + // connection hitting its max connection age limit. + // (see [gRFC A9](https://github.com/grpc/proposal/blob/master/A9-server-side-conn-mgt.md)). + if msgReceived { + err = xdsresource.NewErrorf(xdsresource.ErrTypeStreamFailedAfterRecv, err.Error()) + } + t.onErrorHandler(err) + t.logger.Warningf("ADS stream closed: %v", err) + return msgReceived + } + msgReceived = true + + u := ResourceUpdate{ + Resources: resources, + URL: url, + Version: rVersion, + } + fc.setPending() + if err = t.onRecvHandler(u, fc.onDone); xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceTypeUnsupported { + t.logger.Warningf("%v", err) + continue + } + // If the data model layer returned an error, we need to NACK the + // response in which case we need to set the version to the most + // recently accepted version of this resource type. + if err != nil { + t.mu.Lock() + t.adsRequestCh.Put(&ackRequest{ + url: url, + nonce: nonce, + stream: stream, + version: t.versions[url], + nackErr: err, + }) + t.mu.Unlock() + t.logger.Warningf("Sending NACK for resource type: %q, version: %q, nonce: %q, reason: %v", url, rVersion, nonce, err) + continue + } + t.adsRequestCh.Put(&ackRequest{ + url: url, + nonce: nonce, + stream: stream, + version: rVersion, + }) + if t.logger.V(2) { + t.logger.Infof("Sending ACK for resource type: %q, version: %q, nonce: %q", url, rVersion, nonce) + } + } +} + +func mapToSlice(m map[string]bool) []string { + ret := make([]string, 0, len(m)) + for i := range m { + ret = append(ret, i) + } + return ret +} + +func sliceToMap(ss []string) map[string]bool { + ret := make(map[string]bool, len(ss)) + for _, s := range ss { + ret[s] = true + } + return ret +} + +// processResourceRequest pulls the fields needed to send out an ADS request. +// The resource type and the list of resources to request are provided by the +// user, while the version and nonce are maintained internally. +// +// The resources map, which keeps track of the resources being requested, is +// updated here. 
+// Any subsequent stream failure will re-request resources stored
+// in this map.
+//
+// Returns the list of resources, resource type url, version and nonce.
+func (t *Transport) processResourceRequest(req *resourceRequest) ([]string, string, string, string) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	resources := sliceToMap(req.resources)
+	t.resources[req.url] = resources
+	return req.resources, req.url, t.versions[req.url], t.nonces[req.url]
+}
+
+type ackRequest struct {
+	url     string // Resource type URL.
+	version string // NACK if version is an empty string.
+	nonce   string
+	nackErr error // nil for ACK, non-nil for NACK.
+	// ACK/NACK are tagged with the stream it's for. When the stream is down,
+	// all the ACK/NACK for this stream will be dropped, and the version/nonce
+	// won't be updated.
+	stream grpc.ClientStream
+}
+
+// processAckRequest pulls the fields needed to send out an ADS ACK. The nonces
+// and versions map is updated.
+//
+// Returns the list of resources, resource type url, version, nonce, and an
+// indication of whether an ACK should be sent on the wire or not.
+func (t *Transport) processAckRequest(ack *ackRequest, stream grpc.ClientStream) ([]string, string, string, string, bool) {
+	if ack.stream != stream {
+		// If ACK's stream isn't the current sending stream, this means the ACK
+		// was pushed to queue before the old stream broke, and a new stream has
+		// been started since. Return immediately here so we don't update the
+		// nonce for the new stream.
+		return nil, "", "", "", false
+	}
+
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	// Update the nonce irrespective of whether we send the ACK request on wire.
+	// An up-to-date nonce is required for the next request.
+	nonce := ack.nonce
+	t.nonces[ack.url] = nonce
+
+	s, ok := t.resources[ack.url]
+	if !ok || len(s) == 0 {
+		// We don't send the ACK request if there are no resources of this type
+		// in our resources map. This can happen either when the server sends
+		// responses before any request, or when the resources are removed
+		// while the ackRequest was in queue. If we send a request with an
+		// empty resource name list, the server may treat it as a wild card
+		// and send us everything.
+		return nil, "", "", "", false
+	}
+	resources := mapToSlice(s)
+
+	// Update the versions map only when we plan to send an ACK.
+	if ack.nackErr == nil {
+		t.versions[ack.url] = ack.version
+	}
+
+	return resources, ack.url, ack.version, nonce, true
+}
+
+// Close closes the Transport and frees any associated resources.
+func (t *Transport) Close() {
+	t.adsRunnerCancel()
+	<-t.adsRunnerDoneCh
+	t.adsRequestCh.Close()
+	t.cc.Close()
+}
+
+// ChannelConnectivityStateForTesting returns the connectivity state of the gRPC
+// channel to the management server.
+//
+// Only for testing purposes.
+func (t *Transport) ChannelConnectivityStateForTesting() connectivity.State {
+	return t.cc.GetState()
+}
+
+// adsFlowControl implements ADS stream level flow control that enables the
+// transport to block the reading of the next message off of the stream until
+// the previous update is consumed by all watchers.
+//
+// The lifetime of the flow control is tied to the lifetime of the stream.
+type adsFlowControl struct {
+	logger *grpclog.PrefixLogger
+
+	// Whether the most recent update is pending consumption by all watchers.
+	pending atomic.Bool
+	// Channel used to notify when all the watchers have consumed the most
+	// recent update. Wait() blocks on reading a value from this channel.
+ readyCh chan struct{} +} + +// newADSFlowControl returns a new adsFlowControl. +func newADSFlowControl() *adsFlowControl { + return &adsFlowControl{readyCh: make(chan struct{}, 1)} +} + +// setPending changes the internal state to indicate that there is an update +// pending consumption by all watchers. +func (fc *adsFlowControl) setPending() { + fc.pending.Store(true) +} + +// wait blocks until all the watchers have consumed the most recent update and +// returns true. If the context expires before that, it returns false. +func (fc *adsFlowControl) wait(ctx context.Context) bool { + // If there is no pending update, there is no need to block. + if !fc.pending.Load() { + // If all watchers finished processing the most recent update before the + // `recv` goroutine made the next call to `Wait()`, there would be an + // entry in the readyCh channel that needs to be drained to ensure that + // the next call to `Wait()` doesn't unblock before it actually should. + select { + case <-fc.readyCh: + default: + } + return true + } + + select { + case <-ctx.Done(): + return false + case <-fc.readyCh: + return true + } +} + +// onDone indicates that all watchers have consumed the most recent update. +func (fc *adsFlowControl) onDone() { + fc.pending.Store(false) + + select { + // Writes to the readyCh channel should not block ideally. The default + // branch here is to appease the paranoid mind. + case fc.readyCh <- struct{}{}: + default: + if fc.logger.V(2) { + fc.logger.Infof("ADS stream flow control readyCh is full") + } + } +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go index 8e9375fcbb..aff9de57df 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go @@ -86,8 +86,13 @@ type ClusterResourceData struct { Resource ClusterUpdate } +<<<<<<< HEAD // RawEqual returns true if other is equal to r. func (c *ClusterResourceData) RawEqual(other ResourceData) bool { +======= +// Equal returns true if other is equal to r. +func (c *ClusterResourceData) Equal(other ResourceData) bool { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if c == nil && other == nil { return true } diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go index 94c03d0c52..a438f05081 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go @@ -81,8 +81,13 @@ type EndpointsResourceData struct { Resource EndpointsUpdate } +<<<<<<< HEAD // RawEqual returns true if other is equal to r. func (e *EndpointsResourceData) RawEqual(other ResourceData) bool { +======= +// Equal returns true if other is equal to r. 
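Stepping back to the adsFlowControl type above: the pattern is an atomic pending flag plus a one-slot ready channel, so the producer (the recv loop) blocks until every watcher has consumed the previous update, while onDone itself never blocks. A minimal standalone sketch of the same handoff, with hypothetical names:

package main

import (
	"context"
	"fmt"
	"sync/atomic"
	"time"
)

// flowControl blocks the reader loop until the previous update has been
// consumed, mirroring the pending/readyCh scheme in adsFlowControl above.
type flowControl struct {
	pending atomic.Bool
	readyCh chan struct{} // capacity 1: onDone never blocks
}

func newFlowControl() *flowControl { return &flowControl{readyCh: make(chan struct{}, 1)} }

func (fc *flowControl) setPending() { fc.pending.Store(true) }

func (fc *flowControl) onDone() {
	fc.pending.Store(false)
	select {
	case fc.readyCh <- struct{}{}:
	default: // a notification is already queued
	}
}

func (fc *flowControl) wait(ctx context.Context) bool {
	if !fc.pending.Load() {
		// Drain a stale notification so the next wait doesn't return early.
		select {
		case <-fc.readyCh:
		default:
		}
		return true
	}
	select {
	case <-ctx.Done():
		return false
	case <-fc.readyCh:
		return true
	}
}

func main() {
	fc := newFlowControl()
	ctx := context.Background()

	fc.wait(ctx) // nothing pending: returns immediately
	fc.setPending()
	go func() { // a watcher finishes consuming the update later
		time.Sleep(20 * time.Millisecond)
		fc.onDone()
	}()
	fmt.Println("blocked until consumed:", fc.wait(ctx)) // true, after ~20ms
}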
+func (e *EndpointsResourceData) Equal(other ResourceData) bool {
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if e == nil && other == nil {
 		return true
 	}
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go
index e3ca1134a0..84163ed38f 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go
@@ -118,8 +118,13 @@ type ListenerResourceData struct {
 	Resource ListenerUpdate
 }

+<<<<<<< HEAD
 // RawEqual returns true if other is equal to l.
 func (l *ListenerResourceData) RawEqual(other ResourceData) bool {
+=======
+// Equal returns true if other is equal to l.
+func (l *ListenerResourceData) Equal(other ResourceData) bool {
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if l == nil && other == nil {
 		return true
 	}
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/matcher.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/matcher.go
index 798f618849..a595eda6f4 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/matcher.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/matcher.go
@@ -19,7 +19,11 @@ package xdsresource

 import (
 	"fmt"
+<<<<<<< HEAD
 	rand "math/rand/v2"
+=======
+	"math/rand"
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"strings"

 	"google.golang.org/grpc/internal/grpcutil"
@@ -142,11 +146,19 @@ func newFractionMatcher(fraction uint32) *fractionMatcher {
 	return &fractionMatcher{fraction: int64(fraction)}
 }

+<<<<<<< HEAD
 // RandInt64n overwrites rand for control in tests.
 var RandInt64n = rand.Int64N

 func (fm *fractionMatcher) match() bool {
 	t := RandInt64n(1000000)
+=======
+// RandInt63n overwrites rand for control in tests.
+var RandInt63n = rand.Int63n
+
+func (fm *fractionMatcher) match() bool {
+	t := RandInt63n(1000000)
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return t <= fm.fraction
 }

diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go
index e14f56f781..50c002d4ce 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go
@@ -46,9 +46,15 @@ func init() {
 type Producer interface {
 	// WatchResource uses xDS to discover the resource associated with the
 	// provided resource name. The resource type implementation determines how
+<<<<<<< HEAD
 	// xDS responses are deserialized and validated, as received from the
 	// xDS management server. Upon receipt of a response from the management
 	// server, an appropriate callback on the watcher is invoked.
+=======
+	// xDS requests are sent out and how responses are deserialized and
+	// validated. Upon receipt of a response from the management server, an
+	// appropriate callback on the watcher is invoked.
+>>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	WatchResource(rType Type, resourceName string, watcher ResourceWatcher) (cancel func())
 }

@@ -119,9 +125,17 @@ type Type interface {
 // provide an implementation of this interface to represent the configuration
 // received from the xDS management server.
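Both sides of the conflicts above agree on the shape of this contract; they differ only in whether the comparison method is named Equal or RawEqual. For orientation, here is a minimal, hypothetical type with that shape. It is not the package's real implementation (which wraps decoded xDS protos and, on the branch side, also satisfies an unexported isResourceData method):

package main

import (
	"encoding/json"
	"fmt"
)

// fakeResourceData is a hypothetical ResourceData-like type, here just a
// named blob; real implementations wrap a decoded xDS resource proto.
type fakeResourceData struct {
	Name    string `json:"name"`
	Version string `json:"version"`
}

// Equal compares against another instance of the same concrete type, the way
// ClusterResourceData and friends do after their nil checks.
func (d *fakeResourceData) Equal(other *fakeResourceData) bool {
	if d == nil || other == nil {
		return d == other // equal only if both are nil
	}
	return d.Name == other.Name && d.Version == other.Version
}

// ToJSON returns a JSON representation, as the interface requires.
func (d *fakeResourceData) ToJSON() string {
	b, err := json.Marshal(d)
	if err != nil {
		return fmt.Sprintf("<error marshaling: %v>", err)
	}
	return string(b)
}

func main() {
	a := &fakeResourceData{Name: "cluster-a", Version: "1"}
	b := &fakeResourceData{Name: "cluster-a", Version: "1"}
	fmt.Println(a.Equal(b), a.ToJSON()) // true {"name":"cluster-a","version":"1"}
}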
type ResourceData interface { +<<<<<<< HEAD // RawEqual returns true if the passed in resource data is equal to that of // the receiver, based on the underlying raw protobuf message. RawEqual(ResourceData) bool +======= + isResourceData() + + // Equal returns true if the passed in resource data is equal to that of the + // receiver. + Equal(ResourceData) bool +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // ToJSON returns a JSON string representation of the resource data. ToJSON() string diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go index 98ac313288..ee9b1069a5 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go @@ -81,8 +81,13 @@ type RouteConfigResourceData struct { Resource RouteConfigUpdate } +<<<<<<< HEAD // RawEqual returns true if other is equal to r. func (r *RouteConfigResourceData) RawEqual(other ResourceData) bool { +======= +// Equal returns true if other is equal to r. +func (r *RouteConfigResourceData) Equal(other ResourceData) bool { +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if r == nil && other == nil { return true } diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_eds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_eds.go index f94a17e7c6..15a2e37921 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_eds.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_eds.go @@ -49,7 +49,11 @@ const ( // Endpoint contains information of an endpoint. 
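As a pointer for the unmarshal_eds.go hunk further below: the branch side of that conflict rejects duplicate endpoint addresses with a seen-set keyed by the parsed address. Isolated from the proto plumbing, the check amounts to the following sketch (illustrative types only; the real code works on LbEndpoint messages):

package main

import (
	"fmt"
	"net"
	"strconv"
)

// parseAddresses rejects duplicates across a list of host/port pairs, the
// same seen-set pattern parseEndpoints applies to LbEndpoint addresses.
func parseAddresses(hostports [][2]string) ([]string, error) {
	seen := make(map[string]bool)
	var out []string
	for _, hp := range hostports {
		port, err := strconv.Atoi(hp[1])
		if err != nil || port < 0 || port > 65535 {
			return nil, fmt.Errorf("invalid port %q", hp[1])
		}
		addr := net.JoinHostPort(hp[0], hp[1])
		if seen[addr] {
			return nil, fmt.Errorf("duplicate endpoint with the same address %s", addr)
		}
		seen[addr] = true
		out = append(out, addr)
	}
	return out, nil
}

func main() {
	_, err := parseAddresses([][2]string{{"10.0.0.1", "80"}, {"10.0.0.1", "80"}})
	fmt.Println(err) // duplicate endpoint with the same address 10.0.0.1:80
}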
type Endpoint struct { +<<<<<<< HEAD Addresses []string +======= + Address string +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) HealthStatus EndpointHealthStatus Weight uint32 } diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_eds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_eds.go index fd780d6632..b2e87d31d6 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_eds.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_eds.go @@ -26,7 +26,10 @@ import ( v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" +<<<<<<< HEAD "google.golang.org/grpc/internal/envconfig" +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/xds/internal" "google.golang.org/protobuf/proto" @@ -94,6 +97,7 @@ func parseEndpoints(lbEndpoints []*v3endpointpb.LbEndpoint, uniqueEndpointAddrs } weight = w.GetValue() } +<<<<<<< HEAD addrs := []string{parseAddress(lbEndpoint.GetEndpoint().GetAddress().GetSocketAddress())} if envconfig.XDSDualstackEndpointsEnabled { for _, sa := range lbEndpoint.GetEndpoint().GetAdditionalAddresses() { @@ -110,6 +114,16 @@ func parseEndpoints(lbEndpoints []*v3endpointpb.LbEndpoint, uniqueEndpointAddrs endpoints = append(endpoints, Endpoint{ HealthStatus: EndpointHealthStatus(lbEndpoint.GetHealthStatus()), Addresses: addrs, +======= + addr := parseAddress(lbEndpoint.GetEndpoint().GetAddress().GetSocketAddress()) + if uniqueEndpointAddrs[addr] { + return nil, fmt.Errorf("duplicate endpoint with the same address %s", addr) + } + uniqueEndpointAddrs[addr] = true + endpoints = append(endpoints, Endpoint{ + HealthStatus: EndpointHealthStatus(lbEndpoint.GetHealthStatus()), + Address: addr, +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Weight: weight, }) } diff --git a/vendor/k8s.io/api/resource/v1alpha3/generated.proto b/vendor/k8s.io/api/resource/v1alpha3/generated.proto index e802a01439..13be7cbd8e 100644 --- a/vendor/k8s.io/api/resource/v1alpha3/generated.proto +++ b/vendor/k8s.io/api/resource/v1alpha3/generated.proto @@ -675,7 +675,7 @@ message ResourceClaimStatus { // which issued it knows that it must put the pod back into the queue, // waiting for the ResourceClaim to become usable again. // - // There can be at most 256 such reservations. This may get increased in + // There can be at most 32 such reservations. This may get increased in // the future, but not reduced. // // +optional diff --git a/vendor/k8s.io/api/resource/v1alpha3/types.go b/vendor/k8s.io/api/resource/v1alpha3/types.go index fb4d7041db..e3d7fd8945 100644 --- a/vendor/k8s.io/api/resource/v1alpha3/types.go +++ b/vendor/k8s.io/api/resource/v1alpha3/types.go @@ -687,7 +687,7 @@ type ResourceClaimStatus struct { // which issued it knows that it must put the pod back into the queue, // waiting for the ResourceClaim to become usable again. // - // There can be at most 256 such reservations. This may get increased in + // There can be at most 32 such reservations. This may get increased in // the future, but not reduced. 
// // +optional @@ -715,9 +715,9 @@ type ResourceClaimStatus struct { Devices []AllocatedDeviceStatus `json:"devices,omitempty" protobuf:"bytes,4,opt,name=devices"` } -// ResourceClaimReservedForMaxSize is the maximum number of entries in +// ReservedForMaxSize is the maximum number of entries in // claim.status.reservedFor. -const ResourceClaimReservedForMaxSize = 256 +const ResourceClaimReservedForMaxSize = 32 // ResourceClaimConsumerReference contains enough information to let you // locate the consumer of a ResourceClaim. The user must be a resource in the same diff --git a/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go b/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go index b41609d118..1a71d64c10 100644 --- a/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go @@ -291,7 +291,7 @@ func (ResourceClaimSpec) SwaggerDoc() map[string]string { var map_ResourceClaimStatus = map[string]string{ "": "ResourceClaimStatus tracks whether the resource has been allocated and what the result of that was.", "allocation": "Allocation is set once the claim has been allocated successfully.", - "reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 256 such reservations. This may get increased in the future, but not reduced.", + "reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.", "devices": "Devices contains the status of each device allocated for this claim, as reported by the driver. This can include driver-specific information. 
Entries are owned by their respective drivers.", } diff --git a/vendor/k8s.io/api/resource/v1beta1/generated.proto b/vendor/k8s.io/api/resource/v1beta1/generated.proto index 4ea13e0337..6d525d5b85 100644 --- a/vendor/k8s.io/api/resource/v1beta1/generated.proto +++ b/vendor/k8s.io/api/resource/v1beta1/generated.proto @@ -683,7 +683,7 @@ message ResourceClaimStatus { // which issued it knows that it must put the pod back into the queue, // waiting for the ResourceClaim to become usable again. // - // There can be at most 256 such reservations. This may get increased in + // There can be at most 32 such reservations. This may get increased in // the future, but not reduced. // // +optional diff --git a/vendor/k8s.io/api/resource/v1beta1/types.go b/vendor/k8s.io/api/resource/v1beta1/types.go index ca79c5a664..a7f1ee7b54 100644 --- a/vendor/k8s.io/api/resource/v1beta1/types.go +++ b/vendor/k8s.io/api/resource/v1beta1/types.go @@ -695,7 +695,7 @@ type ResourceClaimStatus struct { // which issued it knows that it must put the pod back into the queue, // waiting for the ResourceClaim to become usable again. // - // There can be at most 256 such reservations. This may get increased in + // There can be at most 32 such reservations. This may get increased in // the future, but not reduced. // // +optional @@ -723,9 +723,9 @@ type ResourceClaimStatus struct { Devices []AllocatedDeviceStatus `json:"devices,omitempty" protobuf:"bytes,4,opt,name=devices"` } -// ResourceClaimReservedForMaxSize is the maximum number of entries in +// ReservedForMaxSize is the maximum number of entries in // claim.status.reservedFor. -const ResourceClaimReservedForMaxSize = 256 +const ResourceClaimReservedForMaxSize = 32 // ResourceClaimConsumerReference contains enough information to let you // locate the consumer of a ResourceClaim. The user must be a resource in the same diff --git a/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go index 4ecc35d08a..1d0176cbca 100644 --- a/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go @@ -300,7 +300,7 @@ func (ResourceClaimSpec) SwaggerDoc() map[string]string { var map_ResourceClaimStatus = map[string]string{ "": "ResourceClaimStatus tracks whether the resource has been allocated and what the result of that was.", "allocation": "Allocation is set once the claim has been allocated successfully.", - "reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 256 such reservations. 
This may get increased in the future, but not reduced.",
+	"reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.",
 	"devices": "Devices contains the status of each device allocated for this claim, as reported by the driver. This can include driver-specific information. Entries are owned by their respective drivers.",
 }
diff --git a/vendor/modules.txt b/vendor/modules.txt
index ad09c51697..ccf190ee41 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -705,6 +904,9 @@ github.com/eapache/go-xerial-snappy
 # github.com/eapache/queue v1.1.0
 ## explicit
 github.com/eapache/queue
+# github.com/edwarnicke/gitoid v0.0.0-20220710194850-1be5bfda1f9d
+## explicit; go 1.18
+github.com/edwarnicke/gitoid
 # github.com/emicklei/go-restful/v3 v3.11.0
 ## explicit; go 1.13
 github.com/emicklei/go-restful/v3
@@ -1358,10 +1639,26 @@ github.com/hashicorp/vault/api
 github.com/hexops/gotextdiff
 github.com/hexops/gotextdiff/myers
 github.com/hexops/gotextdiff/span
-# github.com/in-toto/attestation v1.1.1
+# github.com/in-toto/archivista v0.8.0
+## explicit; go 1.23.0
+github.com/in-toto/archivista/pkg/api
+# github.com/in-toto/attestation v1.1.0
 ## explicit; go 1.20
 github.com/in-toto/attestation/go/predicates/provenance/v1
 github.com/in-toto/attestation/go/v1
+# github.com/in-toto/go-witness v0.7.0
+## explicit; go 1.22.8
+github.com/in-toto/go-witness/cryptoutil
+github.com/in-toto/go-witness/dsse
+github.com/in-toto/go-witness/log
+github.com/in-toto/go-witness/timestamp
 # github.com/in-toto/in-toto-golang v0.9.1-0.20240317085821-8e2966059a09
 ## explicit; go 1.20
 github.com/in-toto/in-toto-golang/in_toto
github.com/magiconair/properties v1.8.9 +======= +# github.com/magiconair/properties v1.8.7 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.19 github.com/magiconair/properties # github.com/mailru/easyjson v0.7.7 @@ -1564,7 +1884,11 @@ github.com/miekg/pkcs11 # github.com/mitchellh/go-homedir v1.1.0 ## explicit github.com/mitchellh/go-homedir +<<<<<<< HEAD # github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c +======= +# github.com/mitchellh/mapstructure v1.5.0 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.14 github.com/mitchellh/mapstructure # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd @@ -1601,7 +1925,11 @@ github.com/nishanths/predeclared/passes/predeclared # github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 ## explicit github.com/nozzle/throttler +<<<<<<< HEAD # github.com/nunnatsa/ginkgolinter v0.18.4 +======= +# github.com/nunnatsa/ginkgolinter v0.18.3 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.22.0 github.com/nunnatsa/ginkgolinter github.com/nunnatsa/ginkgolinter/internal/expression @@ -1694,8 +2022,13 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint/validations # github.com/prometheus/client_model v0.6.1 ## explicit; go 1.19 github.com/prometheus/client_model/go +<<<<<<< HEAD # github.com/prometheus/common v0.62.0 ## explicit; go 1.21 +======= +# github.com/prometheus/common v0.55.0 +## explicit; go 1.20 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) github.com/prometheus/common/expfmt github.com/prometheus/common/model # github.com/prometheus/procfs v0.15.1 @@ -1740,7 +2073,11 @@ github.com/quasilyte/regex/syntax # github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 ## explicit; go 1.17 github.com/quasilyte/stdinfo +<<<<<<< HEAD # github.com/raeperd/recvcheck v0.2.0 +======= +# github.com/raeperd/recvcheck v0.1.2 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.22.0 github.com/raeperd/recvcheck # github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 @@ -1749,8 +2086,13 @@ github.com/rcrowley/go-metrics # github.com/rivo/uniseg v0.4.7 ## explicit; go 1.18 github.com/rivo/uniseg +<<<<<<< HEAD # github.com/rogpeppe/go-internal v1.13.2-0.20241226121412-a5dc8ff20d0a ## explicit; go 1.22.0 +======= +# github.com/rogpeppe/go-internal v1.13.1 +## explicit; go 1.22 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) github.com/rogpeppe/go-internal/internal/syscall/windows github.com/rogpeppe/go-internal/internal/syscall/windows/sysdll github.com/rogpeppe/go-internal/lockedfile @@ -1770,6 +2112,7 @@ github.com/sagikazarmark/locafero # github.com/sagikazarmark/slog-shim v0.1.0 ## explicit; go 1.20 github.com/sagikazarmark/slog-shim +<<<<<<< HEAD # github.com/sanposhiho/wastedassign/v2 v2.1.0 ## explicit; go 1.18 github.com/sanposhiho/wastedassign/v2 @@ -1781,6 +2124,19 @@ github.com/santhosh-tekuri/jsonschema/v6/kind ## explicit; go 1.18 github.com/sashamelentyev/interfacebloat/pkg/analyzer # github.com/sashamelentyev/usestdlibvars v1.28.0 +======= +# github.com/sanposhiho/wastedassign/v2 v2.0.7 +## explicit; go 1.14 +github.com/sanposhiho/wastedassign/v2 +# github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 +## explicit; go 1.19 +github.com/santhosh-tekuri/jsonschema/v5 +github.com/santhosh-tekuri/jsonschema/v5/httploader +# github.com/sashamelentyev/interfacebloat v1.1.0 +## explicit; go 1.18 +github.com/sashamelentyev/interfacebloat/pkg/analyzer +# 
github.com/sashamelentyev/usestdlibvars v1.27.0 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.20 github.com/sashamelentyev/usestdlibvars/pkg/analyzer github.com/sashamelentyev/usestdlibvars/pkg/analyzer/internal/mapping @@ -1788,7 +2144,11 @@ github.com/sashamelentyev/usestdlibvars/pkg/analyzer/internal/mapping ## explicit github.com/sassoftware/relic/lib/pkcs7 github.com/sassoftware/relic/lib/x509tools +<<<<<<< HEAD # github.com/secure-systems-lab/go-securesystemslib v0.9.0 +======= +# github.com/secure-systems-lab/go-securesystemslib v0.8.0 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.20 github.com/secure-systems-lab/go-securesystemslib/cjson github.com/secure-systems-lab/go-securesystemslib/dsse @@ -1813,8 +2173,13 @@ github.com/shazow/go-diff/difflib # github.com/shibumi/go-pathspec v1.3.0 ## explicit; go 1.17 github.com/shibumi/go-pathspec +<<<<<<< HEAD # github.com/sigstore/cosign/v2 v2.4.2 ## explicit; go 1.23.4 +======= +# github.com/sigstore/cosign/v2 v2.4.1 +## explicit; go 1.22.7 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) github.com/sigstore/cosign/v2/cmd/cosign/cli/fulcio github.com/sigstore/cosign/v2/cmd/cosign/cli/options github.com/sigstore/cosign/v2/cmd/cosign/cli/sign/privacy @@ -1854,17 +2219,30 @@ github.com/sigstore/cosign/v2/pkg/providers/google github.com/sigstore/cosign/v2/pkg/providers/spiffe github.com/sigstore/cosign/v2/pkg/signature github.com/sigstore/cosign/v2/pkg/types +<<<<<<< HEAD # github.com/sigstore/fulcio v1.6.6 ## explicit; go 1.23.3 github.com/sigstore/fulcio/pkg/api # github.com/sigstore/protobuf-specs v0.4.0 ## explicit; go 1.22.0 +======= +# github.com/sigstore/fulcio v1.6.3 +## explicit; go 1.22.5 +github.com/sigstore/fulcio/pkg/api +# github.com/sigstore/protobuf-specs v0.3.2 +## explicit; go 1.18 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1 github.com/sigstore/protobuf-specs/gen/pb-go/common/v1 github.com/sigstore/protobuf-specs/gen/pb-go/dsse github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1 +<<<<<<< HEAD # github.com/sigstore/rekor v1.3.9 ## explicit; go 1.22.0 +======= +# github.com/sigstore/rekor v1.3.7 +## explicit; go 1.23.2 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) github.com/sigstore/rekor/pkg/client github.com/sigstore/rekor/pkg/generated/client github.com/sigstore/rekor/pkg/generated/client/entries @@ -1893,8 +2271,13 @@ github.com/sigstore/rekor/pkg/types/intoto/v0.0.2 github.com/sigstore/rekor/pkg/types/rekord github.com/sigstore/rekor/pkg/types/rekord/v0.0.1 github.com/sigstore/rekor/pkg/util +<<<<<<< HEAD # github.com/sigstore/sigstore v1.8.15 ## explicit; go 1.22.0 +======= +# github.com/sigstore/sigstore v1.8.10 +## explicit; go 1.22.8 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) github.com/sigstore/sigstore/pkg/cryptoutils github.com/sigstore/sigstore/pkg/fulcioroots github.com/sigstore/sigstore/pkg/oauth @@ -1902,6 +2285,7 @@ github.com/sigstore/sigstore/pkg/oauthflow github.com/sigstore/sigstore/pkg/signature github.com/sigstore/sigstore/pkg/signature/dsse github.com/sigstore/sigstore/pkg/signature/kms +<<<<<<< HEAD github.com/sigstore/sigstore/pkg/signature/kms/cliplugin github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/common github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/encoding @@ -1923,6 +2307,25 @@ github.com/sigstore/sigstore/pkg/signature/kms/gcp github.com/sigstore/sigstore/pkg/signature/kms/hashivault # 
github.com/sigstore/timestamp-authority v1.2.4 ## explicit; go 1.22.0 +======= +github.com/sigstore/sigstore/pkg/signature/options +github.com/sigstore/sigstore/pkg/signature/payload +github.com/sigstore/sigstore/pkg/tuf +# github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.10 +## explicit; go 1.22.8 +github.com/sigstore/sigstore/pkg/signature/kms/aws +# github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.10 +## explicit; go 1.22.8 +github.com/sigstore/sigstore/pkg/signature/kms/azure +# github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.10 +## explicit; go 1.22.8 +github.com/sigstore/sigstore/pkg/signature/kms/gcp +# github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.10 +## explicit; go 1.22.8 +github.com/sigstore/sigstore/pkg/signature/kms/hashivault +# github.com/sigstore/timestamp-authority v1.2.2 +## explicit; go 1.21 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) github.com/sigstore/timestamp-authority/pkg/verification # github.com/sirupsen/logrus v1.9.3 ## explicit; go 1.13 @@ -1961,7 +2364,11 @@ github.com/spf13/cast # github.com/spf13/cobra v1.8.1 ## explicit; go 1.15 github.com/spf13/cobra +<<<<<<< HEAD # github.com/spf13/pflag v1.0.6 +======= +# github.com/spf13/pflag v1.0.5 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.12 github.com/spf13/pflag # github.com/spf13/viper v1.19.0 @@ -1976,8 +2383,13 @@ github.com/spf13/viper/internal/encoding/json github.com/spf13/viper/internal/encoding/toml github.com/spf13/viper/internal/encoding/yaml github.com/spf13/viper/internal/features +<<<<<<< HEAD # github.com/spiffe/go-spiffe/v2 v2.5.0 ## explicit; go 1.22.11 +======= +# github.com/spiffe/go-spiffe/v2 v2.4.0 +## explicit; go 1.21 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) github.com/spiffe/go-spiffe/v2/bundle/jwtbundle github.com/spiffe/go-spiffe/v2/bundle/spiffebundle github.com/spiffe/go-spiffe/v2/bundle/x509bundle @@ -2000,8 +2412,13 @@ github.com/src-d/gcfg/types # github.com/ssgreg/nlreturn/v2 v2.2.1 ## explicit; go 1.13 github.com/ssgreg/nlreturn/v2/pkg/nlreturn +<<<<<<< HEAD # github.com/stbenjam/no-sprintf-host-port v0.2.0 ## explicit; go 1.18 +======= +# github.com/stbenjam/no-sprintf-host-port v0.1.1 +## explicit; go 1.16 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) github.com/stbenjam/no-sprintf-host-port/pkg/analyzer # github.com/stoewer/go-strcase v1.2.0 ## explicit; go 1.11 @@ -2031,11 +2448,19 @@ github.com/syndtr/goleveldb/leveldb/opt github.com/syndtr/goleveldb/leveldb/storage github.com/syndtr/goleveldb/leveldb/table github.com/syndtr/goleveldb/leveldb/util +<<<<<<< HEAD # github.com/tdakkota/asciicheck v0.3.0 ## explicit; go 1.18 github.com/tdakkota/asciicheck # github.com/tektoncd/pipeline v0.68.0 ## explicit; go 1.22.3 +======= +# github.com/tdakkota/asciicheck v0.2.0 +## explicit; go 1.18 +github.com/tdakkota/asciicheck +# github.com/tektoncd/pipeline v0.66.0 +## explicit; go 1.22 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) github.com/tektoncd/pipeline/internal/artifactref github.com/tektoncd/pipeline/pkg/apis/config github.com/tektoncd/pipeline/pkg/apis/config/resolver @@ -2127,11 +2552,19 @@ github.com/tektoncd/pipeline/pkg/result github.com/tektoncd/pipeline/pkg/spire/config github.com/tektoncd/pipeline/pkg/substitution github.com/tektoncd/pipeline/test +<<<<<<< HEAD # github.com/tektoncd/plumbing v0.0.0-20250115133002-f515628dffea ## explicit; go 1.22 github.com/tektoncd/plumbing github.com/tektoncd/plumbing/scripts # github.com/tetafro/godot v1.4.20 
+======= +# github.com/tektoncd/plumbing v0.0.0-20230907180608-5625252a2de1 +## explicit; go 1.19 +github.com/tektoncd/plumbing +github.com/tektoncd/plumbing/scripts +# github.com/tetafro/godot v1.4.18 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.20 github.com/tetafro/godot # github.com/thales-e-security/pool v0.0.2 @@ -2152,7 +2585,11 @@ github.com/theupdateframework/go-tuf/pkg/targets github.com/theupdateframework/go-tuf/sign github.com/theupdateframework/go-tuf/util github.com/theupdateframework/go-tuf/verify +<<<<<<< HEAD # github.com/timakin/bodyclose v0.0.0-20241017074812-ed6a65f985e3 +======= +# github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.12 github.com/timakin/bodyclose/passes/bodyclose # github.com/timonwong/loggercheck v0.10.1 @@ -2169,7 +2606,11 @@ github.com/titanous/rocacheck # github.com/tjfoc/gmsm v1.4.1 ## explicit; go 1.14 github.com/tjfoc/gmsm/sm3 +<<<<<<< HEAD # github.com/tomarrell/wrapcheck/v2 v2.10.0 +======= +# github.com/tomarrell/wrapcheck/v2 v2.9.0 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.21 github.com/tomarrell/wrapcheck/v2/wrapcheck # github.com/tommy-muehle/go-mnd/v2 v2.5.1 @@ -2183,6 +2624,7 @@ github.com/transparency-dev/merkle github.com/transparency-dev/merkle/compact github.com/transparency-dev/merkle/proof github.com/transparency-dev/merkle/rfc6962 +<<<<<<< HEAD # github.com/ultraware/funlen v0.2.0 ## explicit; go 1.22.0 github.com/ultraware/funlen @@ -2193,17 +2635,39 @@ github.com/ultraware/whitespace ## explicit; go 1.19 github.com/uudashr/gocognit # github.com/uudashr/iface v1.3.0 +======= +# github.com/ultraware/funlen v0.1.0 +## explicit; go 1.20 +github.com/ultraware/funlen +# github.com/ultraware/whitespace v0.1.1 +## explicit; go 1.20 +github.com/ultraware/whitespace +# github.com/uudashr/gocognit v1.1.3 +## explicit; go 1.18 +github.com/uudashr/gocognit +# github.com/uudashr/iface v1.2.1 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.22.1 github.com/uudashr/iface/identical github.com/uudashr/iface/internal/directive github.com/uudashr/iface/opaque github.com/uudashr/iface/unused +<<<<<<< HEAD # github.com/vbatts/tar-split v0.11.6 +======= +# github.com/vbatts/tar-split v0.11.5 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.17 github.com/vbatts/tar-split/archive/tar # github.com/x448/float16 v0.8.4 ## explicit; go 1.11 github.com/x448/float16 +<<<<<<< HEAD +======= +# github.com/xanzy/go-gitlab v0.109.0 +## explicit; go 1.19 +github.com/xanzy/go-gitlab +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) # github.com/xanzy/ssh-agent v0.3.3 ## explicit; go 1.16 github.com/xanzy/ssh-agent @@ -2233,15 +2697,22 @@ github.com/ykadowak/zerologlint # github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 ## explicit; go 1.17 github.com/youmark/pkcs8 +<<<<<<< HEAD # github.com/zeebo/errs v1.4.0 +======= +# github.com/zeebo/errs v1.3.0 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.12 github.com/zeebo/errs # gitlab.com/bosi/decorder v0.4.2 ## explicit; go 1.20 gitlab.com/bosi/decorder +<<<<<<< HEAD # gitlab.com/gitlab-org/api/client-go v0.121.0 ## explicit; go 1.22 gitlab.com/gitlab-org/api/client-go +======= +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) # go-simpler.org/musttag v0.13.0 ## explicit; go 1.20 go-simpler.org/musttag @@ -2318,6 +2789,7 @@ go.opencensus.io/trace 
go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate +<<<<<<< HEAD # go.opentelemetry.io/auto/sdk v1.1.0 ## explicit; go 1.22.0 go.opentelemetry.io/auto/sdk @@ -2331,12 +2803,28 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal # go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 ## explicit; go 1.22.0 +======= +# go.opentelemetry.io/contrib/detectors/gcp v1.29.0 +## explicit; go 1.21 +go.opentelemetry.io/contrib/detectors/gcp +# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 +## explicit; go 1.21 +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 +## explicit; go 1.21 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil +<<<<<<< HEAD # go.opentelemetry.io/otel v1.34.0 ## explicit; go 1.22.0 +======= +# go.opentelemetry.io/otel v1.29.0 +## explicit; go 1.21 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute go.opentelemetry.io/otel/baggage @@ -2352,6 +2840,7 @@ go.opentelemetry.io/otel/semconv/v1.17.0/httpconv go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.24.0 go.opentelemetry.io/otel/semconv/v1.26.0 +<<<<<<< HEAD # go.opentelemetry.io/otel/metric v1.34.0 ## explicit; go 1.22.0 go.opentelemetry.io/otel/metric @@ -2359,10 +2848,20 @@ go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop # go.opentelemetry.io/otel/sdk v1.34.0 ## explicit; go 1.22.0 +======= +# go.opentelemetry.io/otel/metric v1.29.0 +## explicit; go 1.21 +go.opentelemetry.io/otel/metric +go.opentelemetry.io/otel/metric/embedded +go.opentelemetry.io/otel/metric/noop +# go.opentelemetry.io/otel/sdk v1.29.0 +## explicit; go 1.21 +>>>>>>> 70e0318b1 ([WIP] add archivista storage backend) go.opentelemetry.io/otel/sdk go.opentelemetry.io/otel/sdk/instrumentation go.opentelemetry.io/otel/sdk/internal/x go.opentelemetry.io/otel/sdk/resource +<<<<<<< HEAD # go.opentelemetry.io/otel/sdk/metric v1.32.0 ## explicit; go 1.22 go.opentelemetry.io/otel/sdk/metric @@ -2376,6 +2875,32 @@ go.opentelemetry.io/otel/sdk/metric/metricdata go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded go.opentelemetry.io/otel/trace/noop +======= +# go.opentelemetry.io/otel/sdk/metric v1.29.0 +## explicit; go 1.21 +go.opentelemetry.io/otel/sdk/metric +go.opentelemetry.io/otel/sdk/metric/internal +go.opentelemetry.io/otel/sdk/metric/internal/aggregate +go.opentelemetry.io/otel/sdk/metric/internal/exemplar +go.opentelemetry.io/otel/sdk/metric/internal/x +go.opentelemetry.io/otel/sdk/metric/metricdata +# go.opentelemetry.io/otel/trace v1.29.0 +## explicit; go 1.21 +go.opentelemetry.io/otel/trace +go.opentelemetry.io/otel/trace/embedded +# go.step.sm/crypto v0.54.2 +## explicit; go 1.22 +go.step.sm/crypto/fingerprint +go.step.sm/crypto/internal/bcrypt_pbkdf +go.step.sm/crypto/internal/emoji +go.step.sm/crypto/internal/utils 
@@ -2423,7 +2948,11 @@ gocloud.dev/docstore/mongodocstore
 # gocloud.dev/pubsub/kafkapubsub v0.40.0
 ## explicit; go 1.21.0
 gocloud.dev/pubsub/kafkapubsub
-# golang.org/x/crypto v0.33.0
+# golang.org/x/crypto v0.32.0
 ## explicit; go 1.20
 golang.org/x/crypto/argon2
 golang.org/x/crypto/blake2b
@@ -2476,8 +3009,13 @@ golang.org/x/mod/internal/lazyregexp
 golang.org/x/mod/modfile
 golang.org/x/mod/module
 golang.org/x/mod/semver
+golang.org/x/mod/sumdb/dirhash
 golang.org/x/mod/sumdb/note
 # golang.org/x/net v0.34.0
 ## explicit; go 1.18
 golang.org/x/net/context
 golang.org/x/net/context/ctxhttp
@@ -2502,27 +3044,32 @@ golang.org/x/oauth2/google/internal/stsexchange
 golang.org/x/oauth2/internal
 golang.org/x/oauth2/jws
 golang.org/x/oauth2/jwt
-# golang.org/x/sync v0.11.0
+# golang.org/x/sync v0.10.0
 ## explicit; go 1.18
 golang.org/x/sync/errgroup
 golang.org/x/sync/semaphore
 golang.org/x/sync/singleflight
-# golang.org/x/sys v0.30.0
+# golang.org/x/sys v0.29.0
 ## explicit; go 1.18
 golang.org/x/sys/cpu
 golang.org/x/sys/plan9
 golang.org/x/sys/unix
 golang.org/x/sys/windows
 golang.org/x/sys/windows/registry
-# golang.org/x/term v0.29.0
+# golang.org/x/term v0.28.0
 ## explicit; go 1.18
 golang.org/x/term
-# golang.org/x/text v0.22.0
+# golang.org/x/text v0.21.0
 ## explicit; go 1.18
 golang.org/x/text/encoding
 golang.org/x/text/encoding/internal
 golang.org/x/text/encoding/internal/identifier
 golang.org/x/text/encoding/unicode
 golang.org/x/text/feature/plural
 golang.org/x/text/internal
 golang.org/x/text/internal/catmsg
@@ -3109,7 +3715,7 @@ honnef.co/go/tools/stylecheck/st1021
 honnef.co/go/tools/stylecheck/st1022
 honnef.co/go/tools/stylecheck/st1023
 honnef.co/go/tools/unused
-# k8s.io/api v0.32.1
+# k8s.io/api v0.32.0
 ## explicit; go 1.23.0
 k8s.io/api/admission/v1
 k8s.io/api/admissionregistration/v1
@@ -3173,7 +3779,7 @@ k8s.io/api/storagemigration/v1alpha1
 ## explicit; go 1.21
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1
-# k8s.io/apimachinery v0.32.1
+# k8s.io/apimachinery v0.32.0
 ## explicit; go 1.23.0
 k8s.io/apimachinery/pkg/api/equality
 k8s.io/apimachinery/pkg/api/errors
@@ -3230,7 +3836,7 @@ k8s.io/apimachinery/pkg/version
 k8s.io/apimachinery/pkg/watch
 k8s.io/apimachinery/third_party/forked/golang/json
 k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/client-go v0.32.1
+# k8s.io/client-go v0.32.0
 ## explicit; go 1.23.0
 k8s.io/client-go/applyconfigurations
 k8s.io/client-go/applyconfigurations/admissionregistration/v1
@@ -3563,7 +4169,7 @@ k8s.io/client-go/util/keyutil
 k8s.io/client-go/util/retry
 k8s.io/client-go/util/watchlist
 k8s.io/client-go/util/workqueue
-# k8s.io/code-generator v0.32.1
+# k8s.io/code-generator v0.32.0
 ## explicit; go 1.23.0
 k8s.io/code-generator/cmd/deepcopy-gen
 k8s.io/code-generator/cmd/deepcopy-gen/args
From 0cc8d956c55dae822b250dd871dcea94bdeab460 Mon Sep 17 00:00:00 2001
From: Cole Kennedy
Date: Thu, 20 Feb 2025 19:11:56 -0600
Subject: [PATCH 2/2] [WIP] add archivista storage backend

---
 go.mod | 11 +-
 go.sum | 4 +-
 vendor/cel.dev/expr/.bazelversion | 4 -
 vendor/cel.dev/expr/.gitignore | 3 -
 vendor/cel.dev/expr/BUILD.bazel | 4 -
 vendor/cel.dev/expr/README.md | 8 -
 vendor/cel.dev/expr/WORKSPACE | 27 -
 vendor/cel.dev/expr/cloudbuild.yaml | 7 -
 vendor/cel.dev/expr/regen_go_proto.sh | 8 -
 vendor/cloud.google.com/go/auth/CHANGES.md | 3 -
 vendor/cloud.google.com/go/auth/auth.go | 22 -
 .../go/auth/credentials/compute.go | 16 -
 .../go/auth/credentials/detect.go | 24 -
 .../go/auth/credentials/filetypes.go | 18 -
 .../go/auth/credentials/idtoken/cache.go | 23 -
 .../go/auth/credentials/idtoken/compute.go | 23 -
 .../go/auth/credentials/idtoken/file.go | 47 -
 .../go/auth/credentials/idtoken/idtoken.go | 6 -
 .../go/auth/credentials/idtoken/validate.go | 49 -
 .../auth/credentials/impersonate/idtoken.go | 112 -
 .../credentials/impersonate/impersonate.go | 57 -
 .../go/auth/credentials/impersonate/user.go | 32 -
 .../internal/externalaccount/aws_provider.go | 33 -
 .../externalaccount/externalaccount.go | 37 -
 .../internal/externalaccount/url_provider.go | 15 -
 .../externalaccountuser.go | 12 -
 .../go/auth/credentials/internal/gdch/gdch.go | 41 -
 .../internal/impersonate/impersonate.go | 18 -
 .../internal/stsexchange/sts_exchange.go | 18 -
 .../go/auth/credentials/selfsignedjwt.go | 28 -
 .../go/auth/grpctransport/directpath.go | 9 -
 .../go/auth/grpctransport/grpctransport.go | 33 -
 .../go/auth/httptransport/httptransport.go | 18 -
 .../go/auth/httptransport/trace.go | 93 -
 .../go/auth/httptransport/transport.go | 24 -
 .../go/auth/internal/compute/compute.go | 5 -
 .../go/auth/internal/internal.go | 43 -
 .../go/auth/internal/jwt/jwt.go | 8 -
 .../go/auth/internal/transport/cba.go | 11 -
 .../go/auth/internal/transport/s2a.go | 29 -
 .../go/auth/internal/transport/transport.go | 3 -
 .../go/auth/oauth2adapt/CHANGES.md | 3 -
 .../cloud.google.com/go/auth/threelegged.go | 19 -
 .../go/compute/metadata/CHANGES.md | 3 -
 .../go/compute/metadata/metadata.go | 41 -
 .../go/kms/apiv1/autokey_admin_client.go | 186 -
 .../go/kms/apiv1/autokey_client.go | 206 -
 .../go/kms/apiv1/auxiliary.go | 28 -
 vendor/cloud.google.com/go/kms/apiv1/doc.go | 50 -
.../go/kms/apiv1/ekm_client.go | 262 -- .../go/kms/apiv1/key_management_client.go | 652 --- .../go/kms/apiv1/kmspb/autokey.pb.go | 170 - .../go/kms/apiv1/kmspb/autokey_admin.pb.go | 129 - .../go/kms/apiv1/kmspb/ekm_service.pb.go | 347 -- .../go/kms/apiv1/kmspb/resources.pb.go | 291 -- .../go/kms/apiv1/kmspb/service.pb.go | 1060 ----- .../go/kms/internal/version.go | 4 - vendor/cloud.google.com/go/storage/CHANGES.md | 3 - vendor/cloud.google.com/go/storage/client.go | 18 - .../go/storage/experimental/experimental.go | 3 - .../go/storage/grpc_client.go | 754 ---- .../go/storage/grpc_metrics.go | 209 - .../go/storage/http_client.go | 37 - .../go/storage/internal/apiv2/auxiliary.go | 4 - .../storage/internal/apiv2/auxiliary_go123.go | 4 - .../go/storage/internal/apiv2/doc.go | 12 - .../internal/apiv2/gapic_metadata.json | 6 - .../go/storage/internal/apiv2/helpers.go | 14 - .../storage/internal/apiv2/storage_client.go | 169 - .../internal/apiv2/storagepb/storage.pb.go | 3690 +---------------- .../go/storage/internal/experimental.go | 3 - .../go/storage/internal/version.go | 4 - vendor/cloud.google.com/go/storage/invoke.go | 6 - vendor/cloud.google.com/go/storage/option.go | 13 - vendor/cloud.google.com/go/storage/reader.go | 19 - vendor/cloud.google.com/go/storage/storage.go | 147 - vendor/cloud.google.com/go/storage/writer.go | 6 - vendor/filippo.io/edwards25519/LICENSE | 27 - vendor/filippo.io/edwards25519/README.md | 14 - vendor/filippo.io/edwards25519/doc.go | 20 - .../filippo.io/edwards25519/edwards25519.go | 427 -- vendor/filippo.io/edwards25519/extra.go | 349 -- vendor/filippo.io/edwards25519/field/fe.go | 420 -- .../filippo.io/edwards25519/field/fe_amd64.go | 16 - .../filippo.io/edwards25519/field/fe_amd64.s | 379 -- .../edwards25519/field/fe_amd64_noasm.go | 12 - .../filippo.io/edwards25519/field/fe_arm64.go | 16 - .../filippo.io/edwards25519/field/fe_arm64.s | 42 - .../edwards25519/field/fe_arm64_noasm.go | 12 - .../filippo.io/edwards25519/field/fe_extra.go | 50 - .../edwards25519/field/fe_generic.go | 266 -- vendor/filippo.io/edwards25519/scalar.go | 343 -- vendor/filippo.io/edwards25519/scalar_fiat.go | 1147 ----- vendor/filippo.io/edwards25519/scalarmult.go | 214 - vendor/filippo.io/edwards25519/tables.go | 129 - vendor/github.com/4meepo/tagalign/.gitignore | 3 - .../4meepo/tagalign/.goreleaser.yml | 8 - vendor/github.com/4meepo/tagalign/options.go | 10 - vendor/github.com/4meepo/tagalign/tagalign.go | 219 - .../Antonboom/nilnil/pkg/analyzer/analyzer.go | 4 - .../azure-sdk-for-go/sdk/azcore/CHANGELOG.md | 3 - .../internal/resource/resource_identifier.go | 3 - .../sdk/azcore/internal/pollers/op/op.go | 25 - .../sdk/azcore/internal/shared/constants.go | 4 - .../sdk/azcore/runtime/pager.go | 3 - .../sdk/azcore/runtime/poller.go | 16 - .../sdk/azidentity/BREAKING_CHANGES.md | 3 - .../sdk/azidentity/CHANGELOG.md | 3 - .../azure-sdk-for-go/sdk/azidentity/README.md | 24 - .../sdk/azidentity/TOKEN_CACHING.MD | 14 - .../sdk/azidentity/TROUBLESHOOTING.md | 6 - .../sdk/azidentity/azidentity.go | 10 - .../sdk/azidentity/azure_cli_credential.go | 6 - .../azure_developer_cli_credential.go | 6 - .../azidentity/chained_token_credential.go | 7 - .../azure-sdk-for-go/sdk/azidentity/ci.yml | 22 - .../sdk/azidentity/confidential_client.go | 4 - .../azidentity/default_azure_credential.go | 17 - .../sdk/azidentity/device_code_credential.go | 5 - .../interactive_browser_credential.go | 5 - .../sdk/azidentity/managed_identity_client.go | 42 - .../sdk/azidentity/public_client.go | 12 - 
.../sdk/azidentity/test-resources-post.ps1 | 14 - .../sdk/azidentity/version.go | 4 - .../sdk/security/keyvault/azkeys/CHANGELOG.md | 3 - .../sdk/security/keyvault/azkeys/assets.json | 4 - .../sdk/security/keyvault/azkeys/ci.yml | 18 - .../security/keyvault/azkeys/custom_client.go | 3 - .../keyvault/azkeys/platform-matrix.json | 5 - .../sdk/security/keyvault/azkeys/version.go | 4 - .../security/keyvault/internal/CHANGELOG.md | 3 - .../keyvault/internal/challenge_policy.go | 54 - .../keyvault/internal/ci.securitykeyvault.yml | 28 - .../security/keyvault/internal/constants.go | 4 - .../apps/confidential/confidential.go | 13 - .../apps/internal/base/base.go | 12 - .../apps/internal/json/json.go | 58 - .../apps/internal/local/server.go | 7 - .../apps/internal/oauth/oauth.go | 11 - .../internal/oauth/ops/authority/authority.go | 93 - .../internal/oauth/ops/internal/comm/comm.go | 19 - .../apps/internal/oauth/resolvers.go | 28 - .../detectors/gcp/app_engine.go | 7 - .../detectors/gcp/detector.go | 28 - .../detectors/gcp/faas.go | 11 - .../detectors/gcp/gce.go | 40 - .../detectors/gcp/gke.go | 16 - .../Masterminds/semver/v3/version.go | 27 - .../go-crypto/internal/byteutil/byteutil.go | 12 - .../ProtonMail/go-crypto/ocb/ocb.go | 65 - .../go-crypto/openpgp/armor/armor.go | 82 - .../go-crypto/openpgp/armor/encode.go | 50 - .../go-crypto/openpgp/canonical_text.go | 9 - .../ProtonMail/go-crypto/openpgp/ecdh/ecdh.go | 10 - .../go-crypto/openpgp/errors/errors.go | 24 - .../openpgp/internal/algorithm/cipher.go | 20 - .../openpgp/internal/ecc/curve_info.go | 18 - .../go-crypto/openpgp/internal/ecc/ed25519.go | 7 - .../go-crypto/openpgp/internal/ecc/ed448.go | 7 - .../go-crypto/openpgp/key_generation.go | 86 - .../ProtonMail/go-crypto/openpgp/keys.go | 92 - .../go-crypto/openpgp/packet/aead_crypter.go | 126 - .../openpgp/packet/aead_encrypted.go | 17 - .../go-crypto/openpgp/packet/compressed.go | 17 - .../go-crypto/openpgp/packet/config.go | 25 - .../go-crypto/openpgp/packet/encrypted_key.go | 156 - .../go-crypto/openpgp/packet/literal.go | 6 - .../openpgp/packet/one_pass_signature.go | 51 - .../go-crypto/openpgp/packet/opaque.go | 8 - .../go-crypto/openpgp/packet/packet.go | 52 - .../go-crypto/openpgp/packet/private_key.go | 232 -- .../go-crypto/openpgp/packet/public_key.go | 247 -- .../go-crypto/openpgp/packet/reader.go | 37 - .../go-crypto/openpgp/packet/signature.go | 417 -- .../openpgp/packet/symmetric_key_encrypted.go | 71 - .../openpgp/packet/symmetrically_encrypted.go | 3 - .../packet/symmetrically_encrypted_aead.go | 57 - .../packet/symmetrically_encrypted_mdc.go | 18 - .../go-crypto/openpgp/packet/userattribute.go | 8 - .../go-crypto/openpgp/packet/userid.go | 8 - .../ProtonMail/go-crypto/openpgp/read.go | 144 - .../go-crypto/openpgp/read_write_test_data.go | 21 - .../ProtonMail/go-crypto/openpgp/s2k/s2k.go | 31 - .../go-crypto/openpgp/s2k/s2k_cache.go | 4 - .../go-crypto/openpgp/s2k/s2k_config.go | 10 - .../ProtonMail/go-crypto/openpgp/write.go | 58 - .../alecthomas/go-check-sumtype/README.md | 3 - .../alecthomas/go-check-sumtype/check.go | 15 - .../alecthomas/go-check-sumtype/config.go | 3 - .../alecthomas/go-check-sumtype/def.go | 24 - .../aws/aws-sdk-go-v2/aws/config.go | 3 - .../aws-sdk-go-v2/aws/go_module_metadata.go | 4 - .../aws/middleware/user_agent.go | 32 - .../aws-sdk-go-v2/aws/protocol/query/array.go | 38 - .../aws/protocol/query/object.go | 15 - .../aws-sdk-go-v2/aws/protocol/query/value.go | 3 - .../aws/retry/retryable_error.go | 4 - .../aws/aws-sdk-go-v2/config/CHANGELOG.md | 3 - 
.../aws/aws-sdk-go-v2/config/config.go | 7 - .../aws/aws-sdk-go-v2/config/env_config.go | 179 - .../config/go_module_metadata.go | 4 - .../aws/aws-sdk-go-v2/config/load_options.go | 11 - .../aws/aws-sdk-go-v2/config/provider.go | 3 - .../aws/aws-sdk-go-v2/config/resolve.go | 3 - .../aws/aws-sdk-go-v2/config/shared_config.go | 15 - .../aws-sdk-go-v2/credentials/CHANGELOG.md | 3 - .../credentials/go_module_metadata.go | 4 - .../feature/ec2/imds/CHANGELOG.md | 3 - .../feature/ec2/imds/go_module_metadata.go | 4 - .../internal/configsources/CHANGELOG.md | 3 - .../configsources/go_module_metadata.go | 4 - .../endpoints/awsrulesfn/partitions.json | 6 - .../internal/endpoints/v2/CHANGELOG.md | 3 - .../endpoints/v2/go_module_metadata.go | 4 - .../aws-sdk-go-v2/internal/ini/CHANGELOG.md | 3 - .../internal/ini/go_module_metadata.go | 4 - .../internal/accept-encoding/CHANGELOG.md | 3 - .../accept-encoding/go_module_metadata.go | 4 - .../internal/presigned-url/CHANGELOG.md | 3 - .../presigned-url/go_module_metadata.go | 4 - .../aws-sdk-go-v2/service/kms/CHANGELOG.md | 3 - .../aws-sdk-go-v2/service/kms/api_client.go | 4 - .../service/kms/go_module_metadata.go | 4 - .../kms/internal/endpoints/endpoints.go | 6 - .../aws-sdk-go-v2/service/sso/CHANGELOG.md | 3 - .../aws-sdk-go-v2/service/sso/api_client.go | 4 - .../service/sso/go_module_metadata.go | 4 - .../service/ssooidc/CHANGELOG.md | 3 - .../service/ssooidc/api_client.go | 4 - .../service/ssooidc/api_op_CreateToken.go | 30 - .../ssooidc/api_op_CreateTokenWithIAM.go | 12 - .../service/ssooidc/api_op_RegisterClient.go | 10 - .../aws/aws-sdk-go-v2/service/ssooidc/doc.go | 6 - .../service/ssooidc/go_module_metadata.go | 4 - .../aws-sdk-go-v2/service/sts/CHANGELOG.md | 3 - .../aws-sdk-go-v2/service/sts/api_client.go | 4 - .../service/sts/api_op_AssumeRole.go | 62 - .../service/sts/api_op_AssumeRoleWithSAML.go | 25 - .../sts/api_op_AssumeRoleWithWebIdentity.go | 63 - .../service/sts/api_op_GetFederationToken.go | 10 - .../service/sts/api_op_GetSessionToken.go | 11 - .../service/sts/deserializers.go | 6 - .../aws-sdk-go-v2/service/sts/generated.json | 3 - .../service/sts/go_module_metadata.go | 4 - .../sts/internal/endpoints/endpoints.go | 6 - .../aws-sdk-go-v2/service/sts/serializers.go | 6 - .../aws-sdk-go-v2/service/sts/types/errors.go | 12 - .../aws-sdk-go-v2/service/sts/validators.go | 9 - .../aws-sdk-go/aws/session/shared_config.go | 4 - .../github.com/aws/aws-sdk-go/aws/version.go | 4 - .../dynamodb/dynamodbattribute/converter.go | 27 - vendor/github.com/aws/smithy-go/CHANGELOG.md | 3 - .../github.com/aws/smithy-go/CONTRIBUTING.md | 3 - vendor/github.com/aws/smithy-go/Makefile | 3 - .../encoding/httpbinding/path_replace.go | 28 - .../aws/smithy-go/go_module_metadata.go | 4 - .../aws/smithy-go/transport/http/host.go | 4 - .../aws/smithy-go/transport/http/metrics.go | 51 - .../http/middleware_close_response_body.go | 15 - .../aws/smithy-go/transport/http/request.go | 12 - .../github.com/bombsimon/wsl/v4/.golangci.yml | 34 - .../github.com/bombsimon/wsl/v4/analyzer.go | 19 - vendor/github.com/bombsimon/wsl/v4/wsl.go | 24 - .../buildkite/agent/v3/api/artifacts.go | 61 - .../github.com/buildkite/agent/v3/api/auth.go | 37 - .../buildkite/agent/v3/api/client.go | 100 - .../github.com/buildkite/agent/v3/api/oidc.go | 17 - .../buildkite/agent/v3/api/steps.go | 3 - .../buildkite/agent/v3/version/VERSION | 4 - .../buildkite/agent/v3/version/version.go | 3 - .../buildkite/go-pipeline/README.md | 4 - .../go-pipeline/ordered/unmarshal.go | 60 - 
.../go-pipeline/step_command_matrix.go | 8 - .../buildkite/interpolate/interpolate.go | 31 - .../buildkite/interpolate/parser.go | 65 - vendor/github.com/buildkite/roko/retrier.go | 68 - .../butuzov/ireturn/analyzer/analyzer.go | 33 - .../ireturn/analyzer/internal/config/allow.go | 4 - .../ireturn/analyzer/internal/config/new.go | 4 - .../analyzer/internal/config/reject.go | 4 - .../ireturn/analyzer/internal/types/iface.go | 4 - .../github.com/butuzov/mirror/MIRROR_FUNCS.md | 204 - vendor/github.com/butuzov/mirror/Makefile | 33 - vendor/github.com/butuzov/mirror/analyzer.go | 6 - .../butuzov/mirror/checkers_maphash.go | 35 - .../mirror/internal/checker/checker.go | 12 - .../mirror/internal/checker/violation.go | 12 - vendor/github.com/butuzov/mirror/readme.md | 15 - .../ckaznocha/intrange/.golangci.yml | 31 - .../github.com/ckaznocha/intrange/intrange.go | 74 - .../stargz-snapshotter/estargz/build.go | 6 - .../stargz-snapshotter/estargz/testutil.go | 24 - .../github.com/coreos/go-oidc/v3/oidc/oidc.go | 39 - .../curioswitch/go-reassign/.golangci.yml | 15 - .../curioswitch/go-reassign/README.md | 7 - .../go-reassign/internal/analyzer/analyzer.go | 18 - .../cli/cli/config/credentials/file_store.go | 11 - .../client/client.go | 32 - .../credentials/error.go | 16 - .../envoy/config/cluster/v3/cluster.pb.go | 743 ---- .../config/cluster/v3/cluster.pb.validate.go | 3 - .../config/cluster/v3/cluster_vtproto.pb.go | 6 - .../envoy/config/core/v3/protocol.pb.go | 434 -- .../config/core/v3/protocol.pb.validate.go | 6 - .../config/core/v3/protocol_vtproto.pb.go | 12 - .../config/listener/v3/quic_config.pb.go | 46 - .../listener/v3/quic_config.pb.validate.go | 3 - .../listener/v3/quic_config_vtproto.pb.go | 6 - .../config/route/v3/route_components.pb.go | 1429 ------- .../v3/http_connection_manager.pb.go | 17 - .../v3/client_side_weighted_round_robin.pb.go | 42 - ...nt_side_weighted_round_robin_vtproto.pb.go | 6 - .../transport_sockets/tls/v3/common.pb.go | 154 - .../tls/v3/common.pb.validate.go | 4 - .../go-jose/v3/cryptosigner/cryptosigner.go | 147 - vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go | 18 - .../golangci/gofmt/gofmt/golangci.go | 6 - .../golangci-lint/cmd/golangci-lint/main.go | 17 - .../pkg/commands/config_verify.go | 34 - .../golangci-lint/pkg/commands/flagsets.go | 48 - .../golangci-lint/pkg/commands/help.go | 45 - .../golangci-lint/pkg/commands/run.go | 10 - .../golangci-lint/pkg/config/config.go | 25 - .../golangci-lint/pkg/config/issues.go | 9 - .../pkg/config/linters_settings.go | 77 - .../golangci-lint/pkg/config/loader.go | 67 - .../golangci-lint/pkg/config/output.go | 7 - .../golangci/golangci-lint/pkg/config/run.go | 7 - .../golangci-lint/pkg/fsutils/fsutils.go | 4 - .../golangci-lint/pkg/goanalysis/issue.go | 4 - .../golangci-lint/pkg/goanalysis/runner.go | 62 - .../pkg/goanalysis/runner_action.go | 26 - .../pkg/goanalysis/runner_action_cache.go | 44 - .../pkg/goanalysis/runner_base.go | 370 -- .../pkg/goanalysis/runner_loadingpackage.go | 27 - .../golangci-lint/pkg/goanalysis/runners.go | 14 - .../pkg/goanalysis/runners_cache.go | 8 - .../pkg/golinters/asasalint/asasalint.go | 9 - .../pkg/golinters/bidichk/bidichk.go | 43 - .../pkg/golinters/bodyclose/bodyclose.go | 4 - .../pkg/golinters/cyclop/cyclop.go | 4 - .../pkg/golinters/dogsled/dogsled.go | 102 - .../golangci-lint/pkg/golinters/dupl/dupl.go | 6 - .../pkg/golinters/dupword/dupword.go | 15 - .../pkg/golinters/errcheck/errcheck.go | 10 - .../pkg/golinters/errchkjson/errchkjson.go | 18 - 
.../pkg/golinters/errorlint/errorlint.go | 27 - .../pkg/golinters/forbidigo/forbidigo.go | 72 - .../pkg/golinters/funlen/funlen.go | 72 - .../golangci-lint/pkg/golinters/gci/gci.go | 241 -- .../golinters/ginkgolinter/ginkgolinter.go | 15 - .../gochecknoinits/gochecknoinits.go | 67 - .../gochecksumtype/gochecksumtype.go | 6 - .../pkg/golinters/gocritic/gocritic.go | 99 - .../pkg/golinters/godot/godot.go | 77 - .../pkg/golinters/godox/godox.go | 59 - .../pkg/golinters/gofmt/gofmt.go | 75 - .../pkg/golinters/gofumpt/gofumpt.go | 84 - .../pkg/golinters/goheader/goheader.go | 88 - .../pkg/golinters/goimports/goimports.go | 70 - .../gomoddirectives/gomoddirectives.go | 13 - .../pkg/golinters/gomodguard/gomodguard.go | 4 - .../pkg/golinters/gosec/gosec.go | 4 - .../golinters/gosmopolitan/gosmopolitan.go | 17 - .../pkg/golinters/govet/govet.go | 22 - .../pkg/golinters/grouper/grouper.go | 10 - .../pkg/golinters/importas/importas.go | 5 - .../pkg/golinters/internal/diff.go | 209 - .../pkg/golinters/internal/util.go | 22 - .../golangci-lint/pkg/golinters/lll/lll.go | 110 - .../pkg/golinters/loggercheck/loggercheck.go | 3 - .../pkg/golinters/maintidx/maintidx.go | 17 - .../pkg/golinters/makezero/makezero.go | 59 - .../pkg/golinters/mirror/mirror.go | 68 - .../pkg/golinters/misspell/misspell.go | 96 - .../pkg/golinters/musttag/musttag.go | 8 - .../pkg/golinters/nestif/nestif.go | 65 - .../pkg/golinters/nilnil/nilnil.go | 14 - .../nolintlint/internal/nolintlint.go | 227 - .../pkg/golinters/nolintlint/nolintlint.go | 84 - .../pkg/golinters/prealloc/prealloc.go | 49 - .../pkg/golinters/protogetter/protogetter.go | 62 - .../pkg/golinters/recvcheck/recvcheck.go | 8 - .../pkg/golinters/revive/revive.go | 24 - .../pkg/golinters/tagalign/tagalign.go | 57 - .../pkg/golinters/tagliatelle/tagliatelle.go | 15 - .../pkg/golinters/testpackage/testpackage.go | 16 - .../pkg/golinters/thelper/thelper.go | 20 - .../pkg/golinters/unparam/unparam.go | 55 - .../golinters/usestdlibvars/usestdlibvars.go | 25 - .../pkg/golinters/whitespace/whitespace.go | 90 - .../pkg/golinters/wrapcheck/wrapcheck.go | 3 - .../golangci-lint/pkg/lint/linter/config.go | 9 - .../pkg/lint/lintersdb/builder_linter.go | 132 - .../golangci/golangci-lint/pkg/lint/runner.go | 10 - .../golangci-lint/pkg/printers/codeclimate.go | 4 - .../golangci-lint/pkg/printers/teamcity.go | 14 - .../golangci-lint/pkg/result/issue.go | 34 - .../pkg/result/processors/fixer.go | 244 -- .../pkg/result/processors/max_from_linter.go | 4 - .../pkg/result/processors/max_same_issues.go | 4 - .../pkg/result/processors/severity.go | 10 - .../pkg/result/processors/uniq_by_line.go | 8 - vendor/github.com/golangci/modinfo/.gitignore | 1 - .../github.com/golangci/modinfo/.golangci.yml | 157 - vendor/github.com/golangci/modinfo/LICENSE | 674 --- vendor/github.com/golangci/modinfo/Makefile | 12 - vendor/github.com/golangci/modinfo/module.go | 157 - vendor/github.com/golangci/modinfo/readme.md | 73 - .../github.com/google/cel-go/cel/BUILD.bazel | 7 - vendor/github.com/google/cel-go/cel/decls.go | 25 - vendor/github.com/google/cel-go/cel/env.go | 85 - .../github.com/google/cel-go/cel/inlining.go | 4 - vendor/github.com/google/cel-go/cel/io.go | 126 - .../github.com/google/cel-go/cel/library.go | 72 - .../github.com/google/cel-go/cel/optimizer.go | 35 - .../github.com/google/cel-go/cel/options.go | 33 - .../github.com/google/cel-go/cel/program.go | 23 - .../google/cel-go/checker/BUILD.bazel | 4 - .../google/cel-go/checker/checker.go | 28 - .../github.com/google/cel-go/checker/cost.go 
| 336 -- .../google/cel-go/checker/standard.go | 35 - .../google/cel-go/common/BUILD.bazel | 4 - .../google/cel-go/common/ast/BUILD.bazel | 15 - .../google/cel-go/common/ast/ast.go | 34 - .../google/cel-go/common/ast/conversion.go | 41 - .../google/cel-go/common/ast/expr.go | 18 - .../google/cel-go/common/ast/factory.go | 40 - .../google/cel-go/common/ast/navigable.go | 3 - .../cel-go/common/containers/container.go | 9 - .../google/cel-go/common/debug/debug.go | 7 - .../google/cel-go/common/decls/decls.go | 31 - .../github.com/google/cel-go/common/error.go | 18 - .../github.com/google/cel-go/common/errors.go | 9 - .../google/cel-go/common/runes/buffer.go | 48 - .../github.com/google/cel-go/common/source.go | 26 - .../google/cel-go/common/stdlib/BUILD.bazel | 8 - .../google/cel-go/common/stdlib/standard.go | 60 - .../google/cel-go/common/types/BUILD.bazel | 6 - .../google/cel-go/common/types/bytes.go | 4 - .../google/cel-go/common/types/err.go | 3 - .../google/cel-go/common/types/list.go | 17 - .../google/cel-go/common/types/map.go | 30 - .../google/cel-go/common/types/null.go | 8 - .../google/cel-go/common/types/object.go | 4 - .../google/cel-go/common/types/pb/type.go | 19 - .../google/cel-go/common/types/provider.go | 3 - .../cel-go/common/types/traits/iterator.go | 3 - .../cel-go/common/types/traits/lister.go | 3 - .../cel-go/common/types/traits/mapper.go | 3 - .../cel-go/common/types/traits/traits.go | 5 - .../google/cel-go/common/types/types.go | 86 - .../google/cel-go/interpreter/activation.go | 41 - .../cel-go/interpreter/attribute_patterns.go | 25 - .../google/cel-go/interpreter/attributes.go | 181 - .../cel-go/interpreter/interpretable.go | 91 - .../google/cel-go/interpreter/planner.go | 27 - .../google/cel-go/interpreter/prune.go | 33 - .../google/cel-go/interpreter/runtimecost.go | 39 - .../github.com/google/cel-go/parser/errors.go | 13 - .../google/cel-go/parser/gen/BUILD.bazel | 4 - .../google/cel-go/parser/gen/CEL.g4 | 20 +- .../google/cel-go/parser/gen/CEL.interp | 15 +- .../google/cel-go/parser/gen/CEL.tokens | 3 - .../google/cel-go/parser/gen/CELLexer.interp | 15 +- .../google/cel-go/parser/gen/CELLexer.tokens | 3 - .../cel-go/parser/gen/cel_base_listener.go | 15 - .../cel-go/parser/gen/cel_base_visitor.go | 15 - .../google/cel-go/parser/gen/cel_lexer.go | 328 -- .../google/cel-go/parser/gen/cel_listener.go | 24 - .../google/cel-go/parser/gen/cel_parser.go | 3056 +------------- .../google/cel-go/parser/gen/cel_visitor.go | 21 - .../github.com/google/cel-go/parser/helper.go | 36 - .../github.com/google/cel-go/parser/macro.go | 61 - .../google/cel-go/parser/options.go | 6 - .../github.com/google/cel-go/parser/parser.go | 70 - .../google/cel-go/parser/unescape.go | 69 - .../google/cel-go/parser/unparser.go | 26 - .../certificate-transparency-go/AUTHORS | 7 - .../certificate-transparency-go/CHANGELOG.md | 3 - .../certificate-transparency-go/CONTRIBUTORS | 4 - .../certificate-transparency-go/README.md | 4 - .../internal/redact/redact.go | 12 - .../pkg/authn/keychain.go | 8 - .../go-containerregistry/pkg/name/ref.go | 4 - .../pkg/v1/mutate/mutate.go | 14 - .../pkg/v1/remote/referrers.go | 4 - .../pkg/v1/remote/transport/bearer.go | 38 - .../internal/proto/v2/s2a_go_proto/s2a.pb.go | 156 - .../google/s2a-go/internal/v2/s2av2.go | 21 - .../v2/tlsconfigstore/tlsconfigstore.go | 4 - vendor/github.com/google/s2a-go/s2a.go | 19 - .../github.com/google/s2a-go/s2a_options.go | 63 - .../google/s2a-go/stream/s2a_stream.go | 6 - .../gax-go/v2/.release-please-manifest.json | 4 - 
.../googleapis/gax-go/v2/CHANGES.md | 3 - .../googleapis/gax-go/v2/internal/version.go | 4 - .../gax-go/v2/internallog/internallog.go | 4 - .../grpc-gateway/v2/runtime/convert.go | 12 - .../grpc-gateway/v2/runtime/errors.go | 3 - .../grpc-gateway/v2/runtime/fieldmask.go | 4 - .../grpc-gateway/v2/runtime/handler.go | 8 - .../grpc-gateway/v2/runtime/marshaler.go | 3 - .../v2/runtime/marshaler_registry.go | 5 - .../grpc-gateway/v2/runtime/proto2_convert.go | 8 - .../grpc-gateway/v2/runtime/query.go | 8 - .../grpc-gateway/v2/utilities/pattern.go | 4 - .../v2/utilities/string_array_flag.go | 4 - .../in-toto/archivista/pkg/api/client.go | 126 - .../in-toto/archivista/pkg/api/download.go | 19 - .../in-toto/archivista/pkg/api/graphql.go | 37 - .../in-toto/archivista/pkg/api/upload.go | 26 - .../github.com/jjti/go-spancheck/.gitignore | 5 +- .../jjti/go-spancheck/.golangci.yml | 13 - vendor/github.com/jjti/go-spancheck/go.work | 4 - .../github.com/jjti/go-spancheck/go.work.sum | 5 - .../github.com/jjti/go-spancheck/spancheck.go | 20 - vendor/github.com/julz/importas/analyzer.go | 12 - vendor/github.com/julz/importas/config.go | 23 - vendor/github.com/julz/importas/flags.go | 23 - .../ldez/gomoddirectives/.golangci.yml | 53 - .../github.com/ldez/gomoddirectives/LICENSE | 4 - .../ldez/gomoddirectives/gomoddirectives.go | 69 - .../github.com/ldez/gomoddirectives/module.go | 42 - .../github.com/ldez/gomoddirectives/readme.md | 17 - .../github.com/ldez/tagliatelle/.golangci.yml | 65 - vendor/github.com/ldez/tagliatelle/readme.md | 48 - .../ldez/tagliatelle/tagliatelle.go | 121 - .../magiconair/properties/CHANGELOG.md | 205 - .../magiconair/properties/README.md | 39 - .../magiconair/properties/decode.go | 9 - .../github.com/magiconair/properties/load.go | 21 - .../mitchellh/mapstructure/CHANGELOG.md | 3 - .../mitchellh/mapstructure/decode_hooks.go | 4 - .../mitchellh/mapstructure/mapstructure.go | 7 - .../nunnatsa/ginkgolinter/README.md | 58 - .../nunnatsa/ginkgolinter/analyzer.go | 18 - .../github.com/nunnatsa/ginkgolinter/doc.go | 4 - .../internal/expression/actual/actual.go | 14 - .../internal/expression/actual/actualarg.go | 12 - .../internal/expression/expression.go | 32 - .../internal/expression/matcher/matcher.go | 9 - .../internal/expression/value/value.go | 3 - .../internal/ginkgohandler/handling.go | 8 - .../internal/gomegahandler/dothandler.go | 33 - .../internal/gomegahandler/handler.go | 11 - .../internal/gomegahandler/namedhandler.go | 36 - .../internal/rules/asyncfunccallrule.go | 8 - .../internal/rules/asynctimeintervalsrule.go | 4 - .../internal/rules/equaldifferenttypesrule.go | 4 - .../internal/rules/equalnilrule.go | 4 - .../ginkgolinter/internal/rules/havelen0.go | 4 - .../internal/rules/haveoccurredrule.go | 4 - .../internal/rules/nilcomparerule.go | 6 - .../internal/rules/succeedrule.go | 4 - .../nunnatsa/ginkgolinter/types/boolean.go | 32 - .../nunnatsa/ginkgolinter/types/config.go | 19 - .../prometheus/common/expfmt/decode.go | 22 - .../prometheus/common/expfmt/encode.go | 45 - .../prometheus/common/expfmt/expfmt.go | 47 - .../common/expfmt/openmetrics_create.go | 17 - .../prometheus/common/expfmt/text_create.go | 8 - .../prometheus/common/expfmt/text_parse.go | 97 - .../prometheus/common/model/alert.go | 14 - .../prometheus/common/model/labels.go | 18 - .../common/model/labelset_string.go | 5 - .../common/model/labelset_string_go120.go | 39 - .../prometheus/common/model/metric.go | 98 - .../prometheus/common/model/silence.go | 30 - .../prometheus/common/model/value_float.go | 7 
- .../common/model/value_histogram.go | 15 - .../github.com/raeperd/recvcheck/.gitignore | 5 - vendor/github.com/raeperd/recvcheck/Makefile | 12 - vendor/github.com/raeperd/recvcheck/README.md | 11 - .../github.com/raeperd/recvcheck/analyzer.go | 51 - .../santhosh-tekuri/jsonschema/v5/.gitignore | 4 - .../santhosh-tekuri/jsonschema/v5/.gitmodules | 3 - .../santhosh-tekuri/jsonschema/v5/LICENSE | 175 - .../santhosh-tekuri/jsonschema/v5/README.md | 220 - .../santhosh-tekuri/jsonschema/v5/compiler.go | 812 ---- .../santhosh-tekuri/jsonschema/v5/content.go | 29 - .../santhosh-tekuri/jsonschema/v5/doc.go | 49 - .../santhosh-tekuri/jsonschema/v5/draft.go | 1454 ------- .../santhosh-tekuri/jsonschema/v5/errors.go | 129 - .../jsonschema/v5/extension.go | 116 - .../santhosh-tekuri/jsonschema/v5/format.go | 567 --- .../jsonschema/v5/httploader/httploader.go | 38 - .../santhosh-tekuri/jsonschema/v5/loader.go | 60 - .../santhosh-tekuri/jsonschema/v5/output.go | 77 - .../santhosh-tekuri/jsonschema/v5/resource.go | 280 -- .../santhosh-tekuri/jsonschema/v5/schema.go | 900 ---- .../usestdlibvars/pkg/analyzer/analyzer.go | 48 - .../go-securesystemslib/dsse/verify.go | 4 - .../signerverifier/ecdsa.go | 7 - .../signerverifier/ed25519.go | 3 - .../go-securesystemslib/signerverifier/rsa.go | 9 - .../signerverifier/signerverifier.go | 10 - .../signerverifier/utils.go | 3 - .../cosign/v2/cmd/cosign/cli/fulcio/fulcio.go | 11 - .../cosign/v2/cmd/cosign/cli/options/copy.go | 4 - .../v2/cmd/cosign/cli/options/deprecate.go | 3 - .../v2/cmd/cosign/cli/options/initialize.go | 8 - .../v2/cmd/cosign/cli/options/registry.go | 20 - .../sigstore/cosign/v2/pkg/blob/load.go | 6 - .../cosign/v2/pkg/cosign/git/gitlab/gitlab.go | 4 - .../sigstore/cosign/v2/pkg/cosign/verify.go | 16 - .../cosign/v2/pkg/oci/remote/options.go | 3 - .../gen/pb-go/bundle/v1/sigstore_bundle.pb.go | 174 - .../gen/pb-go/common/v1/sigstore_common.pb.go | 493 --- .../gen/pb-go/dsse/envelope.pb.go | 84 - .../gen/pb-go/rekor/v1/sigstore_rekor.pb.go | 190 - .../v0.0.1/hashedrekord_v0_0_1_schema.json | 4 - .../pkg/signature/kms/azure/README.md | 14 - .../pkg/signature/kms/azure/client.go | 120 - .../sigstore/pkg/signature/kms/kms.go | 19 - .../pkg/verification/verify.go | 4 - vendor/github.com/spf13/pflag/flag.go | 47 - vendor/github.com/spf13/pflag/ip.go | 3 - vendor/github.com/spf13/pflag/string_array.go | 8 - .../go-spiffe/v2/bundle/jwtbundle/bundle.go | 7 - .../go-spiffe/v2/bundle/jwtbundle/doc.go | 12 - .../v2/bundle/spiffebundle/bundle.go | 7 - .../go-spiffe/v2/bundle/spiffebundle/doc.go | 12 - .../go-spiffe/v2/bundle/x509bundle/doc.go | 12 - .../spiffe/go-spiffe/v2/workloadapi/addr.go | 10 - .../go-spiffe/v2/workloadapi/client_posix.go | 4 - .../v2/workloadapi/client_windows.go | 4 - .../pkg/analyzer/analyzer.go | 18 - .../pipeline/pkg/apis/config/default.go | 30 - .../pkg/apis/pipeline/v1/param_types.go | 4 - .../v1alpha1/stepaction_validation.go | 7 - .../pkg/apis/pipeline/v1beta1/param_types.go | 4 - .../apis/pipeline/v1beta1/task_validation.go | 8 - .../pipeline/pkg/substitution/substitution.go | 8 - .../tektoncd/pipeline/test/e2e-tests.sh | 5 - .../tektoncd/pipeline/test/featureflags.go | 4 - .../tektoncd/pipeline/test/presubmit-tests.sh | 21 - .../github.com/tektoncd/plumbing/.gitignore | 5 +- vendor/github.com/tektoncd/plumbing/OWNERS | 11 - .../github.com/tetafro/godot/.goreleaser.yml | 3 - vendor/github.com/tetafro/godot/getters.go | 55 - .../bodyclose/passes/bodyclose/bodyclose.go | 17 - .../wrapcheck/v2/wrapcheck/wrapcheck.go | 7 - 
vendor/github.com/ultraware/funlen/README.md | 3 - vendor/github.com/ultraware/funlen/main.go | 124 - .../github.com/ultraware/whitespace/README.md | 4 - .../ultraware/whitespace/whitespace.go | 152 - vendor/github.com/uudashr/gocognit/README.md | 31 - vendor/github.com/uudashr/gocognit/doc.go | 4 - .../github.com/uudashr/gocognit/gocognit.go | 148 - vendor/github.com/uudashr/gocognit/recv.go | 3 - .../uudashr/gocognit/recv_pre118.go | 3 - .../github.com/uudashr/iface/opaque/opaque.go | 8 - .../github.com/uudashr/iface/unused/unused.go | 25 - .../vbatts/tar-split/archive/tar/reader.go | 3 - vendor/github.com/xanzy/go-gitlab/.gitignore | 33 - .../github.com/xanzy/go-gitlab/.golangci.yml | 56 - .../xanzy/go-gitlab/CONTRIBUTING.md | 53 - vendor/github.com/xanzy/go-gitlab/LICENSE | 201 - vendor/github.com/xanzy/go-gitlab/Makefile | 22 - vendor/github.com/xanzy/go-gitlab/README.md | 208 - .../xanzy/go-gitlab/access_requests.go | 253 -- .../github.com/xanzy/go-gitlab/appearance.go | 110 - .../xanzy/go-gitlab/applications.go | 106 - .../xanzy/go-gitlab/audit_events.go | 202 - vendor/github.com/xanzy/go-gitlab/avatar.go | 64 - .../xanzy/go-gitlab/award_emojis.go | 468 --- vendor/github.com/xanzy/go-gitlab/boards.go | 367 -- vendor/github.com/xanzy/go-gitlab/branches.go | 252 -- .../xanzy/go-gitlab/broadcast_messages.go | 191 - .../xanzy/go-gitlab/ci_yml_templates.go | 95 - .../xanzy/go-gitlab/client_options.go | 142 - .../xanzy/go-gitlab/cluster_agents.go | 294 -- vendor/github.com/xanzy/go-gitlab/commits.go | 610 --- .../xanzy/go-gitlab/container_registry.go | 311 -- .../xanzy/go-gitlab/custom_attributes.go | 188 - .../github.com/xanzy/go-gitlab/deploy_keys.go | 275 -- .../xanzy/go-gitlab/deploy_tokens.go | 290 -- .../github.com/xanzy/go-gitlab/deployments.go | 260 -- .../go-gitlab/deployments_merge_requests.go | 53 - .../github.com/xanzy/go-gitlab/discussions.go | 1143 ----- .../xanzy/go-gitlab/dockerfile_templates.go | 93 - .../xanzy/go-gitlab/dora_metrics.go | 110 - .../github.com/xanzy/go-gitlab/draft_notes.go | 233 -- .../xanzy/go-gitlab/environments.go | 238 -- .../github.com/xanzy/go-gitlab/epic_issues.go | 152 - vendor/github.com/xanzy/go-gitlab/epics.go | 275 -- .../xanzy/go-gitlab/error_tracking.go | 196 - .../xanzy/go-gitlab/event_parsing.go | 312 -- .../xanzy/go-gitlab/event_systemhook_types.go | 249 -- .../xanzy/go-gitlab/event_webhook_types.go | 1265 ------ vendor/github.com/xanzy/go-gitlab/events.go | 231 -- .../xanzy/go-gitlab/external_status_checks.go | 218 - .../xanzy/go-gitlab/feature_flags.go | 96 - .../xanzy/go-gitlab/freeze_periods.go | 194 - .../xanzy/go-gitlab/generic_packages.go | 158 - .../github.com/xanzy/go-gitlab/geo_nodes.go | 433 -- .../xanzy/go-gitlab/gitignore_templates.go | 93 - vendor/github.com/xanzy/go-gitlab/gitlab.go | 1049 ----- .../xanzy/go-gitlab/group_access_tokens.go | 199 - .../xanzy/go-gitlab/group_badges.go | 237 -- .../xanzy/go-gitlab/group_boards.go | 353 -- .../xanzy/go-gitlab/group_clusters.go | 217 - .../xanzy/go-gitlab/group_epic_boards.go | 104 - .../github.com/xanzy/go-gitlab/group_hooks.go | 268 -- .../xanzy/go-gitlab/group_import_export.go | 180 - .../xanzy/go-gitlab/group_iterations.go | 90 - .../xanzy/go-gitlab/group_labels.go | 258 -- .../xanzy/go-gitlab/group_members.go | 391 -- .../xanzy/go-gitlab/group_milestones.go | 322 -- .../go-gitlab/group_protected_environments.go | 281 -- .../group_repository_storage_move.go | 195 - .../xanzy/go-gitlab/group_serviceaccounts.go | 181 - .../xanzy/go-gitlab/group_ssh_certificates.go | 105 - 
.../xanzy/go-gitlab/group_variables.go | 218 - .../github.com/xanzy/go-gitlab/group_wikis.go | 204 - vendor/github.com/xanzy/go-gitlab/groups.go | 1177 ------ vendor/github.com/xanzy/go-gitlab/import.go | 266 -- .../xanzy/go-gitlab/instance_clusters.go | 153 - .../xanzy/go-gitlab/instance_variables.go | 186 - vendor/github.com/xanzy/go-gitlab/invites.go | 176 - .../github.com/xanzy/go-gitlab/issue_links.go | 186 - vendor/github.com/xanzy/go-gitlab/issues.go | 791 ---- .../xanzy/go-gitlab/issues_statistics.go | 187 - .../xanzy/go-gitlab/job_token_scope.go | 284 -- vendor/github.com/xanzy/go-gitlab/jobs.go | 585 --- vendor/github.com/xanzy/go-gitlab/keys.go | 97 - vendor/github.com/xanzy/go-gitlab/labels.go | 317 -- vendor/github.com/xanzy/go-gitlab/license.go | 128 - .../xanzy/go-gitlab/license_templates.go | 109 - vendor/github.com/xanzy/go-gitlab/markdown.go | 47 - .../xanzy/go-gitlab/member_roles.go | 144 - .../go-gitlab/merge_request_approvals.go | 440 -- .../xanzy/go-gitlab/merge_requests.go | 1080 ----- .../xanzy/go-gitlab/merge_trains.go | 170 - vendor/github.com/xanzy/go-gitlab/metadata.go | 63 - .../github.com/xanzy/go-gitlab/milestones.go | 272 -- .../github.com/xanzy/go-gitlab/namespaces.go | 174 - vendor/github.com/xanzy/go-gitlab/notes.go | 696 ---- .../xanzy/go-gitlab/notifications.go | 242 -- vendor/github.com/xanzy/go-gitlab/packages.go | 261 -- vendor/github.com/xanzy/go-gitlab/pages.go | 45 - .../xanzy/go-gitlab/pages_domains.go | 216 - .../xanzy/go-gitlab/personal_access_tokens.go | 222 - .../xanzy/go-gitlab/pipeline_schedules.go | 385 -- .../xanzy/go-gitlab/pipeline_triggers.go | 248 -- .../github.com/xanzy/go-gitlab/pipelines.go | 408 -- .../github.com/xanzy/go-gitlab/plan_limits.go | 104 - .../xanzy/go-gitlab/project_access_tokens.go | 200 - .../xanzy/go-gitlab/project_badges.go | 230 - .../xanzy/go-gitlab/project_clusters.go | 236 -- .../xanzy/go-gitlab/project_feature_flags.go | 246 -- .../xanzy/go-gitlab/project_import_export.go | 225 - .../xanzy/go-gitlab/project_iterations.go | 90 - .../go-gitlab/project_managed_licenses.go | 188 - .../xanzy/go-gitlab/project_members.go | 238 -- .../xanzy/go-gitlab/project_mirror.go | 195 - .../project_repository_storage_move.go | 199 - .../xanzy/go-gitlab/project_snippets.go | 209 - .../xanzy/go-gitlab/project_templates.go | 110 - .../xanzy/go-gitlab/project_variables.go | 232 -- .../go-gitlab/project_vulnerabilities.go | 150 - vendor/github.com/xanzy/go-gitlab/projects.go | 2263 ---------- .../xanzy/go-gitlab/protected_branches.go | 257 -- .../xanzy/go-gitlab/protected_environments.go | 282 -- .../xanzy/go-gitlab/protected_tags.go | 176 - .../xanzy/go-gitlab/releaselinks.go | 201 - vendor/github.com/xanzy/go-gitlab/releases.go | 281 -- .../xanzy/go-gitlab/repositories.go | 421 -- .../xanzy/go-gitlab/repository_files.go | 385 -- .../xanzy/go-gitlab/repository_submodules.go | 93 - .../xanzy/go-gitlab/request_options.go | 102 - .../xanzy/go-gitlab/resource_group.go | 165 - .../go-gitlab/resource_iteration_events.go | 122 - .../xanzy/go-gitlab/resource_label_events.go | 220 - .../go-gitlab/resource_milestone_events.go | 155 - .../xanzy/go-gitlab/resource_state_events.go | 154 - .../xanzy/go-gitlab/resource_weight_events.go | 80 - vendor/github.com/xanzy/go-gitlab/runners.go | 597 --- vendor/github.com/xanzy/go-gitlab/search.go | 359 -- vendor/github.com/xanzy/go-gitlab/services.go | 2179 ---------- vendor/github.com/xanzy/go-gitlab/settings.go | 965 ----- .../xanzy/go-gitlab/sidekiq_metrics.go | 157 - 
.../snippet_repository_storage_move.go | 203 - vendor/github.com/xanzy/go-gitlab/snippets.go | 314 -- vendor/github.com/xanzy/go-gitlab/strings.go | 93 - .../xanzy/go-gitlab/system_hooks.go | 176 - vendor/github.com/xanzy/go-gitlab/tags.go | 248 -- .../github.com/xanzy/go-gitlab/time_stats.go | 180 - vendor/github.com/xanzy/go-gitlab/todos.go | 163 - vendor/github.com/xanzy/go-gitlab/topics.go | 222 - vendor/github.com/xanzy/go-gitlab/types.go | 979 ----- vendor/github.com/xanzy/go-gitlab/users.go | 1591 ------- vendor/github.com/xanzy/go-gitlab/validate.go | 154 - vendor/github.com/xanzy/go-gitlab/version.go | 58 - vendor/github.com/xanzy/go-gitlab/wikis.go | 204 - vendor/github.com/zeebo/errs/AUTHORS | 4 - vendor/github.com/zeebo/errs/README.md | 4 - vendor/github.com/zeebo/errs/errs.go | 131 - vendor/github.com/zeebo/errs/group.go | 20 - .../contrib/detectors/gcp/version.go | 4 - .../google.golang.org/grpc/otelgrpc/config.go | 47 - .../grpc/otelgrpc/stats_handler.go | 29 - .../grpc/otelgrpc/version.go | 4 - .../net/http/otelhttp/client.go | 12 - .../net/http/otelhttp/common.go | 10 - .../net/http/otelhttp/handler.go | 41 - .../internal/request/resp_writer_wrapper.go | 8 - .../net/http/otelhttp/internal/semconv/env.go | 92 - .../otelhttp/internal/semconv/httpconv.go | 76 - .../http/otelhttp/internal/semconv/util.go | 14 - .../http/otelhttp/internal/semconv/v1.20.0.go | 70 - .../net/http/otelhttp/transport.go | 81 - .../net/http/otelhttp/version.go | 4 - vendor/go.opentelemetry.io/otel/.gitignore | 11 - vendor/go.opentelemetry.io/otel/.golangci.yml | 35 - vendor/go.opentelemetry.io/otel/CHANGELOG.md | 15 - vendor/go.opentelemetry.io/otel/CODEOWNERS | 6 - .../go.opentelemetry.io/otel/CONTRIBUTING.md | 22 - vendor/go.opentelemetry.io/otel/Makefile | 29 - vendor/go.opentelemetry.io/otel/README.md | 31 - vendor/go.opentelemetry.io/otel/RELEASING.md | 20 - vendor/go.opentelemetry.io/otel/VERSIONING.md | 4 - .../go.opentelemetry.io/otel/attribute/set.go | 42 - .../otel/baggage/baggage.go | 20 - .../go.opentelemetry.io/otel/codes/codes.go | 7 - .../otel/internal/attribute/attribute.go | 36 - .../otel/internal/global/instruments.go | 28 - .../otel/internal/global/meter.go | 206 - .../otel/internal/global/trace.go | 13 - .../otel/internal/rawhelpers.go | 4 - .../otel/metric/asyncfloat64.go | 4 - .../otel/metric/asyncint64.go | 4 - .../otel/metric/instrument.go | 4 - vendor/go.opentelemetry.io/otel/renovate.json | 7 - .../otel/sdk/instrumentation/scope.go | 6 - .../otel/sdk/metric/config.go | 45 - .../otel/sdk/metric/exemplar.go | 52 - .../otel/sdk/metric/instrument.go | 16 - .../metric/internal/aggregate/aggregate.go | 17 - .../sdk/metric/internal/aggregate/exemplar.go | 7 - .../aggregate/exponential_histogram.go | 20 - .../metric/internal/aggregate/histogram.go | 24 - .../metric/internal/aggregate/lastvalue.go | 23 - .../otel/sdk/metric/internal/aggregate/sum.go | 34 - .../otel/sdk/metric/internal/exemplar/doc.go | 6 - .../otel/sdk/metric/internal/exemplar/drop.go | 23 - .../sdk/metric/internal/exemplar/exemplar.go | 29 - .../sdk/metric/internal/exemplar/filter.go | 29 - .../internal/exemplar/filtered_reservoir.go | 49 - .../otel/sdk/metric/internal/exemplar/hist.go | 46 - .../otel/sdk/metric/internal/exemplar/rand.go | 191 - .../sdk/metric/internal/exemplar/reservoir.go | 32 - .../sdk/metric/internal/exemplar/storage.go | 95 - .../sdk/metric/internal/exemplar/value.go | 58 - .../otel/sdk/metric/internal/x/x.go | 36 - .../otel/sdk/metric/manual_reader.go | 12 - .../otel/sdk/metric/meter.go | 112 
- .../otel/sdk/metric/periodic_reader.go | 12 - .../otel/sdk/metric/pipeline.go | 144 - .../otel/sdk/metric/provider.go | 13 - .../otel/sdk/metric/version.go | 4 - .../otel/sdk/metric/view.go | 8 - .../otel/sdk/resource/auto.go | 61 - .../otel/sdk/resource/builtin.go | 10 - .../otel/sdk/resource/host_id_windows.go | 12 - .../otel/sdk/resource/os_windows.go | 4 - .../go.opentelemetry.io/otel/sdk/version.go | 4 - .../go.opentelemetry.io/otel/trace/config.go | 4 - .../go.opentelemetry.io/otel/trace/context.go | 4 - vendor/go.opentelemetry.io/otel/trace/doc.go | 4 - .../otel/verify_examples.sh | 74 - vendor/go.opentelemetry.io/otel/version.go | 4 - vendor/go.opentelemetry.io/otel/versions.yaml | 26 - vendor/go.step.sm/crypto/LICENSE | 201 - .../crypto/fingerprint/fingerprint.go | 78 - .../crypto/internal/bcrypt_pbkdf/LICENSE | 27 - .../crypto/internal/bcrypt_pbkdf/README | 22 - .../internal/bcrypt_pbkdf/bcrypt_pbkdf.go | 100 - .../go.step.sm/crypto/internal/emoji/emoji.go | 274 -- vendor/go.step.sm/crypto/internal/utils/io.go | 70 - .../crypto/internal/utils/utfbom/LICENSE | 201 - .../crypto/internal/utils/utfbom/README.md | 66 - .../crypto/internal/utils/utfbom/utfbom.go | 195 - vendor/go.step.sm/crypto/jose/encrypt.go | 135 - vendor/go.step.sm/crypto/jose/generate.go | 204 - vendor/go.step.sm/crypto/jose/options.go | 125 - vendor/go.step.sm/crypto/jose/parse.go | 411 -- vendor/go.step.sm/crypto/jose/types.go | 310 -- vendor/go.step.sm/crypto/jose/validate.go | 221 - vendor/go.step.sm/crypto/jose/x25519.go | 66 - .../go.step.sm/crypto/keyutil/fingerprint.go | 74 - vendor/go.step.sm/crypto/keyutil/key.go | 265 -- vendor/go.step.sm/crypto/pemutil/cosign.go | 79 - vendor/go.step.sm/crypto/pemutil/pem.go | 856 ---- vendor/go.step.sm/crypto/pemutil/pkcs8.go | 353 -- vendor/go.step.sm/crypto/pemutil/ssh.go | 299 -- vendor/go.step.sm/crypto/randutil/random.go | 113 - vendor/go.step.sm/crypto/x25519/x25519.go | 321 -- vendor/golang.org/x/crypto/pkcs12/crypto.go | 4 - vendor/golang.org/x/net/http2/config.go | 4 - vendor/golang.org/x/net/http2/config_go124.go | 4 - vendor/golang.org/x/net/http2/transport.go | 19 - vendor/golang.org/x/oauth2/google/default.go | 6 - .../x/oauth2/google/externalaccount/aws.go | 4 - .../google/externalaccount/basecredentials.go | 16 - vendor/golang.org/x/oauth2/oauth2.go | 4 - .../x/tools/go/analysis/analysis.go | 10 - .../go/analysis/passes/buildtag/buildtag.go | 33 - .../tools/go/analysis/passes/printf/printf.go | 3 - .../go/analysis/passes/structtag/structtag.go | 4 - .../x/tools/go/ast/inspector/inspector.go | 43 - .../x/tools/go/ast/inspector/iter.go | 8 - .../x/tools/go/ast/inspector/typeof.go | 6 - .../x/tools/go/gcexportdata/gcexportdata.go | 20 - .../x/tools/go/packages/external.go | 14 - .../golang.org/x/tools/go/packages/golist.go | 17 - .../x/tools/go/packages/loadmode_string.go | 6 - .../x/tools/go/packages/packages.go | 67 - vendor/golang.org/x/tools/go/ssa/const.go | 51 - vendor/golang.org/x/tools/go/ssa/dom.go | 3 - vendor/golang.org/x/tools/go/ssa/util.go | 17 - .../x/tools/go/types/typeutil/map.go | 250 -- .../internal/analysisinternal/analysis.go | 265 -- .../x/tools/internal/gcimporter/exportdata.go | 91 - .../x/tools/internal/gcimporter/gcimporter.go | 205 - .../x/tools/internal/gcimporter/iimport.go | 8 - .../tools/internal/gcimporter/ureader_yes.go | 12 - .../x/tools/internal/imports/source.go | 4 - .../x/tools/internal/imports/source_env.go | 8 - .../internal/packagesinternal/packages.go | 8 - .../x/tools/internal/stdlib/manifest.go | 227 - 
.../x/tools/internal/typeparams/common.go | 75 - .../x/tools/internal/typesinternal/recv.go | 3 - .../x/tools/internal/typesinternal/types.go | 3 - .../x/tools/internal/versions/constraint.go | 13 - .../internal/versions/constraint_go121.go | 14 - .../api/googleapi/googleapi.go | 20 - .../iamcredentials/v1/iamcredentials-gen.go | 99 - .../google.golang.org/api/idtoken/idtoken.go | 3 - .../google.golang.org/api/internal/creds.go | 3 - .../api/internal/gensupport/media.go | 3 - .../api/internal/gensupport/resumable.go | 16 - .../api/internal/gensupport/send.go | 6 - .../google.golang.org/api/internal/version.go | 4 - vendor/google.golang.org/api/option/option.go | 9 - .../api/storage/v1/storage-api.json | 11 - .../api/storage/v1/storage-gen.go | 1279 ------ .../api/transport/grpc/dial.go | 24 - .../api/transport/http/dial.go | 34 - .../http/internal/propagation/http.go | 87 - .../googleapis/api/annotations/client.pb.go | 356 -- .../googleapis/api/httpbody/httpbody.pb.go | 9 - .../googleapis/api/metric/metric.pb.go | 84 - .../rpc/errdetails/error_details.pb.go | 68 - .../grpc/balancer/balancer.go | 105 - .../grpclb/grpc_lb_v1/load_balancer.pb.go | 225 - .../grpc/balancer/grpclb/grpclb_picker.go | 12 - .../balancer/leastrequest/leastrequest.go | 4 - .../balancer/pickfirst/internal/internal.go | 7 - .../grpc/balancer/pickfirst/pickfirst.go | 4 - .../pickfirst/pickfirstleaf/pickfirstleaf.go | 241 -- .../grpc/balancer/rls/config.go | 4 - .../grpc/balancer/rls/control_channel.go | 4 - .../rls/internal/adaptive/adaptive.go | 9 - .../grpc/balancer/roundrobin/roundrobin.go | 8 - .../balancer/weightedroundrobin/balancer.go | 405 -- .../balancer/weightedroundrobin/scheduler.go | 19 - .../weightedroundrobin/weightedroundrobin.go | 3 - .../balancer/weightedtarget/weightedtarget.go | 10 - .../grpc/balancer_wrapper.go | 19 - .../grpc_binarylog_v1/binarylog.pb.go | 201 - vendor/google.golang.org/grpc/clientconn.go | 19 - vendor/google.golang.org/grpc/codec.go | 4 - .../internal/proto/grpc_gcp/altscontext.pb.go | 33 - .../internal/proto/grpc_gcp/handshaker.pb.go | 249 -- .../grpc_gcp/transport_security_common.pb.go | 57 - .../grpc/credentials/google/google.go | 28 - .../google.golang.org/grpc/credentials/tls.go | 11 - vendor/google.golang.org/grpc/dialoptions.go | 31 - .../grpc/experimental/stats/metricregistry.go | 38 - .../grpc/experimental/stats/metrics.go | 78 - .../grpc/grpclog/internal/loggerv2.go | 74 - .../grpc/health/grpc_health_v1/health.pb.go | 57 - .../grpc/internal/backoff/backoff.go | 4 - .../grpc/internal/envconfig/envconfig.go | 4 - .../grpc/internal/envconfig/xds.go | 3 - .../grpc/internal/hierarchy/hierarchy.go | 6 - .../grpc/internal/internal.go | 13 - .../internal/proto/grpc_lookup_v1/rls.pb.go | 57 - .../proto/grpc_lookup_v1/rls_config.pb.go | 177 - .../internal/resolver/dns/dns_resolver.go | 45 - .../grpc/internal/transport/flowcontrol.go | 11 - .../grpc/internal/transport/handler_server.go | 51 - .../grpc/internal/transport/http2_client.go | 114 - .../grpc/internal/transport/http2_server.go | 111 - .../grpc/internal/transport/transport.go | 377 -- .../grpc/internal/wrr/random.go | 16 - .../internal/xds/matcher/matcher_header.go | 40 - .../grpc/internal/xds/rbac/matchers.go | 11 - .../grpc/internal/xds/rbac/rbac_engine.go | 3 - .../grpc/mem/buffer_slice.go | 11 - vendor/google.golang.org/grpc/preloader.go | 8 - .../grpc/resolver/resolver.go | 11 - vendor/google.golang.org/grpc/rpc_util.go | 56 - vendor/google.golang.org/grpc/server.go | 136 - .../google.golang.org/grpc/service_config.go 
| 24 - .../grpc/stats/opentelemetry/LICENSE | 202 - .../stats/opentelemetry/client_metrics.go | 17 - .../grpc/stats/opentelemetry/opentelemetry.go | 59 - .../stats/opentelemetry/server_metrics.go | 14 - vendor/google.golang.org/grpc/stats/stats.go | 79 - vendor/google.golang.org/grpc/stream.go | 113 - vendor/google.golang.org/grpc/version.go | 4 - .../grpc/xds/googledirectpath/googlec2p.go | 28 - .../balancer/clusterimpl/clusterimpl.go | 216 - .../internal/balancer/clusterimpl/picker.go | 18 - .../balancer/clustermanager/clustermanager.go | 13 - .../clusterresolver/clusterresolver.go | 20 - .../balancer/clusterresolver/configbuilder.go | 93 - .../clusterresolver/resource_resolver.go | 8 - .../clusterresolver/resource_resolver_dns.go | 32 - .../balancer/outlierdetection/balancer.go | 116 - .../outlierdetection/subconn_wrapper.go | 28 - .../internal/balancer/priority/balancer.go | 9 - .../internal/balancer/ringhash/ringhash.go | 4 - .../internal/balancer/wrrlocality/balancer.go | 3 - .../xds/internal/httpfilter/fault/fault.go | 8 - .../grpc/xds/internal/internal.go | 3 - .../xds/internal/resolver/serviceconfig.go | 4 - .../xds/internal/resolver/xds_resolver.go | 17 - .../grpc/xds/internal/xdsclient/authority.go | 597 --- .../grpc/xds/internal/xdsclient/client.go | 6 - .../grpc/xds/internal/xdsclient/client_new.go | 58 - .../internal/xdsclient/client_refcounted.go | 39 - .../grpc/xds/internal/xdsclient/clientimpl.go | 64 - .../xdsclient/clientimpl_authority.go | 146 - .../xds/internal/xdsclient/clientimpl_dump.go | 7 - .../xdsclient/clientimpl_loadreport.go | 18 - .../internal/xdsclient/clientimpl_watchers.go | 43 - .../internal/xdsclient/internal/internal.go | 5 - .../grpc/xds/internal/xdsclient/logging.go | 7 - .../xdsclient/transport/internal/internal.go | 25 - .../xdsclient/transport/loadreport.go | 259 -- .../internal/xdsclient/transport/transport.go | 702 ---- .../xdsresource/cluster_resource_type.go | 5 - .../xdsresource/endpoints_resource_type.go | 5 - .../xdsresource/listener_resource_type.go | 5 - .../internal/xdsclient/xdsresource/matcher.go | 12 - .../xdsclient/xdsresource/resource_type.go | 14 - .../xdsresource/route_config_resource_type.go | 5 - .../xdsclient/xdsresource/type_eds.go | 4 - .../xdsclient/xdsresource/unmarshal_eds.go | 14 - vendor/modules.txt | 603 +-- .../release-utils/version/command.go | 6 - .../release-utils/version/version.go | 36 - 1056 files changed, 15 insertions(+), 102818 deletions(-) delete mode 100644 vendor/cloud.google.com/go/auth/httptransport/trace.go delete mode 100644 vendor/filippo.io/edwards25519/LICENSE delete mode 100644 vendor/filippo.io/edwards25519/README.md delete mode 100644 vendor/filippo.io/edwards25519/doc.go delete mode 100644 vendor/filippo.io/edwards25519/edwards25519.go delete mode 100644 vendor/filippo.io/edwards25519/extra.go delete mode 100644 vendor/filippo.io/edwards25519/field/fe.go delete mode 100644 vendor/filippo.io/edwards25519/field/fe_amd64.go delete mode 100644 vendor/filippo.io/edwards25519/field/fe_amd64.s delete mode 100644 vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go delete mode 100644 vendor/filippo.io/edwards25519/field/fe_arm64.go delete mode 100644 vendor/filippo.io/edwards25519/field/fe_arm64.s delete mode 100644 vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go delete mode 100644 vendor/filippo.io/edwards25519/field/fe_extra.go delete mode 100644 vendor/filippo.io/edwards25519/field/fe_generic.go delete mode 100644 vendor/filippo.io/edwards25519/scalar.go delete mode 100644 
vendor/filippo.io/edwards25519/scalar_fiat.go delete mode 100644 vendor/filippo.io/edwards25519/scalarmult.go delete mode 100644 vendor/filippo.io/edwards25519/tables.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/ci.securitykeyvault.yml delete mode 100644 vendor/github.com/buildkite/agent/v3/api/auth.go delete mode 100644 vendor/github.com/go-jose/go-jose/v3/cryptosigner/cryptosigner.go delete mode 100644 vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_base.go delete mode 100644 vendor/github.com/golangci/modinfo/.gitignore delete mode 100644 vendor/github.com/golangci/modinfo/.golangci.yml delete mode 100644 vendor/github.com/golangci/modinfo/LICENSE delete mode 100644 vendor/github.com/golangci/modinfo/Makefile delete mode 100644 vendor/github.com/golangci/modinfo/module.go delete mode 100644 vendor/github.com/golangci/modinfo/readme.md delete mode 100644 vendor/github.com/google/cel-go/checker/standard.go delete mode 100644 vendor/github.com/in-toto/archivista/pkg/api/client.go delete mode 100644 vendor/github.com/magiconair/properties/CHANGELOG.md delete mode 100644 vendor/github.com/nunnatsa/ginkgolinter/types/boolean.go delete mode 100644 vendor/github.com/prometheus/common/model/labelset_string_go120.go delete mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitignore delete mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitmodules delete mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/LICENSE delete mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/README.md delete mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/compiler.go delete mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/content.go delete mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/doc.go delete mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/draft.go delete mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/errors.go delete mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/extension.go delete mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/format.go delete mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/httploader/httploader.go delete mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/loader.go delete mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/output.go delete mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/resource.go delete mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/schema.go delete mode 100644 vendor/github.com/ultraware/funlen/main.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/.gitignore delete mode 100644 vendor/github.com/xanzy/go-gitlab/.golangci.yml delete mode 100644 vendor/github.com/xanzy/go-gitlab/CONTRIBUTING.md delete mode 100644 vendor/github.com/xanzy/go-gitlab/LICENSE delete mode 100644 vendor/github.com/xanzy/go-gitlab/Makefile delete mode 100644 vendor/github.com/xanzy/go-gitlab/README.md delete mode 100644 vendor/github.com/xanzy/go-gitlab/access_requests.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/appearance.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/applications.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/audit_events.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/avatar.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/award_emojis.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/boards.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/branches.go delete mode 100644 
vendor/github.com/xanzy/go-gitlab/broadcast_messages.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/ci_yml_templates.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/client_options.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/cluster_agents.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/commits.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/container_registry.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/custom_attributes.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/deploy_keys.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/deploy_tokens.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/deployments.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/deployments_merge_requests.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/discussions.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/dockerfile_templates.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/dora_metrics.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/draft_notes.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/environments.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/epic_issues.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/epics.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/error_tracking.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/event_parsing.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/event_systemhook_types.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/event_webhook_types.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/events.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/external_status_checks.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/feature_flags.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/freeze_periods.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/generic_packages.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/geo_nodes.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/gitignore_templates.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/gitlab.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/group_access_tokens.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/group_badges.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/group_boards.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/group_clusters.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/group_epic_boards.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/group_hooks.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/group_import_export.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/group_iterations.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/group_labels.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/group_members.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/group_milestones.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/group_protected_environments.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/group_repository_storage_move.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/group_serviceaccounts.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/group_ssh_certificates.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/group_variables.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/group_wikis.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/groups.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/import.go delete mode 100644 
vendor/github.com/xanzy/go-gitlab/instance_clusters.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/instance_variables.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/invites.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/issue_links.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/issues.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/issues_statistics.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/job_token_scope.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/jobs.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/keys.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/labels.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/license.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/license_templates.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/markdown.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/member_roles.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/merge_request_approvals.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/merge_requests.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/merge_trains.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/metadata.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/milestones.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/namespaces.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/notes.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/notifications.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/packages.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/pages.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/pages_domains.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/personal_access_tokens.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/pipeline_schedules.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/pipeline_triggers.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/pipelines.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/plan_limits.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/project_access_tokens.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/project_badges.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/project_clusters.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/project_feature_flags.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/project_import_export.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/project_iterations.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/project_managed_licenses.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/project_members.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/project_mirror.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/project_repository_storage_move.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/project_snippets.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/project_templates.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/project_variables.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/project_vulnerabilities.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/projects.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/protected_branches.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/protected_environments.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/protected_tags.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/releaselinks.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/releases.go delete mode 100644 
vendor/github.com/xanzy/go-gitlab/repositories.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/repository_files.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/repository_submodules.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/request_options.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/resource_group.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/resource_iteration_events.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/resource_label_events.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/resource_milestone_events.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/resource_state_events.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/resource_weight_events.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/runners.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/search.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/services.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/settings.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/sidekiq_metrics.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/snippet_repository_storage_move.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/snippets.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/strings.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/system_hooks.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/tags.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/time_stats.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/todos.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/topics.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/types.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/users.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/validate.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/version.go delete mode 100644 vendor/github.com/xanzy/go-gitlab/wikis.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go delete mode 100644 vendor/go.opentelemetry.io/otel/verify_examples.sh delete mode 100644 vendor/go.step.sm/crypto/LICENSE delete mode 100644 vendor/go.step.sm/crypto/fingerprint/fingerprint.go delete mode 100644 vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/LICENSE delete mode 100644 vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/README delete mode 100644 vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/bcrypt_pbkdf.go delete mode 100644 vendor/go.step.sm/crypto/internal/emoji/emoji.go delete mode 100644 vendor/go.step.sm/crypto/internal/utils/io.go delete mode 100644 vendor/go.step.sm/crypto/internal/utils/utfbom/LICENSE delete mode 100644 vendor/go.step.sm/crypto/internal/utils/utfbom/README.md delete mode 
100644 vendor/go.step.sm/crypto/internal/utils/utfbom/utfbom.go delete mode 100644 vendor/go.step.sm/crypto/jose/encrypt.go delete mode 100644 vendor/go.step.sm/crypto/jose/generate.go delete mode 100644 vendor/go.step.sm/crypto/jose/options.go delete mode 100644 vendor/go.step.sm/crypto/jose/parse.go delete mode 100644 vendor/go.step.sm/crypto/jose/types.go delete mode 100644 vendor/go.step.sm/crypto/jose/validate.go delete mode 100644 vendor/go.step.sm/crypto/jose/x25519.go delete mode 100644 vendor/go.step.sm/crypto/keyutil/fingerprint.go delete mode 100644 vendor/go.step.sm/crypto/keyutil/key.go delete mode 100644 vendor/go.step.sm/crypto/pemutil/cosign.go delete mode 100644 vendor/go.step.sm/crypto/pemutil/pem.go delete mode 100644 vendor/go.step.sm/crypto/pemutil/pkcs8.go delete mode 100644 vendor/go.step.sm/crypto/pemutil/ssh.go delete mode 100644 vendor/go.step.sm/crypto/randutil/random.go delete mode 100644 vendor/go.step.sm/crypto/x25519/x25519.go delete mode 100644 vendor/golang.org/x/tools/internal/versions/constraint.go delete mode 100644 vendor/golang.org/x/tools/internal/versions/constraint_go121.go delete mode 100644 vendor/google.golang.org/api/transport/http/internal/propagation/http.go delete mode 100644 vendor/google.golang.org/grpc/stats/opentelemetry/LICENSE delete mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go delete mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/internal/internal.go delete mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go delete mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go diff --git a/go.mod b/go.mod index dd6f078de0..8219735deb 100644 --- a/go.mod +++ b/go.mod @@ -5,17 +5,8 @@ go 1.23.4 toolchain go1.23.6 require ( -<<<<<<< HEAD cloud.google.com/go/compute/metadata v0.6.0 cloud.google.com/go/storage v1.50.0 -======= - cloud.google.com/go/compute/metadata v0.5.2 - cloud.google.com/go/storage v1.48.0 -<<<<<<< HEAD - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc ->>>>>>> 3b9297a78 ([WIP] uploads to archivista) -======= ->>>>>>> 06c2f6a79 ([WIP] add archivista storage backend) github.com/fsnotify/fsnotify v1.8.0 github.com/golangci/golangci-lint v1.63.4 github.com/google/addlicense v1.1.1 @@ -25,7 +16,7 @@ require ( github.com/google/go-licenses v1.6.0 github.com/grafeas/grafeas v0.2.3 github.com/hashicorp/go-multierror v1.1.1 - github.com/in-toto/archivista v0.5.4 + github.com/in-toto/archivista v0.9.0 github.com/in-toto/attestation v1.1.0 github.com/in-toto/go-witness v0.7.0 github.com/in-toto/in-toto-golang v0.9.1-0.20240317085821-8e2966059a09 diff --git a/go.sum b/go.sum index 72862c71cb..35efdba315 100644 --- a/go.sum +++ b/go.sum @@ -865,8 +865,8 @@ github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/in-toto/archivista v0.8.0 h1:l8zb28HdKCuzEWWGtOksRCoeibOuyh7DG/1ao6O7/TE= -github.com/in-toto/archivista v0.8.0/go.mod h1:uXFghXRS0PTLeJXsnhnABc40ruDLzjhMSAg0zLgLzGo= +github.com/in-toto/archivista v0.9.0 h1:XlS+jkrcFjmwSMhp6BZbP5y8FOvFPXM1h23WvCDT8bQ= +github.com/in-toto/archivista v0.9.0/go.mod 
h1:cLhrICj86j+8wJZmrUzDbNQdcwdc2lqX+v1SKV4tXpE= github.com/in-toto/attestation v1.1.0 h1:oRWzfmZPDSctChD0VaQV7MJrywKOzyNrtpENQFq//2Q= github.com/in-toto/attestation v1.1.0/go.mod h1:DB59ytd3z7cIHgXxwpSX2SABrU6WJUKg/grpdgHVgVs= github.com/in-toto/go-witness v0.7.0 h1:I48FUCLfyos0uCSlHJoqCJO6HjtxF2f/y65TQVpxd8k= diff --git a/vendor/cel.dev/expr/.bazelversion b/vendor/cel.dev/expr/.bazelversion index 56b46a3ad2..26bc914a3b 100644 --- a/vendor/cel.dev/expr/.bazelversion +++ b/vendor/cel.dev/expr/.bazelversion @@ -1,6 +1,2 @@ -<<<<<<< HEAD 7.0.1 -======= -6.4.0 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) # Keep this pinned version in parity with cel-go diff --git a/vendor/cel.dev/expr/.gitignore b/vendor/cel.dev/expr/.gitignore index 0cae1c6dd0..0d4fed27c9 100644 --- a/vendor/cel.dev/expr/.gitignore +++ b/vendor/cel.dev/expr/.gitignore @@ -1,5 +1,2 @@ bazel-* -<<<<<<< HEAD MODULE.bazel.lock -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/cel.dev/expr/BUILD.bazel b/vendor/cel.dev/expr/BUILD.bazel index 701c29c88c..37d8adc950 100644 --- a/vendor/cel.dev/expr/BUILD.bazel +++ b/vendor/cel.dev/expr/BUILD.bazel @@ -16,11 +16,7 @@ go_library( importpath = "cel.dev/expr", visibility = ["//visibility:public"], deps = [ -<<<<<<< HEAD "@org_golang_google_genproto_googleapis_rpc//status:go_default_library", -======= - "//proto/cel/expr:google_rpc_status_go_proto", ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "@org_golang_google_protobuf//reflect/protoreflect", "@org_golang_google_protobuf//runtime/protoimpl", "@org_golang_google_protobuf//types/known/anypb", diff --git a/vendor/cel.dev/expr/README.md b/vendor/cel.dev/expr/README.md index 542619e17c..7930c0b755 100644 --- a/vendor/cel.dev/expr/README.md +++ b/vendor/cel.dev/expr/README.md @@ -33,12 +33,7 @@ The required components of a system that supports CEL are: * The textual representation of an expression as written by a developer. It is of similar syntax to expressions in C/C++/Java/JavaScript -<<<<<<< HEAD * A representation of the program's abstract syntax tree (AST). -======= -* A binary representation of an expression. It is an abstract syntax tree - (AST). ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) * A compiler library that converts the textual representation to the binary representation. This can be done ahead of time (in the control plane) or just before evaluation (in the data plane). @@ -47,7 +42,6 @@ The required components of a system that supports CEL are: * An evaluator library that takes the binary format in the context and produces a result, usually a Boolean. -<<<<<<< HEAD For use cases which require persistence or cross-process communication, it is highly recommended to serialize the type-checked expression as a protocol buffer. 
The CEL team will maintain canonical protocol buffers for ASTs and @@ -57,8 +51,6 @@ will keep these versions identical and wire-compatible in perpetuity: * [CEL v1alpha1](https://github.com/googleapis/googleapis/tree/master/google/api/expr/v1alpha1) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Example of boolean conditions and object construction: ``` c diff --git a/vendor/cel.dev/expr/WORKSPACE b/vendor/cel.dev/expr/WORKSPACE index 441f6e8a0d..b6dc9ed673 100644 --- a/vendor/cel.dev/expr/WORKSPACE +++ b/vendor/cel.dev/expr/WORKSPACE @@ -27,7 +27,6 @@ http_archive( ], ) -<<<<<<< HEAD # googleapis as of 09/16/2024 http_archive( name = "com_google_googleapis", @@ -35,15 +34,6 @@ http_archive( sha256 = "57319889d47578b3c89bf1b3f34888d796a8913d63b32d750a4cd12ed303c4e8", urls = [ "https://github.com/googleapis/googleapis/archive/4082d5e51e8481f6ccc384cacd896f4e78f19dee.tar.gz", -======= -# googleapis as of 05/26/2023 -http_archive( - name = "com_google_googleapis", - strip_prefix = "googleapis-07c27163ac591955d736f3057b1619ece66f5b99", - sha256 = "bd8e735d881fb829751ecb1a77038dda4a8d274c45490cb9fcf004583ee10571", - urls = [ - "https://github.com/googleapis/googleapis/archive/07c27163ac591955d736f3057b1619ece66f5b99.tar.gz", ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ], ) @@ -105,39 +95,22 @@ switched_rules_by_language( # Do *not* call *_dependencies(), etc, yet. See comment at the end. # Generated Google APIs protos for Golang -<<<<<<< HEAD # Generated Google APIs protos for Golang 08/26/2024 -======= -# Generated Google APIs protos for Golang 05/25/2023 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) go_repository( name = "org_golang_google_genproto_googleapis_api", build_file_proto_mode = "disable_global", importpath = "google.golang.org/genproto/googleapis/api", -<<<<<<< HEAD sum = "h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw=", version = "v0.0.0-20240826202546-f6391c0de4c7", ) # Generated Google APIs protos for Golang 08/26/2024 -======= - sum = "h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ=", - version = "v0.0.0-20230525234035-dd9d682886f9", ) - -# Generated Google APIs protos for Golang 05/25/2023 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) go_repository( name = "org_golang_google_genproto_googleapis_rpc", build_file_proto_mode = "disable_global", importpath = "google.golang.org/genproto/googleapis/rpc", -<<<<<<< HEAD sum = "h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs=", version = "v0.0.0-20240826202546-f6391c0de4c7", -======= - sum = "h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM=", - version = "v0.0.0-20230525234030-28d5490b6b19", ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) # gRPC deps diff --git a/vendor/cel.dev/expr/cloudbuild.yaml b/vendor/cel.dev/expr/cloudbuild.yaml index 6224a7eeca..c40881f122 100644 --- a/vendor/cel.dev/expr/cloudbuild.yaml +++ b/vendor/cel.dev/expr/cloudbuild.yaml @@ -1,15 +1,8 @@ steps: -<<<<<<< HEAD - name: 'gcr.io/cloud-builders/bazel:7.0.1' entrypoint: bazel args: ['build', '...'] id: bazel-build -======= -- name: 'gcr.io/cloud-builders/bazel:6.4.0' - entrypoint: bazel - args: ['test', '--test_output=errors', '...'] - id: bazel-test ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) waitFor: ['-'] timeout: 15m options: diff --git a/vendor/cel.dev/expr/regen_go_proto.sh b/vendor/cel.dev/expr/regen_go_proto.sh index 3d0fe8217e..fdcbb3ce25 100644 --- a/vendor/cel.dev/expr/regen_go_proto.sh +++ b/vendor/cel.dev/expr/regen_go_proto.sh @@ -1,17 +1,9 @@ #!/bin/sh -<<<<<<< 
HEAD bazel build //proto/cel/expr/conformance/... files=($(bazel aquery 'kind(proto, //proto/cel/expr/conformance/...)' | grep Outputs | grep "[.]pb[.]go" | sed 's/Outputs: \[//' | sed 's/\]//' | tr "," "\n")) for src in ${files[@]}; do dst=$(echo $src | sed 's/\(.*\/cel.dev\/expr\/\(.*\)\)/\2/') -======= -bazel build //proto/test/... -files=($(bazel aquery 'kind(proto, //proto/...)' | grep Outputs | grep "[.]pb[.]go" | sed 's/Outputs: \[//' | sed 's/\]//' | tr "," "\n")) -for src in ${files[@]}; -do - dst=$(echo $src | sed 's/\(.*\%\/github.com\/google\/cel-spec\/\(.*\)\)/\2/') ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) echo "copying $dst" $(cp $src $dst) done diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md index b01854d82f..466426c0d8 100644 --- a/vendor/cloud.google.com/go/auth/CHANGES.md +++ b/vendor/cloud.google.com/go/auth/CHANGES.md @@ -1,6 +1,5 @@ # Changelog -<<<<<<< HEAD ## [0.14.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.13.0...auth/v0.14.0) (2025-01-08) @@ -47,8 +46,6 @@ * **auth:** Ensure endpoints are present in Validator ([#11209](https://github.com/googleapis/google-cloud-go/issues/11209)) ([106cd53](https://github.com/googleapis/google-cloud-go/commit/106cd53309facaef1b8ea78376179f523f6912b9)), refs [#11006](https://github.com/googleapis/google-cloud-go/issues/11006) [#11190](https://github.com/googleapis/google-cloud-go/issues/11190) [#11189](https://github.com/googleapis/google-cloud-go/issues/11189) [#11188](https://github.com/googleapis/google-cloud-go/issues/11188) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## [0.11.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.10.2...auth/v0.11.0) (2024-11-21) diff --git a/vendor/cloud.google.com/go/auth/auth.go b/vendor/cloud.google.com/go/auth/auth.go index 0bf3eeb11e..cd5e988684 100644 --- a/vendor/cloud.google.com/go/auth/auth.go +++ b/vendor/cloud.google.com/go/auth/auth.go @@ -24,10 +24,7 @@ import ( "encoding/json" "errors" "fmt" -<<<<<<< HEAD "log/slog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "net/http" "net/url" "strings" @@ -36,10 +33,7 @@ import ( "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/jwt" -<<<<<<< HEAD "github.com/googleapis/gax-go/v2/internallog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const ( @@ -498,14 +492,11 @@ type Options2LO struct { // UseIDToken requests that the token returned be an ID token if one is // returned from the server. Optional. UseIDToken bool -<<<<<<< HEAD // Logger is used for debug logging. If provided, logging will be enabled // at the loggers configured level. By default logging is disabled unless // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default // logger will be used. Optional. 
Logger *slog.Logger -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (o *Options2LO) client() *http.Client { @@ -536,20 +527,13 @@ func New2LOTokenProvider(opts *Options2LO) (TokenProvider, error) { if err := opts.validate(); err != nil { return nil, err } -<<<<<<< HEAD return tokenProvider2LO{opts: opts, Client: opts.client(), logger: internallog.New(opts.Logger)}, nil -======= - return tokenProvider2LO{opts: opts, Client: opts.client()}, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type tokenProvider2LO struct { opts *Options2LO Client *http.Client -<<<<<<< HEAD logger *slog.Logger -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (tp tokenProvider2LO) Token(ctx context.Context) (*Token, error) { @@ -584,18 +568,12 @@ func (tp tokenProvider2LO) Token(ctx context.Context) (*Token, error) { return nil, err } req.Header.Set("Content-Type", "application/x-www-form-urlencoded") -<<<<<<< HEAD tp.logger.DebugContext(ctx, "2LO token request", "request", internallog.HTTPRequest(req, []byte(v.Encode()))) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) resp, body, err := internal.DoRequest(tp.Client, req) if err != nil { return nil, fmt.Errorf("auth: cannot fetch token: %w", err) } -<<<<<<< HEAD tp.logger.DebugContext(ctx, "2LO token response", "response", internallog.HTTPResponse(resp, body)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { return nil, &Error{ Response: resp, diff --git a/vendor/cloud.google.com/go/auth/credentials/compute.go b/vendor/cloud.google.com/go/auth/credentials/compute.go index 70c496133a..8afd0472ea 100644 --- a/vendor/cloud.google.com/go/auth/credentials/compute.go +++ b/vendor/cloud.google.com/go/auth/credentials/compute.go @@ -37,16 +37,11 @@ var ( // computeTokenProvider creates a [cloud.google.com/go/auth.TokenProvider] that // uses the metadata service to retrieve tokens. -<<<<<<< HEAD func computeTokenProvider(opts *DetectOptions, client *metadata.Client) auth.TokenProvider { return auth.NewCachedTokenProvider(&computeProvider{ scopes: opts.Scopes, client: client, }, &auth.CachedTokenProviderOptions{ -======= -func computeTokenProvider(opts *DetectOptions) auth.TokenProvider { - return auth.NewCachedTokenProvider(computeProvider{scopes: opts.Scopes}, &auth.CachedTokenProviderOptions{ ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ExpireEarly: opts.EarlyTokenRefresh, DisableAsyncRefresh: opts.DisableAsyncRefresh, }) @@ -55,10 +50,7 @@ func computeTokenProvider(opts *DetectOptions) auth.TokenProvider { // computeProvider fetches tokens from the google cloud metadata service. 
type computeProvider struct { scopes []string -<<<<<<< HEAD client *metadata.Client -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type metadataTokenResp struct { @@ -67,11 +59,7 @@ type metadataTokenResp struct { TokenType string `json:"token_type"` } -<<<<<<< HEAD func (cs *computeProvider) Token(ctx context.Context) (*auth.Token, error) { -======= -func (cs computeProvider) Token(ctx context.Context) (*auth.Token, error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) tokenURI, err := url.Parse(computeTokenURI) if err != nil { return nil, err @@ -81,11 +69,7 @@ func (cs computeProvider) Token(ctx context.Context) (*auth.Token, error) { v.Set("scopes", strings.Join(cs.scopes, ",")) tokenURI.RawQuery = v.Encode() } -<<<<<<< HEAD tokenJSON, err := cs.client.GetWithContext(ctx, tokenURI.String()) -======= - tokenJSON, err := metadata.GetWithContext(ctx, tokenURI.String()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, fmt.Errorf("credentials: cannot fetch token: %w", err) } diff --git a/vendor/cloud.google.com/go/auth/credentials/detect.go b/vendor/cloud.google.com/go/auth/credentials/detect.go index 4865d7ddeb..a1b5a93188 100644 --- a/vendor/cloud.google.com/go/auth/credentials/detect.go +++ b/vendor/cloud.google.com/go/auth/credentials/detect.go @@ -19,10 +19,7 @@ import ( "encoding/json" "errors" "fmt" -<<<<<<< HEAD "log/slog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "net/http" "os" "time" @@ -31,10 +28,7 @@ import ( "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/credsfile" "cloud.google.com/go/compute/metadata" -<<<<<<< HEAD "github.com/googleapis/gax-go/v2/internallog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const ( @@ -104,7 +98,6 @@ func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) { } if OnGCE() { -<<<<<<< HEAD metadataClient := metadata.NewWithOptions(&metadata.Options{ Logger: opts.logger(), }) @@ -116,14 +109,6 @@ func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) { UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{ MetadataClient: metadataClient, }, -======= - return auth.NewCredentials(&auth.CredentialsOptions{ - TokenProvider: computeTokenProvider(opts), - ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) { - return metadata.ProjectIDWithContext(ctx) - }), - UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{}, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }), nil } @@ -180,14 +165,11 @@ type DetectOptions struct { // The default value is "googleapis.com". This option is ignored for // authentication flows that do not support universe domain. Optional. UniverseDomain string -<<<<<<< HEAD // Logger is used for debug logging. If provided, logging will be enabled // at the loggers configured level. By default logging is disabled unless // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default // logger will be used. Optional. 
Logger *slog.Logger -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (o *DetectOptions) validate() error { @@ -223,13 +205,10 @@ func (o *DetectOptions) client() *http.Client { return internal.DefaultClient() } -<<<<<<< HEAD func (o *DetectOptions) logger() *slog.Logger { return internallog.New(o.Logger) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func readCredentialsFile(filename string, opts *DetectOptions) (*auth.Credentials, error) { b, err := os.ReadFile(filename) if err != nil { @@ -290,10 +269,7 @@ func clientCredConfigFromJSON(b []byte, opts *DetectOptions) *auth.Options3LO { AuthURL: c.AuthURI, TokenURL: c.TokenURI, Client: opts.client(), -<<<<<<< HEAD Logger: opts.logger(), -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) EarlyTokenExpiry: opts.EarlyTokenRefresh, AuthHandlerOpts: handleOpts, // TODO(codyoss): refactor this out. We need to add in auto-detection diff --git a/vendor/cloud.google.com/go/auth/credentials/filetypes.go b/vendor/cloud.google.com/go/auth/credentials/filetypes.go index 91ccfbc6c9..e5243e6cfb 100644 --- a/vendor/cloud.google.com/go/auth/credentials/filetypes.go +++ b/vendor/cloud.google.com/go/auth/credentials/filetypes.go @@ -141,10 +141,7 @@ func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions) TokenURL: f.TokenURL, Subject: opts.Subject, Client: opts.client(), -<<<<<<< HEAD Logger: opts.logger(), -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if opts2LO.TokenURL == "" { opts2LO.TokenURL = jwtTokenURL @@ -163,10 +160,7 @@ func handleUserCredential(f *credsfile.UserCredentialsFile, opts *DetectOptions) EarlyTokenExpiry: opts.EarlyTokenRefresh, RefreshToken: f.RefreshToken, Client: opts.client(), -<<<<<<< HEAD Logger: opts.logger(), -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return auth.New3LOTokenProvider(opts3LO) } @@ -185,10 +179,7 @@ func handleExternalAccount(f *credsfile.ExternalAccountFile, opts *DetectOptions Scopes: opts.scopes(), WorkforcePoolUserProject: f.WorkforcePoolUserProject, Client: opts.client(), -<<<<<<< HEAD Logger: opts.logger(), -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) IsDefaultClient: opts.Client == nil, } if f.ServiceAccountImpersonation != nil { @@ -207,10 +198,7 @@ func handleExternalAccountAuthorizedUser(f *credsfile.ExternalAccountAuthorizedU ClientSecret: f.ClientSecret, Scopes: opts.scopes(), Client: opts.client(), -<<<<<<< HEAD Logger: opts.logger(), -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return externalaccountuser.NewTokenProvider(externalOpts) } @@ -230,10 +218,7 @@ func handleImpersonatedServiceAccount(f *credsfile.ImpersonatedServiceAccountFil Tp: tp, Delegates: f.Delegates, Client: opts.client(), -<<<<<<< HEAD Logger: opts.logger(), -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }) } @@ -241,9 +226,6 @@ func handleGDCHServiceAccount(f *credsfile.GDCHServiceAccountFile, opts *DetectO return gdch.NewTokenProvider(f, &gdch.Options{ STSAudience: opts.STSAudience, Client: opts.client(), -<<<<<<< HEAD Logger: opts.logger(), -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }) } diff --git a/vendor/cloud.google.com/go/auth/credentials/idtoken/cache.go b/vendor/cloud.google.com/go/auth/credentials/idtoken/cache.go index 7574d53f90..2fbbdb8072 100644 --- a/vendor/cloud.google.com/go/auth/credentials/idtoken/cache.go +++ b/vendor/cloud.google.com/go/auth/credentials/idtoken/cache.go @@ -18,10 +18,7 
@@ import ( "context" "encoding/json" "fmt" -<<<<<<< HEAD "log/slog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "net/http" "strconv" "strings" @@ -29,10 +26,7 @@ import ( "time" "cloud.google.com/go/auth/internal" -<<<<<<< HEAD "github.com/googleapis/gax-go/v2/internallog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) type cachingClient struct { @@ -42,7 +36,6 @@ type cachingClient struct { // If nil, time.Now is used. clock func() time.Time -<<<<<<< HEAD mu sync.Mutex certs map[string]*cachedResponse logger *slog.Logger @@ -53,16 +46,6 @@ func newCachingClient(client *http.Client, logger *slog.Logger) *cachingClient { client: client, certs: make(map[string]*cachedResponse, 2), logger: logger, -======= - mu sync.Mutex - certs map[string]*cachedResponse -} - -func newCachingClient(client *http.Client) *cachingClient { - return &cachingClient{ - client: client, - certs: make(map[string]*cachedResponse, 2), ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -79,18 +62,12 @@ func (c *cachingClient) getCert(ctx context.Context, url string) (*certResponse, if err != nil { return nil, err } -<<<<<<< HEAD c.logger.DebugContext(ctx, "cert request", "request", internallog.HTTPRequest(req, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) resp, body, err := internal.DoRequest(c.client, req) if err != nil { return nil, err } -<<<<<<< HEAD c.logger.DebugContext(ctx, "cert response", "response", internallog.HTTPResponse(resp, body)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if resp.StatusCode != http.StatusOK { return nil, fmt.Errorf("idtoken: unable to retrieve cert, got status code %d", resp.StatusCode) } diff --git a/vendor/cloud.google.com/go/auth/credentials/idtoken/compute.go b/vendor/cloud.google.com/go/auth/credentials/idtoken/compute.go index a13eeb04f7..86f6cd77ac 100644 --- a/vendor/cloud.google.com/go/auth/credentials/idtoken/compute.go +++ b/vendor/cloud.google.com/go/auth/credentials/idtoken/compute.go @@ -23,10 +23,7 @@ import ( "cloud.google.com/go/auth" "cloud.google.com/go/auth/internal" "cloud.google.com/go/compute/metadata" -<<<<<<< HEAD "github.com/googleapis/gax-go/v2/internallog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const identitySuffix = "instance/service-accounts/default/identity" @@ -38,7 +35,6 @@ func computeCredentials(opts *Options) (*auth.Credentials, error) { if opts.CustomClaims != nil { return nil, fmt.Errorf("idtoken: Options.CustomClaims can't be used with the metadata service, please provide a service account if you would like to use this feature") } -<<<<<<< HEAD metadataClient := metadata.NewWithOptions(&metadata.Options{ Logger: internallog.New(opts.Logger), }) @@ -46,46 +42,27 @@ func computeCredentials(opts *Options) (*auth.Credentials, error) { audience: opts.Audience, format: opts.ComputeTokenFormat, client: metadataClient, -======= - tp := computeIDTokenProvider{ - audience: opts.Audience, - format: opts.ComputeTokenFormat, - client: *metadata.NewClient(opts.client()), ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return auth.NewCredentials(&auth.CredentialsOptions{ TokenProvider: auth.NewCachedTokenProvider(tp, &auth.CachedTokenProviderOptions{ ExpireEarly: 5 * time.Minute, }), ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) { -<<<<<<< HEAD return metadataClient.ProjectIDWithContext(ctx) }), UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{ 
MetadataClient: metadataClient, }, -======= - return metadata.ProjectIDWithContext(ctx) - }), - UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{}, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }), nil } type computeIDTokenProvider struct { audience string format ComputeTokenFormat -<<<<<<< HEAD client *metadata.Client } func (c *computeIDTokenProvider) Token(ctx context.Context) (*auth.Token, error) { -======= - client metadata.Client -} - -func (c computeIDTokenProvider) Token(ctx context.Context) (*auth.Token, error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) v := url.Values{} v.Set("audience", c.audience) if c.format != ComputeTokenFormatStandard { diff --git a/vendor/cloud.google.com/go/auth/credentials/idtoken/file.go b/vendor/cloud.google.com/go/auth/credentials/idtoken/file.go index 06a7b77fb4..87fab751fb 100644 --- a/vendor/cloud.google.com/go/auth/credentials/idtoken/file.go +++ b/vendor/cloud.google.com/go/auth/credentials/idtoken/file.go @@ -22,15 +22,10 @@ import ( "cloud.google.com/go/auth" "cloud.google.com/go/auth/credentials/impersonate" -<<<<<<< HEAD intimpersonate "cloud.google.com/go/auth/credentials/internal/impersonate" "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/credsfile" "github.com/googleapis/gax-go/v2/internallog" -======= - "cloud.google.com/go/auth/internal" - "cloud.google.com/go/auth/internal/credsfile" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const ( @@ -50,7 +45,6 @@ func credsFromDefault(creds *auth.Credentials, opts *Options) (*auth.Credentials if err != nil { return nil, err } -<<<<<<< HEAD var tp auth.TokenProvider if resolveUniverseDomain(f) == internal.DefaultUniverseDomain { tp, err = new2LOTokenProvider(f, opts) @@ -69,44 +63,13 @@ func credsFromDefault(creds *auth.Credentials, opts *Options) (*auth.Credentials Audience: opts.Audience, }, } -======= - opts2LO := &auth.Options2LO{ - Email: f.ClientEmail, - PrivateKey: []byte(f.PrivateKey), - PrivateKeyID: f.PrivateKeyID, - TokenURL: f.TokenURL, - UseIDToken: true, - } - if opts2LO.TokenURL == "" { - opts2LO.TokenURL = jwtTokenURL - } - - var customClaims map[string]interface{} - if opts != nil { - customClaims = opts.CustomClaims - } - if customClaims == nil { - customClaims = make(map[string]interface{}) - } - customClaims["target_audience"] = opts.Audience - - opts2LO.PrivateClaims = customClaims - tp, err := auth.New2LOTokenProvider(opts2LO) - if err != nil { - return nil, err ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } tp = auth.NewCachedTokenProvider(tp, nil) return auth.NewCredentials(&auth.CredentialsOptions{ TokenProvider: tp, JSON: b, -<<<<<<< HEAD ProjectIDProvider: auth.CredentialsPropertyFunc(creds.ProjectID), UniverseDomainProvider: auth.CredentialsPropertyFunc(creds.UniverseDomain), -======= - ProjectIDProvider: internal.StaticCredentialsProperty(f.ProjectID), - UniverseDomainProvider: internal.StaticCredentialsProperty(f.UniverseDomain), ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }), nil case credsfile.ImpersonatedServiceAccountKey, credsfile.ExternalAccountKey: type url struct { @@ -118,20 +81,13 @@ func credsFromDefault(creds *auth.Credentials, opts *Options) (*auth.Credentials } account := filepath.Base(accountURL.ServiceAccountImpersonationURL) account = strings.Split(account, ":")[0] -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) config := impersonate.IDTokenOptions{ Audience: opts.Audience, TargetPrincipal: account, 
IncludeEmail: true, Client: opts.client(), Credentials: creds, -<<<<<<< HEAD Logger: internallog.New(opts.Logger), -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } idTokenCreds, err := impersonate.NewIDTokenCredentials(&config) if err != nil { @@ -148,7 +104,6 @@ func credsFromDefault(creds *auth.Credentials, opts *Options) (*auth.Credentials return nil, fmt.Errorf("idtoken: unsupported credentials type: %v", t) } } -<<<<<<< HEAD func new2LOTokenProvider(f *credsfile.ServiceAccountFile, opts *Options) (auth.TokenProvider, error) { opts2LO := &auth.Options2LO{ @@ -185,5 +140,3 @@ func resolveUniverseDomain(f *credsfile.ServiceAccountFile) string { } return internal.DefaultUniverseDomain } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/cloud.google.com/go/auth/credentials/idtoken/idtoken.go b/vendor/cloud.google.com/go/auth/credentials/idtoken/idtoken.go index 0e03107bf1..86db9525df 100644 --- a/vendor/cloud.google.com/go/auth/credentials/idtoken/idtoken.go +++ b/vendor/cloud.google.com/go/auth/credentials/idtoken/idtoken.go @@ -22,10 +22,7 @@ package idtoken import ( "errors" -<<<<<<< HEAD "log/slog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "net/http" "os" @@ -89,7 +86,6 @@ type Options struct { // when fetching tokens. If provided this should be a fully-authenticated // client. Optional. Client *http.Client -<<<<<<< HEAD // UniverseDomain is the default service domain for a given Cloud universe. // The default value is "googleapis.com". This is the universe domain // configured for the client, which will be compared to the universe domain @@ -100,8 +96,6 @@ type Options struct { // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default // logger will be used. Optional. Logger *slog.Logger -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (o *Options) client() *http.Client { diff --git a/vendor/cloud.google.com/go/auth/credentials/idtoken/validate.go b/vendor/cloud.google.com/go/auth/credentials/idtoken/validate.go index 0d3da7b89e..c8175a6a8e 100644 --- a/vendor/cloud.google.com/go/auth/credentials/idtoken/validate.go +++ b/vendor/cloud.google.com/go/auth/credentials/idtoken/validate.go @@ -24,10 +24,7 @@ import ( "encoding/base64" "encoding/json" "fmt" -<<<<<<< HEAD "log/slog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "math/big" "net/http" "strings" @@ -35,7 +32,6 @@ import ( "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/jwt" -<<<<<<< HEAD "github.com/googleapis/gax-go/v2/internallog" ) @@ -49,18 +45,6 @@ const ( var ( defaultValidator = &Validator{client: newCachingClient(internal.DefaultClient(), internallog.New(nil))} -======= -) - -const ( - es256KeySize int = 32 - googleIAPCertsURL string = "https://www.gstatic.com/iap/verify/public_key-jwk" - googleSACertsURL string = "https://www.googleapis.com/oauth2/v3/certs" -) - -var ( - defaultValidator = &Validator{client: newCachingClient(internal.DefaultClient())} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // now aliases time.Now for testing. now = time.Now ) @@ -87,20 +71,15 @@ type jwk struct { // Validator provides a way to validate Google ID Tokens type Validator struct { -<<<<<<< HEAD client *cachingClient rs256URL string es256URL string -======= - client *cachingClient ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // ValidatorOptions provides a way to configure a [Validator]. 
type ValidatorOptions struct { // Client used to make requests to the certs URL. Optional. Client *http.Client -<<<<<<< HEAD // Custom certs URL for RS256 JWK to be used. If not provided, the default // Google oauth2 endpoint will be used. Optional. RS256CertsURL string @@ -112,14 +91,11 @@ type ValidatorOptions struct { // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default // Logger will be used. Optional. Logger *slog.Logger -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // NewValidator creates a Validator that uses the options provided to configure // the internal http.Client that will be used to make requests to fetch JWKs. func NewValidator(opts *ValidatorOptions) (*Validator, error) { -<<<<<<< HEAD if opts == nil { opts = &ValidatorOptions{} } @@ -131,15 +107,6 @@ func NewValidator(opts *ValidatorOptions) (*Validator, error) { es256URL := opts.ES256CertsURL logger := internallog.New(opts.Logger) return &Validator{client: newCachingClient(client, logger), rs256URL: rs256URL, es256URL: es256URL}, nil -======= - var client *http.Client - if opts != nil && opts.Client != nil { - client = opts.Client - } else { - client = internal.DefaultClient() - } - return &Validator{client: newCachingClient(client)}, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Validate is used to validate the provided idToken with a known Google cert @@ -194,11 +161,7 @@ func (v *Validator) validate(ctx context.Context, idToken string, audience strin if err := v.validateRS256(ctx, header.KeyID, hashedContent, sig); err != nil { return nil, err } -<<<<<<< HEAD case jwt.HeaderAlgES256: -======= - case "ES256": ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err := v.validateES256(ctx, header.KeyID, hashedContent, sig); err != nil { return nil, err } @@ -210,11 +173,7 @@ func (v *Validator) validate(ctx context.Context, idToken string, audience strin } func (v *Validator) validateRS256(ctx context.Context, keyID string, hashedContent []byte, sig []byte) error { -<<<<<<< HEAD certResp, err := v.client.getCert(ctx, v.rs256CertsURL()) -======= - certResp, err := v.client.getCert(ctx, googleSACertsURL) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -238,7 +197,6 @@ func (v *Validator) validateRS256(ctx context.Context, keyID string, hashedConte return rsa.VerifyPKCS1v15(pk, crypto.SHA256, hashedContent, sig) } -<<<<<<< HEAD func (v *Validator) rs256CertsURL() string { if v.rs256URL == "" { return googleSACertsURL @@ -248,10 +206,6 @@ func (v *Validator) rs256CertsURL() string { func (v *Validator) validateES256(ctx context.Context, keyID string, hashedContent []byte, sig []byte) error { certResp, err := v.client.getCert(ctx, v.es256CertsURL()) -======= -func (v *Validator) validateES256(ctx context.Context, keyID string, hashedContent []byte, sig []byte) error { - certResp, err := v.client.getCert(ctx, googleIAPCertsURL) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -281,7 +235,6 @@ func (v *Validator) validateES256(ctx context.Context, keyID string, hashedConte return nil } -<<<<<<< HEAD func (v *Validator) es256CertsURL() string { if v.es256URL == "" { return googleIAPCertsURL @@ -289,8 +242,6 @@ func (v *Validator) es256CertsURL() string { return v.es256URL } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func findMatchingKey(response *certResponse, keyID string) (*jwk, error) { if response == nil { return nil, fmt.Errorf("idtoken: cert 
response is nil") diff --git a/vendor/cloud.google.com/go/auth/credentials/impersonate/idtoken.go b/vendor/cloud.google.com/go/auth/credentials/impersonate/idtoken.go index 5f873adeaf..6c470822bd 100644 --- a/vendor/cloud.google.com/go/auth/credentials/impersonate/idtoken.go +++ b/vendor/cloud.google.com/go/auth/credentials/impersonate/idtoken.go @@ -15,7 +15,6 @@ package impersonate import ( -<<<<<<< HEAD "errors" "log/slog" "net/http" @@ -26,20 +25,6 @@ import ( "cloud.google.com/go/auth/httptransport" "cloud.google.com/go/auth/internal" "github.com/googleapis/gax-go/v2/internallog" -======= - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "net/http" - "time" - - "cloud.google.com/go/auth" - "cloud.google.com/go/auth/credentials" - "cloud.google.com/go/auth/httptransport" - "cloud.google.com/go/auth/internal" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // IDTokenOptions for generating an impersonated ID token. @@ -68,7 +53,6 @@ type IDTokenOptions struct { // when fetching tokens. If provided this should be a fully-authenticated // client. Optional. Client *http.Client -<<<<<<< HEAD // UniverseDomain is the default service domain for a given Cloud universe. // The default value is "googleapis.com". This is the universe domain // configured for the client, which will be compared to the universe domain @@ -79,8 +63,6 @@ type IDTokenOptions struct { // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default // logger will be used. Optional. Logger *slog.Logger -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (o *IDTokenOptions) validate() error { @@ -109,7 +91,6 @@ func NewIDTokenCredentials(opts *IDTokenOptions) (*auth.Credentials, error) { if err := opts.validate(); err != nil { return nil, err } -<<<<<<< HEAD client := opts.Client creds := opts.Credentials logger := internallog.New(opts.Logger) @@ -120,38 +101,21 @@ func NewIDTokenCredentials(opts *IDTokenOptions) (*auth.Credentials, error) { Scopes: []string{defaultScope}, UseSelfSignedJWT: true, Logger: logger, -======= - - client := opts.Client - creds := opts.Credentials - if client == nil { - var err error - if creds == nil { - // TODO: test not signed jwt more - creds, err = credentials.DetectDefault(&credentials.DetectOptions{ - Scopes: []string{defaultScope}, - UseSelfSignedJWT: true, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }) if err != nil { return nil, err } } client, err = httptransport.NewClient(&httptransport.Options{ -<<<<<<< HEAD Credentials: creds, UniverseDomain: opts.UniverseDomain, Logger: logger, -======= - Credentials: creds, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }) if err != nil { return nil, err } } -<<<<<<< HEAD universeDomainProvider := resolveUniverseDomainProvider(creds) var delegates []string for _, v := range opts.Delegates { @@ -175,79 +139,3 @@ func NewIDTokenCredentials(opts *IDTokenOptions) (*auth.Credentials, error) { UniverseDomainProvider: universeDomainProvider, }), nil } -======= - itp := impersonatedIDTokenProvider{ - client: client, - targetPrincipal: opts.TargetPrincipal, - audience: opts.Audience, - includeEmail: opts.IncludeEmail, - } - for _, v := range opts.Delegates { - itp.delegates = append(itp.delegates, formatIAMServiceAccountName(v)) - } - - var udp auth.CredentialsPropertyProvider - if creds != nil { - udp = auth.CredentialsPropertyFunc(creds.UniverseDomain) - } - return auth.NewCredentials(&auth.CredentialsOptions{ - TokenProvider: auth.NewCachedTokenProvider(itp, nil), - 
UniverseDomainProvider: udp, - }), nil -} - -type generateIDTokenRequest struct { - Audience string `json:"audience"` - IncludeEmail bool `json:"includeEmail"` - Delegates []string `json:"delegates,omitempty"` -} - -type generateIDTokenResponse struct { - Token string `json:"token"` -} - -type impersonatedIDTokenProvider struct { - client *http.Client - - targetPrincipal string - audience string - includeEmail bool - delegates []string -} - -func (i impersonatedIDTokenProvider) Token(ctx context.Context) (*auth.Token, error) { - genIDTokenReq := generateIDTokenRequest{ - Audience: i.audience, - IncludeEmail: i.includeEmail, - Delegates: i.delegates, - } - bodyBytes, err := json.Marshal(genIDTokenReq) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to marshal request: %w", err) - } - - url := fmt.Sprintf("%s/v1/%s:generateIdToken", iamCredentialsEndpoint, formatIAMServiceAccountName(i.targetPrincipal)) - req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(bodyBytes)) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to create request: %w", err) - } - req.Header.Set("Content-Type", "application/json") - resp, body, err := internal.DoRequest(i.client, req) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to generate ID token: %w", err) - } - if c := resp.StatusCode; c < 200 || c > 299 { - return nil, fmt.Errorf("impersonate: status code %d: %s", c, body) - } - - var generateIDTokenResp generateIDTokenResponse - if err := json.Unmarshal(body, &generateIDTokenResp); err != nil { - return nil, fmt.Errorf("impersonate: unable to parse response: %w", err) - } - return &auth.Token{ - Value: generateIDTokenResp.Token, - // Generated ID tokens are good for one hour. - Expiry: time.Now().Add(1 * time.Hour), - }, nil -} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/cloud.google.com/go/auth/credentials/impersonate/impersonate.go b/vendor/cloud.google.com/go/auth/credentials/impersonate/impersonate.go index 16d66f8d44..7d8efd54ef 100644 --- a/vendor/cloud.google.com/go/auth/credentials/impersonate/impersonate.go +++ b/vendor/cloud.google.com/go/auth/credentials/impersonate/impersonate.go @@ -20,10 +20,7 @@ import ( "encoding/json" "errors" "fmt" -<<<<<<< HEAD "log/slog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "net/http" "strings" "time" @@ -32,18 +29,11 @@ import ( "cloud.google.com/go/auth/credentials" "cloud.google.com/go/auth/httptransport" "cloud.google.com/go/auth/internal" -<<<<<<< HEAD "github.com/googleapis/gax-go/v2/internallog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) var ( universeDomainPlaceholder = "UNIVERSE_DOMAIN" -<<<<<<< HEAD -======= - iamCredentialsEndpoint = "https://iamcredentials.googleapis.com" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) iamCredentialsUniverseDomainEndpoint = "https://iamcredentials.UNIVERSE_DOMAIN" oauth2Endpoint = "https://oauth2.googleapis.com" errMissingTargetPrincipal = errors.New("impersonate: target service account must be provided") @@ -76,31 +66,21 @@ func NewCredentials(opts *CredentialsOptions) (*auth.Credentials, error) { client := opts.Client creds := opts.Credentials -<<<<<<< HEAD logger := internallog.New(opts.Logger) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if client == nil { var err error if creds == nil { creds, err = credentials.DetectDefault(&credentials.DetectOptions{ Scopes: []string{defaultScope}, UseSelfSignedJWT: true, -<<<<<<< HEAD Logger: logger, 
-======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }) if err != nil { return nil, err } } -<<<<<<< HEAD client, err = httptransport.NewClient(transportOpts(opts, creds, logger)) -======= - client, err = httptransport.NewClient(transportOpts(opts, creds)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -125,16 +105,10 @@ func NewCredentials(opts *CredentialsOptions) (*auth.Credentials, error) { targetPrincipal: opts.TargetPrincipal, lifetime: fmt.Sprintf("%.fs", lifetime.Seconds()), universeDomainProvider: universeDomainProvider, -<<<<<<< HEAD logger: logger, } for _, v := range opts.Delegates { its.delegates = append(its.delegates, internal.FormatIAMServiceAccountResource(v)) -======= - } - for _, v := range opts.Delegates { - its.delegates = append(its.delegates, formatIAMServiceAccountName(v)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } its.scopes = make([]string, len(opts.Scopes)) copy(its.scopes, opts.Scopes) @@ -156,16 +130,10 @@ func NewCredentials(opts *CredentialsOptions) (*auth.Credentials, error) { // is provided, it will be used in the transport for a validation ensuring that it // matches the universe domain in the base credentials. If opts.UniverseDomain // is not provided, this validation will be skipped. -<<<<<<< HEAD func transportOpts(opts *CredentialsOptions, creds *auth.Credentials, logger *slog.Logger) *httptransport.Options { tOpts := &httptransport.Options{ Credentials: creds, Logger: logger, -======= -func transportOpts(opts *CredentialsOptions, creds *auth.Credentials) *httptransport.Options { - tOpts := &httptransport.Options{ - Credentials: creds, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if opts.UniverseDomain == "" { tOpts.InternalOptions = &httptransport.InternalOptions{ @@ -223,14 +191,11 @@ type CredentialsOptions struct { // This field has no default value, and only if provided will it be used to // verify the universe domain from the credentials. Optional. UniverseDomain string -<<<<<<< HEAD // Logger is used for debug logging. If provided, logging will be enabled // at the loggers configured level. By default logging is disabled unless // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default // logger will be used. Optional. Logger *slog.Logger -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (o *CredentialsOptions) validate() error { @@ -249,13 +214,6 @@ func (o *CredentialsOptions) validate() error { return nil } -<<<<<<< HEAD -======= -func formatIAMServiceAccountName(name string) string { - return fmt.Sprintf("projects/-/serviceAccounts/%s", name) -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type generateAccessTokenRequest struct { Delegates []string `json:"delegates,omitempty"` Lifetime string `json:"lifetime,omitempty"` @@ -268,15 +226,10 @@ type generateAccessTokenResponse struct { } type impersonatedTokenProvider struct { -<<<<<<< HEAD client *http.Client // universeDomain is used for endpoint construction. 
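The request and response types above define the IAM Credentials generateAccessToken wire format; the endpoint is built by substituting the universe domain into iamCredentialsUniverseDomainEndpoint and formatting the principal with FormatIAMServiceAccountResource. A standalone sketch of the resulting call, assuming the default googleapis.com universe and a placeholder principal:

package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the generateAccessTokenRequest type in the diff above.
type generateAccessTokenRequest struct {
	Delegates []string `json:"delegates,omitempty"`
	Lifetime  string   `json:"lifetime,omitempty"`
	Scope     []string `json:"scope,omitempty"`
}

func main() {
	body, _ := json.MarshalIndent(generateAccessTokenRequest{
		Lifetime: "3600s",
		Scope:    []string{"https://www.googleapis.com/auth/cloud-platform"},
	}, "", "  ")
	// "projects/-/serviceAccounts/%s" is FormatIAMServiceAccountResource;
	// the host assumes the default universe domain.
	url := fmt.Sprintf("https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/%s:generateAccessToken",
		"sa-name@project-id.iam.gserviceaccount.com") // placeholder
	fmt.Println("POST", url)
	fmt.Println(string(body))
}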
universeDomainProvider auth.CredentialsPropertyProvider logger *slog.Logger -======= - client *http.Client - universeDomainProvider auth.CredentialsPropertyProvider ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) targetPrincipal string lifetime string @@ -300,28 +253,18 @@ func (i impersonatedTokenProvider) Token(ctx context.Context) (*auth.Token, erro return nil, err } endpoint := strings.Replace(iamCredentialsUniverseDomainEndpoint, universeDomainPlaceholder, universeDomain, 1) -<<<<<<< HEAD url := fmt.Sprintf("%s/v1/%s:generateAccessToken", endpoint, internal.FormatIAMServiceAccountResource(i.targetPrincipal)) -======= - url := fmt.Sprintf("%s/v1/%s:generateAccessToken", endpoint, formatIAMServiceAccountName(i.targetPrincipal)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(b)) if err != nil { return nil, fmt.Errorf("impersonate: unable to create request: %w", err) } req.Header.Set("Content-Type", "application/json") -<<<<<<< HEAD i.logger.DebugContext(ctx, "impersonated token request", "request", internallog.HTTPRequest(req, b)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) resp, body, err := internal.DoRequest(i.client, req) if err != nil { return nil, fmt.Errorf("impersonate: unable to generate access token: %w", err) } -<<<<<<< HEAD i.logger.DebugContext(ctx, "impersonated token response", "response", internallog.HTTPResponse(resp, body)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if c := resp.StatusCode; c < 200 || c > 299 { return nil, fmt.Errorf("impersonate: status code %d: %s", c, body) } diff --git a/vendor/cloud.google.com/go/auth/credentials/impersonate/user.go b/vendor/cloud.google.com/go/auth/credentials/impersonate/user.go index e611011dfc..e5e1d65028 100644 --- a/vendor/cloud.google.com/go/auth/credentials/impersonate/user.go +++ b/vendor/cloud.google.com/go/auth/credentials/impersonate/user.go @@ -20,10 +20,7 @@ import ( "encoding/json" "errors" "fmt" -<<<<<<< HEAD "log/slog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "net/http" "net/url" "strings" @@ -31,14 +28,11 @@ import ( "cloud.google.com/go/auth" "cloud.google.com/go/auth/internal" -<<<<<<< HEAD "github.com/googleapis/gax-go/v2/internallog" ) var ( iamCredentialsEndpoint = "https://iamcredentials.googleapis.com" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // user provides an auth flow for domain-wide delegation, setting @@ -53,18 +47,11 @@ func user(opts *CredentialsOptions, client *http.Client, lifetime time.Duration, subject: opts.Subject, lifetime: lifetime, universeDomainProvider: universeDomainProvider, -<<<<<<< HEAD logger: internallog.New(opts.Logger), } u.delegates = make([]string, len(opts.Delegates)) for i, v := range opts.Delegates { u.delegates[i] = internal.FormatIAMServiceAccountResource(v) -======= - } - u.delegates = make([]string, len(opts.Delegates)) - for i, v := range opts.Delegates { - u.delegates[i] = formatIAMServiceAccountName(v) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } u.scopes = make([]string, len(opts.Scopes)) copy(u.scopes, opts.Scopes) @@ -108,10 +95,7 @@ type exchangeTokenResponse struct { type userTokenProvider struct { client *http.Client -<<<<<<< HEAD logger *slog.Logger -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) targetPrincipal string subject string @@ -163,28 +147,18 @@ func (u userTokenProvider) signJWT(ctx context.Context) (string, error) { if err != 
nil { return "", fmt.Errorf("impersonate: unable to marshal request: %w", err) } -<<<<<<< HEAD reqURL := fmt.Sprintf("%s/v1/%s:signJwt", iamCredentialsEndpoint, internal.FormatIAMServiceAccountResource(u.targetPrincipal)) -======= - reqURL := fmt.Sprintf("%s/v1/%s:signJwt", iamCredentialsEndpoint, formatIAMServiceAccountName(u.targetPrincipal)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) req, err := http.NewRequestWithContext(ctx, "POST", reqURL, bytes.NewReader(bodyBytes)) if err != nil { return "", fmt.Errorf("impersonate: unable to create request: %w", err) } req.Header.Set("Content-Type", "application/json") -<<<<<<< HEAD u.logger.DebugContext(ctx, "impersonated user sign JWT request", "request", internallog.HTTPRequest(req, bodyBytes)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) resp, body, err := internal.DoRequest(u.client, req) if err != nil { return "", fmt.Errorf("impersonate: unable to sign JWT: %w", err) } -<<<<<<< HEAD u.logger.DebugContext(ctx, "impersonated user sign JWT response", "response", internallog.HTTPResponse(resp, body)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if c := resp.StatusCode; c < 200 || c > 299 { return "", fmt.Errorf("impersonate: status code %d: %s", c, body) } @@ -205,18 +179,12 @@ func (u userTokenProvider) exchangeToken(ctx context.Context, signedJWT string) if err != nil { return nil, err } -<<<<<<< HEAD u.logger.DebugContext(ctx, "impersonated user token exchange request", "request", internallog.HTTPRequest(req, []byte(v.Encode()))) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) resp, body, err := internal.DoRequest(u.client, req) if err != nil { return nil, fmt.Errorf("impersonate: unable to exchange token: %w", err) } -<<<<<<< HEAD u.logger.DebugContext(ctx, "impersonated user token exchange response", "response", internallog.HTTPResponse(resp, body)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if c := resp.StatusCode; c < 200 || c > 299 { return nil, fmt.Errorf("impersonate: status code %d: %s", c, body) } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go index a013095f4c..9ecd1f64bd 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go @@ -23,10 +23,7 @@ import ( "encoding/json" "errors" "fmt" -<<<<<<< HEAD "log/slog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "net/http" "net/url" "os" @@ -36,10 +33,7 @@ import ( "time" "cloud.google.com/go/auth/internal" -<<<<<<< HEAD "github.com/googleapis/gax-go/v2/internallog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) var ( @@ -95,10 +89,7 @@ type awsSubjectProvider struct { reqOpts *RequestOptions Client *http.Client -<<<<<<< HEAD logger *slog.Logger -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error) { @@ -204,18 +195,12 @@ func (sp *awsSubjectProvider) getAWSSessionToken(ctx context.Context) (string, e } req.Header.Set(awsIMDSv2SessionTTLHeader, awsIMDSv2SessionTTL) -<<<<<<< HEAD sp.logger.DebugContext(ctx, "aws session token request", "request", internallog.HTTPRequest(req, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) resp, body, err := 
internal.DoRequest(sp.Client, req) if err != nil { return "", err } -<<<<<<< HEAD sp.logger.DebugContext(ctx, "aws session token response", "response", internallog.HTTPResponse(resp, body)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if resp.StatusCode != http.StatusOK { return "", fmt.Errorf("credentials: unable to retrieve AWS session token: %s", body) } @@ -245,18 +230,12 @@ func (sp *awsSubjectProvider) getRegion(ctx context.Context, headers map[string] for name, value := range headers { req.Header.Add(name, value) } -<<<<<<< HEAD sp.logger.DebugContext(ctx, "aws region request", "request", internallog.HTTPRequest(req, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return "", err } -<<<<<<< HEAD sp.logger.DebugContext(ctx, "aws region response", "response", internallog.HTTPResponse(resp, body)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if resp.StatusCode != http.StatusOK { return "", fmt.Errorf("credentials: unable to retrieve AWS region - %s", body) } @@ -311,18 +290,12 @@ func (sp *awsSubjectProvider) getMetadataSecurityCredentials(ctx context.Context for name, value := range headers { req.Header.Add(name, value) } -<<<<<<< HEAD sp.logger.DebugContext(ctx, "aws security credential request", "request", internallog.HTTPRequest(req, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return result, err } -<<<<<<< HEAD sp.logger.DebugContext(ctx, "aws security credential response", "response", internallog.HTTPResponse(resp, body)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if resp.StatusCode != http.StatusOK { return result, fmt.Errorf("credentials: unable to retrieve AWS security credentials - %s", body) } @@ -344,18 +317,12 @@ func (sp *awsSubjectProvider) getMetadataRoleName(ctx context.Context, headers m req.Header.Add(name, value) } -<<<<<<< HEAD sp.logger.DebugContext(ctx, "aws metadata role request", "request", internallog.HTTPRequest(req, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return "", err } -<<<<<<< HEAD sp.logger.DebugContext(ctx, "aws metadata role response", "response", internallog.HTTPResponse(resp, body)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if resp.StatusCode != http.StatusOK { return "", fmt.Errorf("credentials: unable to retrieve AWS role name - %s", body) } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go index d31747e343..a822064234 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go @@ -18,10 +18,7 @@ import ( "context" "errors" "fmt" -<<<<<<< HEAD "log/slog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "net/http" "regexp" "strconv" @@ -32,10 +29,7 @@ import ( "cloud.google.com/go/auth/credentials/internal/impersonate" "cloud.google.com/go/auth/credentials/internal/stsexchange" "cloud.google.com/go/auth/internal/credsfile" -<<<<<<< HEAD "github.com/googleapis/gax-go/v2/internallog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const ( @@ -112,14 +106,11 @@ 
type Options struct { // This is important for X509 credentials which should create a new client if the default was used // but should respect a client explicitly passed in by the user. IsDefaultClient bool -<<<<<<< HEAD // Logger is used for debug logging. If provided, logging will be enabled // at the loggers configured level. By default logging is disabled unless // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default // logger will be used. Optional. Logger *slog.Logger -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // SubjectTokenProvider can be used to supply a subject token to exchange for a @@ -240,10 +231,7 @@ func NewTokenProvider(opts *Options) (auth.TokenProvider, error) { return nil, err } opts.resolveTokenURL() -<<<<<<< HEAD logger := internallog.New(opts.Logger) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) stp, err := newSubjectTokenProvider(opts) if err != nil { return nil, err @@ -258,10 +246,7 @@ func NewTokenProvider(opts *Options) (auth.TokenProvider, error) { client: client, opts: opts, stp: stp, -<<<<<<< HEAD logger: logger, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if opts.ServiceAccountImpersonationURL == "" { @@ -278,10 +263,7 @@ func NewTokenProvider(opts *Options) (auth.TokenProvider, error) { Scopes: scopes, Tp: auth.NewCachedTokenProvider(tp, nil), TokenLifetimeSeconds: opts.ServiceAccountImpersonationLifetimeSeconds, -<<<<<<< HEAD Logger: logger, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }) if err != nil { return nil, err @@ -297,10 +279,7 @@ type subjectTokenProvider interface { // tokenProvider is the provider that handles external credentials. It is used to retrieve Tokens. type tokenProvider struct { client *http.Client -<<<<<<< HEAD logger *slog.Logger -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) opts *Options stp subjectTokenProvider } @@ -342,10 +321,7 @@ func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) { Authentication: clientAuth, Headers: header, ExtraOpts: options, -<<<<<<< HEAD Logger: tp.logger, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }) if err != nil { return nil, err @@ -366,20 +342,14 @@ func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) { // newSubjectTokenProvider determines the type of credsfile.CredentialSource needed to create a // subjectTokenProvider func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) { -<<<<<<< HEAD logger := internallog.New(o.Logger) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) reqOpts := &RequestOptions{Audience: o.Audience, SubjectTokenType: o.SubjectTokenType} if o.AwsSecurityCredentialsProvider != nil { return &awsSubjectProvider{ securityCredentialsProvider: o.AwsSecurityCredentialsProvider, TargetResource: o.Audience, reqOpts: reqOpts, -<<<<<<< HEAD logger: logger, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }, nil } else if o.SubjectTokenProvider != nil { return &programmaticProvider{stp: o.SubjectTokenProvider, opts: reqOpts}, nil @@ -396,10 +366,7 @@ func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) { CredVerificationURL: o.CredentialSource.URL, TargetResource: o.Audience, Client: o.Client, -<<<<<<< HEAD logger: logger, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if o.CredentialSource.IMDSv2SessionTokenURL != "" { awsProvider.IMDSv2SessionTokenURL = o.CredentialSource.IMDSv2SessionTokenURL @@ 
-410,7 +377,6 @@ func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) { } else if o.CredentialSource.File != "" { return &fileSubjectProvider{File: o.CredentialSource.File, Format: o.CredentialSource.Format}, nil } else if o.CredentialSource.URL != "" { -<<<<<<< HEAD return &urlSubjectProvider{ URL: o.CredentialSource.URL, Headers: o.CredentialSource.Headers, @@ -418,9 +384,6 @@ func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) { Client: o.Client, Logger: logger, }, nil -======= - return &urlSubjectProvider{URL: o.CredentialSource.URL, Headers: o.CredentialSource.Headers, Format: o.CredentialSource.Format, Client: o.Client}, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } else if o.CredentialSource.Executable != nil { ec := o.CredentialSource.Executable if ec.Command == "" { diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go index c9ee24a31e..754ecf4fef 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go @@ -19,18 +19,12 @@ import ( "encoding/json" "errors" "fmt" -<<<<<<< HEAD "log/slog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "net/http" "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/credsfile" -<<<<<<< HEAD "github.com/googleapis/gax-go/v2/internallog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const ( @@ -46,10 +40,7 @@ type urlSubjectProvider struct { Headers map[string]string Format *credsfile.Format Client *http.Client -<<<<<<< HEAD Logger *slog.Logger -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error) { @@ -61,18 +52,12 @@ func (sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error) for key, val := range sp.Headers { req.Header.Add(key, val) } -<<<<<<< HEAD sp.Logger.DebugContext(ctx, "url subject token request", "request", internallog.HTTPRequest(req, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return "", fmt.Errorf("credentials: invalid response when retrieving subject token: %w", err) } -<<<<<<< HEAD sp.Logger.DebugContext(ctx, "url subject token response", "response", internallog.HTTPResponse(resp, body)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { return "", fmt.Errorf("credentials: status code %d: %s", c, body) } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go index 1010ef6f47..ae39206e5f 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go @@ -17,20 +17,14 @@ package externalaccountuser import ( "context" "errors" -<<<<<<< HEAD "log/slog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "net/http" "time" "cloud.google.com/go/auth" "cloud.google.com/go/auth/credentials/internal/stsexchange" "cloud.google.com/go/auth/internal" -<<<<<<< HEAD 
"github.com/googleapis/gax-go/v2/internallog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // Options stores the configuration for fetching tokens with external authorized @@ -59,11 +53,8 @@ type Options struct { // Client for token request. Client *http.Client -<<<<<<< HEAD // Logger for logging. Logger *slog.Logger -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (c *Options) validate() bool { @@ -103,10 +94,7 @@ func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) { RefreshToken: opts.RefreshToken, Authentication: clientAuth, Headers: headers, -<<<<<<< HEAD Logger: internallog.New(tp.o.Logger), -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }) if err != nil { return nil, err diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go index 230795f5e8..c2d320fdf4 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go @@ -16,20 +16,13 @@ package gdch import ( "context" -<<<<<<< HEAD "crypto" -======= - "crypto/rsa" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "crypto/tls" "crypto/x509" "encoding/json" "errors" "fmt" -<<<<<<< HEAD "log/slog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "net/http" "net/url" "os" @@ -40,10 +33,7 @@ import ( "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/credsfile" "cloud.google.com/go/auth/internal/jwt" -<<<<<<< HEAD "github.com/googleapis/gax-go/v2/internallog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const ( @@ -63,10 +53,7 @@ var ( type Options struct { STSAudience string Client *http.Client -<<<<<<< HEAD Logger *slog.Logger -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider] from a @@ -78,11 +65,7 @@ func NewTokenProvider(f *credsfile.GDCHServiceAccountFile, o *Options) (auth.Tok if o.STSAudience == "" { return nil, errors.New("credentials: STSAudience must be set for the GDCH auth flows") } -<<<<<<< HEAD signer, err := internal.ParseKey([]byte(f.PrivateKey)) -======= - pk, err := internal.ParseKey([]byte(f.PrivateKey)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -95,18 +78,11 @@ func NewTokenProvider(f *credsfile.GDCHServiceAccountFile, o *Options) (auth.Tok serviceIdentity: fmt.Sprintf("system:serviceaccount:%s:%s", f.Project, f.Name), tokenURL: f.TokenURL, aud: o.STSAudience, -<<<<<<< HEAD signer: signer, pkID: f.PrivateKeyID, certPool: certPool, client: o.Client, logger: internallog.New(o.Logger), -======= - pk: pk, - pkID: f.PrivateKeyID, - certPool: certPool, - client: o.Client, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return tp, nil } @@ -125,19 +101,12 @@ type gdchProvider struct { serviceIdentity string tokenURL string aud string -<<<<<<< HEAD signer crypto.Signer -======= - pk *rsa.PrivateKey ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) pkID string certPool *x509.CertPool client *http.Client -<<<<<<< HEAD logger *slog.Logger -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) { @@ -156,11 +125,7 @@ func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) { Type: jwt.HeaderType, KeyID: string(g.pkID), } -<<<<<<< HEAD payload, 
err := jwt.EncodeJWS(&h, &claims, g.signer) -======= - payload, err := jwt.EncodeJWS(&h, &claims, g.pk) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -176,18 +141,12 @@ func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) { return nil, err } req.Header.Set("Content-Type", "application/x-www-form-urlencoded") -<<<<<<< HEAD g.logger.DebugContext(ctx, "gdch token request", "request", internallog.HTTPRequest(req, []byte(v.Encode()))) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) resp, body, err := internal.DoRequest(g.client, req) if err != nil { return nil, fmt.Errorf("credentials: cannot fetch token: %w", err) } -<<<<<<< HEAD g.logger.DebugContext(ctx, "gdch token response", "response", internallog.HTTPResponse(resp, body)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices { return nil, &auth.Error{ Response: resp, diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go index 9958da8f39..b3a99261fa 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go @@ -20,19 +20,13 @@ import ( "encoding/json" "errors" "fmt" -<<<<<<< HEAD "log/slog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "net/http" "time" "cloud.google.com/go/auth" "cloud.google.com/go/auth/internal" -<<<<<<< HEAD "github.com/googleapis/gax-go/v2/internallog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const ( @@ -82,14 +76,11 @@ type Options struct { // Client configures the underlying client used to make network requests // when fetching tokens. Required. Client *http.Client -<<<<<<< HEAD // Logger is used for debug logging. If provided, logging will be enabled // at the loggers configured level. By default logging is disabled unless // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default // logger will be used. Optional. Logger *slog.Logger -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (o *Options) validate() error { @@ -104,10 +95,7 @@ func (o *Options) validate() error { // Token performs the exchange to get a temporary service account token to allow access to GCP. 
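This internal Token call backs the public impersonate.NewCredentials entry point seen earlier in the patch. A minimal sketch of that public surface, with a placeholder principal, assuming ADC is configured and that CredentialsOptions exposes the Lifetime duration suggested by the "%.fs" formatting above:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"cloud.google.com/go/auth/credentials/impersonate"
)

func main() {
	creds, err := impersonate.NewCredentials(&impersonate.CredentialsOptions{
		TargetPrincipal: "sa-name@project-id.iam.gserviceaccount.com", // placeholder
		Scopes:          []string{"https://www.googleapis.com/auth/cloud-platform"},
		Lifetime:        30 * time.Minute, // becomes the "1800s" lifetime field
	})
	if err != nil {
		log.Fatal(err)
	}
	tok, err := creds.Token(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("access token expires:", tok.Expiry)
}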
func (o *Options) Token(ctx context.Context) (*auth.Token, error) { -<<<<<<< HEAD logger := internallog.New(o.Logger) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) lifetime := defaultTokenLifetime if o.TokenLifetimeSeconds != 0 { lifetime = fmt.Sprintf("%ds", o.TokenLifetimeSeconds) @@ -129,18 +117,12 @@ func (o *Options) Token(ctx context.Context) (*auth.Token, error) { if err := setAuthHeader(ctx, o.Tp, req); err != nil { return nil, err } -<<<<<<< HEAD logger.DebugContext(ctx, "impersonated token request", "request", internallog.HTTPRequest(req, b)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) resp, body, err := internal.DoRequest(o.Client, req) if err != nil { return nil, fmt.Errorf("credentials: unable to generate access token: %w", err) } -<<<<<<< HEAD logger.DebugContext(ctx, "impersonated token response", "response", internallog.HTTPResponse(resp, body)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { return nil, fmt.Errorf("credentials: status code %d: %s", c, body) } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go index 8def2fdfad..e1d2b15034 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go @@ -19,10 +19,7 @@ import ( "encoding/base64" "encoding/json" "fmt" -<<<<<<< HEAD "log/slog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "net/http" "net/url" "strconv" @@ -30,10 +27,7 @@ import ( "cloud.google.com/go/auth" "cloud.google.com/go/auth/internal" -<<<<<<< HEAD "github.com/googleapis/gax-go/v2/internallog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const ( @@ -48,10 +42,7 @@ const ( // Options stores the configuration for making an sts exchange request. 
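doRequest above form-encodes a standard OAuth 2.0 token exchange (RFC 8693). A sketch of the values a typical workload-identity-federation exchange sends to the Google STS endpoint; the audience string and subject token are placeholders:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	v := url.Values{}
	v.Set("grant_type", "urn:ietf:params:oauth:grant-type:token-exchange")
	v.Set("audience", "//iam.googleapis.com/projects/123456/locations/global/workloadIdentityPools/my-pool/providers/my-provider") // placeholder
	v.Set("subject_token_type", "urn:ietf:params:oauth:token-type:jwt")
	v.Set("subject_token", "<token from the subject token provider>") // placeholder
	v.Set("requested_token_type", "urn:ietf:params:oauth:token-type:access_token")
	v.Set("scope", "https://www.googleapis.com/auth/cloud-platform")
	// Sent as a POST body with Content-Type
	// application/x-www-form-urlencoded, as doRequest does.
	fmt.Println("POST https://sts.googleapis.com/v1/token")
	fmt.Println(v.Encode())
}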
type Options struct { Client *http.Client -<<<<<<< HEAD Logger *slog.Logger -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Endpoint string Request *TokenRequest Authentication ClientAuthentication @@ -92,10 +83,7 @@ func ExchangeToken(ctx context.Context, opts *Options) (*TokenResponse, error) { func doRequest(ctx context.Context, opts *Options, data url.Values) (*TokenResponse, error) { opts.Authentication.InjectAuthentication(data, opts.Headers) encodedData := data.Encode() -<<<<<<< HEAD logger := internallog.New(opts.Logger) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) req, err := http.NewRequestWithContext(ctx, "POST", opts.Endpoint, strings.NewReader(encodedData)) if err != nil { @@ -109,18 +97,12 @@ func doRequest(ctx context.Context, opts *Options, data url.Values) (*TokenRespo } req.Header.Set("Content-Length", strconv.Itoa(len(encodedData))) -<<<<<<< HEAD logger.DebugContext(ctx, "sts token request", "request", internallog.HTTPRequest(req, []byte(encodedData))) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) resp, body, err := internal.DoRequest(opts.Client, req) if err != nil { return nil, fmt.Errorf("credentials: invalid response from Secure Token Server: %w", err) } -<<<<<<< HEAD logger.DebugContext(ctx, "sts token response", "response", internallog.HTTPResponse(resp, body)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices { return nil, fmt.Errorf("credentials: status code %d: %s", c, body) } diff --git a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go index 6bf4c3440c..8d335ccecc 100644 --- a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go +++ b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go @@ -16,16 +16,10 @@ package credentials import ( "context" -<<<<<<< HEAD "crypto" "errors" "fmt" "log/slog" -======= - "crypto/rsa" - "errors" - "fmt" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "strings" "time" @@ -46,11 +40,7 @@ func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions if len(opts.scopes()) == 0 && opts.Audience == "" { return nil, errors.New("credentials: both scopes and audience are empty") } -<<<<<<< HEAD signer, err := internal.ParseKey([]byte(f.PrivateKey)) -======= - pk, err := internal.ParseKey([]byte(f.PrivateKey)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, fmt.Errorf("credentials: could not parse key: %w", err) } @@ -58,14 +48,9 @@ func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions email: f.ClientEmail, audience: opts.Audience, scopes: opts.scopes(), -<<<<<<< HEAD signer: signer, pkID: f.PrivateKeyID, logger: opts.logger(), -======= - pk: pk, - pkID: f.PrivateKeyID, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }, nil } @@ -73,14 +58,9 @@ type selfSignedTokenProvider struct { email string audience string scopes []string -<<<<<<< HEAD signer crypto.Signer pkID string logger *slog.Logger -======= - pk *rsa.PrivateKey - pkID string ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (tp *selfSignedTokenProvider) Token(context.Context) (*auth.Token, error) { @@ -100,18 +80,10 @@ func (tp *selfSignedTokenProvider) Token(context.Context) (*auth.Token, error) { Type: jwt.HeaderType, KeyID: string(tp.pkID), } -<<<<<<< HEAD tok, err := jwt.EncodeJWS(h, c, tp.signer) if err != nil { 
return nil, fmt.Errorf("credentials: could not encode JWT: %w", err) } tp.logger.Debug("created self-signed JWT", "token", tok) return &auth.Token{Value: tok, Type: internal.TokenTypeBearer, Expiry: exp}, nil -======= - msg, err := jwt.EncodeJWS(h, c, tp.pk) - if err != nil { - return nil, fmt.Errorf("credentials: could not encode JWT: %w", err) - } - return &auth.Token{Value: msg, Type: internal.TokenTypeBearer, Expiry: exp}, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go index e356fb2db1..d781c3e49a 100644 --- a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go +++ b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go @@ -66,21 +66,12 @@ func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, o *Options) bool if tok == nil { return false } -<<<<<<< HEAD if tok.MetadataString("auth.google.tokenSource") != "compute-metadata" { return false } if o.InternalOptions != nil && o.InternalOptions.EnableNonDefaultSAForDirectPath { return true } -======= - if o.InternalOptions != nil && o.InternalOptions.EnableNonDefaultSAForDirectPath { - return true - } - if tok.MetadataString("auth.google.tokenSource") != "compute-metadata" { - return false - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if tok.MetadataString("auth.google.serviceAccount") != "default" { return false } diff --git a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go index d9b5f1cb30..95f259037f 100644 --- a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go +++ b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go @@ -21,10 +21,7 @@ import ( "crypto/tls" "errors" "fmt" -<<<<<<< HEAD "log/slog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "net/http" "os" "sync" @@ -33,11 +30,7 @@ import ( "cloud.google.com/go/auth/credentials" "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/transport" -<<<<<<< HEAD "github.com/googleapis/gax-go/v2/internallog" -======= - "go.opencensus.io/plugin/ocgrpc" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "google.golang.org/grpc" grpccreds "google.golang.org/grpc/credentials" @@ -125,14 +118,11 @@ type Options struct { // APIKey specifies an API key to be used as the basis for authentication. // If set DetectOpts are ignored. APIKey string -<<<<<<< HEAD // Logger is used for debug logging. If provided, logging will be enabled // at the loggers configured level. By default logging is disabled unless // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default // logger will be used. Optional. Logger *slog.Logger -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // InternalOptions are NOT meant to be set directly by consumers of this // package, they should only be set by generated client code. 
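The logger() helper added here is the same internallog pattern used throughout this patch: a nil *slog.Logger yields a no-op logger unless GOOGLE_SDK_GO_LOGGING_LEVEL enables a default one, while a caller-supplied logger is passed through. A small sketch of that behavior:

package main

import (
	"log/slog"
	"os"

	"github.com/googleapis/gax-go/v2/internallog"
)

func main() {
	// No logger configured: a no-op logger is returned unless
	// GOOGLE_SDK_GO_LOGGING_LEVEL enables a default one.
	quiet := internallog.New(nil)
	quiet.Debug("this is dropped by default")

	// A caller-supplied logger is used as-is.
	verbose := internallog.New(slog.New(slog.NewJSONHandler(os.Stderr, nil)))
	verbose.Debug("this is emitted")
}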
@@ -148,13 +138,10 @@ func (o *Options) client() *http.Client { return nil } -<<<<<<< HEAD func (o *Options) logger() *slog.Logger { return internallog.New(o.Logger) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (o *Options) validate() error { if o == nil { return errors.New("grpctransport: opts required to be non-nil") @@ -196,12 +183,9 @@ func (o *Options) resolveDetectOptions() *credentials.DetectOptions { do.Client = transport.DefaultHTTPClientWithTLS(tlsConfig) do.TokenURL = credentials.GoogleMTLSTokenURL } -<<<<<<< HEAD if do.Logger == nil { do.Logger = o.logger() } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return do } @@ -270,10 +254,7 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er ClientCertProvider: opts.ClientCertProvider, Client: opts.client(), UniverseDomain: opts.UniverseDomain, -<<<<<<< HEAD Logger: opts.logger(), -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if io := opts.InternalOptions; io != nil { tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate @@ -351,10 +332,6 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er // Add tracing, but before the other options, so that clients can override the // gRPC stats handler. // This assumes that gRPC options are processed in order, left to right. -<<<<<<< HEAD -======= - grpcOpts = addOCStatsHandler(grpcOpts, opts) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) grpcOpts = addOpenTelemetryStatsHandler(grpcOpts, opts) grpcOpts = append(grpcOpts, opts.GRPCDialOpts...) @@ -453,16 +430,6 @@ func (c *grpcCredentialsProvider) RequireTransportSecurity() bool { return c.secure } -<<<<<<< HEAD -======= -func addOCStatsHandler(dialOpts []grpc.DialOption, opts *Options) []grpc.DialOption { - if opts.DisableTelemetry { - return dialOpts - } - return append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func addOpenTelemetryStatsHandler(dialOpts []grpc.DialOption, opts *Options) []grpc.DialOption { if opts.DisableTelemetry { return dialOpts diff --git a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go index 9962965a9c..5758e85b5d 100644 --- a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go +++ b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go @@ -20,20 +20,14 @@ import ( "crypto/tls" "errors" "fmt" -<<<<<<< HEAD "log/slog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "net/http" "cloud.google.com/go/auth" detect "cloud.google.com/go/auth/credentials" "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/transport" -<<<<<<< HEAD "github.com/googleapis/gax-go/v2/internallog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // ClientCertProvider is a function that returns a TLS client certificate to be @@ -77,14 +71,11 @@ type Options struct { // configured for the client, which will be compared to the universe domain // that is separately configured for the credentials. UniverseDomain string -<<<<<<< HEAD // Logger is used for debug logging. If provided, logging will be enabled // at the loggers configured level. By default logging is disabled unless // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default // logger will be used. Optional. 
Logger *slog.Logger -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // InternalOptions are NOT meant to be set directly by consumers of this // package, they should only be set by generated client code. @@ -117,13 +108,10 @@ func (o *Options) client() *http.Client { return nil } -<<<<<<< HEAD func (o *Options) logger() *slog.Logger { return internallog.New(o.Logger) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (o *Options) resolveDetectOptions() *detect.DetectOptions { io := o.InternalOptions // soft-clone these so we are not updating a ref the user holds and may reuse @@ -148,12 +136,9 @@ func (o *Options) resolveDetectOptions() *detect.DetectOptions { do.Client = transport.DefaultHTTPClientWithTLS(tlsConfig) do.TokenURL = detect.GoogleMTLSTokenURL } -<<<<<<< HEAD if do.Logger == nil { do.Logger = o.logger() } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return do } @@ -226,10 +211,7 @@ func NewClient(opts *Options) (*http.Client, error) { ClientCertProvider: opts.ClientCertProvider, Client: opts.client(), UniverseDomain: opts.UniverseDomain, -<<<<<<< HEAD Logger: opts.logger(), -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if io := opts.InternalOptions; io != nil { tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate diff --git a/vendor/cloud.google.com/go/auth/httptransport/trace.go b/vendor/cloud.google.com/go/auth/httptransport/trace.go deleted file mode 100644 index 467c477c04..0000000000 --- a/vendor/cloud.google.com/go/auth/httptransport/trace.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package httptransport - -import ( - "encoding/binary" - "encoding/hex" - "fmt" - "net/http" - "strconv" - "strings" - - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" -) - -const ( - httpHeaderMaxSize = 200 - cloudTraceHeader = `X-Cloud-Trace-Context` -) - -// asserts the httpFormat fulfills this foreign interface -var _ propagation.HTTPFormat = (*httpFormat)(nil) - -// httpFormat implements propagation.httpFormat to propagate -// traces in HTTP headers for Google Cloud Platform and Cloud Trace. -type httpFormat struct{} - -// SpanContextFromRequest extracts a Cloud Trace span context from incoming requests. -func (f *httpFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { - h := req.Header.Get(cloudTraceHeader) - // See https://cloud.google.com/trace/docs/faq for the header HTTPFormat. - // Return if the header is empty or missing, or if the header is unreasonably - // large, to avoid making unnecessary copies of a large string. - if h == "" || len(h) > httpHeaderMaxSize { - return trace.SpanContext{}, false - } - - // Parse the trace id field. 
- slash := strings.Index(h, `/`) - if slash == -1 { - return trace.SpanContext{}, false - } - tid, h := h[:slash], h[slash+1:] - - buf, err := hex.DecodeString(tid) - if err != nil { - return trace.SpanContext{}, false - } - copy(sc.TraceID[:], buf) - - // Parse the span id field. - spanstr := h - semicolon := strings.Index(h, `;`) - if semicolon != -1 { - spanstr, h = h[:semicolon], h[semicolon+1:] - } - sid, err := strconv.ParseUint(spanstr, 10, 64) - if err != nil { - return trace.SpanContext{}, false - } - binary.BigEndian.PutUint64(sc.SpanID[:], sid) - - // Parse the options field, options field is optional. - if !strings.HasPrefix(h, "o=") { - return sc, true - } - o, err := strconv.ParseUint(h[2:], 10, 32) - if err != nil { - return trace.SpanContext{}, false - } - sc.TraceOptions = trace.TraceOptions(o) - return sc, true -} - -// SpanContextToRequest modifies the given request to include a Cloud Trace header. -func (f *httpFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { - sid := binary.BigEndian.Uint64(sc.SpanID[:]) - header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions)) - req.Header.Set(cloudTraceHeader, header) -} diff --git a/vendor/cloud.google.com/go/auth/httptransport/transport.go b/vendor/cloud.google.com/go/auth/httptransport/transport.go index 2c51836db2..ee215b6dc6 100644 --- a/vendor/cloud.google.com/go/auth/httptransport/transport.go +++ b/vendor/cloud.google.com/go/auth/httptransport/transport.go @@ -27,10 +27,6 @@ import ( "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/transport" "cloud.google.com/go/auth/internal/transport/cert" -<<<<<<< HEAD -======= - "go.opencensus.io/plugin/ochttp" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "golang.org/x/net/http2" ) @@ -46,14 +42,7 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err headers: headers, } var trans http.RoundTripper = ht -<<<<<<< HEAD trans = addOpenTelemetryTransport(trans, opts) -======= - // Give OpenTelemetry precedence over OpenCensus in case user configuration - // causes both to write the same header (`X-Cloud-Trace-Context`). - trans = addOpenTelemetryTransport(trans, opts) - trans = addOCTransport(trans, opts) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) switch { case opts.DisableAuthentication: // Do nothing. 
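With the OpenCensus round tripper gone, newTransport wires only the otelhttp transport. Consumers keep using httptransport.NewClient; a minimal sketch with detected credentials and debug logging, where the scope and the request URL are illustrative placeholders:

package main

import (
	"fmt"
	"log"
	"log/slog"
	"os"

	"cloud.google.com/go/auth/credentials"
	"cloud.google.com/go/auth/httptransport"
)

func main() {
	client, err := httptransport.NewClient(&httptransport.Options{
		Logger: slog.New(slog.NewTextHandler(os.Stderr, nil)),
		DetectOpts: &credentials.DetectOptions{
			Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// The returned client authenticates requests and carries the
	// OpenTelemetry transport added by newTransport.
	resp, err := client.Get("https://storage.googleapis.com/storage/v1/b?project=project-id") // placeholder
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}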
@@ -186,19 +175,6 @@ func addOpenTelemetryTransport(trans http.RoundTripper, opts *Options) http.Roun return otelhttp.NewTransport(trans) } -<<<<<<< HEAD -======= -func addOCTransport(trans http.RoundTripper, opts *Options) http.RoundTripper { - if opts.DisableTelemetry { - return trans - } - return &ochttp.Transport{ - Base: trans, - Propagation: &httpFormat{}, - } -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type authTransport struct { creds *auth.Credentials base http.RoundTripper diff --git a/vendor/cloud.google.com/go/auth/internal/compute/compute.go b/vendor/cloud.google.com/go/auth/internal/compute/compute.go index 634b590b4f..05c7e8bdd4 100644 --- a/vendor/cloud.google.com/go/auth/internal/compute/compute.go +++ b/vendor/cloud.google.com/go/auth/internal/compute/compute.go @@ -31,12 +31,7 @@ var ( // This is a copy of the gRPC internal googlecloud.OnGCE() func at: // https://github.com/grpc/grpc-go/blob/master/internal/googlecloud/googlecloud.go // The functionality is similar to the metadata.OnGCE() func at: -<<<<<<< HEAD // https://github.com/googleapis/google-cloud-go/blob/main/compute/metadata/metadata.go -======= -// https://github.com/xmenxk/google-cloud-go/blob/main/compute/metadata/metadata.go -// ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The difference is that OnComputeEngine() does not perform HTTP or DNS check on the metadata server. // In particular, OnComputeEngine() will return false on Serverless. func OnComputeEngine() bool { diff --git a/vendor/cloud.google.com/go/auth/internal/internal.go b/vendor/cloud.google.com/go/auth/internal/internal.go index 5a1e2ca845..6a8eab6eb9 100644 --- a/vendor/cloud.google.com/go/auth/internal/internal.go +++ b/vendor/cloud.google.com/go/auth/internal/internal.go @@ -16,11 +16,7 @@ package internal import ( "context" -<<<<<<< HEAD "crypto" -======= - "crypto/rsa" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "crypto/x509" "encoding/json" "encoding/pem" @@ -76,45 +72,27 @@ func DefaultClient() *http.Client { } // ParseKey converts the binary contents of a private key file -<<<<<<< HEAD // to a crypto.Signer. It detects whether the private key is in a // PEM container or not. If so, it extracts the private key // from the PEM container before conversion. It only supports PEM // containers with no passphrase. func ParseKey(key []byte) (crypto.Signer, error) { -======= -// to an *rsa.PrivateKey. It detects whether the private key is in a -// PEM container or not. If so, it extracts the the private key -// from PEM container before conversion. It only supports PEM -// containers with no passphrase.
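The migration from *rsa.PrivateKey to crypto.Signer in this file is what lets ParseKey accept non-RSA PKCS#8 keys. A small stdlib-only illustration of why the interface generalizes:

package main

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/ed25519"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/rsa"
	"fmt"
)

func main() {
	// All three key types satisfy crypto.Signer, which is why the
	// resolved ParseKey returns the interface instead of *rsa.PrivateKey.
	rsaKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	ecKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	_, edKey, _ := ed25519.GenerateKey(rand.Reader)

	for _, s := range []crypto.Signer{rsaKey, ecKey, edKey} {
		fmt.Printf("%T\n", s.Public())
	}
}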
-func ParseKey(key []byte) (*rsa.PrivateKey, error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) block, _ := pem.Decode(key) if block != nil { key = block.Bytes } -<<<<<<< HEAD var parsedKey crypto.PrivateKey var err error parsedKey, err = x509.ParsePKCS8PrivateKey(key) -======= - parsedKey, err := x509.ParsePKCS8PrivateKey(key) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { parsedKey, err = x509.ParsePKCS1PrivateKey(key) if err != nil { return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8: %w", err) } } -<<<<<<< HEAD parsed, ok := parsedKey.(crypto.Signer) if !ok { return nil, errors.New("private key is not a signer") -======= - parsed, ok := parsedKey.(*rsa.PrivateKey) - if !ok { - return nil, errors.New("private key is invalid") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return parsed, nil } @@ -203,10 +181,7 @@ func (p StaticProperty) GetProperty(context.Context) (string, error) { // ComputeUniverseDomainProvider fetches the credentials universe domain from // the google cloud metadata service. type ComputeUniverseDomainProvider struct { -<<<<<<< HEAD MetadataClient *metadata.Client -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) universeDomainOnce sync.Once universeDomain string universeDomainErr error @@ -216,11 +191,7 @@ type ComputeUniverseDomainProvider struct { // metadata service. func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string, error) { c.universeDomainOnce.Do(func() { -<<<<<<< HEAD c.universeDomain, c.universeDomainErr = getMetadataUniverseDomain(ctx, c.MetadataClient) -======= - c.universeDomain, c.universeDomainErr = getMetadataUniverseDomain(ctx) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }) if c.universeDomainErr != nil { return "", c.universeDomainErr @@ -229,7 +200,6 @@ func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string } // httpGetMetadataUniverseDomain is a package var for unit test substitution. -<<<<<<< HEAD var httpGetMetadataUniverseDomain = func(ctx context.Context, client *metadata.Client) (string, error) { ctx, cancel := context.WithTimeout(ctx, 1*time.Second) defer cancel() @@ -238,16 +208,6 @@ var httpGetMetadataUniverseDomain = func(ctx context.Context, client *metadata.C func getMetadataUniverseDomain(ctx context.Context, client *metadata.Client) (string, error) { universeDomain, err := httpGetMetadataUniverseDomain(ctx, client) -======= -var httpGetMetadataUniverseDomain = func(ctx context.Context) (string, error) { - ctx, cancel := context.WithTimeout(ctx, 1*time.Second) - defer cancel() - return metadata.GetWithContext(ctx, "universe/universe-domain") -} - -func getMetadataUniverseDomain(ctx context.Context) (string, error) { - universeDomain, err := httpGetMetadataUniverseDomain(ctx) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err == nil { return universeDomain, nil } @@ -257,12 +217,9 @@ func getMetadataUniverseDomain(ctx context.Context) (string, error) { } return "", err } -<<<<<<< HEAD // FormatIAMServiceAccountResource sets a service account name in an IAM resource // name. 
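getMetadataUniverseDomain now goes through a *metadata.Client so a logger can be attached. A sketch of the underlying lookup, mirroring the one-second timeout above; the fallback handling is simplified, and running on GCE is assumed:

package main

import (
	"context"
	"fmt"
	"log/slog"
	"os"
	"time"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	client := metadata.NewWithOptions(&metadata.Options{
		Logger: slog.New(slog.NewTextHandler(os.Stderr, nil)),
	})
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
	defer cancel()
	ud, err := client.GetWithContext(ctx, "universe/universe-domain")
	if err != nil || ud == "" {
		// The provider falls back to the default universe domain
		// on an empty value or a not-defined response.
		ud = "googleapis.com"
	}
	fmt.Println("universe domain:", ud)
}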
func FormatIAMServiceAccountResource(name string) string { return fmt.Sprintf("projects/-/serviceAccounts/%s", name) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go index a6a65b2f55..9bd55f510c 100644 --- a/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go +++ b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go @@ -111,11 +111,7 @@ func (c *Claims) encode() (string, error) { } // EncodeJWS encodes the data using the provided key as a JSON web signature. -<<<<<<< HEAD func EncodeJWS(header *Header, c *Claims, signer crypto.Signer) (string, error) { -======= -func EncodeJWS(header *Header, c *Claims, key *rsa.PrivateKey) (string, error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) head, err := header.encode() if err != nil { return "", err @@ -127,11 +123,7 @@ func EncodeJWS(header *Header, c *Claims, key *rsa.PrivateKey) (string, error) { ss := fmt.Sprintf("%s.%s", head, claims) h := sha256.New() h.Write([]byte(ss)) -<<<<<<< HEAD sig, err := signer.Sign(rand.Reader, h.Sum(nil), crypto.SHA256) -======= - sig, err := rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return "", err } diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cba.go b/vendor/cloud.google.com/go/auth/internal/transport/cba.go index 8e49691967..2f922f7dfe 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cba.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cba.go @@ -20,10 +20,7 @@ import ( "crypto/x509" "errors" "log" -<<<<<<< HEAD "log/slog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "net" "net/http" "net/url" @@ -67,10 +64,7 @@ type Options struct { UniverseDomain string EnableDirectPath bool EnableDirectPathXds bool -<<<<<<< HEAD Logger *slog.Logger -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // getUniverseDomain returns the default service domain for a given Cloud @@ -271,13 +265,8 @@ func getTransportConfig(opts *Options) (*transportConfig, error) { return &defaultTransportConfig, nil } -<<<<<<< HEAD s2aAddress := GetS2AAddress(opts.Logger) mtlsS2AAddress := GetMTLSS2AAddress(opts.Logger) -======= - s2aAddress := GetS2AAddress() - mtlsS2AAddress := GetMTLSS2AAddress() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if s2aAddress == "" && mtlsS2AAddress == "" { return &defaultTransportConfig, nil } diff --git a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go index 85ae91c014..a633099563 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go @@ -19,10 +19,7 @@ import ( "encoding/json" "fmt" "log" -<<<<<<< HEAD "log/slog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "os" "strconv" "sync" @@ -43,13 +40,8 @@ var ( // GetS2AAddress returns the S2A address to be reached via plaintext connection. // Returns empty string if not set or invalid. 
-<<<<<<< HEAD func GetS2AAddress(logger *slog.Logger) string { getMetadataMTLSAutoConfig(logger) -======= -func GetS2AAddress() string { - getMetadataMTLSAutoConfig() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !mtlsConfiguration.valid() { return "" } @@ -58,13 +50,8 @@ func GetS2AAddress() string { // GetMTLSS2AAddress returns the S2A address to be reached via MTLS connection. // Returns empty string if not set or invalid. -<<<<<<< HEAD func GetMTLSS2AAddress(logger *slog.Logger) string { getMetadataMTLSAutoConfig(logger) -======= -func GetMTLSS2AAddress() string { - getMetadataMTLSAutoConfig() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !mtlsConfiguration.valid() { return "" } @@ -88,24 +75,16 @@ type s2aAddresses struct { MTLSAddress string `json:"mtls_address"` } -<<<<<<< HEAD func getMetadataMTLSAutoConfig(logger *slog.Logger) { var err error mtlsOnce.Do(func() { mtlsConfiguration, err = queryConfig(logger) -======= -func getMetadataMTLSAutoConfig() { - var err error - mtlsOnce.Do(func() { - mtlsConfiguration, err = queryConfig() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { log.Printf("Getting MTLS config failed: %v", err) } }) } -<<<<<<< HEAD var httpGetMetadataMTLSConfig = func(logger *slog.Logger) (string, error) { metadataClient := metadata.NewWithOptions(&metadata.Options{ Logger: logger, @@ -115,14 +94,6 @@ var httpGetMetadataMTLSConfig = func(logger *slog.Logger) (string, error) { func queryConfig(logger *slog.Logger) (*mtlsConfig, error) { resp, err := httpGetMetadataMTLSConfig(logger) -======= -var httpGetMetadataMTLSConfig = func() (string, error) { - return metadata.GetWithContext(context.Background(), configEndpointSuffix) -} - -func queryConfig() (*mtlsConfig, error) { - resp, err := httpGetMetadataMTLSConfig() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, fmt.Errorf("querying MTLS config from MDS endpoint failed: %w", err) } diff --git a/vendor/cloud.google.com/go/auth/internal/transport/transport.go b/vendor/cloud.google.com/go/auth/internal/transport/transport.go index a8cb41247b..992ac40df0 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/transport.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/transport.go @@ -49,10 +49,7 @@ func CloneDetectOptions(oldDo *credentials.DetectOptions) *credentials.DetectOpt // These fields are are pointer types that we just want to use exactly // as the user set, copy the ref Client: oldDo.Client, -<<<<<<< HEAD Logger: oldDo.Logger, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) AuthHandlerOptions: oldDo.AuthHandlerOptions, } diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md index aff47ec173..d9044f1a94 100644 --- a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md +++ b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md @@ -1,6 +1,5 @@ # Changelog -<<<<<<< HEAD ## [0.2.7](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.6...auth/oauth2adapt/v0.2.7) (2025-01-09) @@ -8,8 +7,6 @@ * **auth/oauth2adapt:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## [0.2.6](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.5...auth/oauth2adapt/v0.2.6) (2024-11-21) diff --git 
a/vendor/cloud.google.com/go/auth/threelegged.go b/vendor/cloud.google.com/go/auth/threelegged.go index 820b59eec8..07804dc162 100644 --- a/vendor/cloud.google.com/go/auth/threelegged.go +++ b/vendor/cloud.google.com/go/auth/threelegged.go @@ -20,10 +20,7 @@ import ( "encoding/json" "errors" "fmt" -<<<<<<< HEAD "log/slog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "mime" "net/http" "net/url" @@ -32,10 +29,7 @@ import ( "time" "cloud.google.com/go/auth/internal" -<<<<<<< HEAD "github.com/googleapis/gax-go/v2/internallog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // AuthorizationHandler is a 3-legged-OAuth helper that prompts the user for @@ -77,14 +71,11 @@ type Options3LO struct { // AuthHandlerOpts provides a set of options for doing a // 3-legged OAuth2 flow with a custom [AuthorizationHandler]. Optional. AuthHandlerOpts *AuthorizationHandlerOptions -<<<<<<< HEAD // Logger is used for debug logging. If provided, logging will be enabled // at the loggers configured level. By default logging is disabled unless // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default // logger will be used. Optional. Logger *slog.Logger -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (o *Options3LO) validate() error { @@ -112,13 +103,10 @@ func (o *Options3LO) validate() error { return nil } -<<<<<<< HEAD func (o *Options3LO) logger() *slog.Logger { return internallog.New(o.Logger) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // PKCEOptions holds parameters to support PKCE. type PKCEOptions struct { // Challenge is the un-padded, base64-url-encoded string of the encrypted code verifier. @@ -316,22 +304,15 @@ func fetchToken(ctx context.Context, o *Options3LO, v url.Values) (*Token, strin if o.AuthStyle == StyleInHeader { req.SetBasicAuth(url.QueryEscape(o.ClientID), url.QueryEscape(o.ClientSecret)) } -<<<<<<< HEAD logger := o.logger() logger.DebugContext(ctx, "3LO token request", "request", internallog.HTTPRequest(req, []byte(v.Encode()))) -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Make request resp, body, err := internal.DoRequest(o.client(), req) if err != nil { return nil, refreshToken, err } -<<<<<<< HEAD logger.DebugContext(ctx, "3LO token response", "response", internallog.HTTPResponse(resp, body)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) failureStatus := resp.StatusCode < 200 || resp.StatusCode > 299 tokError := &Error{ Response: resp, diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md index 2a8653f2e4..bcfb5d8165 100644 --- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -1,6 +1,5 @@ # Changes -<<<<<<< HEAD ## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.2...compute/metadata/v0.6.0) (2024-12-13) @@ -8,8 +7,6 @@ * **compute/metadata:** Add debug logging ([#11078](https://github.com/googleapis/google-cloud-go/issues/11078)) ([a816814](https://github.com/googleapis/google-cloud-go/commit/a81681463906e4473570a2f426eb0dc2de64e53f)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.1...compute/metadata/v0.5.2) (2024-09-20) diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go 
index 9b0025a67d..4c18a383a4 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -24,10 +24,7 @@ import ( "encoding/json" "fmt" "io" -<<<<<<< HEAD "log/slog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "net" "net/http" "net/url" @@ -64,14 +61,10 @@ var ( instID = &cachedValue{k: "instance/id", trim: true} ) -<<<<<<< HEAD var defaultClient = &Client{ hc: newDefaultHTTPClient(), logger: slog.New(noOpHandler{}), } -======= -var defaultClient = &Client{hc: newDefaultHTTPClient()} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func newDefaultHTTPClient() *http.Client { return &http.Client{ @@ -419,7 +412,6 @@ func strsContains(ss []string, s string) bool { // A Client provides metadata. type Client struct { -<<<<<<< HEAD hc *http.Client logger *slog.Logger } @@ -431,16 +423,12 @@ type Options struct { // Logger is used to log information about HTTP request and responses. // If not provided, nothing will be logged. Optional. Logger *slog.Logger -======= - hc *http.Client ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // NewClient returns a Client that can be used to fetch metadata. // Returns the client that uses the specified http.Client for HTTP requests. // If nil is specified, returns the default client. func NewClient(c *http.Client) *Client { -<<<<<<< HEAD return NewWithOptions(&Options{ Client: c, }) @@ -460,12 +448,6 @@ func NewWithOptions(opts *Options) *Client { logger = slog.New(noOpHandler{}) } return &Client{hc: client, logger: logger} -======= - if c == nil { - return defaultClient - } - return &Client{hc: c} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // getETag returns a value from the metadata service as well as the associated ETag. 
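Context for the metadata.go hunks above: the resolution keeps the Options-based constructor, so callers can opt into the new slog-based debug logging while `NewClient` remains a thin wrapper. A minimal usage sketch under that assumption — the text handler and the `instance/zone` suffix are only illustrative, and `GetWithContext` is the existing accessor:

```go
package main

import (
	"context"
	"fmt"
	"log/slog"
	"os"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// Logger is optional; when it is nil the client falls back to a
	// no-op handler, matching the defaultClient change in this diff.
	c := metadata.NewWithOptions(&metadata.Options{
		Logger: slog.New(slog.NewTextHandler(os.Stderr, nil)),
	})
	// With a logger set, getETag emits each request/response at debug level.
	zone, err := c.GetWithContext(context.Background(), "instance/zone")
	if err != nil {
		fmt.Fprintln(os.Stderr, "metadata lookup failed (not on GCE?):", err)
		return
	}
	fmt.Println("zone:", zone)
}
```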
@@ -495,20 +477,14 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string req.Header.Set("User-Agent", userAgent) var res *http.Response var reqErr error -<<<<<<< HEAD var body []byte retryer := newRetryer() for { c.logger.DebugContext(ctx, "metadata request", "request", httpRequest(req, nil)) -======= - retryer := newRetryer() - for { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) res, reqErr = c.hc.Do(req) var code int if res != nil { code = res.StatusCode -<<<<<<< HEAD body, err = io.ReadAll(res.Body) if err != nil { res.Body.Close() @@ -516,8 +492,6 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string } c.logger.DebugContext(ctx, "metadata response", "response", httpResponse(res, body)) res.Body.Close() -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry { if res != nil && res.Body != nil { @@ -533,7 +507,6 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string if reqErr != nil { return "", "", reqErr } -<<<<<<< HEAD if res.StatusCode == http.StatusNotFound { return "", "", NotDefinedError(suffix) } @@ -541,20 +514,6 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string return "", "", &Error{Code: res.StatusCode, Message: string(body)} } return string(body), res.Header.Get("Etag"), nil -======= - defer res.Body.Close() - if res.StatusCode == http.StatusNotFound { - return "", "", NotDefinedError(suffix) - } - all, err := io.ReadAll(res.Body) - if err != nil { - return "", "", err - } - if res.StatusCode != 200 { - return "", "", &Error{Code: res.StatusCode, Message: string(all)} - } - return string(all), res.Header.Get("Etag"), nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Get returns a value from the metadata service. diff --git a/vendor/cloud.google.com/go/kms/apiv1/autokey_admin_client.go b/vendor/cloud.google.com/go/kms/apiv1/autokey_admin_client.go index bd78a46098..2e3add07f5 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/autokey_admin_client.go +++ b/vendor/cloud.google.com/go/kms/apiv1/autokey_admin_client.go @@ -20,11 +20,7 @@ import ( "bytes" "context" "fmt" -<<<<<<< HEAD "log/slog" -======= - "io" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "math" "net/http" "net/url" @@ -34,10 +30,6 @@ import ( kmspb "cloud.google.com/go/kms/apiv1/kmspb" longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb" gax "github.com/googleapis/gax-go/v2" -<<<<<<< HEAD -======= - "google.golang.org/api/googleapi" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/option/internaloption" @@ -319,11 +311,8 @@ type autokeyAdminGRPCClient struct { // The x-goog-* metadata to be sent with each request. xGoogHeaders []string -<<<<<<< HEAD logger *slog.Logger -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // NewAutokeyAdminClient creates a new autokey admin client based on gRPC. 
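For the generated KMS clients patched below, the resolution only threads a per-client `*slog.Logger` (sourced from client options via `internaloption.GetLogger`) into `executeRPC`/`executeHTTPRequest`; the public constructor and call signatures are unchanged. A sketch of unaffected caller code, assuming a folder-level AutokeyConfig — the `folders/FOLDER_ID/autokeyConfig` name is a placeholder, not taken from this patch:

```go
package main

import (
	"context"
	"log"

	kms "cloud.google.com/go/kms/apiv1"
	kmspb "cloud.google.com/go/kms/apiv1/kmspb"
)

func main() {
	ctx := context.Background()
	// Same constructor as before the resolution; logging is wired up
	// internally from the supplied client options.
	c, err := kms.NewAutokeyAdminClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	cfg, err := c.GetAutokeyConfig(ctx, &kmspb.GetAutokeyConfigRequest{
		Name: "folders/FOLDER_ID/autokeyConfig", // placeholder folder ID
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("autokey key project: %s", cfg.GetKeyProject())
}
```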
@@ -357,10 +346,7 @@ func NewAutokeyAdminClient(ctx context.Context, opts ...option.ClientOption) (*A connPool: connPool, autokeyAdminClient: kmspb.NewAutokeyAdminClient(connPool), CallOptions: &client.CallOptions, -<<<<<<< HEAD logger: internaloption.GetLogger(opts), -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) operationsClient: longrunningpb.NewOperationsClient(connPool), iamPolicyClient: iampb.NewIAMPolicyClient(connPool), locationsClient: locationpb.NewLocationsClient(connPool), @@ -410,11 +396,8 @@ type autokeyAdminRESTClient struct { // Points back to the CallOptions field of the containing AutokeyAdminClient CallOptions **AutokeyAdminCallOptions -<<<<<<< HEAD logger *slog.Logger -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // NewAutokeyAdminRESTClient creates a new autokey admin rest client. @@ -439,10 +422,7 @@ func NewAutokeyAdminRESTClient(ctx context.Context, opts ...option.ClientOption) endpoint: endpoint, httpClient: httpClient, CallOptions: &callOpts, -<<<<<<< HEAD logger: internaloption.GetLogger(opts), -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } c.setGoogleClientInfo() @@ -495,11 +475,7 @@ func (c *autokeyAdminGRPCClient) UpdateAutokeyConfig(ctx context.Context, req *k var resp *kmspb.AutokeyConfig err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.autokeyAdminClient.UpdateAutokeyConfig, req, settings.GRPC, c.logger, "UpdateAutokeyConfig") -======= - resp, err = c.autokeyAdminClient.UpdateAutokeyConfig(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -517,11 +493,7 @@ func (c *autokeyAdminGRPCClient) GetAutokeyConfig(ctx context.Context, req *kmsp var resp *kmspb.AutokeyConfig err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.autokeyAdminClient.GetAutokeyConfig, req, settings.GRPC, c.logger, "GetAutokeyConfig") -======= - resp, err = c.autokeyAdminClient.GetAutokeyConfig(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -539,11 +511,7 @@ func (c *autokeyAdminGRPCClient) ShowEffectiveAutokeyConfig(ctx context.Context, var resp *kmspb.ShowEffectiveAutokeyConfigResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.autokeyAdminClient.ShowEffectiveAutokeyConfig, req, settings.GRPC, c.logger, "ShowEffectiveAutokeyConfig") -======= - resp, err = c.autokeyAdminClient.ShowEffectiveAutokeyConfig(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -561,11 +529,7 @@ func (c *autokeyAdminGRPCClient) GetLocation(ctx context.Context, req *locationp var resp *locationpb.Location err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.locationsClient.GetLocation, req, settings.GRPC, c.logger, "GetLocation") -======= - resp, err = c.locationsClient.GetLocation(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) 
if err != nil { @@ -594,11 +558,7 @@ func (c *autokeyAdminGRPCClient) ListLocations(ctx context.Context, req *locatio } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.locationsClient.ListLocations, req, settings.GRPC, c.logger, "ListLocations") -======= - resp, err = c.locationsClient.ListLocations(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -633,11 +593,7 @@ func (c *autokeyAdminGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.Ge var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.iamPolicyClient.GetIamPolicy, req, settings.GRPC, c.logger, "GetIamPolicy") -======= - resp, err = c.iamPolicyClient.GetIamPolicy(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -655,11 +611,7 @@ func (c *autokeyAdminGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.Se var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.iamPolicyClient.SetIamPolicy, req, settings.GRPC, c.logger, "SetIamPolicy") -======= - resp, err = c.iamPolicyClient.SetIamPolicy(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -677,11 +629,7 @@ func (c *autokeyAdminGRPCClient) TestIamPermissions(ctx context.Context, req *ia var resp *iampb.TestIamPermissionsResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.iamPolicyClient.TestIamPermissions, req, settings.GRPC, c.logger, "TestIamPermissions") -======= - resp, err = c.iamPolicyClient.TestIamPermissions(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -699,11 +647,7 @@ func (c *autokeyAdminGRPCClient) GetOperation(ctx context.Context, req *longrunn var resp *longrunningpb.Operation err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.operationsClient.GetOperation, req, settings.GRPC, c.logger, "GetOperation") -======= - resp, err = c.operationsClient.GetOperation(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) 
if err != nil { @@ -765,21 +709,7 @@ func (c *autokeyAdminRESTClient) UpdateAutokeyConfig(ctx context.Context, req *k httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "UpdateAutokeyConfig") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -830,21 +760,7 @@ func (c *autokeyAdminRESTClient) GetAutokeyConfig(ctx context.Context, req *kmsp httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetAutokeyConfig") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -894,21 +810,7 @@ func (c *autokeyAdminRESTClient) ShowEffectiveAutokeyConfig(ctx context.Context, httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ShowEffectiveAutokeyConfig") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -958,21 +860,7 @@ func (c *autokeyAdminRESTClient) GetLocation(ctx context.Context, req *locationp httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetLocation") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -1037,28 +925,10 @@ func (c *autokeyAdminRESTClient) ListLocations(ctx context.Context, req *locatio } httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListLocations") if err != nil { return err } -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) - if err != nil { - return err - } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err := unm.Unmarshal(buf, resp); err != nil { return err } @@ -1125,21 +995,7 @@ func (c *autokeyAdminRESTClient) GetIamPolicy(ctx context.Context, req *iampb.Ge httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetIamPolicy") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) 
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -1199,21 +1055,7 @@ func (c *autokeyAdminRESTClient) SetIamPolicy(ctx context.Context, req *iampb.Se httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "SetIamPolicy") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -1275,21 +1117,7 @@ func (c *autokeyAdminRESTClient) TestIamPermissions(ctx context.Context, req *ia httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "TestIamPermissions") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -1339,21 +1167,7 @@ func (c *autokeyAdminRESTClient) GetOperation(ctx context.Context, req *longrunn httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetOperation") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } diff --git a/vendor/cloud.google.com/go/kms/apiv1/autokey_client.go b/vendor/cloud.google.com/go/kms/apiv1/autokey_client.go index 1a31508dcb..d4c7c3bd6d 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/autokey_client.go +++ b/vendor/cloud.google.com/go/kms/apiv1/autokey_client.go @@ -20,11 +20,7 @@ import ( "bytes" "context" "fmt" -<<<<<<< HEAD "log/slog" -======= - "io" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "math" "net/http" "net/url" @@ -36,10 +32,6 @@ import ( lroauto "cloud.google.com/go/longrunning/autogen" longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb" gax "github.com/googleapis/gax-go/v2" -<<<<<<< HEAD -======= - "google.golang.org/api/googleapi" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/option/internaloption" @@ -237,15 +229,9 @@ func (c *AutokeyClient) Connection() *grpc.ClientConn { // CreateKeyHandle creates a new KeyHandle, triggering the // provisioning of a new CryptoKey for CMEK // use with the given resource type in the configured key project and the same -<<<<<<< HEAD // location. GetOperation should // be used to resolve the resulting long-running operation and get the // resulting KeyHandle and -======= -// location. GetOperation should be used to resolve -// the resulting long-running operation and get the resulting -// KeyHandle and ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // CryptoKey. 
func (c *AutokeyClient) CreateKeyHandle(ctx context.Context, req *kmspb.CreateKeyHandleRequest, opts ...gax.CallOption) (*CreateKeyHandleOperation, error) { return c.internalClient.CreateKeyHandle(ctx, req, opts...) @@ -334,11 +320,8 @@ type autokeyGRPCClient struct { // The x-goog-* metadata to be sent with each request. xGoogHeaders []string -<<<<<<< HEAD logger *slog.Logger -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // NewAutokeyClient creates a new autokey client based on gRPC. @@ -382,10 +365,7 @@ func NewAutokeyClient(ctx context.Context, opts ...option.ClientOption) (*Autoke connPool: connPool, autokeyClient: kmspb.NewAutokeyClient(connPool), CallOptions: &client.CallOptions, -<<<<<<< HEAD logger: internaloption.GetLogger(opts), -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) operationsClient: longrunningpb.NewOperationsClient(connPool), iamPolicyClient: iampb.NewIAMPolicyClient(connPool), locationsClient: locationpb.NewLocationsClient(connPool), @@ -451,11 +431,8 @@ type autokeyRESTClient struct { // Points back to the CallOptions field of the containing AutokeyClient CallOptions **AutokeyCallOptions -<<<<<<< HEAD logger *slog.Logger -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // NewAutokeyRESTClient creates a new autokey rest client. @@ -490,10 +467,7 @@ func NewAutokeyRESTClient(ctx context.Context, opts ...option.ClientOption) (*Au endpoint: endpoint, httpClient: httpClient, CallOptions: &callOpts, -<<<<<<< HEAD logger: internaloption.GetLogger(opts), -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } c.setGoogleClientInfo() @@ -556,11 +530,7 @@ func (c *autokeyGRPCClient) CreateKeyHandle(ctx context.Context, req *kmspb.Crea var resp *longrunningpb.Operation err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.autokeyClient.CreateKeyHandle, req, settings.GRPC, c.logger, "CreateKeyHandle") -======= - resp, err = c.autokeyClient.CreateKeyHandle(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -580,11 +550,7 @@ func (c *autokeyGRPCClient) GetKeyHandle(ctx context.Context, req *kmspb.GetKeyH var resp *kmspb.KeyHandle err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.autokeyClient.GetKeyHandle, req, settings.GRPC, c.logger, "GetKeyHandle") -======= - resp, err = c.autokeyClient.GetKeyHandle(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -613,11 +579,7 @@ func (c *autokeyGRPCClient) ListKeyHandles(ctx context.Context, req *kmspb.ListK } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.autokeyClient.ListKeyHandles, req, settings.GRPC, c.logger, "ListKeyHandles") -======= - resp, err = c.autokeyClient.ListKeyHandles(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) 
if err != nil { @@ -652,11 +614,7 @@ func (c *autokeyGRPCClient) GetLocation(ctx context.Context, req *locationpb.Get var resp *locationpb.Location err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.locationsClient.GetLocation, req, settings.GRPC, c.logger, "GetLocation") -======= - resp, err = c.locationsClient.GetLocation(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -685,11 +643,7 @@ func (c *autokeyGRPCClient) ListLocations(ctx context.Context, req *locationpb.L } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.locationsClient.ListLocations, req, settings.GRPC, c.logger, "ListLocations") -======= - resp, err = c.locationsClient.ListLocations(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -724,11 +678,7 @@ func (c *autokeyGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamP var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.iamPolicyClient.GetIamPolicy, req, settings.GRPC, c.logger, "GetIamPolicy") -======= - resp, err = c.iamPolicyClient.GetIamPolicy(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -746,11 +696,7 @@ func (c *autokeyGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamP var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.iamPolicyClient.SetIamPolicy, req, settings.GRPC, c.logger, "SetIamPolicy") -======= - resp, err = c.iamPolicyClient.SetIamPolicy(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -768,11 +714,7 @@ func (c *autokeyGRPCClient) TestIamPermissions(ctx context.Context, req *iampb.T var resp *iampb.TestIamPermissionsResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.iamPolicyClient.TestIamPermissions, req, settings.GRPC, c.logger, "TestIamPermissions") -======= - resp, err = c.iamPolicyClient.TestIamPermissions(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -790,11 +732,7 @@ func (c *autokeyGRPCClient) GetOperation(ctx context.Context, req *longrunningpb var resp *longrunningpb.Operation err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.operationsClient.GetOperation, req, settings.GRPC, c.logger, "GetOperation") -======= - resp, err = c.operationsClient.GetOperation(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -806,15 +744,9 @@ func (c *autokeyGRPCClient) GetOperation(ctx context.Context, req *longrunningpb // CreateKeyHandle creates a new KeyHandle, triggering the // provisioning of a new CryptoKey for CMEK // use with the given resource type in the configured key project and the same -<<<<<<< HEAD // location. 
GetOperation should // be used to resolve the resulting long-running operation and get the // resulting KeyHandle and -======= -// location. GetOperation should be used to resolve -// the resulting long-running operation and get the resulting -// KeyHandle and ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // CryptoKey. func (c *autokeyRESTClient) CreateKeyHandle(ctx context.Context, req *kmspb.CreateKeyHandleRequest, opts ...gax.CallOption) (*CreateKeyHandleOperation, error) { m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} @@ -857,28 +789,10 @@ func (c *autokeyRESTClient) CreateKeyHandle(ctx context.Context, req *kmspb.Crea httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "CreateKeyHandle") if err != nil { return err } -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) - if err != nil { - return err - } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err := unm.Unmarshal(buf, resp); err != nil { return err } @@ -929,21 +843,7 @@ func (c *autokeyRESTClient) GetKeyHandle(ctx context.Context, req *kmspb.GetKeyH httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetKeyHandle") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -1008,28 +908,10 @@ func (c *autokeyRESTClient) ListKeyHandles(ctx context.Context, req *kmspb.ListK } httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListKeyHandles") if err != nil { return err } -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) - if err != nil { - return err - } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err := unm.Unmarshal(buf, resp); err != nil { return err } @@ -1092,21 +974,7 @@ func (c *autokeyRESTClient) GetLocation(ctx context.Context, req *locationpb.Get httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetLocation") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -1171,28 +1039,10 @@ func (c *autokeyRESTClient) ListLocations(ctx context.Context, req *locationpb.L } httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListLocations") if err != nil { return err } -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - 
buf, err := io.ReadAll(httpRsp.Body) - if err != nil { - return err - } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err := unm.Unmarshal(buf, resp); err != nil { return err } @@ -1259,21 +1109,7 @@ func (c *autokeyRESTClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamP httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetIamPolicy") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -1333,21 +1169,7 @@ func (c *autokeyRESTClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamP httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "SetIamPolicy") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -1409,21 +1231,7 @@ func (c *autokeyRESTClient) TestIamPermissions(ctx context.Context, req *iampb.T httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "TestIamPermissions") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -1473,21 +1281,7 @@ func (c *autokeyRESTClient) GetOperation(ctx context.Context, req *longrunningpb httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetOperation") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } diff --git a/vendor/cloud.google.com/go/kms/apiv1/auxiliary.go b/vendor/cloud.google.com/go/kms/apiv1/auxiliary.go index 07fb4b521f..57eff9a98d 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/auxiliary.go +++ b/vendor/cloud.google.com/go/kms/apiv1/auxiliary.go @@ -111,11 +111,7 @@ type CryptoKeyIterator struct { InternalFetch func(pageSize int, pageToken string) (results []*kmspb.CryptoKey, nextPageToken string, err error) } -<<<<<<< HEAD // PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details. -======= -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. 
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (it *CryptoKeyIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } @@ -162,11 +158,7 @@ type CryptoKeyVersionIterator struct { InternalFetch func(pageSize int, pageToken string) (results []*kmspb.CryptoKeyVersion, nextPageToken string, err error) } -<<<<<<< HEAD // PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details. -======= -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (it *CryptoKeyVersionIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } @@ -213,11 +205,7 @@ type EkmConnectionIterator struct { InternalFetch func(pageSize int, pageToken string) (results []*kmspb.EkmConnection, nextPageToken string, err error) } -<<<<<<< HEAD // PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details. -======= -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (it *EkmConnectionIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } @@ -264,11 +252,7 @@ type ImportJobIterator struct { InternalFetch func(pageSize int, pageToken string) (results []*kmspb.ImportJob, nextPageToken string, err error) } -<<<<<<< HEAD // PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details. -======= -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (it *ImportJobIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } @@ -315,11 +299,7 @@ type KeyHandleIterator struct { InternalFetch func(pageSize int, pageToken string) (results []*kmspb.KeyHandle, nextPageToken string, err error) } -<<<<<<< HEAD // PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details. -======= -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (it *KeyHandleIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } @@ -366,11 +346,7 @@ type KeyRingIterator struct { InternalFetch func(pageSize int, pageToken string) (results []*kmspb.KeyRing, nextPageToken string, err error) } -<<<<<<< HEAD // PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details. -======= -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (it *KeyRingIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } @@ -417,11 +393,7 @@ type LocationIterator struct { InternalFetch func(pageSize int, pageToken string) (results []*locationpb.Location, nextPageToken string, err error) } -<<<<<<< HEAD // PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details. -======= -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. 
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (it *LocationIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } diff --git a/vendor/cloud.google.com/go/kms/apiv1/doc.go b/vendor/cloud.google.com/go/kms/apiv1/doc.go index 52f9925d57..8e9ced1a3b 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/doc.go +++ b/vendor/cloud.google.com/go/kms/apiv1/doc.go @@ -36,10 +36,7 @@ // // To get started with this package, create a client. // -<<<<<<< HEAD // // go get cloud.google.com/go/kms/apiv1@latest -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // ctx := context.Background() // // This snippet has been automatically generated and should be regarded as a code template only. // // It will require modifications to work: @@ -58,23 +55,7 @@ // // # Using the Client // -<<<<<<< HEAD // The following is an example of making an API call with the newly created client, mentioned above. -======= -// The following is an example of making an API call with the newly created client. -// -// ctx := context.Background() -// // This snippet has been automatically generated and should be regarded as a code template only. -// // It will require modifications to work: -// // - It may require correct/in-range values for request initialization. -// // - It may require specifying regional endpoints when creating the service client as shown in: -// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options -// c, err := kms.NewAutokeyClient(ctx) -// if err != nil { -// // TODO: Handle error. -// } -// defer c.Close() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // req := &kmspb.CreateKeyHandleRequest{ // // TODO: Fill request struct fields. @@ -106,34 +87,3 @@ // [Debugging Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Debugging // [Inspecting errors]: https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors package kms // import "cloud.google.com/go/kms/apiv1" -<<<<<<< HEAD -======= - -import ( - "context" - - "google.golang.org/api/option" -) - -// For more information on implementing a client constructor hook, see -// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors. -type clientHookParams struct{} -type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error) - -var versionClient string - -func getVersionClient() string { - if versionClient == "" { - return "UNKNOWN" - } - return versionClient -} - -// DefaultAuthScopes reports the default set of authentication scopes to use with this package. 
-func DefaultAuthScopes() []string { - return []string{ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloudkms", - } -} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go b/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go index da180ab529..7488bb432c 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go +++ b/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go @@ -20,11 +20,7 @@ import ( "bytes" "context" "fmt" -<<<<<<< HEAD "log/slog" -======= - "io" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "math" "net/http" "net/url" @@ -34,10 +30,6 @@ import ( kmspb "cloud.google.com/go/kms/apiv1/kmspb" longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb" gax "github.com/googleapis/gax-go/v2" -<<<<<<< HEAD -======= - "google.golang.org/api/googleapi" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/option/internaloption" @@ -377,11 +369,8 @@ type ekmGRPCClient struct { // The x-goog-* metadata to be sent with each request. xGoogHeaders []string -<<<<<<< HEAD logger *slog.Logger -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // NewEkmClient creates a new ekm service client based on gRPC. @@ -413,10 +402,7 @@ func NewEkmClient(ctx context.Context, opts ...option.ClientOption) (*EkmClient, connPool: connPool, ekmClient: kmspb.NewEkmServiceClient(connPool), CallOptions: &client.CallOptions, -<<<<<<< HEAD logger: internaloption.GetLogger(opts), -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) operationsClient: longrunningpb.NewOperationsClient(connPool), iamPolicyClient: iampb.NewIAMPolicyClient(connPool), locationsClient: locationpb.NewLocationsClient(connPool), @@ -466,11 +452,8 @@ type ekmRESTClient struct { // Points back to the CallOptions field of the containing EkmClient CallOptions **EkmCallOptions -<<<<<<< HEAD logger *slog.Logger -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // NewEkmRESTClient creates a new ekm service rest client. @@ -493,10 +476,7 @@ func NewEkmRESTClient(ctx context.Context, opts ...option.ClientOption) (*EkmCli endpoint: endpoint, httpClient: httpClient, CallOptions: &callOpts, -<<<<<<< HEAD logger: internaloption.GetLogger(opts), -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } c.setGoogleClientInfo() @@ -560,11 +540,7 @@ func (c *ekmGRPCClient) ListEkmConnections(ctx context.Context, req *kmspb.ListE } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.ekmClient.ListEkmConnections, req, settings.GRPC, c.logger, "ListEkmConnections") -======= - resp, err = c.ekmClient.ListEkmConnections(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -599,11 +575,7 @@ func (c *ekmGRPCClient) GetEkmConnection(ctx context.Context, req *kmspb.GetEkmC var resp *kmspb.EkmConnection err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.ekmClient.GetEkmConnection, req, settings.GRPC, c.logger, "GetEkmConnection") -======= - resp, err = c.ekmClient.GetEkmConnection(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) 
if err != nil { @@ -621,11 +593,7 @@ func (c *ekmGRPCClient) CreateEkmConnection(ctx context.Context, req *kmspb.Crea var resp *kmspb.EkmConnection err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.ekmClient.CreateEkmConnection, req, settings.GRPC, c.logger, "CreateEkmConnection") -======= - resp, err = c.ekmClient.CreateEkmConnection(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -643,11 +611,7 @@ func (c *ekmGRPCClient) UpdateEkmConnection(ctx context.Context, req *kmspb.Upda var resp *kmspb.EkmConnection err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.ekmClient.UpdateEkmConnection, req, settings.GRPC, c.logger, "UpdateEkmConnection") -======= - resp, err = c.ekmClient.UpdateEkmConnection(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -665,11 +629,7 @@ func (c *ekmGRPCClient) GetEkmConfig(ctx context.Context, req *kmspb.GetEkmConfi var resp *kmspb.EkmConfig err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.ekmClient.GetEkmConfig, req, settings.GRPC, c.logger, "GetEkmConfig") -======= - resp, err = c.ekmClient.GetEkmConfig(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -687,11 +647,7 @@ func (c *ekmGRPCClient) UpdateEkmConfig(ctx context.Context, req *kmspb.UpdateEk var resp *kmspb.EkmConfig err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.ekmClient.UpdateEkmConfig, req, settings.GRPC, c.logger, "UpdateEkmConfig") -======= - resp, err = c.ekmClient.UpdateEkmConfig(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -709,11 +665,7 @@ func (c *ekmGRPCClient) VerifyConnectivity(ctx context.Context, req *kmspb.Verif var resp *kmspb.VerifyConnectivityResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.ekmClient.VerifyConnectivity, req, settings.GRPC, c.logger, "VerifyConnectivity") -======= - resp, err = c.ekmClient.VerifyConnectivity(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -731,11 +683,7 @@ func (c *ekmGRPCClient) GetLocation(ctx context.Context, req *locationpb.GetLoca var resp *locationpb.Location err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.locationsClient.GetLocation, req, settings.GRPC, c.logger, "GetLocation") -======= - resp, err = c.locationsClient.GetLocation(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) 
if err != nil { @@ -764,11 +712,7 @@ func (c *ekmGRPCClient) ListLocations(ctx context.Context, req *locationpb.ListL } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.locationsClient.ListLocations, req, settings.GRPC, c.logger, "ListLocations") -======= - resp, err = c.locationsClient.ListLocations(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -803,11 +747,7 @@ func (c *ekmGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolic var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.iamPolicyClient.GetIamPolicy, req, settings.GRPC, c.logger, "GetIamPolicy") -======= - resp, err = c.iamPolicyClient.GetIamPolicy(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -825,11 +765,7 @@ func (c *ekmGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolic var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.iamPolicyClient.SetIamPolicy, req, settings.GRPC, c.logger, "SetIamPolicy") -======= - resp, err = c.iamPolicyClient.SetIamPolicy(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -847,11 +783,7 @@ func (c *ekmGRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestI var resp *iampb.TestIamPermissionsResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.iamPolicyClient.TestIamPermissions, req, settings.GRPC, c.logger, "TestIamPermissions") -======= - resp, err = c.iamPolicyClient.TestIamPermissions(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -869,11 +801,7 @@ func (c *ekmGRPCClient) GetOperation(ctx context.Context, req *longrunningpb.Get var resp *longrunningpb.Operation err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.operationsClient.GetOperation, req, settings.GRPC, c.logger, "GetOperation") -======= - resp, err = c.operationsClient.GetOperation(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) 
if err != nil { @@ -933,28 +861,10 @@ func (c *ekmRESTClient) ListEkmConnections(ctx context.Context, req *kmspb.ListE } httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListEkmConnections") if err != nil { return err } -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) - if err != nil { - return err - } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err := unm.Unmarshal(buf, resp); err != nil { return err } @@ -1018,21 +928,7 @@ func (c *ekmRESTClient) GetEkmConnection(ctx context.Context, req *kmspb.GetEkmC httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetEkmConnection") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -1091,21 +987,7 @@ func (c *ekmRESTClient) CreateEkmConnection(ctx context.Context, req *kmspb.Crea httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "CreateEkmConnection") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -1169,21 +1051,7 @@ func (c *ekmRESTClient) UpdateEkmConnection(ctx context.Context, req *kmspb.Upda httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "UpdateEkmConnection") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -1234,21 +1102,7 @@ func (c *ekmRESTClient) GetEkmConfig(ctx context.Context, req *kmspb.GetEkmConfi httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetEkmConfig") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -1313,21 +1167,7 @@ func (c *ekmRESTClient) UpdateEkmConfig(ctx context.Context, req *kmspb.UpdateEk httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "UpdateEkmConfig") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := 
io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -1381,21 +1221,7 @@ func (c *ekmRESTClient) VerifyConnectivity(ctx context.Context, req *kmspb.Verif httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "VerifyConnectivity") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -1445,21 +1271,7 @@ func (c *ekmRESTClient) GetLocation(ctx context.Context, req *locationpb.GetLoca httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetLocation") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -1524,28 +1336,10 @@ func (c *ekmRESTClient) ListLocations(ctx context.Context, req *locationpb.ListL } httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListLocations") if err != nil { return err } -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) - if err != nil { - return err - } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err := unm.Unmarshal(buf, resp); err != nil { return err } @@ -1612,21 +1406,7 @@ func (c *ekmRESTClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolic httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetIamPolicy") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -1686,21 +1466,7 @@ func (c *ekmRESTClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolic httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "SetIamPolicy") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -1762,21 +1528,7 @@ func (c *ekmRESTClient) TestIamPermissions(ctx context.Context, req *iampb.TestI httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "TestIamPermissions") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = 
googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -1826,21 +1578,7 @@ func (c *ekmRESTClient) GetOperation(ctx context.Context, req *longrunningpb.Get httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetOperation") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } diff --git a/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go b/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go index af7a60594d..07f8e4ba05 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go +++ b/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go @@ -20,11 +20,7 @@ import ( "bytes" "context" "fmt" -<<<<<<< HEAD "log/slog" -======= - "io" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "math" "net/http" "net/url" @@ -34,10 +30,6 @@ import ( kmspb "cloud.google.com/go/kms/apiv1/kmspb" longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb" gax "github.com/googleapis/gax-go/v2" -<<<<<<< HEAD -======= - "google.golang.org/api/googleapi" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/option/internaloption" @@ -1137,11 +1129,8 @@ type keyManagementGRPCClient struct { // The x-goog-* metadata to be sent with each request. xGoogHeaders []string -<<<<<<< HEAD logger *slog.Logger -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // NewKeyManagementClient creates a new key management service client based on gRPC. @@ -1182,10 +1171,7 @@ func NewKeyManagementClient(ctx context.Context, opts ...option.ClientOption) (* connPool: connPool, keyManagementClient: kmspb.NewKeyManagementServiceClient(connPool), CallOptions: &client.CallOptions, -<<<<<<< HEAD logger: internaloption.GetLogger(opts), -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) operationsClient: longrunningpb.NewOperationsClient(connPool), iamPolicyClient: iampb.NewIAMPolicyClient(connPool), locationsClient: locationpb.NewLocationsClient(connPool), @@ -1235,11 +1221,8 @@ type keyManagementRESTClient struct { // Points back to the CallOptions field of the containing KeyManagementClient CallOptions **KeyManagementCallOptions -<<<<<<< HEAD logger *slog.Logger -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // NewKeyManagementRESTClient creates a new key management service rest client. 
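
On the REST side, the same refactor replaces the previously inlined httpClient.Do / googleapi.CheckResponse / io.ReadAll sequence with a single executeHTTPRequest call that takes the logger, the JSON request payload, and the RPC name. The steps the old code performed are visible in the removed lines above; the sketch below reproduces them behind one function, as a minimal stand-in only (executeHTTPRequestSketch is a hypothetical name, and the helper's exact use of the body argument is assumed).

package clientsketch

import (
	"context"
	"io"
	"log/slog"
	"net/http"

	"google.golang.org/api/googleapi"
)

// executeHTTPRequestSketch mirrors the steps the old inlined code performed:
// send the request, check the HTTP status, read the body, and log around
// them. The body argument mirrors the jsonReq value at call sites; here it
// is only logged by size.
func executeHTTPRequestSketch(ctx context.Context, client *http.Client, req *http.Request, logger *slog.Logger, body []byte, rpcName string) ([]byte, error) {
	if logger != nil {
		logger.DebugContext(ctx, "sending request", "rpc", rpcName, "requestBytes", len(body))
	}
	httpRsp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer httpRsp.Body.Close()
	// Reject non-2xx responses, exactly as the removed inline code did.
	if err := googleapi.CheckResponse(httpRsp); err != nil {
		return nil, err
	}
	buf, err := io.ReadAll(httpRsp.Body)
	if logger != nil {
		logger.DebugContext(ctx, "received response", "rpc", rpcName, "status", httpRsp.StatusCode, "error", err)
	}
	return buf, err
}
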
@@ -1271,10 +1254,7 @@ func NewKeyManagementRESTClient(ctx context.Context, opts ...option.ClientOption endpoint: endpoint, httpClient: httpClient, CallOptions: &callOpts, -<<<<<<< HEAD logger: internaloption.GetLogger(opts), -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } c.setGoogleClientInfo() @@ -1338,11 +1318,7 @@ func (c *keyManagementGRPCClient) ListKeyRings(ctx context.Context, req *kmspb.L } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.keyManagementClient.ListKeyRings, req, settings.GRPC, c.logger, "ListKeyRings") -======= - resp, err = c.keyManagementClient.ListKeyRings(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1388,11 +1364,7 @@ func (c *keyManagementGRPCClient) ListCryptoKeys(ctx context.Context, req *kmspb } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.keyManagementClient.ListCryptoKeys, req, settings.GRPC, c.logger, "ListCryptoKeys") -======= - resp, err = c.keyManagementClient.ListCryptoKeys(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1438,11 +1410,7 @@ func (c *keyManagementGRPCClient) ListCryptoKeyVersions(ctx context.Context, req } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.keyManagementClient.ListCryptoKeyVersions, req, settings.GRPC, c.logger, "ListCryptoKeyVersions") -======= - resp, err = c.keyManagementClient.ListCryptoKeyVersions(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1488,11 +1456,7 @@ func (c *keyManagementGRPCClient) ListImportJobs(ctx context.Context, req *kmspb } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.keyManagementClient.ListImportJobs, req, settings.GRPC, c.logger, "ListImportJobs") -======= - resp, err = c.keyManagementClient.ListImportJobs(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1527,11 +1491,7 @@ func (c *keyManagementGRPCClient) GetKeyRing(ctx context.Context, req *kmspb.Get var resp *kmspb.KeyRing err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.keyManagementClient.GetKeyRing, req, settings.GRPC, c.logger, "GetKeyRing") -======= - resp, err = c.keyManagementClient.GetKeyRing(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1549,11 +1509,7 @@ func (c *keyManagementGRPCClient) GetCryptoKey(ctx context.Context, req *kmspb.G var resp *kmspb.CryptoKey err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.keyManagementClient.GetCryptoKey, req, settings.GRPC, c.logger, "GetCryptoKey") -======= - resp, err = c.keyManagementClient.GetCryptoKey(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) 
if err != nil { @@ -1571,11 +1527,7 @@ func (c *keyManagementGRPCClient) GetCryptoKeyVersion(ctx context.Context, req * var resp *kmspb.CryptoKeyVersion err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.keyManagementClient.GetCryptoKeyVersion, req, settings.GRPC, c.logger, "GetCryptoKeyVersion") -======= - resp, err = c.keyManagementClient.GetCryptoKeyVersion(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1593,11 +1545,7 @@ func (c *keyManagementGRPCClient) GetPublicKey(ctx context.Context, req *kmspb.G var resp *kmspb.PublicKey err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.keyManagementClient.GetPublicKey, req, settings.GRPC, c.logger, "GetPublicKey") -======= - resp, err = c.keyManagementClient.GetPublicKey(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1615,11 +1563,7 @@ func (c *keyManagementGRPCClient) GetImportJob(ctx context.Context, req *kmspb.G var resp *kmspb.ImportJob err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.keyManagementClient.GetImportJob, req, settings.GRPC, c.logger, "GetImportJob") -======= - resp, err = c.keyManagementClient.GetImportJob(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1637,11 +1581,7 @@ func (c *keyManagementGRPCClient) CreateKeyRing(ctx context.Context, req *kmspb. var resp *kmspb.KeyRing err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.keyManagementClient.CreateKeyRing, req, settings.GRPC, c.logger, "CreateKeyRing") -======= - resp, err = c.keyManagementClient.CreateKeyRing(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1659,11 +1599,7 @@ func (c *keyManagementGRPCClient) CreateCryptoKey(ctx context.Context, req *kmsp var resp *kmspb.CryptoKey err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.keyManagementClient.CreateCryptoKey, req, settings.GRPC, c.logger, "CreateCryptoKey") -======= - resp, err = c.keyManagementClient.CreateCryptoKey(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1681,11 +1617,7 @@ func (c *keyManagementGRPCClient) CreateCryptoKeyVersion(ctx context.Context, re var resp *kmspb.CryptoKeyVersion err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.keyManagementClient.CreateCryptoKeyVersion, req, settings.GRPC, c.logger, "CreateCryptoKeyVersion") -======= - resp, err = c.keyManagementClient.CreateCryptoKeyVersion(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) 
if err != nil { @@ -1703,11 +1635,7 @@ func (c *keyManagementGRPCClient) ImportCryptoKeyVersion(ctx context.Context, re var resp *kmspb.CryptoKeyVersion err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.keyManagementClient.ImportCryptoKeyVersion, req, settings.GRPC, c.logger, "ImportCryptoKeyVersion") -======= - resp, err = c.keyManagementClient.ImportCryptoKeyVersion(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1725,11 +1653,7 @@ func (c *keyManagementGRPCClient) CreateImportJob(ctx context.Context, req *kmsp var resp *kmspb.ImportJob err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.keyManagementClient.CreateImportJob, req, settings.GRPC, c.logger, "CreateImportJob") -======= - resp, err = c.keyManagementClient.CreateImportJob(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1747,11 +1671,7 @@ func (c *keyManagementGRPCClient) UpdateCryptoKey(ctx context.Context, req *kmsp var resp *kmspb.CryptoKey err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.keyManagementClient.UpdateCryptoKey, req, settings.GRPC, c.logger, "UpdateCryptoKey") -======= - resp, err = c.keyManagementClient.UpdateCryptoKey(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1769,11 +1689,7 @@ func (c *keyManagementGRPCClient) UpdateCryptoKeyVersion(ctx context.Context, re var resp *kmspb.CryptoKeyVersion err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.keyManagementClient.UpdateCryptoKeyVersion, req, settings.GRPC, c.logger, "UpdateCryptoKeyVersion") -======= - resp, err = c.keyManagementClient.UpdateCryptoKeyVersion(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1791,11 +1707,7 @@ func (c *keyManagementGRPCClient) UpdateCryptoKeyPrimaryVersion(ctx context.Cont var resp *kmspb.CryptoKey err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.keyManagementClient.UpdateCryptoKeyPrimaryVersion, req, settings.GRPC, c.logger, "UpdateCryptoKeyPrimaryVersion") -======= - resp, err = c.keyManagementClient.UpdateCryptoKeyPrimaryVersion(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1813,11 +1725,7 @@ func (c *keyManagementGRPCClient) DestroyCryptoKeyVersion(ctx context.Context, r var resp *kmspb.CryptoKeyVersion err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.keyManagementClient.DestroyCryptoKeyVersion, req, settings.GRPC, c.logger, "DestroyCryptoKeyVersion") -======= - resp, err = c.keyManagementClient.DestroyCryptoKeyVersion(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) 
if err != nil { @@ -1835,11 +1743,7 @@ func (c *keyManagementGRPCClient) RestoreCryptoKeyVersion(ctx context.Context, r var resp *kmspb.CryptoKeyVersion err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.keyManagementClient.RestoreCryptoKeyVersion, req, settings.GRPC, c.logger, "RestoreCryptoKeyVersion") -======= - resp, err = c.keyManagementClient.RestoreCryptoKeyVersion(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1857,11 +1761,7 @@ func (c *keyManagementGRPCClient) Encrypt(ctx context.Context, req *kmspb.Encryp var resp *kmspb.EncryptResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.keyManagementClient.Encrypt, req, settings.GRPC, c.logger, "Encrypt") -======= - resp, err = c.keyManagementClient.Encrypt(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1879,11 +1779,7 @@ func (c *keyManagementGRPCClient) Decrypt(ctx context.Context, req *kmspb.Decryp var resp *kmspb.DecryptResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.keyManagementClient.Decrypt, req, settings.GRPC, c.logger, "Decrypt") -======= - resp, err = c.keyManagementClient.Decrypt(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1901,11 +1797,7 @@ func (c *keyManagementGRPCClient) RawEncrypt(ctx context.Context, req *kmspb.Raw var resp *kmspb.RawEncryptResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.keyManagementClient.RawEncrypt, req, settings.GRPC, c.logger, "RawEncrypt") -======= - resp, err = c.keyManagementClient.RawEncrypt(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1923,11 +1815,7 @@ func (c *keyManagementGRPCClient) RawDecrypt(ctx context.Context, req *kmspb.Raw var resp *kmspb.RawDecryptResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.keyManagementClient.RawDecrypt, req, settings.GRPC, c.logger, "RawDecrypt") -======= - resp, err = c.keyManagementClient.RawDecrypt(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1945,11 +1833,7 @@ func (c *keyManagementGRPCClient) AsymmetricSign(ctx context.Context, req *kmspb var resp *kmspb.AsymmetricSignResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.keyManagementClient.AsymmetricSign, req, settings.GRPC, c.logger, "AsymmetricSign") -======= - resp, err = c.keyManagementClient.AsymmetricSign(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) 
if err != nil { @@ -1967,11 +1851,7 @@ func (c *keyManagementGRPCClient) AsymmetricDecrypt(ctx context.Context, req *km var resp *kmspb.AsymmetricDecryptResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.keyManagementClient.AsymmetricDecrypt, req, settings.GRPC, c.logger, "AsymmetricDecrypt") -======= - resp, err = c.keyManagementClient.AsymmetricDecrypt(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1989,11 +1869,7 @@ func (c *keyManagementGRPCClient) MacSign(ctx context.Context, req *kmspb.MacSig var resp *kmspb.MacSignResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.keyManagementClient.MacSign, req, settings.GRPC, c.logger, "MacSign") -======= - resp, err = c.keyManagementClient.MacSign(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -2011,11 +1887,7 @@ func (c *keyManagementGRPCClient) MacVerify(ctx context.Context, req *kmspb.MacV var resp *kmspb.MacVerifyResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.keyManagementClient.MacVerify, req, settings.GRPC, c.logger, "MacVerify") -======= - resp, err = c.keyManagementClient.MacVerify(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -2033,11 +1905,7 @@ func (c *keyManagementGRPCClient) GenerateRandomBytes(ctx context.Context, req * var resp *kmspb.GenerateRandomBytesResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.keyManagementClient.GenerateRandomBytes, req, settings.GRPC, c.logger, "GenerateRandomBytes") -======= - resp, err = c.keyManagementClient.GenerateRandomBytes(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -2055,11 +1923,7 @@ func (c *keyManagementGRPCClient) GetLocation(ctx context.Context, req *location var resp *locationpb.Location err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.locationsClient.GetLocation, req, settings.GRPC, c.logger, "GetLocation") -======= - resp, err = c.locationsClient.GetLocation(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -2088,11 +1952,7 @@ func (c *keyManagementGRPCClient) ListLocations(ctx context.Context, req *locati } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.locationsClient.ListLocations, req, settings.GRPC, c.logger, "ListLocations") -======= - resp, err = c.locationsClient.ListLocations(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) 
if err != nil { @@ -2127,11 +1987,7 @@ func (c *keyManagementGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.G var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.iamPolicyClient.GetIamPolicy, req, settings.GRPC, c.logger, "GetIamPolicy") -======= - resp, err = c.iamPolicyClient.GetIamPolicy(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -2149,11 +2005,7 @@ func (c *keyManagementGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.S var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.iamPolicyClient.SetIamPolicy, req, settings.GRPC, c.logger, "SetIamPolicy") -======= - resp, err = c.iamPolicyClient.SetIamPolicy(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -2171,11 +2023,7 @@ func (c *keyManagementGRPCClient) TestIamPermissions(ctx context.Context, req *i var resp *iampb.TestIamPermissionsResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.iamPolicyClient.TestIamPermissions, req, settings.GRPC, c.logger, "TestIamPermissions") -======= - resp, err = c.iamPolicyClient.TestIamPermissions(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -2193,11 +2041,7 @@ func (c *keyManagementGRPCClient) GetOperation(ctx context.Context, req *longrun var resp *longrunningpb.Operation err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.operationsClient.GetOperation, req, settings.GRPC, c.logger, "GetOperation") -======= - resp, err = c.operationsClient.GetOperation(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) 
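
None of these key_management_client.go hunks change the client's public surface, so existing callers are unaffected. For orientation, a minimal usage sketch against the public API follows; the project and location values are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	kms "cloud.google.com/go/kms/apiv1"
	"cloud.google.com/go/kms/apiv1/kmspb"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	// NewKeyManagementClient uses the gRPC transport shown above;
	// NewKeyManagementRESTClient is the REST equivalent.
	client, err := kms.NewKeyManagementClient(ctx)
	if err != nil {
		log.Fatalf("creating KMS client: %v", err)
	}
	defer client.Close()

	// "my-project" is a placeholder; list key rings in the global location.
	it := client.ListKeyRings(ctx, &kmspb.ListKeyRingsRequest{
		Parent: "projects/my-project/locations/global",
	})
	for {
		kr, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatalf("listing key rings: %v", err)
		}
		fmt.Println(kr.GetName())
	}
}
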
if err != nil { @@ -2257,28 +2101,10 @@ func (c *keyManagementRESTClient) ListKeyRings(ctx context.Context, req *kmspb.L } httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListKeyRings") if err != nil { return err } -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) - if err != nil { - return err - } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err := unm.Unmarshal(buf, resp); err != nil { return err } @@ -2362,28 +2188,10 @@ func (c *keyManagementRESTClient) ListCryptoKeys(ctx context.Context, req *kmspb } httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListCryptoKeys") if err != nil { return err } -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) - if err != nil { - return err - } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err := unm.Unmarshal(buf, resp); err != nil { return err } @@ -2467,28 +2275,10 @@ func (c *keyManagementRESTClient) ListCryptoKeyVersions(ctx context.Context, req } httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListCryptoKeyVersions") if err != nil { return err } -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) - if err != nil { - return err - } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err := unm.Unmarshal(buf, resp); err != nil { return err } @@ -2569,28 +2359,10 @@ func (c *keyManagementRESTClient) ListImportJobs(ctx context.Context, req *kmspb } httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListImportJobs") if err != nil { return err } -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) - if err != nil { - return err - } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err := unm.Unmarshal(buf, resp); err != nil { return err } @@ -2653,21 +2425,7 @@ func (c *keyManagementRESTClient) GetKeyRing(ctx context.Context, req *kmspb.Get httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetKeyRing") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -2719,21 +2477,7 @@ func (c *keyManagementRESTClient) GetCryptoKey(ctx context.Context, req *kmspb.G httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetCryptoKey") -======= - httpRsp, err := 
c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -2784,21 +2528,7 @@ func (c *keyManagementRESTClient) GetCryptoKeyVersion(ctx context.Context, req * httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetCryptoKeyVersion") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -2853,21 +2583,7 @@ func (c *keyManagementRESTClient) GetPublicKey(ctx context.Context, req *kmspb.G httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetPublicKey") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -2917,21 +2633,7 @@ func (c *keyManagementRESTClient) GetImportJob(ctx context.Context, req *kmspb.G httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetImportJob") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -2990,21 +2692,7 @@ func (c *keyManagementRESTClient) CreateKeyRing(ctx context.Context, req *kmspb. 
httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "CreateKeyRing") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -3070,21 +2758,7 @@ func (c *keyManagementRESTClient) CreateCryptoKey(ctx context.Context, req *kmsp httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "CreateCryptoKey") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -3146,21 +2820,7 @@ func (c *keyManagementRESTClient) CreateCryptoKeyVersion(ctx context.Context, re httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "CreateCryptoKeyVersion") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -3223,21 +2883,7 @@ func (c *keyManagementRESTClient) ImportCryptoKeyVersion(ctx context.Context, re httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "ImportCryptoKeyVersion") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -3299,21 +2945,7 @@ func (c *keyManagementRESTClient) CreateImportJob(ctx context.Context, req *kmsp httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "CreateImportJob") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -3377,21 +3009,7 @@ func (c *keyManagementRESTClient) UpdateCryptoKey(ctx context.Context, req *kmsp httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "UpdateCryptoKey") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -3466,21 +3084,7 @@ func (c *keyManagementRESTClient) 
UpdateCryptoKeyVersion(ctx context.Context, re httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "UpdateCryptoKeyVersion") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -3541,21 +3145,7 @@ func (c *keyManagementRESTClient) UpdateCryptoKeyPrimaryVersion(ctx context.Cont httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "UpdateCryptoKeyPrimaryVersion") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -3631,21 +3221,7 @@ func (c *keyManagementRESTClient) DestroyCryptoKeyVersion(ctx context.Context, r httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "DestroyCryptoKeyVersion") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -3709,21 +3285,7 @@ func (c *keyManagementRESTClient) RestoreCryptoKeyVersion(ctx context.Context, r httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "RestoreCryptoKeyVersion") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -3782,21 +3344,7 @@ func (c *keyManagementRESTClient) Encrypt(ctx context.Context, req *kmspb.Encryp httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "Encrypt") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -3855,21 +3403,7 @@ func (c *keyManagementRESTClient) Decrypt(ctx context.Context, req *kmspb.Decryp httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "Decrypt") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -3930,21 
+3464,7 @@ func (c *keyManagementRESTClient) RawEncrypt(ctx context.Context, req *kmspb.Raw httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "RawEncrypt") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -4003,21 +3523,7 @@ func (c *keyManagementRESTClient) RawDecrypt(ctx context.Context, req *kmspb.Raw httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "RawDecrypt") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -4077,21 +3583,7 @@ func (c *keyManagementRESTClient) AsymmetricSign(ctx context.Context, req *kmspb httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "AsymmetricSign") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -4151,21 +3643,7 @@ func (c *keyManagementRESTClient) AsymmetricDecrypt(ctx context.Context, req *km httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "AsymmetricDecrypt") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -4223,21 +3701,7 @@ func (c *keyManagementRESTClient) MacSign(ctx context.Context, req *kmspb.MacSig httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "MacSign") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -4296,21 +3760,7 @@ func (c *keyManagementRESTClient) MacVerify(ctx context.Context, req *kmspb.MacV httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "MacVerify") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -4367,21 
+3817,7 @@ func (c *keyManagementRESTClient) GenerateRandomBytes(ctx context.Context, req * httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "GenerateRandomBytes") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -4431,21 +3867,7 @@ func (c *keyManagementRESTClient) GetLocation(ctx context.Context, req *location httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetLocation") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -4510,28 +3932,10 @@ func (c *keyManagementRESTClient) ListLocations(ctx context.Context, req *locati } httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListLocations") if err != nil { return err } -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) - if err != nil { - return err - } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err := unm.Unmarshal(buf, resp); err != nil { return err } @@ -4598,21 +4002,7 @@ func (c *keyManagementRESTClient) GetIamPolicy(ctx context.Context, req *iampb.G httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetIamPolicy") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -4672,21 +4062,7 @@ func (c *keyManagementRESTClient) SetIamPolicy(ctx context.Context, req *iampb.S httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "SetIamPolicy") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -4748,21 +4124,7 @@ func (c *keyManagementRESTClient) TestIamPermissions(ctx context.Context, req *i httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "TestIamPermissions") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] 
add archivista storage backend) if err != nil { return err } @@ -4812,21 +4174,7 @@ func (c *keyManagementRESTClient) GetOperation(ctx context.Context, req *longrun httpReq = httpReq.WithContext(ctx) httpReq.Header = headers -<<<<<<< HEAD buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetOperation") -======= - httpRsp, err := c.httpClient.Do(httpReq) - if err != nil { - return err - } - defer httpRsp.Body.Close() - - if err = googleapi.CheckResponse(httpRsp); err != nil { - return err - } - - buf, err := io.ReadAll(httpRsp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey.pb.go index 05be16d86e..e4737aca92 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey.pb.go +++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey.pb.go @@ -14,11 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -<<<<<<< HEAD // protoc-gen-go v1.35.2 -======= -// protoc-gen-go v1.34.2 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // protoc v4.25.3 // source: google/cloud/kms/v1/autokey.proto @@ -66,17 +62,9 @@ type CreateKeyHandleRequest struct { func (x *CreateKeyHandleRequest) Reset() { *x = CreateKeyHandleRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *CreateKeyHandleRequest) String() string { @@ -87,11 +75,7 @@ func (*CreateKeyHandleRequest) ProtoMessage() {} func (x *CreateKeyHandleRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[0] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -141,17 +125,9 @@ type GetKeyHandleRequest struct { func (x *GetKeyHandleRequest) Reset() { *x = GetKeyHandleRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *GetKeyHandleRequest) String() string { @@ -162,11 +138,7 @@ func (*GetKeyHandleRequest) ProtoMessage() {} func (x *GetKeyHandleRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[1] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -220,17 +192,9 @@ type KeyHandle struct { func (x *KeyHandle) Reset() { *x = KeyHandle{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := 
&file_google_cloud_kms_v1_autokey_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *KeyHandle) String() string { @@ -241,11 +205,7 @@ func (*KeyHandle) ProtoMessage() {} func (x *KeyHandle) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[2] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -292,17 +252,9 @@ type CreateKeyHandleMetadata struct { func (x *CreateKeyHandleMetadata) Reset() { *x = CreateKeyHandleMetadata{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *CreateKeyHandleMetadata) String() string { @@ -313,11 +265,7 @@ func (*CreateKeyHandleMetadata) ProtoMessage() {} func (x *CreateKeyHandleMetadata) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[3] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -349,13 +297,8 @@ type ListKeyHandlesRequest struct { // [KeyHandles][google.cloud.kms.v1.KeyHandle] can subsequently be obtained by // including the // [ListKeyHandlesResponse.next_page_token][google.cloud.kms.v1.ListKeyHandlesResponse.next_page_token] -<<<<<<< HEAD // in a subsequent request. If unspecified, at most 100 // [KeyHandles][google.cloud.kms.v1.KeyHandle] will be returned. -======= - // in a subsequent request. If unspecified, at most - // 100 [KeyHandles][google.cloud.kms.v1.KeyHandle] will be returned. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` // Optional. Optional pagination token, returned earlier via // [ListKeyHandlesResponse.next_page_token][google.cloud.kms.v1.ListKeyHandlesResponse.next_page_token]. 
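
The .pb.go hunks in this chunk are a regeneration from protoc-gen-go v1.34.2 to v1.35.2: Reset now stores message state unconditionally, ProtoReflect drops its protoimpl.UnsafeEnabled guard, and the fallback Exporter blocks disappear from the file init functions. The schematic below contrasts the two generated shapes; it compiles in isolation using local stand-in types (messageState, msgTypes, unsafeEnabled are not the real protoimpl API).

package pbsketch

type messageInfo struct{}

type messageState struct{ info *messageInfo }

func (s *messageState) StoreMessageInfo(mi *messageInfo) { s.info = mi }

var msgTypes [1]messageInfo

// unsafeEnabled mimics protoimpl.UnsafeEnabled, which the older generated
// code consulted before touching message state.
const unsafeEnabled = true

type CreateKeyHandleRequest struct{ state messageState }

// resetOld mirrors the v1.34-style Reset: state is initialized only when
// the unsafe fast path is available, so an Exporter fallback was also
// generated for the !UnsafeEnabled case.
func resetOld(x *CreateKeyHandleRequest) {
	*x = CreateKeyHandleRequest{}
	if unsafeEnabled {
		x.state.StoreMessageInfo(&msgTypes[0])
	}
}

// resetNew mirrors the v1.35-style Reset: the guard is gone and message
// info is always stored, which is why the hunks also delete the Exporter
// blocks from file_google_cloud_kms_v1_autokey_proto_init.
func resetNew(x *CreateKeyHandleRequest) {
	*x = CreateKeyHandleRequest{}
	x.state.StoreMessageInfo(&msgTypes[0])
}
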
@@ -368,17 +311,9 @@ type ListKeyHandlesRequest struct { func (x *ListKeyHandlesRequest) Reset() { *x = ListKeyHandlesRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ListKeyHandlesRequest) String() string { @@ -389,11 +324,7 @@ func (*ListKeyHandlesRequest) ProtoMessage() {} func (x *ListKeyHandlesRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[4] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -453,17 +384,9 @@ type ListKeyHandlesResponse struct { func (x *ListKeyHandlesResponse) Reset() { *x = ListKeyHandlesResponse{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ListKeyHandlesResponse) String() string { @@ -474,11 +397,7 @@ func (*ListKeyHandlesResponse) ProtoMessage() {} func (x *ListKeyHandlesResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[5] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -676,83 +595,6 @@ func file_google_cloud_kms_v1_autokey_proto_init() { if File_google_cloud_kms_v1_autokey_proto != nil { return } -<<<<<<< HEAD -======= - if !protoimpl.UnsafeEnabled { - file_google_cloud_kms_v1_autokey_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*CreateKeyHandleRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_autokey_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*GetKeyHandleRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_autokey_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*KeyHandle); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_autokey_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*CreateKeyHandleMetadata); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_autokey_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*ListKeyHandlesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_google_cloud_kms_v1_autokey_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*ListKeyHandlesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ @@ -788,15 +630,9 @@ type AutokeyClient interface { // Creates a new [KeyHandle][google.cloud.kms.v1.KeyHandle], triggering the // provisioning of a new [CryptoKey][google.cloud.kms.v1.CryptoKey] for CMEK // use with the given resource type in the configured key project and the same -<<<<<<< HEAD // location. [GetOperation][google.longrunning.Operations.GetOperation] should // be used to resolve the resulting long-running operation and get the // resulting [KeyHandle][google.cloud.kms.v1.KeyHandle] and -======= - // location. [GetOperation][Operations.GetOperation] should be used to resolve - // the resulting long-running operation and get the resulting - // [KeyHandle][google.cloud.kms.v1.KeyHandle] and ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // [CryptoKey][google.cloud.kms.v1.CryptoKey]. CreateKeyHandle(ctx context.Context, in *CreateKeyHandleRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) // Returns the [KeyHandle][google.cloud.kms.v1.KeyHandle]. @@ -845,15 +681,9 @@ type AutokeyServer interface { // Creates a new [KeyHandle][google.cloud.kms.v1.KeyHandle], triggering the // provisioning of a new [CryptoKey][google.cloud.kms.v1.CryptoKey] for CMEK // use with the given resource type in the configured key project and the same -<<<<<<< HEAD // location. [GetOperation][google.longrunning.Operations.GetOperation] should // be used to resolve the resulting long-running operation and get the // resulting [KeyHandle][google.cloud.kms.v1.KeyHandle] and -======= - // location. [GetOperation][Operations.GetOperation] should be used to resolve - // the resulting long-running operation and get the resulting - // [KeyHandle][google.cloud.kms.v1.KeyHandle] and ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // [CryptoKey][google.cloud.kms.v1.CryptoKey]. CreateKeyHandle(context.Context, *CreateKeyHandleRequest) (*longrunningpb.Operation, error) // Returns the [KeyHandle][google.cloud.kms.v1.KeyHandle]. diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey_admin.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey_admin.pb.go index 0bd9759bdc..bb1abb0af6 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey_admin.pb.go +++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey_admin.pb.go @@ -14,11 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -<<<<<<< HEAD // protoc-gen-go v1.35.2 -======= -// protoc-gen-go v1.34.2 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // protoc v4.25.3 // source: google/cloud/kms/v1/autokey_admin.proto @@ -122,17 +118,9 @@ type UpdateAutokeyConfigRequest struct { func (x *UpdateAutokeyConfigRequest) Reset() { *x = UpdateAutokeyConfigRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *UpdateAutokeyConfigRequest) String() string { @@ -143,11 +131,7 @@ func (*UpdateAutokeyConfigRequest) ProtoMessage() {} func (x *UpdateAutokeyConfigRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[0] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -190,17 +174,9 @@ type GetAutokeyConfigRequest struct { func (x *GetAutokeyConfigRequest) Reset() { *x = GetAutokeyConfigRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *GetAutokeyConfigRequest) String() string { @@ -211,11 +187,7 @@ func (*GetAutokeyConfigRequest) ProtoMessage() {} func (x *GetAutokeyConfigRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[1] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -264,17 +236,9 @@ type AutokeyConfig struct { func (x *AutokeyConfig) Reset() { *x = AutokeyConfig{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *AutokeyConfig) String() string { @@ -285,11 +249,7 @@ func (*AutokeyConfig) ProtoMessage() {} func (x *AutokeyConfig) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[2] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -340,17 +300,9 @@ type ShowEffectiveAutokeyConfigRequest struct { func (x *ShowEffectiveAutokeyConfigRequest) Reset() { *x = ShowEffectiveAutokeyConfigRequest{} -<<<<<<< HEAD mi := 
&file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ShowEffectiveAutokeyConfigRequest) String() string { @@ -361,11 +313,7 @@ func (*ShowEffectiveAutokeyConfigRequest) ProtoMessage() {} func (x *ShowEffectiveAutokeyConfigRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[3] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -401,17 +349,9 @@ type ShowEffectiveAutokeyConfigResponse struct { func (x *ShowEffectiveAutokeyConfigResponse) Reset() { *x = ShowEffectiveAutokeyConfigResponse{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ShowEffectiveAutokeyConfigResponse) String() string { @@ -422,11 +362,7 @@ func (*ShowEffectiveAutokeyConfigResponse) ProtoMessage() {} func (x *ShowEffectiveAutokeyConfigResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[4] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -613,71 +549,6 @@ func file_google_cloud_kms_v1_autokey_admin_proto_init() { if File_google_cloud_kms_v1_autokey_admin_proto != nil { return } -<<<<<<< HEAD -======= - if !protoimpl.UnsafeEnabled { - file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*UpdateAutokeyConfigRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*GetAutokeyConfigRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*AutokeyConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*ShowEffectiveAutokeyConfigRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*ShowEffectiveAutokeyConfigResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - 
case 2: - return &v.unknownFields - default: - return nil - } - } - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go index 8f25eae9d2..d0739cca51 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go +++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go @@ -14,11 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -<<<<<<< HEAD // protoc-gen-go v1.35.2 -======= -// protoc-gen-go v1.34.2 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // protoc v4.25.3 // source: google/cloud/kms/v1/ekm_service.proto @@ -72,10 +68,7 @@ const ( // All [CryptoKeys][google.cloud.kms.v1.CryptoKey] created with this // [EkmConnection][google.cloud.kms.v1.EkmConnection] use EKM-side key // management operations initiated from Cloud KMS. This means that: -<<<<<<< HEAD // -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // * When a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] // associated with this [EkmConnection][google.cloud.kms.v1.EkmConnection] // is @@ -85,12 +78,8 @@ const ( // external key material. // - Destruction of external key material associated with this // [EkmConnection][google.cloud.kms.v1.EkmConnection] can be requested by -<<<<<<< HEAD // calling // [DestroyCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.DestroyCryptoKeyVersion]. -======= - // calling [DestroyCryptoKeyVersion][EkmService.DestroyCryptoKeyVersion]. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // - Automatic rotation of key material is supported. EkmConnection_CLOUD_KMS EkmConnection_KeyManagementMode = 2 ) @@ -172,17 +161,9 @@ type ListEkmConnectionsRequest struct { func (x *ListEkmConnectionsRequest) Reset() { *x = ListEkmConnectionsRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ListEkmConnectionsRequest) String() string { @@ -193,11 +174,7 @@ func (*ListEkmConnectionsRequest) ProtoMessage() {} func (x *ListEkmConnectionsRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[0] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -267,17 +244,9 @@ type ListEkmConnectionsResponse struct { func (x *ListEkmConnectionsResponse) Reset() { *x = ListEkmConnectionsResponse{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ListEkmConnectionsResponse) String() string { @@ -288,11 +257,7 @@ func (*ListEkmConnectionsResponse) 
ProtoMessage() {} func (x *ListEkmConnectionsResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[1] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -342,17 +307,9 @@ type GetEkmConnectionRequest struct { func (x *GetEkmConnectionRequest) Reset() { *x = GetEkmConnectionRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *GetEkmConnectionRequest) String() string { @@ -363,11 +320,7 @@ func (*GetEkmConnectionRequest) ProtoMessage() {} func (x *GetEkmConnectionRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[2] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -410,17 +363,9 @@ type CreateEkmConnectionRequest struct { func (x *CreateEkmConnectionRequest) Reset() { *x = CreateEkmConnectionRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *CreateEkmConnectionRequest) String() string { @@ -431,11 +376,7 @@ func (*CreateEkmConnectionRequest) ProtoMessage() {} func (x *CreateEkmConnectionRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[3] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -487,17 +428,9 @@ type UpdateEkmConnectionRequest struct { func (x *UpdateEkmConnectionRequest) Reset() { *x = UpdateEkmConnectionRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *UpdateEkmConnectionRequest) String() string { @@ -508,11 +441,7 @@ func (*UpdateEkmConnectionRequest) ProtoMessage() {} func (x *UpdateEkmConnectionRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[4] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == 
nil { ms.StoreMessageInfo(mi) @@ -555,17 +484,9 @@ type GetEkmConfigRequest struct { func (x *GetEkmConfigRequest) Reset() { *x = GetEkmConfigRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *GetEkmConfigRequest) String() string { @@ -576,11 +497,7 @@ func (*GetEkmConfigRequest) ProtoMessage() {} func (x *GetEkmConfigRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[5] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -617,17 +534,9 @@ type UpdateEkmConfigRequest struct { func (x *UpdateEkmConfigRequest) Reset() { *x = UpdateEkmConfigRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *UpdateEkmConfigRequest) String() string { @@ -638,11 +547,7 @@ func (*UpdateEkmConfigRequest) ProtoMessage() {} func (x *UpdateEkmConfigRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[6] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -707,17 +612,9 @@ type Certificate struct { func (x *Certificate) Reset() { *x = Certificate{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Certificate) String() string { @@ -728,11 +625,7 @@ func (*Certificate) ProtoMessage() {} func (x *Certificate) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[7] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -815,11 +708,7 @@ func (x *Certificate) GetSha256Fingerprint() string { // [CryptoKeys][google.cloud.kms.v1.CryptoKey] and // [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] with a // [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of -<<<<<<< HEAD // [EXTERNAL_VPC][google.cloud.kms.v1.ProtectionLevel.EXTERNAL_VPC], as well as -======= -// [EXTERNAL_VPC][CryptoKeyVersion.ProtectionLevel.EXTERNAL_VPC], as well as ->>>>>>> 70e0318b1 ([WIP] add archivista storage 
backend) // performing cryptographic operations using keys created within the // [EkmConnection][google.cloud.kms.v1.EkmConnection]. type EkmConnection struct { @@ -858,17 +747,9 @@ type EkmConnection struct { func (x *EkmConnection) Reset() { *x = EkmConnection{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *EkmConnection) String() string { @@ -879,11 +760,7 @@ func (*EkmConnection) ProtoMessage() {} func (x *EkmConnection) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[8] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -945,11 +822,7 @@ func (x *EkmConnection) GetCryptoSpacePath() string { // [CryptoKeys][google.cloud.kms.v1.CryptoKey] and // [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] with a // [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of -<<<<<<< HEAD // [EXTERNAL_VPC][google.cloud.kms.v1.ProtectionLevel.EXTERNAL_VPC] in a given -======= -// [EXTERNAL_VPC][CryptoKeyVersion.ProtectionLevel.EXTERNAL_VPC] in a given ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // project and location. type EkmConfig struct { state protoimpl.MessageState @@ -968,17 +841,9 @@ type EkmConfig struct { func (x *EkmConfig) Reset() { *x = EkmConfig{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *EkmConfig) String() string { @@ -989,11 +854,7 @@ func (*EkmConfig) ProtoMessage() {} func (x *EkmConfig) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[9] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1036,17 +897,9 @@ type VerifyConnectivityRequest struct { func (x *VerifyConnectivityRequest) Reset() { *x = VerifyConnectivityRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *VerifyConnectivityRequest) String() string { @@ -1057,11 +910,7 @@ func (*VerifyConnectivityRequest) ProtoMessage() {} func (x *VerifyConnectivityRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[10] -<<<<<<< HEAD if x != nil { -======= - if 
protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1093,17 +942,9 @@ type VerifyConnectivityResponse struct { func (x *VerifyConnectivityResponse) Reset() { *x = VerifyConnectivityResponse{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *VerifyConnectivityResponse) String() string { @@ -1114,11 +955,7 @@ func (*VerifyConnectivityResponse) ProtoMessage() {} func (x *VerifyConnectivityResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[11] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1162,17 +999,9 @@ type EkmConnection_ServiceResolver struct { func (x *EkmConnection_ServiceResolver) Reset() { *x = EkmConnection_ServiceResolver{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *EkmConnection_ServiceResolver) String() string { @@ -1183,11 +1012,7 @@ func (*EkmConnection_ServiceResolver) ProtoMessage() {} func (x *EkmConnection_ServiceResolver) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[12] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1519,11 +1344,7 @@ var file_google_cloud_kms_v1_ekm_service_proto_rawDesc = []byte{ 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, -<<<<<<< HEAD 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x42, 0x82, 0x02, -======= - 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x42, 0x85, 0x02, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) 0xea, 0x41, 0x7c, 0x0a, 0x27, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x51, 0x70, 0x72, @@ -1537,17 +1358,10 @@ var file_google_cloud_kms_v1_ekm_service_proto_rawDesc = []byte{ 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 
0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6b, 0x6d, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x6b, 0x6d, 0x73, 0x70, 0x62, -<<<<<<< HEAD 0x3b, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4b, 0x6d, 0x73, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4b, 0x6d, 0x73, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -======= - 0x3b, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4b, 0x6d, 0x73, 0x2e, 0x56, 0x31, 0xca, - 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4b, - 0x6d, 0x73, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var ( @@ -1621,167 +1435,6 @@ func file_google_cloud_kms_v1_ekm_service_proto_init() { if File_google_cloud_kms_v1_ekm_service_proto != nil { return } -<<<<<<< HEAD -======= - if !protoimpl.UnsafeEnabled { - file_google_cloud_kms_v1_ekm_service_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*ListEkmConnectionsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_ekm_service_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*ListEkmConnectionsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_ekm_service_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*GetEkmConnectionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_ekm_service_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*CreateEkmConnectionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_ekm_service_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*UpdateEkmConnectionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_ekm_service_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*GetEkmConfigRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_ekm_service_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*UpdateEkmConfigRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_ekm_service_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*Certificate); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_ekm_service_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*EkmConnection); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_google_cloud_kms_v1_ekm_service_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*EkmConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_ekm_service_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*VerifyConnectivityRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_ekm_service_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*VerifyConnectivityResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_ekm_service_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*EkmConnection_ServiceResolver); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go index d9bd076269..3f40fa3ef8 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go +++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go @@ -14,11 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -<<<<<<< HEAD // protoc-gen-go v1.35.2 -======= -// protoc-gen-go v1.34.2 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // protoc v4.25.3 // source: google/cloud/kms/v1/resources.proto @@ -408,11 +404,8 @@ func (KeyOperationAttestation_AttestationFormat) EnumDescriptor() ([]byte, []int // The suffix following `HMAC_` corresponds to the hash algorithm being used // (eg. SHA256). // -<<<<<<< HEAD // Algorithms beginning with `PQ_` are post-quantum. // -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // For more information, see [Key purposes and algorithms] // (https://cloud.google.com/kms/docs/algorithms). type CryptoKeyVersion_CryptoKeyVersionAlgorithm int32 @@ -964,17 +957,9 @@ type KeyRing struct { func (x *KeyRing) Reset() { *x = KeyRing{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *KeyRing) String() string { @@ -985,11 +970,7 @@ func (*KeyRing) ProtoMessage() {} func (x *KeyRing) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[0] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1100,11 +1081,7 @@ type CryptoKey struct { // where all related cryptographic operations are performed. 
Only applicable // if [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] have a // [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of -<<<<<<< HEAD // [EXTERNAL_VPC][google.cloud.kms.v1.ProtectionLevel.EXTERNAL_VPC], with the -======= - // [EXTERNAL_VPC][CryptoKeyVersion.ProtectionLevel.EXTERNAL_VPC], with the ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // resource name in the format `projects/*/locations/*/ekmConnections/*`. // Note, this list is non-exhaustive and may apply to additional // [ProtectionLevels][google.cloud.kms.v1.ProtectionLevel] in the future. @@ -1122,17 +1099,9 @@ type CryptoKey struct { func (x *CryptoKey) Reset() { *x = CryptoKey{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *CryptoKey) String() string { @@ -1143,11 +1112,7 @@ func (*CryptoKey) ProtoMessage() {} func (x *CryptoKey) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[1] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1306,17 +1271,9 @@ type CryptoKeyVersionTemplate struct { func (x *CryptoKeyVersionTemplate) Reset() { *x = CryptoKeyVersionTemplate{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *CryptoKeyVersionTemplate) String() string { @@ -1327,11 +1284,7 @@ func (*CryptoKeyVersionTemplate) ProtoMessage() {} func (x *CryptoKeyVersionTemplate) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[2] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1379,17 +1332,9 @@ type KeyOperationAttestation struct { func (x *KeyOperationAttestation) Reset() { *x = KeyOperationAttestation{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *KeyOperationAttestation) String() string { @@ -1400,11 +1345,7 @@ func (*KeyOperationAttestation) ProtoMessage() {} func (x *KeyOperationAttestation) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[3] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1533,17 +1474,9 @@ type CryptoKeyVersion struct { func (x *CryptoKeyVersion) Reset() { *x = CryptoKeyVersion{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *CryptoKeyVersion) String() string { @@ -1554,11 +1487,7 @@ func (*CryptoKeyVersion) ProtoMessage() {} func (x *CryptoKeyVersion) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[4] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1731,17 +1660,9 @@ type PublicKey struct { func (x *PublicKey) Reset() { *x = PublicKey{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *PublicKey) String() string { @@ -1752,11 +1673,7 @@ func (*PublicKey) ProtoMessage() {} func (x *PublicKey) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[5] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1890,17 +1807,9 @@ type ImportJob struct { func (x *ImportJob) Reset() { *x = ImportJob{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ImportJob) String() string { @@ -1911,11 +1820,7 @@ func (*ImportJob) ProtoMessage() {} func (x *ImportJob) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[6] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2023,17 +1928,9 @@ type ExternalProtectionLevelOptions struct { func (x *ExternalProtectionLevelOptions) Reset() { *x = ExternalProtectionLevelOptions{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 
([WIP] add archivista storage backend) } func (x *ExternalProtectionLevelOptions) String() string { @@ -2044,11 +1941,7 @@ func (*ExternalProtectionLevelOptions) ProtoMessage() {} func (x *ExternalProtectionLevelOptions) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[7] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2097,17 +1990,9 @@ type KeyAccessJustificationsPolicy struct { func (x *KeyAccessJustificationsPolicy) Reset() { *x = KeyAccessJustificationsPolicy{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *KeyAccessJustificationsPolicy) String() string { @@ -2118,11 +2003,7 @@ func (*KeyAccessJustificationsPolicy) ProtoMessage() {} func (x *KeyAccessJustificationsPolicy) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[8] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2162,17 +2043,9 @@ type KeyOperationAttestation_CertificateChains struct { func (x *KeyOperationAttestation_CertificateChains) Reset() { *x = KeyOperationAttestation_CertificateChains{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *KeyOperationAttestation_CertificateChains) String() string { @@ -2183,11 +2056,7 @@ func (*KeyOperationAttestation_CertificateChains) ProtoMessage() {} func (x *KeyOperationAttestation_CertificateChains) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[10] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2241,17 +2110,9 @@ type ImportJob_WrappingPublicKey struct { func (x *ImportJob_WrappingPublicKey) Reset() { *x = ImportJob_WrappingPublicKey{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ImportJob_WrappingPublicKey) String() string { @@ -2262,11 +2123,7 @@ func (*ImportJob_WrappingPublicKey) ProtoMessage() {} func (x *ImportJob_WrappingPublicKey) ProtoReflect() 
protoreflect.Message { mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[11] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2764,27 +2621,16 @@ var file_google_cloud_kms_v1_resources_proto_rawDesc = []byte{ 0x4f, 0x44, 0x55, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x41, 0x4c, 0x45, 0x52, 0x54, 0x10, 0x0a, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x45, 0x52, 0x5f, 0x41, 0x55, 0x54, 0x48, 0x4f, 0x52, 0x49, 0x5a, 0x45, 0x44, 0x5f, 0x57, 0x4f, 0x52, 0x4b, 0x46, 0x4c, 0x4f, 0x57, -<<<<<<< HEAD 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x49, 0x4e, 0x47, 0x10, 0x0b, 0x42, 0x85, 0x01, 0x0a, -======= - 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x49, 0x4e, 0x47, 0x10, 0x0b, 0x42, 0x88, 0x01, 0x0a, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x4b, 0x6d, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6b, 0x6d, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x6b, 0x6d, 0x73, -<<<<<<< HEAD 0x70, 0x62, 0x3b, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4b, 0x6d, 0x73, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4b, 0x6d, 0x73, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -======= - 0x70, 0x62, 0x3b, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4b, 0x6d, 0x73, 0x2e, 0x56, - 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, - 0x5c, 0x4b, 0x6d, 0x73, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var ( @@ -2877,143 +2723,6 @@ func file_google_cloud_kms_v1_resources_proto_init() { if File_google_cloud_kms_v1_resources_proto != nil { return } -<<<<<<< HEAD -======= - if !protoimpl.UnsafeEnabled { - file_google_cloud_kms_v1_resources_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*KeyRing); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_resources_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*CryptoKey); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_resources_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*CryptoKeyVersionTemplate); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_resources_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*KeyOperationAttestation); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_google_cloud_kms_v1_resources_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*CryptoKeyVersion); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_resources_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*PublicKey); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_resources_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*ImportJob); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_resources_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*ExternalProtectionLevelOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_resources_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*KeyAccessJustificationsPolicy); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_resources_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*KeyOperationAttestation_CertificateChains); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_resources_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*ImportJob_WrappingPublicKey); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) file_google_cloud_kms_v1_resources_proto_msgTypes[1].OneofWrappers = []any{ (*CryptoKey_RotationPeriod)(nil), } diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/service.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/service.pb.go index 4bee1e82c3..ce28760227 100644 --- a/vendor/cloud.google.com/go/kms/apiv1/kmspb/service.pb.go +++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/service.pb.go @@ -14,11 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
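// The request/response messages below (ListKeyRingsRequest,
// ListCryptoKeysRequest, and friends) are what the generated
// KeyManagementClient consumes. A minimal sketch of paging through key rings
// with the public client (hypothetical parent value; assumes the standard
// imports kms "cloud.google.com/go/kms/apiv1",
// "cloud.google.com/go/kms/apiv1/kmspb", and
// "google.golang.org/api/iterator"):
//
//	ctx := context.Background()
//	client, err := kms.NewKeyManagementClient(ctx)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer client.Close()
//	it := client.ListKeyRings(ctx, &kmspb.ListKeyRingsRequest{
//		Parent: "projects/my-project/locations/global",
//	})
//	for {
//		ring, err := it.Next()
//		if err == iterator.Done {
//			break
//		}
//		if err != nil {
//			log.Fatal(err)
//		}
//		_ = ring.GetName() // each item is a *kmspb.KeyRing
//	}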
// versions: -<<<<<<< HEAD // protoc-gen-go v1.35.2 -======= -// protoc-gen-go v1.34.2 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // protoc v4.25.3 // source: google/cloud/kms/v1/service.proto @@ -82,17 +78,9 @@ type ListKeyRingsRequest struct { func (x *ListKeyRingsRequest) Reset() { *x = ListKeyRingsRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ListKeyRingsRequest) String() string { @@ -103,11 +91,7 @@ func (*ListKeyRingsRequest) ProtoMessage() {} func (x *ListKeyRingsRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[0] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -194,17 +178,9 @@ type ListCryptoKeysRequest struct { func (x *ListCryptoKeysRequest) Reset() { *x = ListCryptoKeysRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ListCryptoKeysRequest) String() string { @@ -215,11 +191,7 @@ func (*ListCryptoKeysRequest) ProtoMessage() {} func (x *ListCryptoKeysRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[1] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -314,17 +286,9 @@ type ListCryptoKeyVersionsRequest struct { func (x *ListCryptoKeyVersionsRequest) Reset() { *x = ListCryptoKeyVersionsRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ListCryptoKeyVersionsRequest) String() string { @@ -335,11 +299,7 @@ func (*ListCryptoKeyVersionsRequest) ProtoMessage() {} func (x *ListCryptoKeyVersionsRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[2] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -431,17 +391,9 @@ type ListImportJobsRequest struct { func (x *ListImportJobsRequest) Reset() { *x = ListImportJobsRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[3] ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ListImportJobsRequest) String() string { @@ -452,11 +404,7 @@ func (*ListImportJobsRequest) ProtoMessage() {} func (x *ListImportJobsRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[3] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -526,17 +474,9 @@ type ListKeyRingsResponse struct { func (x *ListKeyRingsResponse) Reset() { *x = ListKeyRingsResponse{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ListKeyRingsResponse) String() string { @@ -547,11 +487,7 @@ func (*ListKeyRingsResponse) ProtoMessage() {} func (x *ListKeyRingsResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[4] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -607,17 +543,9 @@ type ListCryptoKeysResponse struct { func (x *ListCryptoKeysResponse) Reset() { *x = ListCryptoKeysResponse{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ListCryptoKeysResponse) String() string { @@ -628,11 +556,7 @@ func (*ListCryptoKeysResponse) ProtoMessage() {} func (x *ListCryptoKeysResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[5] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -689,17 +613,9 @@ type ListCryptoKeyVersionsResponse struct { func (x *ListCryptoKeyVersionsResponse) Reset() { *x = ListCryptoKeyVersionsResponse{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ListCryptoKeyVersionsResponse) String() string { @@ -710,11 +626,7 @@ func (*ListCryptoKeyVersionsResponse) ProtoMessage() {} func 
(x *ListCryptoKeyVersionsResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[6] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -770,17 +682,9 @@ type ListImportJobsResponse struct { func (x *ListImportJobsResponse) Reset() { *x = ListImportJobsResponse{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ListImportJobsResponse) String() string { @@ -791,11 +695,7 @@ func (*ListImportJobsResponse) ProtoMessage() {} func (x *ListImportJobsResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[7] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -845,17 +745,9 @@ type GetKeyRingRequest struct { func (x *GetKeyRingRequest) Reset() { *x = GetKeyRingRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *GetKeyRingRequest) String() string { @@ -866,11 +758,7 @@ func (*GetKeyRingRequest) ProtoMessage() {} func (x *GetKeyRingRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[8] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -906,17 +794,9 @@ type GetCryptoKeyRequest struct { func (x *GetCryptoKeyRequest) Reset() { *x = GetCryptoKeyRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *GetCryptoKeyRequest) String() string { @@ -927,11 +807,7 @@ func (*GetCryptoKeyRequest) ProtoMessage() {} func (x *GetCryptoKeyRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[9] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -967,17 +843,9 @@ type GetCryptoKeyVersionRequest struct { func (x *GetCryptoKeyVersionRequest) Reset() { *x = 
GetCryptoKeyVersionRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *GetCryptoKeyVersionRequest) String() string { @@ -988,11 +856,7 @@ func (*GetCryptoKeyVersionRequest) ProtoMessage() {} func (x *GetCryptoKeyVersionRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[10] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1028,17 +892,9 @@ type GetPublicKeyRequest struct { func (x *GetPublicKeyRequest) Reset() { *x = GetPublicKeyRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *GetPublicKeyRequest) String() string { @@ -1049,11 +905,7 @@ func (*GetPublicKeyRequest) ProtoMessage() {} func (x *GetPublicKeyRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[11] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1089,17 +941,9 @@ type GetImportJobRequest struct { func (x *GetImportJobRequest) Reset() { *x = GetImportJobRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *GetImportJobRequest) String() string { @@ -1110,11 +954,7 @@ func (*GetImportJobRequest) ProtoMessage() {} func (x *GetImportJobRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[12] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1157,17 +997,9 @@ type CreateKeyRingRequest struct { func (x *CreateKeyRingRequest) Reset() { *x = CreateKeyRingRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *CreateKeyRingRequest) String() string { @@ 
-1178,11 +1010,7 @@ func (*CreateKeyRingRequest) ProtoMessage() {} func (x *CreateKeyRingRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[13] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1247,17 +1075,9 @@ type CreateCryptoKeyRequest struct { func (x *CreateCryptoKeyRequest) Reset() { *x = CreateCryptoKeyRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *CreateCryptoKeyRequest) String() string { @@ -1268,11 +1088,7 @@ func (*CreateCryptoKeyRequest) ProtoMessage() {} func (x *CreateCryptoKeyRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[14] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1333,17 +1149,9 @@ type CreateCryptoKeyVersionRequest struct { func (x *CreateCryptoKeyVersionRequest) Reset() { *x = CreateCryptoKeyVersionRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *CreateCryptoKeyVersionRequest) String() string { @@ -1354,11 +1162,7 @@ func (*CreateCryptoKeyVersionRequest) ProtoMessage() {} func (x *CreateCryptoKeyVersionRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[15] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1412,13 +1216,9 @@ type ImportCryptoKeyVersionRequest struct { // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion], the // [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] must be a child of // [ImportCryptoKeyVersionRequest.parent][google.cloud.kms.v1.ImportCryptoKeyVersionRequest.parent], -<<<<<<< HEAD // have been previously created via // [ImportCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.ImportCryptoKeyVersion], // and be in -======= - // have been previously created via [ImportCryptoKeyVersion][], and be in ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // [DESTROYED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROYED] // or // [IMPORT_FAILED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.IMPORT_FAILED] @@ -1491,17 +1291,9 @@ type ImportCryptoKeyVersionRequest struct { func (x *ImportCryptoKeyVersionRequest) Reset() { *x = ImportCryptoKeyVersionRequest{} -<<<<<<< HEAD mi := 
&file_google_cloud_kms_v1_service_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ImportCryptoKeyVersionRequest) String() string { @@ -1512,11 +1304,7 @@ func (*ImportCryptoKeyVersionRequest) ProtoMessage() {} func (x *ImportCryptoKeyVersionRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[16] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1616,17 +1404,9 @@ type CreateImportJobRequest struct { func (x *CreateImportJobRequest) Reset() { *x = CreateImportJobRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *CreateImportJobRequest) String() string { @@ -1637,11 +1417,7 @@ func (*CreateImportJobRequest) ProtoMessage() {} func (x *CreateImportJobRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[17] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1692,17 +1468,9 @@ type UpdateCryptoKeyRequest struct { func (x *UpdateCryptoKeyRequest) Reset() { *x = UpdateCryptoKeyRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *UpdateCryptoKeyRequest) String() string { @@ -1713,11 +1481,7 @@ func (*UpdateCryptoKeyRequest) ProtoMessage() {} func (x *UpdateCryptoKeyRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[18] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1762,17 +1526,9 @@ type UpdateCryptoKeyVersionRequest struct { func (x *UpdateCryptoKeyVersionRequest) Reset() { *x = UpdateCryptoKeyVersionRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x 
*UpdateCryptoKeyVersionRequest) String() string { @@ -1783,11 +1539,7 @@ func (*UpdateCryptoKeyVersionRequest) ProtoMessage() {} func (x *UpdateCryptoKeyVersionRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[19] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1833,17 +1585,9 @@ type UpdateCryptoKeyPrimaryVersionRequest struct { func (x *UpdateCryptoKeyPrimaryVersionRequest) Reset() { *x = UpdateCryptoKeyPrimaryVersionRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *UpdateCryptoKeyPrimaryVersionRequest) String() string { @@ -1854,11 +1598,7 @@ func (*UpdateCryptoKeyPrimaryVersionRequest) ProtoMessage() {} func (x *UpdateCryptoKeyPrimaryVersionRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[20] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1901,17 +1641,9 @@ type DestroyCryptoKeyVersionRequest struct { func (x *DestroyCryptoKeyVersionRequest) Reset() { *x = DestroyCryptoKeyVersionRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *DestroyCryptoKeyVersionRequest) String() string { @@ -1922,11 +1654,7 @@ func (*DestroyCryptoKeyVersionRequest) ProtoMessage() {} func (x *DestroyCryptoKeyVersionRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[21] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1962,17 +1690,9 @@ type RestoreCryptoKeyVersionRequest struct { func (x *RestoreCryptoKeyVersionRequest) Reset() { *x = RestoreCryptoKeyVersionRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *RestoreCryptoKeyVersionRequest) String() string { @@ -1983,11 +1703,7 @@ func (*RestoreCryptoKeyVersionRequest) ProtoMessage() {} func (x *RestoreCryptoKeyVersionRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[22] 
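
(Every conflict in this vendored `service.pb.go` is resolved the same way: the HEAD side is kept — the shape newer `protoc-gen-go` releases emit for `Reset`/`ProtoReflect`, without the `protoimpl.UnsafeEnabled` guard — and the stale `[WIP] add archivista storage backend` side is deleted along with the markers. Since the churn repeats mechanically for every message type, the practical review check is that no marker survived anywhere under `vendor/`. A hypothetical standalone checker, not part of this patch, could look like:

```go
// conflictcheck is a hypothetical review helper: it walks a tree and
// reports any leftover merge-conflict markers at the start of a line.
package main

import (
	"bufio"
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	root := "vendor" // assumed tree to scan
	err := filepath.WalkDir(root, func(path string, d fs.DirEntry, walkErr error) error {
		if walkErr != nil || d.IsDir() {
			return walkErr
		}
		f, err := os.Open(path)
		if err != nil {
			return err
		}
		defer f.Close()
		sc := bufio.NewScanner(f)
		sc.Buffer(make([]byte, 1024*1024), 1024*1024) // generated files can have long lines
		for n := 1; sc.Scan(); n++ {
			line := sc.Text()
			// Only the begin/end markers are matched; a bare "=======" occurs
			// legitimately in Markdown underlines and generated text.
			if strings.HasPrefix(line, "<<<<<<< ") || strings.HasPrefix(line, ">>>>>>> ") {
				fmt.Printf("%s:%d: leftover conflict marker\n", path, n)
			}
		}
		return sc.Err()
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```

Run from the repository root, it should print nothing for the resolved state this patch produces.)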
-<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2094,17 +1810,9 @@ type EncryptRequest struct { func (x *EncryptRequest) Reset() { *x = EncryptRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *EncryptRequest) String() string { @@ -2115,11 +1823,7 @@ func (*EncryptRequest) ProtoMessage() {} func (x *EncryptRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[23] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2230,17 +1934,9 @@ type DecryptRequest struct { func (x *DecryptRequest) Reset() { *x = DecryptRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *DecryptRequest) String() string { @@ -2251,11 +1947,7 @@ func (*DecryptRequest) ProtoMessage() {} func (x *DecryptRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[24] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2401,17 +2093,9 @@ type RawEncryptRequest struct { func (x *RawEncryptRequest) Reset() { *x = RawEncryptRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *RawEncryptRequest) String() string { @@ -2422,11 +2106,7 @@ func (*RawEncryptRequest) ProtoMessage() {} func (x *RawEncryptRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[25] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2568,17 +2248,9 @@ type RawDecryptRequest struct { func (x *RawDecryptRequest) Reset() { *x = RawDecryptRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := 
&file_google_cloud_kms_v1_service_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *RawDecryptRequest) String() string { @@ -2589,11 +2261,7 @@ func (*RawDecryptRequest) ProtoMessage() {} func (x *RawDecryptRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[26] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2732,17 +2400,9 @@ type AsymmetricSignRequest struct { func (x *AsymmetricSignRequest) Reset() { *x = AsymmetricSignRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *AsymmetricSignRequest) String() string { @@ -2753,11 +2413,7 @@ func (*AsymmetricSignRequest) ProtoMessage() {} func (x *AsymmetricSignRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[27] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2846,17 +2502,9 @@ type AsymmetricDecryptRequest struct { func (x *AsymmetricDecryptRequest) Reset() { *x = AsymmetricDecryptRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *AsymmetricDecryptRequest) String() string { @@ -2867,11 +2515,7 @@ func (*AsymmetricDecryptRequest) ProtoMessage() {} func (x *AsymmetricDecryptRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[28] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2943,17 +2587,9 @@ type MacSignRequest struct { func (x *MacSignRequest) Reset() { *x = MacSignRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *MacSignRequest) String() string { @@ -2964,11 +2600,7 @@ func (*MacSignRequest) ProtoMessage() {} func (x *MacSignRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[29] -<<<<<<< HEAD if x != nil { -======= - if 
protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3048,12 +2680,8 @@ type MacVerifyRequest struct { // checksum. [KeyManagementService][google.cloud.kms.v1.KeyManagementService] // will report an error if the checksum verification fails. If you receive a // checksum error, your client should verify that -<<<<<<< HEAD // CRC32C([MacVerifyRequest.mac][google.cloud.kms.v1.MacVerifyRequest.mac]) is // equal to -======= - // CRC32C([MacVerifyRequest.tag][]) is equal to ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // [MacVerifyRequest.mac_crc32c][google.cloud.kms.v1.MacVerifyRequest.mac_crc32c], // and if so, perform a limited number of retries. A persistent mismatch may // indicate an issue in your computation of the CRC32C checksum. Note: This @@ -3066,17 +2694,9 @@ type MacVerifyRequest struct { func (x *MacVerifyRequest) Reset() { *x = MacVerifyRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *MacVerifyRequest) String() string { @@ -3087,11 +2707,7 @@ func (*MacVerifyRequest) ProtoMessage() {} func (x *MacVerifyRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[30] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3163,17 +2779,9 @@ type GenerateRandomBytesRequest struct { func (x *GenerateRandomBytesRequest) Reset() { *x = GenerateRandomBytesRequest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *GenerateRandomBytesRequest) String() string { @@ -3184,11 +2792,7 @@ func (*GenerateRandomBytesRequest) ProtoMessage() {} func (x *GenerateRandomBytesRequest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[31] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3290,17 +2894,9 @@ type EncryptResponse struct { func (x *EncryptResponse) Reset() { *x = EncryptResponse{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *EncryptResponse) String() string { @@ -3311,11 
+2907,7 @@ func (*EncryptResponse) ProtoMessage() {} func (x *EncryptResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[32] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3410,17 +3002,9 @@ type DecryptResponse struct { func (x *DecryptResponse) Reset() { *x = DecryptResponse{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *DecryptResponse) String() string { @@ -3431,11 +3015,7 @@ func (*DecryptResponse) ProtoMessage() {} func (x *DecryptResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[33] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3574,17 +3154,9 @@ type RawEncryptResponse struct { func (x *RawEncryptResponse) Reset() { *x = RawEncryptResponse{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[34] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *RawEncryptResponse) String() string { @@ -3595,11 +3167,7 @@ func (*RawEncryptResponse) ProtoMessage() {} func (x *RawEncryptResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[34] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3759,17 +3327,9 @@ type RawDecryptResponse struct { func (x *RawDecryptResponse) Reset() { *x = RawDecryptResponse{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[35] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *RawDecryptResponse) String() string { @@ -3780,11 +3340,7 @@ func (*RawDecryptResponse) ProtoMessage() {} func (x *RawDecryptResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[35] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3905,17 +3461,9 @@ type AsymmetricSignResponse struct { func (x *AsymmetricSignResponse) Reset() { *x = 
AsymmetricSignResponse{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[36] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *AsymmetricSignResponse) String() string { @@ -3926,11 +3474,7 @@ func (*AsymmetricSignResponse) ProtoMessage() {} func (x *AsymmetricSignResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[36] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4033,17 +3577,9 @@ type AsymmetricDecryptResponse struct { func (x *AsymmetricDecryptResponse) Reset() { *x = AsymmetricDecryptResponse{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[37] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *AsymmetricDecryptResponse) String() string { @@ -4054,11 +3590,7 @@ func (*AsymmetricDecryptResponse) ProtoMessage() {} func (x *AsymmetricDecryptResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[37] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4150,17 +3682,9 @@ type MacSignResponse struct { func (x *MacSignResponse) Reset() { *x = MacSignResponse{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[38] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *MacSignResponse) String() string { @@ -4171,11 +3695,7 @@ func (*MacSignResponse) ProtoMessage() {} func (x *MacSignResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[38] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4285,17 +3805,9 @@ type MacVerifyResponse struct { func (x *MacVerifyResponse) Reset() { *x = MacVerifyResponse{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[39] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *MacVerifyResponse) String() string { @@ -4306,11 
+3818,7 @@ func (*MacVerifyResponse) ProtoMessage() {} func (x *MacVerifyResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[39] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4394,17 +3902,9 @@ type GenerateRandomBytesResponse struct { func (x *GenerateRandomBytesResponse) Reset() { *x = GenerateRandomBytesResponse{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[40] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *GenerateRandomBytesResponse) String() string { @@ -4415,11 +3915,7 @@ func (*GenerateRandomBytesResponse) ProtoMessage() {} func (x *GenerateRandomBytesResponse) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[40] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4466,17 +3962,9 @@ type Digest struct { func (x *Digest) Reset() { *x = Digest{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[41] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Digest) String() string { @@ -4487,11 +3975,7 @@ func (*Digest) ProtoMessage() {} func (x *Digest) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[41] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4580,17 +4064,9 @@ type LocationMetadata struct { func (x *LocationMetadata) Reset() { *x = LocationMetadata{} -<<<<<<< HEAD mi := &file_google_cloud_kms_v1_service_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_cloud_kms_v1_service_proto_msgTypes[42] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *LocationMetadata) String() string { @@ -4601,11 +4077,7 @@ func (*LocationMetadata) ProtoMessage() {} func (x *LocationMetadata) ProtoReflect() protoreflect.Message { mi := &file_google_cloud_kms_v1_service_proto_msgTypes[42] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5669,27 +5141,16 @@ var file_google_cloud_kms_v1_service_proto_rawDesc = []byte{ 0x6d, 0x2f, 0x61, 0x75, 0x74, 
0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, -<<<<<<< HEAD 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x42, 0x7c, -======= - 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x42, 0x7f, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x08, 0x4b, 0x6d, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6b, 0x6d, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0x3b, 0x6b, 0x6d, 0x73, 0x70, 0x62, -<<<<<<< HEAD 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4b, 0x6d, 0x73, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4b, 0x6d, 0x73, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -======= - 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, - 0x75, 0x64, 0x2e, 0x4b, 0x6d, 0x73, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4b, 0x6d, 0x73, 0x5c, 0x56, 0x31, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var ( @@ -5880,527 +5341,6 @@ func file_google_cloud_kms_v1_service_proto_init() { return } file_google_cloud_kms_v1_resources_proto_init() -<<<<<<< HEAD -======= - if !protoimpl.UnsafeEnabled { - file_google_cloud_kms_v1_service_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*ListKeyRingsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*ListCryptoKeysRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*ListCryptoKeyVersionsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*ListImportJobsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*ListKeyRingsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*ListCryptoKeysResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_google_cloud_kms_v1_service_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*ListCryptoKeyVersionsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*ListImportJobsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*GetKeyRingRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*GetCryptoKeyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*GetCryptoKeyVersionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*GetPublicKeyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*GetImportJobRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[13].Exporter = func(v any, i int) any { - switch v := v.(*CreateKeyRingRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[14].Exporter = func(v any, i int) any { - switch v := v.(*CreateCryptoKeyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[15].Exporter = func(v any, i int) any { - switch v := v.(*CreateCryptoKeyVersionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[16].Exporter = func(v any, i int) any { - switch v := v.(*ImportCryptoKeyVersionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[17].Exporter = func(v any, i int) any { - switch v := v.(*CreateImportJobRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[18].Exporter = func(v any, i int) any { - switch v := v.(*UpdateCryptoKeyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[19].Exporter = func(v any, i int) any { - switch v := 
v.(*UpdateCryptoKeyVersionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[20].Exporter = func(v any, i int) any { - switch v := v.(*UpdateCryptoKeyPrimaryVersionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[21].Exporter = func(v any, i int) any { - switch v := v.(*DestroyCryptoKeyVersionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[22].Exporter = func(v any, i int) any { - switch v := v.(*RestoreCryptoKeyVersionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[23].Exporter = func(v any, i int) any { - switch v := v.(*EncryptRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[24].Exporter = func(v any, i int) any { - switch v := v.(*DecryptRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[25].Exporter = func(v any, i int) any { - switch v := v.(*RawEncryptRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[26].Exporter = func(v any, i int) any { - switch v := v.(*RawDecryptRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[27].Exporter = func(v any, i int) any { - switch v := v.(*AsymmetricSignRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[28].Exporter = func(v any, i int) any { - switch v := v.(*AsymmetricDecryptRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[29].Exporter = func(v any, i int) any { - switch v := v.(*MacSignRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[30].Exporter = func(v any, i int) any { - switch v := v.(*MacVerifyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[31].Exporter = func(v any, i int) any { - switch v := v.(*GenerateRandomBytesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[32].Exporter = func(v any, i int) any { - switch v := v.(*EncryptResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return 
&v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[33].Exporter = func(v any, i int) any { - switch v := v.(*DecryptResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[34].Exporter = func(v any, i int) any { - switch v := v.(*RawEncryptResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[35].Exporter = func(v any, i int) any { - switch v := v.(*RawDecryptResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[36].Exporter = func(v any, i int) any { - switch v := v.(*AsymmetricSignResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[37].Exporter = func(v any, i int) any { - switch v := v.(*AsymmetricDecryptResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[38].Exporter = func(v any, i int) any { - switch v := v.(*MacSignResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[39].Exporter = func(v any, i int) any { - switch v := v.(*MacVerifyResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[40].Exporter = func(v any, i int) any { - switch v := v.(*GenerateRandomBytesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[41].Exporter = func(v any, i int) any { - switch v := v.(*Digest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_cloud_kms_v1_service_proto_msgTypes[42].Exporter = func(v any, i int) any { - switch v := v.(*LocationMetadata); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) file_google_cloud_kms_v1_service_proto_msgTypes[16].OneofWrappers = []any{ (*ImportCryptoKeyVersionRequest_RsaAesWrappedKey)(nil), } diff --git a/vendor/cloud.google.com/go/kms/internal/version.go b/vendor/cloud.google.com/go/kms/internal/version.go index 8bf1733b74..b3799f0518 100644 --- a/vendor/cloud.google.com/go/kms/internal/version.go +++ b/vendor/cloud.google.com/go/kms/internal/version.go @@ -15,8 +15,4 @@ package internal // Version is the current tagged release of the library. 
-<<<<<<< HEAD
 const Version = "1.20.4"
-=======
-const Version = "1.20.1"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md
index a82766a95d..e90454d01a 100644
--- a/vendor/cloud.google.com/go/storage/CHANGES.md
+++ b/vendor/cloud.google.com/go/storage/CHANGES.md
@@ -1,7 +1,6 @@
 # Changes
 
-<<<<<<< HEAD
 ## [1.50.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.49.0...storage/v1.50.0) (2025-01-09)
@@ -44,8 +43,6 @@
 * **storage:** Monitored resource detection ([#11197](https://github.com/googleapis/google-cloud-go/issues/11197)) ([911bcd8](https://github.com/googleapis/google-cloud-go/commit/911bcd8b1816256482bd52e85da7eaf00c315293))
 * **storage:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 ## [1.48.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.47.0...storage/v1.48.0) (2024-12-05)
diff --git a/vendor/cloud.google.com/go/storage/client.go b/vendor/cloud.google.com/go/storage/client.go
index 550503301f..1ea1d98ce5 100644
--- a/vendor/cloud.google.com/go/storage/client.go
+++ b/vendor/cloud.google.com/go/storage/client.go
@@ -62,10 +62,7 @@ type storageClient interface {
 	GetObject(ctx context.Context, params *getObjectParams, opts ...storageOption) (*ObjectAttrs, error)
 	UpdateObject(ctx context.Context, params *updateObjectParams, opts ...storageOption) (*ObjectAttrs, error)
 	RestoreObject(ctx context.Context, params *restoreObjectParams, opts ...storageOption) (*ObjectAttrs, error)
-<<<<<<< HEAD
 	MoveObject(ctx context.Context, params *moveObjectParams, opts ...storageOption) (*ObjectAttrs, error)
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	// Default Object ACL methods.
 
@@ -111,11 +108,8 @@ type storageClient interface {
 	ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (map[string]*Notification, error)
 	CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (*Notification, error)
 	DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) error
-<<<<<<< HEAD
 	NewMultiRangeDownloader(ctx context.Context, params *newMultiRangeDownloaderParams, opts ...storageOption) (*MultiRangeDownloader, error)
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 // settings contains transport-agnostic configuration for API calls made via
@@ -269,12 +263,9 @@ type openWriterParams struct {
 	// sendCRC32C - see `Writer.SendCRC32C`.
 	// Optional.
 	sendCRC32C bool
-<<<<<<< HEAD
 	// append - Write with appendable object semantics.
 	// Optional.
 	append bool
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	// Writer callbacks
 
@@ -292,7 +283,6 @@ type openWriterParams struct {
 	setObj func(*ObjectAttrs)
 }
 
-<<<<<<< HEAD
 type newMultiRangeDownloaderParams struct {
 	bucket string
 	conds  *Conditions
@@ -302,8 +292,6 @@ type newMultiRangeDownloaderParams struct {
 	handle *ReadHandle
 }
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 type newRangeReaderParams struct {
 	bucket string
 	conds  *Conditions
@@ -313,10 +301,7 @@ type newRangeReaderParams struct {
 	object         string
 	offset         int64
 	readCompressed bool // Use accept-encoding: gzip. Only works for HTTP currently.
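
(The `storageClient` interface hunks above show why the upgraded vendor code compiles for both transports: `MoveObject` and `NewMultiRangeDownloader` are added to the single interface that the HTTP and gRPC clients both implement. A minimal sketch of that seam, using stand-in types — apart from the `MoveObject` method name, none of these identifiers are the vendored ones:

```go
// Stand-in sketch of the storageClient seam: one interface, one
// implementation per transport, so a new method only needs an interface
// entry plus an implementation in each client.
package main

import (
	"context"
	"fmt"
)

// moveParams mirrors the shape of the vendored moveObjectParams.
type moveParams struct {
	bucket, srcObject, dstObject string
}

type objectAttrs struct{ name string }

// client stands in for the vendored storageClient interface.
type client interface {
	MoveObject(ctx context.Context, p *moveParams) (*objectAttrs, error)
}

// fakeGRPCClient stands in for grpcStorageClient.
type fakeGRPCClient struct{}

func (fakeGRPCClient) MoveObject(ctx context.Context, p *moveParams) (*objectAttrs, error) {
	// The real implementation builds a storagepb.MoveObjectRequest here.
	return &objectAttrs{name: p.dstObject}, nil
}

func main() {
	var c client = fakeGRPCClient{}
	attrs, err := c.MoveObject(context.Background(), &moveParams{
		bucket: "bkt", srcObject: "a", dstObject: "b",
	})
	fmt.Println(attrs.name, err)
}
```
)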
-<<<<<<< HEAD
 	handle *ReadHandle
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 type getObjectParams struct {
@@ -344,7 +329,6 @@ type restoreObjectParams struct {
 	copySourceACL bool
 }
 
-<<<<<<< HEAD
 type moveObjectParams struct {
 	bucket, srcObject, dstObject string
 	srcConds                     *Conditions
@@ -352,8 +336,6 @@ type moveObjectParams struct {
 	encryptionKey []byte
 }
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 type composeObjectRequest struct {
 	dstBucket string
 	dstObject destinationObject
diff --git a/vendor/cloud.google.com/go/storage/experimental/experimental.go b/vendor/cloud.google.com/go/storage/experimental/experimental.go
index 4e1427fe32..5bcc59ad2f 100644
--- a/vendor/cloud.google.com/go/storage/experimental/experimental.go
+++ b/vendor/cloud.google.com/go/storage/experimental/experimental.go
@@ -73,7 +73,6 @@ type ReadStallTimeoutConfig struct {
 	// and retried.
 	TargetPercentile float64
 }
-<<<<<<< HEAD
 
 // WithGRPCBidiReads provides an [option.ClientOption] that may be passed to
 // [cloud.google.com/go/storage.NewGRPCClient].
@@ -86,5 +85,3 @@ type ReadStallTimeoutConfig struct {
 func WithGRPCBidiReads() option.ClientOption {
 	return internal.WithGRPCBidiReads.(func() option.ClientOption)()
 }
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/cloud.google.com/go/storage/grpc_client.go b/vendor/cloud.google.com/go/storage/grpc_client.go
index b9e176c8f9..2d243bf9fe 100644
--- a/vendor/cloud.google.com/go/storage/grpc_client.go
+++ b/vendor/cloud.google.com/go/storage/grpc_client.go
@@ -24,28 +24,18 @@ import (
 	"log"
 	"net/url"
 	"os"
-<<<<<<< HEAD
 	"sync"
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	"cloud.google.com/go/iam/apiv1/iampb"
 	"cloud.google.com/go/internal/trace"
 	gapic "cloud.google.com/go/storage/internal/apiv2"
 	"cloud.google.com/go/storage/internal/apiv2/storagepb"
-<<<<<<< HEAD
-=======
-	"github.com/google/uuid"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"github.com/googleapis/gax-go/v2"
 	"google.golang.org/api/googleapi"
 	"google.golang.org/api/iterator"
 	"google.golang.org/api/option"
 	"google.golang.org/api/option/internaloption"
-<<<<<<< HEAD
 	"google.golang.org/api/transport"
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/encoding"
@@ -115,12 +105,7 @@ func defaultGRPCOptions() []option.ClientOption {
 		// Only enable DirectPath when the emulator is not being targeted.
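
(`WithGRPCBidiReads` in the experimental hunk above is wired through `internal.WithGRPCBidiReads.(func() option.ClientOption)()`: an untyped package variable that the owning package assigns at init time and the experimental package asserts back to a function type, which avoids an import cycle between the two packages. A self-contained sketch of that pattern, with hypothetical names:

```go
// Sketch of the untyped-hook pattern used by the vendored experimental
// package. The hook lives in a low-level package both sides can import;
// the owner fills it in, the experimental wrapper asserts it back.
package main

import "fmt"

// "internal" side: an untyped hook the owning package fills in.
var withFastReads any

// Owning-package side; in the real library this happens in that
// package's init function.
type clientOption string

func init() {
	withFastReads = func() clientOption { return "fast-reads" }
}

// "experimental" side: assert the hook to its function type and call it.
func WithFastReads() clientOption {
	return withFastReads.(func() clientOption)()
}

func main() {
	fmt.Println(WithFastReads()) // prints "fast-reads"
}
```
)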
 		defaults = append(defaults,
 			internaloption.EnableDirectPath(true),
-<<<<<<< HEAD
 			internaloption.EnableDirectPathXds())
-=======
-			internaloption.EnableDirectPathXds(),
-			internaloption.AllowNonDefaultServiceAccount(true))
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 
 	return defaults
@@ -131,7 +116,6 @@ func defaultGRPCOptions() []option.ClientOption {
 type grpcStorageClient struct {
 	raw      *gapic.Client
 	settings *settings
-<<<<<<< HEAD
 	config   *storageConfig
 }
 
@@ -151,8 +135,6 @@ func enableClientMetrics(ctx context.Context, s *settings, config storageConfig)
 		return nil, fmt.Errorf("gRPC Metrics: %w", err)
 	}
 	return metricsContext, nil
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 // newGRPCStorageClient initializes a new storageClient that uses the gRPC
@@ -185,10 +167,7 @@ func newGRPCStorageClient(ctx context.Context, opts ...storageOption) (storageCl
 	return &grpcStorageClient{
 		raw:      g,
 		settings: s,
-<<<<<<< HEAD
 		config:   &config,
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}, nil
 }
 
@@ -703,7 +682,6 @@ func (c *grpcStorageClient) RestoreObject(ctx context.Context, params *restoreOb
 	return attrs, err
 }
 
-<<<<<<< HEAD
 func (c *grpcStorageClient) MoveObject(ctx context.Context, params *moveObjectParams, opts ...storageOption) (*ObjectAttrs, error) {
 	s := callSettings(c.settings, opts...)
 	req := &storagepb.MoveObjectRequest{
@@ -734,8 +712,6 @@ func (c *grpcStorageClient) MoveObject(ctx context.Context, params *moveObjectPa
 	return attrs, err
 }
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // Default Object ACL methods.
 
 func (c *grpcStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error {
@@ -1000,11 +976,7 @@ func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec
 	if err := applyCondsProto("Copy destination", defaultGen, req.dstObject.conds, call); err != nil {
 		return nil, err
 	}
-<<<<<<< HEAD
 	if err := applySourceCondsProto("Copy source", req.srcObject.gen, req.srcObject.conds, call); err != nil {
-=======
-	if err := applySourceCondsProto(req.srcObject.gen, req.srcObject.conds, call); err != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		return nil, err
 	}
 
@@ -1040,11 +1012,7 @@ func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec
 	return r, nil
 }
 
-<<<<<<< HEAD
 // Custom codec to be used for unmarshaling BidiReadObjectResponse messages.
-=======
-// Custom codec to be used for unmarshaling ReadObjectResponse messages.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // This is used to avoid a copy of object data in proto.Unmarshal.
 type bytesCodecV2 struct {
 }
@@ -1068,11 +1036,7 @@ func (bytesCodecV2) Marshal(v any) (mem.BufferSlice, error) {
 	return data, nil
 }
 
-<<<<<<< HEAD
 // Unmarshal is used for data received for BidiReadObjectResponse. We want to preserve
-=======
-// Unmarshal is used for data received for ReadObjectResponse. We want to preserve
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // the mem.BufferSlice in most cases rather than copying and calling proto.Unmarshal.
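
(`bytesCodecV2` above exists so that object payloads received on the stream are not copied by `proto.Unmarshal`: when the caller passes a `*mem.BufferSlice`, the codec hands the wire buffers over as-is. A simplified sketch of the idea, assuming grpc-go v1.66+ for the `mem` package; the `Ref` bookkeeping is an assumption about buffer ownership, not the vendored code verbatim:

```go
// rawCodec sketches the bytesCodecV2 approach: raw byte requests skip
// the proto decode entirely; typed messages fall back to proto.Unmarshal.
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
	"google.golang.org/protobuf/proto"
)

type rawCodec struct{}

func (rawCodec) Marshal(v any) (mem.BufferSlice, error) {
	msg, ok := v.(proto.Message)
	if !ok {
		return nil, fmt.Errorf("cannot marshal %T", v)
	}
	b, err := proto.Marshal(msg)
	if err != nil {
		return nil, err
	}
	return mem.BufferSlice{mem.SliceBuffer(b)}, nil
}

func (rawCodec) Unmarshal(data mem.BufferSlice, v any) error {
	switch v := v.(type) {
	case *mem.BufferSlice:
		*v = data // keep the buffers: no copy, no proto decode
		data.Ref() // assumed: take a reference so they outlive the RPC layer
		return nil
	case proto.Message:
		return proto.Unmarshal(data.Materialize(), v)
	default:
		return fmt.Errorf("cannot unmarshal into %T", v)
	}
}

func (rawCodec) Name() string { return "" } // registered per-call, not globally

func main() {
	var got mem.BufferSlice
	src := mem.BufferSlice{mem.SliceBuffer([]byte("wire bytes"))}
	if err := (rawCodec{}).Unmarshal(src, &got); err != nil {
		panic(err)
	}
	fmt.Println(string(got.Materialize())) // "wire bytes", buffers shared
}
```
)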
func (bytesCodecV2) Unmarshal(data mem.BufferSlice, v any) error { switch v := v.(type) { @@ -1093,7 +1057,6 @@ func (bytesCodecV2) Name() string { return "" } -<<<<<<< HEAD func contextMetadataFromBidiReadObject(req *storagepb.BidiReadObjectRequest) []string { if len(req.GetReadObjectSpec().GetRoutingToken()) > 0 { return []string{"x-goog-request-params", fmt.Sprintf("bucket=%s&routing_token=%s", req.GetReadObjectSpec().GetBucket(), req.GetReadObjectSpec().GetRoutingToken())} @@ -1527,9 +1490,6 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange return c.NewRangeReaderReadObject(ctx, params, opts...) } -======= -func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.NewRangeReader") defer func() { trace.EndSpan(ctx, err) }() @@ -1544,18 +1504,13 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange } b := bucketResourceName(globalProjectAlias, params.bucket) -<<<<<<< HEAD // Create a BidiReadObjectRequest. spec := &storagepb.BidiReadObjectSpec{ -======= - req := &storagepb.ReadObjectRequest{ ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Bucket: b, Object: params.object, CommonObjectRequestParams: toProtoCommonObjectRequestParams(params.encryptionKey), } -<<<<<<< HEAD if err := applyCondsProto("gRPCReader.NewRangeReader", params.gen, params.conds, spec); err != nil { return nil, err } @@ -1568,12 +1523,6 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange ReadObjectSpec: spec, } ctx = gax.InsertMetadataIntoOutgoingContext(ctx, contextMetadataFromBidiReadObject(req)...) -======= - // The default is a negative value, which means latest. - if params.gen >= 0 { - req.Generation = params.gen - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Define a function that initiates a Read with offset and length, assuming // we have already read seen bytes. @@ -1586,7 +1535,6 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange cc, cancel := context.WithCancel(ctx) -<<<<<<< HEAD // BidiReadObject can take multiple ranges, but we just request one in this case. readRange := &storagepb.ReadRange{ ReadOffset: params.offset + seen, @@ -1602,27 +1550,10 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange req.ReadRanges = []*storagepb.ReadRange{readRange} var stream storagepb.Storage_BidiReadObjectClient -======= - req.ReadOffset = params.offset + seen - - // Only set a ReadLimit if length is greater than zero, because <= 0 means - // to read it all. - if params.length > 0 { - req.ReadLimit = params.length - seen - } - - if err := applyCondsProto("gRPCReader.reopen", params.gen, params.conds, req); err != nil { - cancel() - return nil, nil, err - } - - var stream storagepb.Storage_ReadObjectClient ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var err error var decoder *readResponseDecoder err = run(cc, func(ctx context.Context) error { -<<<<<<< HEAD stream, err = c.raw.BidiReadObject(ctx, s.gax...) if err != nil { return err @@ -1634,24 +1565,13 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange if err := stream.CloseSend(); err != nil { return err } -======= - stream, err = c.raw.ReadObject(ctx, req, s.gax...) 
- if err != nil { - return err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Receive the message into databuf as a wire-encoded message so we can // use a custom decoder to avoid an extra copy at the protobuf layer. databufs := mem.BufferSlice{} err := stream.RecvMsg(&databufs) -<<<<<<< HEAD // These types of errors show up on the RecvMsg call, rather than the // initialization of the stream via BidiReadObject above. -======= - // These types of errors show up on the Recv call, rather than the - // initialization of the stream via ReadObject above. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { return ErrObjectNotExist } @@ -1678,33 +1598,21 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange return nil, nil, err } -<<<<<<< HEAD return &readStreamResponse{ stream: stream, decoder: decoder, }, cancel, nil -======= - return &readStreamResponse{stream, decoder}, cancel, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } res, cancel, err := reopen(0) if err != nil { return nil, err } -<<<<<<< HEAD // The first message was Recv'd on stream open, use it to populate the // object metadata and read handle. msg := res.decoder.msg obj := msg.GetMetadata() handle := ReadHandle(msg.GetReadHandle().GetHandle()) -======= - - // The first message was Recv'd on stream open, use it to populate the - // object metadata. - msg := res.decoder.msg - obj := msg.GetMetadata() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // This is the size of the entire object, even if only a range was requested. size := obj.GetSize() @@ -1713,18 +1621,13 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange wantCRC uint32 checkCRC bool ) -<<<<<<< HEAD if checksums := obj.GetChecksums(); checksums != nil && checksums.Crc32C != nil { -======= - if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if params.offset == 0 && params.length < 0 { checkCRC = true } wantCRC = checksums.GetCrc32C() } -<<<<<<< HEAD startOffset := params.offset if params.offset < 0 { startOffset = size + params.offset @@ -1745,11 +1648,6 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange Attrs: ReaderObjectAttrs{ Size: size, StartOffset: startOffset, -======= - r = &Reader{ - Attrs: ReaderObjectAttrs{ - Size: size, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ContentType: obj.GetContentType(), ContentEncoding: obj.GetContentEncoding(), CacheControl: obj.GetCacheControl(), @@ -1758,10 +1656,7 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange Generation: obj.GetGeneration(), CRC32C: wantCRC, }, -<<<<<<< HEAD objectMetadata: &metadata, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) reader: &gRPCReader{ stream: res.stream, reopen: reopen, @@ -1775,19 +1670,8 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange checkCRC: checkCRC, }, checkCRC: checkCRC, -<<<<<<< HEAD handle: &handle, remain: remain, -======= - } - - cr := msg.GetContentRange() - if cr != nil { - r.Attrs.StartOffset = cr.GetStart() - r.remain = cr.GetEnd() - cr.GetStart() - } else { - r.remain = size ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // For a zero-length request, explicitly close the stream and set remaining @@ -1801,33 +1685,16 @@ func (c *grpcStorageClient) 
NewRangeReader(ctx context.Context, params *newRange } func (c *grpcStorageClient) OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) { -<<<<<<< HEAD var offset int64 errorf := params.setError setObj := params.setObj pr, pw := io.Pipe() s := callSettings(c.settings, opts...) -======= - s := callSettings(c.settings, opts...) - - var offset int64 - errorf := params.setError - progress := params.progress - setObj := params.setObj - - pr, pw := io.Pipe() - gw := newGRPCWriter(c, params, pr) - gw.settings = s - if s.userProject != "" { - gw.ctx = setUserProjectMetadata(gw.ctx, s.userProject) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // This function reads the data sent to the pipe and sends sets of messages // on the gRPC client-stream as the buffer is filled. go func() { -<<<<<<< HEAD err := func() error { // Unless the user told us the content type, we have to determine it from // the first read. @@ -1878,72 +1745,6 @@ func (c *grpcStorageClient) OpenWriter(params *openWriterParams, opts ...storage errorf(err) pr.CloseWithError(err) close(params.donec) -======= - defer close(params.donec) - - // Loop until there is an error or the Object has been finalized. - for { - // Note: This blocks until either the buffer is full or EOF is read. - recvd, doneReading, err := gw.read() - if err != nil { - err = checkCanceled(err) - errorf(err) - pr.CloseWithError(err) - return - } - - if params.attrs.Retention != nil { - // TO-DO: remove once ObjectRetention is available - see b/308194853 - err = status.Errorf(codes.Unimplemented, "storage: object retention is not supported in gRPC") - errorf(err) - pr.CloseWithError(err) - return - } - // The chunk buffer is full, but there is no end in sight. This - // means that either: - // 1. A resumable upload will need to be used to send - // multiple chunks, until we are done reading data. Start a - // resumable upload if it has not already been started. - // 2. ChunkSize of zero may also have a full buffer, but a resumable - // session should not be initiated in this case. - if !doneReading && gw.upid == "" && params.chunkSize != 0 { - err = gw.startResumableUpload() - if err != nil { - err = checkCanceled(err) - errorf(err) - pr.CloseWithError(err) - return - } - } - - o, off, err := gw.uploadBuffer(recvd, offset, doneReading, newUploadBufferRetryConfig(gw.settings)) - if err != nil { - err = checkCanceled(err) - errorf(err) - pr.CloseWithError(err) - return - } - - // At this point, the current buffer has been uploaded. For resumable - // uploads and chunkSize = 0, capture the committed offset here in case - // the upload was not finalized and another chunk is to be uploaded. Call - // the progress function for resumable uploads only. - if gw.upid != "" || gw.chunkSize == 0 { - offset = off - } - if gw.upid != "" { - progress(offset) - } - - // When we are done reading data without errors, set the object and - // finish. - if doneReading { - // Build Object from server's response. 
- setObj(newObjectFromProto(o)) - return - } - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }() return pw, nil @@ -2063,7 +1864,6 @@ func setUserProjectMetadata(ctx context.Context, project string) context.Context } type readStreamResponse struct { -<<<<<<< HEAD stream storagepb.Storage_BidiReadObjectClient decoder *readResponseDecoder } @@ -2101,16 +1901,6 @@ type gRPCReader struct { seen, size int64 zeroRange bool stream storagepb.Storage_BidiReadObjectClient -======= - stream storagepb.Storage_ReadObjectClient - decoder *readResponseDecoder -} - -type gRPCReader struct { - seen, size int64 - zeroRange bool - stream storagepb.Storage_ReadObjectClient ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) reopen func(seen int64) (*readStreamResponse, context.CancelFunc, error) leftovers []byte currMsg *readResponseDecoder // decoder for the current message @@ -2264,10 +2054,6 @@ func (r *gRPCReader) Close() error { if r.cancel != nil { r.cancel() } -<<<<<<< HEAD -======= - r.stream = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) r.currMsg = nil return nil } @@ -2283,17 +2069,10 @@ func (r *gRPCReader) Close() error { // // The last error received is the one that is returned, which could be from // an attempt to reopen the stream. -<<<<<<< HEAD func (r *gRPCReader) recv() error { databufs := mem.BufferSlice{} err := r.stream.RecvMsg(&databufs) -======= -func (r *gRPCReader) recv() error { - databufs := mem.BufferSlice{} - err := r.stream.RecvMsg(&databufs) - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var shouldRetry = ShouldRetry if r.settings.retry != nil && r.settings.retry.shouldRetry != nil { shouldRetry = r.settings.retry.shouldRetry @@ -2316,7 +2095,6 @@ func (r *gRPCReader) recv() error { // ReadObjectResponse field and subfield numbers. const ( -<<<<<<< HEAD // Top level fields. metadataField = protowire.Number(4) objectRangeDataField = protowire.Number(6) @@ -2328,14 +2106,6 @@ const ( // Nested in ObjectRangeData.ChecksummedData checksummedDataContentField = protowire.Number(1) checksummedDataCRC32CField = protowire.Number(2) -======= - checksummedDataField = protowire.Number(1) - checksummedDataContentField = protowire.Number(1) - checksummedDataCRC32CField = protowire.Number(2) - objectChecksumsField = protowire.Number(2) - contentRangeField = protowire.Number(3) - metadataField = protowire.Number(4) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // readResponseDecoder is a wrapper on the raw message, used to decode one message @@ -2348,15 +2118,9 @@ type readResponseDecoder struct { currBuf int // index of the current buffer being processed currOff uint64 // offset in the current buffer // Processed data -<<<<<<< HEAD msg *storagepb.BidiReadObjectResponse // processed response message with all fields other than object data populated dataOffsets bufferSliceOffsets // offsets of the object data in the message. done bool // true if the data has been completely read. -======= - msg *storagepb.ReadObjectResponse // processed response message with all fields other than object data populated - dataOffsets bufferSliceOffsets // offsets of the object data in the message. - done bool // true if the data has been completely read. 
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type bufferSliceOffsets struct { @@ -2637,26 +2401,15 @@ func (d *readResponseDecoder) consumeBytesCopy() ([]byte, error) { return b, nil } -<<<<<<< HEAD // readFullObjectResponse returns the BidiReadObjectResponse that is encoded in the // wire-encoded message buffer b, or an error if the message is invalid. // This must be used on the first recv of an object as it may contain all fields // of BidiReadObjectResponse, and we use or pass on those fields to the user. -======= -// readFullObjectResponse returns the ReadObjectResponse that is encoded in the -// wire-encoded message buffer b, or an error if the message is invalid. -// This must be used on the first recv of an object as it may contain all fields -// of ReadObjectResponse, and we use or pass on those fields to the user. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // This function is essentially identical to proto.Unmarshal, except it aliases // the data in the input []byte. If the proto library adds a feature to // Unmarshal that does that, this function can be dropped. func (d *readResponseDecoder) readFullObjectResponse() error { -<<<<<<< HEAD msg := &storagepb.BidiReadObjectResponse{} -======= - msg := &storagepb.ReadObjectResponse{} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Loop over the entire message, extracting fields as we go. This does not // handle field concatenation, in which the contents of a single field @@ -2670,7 +2423,6 @@ func (d *readResponseDecoder) readFullObjectResponse() error { // Unmarshal the field according to its type. Only fields that are not // nil will be present. switch { -<<<<<<< HEAD // This is a repeated field, so it can occur more than once. But, for now // we can just take the first range per message since Reader only requests // a single range. @@ -2680,25 +2432,14 @@ func (d *readResponseDecoder) readFullObjectResponse() error { // The object data field was found. Initialize the data ranges assuming // exactly one range in the message. msg.ObjectDataRanges = []*storagepb.ObjectRangeData{{ChecksummedData: &storagepb.ChecksummedData{}, ReadRange: &storagepb.ReadRange{}}} -======= - case fieldNum == checksummedDataField && fieldType == protowire.BytesType: - // The ChecksummedData field was found. Initialize the struct. - msg.ChecksummedData = &storagepb.ChecksummedData{} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) bytesFieldLen, err := d.consumeVarint() if err != nil { return fmt.Errorf("consuming bytes: %v", err) } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var contentEndOff = d.off + bytesFieldLen for d.off < contentEndOff { gotNum, gotTyp, err := d.consumeTag() if err != nil { -<<<<<<< HEAD return fmt.Errorf("consuming objectRangeData tag: %w", err) } @@ -2757,64 +2498,11 @@ func (d *readResponseDecoder) readFullObjectResponse() error { buf, err := d.consumeBytesCopy() if err != nil { return fmt.Errorf("invalid BidiReadObjectResponse.Metadata: %v", err) -======= - return fmt.Errorf("consuming checksummedData tag: %w", err) - } - - switch { - case gotNum == checksummedDataContentField && gotTyp == protowire.BytesType: - // Get the offsets of the content bytes. 
- d.dataOffsets, err = d.consumeBytes() - if err != nil { - return fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Content: %w", err) - } - case gotNum == checksummedDataCRC32CField && gotTyp == protowire.Fixed32Type: - v, err := d.consumeFixed32() - if err != nil { - return fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Crc32C: %w", err) - } - msg.ChecksummedData.Crc32C = &v - default: - err := d.consumeFieldValue(gotNum, gotTyp) - if err != nil { - return fmt.Errorf("invalid field in ReadObjectResponse.ChecksummedData: %w", err) - } - } - } - case fieldNum == objectChecksumsField && fieldType == protowire.BytesType: - // The field was found. Initialize the struct. - msg.ObjectChecksums = &storagepb.ObjectChecksums{} - // Consume the bytes and copy them into a single buffer if they are split across buffers. - buf, err := d.consumeBytesCopy() - if err != nil { - return fmt.Errorf("invalid ReadObjectResponse.ObjectChecksums: %v", err) - } - // Unmarshal. - if err := proto.Unmarshal(buf, msg.ObjectChecksums); err != nil { - return err - } - case fieldNum == contentRangeField && fieldType == protowire.BytesType: - msg.ContentRange = &storagepb.ContentRange{} - buf, err := d.consumeBytesCopy() - if err != nil { - return fmt.Errorf("invalid ReadObjectResponse.ContentRange: %v", err) - } - if err := proto.Unmarshal(buf, msg.ContentRange); err != nil { - return err - } - case fieldNum == metadataField && fieldType == protowire.BytesType: - msg.Metadata = &storagepb.Object{} - - buf, err := d.consumeBytesCopy() - if err != nil { - return fmt.Errorf("invalid ReadObjectResponse.Metadata: %v", err) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if err := proto.Unmarshal(buf, msg.Metadata); err != nil { return err } -<<<<<<< HEAD case fieldNum == readHandleField && fieldType == protowire.BytesType: msg.ReadHandle = &storagepb.BidiReadHandle{} buf, err := d.consumeBytesCopy() @@ -2829,20 +2517,11 @@ func (d *readResponseDecoder) readFullObjectResponse() error { err := d.consumeFieldValue(fieldNum, fieldType) if err != nil { return fmt.Errorf("invalid field in BidiReadObjectResponse: %w", err) -======= - default: - err := d.consumeFieldValue(fieldNum, fieldType) - if err != nil { - return fmt.Errorf("invalid field in ReadObjectResponse: %w", err) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } } d.msg = msg -<<<<<<< HEAD -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } @@ -2862,7 +2541,6 @@ func (r *gRPCReader) reopenStream() error { return nil } -<<<<<<< HEAD func newGRPCWriter(c *grpcStorageClient, s *settings, params *openWriterParams, r io.Reader) (*gRPCWriter, error) { if params.attrs.Retention != nil { // TO-DO: remove once ObjectRetention is available - see b/308194853 @@ -2876,17 +2554,12 @@ func newGRPCWriter(c *grpcStorageClient, s *settings, params *openWriterParams, if params.chunkSize > 0 { size = params.chunkSize } -======= -func newGRPCWriter(c *grpcStorageClient, params *openWriterParams, r io.Reader) *gRPCWriter { - size := params.chunkSize ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Round up chunksize to nearest 256KiB if size%googleapi.MinUploadChunkSize != 0 { size += googleapi.MinUploadChunkSize - (size % googleapi.MinUploadChunkSize) } -<<<<<<< HEAD if s.userProject != "" { params.ctx = setUserProjectMetadata(params.ctx, s.userProject) } @@ -2898,13 +2571,6 @@ func newGRPCWriter(c *grpcStorageClient, params *openWriterParams, r io.Reader) // WriteObject doesn't support the generation 
condition, so use default. if err := applyCondsProto("WriteObject", defaultGen, params.conds, spec); err != nil { return nil, err -======= - // A completely bufferless upload is not possible as it is in JSON because - // the buffer must be provided to the message. However use the minimum size - // possible in this case. - if params.chunkSize == 0 { - size = googleapi.MinUploadChunkSize ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return &gRPCWriter{ @@ -2915,7 +2581,6 @@ func newGRPCWriter(c *grpcStorageClient, params *openWriterParams, r io.Reader) bucket: params.bucket, attrs: params.attrs, conds: params.conds, -<<<<<<< HEAD spec: spec, encryptionKey: params.encryptionKey, settings: s, @@ -2925,13 +2590,6 @@ func newGRPCWriter(c *grpcStorageClient, params *openWriterParams, r io.Reader) forceEmptyContentType: params.forceEmptyContentType, append: params.append, }, nil -======= - encryptionKey: params.encryptionKey, - sendCRC32C: params.sendCRC32C, - chunkSize: params.chunkSize, - forceEmptyContentType: params.forceEmptyContentType, - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // gRPCWriter is a wrapper around the the gRPC client-stream API that manages @@ -2946,7 +2604,6 @@ type gRPCWriter struct { bucket string attrs *ObjectAttrs conds *Conditions -<<<<<<< HEAD spec *storagepb.WriteObjectSpec encryptionKey []byte settings *settings @@ -3124,47 +2781,10 @@ func (w *gRPCWriter) newGRPCResumableBidiWriteBufferSender() (*gRPCResumableBidi stream: stream, settings: w.settings, }, nil -======= - encryptionKey []byte - settings *settings - - sendCRC32C bool - chunkSize int - forceEmptyContentType bool - - // The gRPC client-stream used for sending buffers. - stream storagepb.Storage_BidiWriteObjectClient - - // The Resumable Upload ID started by a gRPC-based Writer. - upid string -} - -// startResumableUpload initializes a Resumable Upload with gRPC and sets the -// upload ID on the Writer. -func (w *gRPCWriter) startResumableUpload() error { - spec, err := w.writeObjectSpec() - if err != nil { - return err - } - req := &storagepb.StartResumableWriteRequest{ - WriteObjectSpec: spec, - CommonObjectRequestParams: toProtoCommonObjectRequestParams(w.encryptionKey), - } - // TODO: Currently the checksums are only sent on the request to initialize - // the upload, but in the future, we must also support sending it - // on the *last* message of the stream. - req.ObjectChecksums = toProtoChecksums(w.sendCRC32C, w.attrs) - return run(w.ctx, func(ctx context.Context) error { - upres, err := w.c.raw.StartResumableWrite(w.ctx, req) - w.upid = upres.GetUploadId() - return err - }, w.settings.retry, w.settings.idempotent) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // queryProgress is a helper that queries the status of the resumable upload // associated with the given upload ID. -<<<<<<< HEAD func (s *gRPCResumableBidiWriteBufferSender) queryProgress() (int64, error) { var persistedSize int64 err := run(s.ctx, func(ctx context.Context) error { @@ -3300,296 +2920,11 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (obj } } return -======= -func (w *gRPCWriter) queryProgress() (int64, error) { - var persistedSize int64 - err := run(w.ctx, func(ctx context.Context) error { - q, err := w.c.raw.QueryWriteStatus(w.ctx, &storagepb.QueryWriteStatusRequest{ - UploadId: w.upid, - }) - persistedSize = q.GetPersistedSize() - return err - }, w.settings.retry, true) - - // q.GetCommittedSize() will return 0 if q is nil. 
- return persistedSize, err -} - -// uploadBuffer uploads the buffer at the given offset using a bi-directional -// Write stream. It will open a new stream if necessary (on the first call or -// after resuming from failure). The resulting write offset after uploading the -// buffer is returned, as well as well as the final Object if the upload is -// completed. -// -// Returns object, persisted size, and any error that is not retriable. -func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool, retryConfig *uploadBufferRetryConfig) (*storagepb.Object, int64, error) { - var err error - var lastWriteOfEntireObject bool - - sent := 0 - writeOffset := start - - toWrite := w.buf[:recvd] - - // Send a request with as many bytes as possible. - // Loop until all bytes are sent. -sendBytes: // label this loop so that we can use a continue statement from a nested block - for { - bytesNotYetSent := recvd - sent - remainingDataFitsInSingleReq := bytesNotYetSent <= maxPerMessageWriteSize - - if remainingDataFitsInSingleReq && doneReading { - lastWriteOfEntireObject = true - } - - // Send the maximum amount of bytes we can, unless we don't have that many. - bytesToSendInCurrReq := maxPerMessageWriteSize - if remainingDataFitsInSingleReq { - bytesToSendInCurrReq = bytesNotYetSent - } - - // Prepare chunk section for upload. - data := toWrite[sent : sent+bytesToSendInCurrReq] - - req := &storagepb.BidiWriteObjectRequest{ - Data: &storagepb.BidiWriteObjectRequest_ChecksummedData{ - ChecksummedData: &storagepb.ChecksummedData{ - Content: data, - }, - }, - WriteOffset: writeOffset, - FinishWrite: lastWriteOfEntireObject, - Flush: remainingDataFitsInSingleReq && !lastWriteOfEntireObject, - StateLookup: remainingDataFitsInSingleReq && !lastWriteOfEntireObject, - } - - // Open a new stream if necessary and set the first_message field on - // the request. The first message on the WriteObject stream must either - // be the Object or the Resumable Upload ID. - if w.stream == nil { - hds := []string{"x-goog-request-params", fmt.Sprintf("bucket=projects/_/buckets/%s", url.QueryEscape(w.bucket))} - ctx := gax.InsertMetadataIntoOutgoingContext(w.ctx, hds...) - ctx = setInvocationHeaders(ctx, retryConfig.invocationID, retryConfig.attempts) - - w.stream, err = w.c.raw.BidiWriteObject(ctx) - if err != nil { - return nil, 0, err - } - - if w.upid != "" { // resumable upload - req.FirstMessage = &storagepb.BidiWriteObjectRequest_UploadId{UploadId: w.upid} - } else { // non-resumable - spec, err := w.writeObjectSpec() - if err != nil { - return nil, 0, err - } - req.FirstMessage = &storagepb.BidiWriteObjectRequest_WriteObjectSpec{ - WriteObjectSpec: spec, - } - req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(w.encryptionKey) - // For a non-resumable upload, checksums must be sent in this message. - // TODO: Currently the checksums are only sent on the first message - // of the stream, but in the future, we must also support sending it - // on the *last* message of the stream (instead of the first). - req.ObjectChecksums = toProtoChecksums(w.sendCRC32C, w.attrs) - } - } - - err = w.stream.Send(req) - if err == io.EOF { - // err was io.EOF. The client-side of a stream only gets an EOF on Send - // when the backend closes the stream and wants to return an error - // status. - - // Receive from the stream Recv() until it returns a non-nil error - // to receive the server's status as an error. We may get multiple - // messages before the error due to buffering. 
- err = nil - for err == nil { - _, err = w.stream.Recv() - } - // Drop the stream reference as a new one will need to be created if - // we retry. - w.stream = nil - - // Retriable errors mean we should start over and attempt to - // resend the entire buffer via a new stream. - // If not retriable, falling through will return the error received. - err = retryConfig.retriable(w.ctx, err) - - if err == nil { - retryConfig.doBackOff(w.ctx) - - // TODO: Add test case for failure modes of querying progress. - writeOffset, err = w.determineOffset(start) - if err != nil { - return nil, 0, err - } - sent = int(writeOffset) - int(start) - - // Continue sending requests, opening a new stream and resending - // any bytes not yet persisted as per QueryWriteStatus - continue sendBytes - } - } - if err != nil { - return nil, 0, err - } - - // Update the immediate stream's sent total and the upload offset with - // the data sent. - sent += len(data) - writeOffset += int64(len(data)) - - // Not done sending data, do not attempt to commit it yet, loop around - // and send more data. - if recvd-sent > 0 { - continue sendBytes - } - - // The buffer has been uploaded and there is still more data to be - // uploaded, but this is not a resumable upload session. Therefore, - // don't check persisted data. - if !lastWriteOfEntireObject && w.chunkSize == 0 { - return nil, writeOffset, nil - } - - // Done sending the data in the buffer (remainingDataFitsInSingleReq - // should == true if we reach this code). - // If we are done sending the whole object, close the stream and get the final - // object. Otherwise, receive from the stream to confirm the persisted data. - if !lastWriteOfEntireObject { - resp, err := w.stream.Recv() - - if err != nil { - // Retriable errors mean we should start over and attempt to - // resend the entire buffer via a new stream. - // If not retriable, falling through will return the error received - // from closing the stream. - err = retryConfig.retriable(w.ctx, err) - if err != nil { - return nil, 0, err - } - - retryConfig.doBackOff(w.ctx) - writeOffset, err = w.determineOffset(start) - if err != nil { - return nil, 0, err - } - sent = int(writeOffset) - int(start) - - // Drop the stream reference as a new one will need to be created. - w.stream = nil - - continue sendBytes - } - - if resp.GetPersistedSize() != writeOffset { - // Retry if not all bytes were persisted. - writeOffset = resp.GetPersistedSize() - sent = int(writeOffset) - int(start) - continue sendBytes - } - } else { - // If the object is done uploading, close the send stream to signal - // to the server that we are done sending so that we can receive - // from the stream without blocking. - err = w.stream.CloseSend() - if err != nil { - // CloseSend() retries the send internally. It never returns an - // error in the current implementation, but we check it anyway in - // case that it does in the future. - return nil, 0, err - } - - // Stream receives do not block once send is closed, but we may not - // receive the response with the object right away; loop until we - // receive the object or error out. 
- var obj *storagepb.Object - for obj == nil { - resp, err := w.stream.Recv() - - if err != nil { - err = retryConfig.retriable(w.ctx, err) - if err != nil { - return nil, 0, err - } - retryConfig.doBackOff(w.ctx) - - writeOffset, err = w.determineOffset(start) - if err != nil { - return nil, 0, err - } - sent = int(writeOffset) - int(start) - w.stream = nil - continue sendBytes - } - - obj = resp.GetResource() - } - - // Even though we received the object response, continue reading - // until we receive a non-nil error, to ensure the stream does not - // leak even if the context isn't cancelled. See: - // https://pkg.go.dev/google.golang.org/grpc#ClientConn.NewStream - for err == nil { - _, err = w.stream.Recv() - } - - return obj, writeOffset, nil - } - - return nil, writeOffset, nil - } -} - -// determineOffset either returns the offset given to it in the case of a simple -// upload, or queries the write status in the case a resumable upload is being -// used. -func (w *gRPCWriter) determineOffset(offset int64) (int64, error) { - // For a Resumable Upload, we must start from however much data - // was committed. - if w.upid != "" { - committed, err := w.queryProgress() - if err != nil { - return 0, err - } - offset = committed - } - return offset, nil -} - -// writeObjectSpec constructs a WriteObjectSpec proto using the Writer's -// ObjectAttrs and applies its Conditions. This is only used for gRPC. -func (w *gRPCWriter) writeObjectSpec() (*storagepb.WriteObjectSpec, error) { - // To avoid modifying the ObjectAttrs embeded in the calling writer, deref - // the ObjectAttrs pointer to make a copy, then assign the desired name to - // the attribute. - attrs := *w.attrs - - spec := &storagepb.WriteObjectSpec{ - Resource: attrs.toProtoObject(w.bucket), - } - // WriteObject doesn't support the generation condition, so use default. - if err := applyCondsProto("WriteObject", defaultGen, w.conds, spec); err != nil { - return nil, err - } - return spec, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // read copies the data in the reader to the given buffer and reports how much // data was read into the buffer and if there is no more data to read (EOF). -<<<<<<< HEAD -func (w *gRPCWriter) read() (int, bool, error) { -======= -// Furthermore, if the attrs.ContentType is unset, the first bytes of content -// will be sniffed for a matching content type unless forceEmptyContentType is enabled. func (w *gRPCWriter) read() (int, bool, error) { - if w.attrs.ContentType == "" && !w.forceEmptyContentType { - w.reader, w.attrs.ContentType = gax.DetermineContentType(w.reader) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Set n to -1 to start the Read loop. 
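(For reference, a small sketch of the content-type sniffing step referenced above; gax.DetermineContentType appears elsewhere in this diff, while the sample input and printed value here are illustrative.)

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/googleapis/gax-go/v2"
)

func main() {
	var r io.Reader = strings.NewReader("%PDF-1.7 ...")
	// DetermineContentType sniffs the first bytes and returns a wrapped
	// reader that still yields the full stream, so no upload data is lost.
	r, contentType := gax.DetermineContentType(r)
	fmt.Println(contentType) // likely "application/pdf" for this input
	io.Copy(io.Discard, r)   // stand-in for sending the data
}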
var n, recvd int = -1, 0 var err error @@ -3613,92 +2948,3 @@ func checkCanceled(err error) error { return err } -<<<<<<< HEAD -======= - -type uploadBufferRetryConfig struct { - attempts int - invocationID string - config *retryConfig - lastErr error -} - -func newUploadBufferRetryConfig(settings *settings) *uploadBufferRetryConfig { - config := settings.retry - - if config == nil { - config = defaultRetry.clone() - } - - if config.shouldRetry == nil { - config.shouldRetry = ShouldRetry - } - - if config.backoff == nil { - config.backoff = &gaxBackoff{} - } else { - config.backoff.SetMultiplier(settings.retry.backoff.GetMultiplier()) - config.backoff.SetInitial(settings.retry.backoff.GetInitial()) - config.backoff.SetMax(settings.retry.backoff.GetMax()) - } - - return &uploadBufferRetryConfig{ - attempts: 1, - invocationID: uuid.New().String(), - config: config, - } -} - -// retriable determines if a retry is necessary and if so returns a nil error; -// otherwise it returns the error to be surfaced to the user. -func (retry *uploadBufferRetryConfig) retriable(ctx context.Context, err error) error { - if err == nil { - // a nil err does not need to be retried - return nil - } - if err != context.Canceled && err != context.DeadlineExceeded { - retry.lastErr = err - } - - if retry.config.policy == RetryNever { - return err - } - - if retry.config.maxAttempts != nil && retry.attempts >= *retry.config.maxAttempts { - return fmt.Errorf("storage: retry failed after %v attempts; last error: %w", retry.attempts, err) - } - - retry.attempts++ - - // Explicitly check context cancellation so that we can distinguish between a - // DEADLINE_EXCEEDED error from the server and a user-set context deadline. - // Unfortunately gRPC will codes.DeadlineExceeded (which may be retryable if it's - // sent by the server) in both cases. - ctxErr := ctx.Err() - if errors.Is(ctxErr, context.Canceled) || errors.Is(ctxErr, context.DeadlineExceeded) { - if retry.lastErr != nil { - return fmt.Errorf("retry failed with %v; last error: %w", ctxErr, retry.lastErr) - } - return ctxErr - } - - if !retry.config.shouldRetry(err) { - return err - } - return nil -} - -// doBackOff pauses for the appropriate amount of time; it should be called after -// encountering a retriable error. 
-func (retry *uploadBufferRetryConfig) doBackOff(ctx context.Context) error { - p := retry.config.backoff.Pause() - - if ctxErr := gax.Sleep(ctx, p); ctxErr != nil { - if retry.lastErr != nil { - return fmt.Errorf("retry failed with %v; last error: %w", ctxErr, retry.lastErr) - } - return ctxErr - } - return nil -} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/cloud.google.com/go/storage/grpc_metrics.go b/vendor/cloud.google.com/go/storage/grpc_metrics.go index 15ecfd7eca..f7bebd1def 100644 --- a/vendor/cloud.google.com/go/storage/grpc_metrics.go +++ b/vendor/cloud.google.com/go/storage/grpc_metrics.go @@ -16,13 +16,8 @@ package storage import ( "context" -<<<<<<< HEAD "errors" "fmt" -======= - "fmt" - "log" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "strings" "time" @@ -34,13 +29,8 @@ import ( "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/resource" "google.golang.org/api/option" -<<<<<<< HEAD "google.golang.org/grpc" "google.golang.org/grpc/experimental/stats" -======= - "google.golang.org/api/transport" - "google.golang.org/grpc" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "google.golang.org/grpc/stats/opentelemetry" ) @@ -49,7 +39,6 @@ const ( metricPrefix = "storage.googleapis.com/client/" ) -<<<<<<< HEAD // Added to help with tests type storageMonitoredResource struct { project string @@ -240,8 +229,6 @@ func (e *exporterLogSuppressor) Export(ctx context.Context, rm *metricdata.Resou return nil } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func latencyHistogramBoundaries() []float64 { boundaries := []float64{} boundary := 0.0 @@ -281,73 +268,6 @@ func sizeHistogramBoundaries() []float64 { return boundaries } -<<<<<<< HEAD -======= -func metricFormatter(m metricdata.Metrics) string { - return metricPrefix + strings.ReplaceAll(string(m.Name), ".", "/") -} - -func gcpAttributeExpectedDefaults() []attribute.KeyValue { - return []attribute.KeyValue{ - {Key: "location", Value: attribute.StringValue("global")}, - {Key: "cloud_platform", Value: attribute.StringValue("unknown")}, - {Key: "host_id", Value: attribute.StringValue("unknown")}} -} - -// Added to help with tests -type preparedResource struct { - projectToUse string - resource *resource.Resource -} - -func newPreparedResource(ctx context.Context, project string, resourceOptions []resource.Option) (*preparedResource, error) { - detectedAttrs, err := resource.New(ctx, resourceOptions...) 
- if err != nil { - return nil, err - } - preparedResource := &preparedResource{} - s := detectedAttrs.Set() - p, present := s.Value("cloud.account.id") - if present { - preparedResource.projectToUse = p.AsString() - } else { - preparedResource.projectToUse = project - } - updates := []attribute.KeyValue{} - for _, kv := range gcpAttributeExpectedDefaults() { - if val, present := s.Value(kv.Key); !present || val.AsString() == "" { - updates = append(updates, attribute.KeyValue{Key: kv.Key, Value: kv.Value}) - } - } - r, err := resource.New( - ctx, - resource.WithAttributes( - attribute.KeyValue{Key: "gcp.resource_type", Value: attribute.StringValue(monitoredResourceName)}, - attribute.KeyValue{Key: "instance_id", Value: attribute.StringValue(uuid.New().String())}, - attribute.KeyValue{Key: "project_id", Value: attribute.StringValue(project)}, - attribute.KeyValue{Key: "api", Value: attribute.StringValue("grpc")}, - ), - resource.WithAttributes(detectedAttrs.Attributes()...), - // Last duplicate key / value wins - resource.WithAttributes(updates...), - ) - if err != nil { - return nil, err - } - preparedResource.resource = r - return preparedResource, nil -} - -type metricsContext struct { - // client options passed to gRPC channels - clientOpts []option.ClientOption - // instance of metric reader used by gRPC client-side metrics - provider *metric.MeterProvider - // clean func to call when closing gRPC client - close func() -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func createHistogramView(name string, boundaries []float64) metric.View { return metric.NewView(metric.Instrument{ Name: name, @@ -358,135 +278,6 @@ func createHistogramView(name string, boundaries []float64) metric.View { }) } -<<<<<<< HEAD func metricFormatter(m metricdata.Metrics) string { return metricPrefix + strings.ReplaceAll(string(m.Name), ".", "/") -======= -func newGRPCMetricContext(ctx context.Context, project string, config storageConfig) (*metricsContext, error) { - var exporter metric.Exporter - meterOpts := []metric.Option{} - if config.metricExporter != nil { - exporter = *config.metricExporter - } else { - preparedResource, err := newPreparedResource(ctx, project, []resource.Option{resource.WithDetectors(gcp.NewDetector())}) - if err != nil { - return nil, err - } - meterOpts = append(meterOpts, metric.WithResource(preparedResource.resource)) - // Implementation requires a project, if one is not determined possibly user - // credentials. Then we will fail stating gRPC Metrics require a project-id. - if project == "" && preparedResource.projectToUse == "" { - return nil, fmt.Errorf("google cloud project is required to start client-side metrics") - } - // If projectTouse isn't the same as project provided to Storage client, then - // emit a log stating which project is being used to emit metrics to. - if project != preparedResource.projectToUse { - log.Printf("The Project ID configured for metrics is %s, but the Project ID of the storage client is %s. 
Make sure that the service account in use has the required metric writing role (roles/monitoring.metricWriter) in the project projectIdToUse or metrics will not be written.", preparedResource.projectToUse, project) - } - meOpts := []mexporter.Option{ - mexporter.WithProjectID(preparedResource.projectToUse), - mexporter.WithMetricDescriptorTypeFormatter(metricFormatter), - mexporter.WithCreateServiceTimeSeries(), - mexporter.WithMonitoredResourceDescription(monitoredResourceName, []string{"project_id", "location", "cloud_platform", "host_id", "instance_id", "api"})} - exporter, err = mexporter.New(meOpts...) - if err != nil { - return nil, err - } - } - // Metric views update histogram boundaries to be relevant to GCS - // otherwise default OTel histogram boundaries are used. - metricViews := []metric.View{ - createHistogramView("grpc.client.attempt.duration", latencyHistogramBoundaries()), - createHistogramView("grpc.client.attempt.rcvd_total_compressed_message_size", sizeHistogramBoundaries()), - createHistogramView("grpc.client.attempt.sent_total_compressed_message_size", sizeHistogramBoundaries()), - } - interval := time.Minute - if config.metricInterval > 0 { - interval = config.metricInterval - } - meterOpts = append(meterOpts, metric.WithReader(metric.NewPeriodicReader(&exporterLogSuppressor{exporter: exporter}, metric.WithInterval(interval))), - metric.WithView(metricViews...)) - provider := metric.NewMeterProvider(meterOpts...) - mo := opentelemetry.MetricsOptions{ - MeterProvider: provider, - Metrics: opentelemetry.DefaultMetrics().Add( - "grpc.lb.wrr.rr_fallback", - "grpc.lb.wrr.endpoint_weight_not_yet_usable", - "grpc.lb.wrr.endpoint_weight_stale", - "grpc.lb.wrr.endpoint_weights", - "grpc.lb.rls.cache_entries", - "grpc.lb.rls.cache_size", - "grpc.lb.rls.default_target_picks", - "grpc.lb.rls.target_picks", - "grpc.lb.rls.failed_picks"), - OptionalLabels: []string{"grpc.lb.locality"}, - } - opts := []option.ClientOption{ - option.WithGRPCDialOption(opentelemetry.DialOption(opentelemetry.Options{MetricsOptions: mo})), - option.WithGRPCDialOption(grpc.WithDefaultCallOptions(grpc.StaticMethodCallOption{})), - } - context := &metricsContext{ - clientOpts: opts, - provider: provider, - close: createShutdown(ctx, provider), - } - return context, nil -} - -func enableClientMetrics(ctx context.Context, s *settings, config storageConfig) (*metricsContext, error) { - var project string - c, err := transport.Creds(ctx, s.clientOption...) - if err == nil { - project = c.ProjectID - } - // Enable client-side metrics for gRPC - metricsContext, err := newGRPCMetricContext(ctx, project, config) - if err != nil { - return nil, fmt.Errorf("gRPC Metrics: %w", err) - } - return metricsContext, nil -} - -func createShutdown(ctx context.Context, provider *metric.MeterProvider) func() { - return func() { - provider.Shutdown(ctx) - } -} - -// Silences permission errors after initial error is emitted to prevent -// chatty logs. -type exporterLogSuppressor struct { - exporter metric.Exporter - emittedFailure bool -} - -// Implements OTel SDK metric.Exporter interface to prevent noisy logs from -// lack of credentials after initial failure. 
-// https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric@v1.28.0#Exporter -func (e *exporterLogSuppressor) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error { - if err := e.exporter.Export(ctx, rm); err != nil && !e.emittedFailure { - if strings.Contains(err.Error(), "PermissionDenied") { - e.emittedFailure = true - return fmt.Errorf("gRPC metrics failed due permission issue: %w", err) - } - return err - } - return nil -} - -func (e *exporterLogSuppressor) Temporality(k metric.InstrumentKind) metricdata.Temporality { - return e.exporter.Temporality(k) -} - -func (e *exporterLogSuppressor) Aggregation(k metric.InstrumentKind) metric.Aggregation { - return e.exporter.Aggregation(k) -} - -func (e *exporterLogSuppressor) ForceFlush(ctx context.Context) error { - return e.exporter.ForceFlush(ctx) -} - -func (e *exporterLogSuppressor) Shutdown(ctx context.Context) error { - return e.exporter.Shutdown(ctx) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/cloud.google.com/go/storage/http_client.go b/vendor/cloud.google.com/go/storage/http_client.go index 8ce372a766..61b20555f4 100644 --- a/vendor/cloud.google.com/go/storage/http_client.go +++ b/vendor/cloud.google.com/go/storage/http_client.go @@ -34,10 +34,6 @@ import ( "cloud.google.com/go/iam/apiv1/iampb" "cloud.google.com/go/internal/optional" "cloud.google.com/go/internal/trace" -<<<<<<< HEAD -======= - "github.com/googleapis/gax-go/v2" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/googleapis/gax-go/v2/callctx" "golang.org/x/oauth2/google" "google.golang.org/api/googleapi" @@ -596,7 +592,6 @@ func (c *httpStorageClient) RestoreObject(ctx context.Context, params *restoreOb return newObject(obj), err } -<<<<<<< HEAD func (c *httpStorageClient) MoveObject(ctx context.Context, params *moveObjectParams, opts ...storageOption) (*ObjectAttrs, error) { s := callSettings(c.settings, opts...) req := c.raw.Objects.Move(params.bucket, params.srcObject, params.dstObject).Context(ctx) @@ -622,8 +617,6 @@ func (c *httpStorageClient) MoveObject(ctx context.Context, params *moveObjectPa return newObject(obj), err } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Default Object ACL methods. func (c *httpStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error { @@ -829,11 +822,7 @@ func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec if err := applyConds("Copy destination", defaultGen, req.dstObject.conds, call); err != nil { return nil, err } -<<<<<<< HEAD if err := applySourceConds("Copy source", req.srcObject.gen, req.srcObject.conds, call); err != nil { -======= - if err := applySourceConds(req.srcObject.gen, req.srcObject.conds, call); err != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil, err } if s.userProject != "" { @@ -872,14 +861,11 @@ func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec return r, nil } -<<<<<<< HEAD // NewMultiRangeDownloader is not supported by http client. 
func (c *httpStorageClient) NewMultiRangeDownloader(ctx context.Context, params *newMultiRangeDownloaderParams, opts ...storageOption) (mr *MultiRangeDownloader, err error) { return nil, errMethodNotSupported } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (c *httpStorageClient) NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.NewRangeReader") defer func() { trace.EndSpan(ctx, err) }() @@ -996,13 +982,10 @@ func (c *httpStorageClient) newRangeReaderJSON(ctx context.Context, params *newR } func (c *httpStorageClient) OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) { -<<<<<<< HEAD if params.append { return nil, errors.New("storage: append not supported on HTTP Client; use gRPC") } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) s := callSettings(c.settings, opts...) errorf := params.setError setObj := params.setObj @@ -1076,17 +1059,7 @@ func (c *httpStorageClient) OpenWriter(params *openWriterParams, opts ...storage } if useRetry { if s.retry != nil { -<<<<<<< HEAD call.WithRetry(s.retry.backoff, s.retry.shouldRetry) -======= - bo := &gax.Backoff{} - if s.retry.backoff != nil { - bo.Multiplier = s.retry.backoff.GetMultiplier() - bo.Initial = s.retry.backoff.GetInitial() - bo.Max = s.retry.backoff.GetMax() - } - call.WithRetry(bo, s.retry.shouldRetry) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } else { call.WithRetry(nil, nil) } @@ -1577,7 +1550,6 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen } } -<<<<<<< HEAD metadata := map[string]string{} for key, values := range res.Header { if len(values) > 0 && strings.HasPrefix(key, "X-Goog-Meta-") { @@ -1586,8 +1558,6 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen } } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) attrs := ReaderObjectAttrs{ Size: size, ContentType: res.Header.Get("Content-Type"), @@ -1601,18 +1571,11 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen Decompressed: res.Uncompressed || uncompressedByServer(res), } return &Reader{ -<<<<<<< HEAD Attrs: attrs, objectMetadata: &metadata, size: size, remain: remain, checkCRC: checkCRC, -======= - Attrs: attrs, - size: size, - remain: remain, - checkCRC: checkCRC, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) reader: &httpReader{ reopen: reopen, body: body, diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go index 10553cca10..03c3f8c170 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go @@ -1,8 +1,4 @@ -<<<<<<< HEAD // Copyright 2025 Google LLC -======= -// Copyright 2024 Google LLC ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary_go123.go b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary_go123.go index 61145a195e..a51532f60f 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary_go123.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary_go123.go @@ -1,8 +1,4 @@ -<<<<<<< HEAD // Copyright 2025 Google LLC -======= -// Copyright 2024 Google LLC ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go index e46b441618..502fa56786 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go @@ -1,8 +1,4 @@ -<<<<<<< HEAD // Copyright 2025 Google LLC -======= -// Copyright 2024 Google LLC ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -68,20 +64,12 @@ // // The following is an example of making an API call with the newly created client, mentioned above. // -<<<<<<< HEAD // stream, err := c.BidiReadObject(ctx) -======= -// stream, err := c.BidiWriteObject(ctx) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // if err != nil { // // TODO: Handle error. // } // go func() { -<<<<<<< HEAD // reqs := []*storagepb.BidiReadObjectRequest{ -======= -// reqs := []*storagepb.BidiWriteObjectRequest{ ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // TODO: Create requests. // } // for _, req := range reqs { diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json b/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json index f89935cc8c..7e4d99ec91 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json @@ -10,14 +10,11 @@ "grpc": { "libraryClient": "Client", "rpcs": { -<<<<<<< HEAD "BidiReadObject": { "methods": [ "BidiReadObject" ] }, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "BidiWriteObject": { "methods": [ "BidiWriteObject" @@ -78,14 +75,11 @@ "LockBucketRetentionPolicy" ] }, -<<<<<<< HEAD "MoveObject": { "methods": [ "MoveObject" ] }, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "QueryWriteStatus": { "methods": [ "QueryWriteStatus" diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/helpers.go b/vendor/cloud.google.com/go/storage/internal/apiv2/helpers.go index dbd521c8c8..0de9b31f64 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/helpers.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/helpers.go @@ -1,8 +1,4 @@ -<<<<<<< HEAD // Copyright 2025 Google LLC -======= -// Copyright 2024 Google LLC ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
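(The doc.go example above shows only the send side; the sketch below fills in the receive loop in the shape GAPIC streaming examples usually document. It assumes the client c, ctx, and an io import from the surrounding example.)

stream, err := c.BidiReadObject(ctx)
if err != nil {
	// TODO: Handle error.
}
go func() {
	reqs := []*storagepb.BidiReadObjectRequest{
		// TODO: Create requests.
	}
	for _, req := range reqs {
		if err := stream.Send(req); err != nil {
			// TODO: Handle error.
		}
	}
	stream.CloseSend()
}()
for {
	resp, err := stream.Recv()
	if err == io.EOF {
		break
	}
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}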
@@ -22,7 +18,6 @@ package storage import ( "context" -<<<<<<< HEAD "log/slog" "github.com/googleapis/gax-go/v2/internallog/grpclog" @@ -33,12 +28,6 @@ import ( const serviceName = "storage.googleapis.com" -======= - - "google.golang.org/api/option" -) - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // For more information on implementing a client constructor hook, see // https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors. type clientHookParams struct{} @@ -63,7 +52,6 @@ func DefaultAuthScopes() []string { "https://www.googleapis.com/auth/devstorage.read_write", } } -<<<<<<< HEAD func executeRPC[I proto.Message, O proto.Message](ctx context.Context, fn func(context.Context, I, ...grpc.CallOption) (O, error), req I, opts []grpc.CallOption, logger *slog.Logger, rpc string) (O, error) { var zero O @@ -75,5 +63,3 @@ func executeRPC[I proto.Message, O proto.Message](ctx context.Context, fn func(c logger.DebugContext(ctx, "api response", "serviceName", serviceName, "rpcName", rpc, "response", grpclog.ProtoMessageResponse(resp)) return resp, err } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go index 3e134ae764..4a50254d89 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go @@ -1,8 +1,4 @@ -<<<<<<< HEAD // Copyright 2025 Google LLC -======= -// Copyright 2024 Google LLC ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
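(A call-site sketch for the generic executeRPC helper added above. The c.client stub, c.logger field, and grpcOpts parameter are assumptions patterned on this package's generated wrappers, not vendored names.)

func (c *gRPCClient) getObject(ctx context.Context, req *storagepb.GetObjectRequest, grpcOpts ...grpc.CallOption) (*storagepb.Object, error) {
	// The helper performs the call and logs request/response at debug level.
	return executeRPC(ctx, c.client.GetObject, req, grpcOpts, c.logger, "GetObject")
}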
@@ -23,10 +19,7 @@ package storage import ( "context" "fmt" -<<<<<<< HEAD "log/slog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "math" "net/url" "regexp" @@ -64,10 +57,7 @@ type CallOptions struct { CancelResumableWrite []gax.CallOption GetObject []gax.CallOption ReadObject []gax.CallOption -<<<<<<< HEAD BidiReadObject []gax.CallOption -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) UpdateObject []gax.CallOption WriteObject []gax.CallOption BidiWriteObject []gax.CallOption @@ -75,10 +65,7 @@ type CallOptions struct { RewriteObject []gax.CallOption StartResumableWrite []gax.CallOption QueryWriteStatus []gax.CallOption -<<<<<<< HEAD MoveObject []gax.CallOption -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func defaultGRPCClientOptions() []option.ClientOption { @@ -292,7 +279,6 @@ func defaultCallOptions() *CallOptions { }) }), }, -<<<<<<< HEAD BidiReadObject: []gax.CallOption{ gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ @@ -305,8 +291,6 @@ func defaultCallOptions() *CallOptions { }) }), }, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) UpdateObject: []gax.CallOption{ gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { @@ -396,7 +380,6 @@ func defaultCallOptions() *CallOptions { }) }), }, -<<<<<<< HEAD MoveObject: []gax.CallOption{ gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { @@ -410,8 +393,6 @@ func defaultCallOptions() *CallOptions { }) }), }, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -435,10 +416,7 @@ type internalClient interface { CancelResumableWrite(context.Context, *storagepb.CancelResumableWriteRequest, ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) GetObject(context.Context, *storagepb.GetObjectRequest, ...gax.CallOption) (*storagepb.Object, error) ReadObject(context.Context, *storagepb.ReadObjectRequest, ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error) -<<<<<<< HEAD BidiReadObject(context.Context, ...gax.CallOption) (storagepb.Storage_BidiReadObjectClient, error) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) UpdateObject(context.Context, *storagepb.UpdateObjectRequest, ...gax.CallOption) (*storagepb.Object, error) WriteObject(context.Context, ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) BidiWriteObject(context.Context, ...gax.CallOption) (storagepb.Storage_BidiWriteObjectClient, error) @@ -446,10 +424,7 @@ type internalClient interface { RewriteObject(context.Context, *storagepb.RewriteObjectRequest, ...gax.CallOption) (*storagepb.RewriteResponse, error) StartResumableWrite(context.Context, *storagepb.StartResumableWriteRequest, ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) QueryWriteStatus(context.Context, *storagepb.QueryWriteStatusRequest, ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error) -<<<<<<< HEAD MoveObject(context.Context, *storagepb.MoveObjectRequest, ...gax.CallOption) (*storagepb.Object, error) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Client is a client for interacting with Cloud Storage API. @@ -569,7 +544,6 @@ func (c *Client) ComposeObject(ctx context.Context, req *storagepb.ComposeObject return c.internalClient.ComposeObject(ctx, req, opts...) } -<<<<<<< HEAD // DeleteObject deletes an object and its metadata. 
Deletions are permanent if versioning // is not enabled for the bucket, or if the generation parameter is used, or // if soft delete (at https://cloud.google.com/storage/docs/soft-delete) is not @@ -590,14 +564,6 @@ func (c *Client) ComposeObject(ctx context.Context, req *storagepb.ComposeObject // Requires storage.objects.delete // IAM permission (at https://cloud.google.com/iam/docs/overview#permissions) on // the bucket. -======= -// DeleteObject deletes an object and its metadata. -// -// Deletions are normally permanent when versioning is disabled or whenever -// the generation parameter is used. However, if soft delete is enabled for -// the bucket, deleted objects can be restored using RestoreObject until the -// soft delete retention period has passed. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (c *Client) DeleteObject(ctx context.Context, req *storagepb.DeleteObjectRequest, opts ...gax.CallOption) error { return c.internalClient.DeleteObject(ctx, req, opts...) } @@ -619,7 +585,6 @@ func (c *Client) CancelResumableWrite(ctx context.Context, req *storagepb.Cancel return c.internalClient.CancelResumableWrite(ctx, req, opts...) } -<<<<<<< HEAD // GetObject retrieves object metadata. // // IAM Permissions: @@ -628,14 +593,10 @@ func (c *Client) CancelResumableWrite(ctx context.Context, req *storagepb.Cancel // IAM permission (at https://cloud.google.com/iam/docs/overview#permissions) on // the bucket. To return object ACLs, the authenticated user must also have // the storage.objects.getIamPolicy permission. -======= -// GetObject retrieves an object’s metadata. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (c *Client) GetObject(ctx context.Context, req *storagepb.GetObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { return c.internalClient.GetObject(ctx, req, opts...) } -<<<<<<< HEAD // ReadObject retrieves object data. // // IAM Permissions: @@ -643,14 +604,10 @@ func (c *Client) GetObject(ctx context.Context, req *storagepb.GetObjectRequest, // Requires storage.objects.get // IAM permission (at https://cloud.google.com/iam/docs/overview#permissions) on // the bucket. -======= -// ReadObject reads an object’s data. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (c *Client) ReadObject(ctx context.Context, req *storagepb.ReadObjectRequest, opts ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error) { return c.internalClient.ReadObject(ctx, req, opts...) } -<<<<<<< HEAD // BidiReadObject reads an object’s data. // // This is a bi-directional API with the added support for reading multiple @@ -674,8 +631,6 @@ func (c *Client) BidiReadObject(ctx context.Context, opts ...gax.CallOption) (st return c.internalClient.BidiReadObject(ctx, opts...) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // UpdateObject updates an object’s metadata. // Equivalent to JSON API’s storage.objects.patch. func (c *Client) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { @@ -745,15 +700,12 @@ func (c *Client) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRe // Alternatively, the BidiWriteObject operation may be used to write an // object with controls over flushing and the ability to fetch the ability to // determine the current persisted size. -<<<<<<< HEAD // // IAM Permissions: // // Requires storage.objects.create // IAM permission (at https://cloud.google.com/iam/docs/overview#permissions) on // the bucket. 
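// A hedged sketch of the single-shot flow the WriteObject comments above
// describe: open the client stream, send one request carrying the
// WriteObjectSpec together with the data and finish_write, then close the
// stream to receive the result. Bucket and object names are placeholders,
// and real callers would normally go through the public
// cloud.google.com/go/storage wrapper, since this generated package sits
// under internal/.
func writeOnce(ctx context.Context, c *Client, data []byte) (*storagepb.Object, error) {
	stream, err := c.WriteObject(ctx)
	if err != nil {
		return nil, err
	}
	req := &storagepb.WriteObjectRequest{
		// First (and here only) message: the spec identifying the new object.
		FirstMessage: &storagepb.WriteObjectRequest_WriteObjectSpec{
			WriteObjectSpec: &storagepb.WriteObjectSpec{
				Resource: &storagepb.Object{
					Bucket: "projects/_/buckets/example-bucket", // placeholder
					Name:   "example-object",                    // placeholder
				},
			},
		},
		WriteOffset: 0,
		Data: &storagepb.WriteObjectRequest_ChecksummedData{
			ChecksummedData: &storagepb.ChecksummedData{Content: data},
		},
		FinishWrite: true, // single-request upload, so finish immediately
	}
	if err := stream.Send(req); err != nil {
		return nil, err
	}
	resp, err := stream.CloseAndRecv()
	if err != nil {
		return nil, err
	}
	return resp.GetResource(), nil
}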
-======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (c *Client) WriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) { return c.internalClient.WriteObject(ctx, opts...) } @@ -778,7 +730,6 @@ func (c *Client) BidiWriteObject(ctx context.Context, opts ...gax.CallOption) (s } // ListObjects retrieves a list of objects matching the criteria. -<<<<<<< HEAD // // IAM Permissions: // @@ -786,8 +737,6 @@ func (c *Client) BidiWriteObject(ctx context.Context, opts ...gax.CallOption) (s // IAM permission (at https://cloud.google.com/iam/docs/overview#permissions) // to use this method. To return object ACLs, the authenticated user must also // have the storage.objects.getIamPolicy permission. -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (c *Client) ListObjects(ctx context.Context, req *storagepb.ListObjectsRequest, opts ...gax.CallOption) *ObjectIterator { return c.internalClient.ListObjects(ctx, req, opts...) } @@ -798,7 +747,6 @@ func (c *Client) RewriteObject(ctx context.Context, req *storagepb.RewriteObject return c.internalClient.RewriteObject(ctx, req, opts...) } -<<<<<<< HEAD // StartResumableWrite starts a resumable write operation. This // method is part of the Resumable // upload (at https://cloud.google.com/storage/docs/resumable-uploads) feature. @@ -812,16 +760,10 @@ func (c *Client) RewriteObject(ctx context.Context, req *storagepb.RewriteObject // Requires storage.objects.create // IAM permission (at https://cloud.google.com/iam/docs/overview#permissions) on // the bucket. -======= -// StartResumableWrite starts a resumable write. How long the write operation remains valid, and -// what happens when the write operation becomes invalid, are -// service-dependent. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (c *Client) StartResumableWrite(ctx context.Context, req *storagepb.StartResumableWriteRequest, opts ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) { return c.internalClient.StartResumableWrite(ctx, req, opts...) } -<<<<<<< HEAD // QueryWriteStatus determines the persisted_size of an object that is being written. This // method is part of the resumable // upload (at https://cloud.google.com/storage/docs/resumable-uploads) feature. @@ -838,33 +780,16 @@ func (c *Client) StartResumableWrite(ctx context.Context, req *storagepb.StartRe // time to determine how much data has been logged for this object. // For any sequence of QueryWriteStatus() calls for a given // object name, the sequence of returned persisted_size values are -======= -// QueryWriteStatus determines the persisted_size for an object that is being written, which -// can then be used as the write_offset for the next Write() call. -// -// If the object does not exist (i.e., the object has been deleted, or the -// first Write() has not yet reached the service), this method returns the -// error NOT_FOUND. -// -// The client may call QueryWriteStatus() at any time to determine how -// much data has been processed for this object. This is useful if the -// client is buffering data and needs to know which data can be safely -// evicted. For any sequence of QueryWriteStatus() calls for a given -// object name, the sequence of returned persisted_size values will be ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // non-decreasing. 
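// Sketch of the resumable contract spelled out above, under the same caveats:
// StartResumableWrite hands back an upload_id, and QueryWriteStatus reports a
// persisted_size that never decreases, or the final resource once the write
// has completed. The helper name is illustrative.
func startAndQuery(ctx context.Context, c *Client, spec *storagepb.WriteObjectSpec) (int64, error) {
	start, err := c.StartResumableWrite(ctx, &storagepb.StartResumableWriteRequest{
		WriteObjectSpec: spec,
	})
	if err != nil {
		return 0, err
	}
	status, err := c.QueryWriteStatus(ctx, &storagepb.QueryWriteStatusRequest{
		UploadId: start.GetUploadId(),
	})
	if err != nil {
		return 0, err
	}
	// A finished write returns the object resource instead of a size.
	if obj := status.GetResource(); obj != nil {
		return obj.GetSize(), nil
	}
	return status.GetPersistedSize(), nil
}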
func (c *Client) QueryWriteStatus(ctx context.Context, req *storagepb.QueryWriteStatusRequest, opts ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error) { return c.internalClient.QueryWriteStatus(ctx, req, opts...) } -<<<<<<< HEAD // MoveObject moves the source object to the destination object in the same bucket. func (c *Client) MoveObject(ctx context.Context, req *storagepb.MoveObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { return c.internalClient.MoveObject(ctx, req, opts...) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // gRPCClient is a client for interacting with Cloud Storage API over gRPC transport. // // Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. @@ -880,11 +805,8 @@ type gRPCClient struct { // The x-goog-* metadata to be sent with each request. xGoogHeaders []string -<<<<<<< HEAD logger *slog.Logger -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // NewClient creates a new storage client based on gRPC. @@ -932,10 +854,7 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error connPool: connPool, client: storagepb.NewStorageClient(connPool), CallOptions: &client.CallOptions, -<<<<<<< HEAD logger: internaloption.GetLogger(opts), -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } c.setGoogleClientInfo() @@ -986,11 +905,7 @@ func (c *gRPCClient) DeleteBucket(ctx context.Context, req *storagepb.DeleteBuck opts = append((*c.CallOptions).DeleteBucket[0:len((*c.CallOptions).DeleteBucket):len((*c.CallOptions).DeleteBucket)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD _, err = executeRPC(ctx, c.client.DeleteBucket, req, settings.GRPC, c.logger, "DeleteBucket") -======= - _, err = c.client.DeleteBucket(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) return err @@ -1014,11 +929,7 @@ func (c *gRPCClient) GetBucket(ctx context.Context, req *storagepb.GetBucketRequ var resp *storagepb.Bucket err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.GetBucket, req, settings.GRPC, c.logger, "GetBucket") -======= - resp, err = c.client.GetBucket(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1048,11 +959,7 @@ func (c *gRPCClient) CreateBucket(ctx context.Context, req *storagepb.CreateBuck var resp *storagepb.Bucket err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.CreateBucket, req, settings.GRPC, c.logger, "CreateBucket") -======= - resp, err = c.client.CreateBucket(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1090,11 +997,7 @@ func (c *gRPCClient) ListBuckets(ctx context.Context, req *storagepb.ListBuckets } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.ListBuckets, req, settings.GRPC, c.logger, "ListBuckets") -======= - resp, err = c.client.ListBuckets(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) 
if err != nil { @@ -1138,11 +1041,7 @@ func (c *gRPCClient) LockBucketRetentionPolicy(ctx context.Context, req *storage var resp *storagepb.Bucket err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.LockBucketRetentionPolicy, req, settings.GRPC, c.logger, "LockBucketRetentionPolicy") -======= - resp, err = c.client.LockBucketRetentionPolicy(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1169,11 +1068,7 @@ func (c *gRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRe var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.GetIamPolicy, req, settings.GRPC, c.logger, "GetIamPolicy") -======= - resp, err = c.client.GetIamPolicy(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1200,11 +1095,7 @@ func (c *gRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRe var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.SetIamPolicy, req, settings.GRPC, c.logger, "SetIamPolicy") -======= - resp, err = c.client.SetIamPolicy(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1237,11 +1128,7 @@ func (c *gRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamP var resp *iampb.TestIamPermissionsResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.TestIamPermissions, req, settings.GRPC, c.logger, "TestIamPermissions") -======= - resp, err = c.client.TestIamPermissions(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1268,11 +1155,7 @@ func (c *gRPCClient) UpdateBucket(ctx context.Context, req *storagepb.UpdateBuck var resp *storagepb.Bucket err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.UpdateBucket, req, settings.GRPC, c.logger, "UpdateBucket") -======= - resp, err = c.client.UpdateBucket(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1299,11 +1182,7 @@ func (c *gRPCClient) ComposeObject(ctx context.Context, req *storagepb.ComposeOb var resp *storagepb.Object err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.ComposeObject, req, settings.GRPC, c.logger, "ComposeObject") -======= - resp, err = c.client.ComposeObject(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1329,11 +1208,7 @@ func (c *gRPCClient) DeleteObject(ctx context.Context, req *storagepb.DeleteObje opts = append((*c.CallOptions).DeleteObject[0:len((*c.CallOptions).DeleteObject):len((*c.CallOptions).DeleteObject)], opts...) 
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD _, err = executeRPC(ctx, c.client.DeleteObject, req, settings.GRPC, c.logger, "DeleteObject") -======= - _, err = c.client.DeleteObject(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) return err @@ -1357,11 +1232,7 @@ func (c *gRPCClient) RestoreObject(ctx context.Context, req *storagepb.RestoreOb var resp *storagepb.Object err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.RestoreObject, req, settings.GRPC, c.logger, "RestoreObject") -======= - resp, err = c.client.RestoreObject(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1388,11 +1259,7 @@ func (c *gRPCClient) CancelResumableWrite(ctx context.Context, req *storagepb.Ca var resp *storagepb.CancelResumableWriteResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.CancelResumableWrite, req, settings.GRPC, c.logger, "CancelResumableWrite") -======= - resp, err = c.client.CancelResumableWrite(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1419,11 +1286,7 @@ func (c *gRPCClient) GetObject(ctx context.Context, req *storagepb.GetObjectRequ var resp *storagepb.Object err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.GetObject, req, settings.GRPC, c.logger, "GetObject") -======= - resp, err = c.client.GetObject(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1450,7 +1313,6 @@ func (c *gRPCClient) ReadObject(ctx context.Context, req *storagepb.ReadObjectRe var resp storagepb.Storage_ReadObjectClient err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD c.logger.DebugContext(ctx, "api streaming client request", "serviceName", serviceName, "rpcName", "ReadObject") resp, err = c.client.ReadObject(ctx, req, settings.GRPC...) c.logger.DebugContext(ctx, "api streaming client response", "serviceName", serviceName, "rpcName", "ReadObject") @@ -1471,9 +1333,6 @@ func (c *gRPCClient) BidiReadObject(ctx context.Context, opts ...gax.CallOption) c.logger.DebugContext(ctx, "api streaming client request", "serviceName", serviceName, "rpcName", "BidiReadObject") resp, err = c.client.BidiReadObject(ctx, settings.GRPC...) c.logger.DebugContext(ctx, "api streaming client response", "serviceName", serviceName, "rpcName", "BidiReadObject") -======= - resp, err = c.client.ReadObject(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1500,11 +1359,7 @@ func (c *gRPCClient) UpdateObject(ctx context.Context, req *storagepb.UpdateObje var resp *storagepb.Object err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.UpdateObject, req, settings.GRPC, c.logger, "UpdateObject") -======= - resp, err = c.client.UpdateObject(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) 
if err != nil { @@ -1519,13 +1374,9 @@ func (c *gRPCClient) WriteObject(ctx context.Context, opts ...gax.CallOption) (s opts = append((*c.CallOptions).WriteObject[0:len((*c.CallOptions).WriteObject):len((*c.CallOptions).WriteObject)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD c.logger.DebugContext(ctx, "api streaming client request", "serviceName", serviceName, "rpcName", "WriteObject") resp, err = c.client.WriteObject(ctx, settings.GRPC...) c.logger.DebugContext(ctx, "api streaming client response", "serviceName", serviceName, "rpcName", "WriteObject") -======= - resp, err = c.client.WriteObject(ctx, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1540,13 +1391,9 @@ func (c *gRPCClient) BidiWriteObject(ctx context.Context, opts ...gax.CallOption opts = append((*c.CallOptions).BidiWriteObject[0:len((*c.CallOptions).BidiWriteObject):len((*c.CallOptions).BidiWriteObject)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD c.logger.DebugContext(ctx, "api streaming client request", "serviceName", serviceName, "rpcName", "BidiWriteObject") resp, err = c.client.BidiWriteObject(ctx, settings.GRPC...) c.logger.DebugContext(ctx, "api streaming client response", "serviceName", serviceName, "rpcName", "BidiWriteObject") -======= - resp, err = c.client.BidiWriteObject(ctx, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1584,11 +1431,7 @@ func (c *gRPCClient) ListObjects(ctx context.Context, req *storagepb.ListObjects } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.ListObjects, req, settings.GRPC, c.logger, "ListObjects") -======= - resp, err = c.client.ListObjects(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1635,11 +1478,7 @@ func (c *gRPCClient) RewriteObject(ctx context.Context, req *storagepb.RewriteOb var resp *storagepb.RewriteResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.RewriteObject, req, settings.GRPC, c.logger, "RewriteObject") -======= - resp, err = c.client.RewriteObject(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { @@ -1666,11 +1505,7 @@ func (c *gRPCClient) StartResumableWrite(ctx context.Context, req *storagepb.Sta var resp *storagepb.StartResumableWriteResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.StartResumableWrite, req, settings.GRPC, c.logger, "StartResumableWrite") -======= - resp, err = c.client.StartResumableWrite(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) 
if err != nil { @@ -1697,7 +1532,6 @@ func (c *gRPCClient) QueryWriteStatus(ctx context.Context, req *storagepb.QueryW var resp *storagepb.QueryWriteStatusResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error -<<<<<<< HEAD resp, err = executeRPC(ctx, c.client.QueryWriteStatus, req, settings.GRPC, c.logger, "QueryWriteStatus") return err }, opts...) @@ -1726,9 +1560,6 @@ func (c *gRPCClient) MoveObject(ctx context.Context, req *storagepb.MoveObjectRe err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = executeRPC(ctx, c.client.MoveObject, req, settings.GRPC, c.logger, "MoveObject") -======= - resp, err = c.client.QueryWriteStatus(ctx, req, settings.GRPC...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err }, opts...) if err != nil { diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go index 14243c9322..7f286f3549 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go @@ -14,11 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -<<<<<<< HEAD // protoc-gen-go v1.35.2 -======= -// protoc-gen-go v1.34.2 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // protoc v4.25.3 // source: google/storage/v2/storage.proto @@ -31,18 +27,11 @@ import ( iampb "cloud.google.com/go/iam/apiv1/iampb" _ "google.golang.org/genproto/googleapis/api/annotations" -<<<<<<< HEAD status "google.golang.org/genproto/googleapis/rpc/status" date "google.golang.org/genproto/googleapis/type/date" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status1 "google.golang.org/grpc/status" -======= - date "google.golang.org/genproto/googleapis/type/date" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" durationpb "google.golang.org/protobuf/types/known/durationpb" @@ -189,11 +178,7 @@ func (x ServiceConstants_Values) Number() protoreflect.EnumNumber { // Deprecated: Use ServiceConstants_Values.Descriptor instead. func (ServiceConstants_Values) EnumDescriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{42, 0} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{29, 0} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Request message for DeleteBucket. 
@@ -213,17 +198,9 @@ type DeleteBucketRequest struct { func (x *DeleteBucketRequest) Reset() { *x = DeleteBucketRequest{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *DeleteBucketRequest) String() string { @@ -234,11 +211,7 @@ func (*DeleteBucketRequest) ProtoMessage() {} func (x *DeleteBucketRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[0] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -296,17 +269,9 @@ type GetBucketRequest struct { func (x *GetBucketRequest) Reset() { *x = GetBucketRequest{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *GetBucketRequest) String() string { @@ -317,11 +282,7 @@ func (*GetBucketRequest) ProtoMessage() {} func (x *GetBucketRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[1] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -396,17 +357,9 @@ type CreateBucketRequest struct { func (x *CreateBucketRequest) Reset() { *x = CreateBucketRequest{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *CreateBucketRequest) String() string { @@ -417,11 +370,7 @@ func (*CreateBucketRequest) ProtoMessage() {} func (x *CreateBucketRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[2] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -498,17 +447,9 @@ type ListBucketsRequest struct { func (x *ListBucketsRequest) Reset() { *x = ListBucketsRequest{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ListBucketsRequest) String() string 
{ @@ -519,11 +460,7 @@ func (*ListBucketsRequest) ProtoMessage() {} func (x *ListBucketsRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[3] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -588,17 +525,9 @@ type ListBucketsResponse struct { func (x *ListBucketsResponse) Reset() { *x = ListBucketsResponse{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ListBucketsResponse) String() string { @@ -609,11 +538,7 @@ func (*ListBucketsResponse) ProtoMessage() {} func (x *ListBucketsResponse) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[4] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -657,17 +582,9 @@ type LockBucketRetentionPolicyRequest struct { func (x *LockBucketRetentionPolicyRequest) Reset() { *x = LockBucketRetentionPolicyRequest{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *LockBucketRetentionPolicyRequest) String() string { @@ -678,11 +595,7 @@ func (*LockBucketRetentionPolicyRequest) ProtoMessage() {} func (x *LockBucketRetentionPolicyRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[5] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -747,17 +660,9 @@ type UpdateBucketRequest struct { func (x *UpdateBucketRequest) Reset() { *x = UpdateBucketRequest{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *UpdateBucketRequest) String() string { @@ -768,11 +673,7 @@ func (*UpdateBucketRequest) ProtoMessage() {} func (x *UpdateBucketRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[6] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -864,17 +765,9 @@ 
type ComposeObjectRequest struct { func (x *ComposeObjectRequest) Reset() { *x = ComposeObjectRequest{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ComposeObjectRequest) String() string { @@ -885,11 +778,7 @@ func (*ComposeObjectRequest) ProtoMessage() {} func (x *ComposeObjectRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[7] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -997,17 +886,9 @@ type DeleteObjectRequest struct { func (x *DeleteObjectRequest) Reset() { *x = DeleteObjectRequest{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *DeleteObjectRequest) String() string { @@ -1018,11 +899,7 @@ func (*DeleteObjectRequest) ProtoMessage() {} func (x *DeleteObjectRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[8] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1137,17 +1014,9 @@ type RestoreObjectRequest struct { func (x *RestoreObjectRequest) Reset() { *x = RestoreObjectRequest{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *RestoreObjectRequest) String() string { @@ -1158,11 +1027,7 @@ func (*RestoreObjectRequest) ProtoMessage() {} func (x *RestoreObjectRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[9] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1261,17 +1126,9 @@ type CancelResumableWriteRequest struct { func (x *CancelResumableWriteRequest) Reset() { *x = CancelResumableWriteRequest{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } 
func (x *CancelResumableWriteRequest) String() string { @@ -1282,11 +1139,7 @@ func (*CancelResumableWriteRequest) ProtoMessage() {} func (x *CancelResumableWriteRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[10] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1318,17 +1171,9 @@ type CancelResumableWriteResponse struct { func (x *CancelResumableWriteResponse) Reset() { *x = CancelResumableWriteResponse{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *CancelResumableWriteResponse) String() string { @@ -1339,11 +1184,7 @@ func (*CancelResumableWriteResponse) ProtoMessage() {} func (x *CancelResumableWriteResponse) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[11] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1416,17 +1257,9 @@ type ReadObjectRequest struct { func (x *ReadObjectRequest) Reset() { *x = ReadObjectRequest{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ReadObjectRequest) String() string { @@ -1437,11 +1270,7 @@ func (*ReadObjectRequest) ProtoMessage() {} func (x *ReadObjectRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[12] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1580,17 +1409,9 @@ type GetObjectRequest struct { func (x *GetObjectRequest) Reset() { *x = GetObjectRequest{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *GetObjectRequest) String() string { @@ -1601,11 +1422,7 @@ func (*GetObjectRequest) ProtoMessage() {} func (x *GetObjectRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[13] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { 
ms.StoreMessageInfo(mi) @@ -1723,17 +1540,9 @@ type ReadObjectResponse struct { func (x *ReadObjectResponse) Reset() { *x = ReadObjectResponse{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ReadObjectResponse) String() string { @@ -1744,11 +1553,7 @@ func (*ReadObjectResponse) ProtoMessage() {} func (x *ReadObjectResponse) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[14] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1791,7 +1596,6 @@ func (x *ReadObjectResponse) GetMetadata() *Object { return nil } -<<<<<<< HEAD // Describes the object to read in a BidiReadObject request. type BidiReadObjectSpec struct { state protoimpl.MessageState @@ -2568,8 +2372,6 @@ func (x *BidiWriteHandle) GetHandle() []byte { return nil } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Describes an attempt to insert an object, possibly over multiple requests. type WriteObjectSpec struct { state protoimpl.MessageState @@ -2605,27 +2407,16 @@ type WriteObjectSpec struct { // you must start the upload over from scratch, this time sending the correct // number of bytes. ObjectSize *int64 `protobuf:"varint,8,opt,name=object_size,json=objectSize,proto3,oneof" json:"object_size,omitempty"` -<<<<<<< HEAD // If true, the object will be created in appendable mode. // This field may only be set when using BidiWriteObject. Appendable *bool `protobuf:"varint,9,opt,name=appendable,proto3,oneof" json:"appendable,omitempty"` -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *WriteObjectSpec) Reset() { *x = WriteObjectSpec{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *WriteObjectSpec) String() string { @@ -2635,13 +2426,8 @@ func (x *WriteObjectSpec) String() string { func (*WriteObjectSpec) ProtoMessage() {} func (x *WriteObjectSpec) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[26] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2653,11 +2439,7 @@ func (x *WriteObjectSpec) ProtoReflect() protoreflect.Message { // Deprecated: Use WriteObjectSpec.ProtoReflect.Descriptor instead. 
func (*WriteObjectSpec) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{26} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{15} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *WriteObjectSpec) GetResource() *Object { @@ -2709,7 +2491,6 @@ func (x *WriteObjectSpec) GetObjectSize() int64 { return 0 } -<<<<<<< HEAD func (x *WriteObjectSpec) GetAppendable() bool { if x != nil && x.Appendable != nil { return *x.Appendable @@ -2717,8 +2498,6 @@ func (x *WriteObjectSpec) GetAppendable() bool { return false } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Request message for WriteObject. type WriteObjectRequest struct { state protoimpl.MessageState @@ -2770,17 +2549,9 @@ type WriteObjectRequest struct { func (x *WriteObjectRequest) Reset() { *x = WriteObjectRequest{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *WriteObjectRequest) String() string { @@ -2790,13 +2561,8 @@ func (x *WriteObjectRequest) String() string { func (*WriteObjectRequest) ProtoMessage() {} func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[27] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2808,11 +2574,7 @@ func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use WriteObjectRequest.ProtoReflect.Descriptor instead. 
func (*WriteObjectRequest) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{27} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{16} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (m *WriteObjectRequest) GetFirstMessage() isWriteObjectRequest_FirstMessage { @@ -2927,17 +2689,9 @@ type WriteObjectResponse struct { func (x *WriteObjectResponse) Reset() { *x = WriteObjectResponse{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *WriteObjectResponse) String() string { @@ -2947,13 +2701,8 @@ func (x *WriteObjectResponse) String() string { func (*WriteObjectResponse) ProtoMessage() {} func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[28] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2965,11 +2714,7 @@ func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use WriteObjectResponse.ProtoReflect.Descriptor instead. func (*WriteObjectResponse) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{28} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{17} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (m *WriteObjectResponse) GetWriteStatus() isWriteObjectResponse_WriteStatus { @@ -3013,7 +2758,6 @@ func (*WriteObjectResponse_PersistedSize) isWriteObjectResponse_WriteStatus() {} func (*WriteObjectResponse_Resource) isWriteObjectResponse_WriteStatus() {} -<<<<<<< HEAD // Describes an attempt to append to an object, possibly over multiple requests. type AppendObjectSpec struct { state protoimpl.MessageState @@ -3119,8 +2863,6 @@ func (x *AppendObjectSpec) GetWriteHandle() *BidiWriteHandle { return nil } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Request message for BidiWriteObject. type BidiWriteObjectRequest struct { state protoimpl.MessageState @@ -3133,10 +2875,7 @@ type BidiWriteObjectRequest struct { // // *BidiWriteObjectRequest_UploadId // *BidiWriteObjectRequest_WriteObjectSpec -<<<<<<< HEAD // *BidiWriteObjectRequest_AppendObjectSpec -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) FirstMessage isBidiWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"` // Required. The offset from the beginning of the object at which the data // should be written. @@ -3160,12 +2899,8 @@ type BidiWriteObjectRequest struct { Data isBidiWriteObjectRequest_Data `protobuf_oneof:"data"` // Checksums for the complete object. If the checksums computed by the service // don't match the specified checksums the call will fail. May only be -<<<<<<< HEAD // provided in the first request or the // last request (with finish_write set). -======= - // provided in last request (with finish_write set). 
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ObjectChecksums *ObjectChecksums `protobuf:"bytes,6,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` // For each BidiWriteObjectRequest where state_lookup is `true` or the client // closes the stream, the service will send a BidiWriteObjectResponse @@ -3196,17 +2931,9 @@ type BidiWriteObjectRequest struct { func (x *BidiWriteObjectRequest) Reset() { *x = BidiWriteObjectRequest{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *BidiWriteObjectRequest) String() string { @@ -3216,13 +2943,8 @@ func (x *BidiWriteObjectRequest) String() string { func (*BidiWriteObjectRequest) ProtoMessage() {} func (x *BidiWriteObjectRequest) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[30] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3234,11 +2956,7 @@ func (x *BidiWriteObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use BidiWriteObjectRequest.ProtoReflect.Descriptor instead. func (*BidiWriteObjectRequest) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{18} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (m *BidiWriteObjectRequest) GetFirstMessage() isBidiWriteObjectRequest_FirstMessage { @@ -3262,7 +2980,6 @@ func (x *BidiWriteObjectRequest) GetWriteObjectSpec() *WriteObjectSpec { return nil } -<<<<<<< HEAD func (x *BidiWriteObjectRequest) GetAppendObjectSpec() *AppendObjectSpec { if x, ok := x.GetFirstMessage().(*BidiWriteObjectRequest_AppendObjectSpec); ok { return x.AppendObjectSpec @@ -3270,8 +2987,6 @@ func (x *BidiWriteObjectRequest) GetAppendObjectSpec() *AppendObjectSpec { return nil } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (x *BidiWriteObjectRequest) GetWriteOffset() int64 { if x != nil { return x.WriteOffset @@ -3344,23 +3059,17 @@ type BidiWriteObjectRequest_WriteObjectSpec struct { WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,2,opt,name=write_object_spec,json=writeObjectSpec,proto3,oneof"` } -<<<<<<< HEAD type BidiWriteObjectRequest_AppendObjectSpec struct { // For appendable uploads. Describes the object to append to. 
AppendObjectSpec *AppendObjectSpec `protobuf:"bytes,11,opt,name=append_object_spec,json=appendObjectSpec,proto3,oneof"` } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (*BidiWriteObjectRequest_UploadId) isBidiWriteObjectRequest_FirstMessage() {} func (*BidiWriteObjectRequest_WriteObjectSpec) isBidiWriteObjectRequest_FirstMessage() {} -<<<<<<< HEAD func (*BidiWriteObjectRequest_AppendObjectSpec) isBidiWriteObjectRequest_FirstMessage() {} -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type isBidiWriteObjectRequest_Data interface { isBidiWriteObjectRequest_Data() } @@ -3386,28 +3095,17 @@ type BidiWriteObjectResponse struct { // *BidiWriteObjectResponse_PersistedSize // *BidiWriteObjectResponse_Resource WriteStatus isBidiWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"` -<<<<<<< HEAD // An optional write handle that will periodically be present in response // messages. Clients should save it for later use in establishing a new stream // if a connection is interrupted. WriteHandle *BidiWriteHandle `protobuf:"bytes,3,opt,name=write_handle,json=writeHandle,proto3,oneof" json:"write_handle,omitempty"` -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *BidiWriteObjectResponse) Reset() { *x = BidiWriteObjectResponse{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *BidiWriteObjectResponse) String() string { @@ -3417,13 +3115,8 @@ func (x *BidiWriteObjectResponse) String() string { func (*BidiWriteObjectResponse) ProtoMessage() {} func (x *BidiWriteObjectResponse) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[31] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3435,11 +3128,7 @@ func (x *BidiWriteObjectResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use BidiWriteObjectResponse.ProtoReflect.Descriptor instead. 
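// A sketch tying together the appendable-upload pieces introduced above: the
// first message on a BidiWriteObject stream may carry an AppendObjectSpec
// naming an existing object, and a BidiWriteHandle saved from an earlier
// BidiWriteObjectResponse lets the client reattach after a dropped stream.
// Field values are placeholders, and any spec fields beyond the getters shown
// above are assumptions.
func appendFirstMessage(bucket, object string, generation int64, handle *storagepb.BidiWriteHandle) *storagepb.BidiWriteObjectRequest {
	return &storagepb.BidiWriteObjectRequest{
		FirstMessage: &storagepb.BidiWriteObjectRequest_AppendObjectSpec{
			AppendObjectSpec: &storagepb.AppendObjectSpec{
				Bucket:      bucket,     // e.g. "projects/_/buckets/example-bucket"
				Object:      object,     // name of the appendable object
				Generation:  generation, // generation being appended to
				WriteHandle: handle,     // nil on the first attach
			},
		},
	}
}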
func (*BidiWriteObjectResponse) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{31} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{19} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (m *BidiWriteObjectResponse) GetWriteStatus() isBidiWriteObjectResponse_WriteStatus { @@ -3463,7 +3152,6 @@ func (x *BidiWriteObjectResponse) GetResource() *Object { return nil } -<<<<<<< HEAD func (x *BidiWriteObjectResponse) GetWriteHandle() *BidiWriteHandle { if x != nil { return x.WriteHandle @@ -3471,8 +3159,6 @@ func (x *BidiWriteObjectResponse) GetWriteHandle() *BidiWriteHandle { return nil } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type isBidiWriteObjectResponse_WriteStatus interface { isBidiWriteObjectResponse_WriteStatus() } @@ -3557,17 +3243,9 @@ type ListObjectsRequest struct { func (x *ListObjectsRequest) Reset() { *x = ListObjectsRequest{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ListObjectsRequest) String() string { @@ -3577,13 +3255,8 @@ func (x *ListObjectsRequest) String() string { func (*ListObjectsRequest) ProtoMessage() {} func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[32] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3595,11 +3268,7 @@ func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListObjectsRequest.ProtoReflect.Descriptor instead. 
func (*ListObjectsRequest) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{32} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{20} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ListObjectsRequest) GetParent() string { @@ -3708,17 +3377,9 @@ type QueryWriteStatusRequest struct { func (x *QueryWriteStatusRequest) Reset() { *x = QueryWriteStatusRequest{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *QueryWriteStatusRequest) String() string { @@ -3728,13 +3389,8 @@ func (x *QueryWriteStatusRequest) String() string { func (*QueryWriteStatusRequest) ProtoMessage() {} func (x *QueryWriteStatusRequest) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[33] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3746,11 +3402,7 @@ func (x *QueryWriteStatusRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryWriteStatusRequest.ProtoReflect.Descriptor instead. func (*QueryWriteStatusRequest) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{33} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{21} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *QueryWriteStatusRequest) GetUploadId() string { @@ -3784,17 +3436,9 @@ type QueryWriteStatusResponse struct { func (x *QueryWriteStatusResponse) Reset() { *x = QueryWriteStatusResponse{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *QueryWriteStatusResponse) String() string { @@ -3804,13 +3448,8 @@ func (x *QueryWriteStatusResponse) String() string { func (*QueryWriteStatusResponse) ProtoMessage() {} func (x *QueryWriteStatusResponse) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[34] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3822,11 +3461,7 @@ func (x *QueryWriteStatusResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryWriteStatusResponse.ProtoReflect.Descriptor instead. 
func (*QueryWriteStatusResponse) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{34} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{22} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (m *QueryWriteStatusResponse) GetWriteStatus() isQueryWriteStatusResponse_WriteStatus { @@ -3983,17 +3618,9 @@ type RewriteObjectRequest struct { func (x *RewriteObjectRequest) Reset() { *x = RewriteObjectRequest{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *RewriteObjectRequest) String() string { @@ -4003,13 +3630,8 @@ func (x *RewriteObjectRequest) String() string { func (*RewriteObjectRequest) ProtoMessage() {} func (x *RewriteObjectRequest) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[35] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4021,11 +3643,7 @@ func (x *RewriteObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RewriteObjectRequest.ProtoReflect.Descriptor instead. func (*RewriteObjectRequest) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{35} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{23} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *RewriteObjectRequest) GetDestinationName() string { @@ -4214,17 +3832,9 @@ type RewriteResponse struct { func (x *RewriteResponse) Reset() { *x = RewriteResponse{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *RewriteResponse) String() string { @@ -4234,13 +3844,8 @@ func (x *RewriteResponse) String() string { func (*RewriteResponse) ProtoMessage() {} func (x *RewriteResponse) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[36] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4252,11 +3857,7 @@ func (x *RewriteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RewriteResponse.ProtoReflect.Descriptor instead. 
func (*RewriteResponse) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{36} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{24} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *RewriteResponse) GetTotalBytesRewritten() int64 { @@ -4294,7 +3895,6 @@ func (x *RewriteResponse) GetResource() *Object { return nil } -<<<<<<< HEAD // Request message for MoveObject. type MoveObjectRequest struct { state protoimpl.MessageState @@ -4464,15 +4064,12 @@ func (x *MoveObjectRequest) GetIfMetagenerationNotMatch() int64 { return 0 } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Request message StartResumableWrite. type StartResumableWriteRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields -<<<<<<< HEAD // Required. Contains the information necessary to start a resumable write. WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,1,opt,name=write_object_spec,json=writeObjectSpec,proto3" json:"write_object_spec,omitempty"` // A set of parameters common to Storage API requests related to an object. @@ -4482,33 +4079,14 @@ type StartResumableWriteRequest struct { // initiating a resumable upload with`StartResumableWriteRequest` or when // completing a write with `WriteObjectRequest` with // `finish_write` set to `true`. -======= - // Required. The destination bucket, object, and metadata, as well as any - // preconditions. - WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,1,opt,name=write_object_spec,json=writeObjectSpec,proto3" json:"write_object_spec,omitempty"` - // A set of parameters common to Storage API requests concerning an object. - CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,3,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` - // The checksums of the complete object. This will be used to validate the - // uploaded object. For each upload, object_checksums can be provided with - // either StartResumableWriteRequest or the WriteObjectRequest with - // finish_write set to `true`. 
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ObjectChecksums *ObjectChecksums `protobuf:"bytes,5,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` } func (x *StartResumableWriteRequest) Reset() { *x = StartResumableWriteRequest{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *StartResumableWriteRequest) String() string { @@ -4518,13 +4096,8 @@ func (x *StartResumableWriteRequest) String() string { func (*StartResumableWriteRequest) ProtoMessage() {} func (x *StartResumableWriteRequest) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[38] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4536,11 +4109,7 @@ func (x *StartResumableWriteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StartResumableWriteRequest.ProtoReflect.Descriptor instead. func (*StartResumableWriteRequest) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{25} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *StartResumableWriteRequest) GetWriteObjectSpec() *WriteObjectSpec { @@ -4570,32 +4139,19 @@ type StartResumableWriteResponse struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields -<<<<<<< HEAD // A unique identifier for the initiated resumable write operation. // As the ID grants write access, you should keep it confidential during // the upload to prevent unauthorized access and data tampering during your // upload. This ID should be included in subsequent `WriteObject` requests to // upload the object data. -======= - // The upload_id of the newly started resumable write operation. This - // value should be copied into the `WriteObjectRequest.upload_id` field. 
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"` } func (x *StartResumableWriteResponse) Reset() { *x = StartResumableWriteResponse{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *StartResumableWriteResponse) String() string { @@ -4605,13 +4161,8 @@ func (x *StartResumableWriteResponse) String() string { func (*StartResumableWriteResponse) ProtoMessage() {} func (x *StartResumableWriteResponse) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[39] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4623,11 +4174,7 @@ func (x *StartResumableWriteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StartResumableWriteResponse.ProtoReflect.Descriptor instead. func (*StartResumableWriteResponse) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{26} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *StartResumableWriteResponse) GetUploadId() string { @@ -4683,17 +4230,9 @@ type UpdateObjectRequest struct { func (x *UpdateObjectRequest) Reset() { *x = UpdateObjectRequest{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *UpdateObjectRequest) String() string { @@ -4703,13 +4242,8 @@ func (x *UpdateObjectRequest) String() string { func (*UpdateObjectRequest) ProtoMessage() {} func (x *UpdateObjectRequest) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[40] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4721,11 +4255,7 @@ func (x *UpdateObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateObjectRequest.ProtoReflect.Descriptor instead. 
func (*UpdateObjectRequest) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{27} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *UpdateObjectRequest) GetObject() *Object { @@ -4803,17 +4333,9 @@ type CommonObjectRequestParams struct { func (x *CommonObjectRequestParams) Reset() { *x = CommonObjectRequestParams{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *CommonObjectRequestParams) String() string { @@ -4823,13 +4345,8 @@ func (x *CommonObjectRequestParams) String() string { func (*CommonObjectRequestParams) ProtoMessage() {} func (x *CommonObjectRequestParams) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[41] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4841,11 +4358,7 @@ func (x *CommonObjectRequestParams) ProtoReflect() protoreflect.Message { // Deprecated: Use CommonObjectRequestParams.ProtoReflect.Descriptor instead. func (*CommonObjectRequestParams) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{41} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{28} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *CommonObjectRequestParams) GetEncryptionAlgorithm() string { @@ -4878,17 +4391,9 @@ type ServiceConstants struct { func (x *ServiceConstants) Reset() { *x = ServiceConstants{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ServiceConstants) String() string { @@ -4898,13 +4403,8 @@ func (x *ServiceConstants) String() string { func (*ServiceConstants) ProtoMessage() {} func (x *ServiceConstants) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[42] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4916,11 +4416,7 @@ func (x *ServiceConstants) ProtoReflect() protoreflect.Message { // Deprecated: Use ServiceConstants.ProtoReflect.Descriptor instead. 
func (*ServiceConstants) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{42} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{29} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // A bucket. @@ -5054,17 +4550,9 @@ type Bucket struct { func (x *Bucket) Reset() { *x = Bucket{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Bucket) String() string { @@ -5074,13 +4562,8 @@ func (x *Bucket) String() string { func (*Bucket) ProtoMessage() {} func (x *Bucket) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[43] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5092,11 +4575,7 @@ func (x *Bucket) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket.ProtoReflect.Descriptor instead. func (*Bucket) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Bucket) GetName() string { @@ -5358,17 +4837,9 @@ type BucketAccessControl struct { func (x *BucketAccessControl) Reset() { *x = BucketAccessControl{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *BucketAccessControl) String() string { @@ -5378,13 +4849,8 @@ func (x *BucketAccessControl) String() string { func (*BucketAccessControl) ProtoMessage() {} func (x *BucketAccessControl) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[44] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5396,11 +4862,7 @@ func (x *BucketAccessControl) ProtoReflect() protoreflect.Message { // Deprecated: Use BucketAccessControl.ProtoReflect.Descriptor instead. 
func (*BucketAccessControl) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{44} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{31} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *BucketAccessControl) GetRole() string { @@ -5481,17 +4943,9 @@ type ChecksummedData struct { func (x *ChecksummedData) Reset() { *x = ChecksummedData{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ChecksummedData) String() string { @@ -5501,13 +4955,8 @@ func (x *ChecksummedData) String() string { func (*ChecksummedData) ProtoMessage() {} func (x *ChecksummedData) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[45] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5519,11 +4968,7 @@ func (x *ChecksummedData) ProtoReflect() protoreflect.Message { // Deprecated: Use ChecksummedData.ProtoReflect.Descriptor instead. func (*ChecksummedData) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{45} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{32} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ChecksummedData) GetContent() []byte { @@ -5563,17 +5008,9 @@ type ObjectChecksums struct { func (x *ObjectChecksums) Reset() { *x = ObjectChecksums{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ObjectChecksums) String() string { @@ -5583,13 +5020,8 @@ func (x *ObjectChecksums) String() string { func (*ObjectChecksums) ProtoMessage() {} func (x *ObjectChecksums) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[46] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[33] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5601,11 +5033,7 @@ func (x *ObjectChecksums) ProtoReflect() protoreflect.Message { // Deprecated: Use ObjectChecksums.ProtoReflect.Descriptor instead. 
func (*ObjectChecksums) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{46} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{33} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ObjectChecksums) GetCrc32C() uint32 { @@ -5638,17 +5066,9 @@ type CustomerEncryption struct { func (x *CustomerEncryption) Reset() { *x = CustomerEncryption{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[34] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *CustomerEncryption) String() string { @@ -5658,13 +5078,8 @@ func (x *CustomerEncryption) String() string { func (*CustomerEncryption) ProtoMessage() {} func (x *CustomerEncryption) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[47] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[34] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5676,11 +5091,7 @@ func (x *CustomerEncryption) ProtoReflect() protoreflect.Message { // Deprecated: Use CustomerEncryption.ProtoReflect.Descriptor instead. func (*CustomerEncryption) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{47} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{34} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *CustomerEncryption) GetEncryptionAlgorithm() string { @@ -5756,11 +5167,8 @@ type Object struct { // Output only. If this object is noncurrent, this is the time when the object // became noncurrent. DeleteTime *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=delete_time,json=deleteTime,proto3" json:"delete_time,omitempty"` -<<<<<<< HEAD // Output only. The time when the object was finalized. FinalizeTime *timestamppb.Timestamp `protobuf:"bytes,36,opt,name=finalize_time,json=finalizeTime,proto3" json:"finalize_time,omitempty"` -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Content-Type of the object data, matching // [https://tools.ietf.org/html/rfc7231#section-3.1.1.5][RFC 7231 §3.1.1.5]. 
// If an object is stored without a Content-Type, it is served as @@ -5840,17 +5248,9 @@ type Object struct { func (x *Object) Reset() { *x = Object{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[35] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Object) String() string { @@ -5860,13 +5260,8 @@ func (x *Object) String() string { func (*Object) ProtoMessage() {} func (x *Object) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[48] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[35] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5878,11 +5273,7 @@ func (x *Object) ProtoReflect() protoreflect.Message { // Deprecated: Use Object.ProtoReflect.Descriptor instead. func (*Object) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{48} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{35} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Object) GetName() string { @@ -5983,7 +5374,6 @@ func (x *Object) GetDeleteTime() *timestamppb.Timestamp { return nil } -<<<<<<< HEAD func (x *Object) GetFinalizeTime() *timestamppb.Timestamp { if x != nil { return x.FinalizeTime @@ -5991,8 +5381,6 @@ func (x *Object) GetFinalizeTime() *timestamppb.Timestamp { return nil } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (x *Object) GetContentType() string { if x != nil { return x.ContentType @@ -6157,17 +5545,9 @@ type ObjectAccessControl struct { func (x *ObjectAccessControl) Reset() { *x = ObjectAccessControl{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[36] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ObjectAccessControl) String() string { @@ -6177,13 +5557,8 @@ func (x *ObjectAccessControl) String() string { func (*ObjectAccessControl) ProtoMessage() {} func (x *ObjectAccessControl) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[49] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[36] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6195,11 +5570,7 @@ func (x *ObjectAccessControl) ProtoReflect() protoreflect.Message { // Deprecated: Use ObjectAccessControl.ProtoReflect.Descriptor instead. 
func (*ObjectAccessControl) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{49} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{36} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ObjectAccessControl) GetRole() string { @@ -6283,17 +5654,9 @@ type ListObjectsResponse struct { func (x *ListObjectsResponse) Reset() { *x = ListObjectsResponse{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[37] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ListObjectsResponse) String() string { @@ -6303,13 +5666,8 @@ func (x *ListObjectsResponse) String() string { func (*ListObjectsResponse) ProtoMessage() {} func (x *ListObjectsResponse) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[50] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[37] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6321,11 +5679,7 @@ func (x *ListObjectsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListObjectsResponse.ProtoReflect.Descriptor instead. func (*ListObjectsResponse) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{50} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{37} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ListObjectsResponse) GetObjects() []*Object { @@ -6363,17 +5717,9 @@ type ProjectTeam struct { func (x *ProjectTeam) Reset() { *x = ProjectTeam{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[38] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ProjectTeam) String() string { @@ -6383,13 +5729,8 @@ func (x *ProjectTeam) String() string { func (*ProjectTeam) ProtoMessage() {} func (x *ProjectTeam) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[51] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[38] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6401,11 +5742,7 @@ func (x *ProjectTeam) ProtoReflect() protoreflect.Message { // Deprecated: Use ProjectTeam.ProtoReflect.Descriptor instead. 
func (*ProjectTeam) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{51} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ProjectTeam) GetProjectNumber() string { @@ -6436,17 +5773,9 @@ type Owner struct { func (x *Owner) Reset() { *x = Owner{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[39] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Owner) String() string { @@ -6456,13 +5785,8 @@ func (x *Owner) String() string { func (*Owner) ProtoMessage() {} func (x *Owner) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[52] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[39] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6474,11 +5798,7 @@ func (x *Owner) ProtoReflect() protoreflect.Message { // Deprecated: Use Owner.ProtoReflect.Descriptor instead. func (*Owner) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{52} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Owner) GetEntity() string { @@ -6511,17 +5831,9 @@ type ContentRange struct { func (x *ContentRange) Reset() { *x = ContentRange{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[53] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[40] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ContentRange) String() string { @@ -6531,13 +5843,8 @@ func (x *ContentRange) String() string { func (*ContentRange) ProtoMessage() {} func (x *ContentRange) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[53] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[40] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6549,11 +5856,7 @@ func (x *ContentRange) ProtoReflect() protoreflect.Message { // Deprecated: Use ContentRange.ProtoReflect.Descriptor instead. 
func (*ContentRange) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{53} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ContentRange) GetStart() int64 { @@ -6594,17 +5897,9 @@ type ComposeObjectRequest_SourceObject struct { func (x *ComposeObjectRequest_SourceObject) Reset() { *x = ComposeObjectRequest_SourceObject{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[54] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[41] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ComposeObjectRequest_SourceObject) String() string { @@ -6614,13 +5909,8 @@ func (x *ComposeObjectRequest_SourceObject) String() string { func (*ComposeObjectRequest_SourceObject) ProtoMessage() {} func (x *ComposeObjectRequest_SourceObject) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[54] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[41] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6670,17 +5960,9 @@ type ComposeObjectRequest_SourceObject_ObjectPreconditions struct { func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) Reset() { *x = ComposeObjectRequest_SourceObject_ObjectPreconditions{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[55] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[42] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) String() string { @@ -6690,13 +5972,8 @@ func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) String() string func (*ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoMessage() {} func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[55] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[42] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6730,17 +6007,9 @@ type Bucket_Billing struct { func (x *Bucket_Billing) Reset() { *x = Bucket_Billing{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[56] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[43] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Bucket_Billing) String() string { @@ -6750,13 +6019,8 @@ func (x *Bucket_Billing) String() string { func (*Bucket_Billing) ProtoMessage() {} func (x 
*Bucket_Billing) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[56] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[43] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6768,11 +6032,7 @@ func (x *Bucket_Billing) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Billing.ProtoReflect.Descriptor instead. func (*Bucket_Billing) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 0} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 0} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Bucket_Billing) GetRequesterPays() bool { @@ -6811,17 +6071,9 @@ type Bucket_Cors struct { func (x *Bucket_Cors) Reset() { *x = Bucket_Cors{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[57] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[44] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Bucket_Cors) String() string { @@ -6831,13 +6083,8 @@ func (x *Bucket_Cors) String() string { func (*Bucket_Cors) ProtoMessage() {} func (x *Bucket_Cors) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[57] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[44] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6849,11 +6096,7 @@ func (x *Bucket_Cors) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Cors.ProtoReflect.Descriptor instead. 
func (*Bucket_Cors) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 1} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 1} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Bucket_Cors) GetOrigin() []string { @@ -6897,17 +6140,9 @@ type Bucket_Encryption struct { func (x *Bucket_Encryption) Reset() { *x = Bucket_Encryption{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[58] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[45] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Bucket_Encryption) String() string { @@ -6917,13 +6152,8 @@ func (x *Bucket_Encryption) String() string { func (*Bucket_Encryption) ProtoMessage() {} func (x *Bucket_Encryption) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[58] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[45] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6935,11 +6165,7 @@ func (x *Bucket_Encryption) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Encryption.ProtoReflect.Descriptor instead. func (*Bucket_Encryption) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 2} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 2} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Bucket_Encryption) GetDefaultKmsKey() string { @@ -6964,17 +6190,9 @@ type Bucket_IamConfig struct { func (x *Bucket_IamConfig) Reset() { *x = Bucket_IamConfig{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[59] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[46] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Bucket_IamConfig) String() string { @@ -6984,13 +6202,8 @@ func (x *Bucket_IamConfig) String() string { func (*Bucket_IamConfig) ProtoMessage() {} func (x *Bucket_IamConfig) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[59] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[46] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -7002,11 +6215,7 @@ func (x *Bucket_IamConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_IamConfig.ProtoReflect.Descriptor instead. 
func (*Bucket_IamConfig) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 3} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 3} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Bucket_IamConfig) GetUniformBucketLevelAccess() *Bucket_IamConfig_UniformBucketLevelAccess { @@ -7037,17 +6246,9 @@ type Bucket_Lifecycle struct { func (x *Bucket_Lifecycle) Reset() { *x = Bucket_Lifecycle{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[60] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[47] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Bucket_Lifecycle) String() string { @@ -7057,13 +6258,8 @@ func (x *Bucket_Lifecycle) String() string { func (*Bucket_Lifecycle) ProtoMessage() {} func (x *Bucket_Lifecycle) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[60] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[47] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -7075,11 +6271,7 @@ func (x *Bucket_Lifecycle) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Lifecycle.ProtoReflect.Descriptor instead. func (*Bucket_Lifecycle) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 4} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Bucket_Lifecycle) GetRule() []*Bucket_Lifecycle_Rule { @@ -7104,17 +6296,9 @@ type Bucket_Logging struct { func (x *Bucket_Logging) Reset() { *x = Bucket_Logging{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[61] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[48] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Bucket_Logging) String() string { @@ -7124,13 +6308,8 @@ func (x *Bucket_Logging) String() string { func (*Bucket_Logging) ProtoMessage() {} func (x *Bucket_Logging) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[61] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[48] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -7142,11 +6321,7 @@ func (x *Bucket_Logging) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Logging.ProtoReflect.Descriptor instead. 
func (*Bucket_Logging) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 5} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 5} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Bucket_Logging) GetLogBucket() string { @@ -7184,17 +6359,9 @@ type Bucket_RetentionPolicy struct { func (x *Bucket_RetentionPolicy) Reset() { *x = Bucket_RetentionPolicy{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[62] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[49] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Bucket_RetentionPolicy) String() string { @@ -7204,13 +6371,8 @@ func (x *Bucket_RetentionPolicy) String() string { func (*Bucket_RetentionPolicy) ProtoMessage() {} func (x *Bucket_RetentionPolicy) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[62] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[49] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -7222,11 +6384,7 @@ func (x *Bucket_RetentionPolicy) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_RetentionPolicy.ProtoReflect.Descriptor instead. func (*Bucket_RetentionPolicy) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 6} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 6} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Bucket_RetentionPolicy) GetEffectiveTime() *timestamppb.Timestamp { @@ -7266,17 +6424,9 @@ type Bucket_SoftDeletePolicy struct { func (x *Bucket_SoftDeletePolicy) Reset() { *x = Bucket_SoftDeletePolicy{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[63] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[50] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Bucket_SoftDeletePolicy) String() string { @@ -7286,13 +6436,8 @@ func (x *Bucket_SoftDeletePolicy) String() string { func (*Bucket_SoftDeletePolicy) ProtoMessage() {} func (x *Bucket_SoftDeletePolicy) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[63] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[50] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -7304,11 +6449,7 @@ func (x *Bucket_SoftDeletePolicy) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_SoftDeletePolicy.ProtoReflect.Descriptor instead. 
func (*Bucket_SoftDeletePolicy) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 7} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 7} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Bucket_SoftDeletePolicy) GetRetentionDuration() *durationpb.Duration { @@ -7339,17 +6480,9 @@ type Bucket_Versioning struct { func (x *Bucket_Versioning) Reset() { *x = Bucket_Versioning{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[64] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[51] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Bucket_Versioning) String() string { @@ -7359,13 +6492,8 @@ func (x *Bucket_Versioning) String() string { func (*Bucket_Versioning) ProtoMessage() {} func (x *Bucket_Versioning) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[64] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[51] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -7377,11 +6505,7 @@ func (x *Bucket_Versioning) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Versioning.ProtoReflect.Descriptor instead. func (*Bucket_Versioning) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 8} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 8} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Bucket_Versioning) GetEnabled() bool { @@ -7414,17 +6538,9 @@ type Bucket_Website struct { func (x *Bucket_Website) Reset() { *x = Bucket_Website{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[65] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[52] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Bucket_Website) String() string { @@ -7434,13 +6550,8 @@ func (x *Bucket_Website) String() string { func (*Bucket_Website) ProtoMessage() {} func (x *Bucket_Website) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[65] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[52] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -7452,11 +6563,7 @@ func (x *Bucket_Website) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Website.ProtoReflect.Descriptor instead. 
func (*Bucket_Website) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 9} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 9} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Bucket_Website) GetMainPageSuffix() string { @@ -7487,17 +6594,9 @@ type Bucket_CustomPlacementConfig struct { func (x *Bucket_CustomPlacementConfig) Reset() { *x = Bucket_CustomPlacementConfig{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[66] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[53] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Bucket_CustomPlacementConfig) String() string { @@ -7507,13 +6606,8 @@ func (x *Bucket_CustomPlacementConfig) String() string { func (*Bucket_CustomPlacementConfig) ProtoMessage() {} func (x *Bucket_CustomPlacementConfig) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[66] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[53] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -7525,11 +6619,7 @@ func (x *Bucket_CustomPlacementConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_CustomPlacementConfig.ProtoReflect.Descriptor instead. func (*Bucket_CustomPlacementConfig) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 10} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 10} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Bucket_CustomPlacementConfig) GetDataLocations() []string { @@ -7563,17 +6653,9 @@ type Bucket_Autoclass struct { func (x *Bucket_Autoclass) Reset() { *x = Bucket_Autoclass{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[67] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[54] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Bucket_Autoclass) String() string { @@ -7583,13 +6665,8 @@ func (x *Bucket_Autoclass) String() string { func (*Bucket_Autoclass) ProtoMessage() {} func (x *Bucket_Autoclass) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[67] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[54] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -7601,11 +6678,7 @@ func (x *Bucket_Autoclass) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Autoclass.ProtoReflect.Descriptor instead. 
func (*Bucket_Autoclass) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 11} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 11} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Bucket_Autoclass) GetEnabled() bool { @@ -7648,17 +6721,9 @@ type Bucket_HierarchicalNamespace struct { func (x *Bucket_HierarchicalNamespace) Reset() { *x = Bucket_HierarchicalNamespace{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[68] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[55] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Bucket_HierarchicalNamespace) String() string { @@ -7668,13 +6733,8 @@ func (x *Bucket_HierarchicalNamespace) String() string { func (*Bucket_HierarchicalNamespace) ProtoMessage() {} func (x *Bucket_HierarchicalNamespace) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[68] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[55] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -7686,11 +6746,7 @@ func (x *Bucket_HierarchicalNamespace) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_HierarchicalNamespace.ProtoReflect.Descriptor instead. func (*Bucket_HierarchicalNamespace) Descriptor() ([]byte, []int) { -<<<<<<< HEAD return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 12} -======= - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 12} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Bucket_HierarchicalNamespace) GetEnabled() bool { @@ -7718,17 +6774,9 @@ type Bucket_IamConfig_UniformBucketLevelAccess struct { func (x *Bucket_IamConfig_UniformBucketLevelAccess) Reset() { *x = Bucket_IamConfig_UniformBucketLevelAccess{} -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[70] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[57] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Bucket_IamConfig_UniformBucketLevelAccess) String() string { @@ -7738,13 +6786,8 @@ func (x *Bucket_IamConfig_UniformBucketLevelAccess) String() string { func (*Bucket_IamConfig_UniformBucketLevelAccess) ProtoMessage() {} func (x *Bucket_IamConfig_UniformBucketLevelAccess) ProtoReflect() protoreflect.Message { -<<<<<<< HEAD mi := &file_google_storage_v2_storage_proto_msgTypes[70] if x != nil { -======= - mi := &file_google_storage_v2_storage_proto_msgTypes[57] - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -7756,11 +6799,7 @@ func (x *Bucket_IamConfig_UniformBucketLevelAccess) ProtoReflect() protoreflect. // Deprecated: Use Bucket_IamConfig_UniformBucketLevelAccess.ProtoReflect.Descriptor instead. 
 func (*Bucket_IamConfig_UniformBucketLevelAccess) Descriptor() ([]byte, []int) {
-<<<<<<< HEAD
 	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 3, 0}
-=======
-	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 3, 0}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetEnabled() bool {
@@ -7792,17 +6831,9 @@ type Bucket_Lifecycle_Rule struct {
 
 func (x *Bucket_Lifecycle_Rule) Reset() {
 	*x = Bucket_Lifecycle_Rule{}
-<<<<<<< HEAD
 	mi := &file_google_storage_v2_storage_proto_msgTypes[71]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_storage_v2_storage_proto_msgTypes[58]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *Bucket_Lifecycle_Rule) String() string {
@@ -7812,13 +6843,8 @@ func (x *Bucket_Lifecycle_Rule) String() string {
 func (*Bucket_Lifecycle_Rule) ProtoMessage() {}
 
 func (x *Bucket_Lifecycle_Rule) ProtoReflect() protoreflect.Message {
-<<<<<<< HEAD
 	mi := &file_google_storage_v2_storage_proto_msgTypes[71]
 	if x != nil {
-=======
-	mi := &file_google_storage_v2_storage_proto_msgTypes[58]
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -7830,11 +6856,7 @@ func (x *Bucket_Lifecycle_Rule) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use Bucket_Lifecycle_Rule.ProtoReflect.Descriptor instead.
 func (*Bucket_Lifecycle_Rule) Descriptor() ([]byte, []int) {
-<<<<<<< HEAD
 	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0}
-=======
-	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 4, 0}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *Bucket_Lifecycle_Rule) GetAction() *Bucket_Lifecycle_Rule_Action {
@@ -7867,17 +6889,9 @@ type Bucket_Lifecycle_Rule_Action struct {
 
 func (x *Bucket_Lifecycle_Rule_Action) Reset() {
 	*x = Bucket_Lifecycle_Rule_Action{}
-<<<<<<< HEAD
 	mi := &file_google_storage_v2_storage_proto_msgTypes[72]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_storage_v2_storage_proto_msgTypes[59]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *Bucket_Lifecycle_Rule_Action) String() string {
@@ -7887,13 +6901,8 @@ func (x *Bucket_Lifecycle_Rule_Action) String() string {
 func (*Bucket_Lifecycle_Rule_Action) ProtoMessage() {}
 
 func (x *Bucket_Lifecycle_Rule_Action) ProtoReflect() protoreflect.Message {
-<<<<<<< HEAD
 	mi := &file_google_storage_v2_storage_proto_msgTypes[72]
 	if x != nil {
-=======
-	mi := &file_google_storage_v2_storage_proto_msgTypes[59]
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -7905,11 +6914,7 @@ func (x *Bucket_Lifecycle_Rule_Action) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use Bucket_Lifecycle_Rule_Action.ProtoReflect.Descriptor instead.
 func (*Bucket_Lifecycle_Rule_Action) Descriptor() ([]byte, []int) {
-<<<<<<< HEAD
 	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0, 0}
-=======
-	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 4, 0, 0}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *Bucket_Lifecycle_Rule_Action) GetType() string {
@@ -7980,17 +6985,9 @@ type Bucket_Lifecycle_Rule_Condition struct {
 
 func (x *Bucket_Lifecycle_Rule_Condition) Reset() {
 	*x = Bucket_Lifecycle_Rule_Condition{}
-<<<<<<< HEAD
 	mi := &file_google_storage_v2_storage_proto_msgTypes[73]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_storage_v2_storage_proto_msgTypes[60]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *Bucket_Lifecycle_Rule_Condition) String() string {
@@ -8000,13 +6997,8 @@ func (x *Bucket_Lifecycle_Rule_Condition) String() string {
 func (*Bucket_Lifecycle_Rule_Condition) ProtoMessage() {}
 
 func (x *Bucket_Lifecycle_Rule_Condition) ProtoReflect() protoreflect.Message {
-<<<<<<< HEAD
 	mi := &file_google_storage_v2_storage_proto_msgTypes[73]
 	if x != nil {
-=======
-	mi := &file_google_storage_v2_storage_proto_msgTypes[60]
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -8018,11 +7010,7 @@ func (x *Bucket_Lifecycle_Rule_Condition) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use Bucket_Lifecycle_Rule_Condition.ProtoReflect.Descriptor instead.
 func (*Bucket_Lifecycle_Rule_Condition) Descriptor() ([]byte, []int) {
-<<<<<<< HEAD
 	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0, 1}
-=======
-	return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30, 4, 0, 1}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *Bucket_Lifecycle_Rule_Condition) GetAgeDays() int32 {
@@ -8127,7 +7115,6 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{
 [... remaining hunks over the regenerated file_google_storage_v2_storage_proto_rawDesc byte array elided: as in the hunks above, each one only deletes the "<<<<<<< HEAD" / "=======" / ">>>>>>> 70e0318b1 ([WIP] add archivista storage backend)" conflict markers together with the stale pre-rebase descriptor bytes, leaving the HEAD bytes in place ...]
0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, - 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, - 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, - 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, - 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, - 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, - 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, - 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x40, 0x0a, 0x1a, - 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, - 0x48, 0x04, 0x52, 0x17, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x47, - 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x48, 0x05, 0x52, 0x1a, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x48, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x48, - 0x06, 0x52, 0x1b, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, - 0x01, 0x12, 0x4f, 0x0a, 0x22, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, - 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, - 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x48, 0x07, 0x52, - 0x1e, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, - 0x01, 0x01, 0x12, 0x3e, 0x0a, 0x1c, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, - 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x61, - 0x6c, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, - 0x65, 0x73, 0x52, 0x65, 
0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x50, 0x65, 0x72, 0x43, 0x61, - 0x6c, 0x6c, 0x12, 0x47, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, - 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x63, 0x6f, - 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x46, 0x0a, 0x20, 0x63, - 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, - 0x15, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1c, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, - 0x74, 0x65, 0x73, 0x12, 0x53, 0x0a, 0x27, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, - 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x16, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, - 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x1d, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, - 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, - 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, @@ -9715,7 +8045,6 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, -<<<<<<< HEAD 0x63, 0x68, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, @@ -10585,860 +8914,6 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{ 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -======= - 0x63, 0x68, 0x22, 0xd6, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, - 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, - 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, - 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, - 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xaf, 0x02, 0x0a, 0x1a, - 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, - 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x72, - 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, - 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, - 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 
0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, - 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, - 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x22, 0x3a, 0x0a, - 0x1b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, - 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, - 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0x87, 0x05, 0x0a, 0x13, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x36, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, - 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, - 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, - 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, - 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, - 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, - 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, - 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, - 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, - 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, - 0x6b, 0x18, 0x07, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, - 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, - 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, - 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, - 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x22, 0xbf, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, - 0x69, 0x74, 0x68, 0x6d, 0x12, 0x30, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x12, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, - 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, - 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x65, 0x6e, 0x63, - 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, - 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xca, 0x05, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x73, 0x22, 0xb5, 0x05, 0x0a, 0x06, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x12, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x53, 0x5f, - 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, - 0x14, 0x4d, 0x41, 0x58, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, - 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x1c, 0x0a, 0x15, 0x4d, 0x41, - 0x58, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, - 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 
0x19, 0x0a, 0x12, 0x4d, 0x41, 0x58, 0x5f, - 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x4d, 0x42, 0x10, 0x80, - 0x80, 0xc0, 0x02, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, - 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, - 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x08, 0x12, 0x2a, - 0x0a, 0x25, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, - 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x56, 0x41, 0x4c, 0x55, - 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x20, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, - 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, - 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, - 0x45, 0x53, 0x10, 0x80, 0x40, 0x12, 0x2a, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x42, 0x55, 0x43, - 0x4b, 0x45, 0x54, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, - 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0xa0, - 0x01, 0x12, 0x27, 0x0a, 0x23, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, - 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x53, 0x5f, 0x50, 0x45, - 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x22, 0x0a, 0x1e, 0x4d, 0x41, - 0x58, 0x5f, 0x4c, 0x49, 0x46, 0x45, 0x43, 0x59, 0x43, 0x4c, 0x45, 0x5f, 0x52, 0x55, 0x4c, 0x45, - 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x26, - 0x0a, 0x22, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, - 0x55, 0x54, 0x45, 0x53, 0x10, 0x05, 0x12, 0x31, 0x0a, 0x2c, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, - 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, - 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x4b, 0x45, 0x59, 0x5f, - 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, 0x02, 0x12, 0x33, 0x0a, 0x2e, 0x4d, 0x41, 0x58, - 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, - 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x56, - 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, 0x08, 0x12, 0x1c, - 0x0a, 0x18, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x45, 0x4e, 0x54, - 0x52, 0x49, 0x45, 0x53, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, 0x40, 0x12, 0x1f, 0x0a, 0x1b, - 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, - 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x3f, 0x12, 0x1f, 0x0a, - 0x1a, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, - 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x01, 0x12, 0x2e, - 0x0a, 0x29, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x49, 0x44, 0x53, - 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x4f, 0x42, 0x4a, 0x45, - 0x43, 0x54, 0x53, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0xe8, 0x07, 0x12, 0x1e, - 0x0a, 0x1a, 0x53, 0x50, 0x4c, 0x49, 0x54, 0x5f, 0x54, 0x4f, 0x4b, 0x45, 0x4e, 
0x5f, 0x4d, 0x41, - 0x58, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x44, 0x41, 0x59, 0x53, 0x10, 0x0e, 0x1a, 0x02, - 0x10, 0x01, 0x22, 0x86, 0x24, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x17, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x08, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, - 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x4d, 0x0a, 0x07, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, - 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, - 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, - 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, - 0x6c, 0x61, 0x73, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x70, 0x6f, 0x18, - 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x70, 0x6f, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, - 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, - 0x03, 0x61, 0x63, 0x6c, 0x12, 0x54, 0x0a, 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x10, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x09, 0x6c, 0x69, - 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, - 0x6c, 0x65, 0x52, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, 0x40, 0x0a, - 0x0b, 
0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, - 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, - 0x32, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x6f, 0x72, 0x73, 0x52, 0x04, 0x63, - 0x6f, 0x72, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, - 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x3d, - 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3b, 0x0a, - 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, - 0x65, 0x52, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, - 0x12, 0x3b, 0x0a, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x12, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x6f, 0x67, - 0x67, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x33, 0x0a, - 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, - 0x65, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 
0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x6e, - 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x07, 0x62, 0x69, 0x6c, 0x6c, - 0x69, 0x6e, 0x67, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x62, 0x69, - 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x54, 0x0a, 0x10, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x74, 0x65, 0x6e, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x42, 0x0a, 0x0a, 0x69, - 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x69, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x23, 0x0a, 0x0d, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x7a, 0x73, - 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, - 0x73, 0x50, 0x7a, 0x73, 0x12, 0x67, 0x0a, 0x17, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, - 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, - 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, - 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, - 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x6f, - 0x63, 0x6c, 0x61, 0x73, 0x73, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, - 0x12, 0x6b, 0x0a, 0x16, 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x20, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x48, 0x69, 0x65, 0x72, - 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x15, 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, - 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x5d, 0x0a, - 0x12, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 
0x65, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x53, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x73, 0x6f, 0x66, 0x74, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x30, 0x0a, 0x07, - 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x72, 0x50, 0x61, 0x79, 0x73, 0x1a, 0x87, - 0x01, 0x0a, 0x04, 0x43, 0x6f, 0x72, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, - 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, - 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, - 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x41, 0x67, - 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x1a, 0x5c, 0x0a, 0x0a, 0x45, 0x6e, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, - 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x1a, 0xb1, 0x02, 0x0a, 0x09, 0x49, 0x61, 0x6d, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x7b, 0x0a, 0x1b, 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x5f, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x61, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x55, - 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, - 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x18, 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x12, 0x38, 0x0a, 0x18, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x61, 0x63, 0x63, 0x65, - 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x16, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x41, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x50, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x18, 0x55, - 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 
0x65, 0x76, 0x65, - 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x12, 0x37, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0xdb, 0x07, 0x0a, 0x09, 0x4c, - 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, 0x3c, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, - 0x52, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x1a, 0x8f, 0x07, 0x0a, 0x04, 0x52, 0x75, 0x6c, 0x65, 0x12, - 0x47, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, - 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x50, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, - 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x41, 0x0a, 0x06, 0x41, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x1a, 0xa8, 0x05, - 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x08, 0x61, - 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, - 0x07, 0x61, 0x67, 0x65, 0x44, 0x61, 0x79, 0x73, 0x88, 0x01, 0x01, 0x12, 0x38, 0x0a, 0x0e, 0x63, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, - 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, - 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x1c, 0x0a, 0x07, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x06, 0x69, 0x73, 0x4c, 0x69, 0x76, 0x65, - 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x12, 0x6e, 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, - 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x48, - 0x02, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x4e, 0x65, 0x77, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 
0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x32, 0x0a, 0x15, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x73, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, - 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x16, 0x64, 0x61, - 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x48, 0x03, 0x52, 0x13, 0x64, 0x61, - 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, - 0x65, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, - 0x61, 0x74, 0x65, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x42, - 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x40, 0x0a, 0x1a, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, - 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x48, 0x04, 0x52, 0x17, 0x64, 0x61, 0x79, - 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, - 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x16, 0x6e, 0x6f, 0x6e, 0x63, 0x75, - 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, - 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x14, 0x6e, 0x6f, 0x6e, 0x63, - 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, - 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, - 0x69, 0x78, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x65, 0x73, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x42, 0x0b, - 0x0a, 0x09, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, - 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, - 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x19, - 0x0a, 0x17, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, - 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x64, 0x61, - 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, - 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x54, 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x67, - 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6c, 0x6f, 0x67, 0x42, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x6f, 0x67, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, - 0x6f, 0x67, 0x4f, 0x62, 0x6a, 
0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x1a, 0xbb, - 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0e, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, - 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, - 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x6b, - 0x65, 0x64, 0x12, 0x48, 0x0a, 0x12, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, - 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xd3, 0x01, 0x0a, - 0x10, 0x53, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x12, 0x4d, 0x0a, 0x12, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, - 0x12, 0x46, 0x0a, 0x0e, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x48, 0x01, 0x52, 0x0d, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, - 0x65, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x72, 0x65, 0x74, - 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, - 0x11, 0x0a, 0x0f, 0x5f, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x1a, 0x26, 0x0a, 0x0a, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, - 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x59, 0x0a, 0x07, 0x57, 0x65, - 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x70, 0x61, - 0x67, 0x65, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0e, 0x6d, 0x61, 0x69, 0x6e, 0x50, 0x61, 0x67, 0x65, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, - 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x70, 0x61, 0x67, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, - 0x64, 0x50, 0x61, 0x67, 0x65, 0x1a, 0x3e, 0x0a, 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, - 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, - 0x0a, 0x0e, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6c, 0x6f, 
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x4c, 0x6f, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xd6, 0x02, 0x0a, 0x09, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, - 0x61, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x40, 0x0a, - 0x0b, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, - 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, - 0x39, 0x0a, 0x16, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, - 0x00, 0x52, 0x14, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x88, 0x01, 0x01, 0x12, 0x70, 0x0a, 0x22, 0x74, 0x65, - 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, - 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x48, 0x01, 0x52, 0x1e, 0x74, 0x65, 0x72, 0x6d, 0x69, - 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x42, 0x19, 0x0a, 0x17, - 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x74, 0x65, 0x72, 0x6d, - 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, - 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x36, - 0x0a, 0x15, 0x48, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, - 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x65, - 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x3a, 0x58, 0xea, 0x41, 0x55, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x2a, 0x07, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x73, 0x32, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x97, 
[remaining deleted lines of the stale file_google_storage_v2_storage_proto_rawDesc byte literal elided — generated protobuf descriptor bytes only]
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 var (
@@ -11454,11 +8929,7 @@ func file_google_storage_v2_storage_proto_rawDescGZIP() []byte {
 }

 var file_google_storage_v2_storage_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
-<<<<<<< HEAD
 var file_google_storage_v2_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 75)
-=======
-var file_google_storage_v2_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 62)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 var file_google_storage_v2_storage_proto_goTypes = []any{
 	(ServiceConstants_Values)(0), // 0: google.storage.v2.ServiceConstants.Values
 	(*DeleteBucketRequest)(nil),  // 1: google.storage.v2.DeleteBucketRequest
@@ -11476,7 +8947,6 @@ var file_google_storage_v2_storage_proto_goTypes = []any{
 	(*ReadObjectRequest)(nil),    // 13: google.storage.v2.ReadObjectRequest
 	(*GetObjectRequest)(nil),     // 14: google.storage.v2.GetObjectRequest
 	(*ReadObjectResponse)(nil),   // 15: google.storage.v2.ReadObjectResponse
-<<<<<<< HEAD
 	(*BidiReadObjectSpec)(nil),     // 16: google.storage.v2.BidiReadObjectSpec
 	(*BidiReadObjectRequest)(nil),  // 17: google.storage.v2.BidiReadObjectRequest
 	(*BidiReadObjectResponse)(nil), // 18: google.storage.v2.BidiReadObjectResponse
@@ -11715,210 +9185,6 @@ var file_google_storage_v2_storage_proto_depIdxs = []int32{
 	112, // [112:112] is the sub-list for extension type_name
 	112, // [112:112] is the sub-list for extension extendee
 	0,   // [0:112] is the sub-list for field type_name
-=======
[stale pre-merge copies of file_google_storage_v2_storage_proto_goTypes (entries 16–72, WriteObjectSpec through iampb.TestIamPermissionsResponse) and file_google_storage_v2_storage_proto_depIdxs (entries 0–137 plus the five sub-list markers), all deleted here, elided — generated lookup tables with no hand-written content]
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 func init() { file_google_storage_v2_storage_proto_init() }
@@ -11926,731 +9192,6 @@ func file_google_storage_v2_storage_proto_init() {
 	if File_google_storage_v2_storage_proto != nil {
 		return
 	}
-<<<<<<< HEAD
-=======
[deleted pre-merge `if !protoimpl.UnsafeEnabled { ... }` block elided — identical generated Exporter stubs for msgTypes[0] through msgTypes[29] (DeleteBucketRequest, GetBucketRequest, ..., CommonObjectRequestParams); the diff continues beyond this excerpt]
v.(*ServiceConstants); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[30].Exporter = func(v any, i int) any { - switch v := v.(*Bucket); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[31].Exporter = func(v any, i int) any { - switch v := v.(*BucketAccessControl); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[32].Exporter = func(v any, i int) any { - switch v := v.(*ChecksummedData); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[33].Exporter = func(v any, i int) any { - switch v := v.(*ObjectChecksums); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[34].Exporter = func(v any, i int) any { - switch v := v.(*CustomerEncryption); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[35].Exporter = func(v any, i int) any { - switch v := v.(*Object); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[36].Exporter = func(v any, i int) any { - switch v := v.(*ObjectAccessControl); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[37].Exporter = func(v any, i int) any { - switch v := v.(*ListObjectsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[38].Exporter = func(v any, i int) any { - switch v := v.(*ProjectTeam); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[39].Exporter = func(v any, i int) any { - switch v := v.(*Owner); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[40].Exporter = func(v any, i int) any { - switch v := v.(*ContentRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[41].Exporter = func(v any, i int) any { - switch v := v.(*ComposeObjectRequest_SourceObject); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[42].Exporter = func(v any, i int) any { - switch v := v.(*ComposeObjectRequest_SourceObject_ObjectPreconditions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_google_storage_v2_storage_proto_msgTypes[43].Exporter = func(v any, i int) any { - switch v := v.(*Bucket_Billing); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[44].Exporter = func(v any, i int) any { - switch v := v.(*Bucket_Cors); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[45].Exporter = func(v any, i int) any { - switch v := v.(*Bucket_Encryption); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[46].Exporter = func(v any, i int) any { - switch v := v.(*Bucket_IamConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[47].Exporter = func(v any, i int) any { - switch v := v.(*Bucket_Lifecycle); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[48].Exporter = func(v any, i int) any { - switch v := v.(*Bucket_Logging); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[49].Exporter = func(v any, i int) any { - switch v := v.(*Bucket_RetentionPolicy); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[50].Exporter = func(v any, i int) any { - switch v := v.(*Bucket_SoftDeletePolicy); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[51].Exporter = func(v any, i int) any { - switch v := v.(*Bucket_Versioning); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[52].Exporter = func(v any, i int) any { - switch v := v.(*Bucket_Website); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[53].Exporter = func(v any, i int) any { - switch v := v.(*Bucket_CustomPlacementConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[54].Exporter = func(v any, i int) any { - switch v := v.(*Bucket_Autoclass); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[55].Exporter = func(v any, i int) any { - switch v := v.(*Bucket_HierarchicalNamespace); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[57].Exporter = func(v any, i int) any { - switch v := v.(*Bucket_IamConfig_UniformBucketLevelAccess); i { - case 0: - return &v.state - 
case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[58].Exporter = func(v any, i int) any { - switch v := v.(*Bucket_Lifecycle_Rule); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[59].Exporter = func(v any, i int) any { - switch v := v.(*Bucket_Lifecycle_Rule_Action); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[60].Exporter = func(v any, i int) any { - switch v := v.(*Bucket_Lifecycle_Rule_Condition); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) file_google_storage_v2_storage_proto_msgTypes[0].OneofWrappers = []any{} file_google_storage_v2_storage_proto_msgTypes[1].OneofWrappers = []any{} file_google_storage_v2_storage_proto_msgTypes[3].OneofWrappers = []any{} @@ -12661,19 +9202,14 @@ func file_google_storage_v2_storage_proto_init() { file_google_storage_v2_storage_proto_msgTypes[12].OneofWrappers = []any{} file_google_storage_v2_storage_proto_msgTypes[13].OneofWrappers = []any{} file_google_storage_v2_storage_proto_msgTypes[15].OneofWrappers = []any{} -<<<<<<< HEAD file_google_storage_v2_storage_proto_msgTypes[18].OneofWrappers = []any{} file_google_storage_v2_storage_proto_msgTypes[19].OneofWrappers = []any{} file_google_storage_v2_storage_proto_msgTypes[26].OneofWrappers = []any{} file_google_storage_v2_storage_proto_msgTypes[27].OneofWrappers = []any{ -======= - file_google_storage_v2_storage_proto_msgTypes[16].OneofWrappers = []any{ ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) (*WriteObjectRequest_UploadId)(nil), (*WriteObjectRequest_WriteObjectSpec)(nil), (*WriteObjectRequest_ChecksummedData)(nil), } -<<<<<<< HEAD file_google_storage_v2_storage_proto_msgTypes[28].OneofWrappers = []any{ (*WriteObjectResponse_PersistedSize)(nil), (*WriteObjectResponse_Resource)(nil), @@ -12704,46 +9240,13 @@ func file_google_storage_v2_storage_proto_init() { file_google_storage_v2_storage_proto_msgTypes[63].OneofWrappers = []any{} file_google_storage_v2_storage_proto_msgTypes[67].OneofWrappers = []any{} file_google_storage_v2_storage_proto_msgTypes[73].OneofWrappers = []any{} -======= - file_google_storage_v2_storage_proto_msgTypes[17].OneofWrappers = []any{ - (*WriteObjectResponse_PersistedSize)(nil), - (*WriteObjectResponse_Resource)(nil), - } - file_google_storage_v2_storage_proto_msgTypes[18].OneofWrappers = []any{ - (*BidiWriteObjectRequest_UploadId)(nil), - (*BidiWriteObjectRequest_WriteObjectSpec)(nil), - (*BidiWriteObjectRequest_ChecksummedData)(nil), - } - file_google_storage_v2_storage_proto_msgTypes[19].OneofWrappers = []any{ - (*BidiWriteObjectResponse_PersistedSize)(nil), - (*BidiWriteObjectResponse_Resource)(nil), - } - file_google_storage_v2_storage_proto_msgTypes[20].OneofWrappers = []any{} - file_google_storage_v2_storage_proto_msgTypes[22].OneofWrappers = []any{ - (*QueryWriteStatusResponse_PersistedSize)(nil), - (*QueryWriteStatusResponse_Resource)(nil), - } - file_google_storage_v2_storage_proto_msgTypes[23].OneofWrappers = []any{} - file_google_storage_v2_storage_proto_msgTypes[27].OneofWrappers = []any{} - 
file_google_storage_v2_storage_proto_msgTypes[32].OneofWrappers = []any{} - file_google_storage_v2_storage_proto_msgTypes[33].OneofWrappers = []any{} - file_google_storage_v2_storage_proto_msgTypes[35].OneofWrappers = []any{} - file_google_storage_v2_storage_proto_msgTypes[42].OneofWrappers = []any{} - file_google_storage_v2_storage_proto_msgTypes[50].OneofWrappers = []any{} - file_google_storage_v2_storage_proto_msgTypes[54].OneofWrappers = []any{} - file_google_storage_v2_storage_proto_msgTypes[60].OneofWrappers = []any{} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_storage_v2_storage_proto_rawDesc, NumEnums: 1, -<<<<<<< HEAD NumMessages: 75, -======= - NumMessages: 62, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) NumExtensions: 0, NumServices: 1, }, @@ -12801,7 +9304,6 @@ type StorageClient interface { // Concatenates a list of existing objects into a new object in the same // bucket. ComposeObject(ctx context.Context, in *ComposeObjectRequest, opts ...grpc.CallOption) (*Object, error) -<<<<<<< HEAD // Deletes an object and its metadata. Deletions are permanent if versioning // is not enabled for the bucket, or if the generation parameter is used, or // if [soft delete](https://cloud.google.com/storage/docs/soft-delete) is not @@ -12822,14 +9324,6 @@ type StorageClient interface { // Requires `storage.objects.delete` // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) on // the bucket. -======= - // Deletes an object and its metadata. - // - // Deletions are normally permanent when versioning is disabled or whenever - // the generation parameter is used. However, if soft delete is enabled for - // the bucket, deleted objects can be restored using RestoreObject until the - // soft delete retention period has passed. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // Restores a soft-deleted object. RestoreObject(ctx context.Context, in *RestoreObjectRequest, opts ...grpc.CallOption) (*Object, error) @@ -12842,7 +9336,6 @@ type StorageClient interface { // they could either complete before the cancellation or fail if the // cancellation completes first. CancelResumableWrite(ctx context.Context, in *CancelResumableWriteRequest, opts ...grpc.CallOption) (*CancelResumableWriteResponse, error) -<<<<<<< HEAD // Retrieves object metadata. // // **IAM Permissions**: @@ -12880,12 +9373,6 @@ type StorageClient interface { // This API is currently in preview and is not yet available for general // use. BidiReadObject(ctx context.Context, opts ...grpc.CallOption) (Storage_BidiReadObjectClient, error) -======= - // Retrieves an object's metadata. - GetObject(ctx context.Context, in *GetObjectRequest, opts ...grpc.CallOption) (*Object, error) - // Reads an object's data. - ReadObject(ctx context.Context, in *ReadObjectRequest, opts ...grpc.CallOption) (Storage_ReadObjectClient, error) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Updates an object's metadata. // Equivalent to JSON API's storage.objects.patch. UpdateObject(ctx context.Context, in *UpdateObjectRequest, opts ...grpc.CallOption) (*Object, error) @@ -12942,25 +9429,18 @@ type StorageClient interface { // whether the service views the object as complete. 
// // Attempting to resume an already finalized object will result in an OK -<<<<<<< HEAD // status, with a `WriteObjectResponse` containing the finalized object's -======= - // status, with a WriteObjectResponse containing the finalized object's ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // metadata. // // Alternatively, the BidiWriteObject operation may be used to write an // object with controls over flushing and the ability to fetch the ability to // determine the current persisted size. -<<<<<<< HEAD // // **IAM Permissions**: // // Requires `storage.objects.create` // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) on // the bucket. -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_WriteObjectClient, error) // Stores a new object and metadata. // @@ -12979,7 +9459,6 @@ type StorageClient interface { // always be sent to the client, regardless of the value of `state_lookup`. BidiWriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_BidiWriteObjectClient, error) // Retrieves a list of objects matching the criteria. -<<<<<<< HEAD // // **IAM Permissions**: // @@ -12987,13 +9466,10 @@ type StorageClient interface { // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) // to use this method. To return object ACLs, the authenticated user must also // have the `storage.objects.getIamPolicy` permission. -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ListObjects(ctx context.Context, in *ListObjectsRequest, opts ...grpc.CallOption) (*ListObjectsResponse, error) // Rewrites a source object to a destination object. Optionally overrides // metadata. RewriteObject(ctx context.Context, in *RewriteObjectRequest, opts ...grpc.CallOption) (*RewriteResponse, error) -<<<<<<< HEAD // Starts a resumable write operation. This // method is part of the [Resumable // upload](https://cloud.google.com/storage/docs/resumable-uploads) feature. @@ -13028,26 +9504,6 @@ type StorageClient interface { QueryWriteStatus(ctx context.Context, in *QueryWriteStatusRequest, opts ...grpc.CallOption) (*QueryWriteStatusResponse, error) // Moves the source object to the destination object in the same bucket. MoveObject(ctx context.Context, in *MoveObjectRequest, opts ...grpc.CallOption) (*Object, error) -======= - // Starts a resumable write. How long the write operation remains valid, and - // what happens when the write operation becomes invalid, are - // service-dependent. - StartResumableWrite(ctx context.Context, in *StartResumableWriteRequest, opts ...grpc.CallOption) (*StartResumableWriteResponse, error) - // Determines the `persisted_size` for an object that is being written, which - // can then be used as the `write_offset` for the next `Write()` call. - // - // If the object does not exist (i.e., the object has been deleted, or the - // first `Write()` has not yet reached the service), this method returns the - // error `NOT_FOUND`. - // - // The client **may** call `QueryWriteStatus()` at any time to determine how - // much data has been processed for this object. This is useful if the - // client is buffering data and needs to know which data can be safely - // evicted. For any sequence of `QueryWriteStatus()` calls for a given - // object name, the sequence of returned `persisted_size` values will be - // non-decreasing. 
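A minimal sketch of the `QueryWriteStatus` polling flow described in the comment above, against the generated `storagepb` client; `client`, `ctx`, and `uploadID` (from an earlier `StartResumableWrite`) are hypothetical, not part of this patch:

```
// Sketch only: poll how much of an in-progress resumable upload has
// been persisted. uploadID is hypothetical.
resp, err := client.QueryWriteStatus(ctx, &storagepb.QueryWriteStatusRequest{
    UploadId: uploadID,
})
if err != nil {
    return err
}
// persisted_size is non-decreasing across calls, so buffered data below
// this offset can be safely evicted on the client side.
nextOffset := resp.GetPersistedSize()
_ = nextOffset
```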
- QueryWriteStatus(ctx context.Context, in *QueryWriteStatusRequest, opts ...grpc.CallOption) (*QueryWriteStatusResponse, error) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type storageClient struct { @@ -13216,7 +9672,6 @@ func (x *storageReadObjectClient) Recv() (*ReadObjectResponse, error) { return m, nil } -<<<<<<< HEAD func (c *storageClient) BidiReadObject(ctx context.Context, opts ...grpc.CallOption) (Storage_BidiReadObjectClient, error) { stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[1], "/google.storage.v2.Storage/BidiReadObject", opts...) if err != nil { @@ -13248,8 +9703,6 @@ func (x *storageBidiReadObjectClient) Recv() (*BidiReadObjectResponse, error) { return m, nil } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (c *storageClient) UpdateObject(ctx context.Context, in *UpdateObjectRequest, opts ...grpc.CallOption) (*Object, error) { out := new(Object) err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/UpdateObject", in, out, opts...) @@ -13260,11 +9713,7 @@ func (c *storageClient) UpdateObject(ctx context.Context, in *UpdateObjectReques } func (c *storageClient) WriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_WriteObjectClient, error) { -<<<<<<< HEAD stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[2], "/google.storage.v2.Storage/WriteObject", opts...) -======= - stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[1], "/google.storage.v2.Storage/WriteObject", opts...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -13298,11 +9747,7 @@ func (x *storageWriteObjectClient) CloseAndRecv() (*WriteObjectResponse, error) } func (c *storageClient) BidiWriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_BidiWriteObjectClient, error) { -<<<<<<< HEAD stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[3], "/google.storage.v2.Storage/BidiWriteObject", opts...) -======= - stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[2], "/google.storage.v2.Storage/BidiWriteObject", opts...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -13368,7 +9813,6 @@ func (c *storageClient) QueryWriteStatus(ctx context.Context, in *QueryWriteStat return out, nil } -<<<<<<< HEAD func (c *storageClient) MoveObject(ctx context.Context, in *MoveObjectRequest, opts ...grpc.CallOption) (*Object, error) { out := new(Object) err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/MoveObject", in, out, opts...) @@ -13378,8 +9822,6 @@ func (c *storageClient) MoveObject(ctx context.Context, in *MoveObjectRequest, o return out, nil } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // StorageServer is the server API for Storage service. type StorageServer interface { // Permanently deletes an empty bucket. @@ -13413,7 +9855,6 @@ type StorageServer interface { // Concatenates a list of existing objects into a new object in the same // bucket. ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) -<<<<<<< HEAD // Deletes an object and its metadata. Deletions are permanent if versioning // is not enabled for the bucket, or if the generation parameter is used, or // if [soft delete](https://cloud.google.com/storage/docs/soft-delete) is not @@ -13434,14 +9875,6 @@ type StorageServer interface { // Requires `storage.objects.delete` // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) on // the bucket. 
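The soft-delete behavior spelled out in the `DeleteObject` comment above is easiest to see through the high-level client. A sketch, where `client`, `ctx`, the bucket/object names, and `gen` are hypothetical:

```
// Sketch only: delete an object, then restore it while the bucket's
// soft-delete retention period is still in effect.
obj := client.Bucket("example-bucket").Object("example-object")
if err := obj.Delete(ctx); err != nil {
    return err
}
// While the retention period lasts, the soft-deleted generation can be
// brought back via Restore.
restored, err := obj.Generation(gen).Restore(ctx, &storage.RestoreOptions{})
if err != nil {
    return err
}
_ = restored
```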
-======= - // Deletes an object and its metadata. - // - // Deletions are normally permanent when versioning is disabled or whenever - // the generation parameter is used. However, if soft delete is enabled for - // the bucket, deleted objects can be restored using RestoreObject until the - // soft delete retention period has passed. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) DeleteObject(context.Context, *DeleteObjectRequest) (*emptypb.Empty, error) // Restores a soft-deleted object. RestoreObject(context.Context, *RestoreObjectRequest) (*Object, error) @@ -13454,7 +9887,6 @@ type StorageServer interface { // they could either complete before the cancellation or fail if the // cancellation completes first. CancelResumableWrite(context.Context, *CancelResumableWriteRequest) (*CancelResumableWriteResponse, error) -<<<<<<< HEAD // Retrieves object metadata. // // **IAM Permissions**: @@ -13492,12 +9924,6 @@ type StorageServer interface { // This API is currently in preview and is not yet available for general // use. BidiReadObject(Storage_BidiReadObjectServer) error -======= - // Retrieves an object's metadata. - GetObject(context.Context, *GetObjectRequest) (*Object, error) - // Reads an object's data. - ReadObject(*ReadObjectRequest, Storage_ReadObjectServer) error ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Updates an object's metadata. // Equivalent to JSON API's storage.objects.patch. UpdateObject(context.Context, *UpdateObjectRequest) (*Object, error) @@ -13554,25 +9980,18 @@ type StorageServer interface { // whether the service views the object as complete. // // Attempting to resume an already finalized object will result in an OK -<<<<<<< HEAD // status, with a `WriteObjectResponse` containing the finalized object's -======= - // status, with a WriteObjectResponse containing the finalized object's ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // metadata. // // Alternatively, the BidiWriteObject operation may be used to write an // object with controls over flushing and the ability to fetch the ability to // determine the current persisted size. -<<<<<<< HEAD // // **IAM Permissions**: // // Requires `storage.objects.create` // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) on // the bucket. -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WriteObject(Storage_WriteObjectServer) error // Stores a new object and metadata. // @@ -13591,7 +10010,6 @@ type StorageServer interface { // always be sent to the client, regardless of the value of `state_lookup`. BidiWriteObject(Storage_BidiWriteObjectServer) error // Retrieves a list of objects matching the criteria. -<<<<<<< HEAD // // **IAM Permissions**: // @@ -13599,13 +10017,10 @@ type StorageServer interface { // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) // to use this method. To return object ACLs, the authenticated user must also // have the `storage.objects.getIamPolicy` permission. -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ListObjects(context.Context, *ListObjectsRequest) (*ListObjectsResponse, error) // Rewrites a source object to a destination object. Optionally overrides // metadata. RewriteObject(context.Context, *RewriteObjectRequest) (*RewriteResponse, error) -<<<<<<< HEAD // Starts a resumable write operation. This // method is part of the [Resumable // upload](https://cloud.google.com/storage/docs/resumable-uploads) feature. 
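A sketch of kicking off the resumable write described above via the generated gRPC surface; `client`, `ctx`, and the resource names are hypothetical:

```
// Sketch only: start a resumable write and keep the upload ID for
// subsequent WriteObject / QueryWriteStatus calls.
resp, err := client.StartResumableWrite(ctx, &storagepb.StartResumableWriteRequest{
    WriteObjectSpec: &storagepb.WriteObjectSpec{
        Resource: &storagepb.Object{
            Bucket: "projects/_/buckets/example-bucket",
            Name:   "example-object",
        },
    },
})
if err != nil {
    return err
}
uploadID := resp.GetUploadId()
_ = uploadID
```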
@@ -13640,26 +10055,6 @@ type StorageServer interface { QueryWriteStatus(context.Context, *QueryWriteStatusRequest) (*QueryWriteStatusResponse, error) // Moves the source object to the destination object in the same bucket. MoveObject(context.Context, *MoveObjectRequest) (*Object, error) -======= - // Starts a resumable write. How long the write operation remains valid, and - // what happens when the write operation becomes invalid, are - // service-dependent. - StartResumableWrite(context.Context, *StartResumableWriteRequest) (*StartResumableWriteResponse, error) - // Determines the `persisted_size` for an object that is being written, which - // can then be used as the `write_offset` for the next `Write()` call. - // - // If the object does not exist (i.e., the object has been deleted, or the - // first `Write()` has not yet reached the service), this method returns the - // error `NOT_FOUND`. - // - // The client **may** call `QueryWriteStatus()` at any time to determine how - // much data has been processed for this object. This is useful if the - // client is buffering data and needs to know which data can be safely - // evicted. For any sequence of `QueryWriteStatus()` calls for a given - // object name, the sequence of returned `persisted_size` values will be - // non-decreasing. - QueryWriteStatus(context.Context, *QueryWriteStatusRequest) (*QueryWriteStatusResponse, error) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // UnimplementedStorageServer can be embedded to have forward compatible implementations. @@ -13667,7 +10062,6 @@ type UnimplementedStorageServer struct { } func (*UnimplementedStorageServer) DeleteBucket(context.Context, *DeleteBucketRequest) (*emptypb.Empty, error) { -<<<<<<< HEAD return nil, status1.Errorf(codes.Unimplemented, "method DeleteBucket not implemented") } func (*UnimplementedStorageServer) GetBucket(context.Context, *GetBucketRequest) (*Bucket, error) { @@ -13738,72 +10132,6 @@ func (*UnimplementedStorageServer) QueryWriteStatus(context.Context, *QueryWrite } func (*UnimplementedStorageServer) MoveObject(context.Context, *MoveObjectRequest) (*Object, error) { return nil, status1.Errorf(codes.Unimplemented, "method MoveObject not implemented") -======= - return nil, status.Errorf(codes.Unimplemented, "method DeleteBucket not implemented") -} -func (*UnimplementedStorageServer) GetBucket(context.Context, *GetBucketRequest) (*Bucket, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetBucket not implemented") -} -func (*UnimplementedStorageServer) CreateBucket(context.Context, *CreateBucketRequest) (*Bucket, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateBucket not implemented") -} -func (*UnimplementedStorageServer) ListBuckets(context.Context, *ListBucketsRequest) (*ListBucketsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListBuckets not implemented") -} -func (*UnimplementedStorageServer) LockBucketRetentionPolicy(context.Context, *LockBucketRetentionPolicyRequest) (*Bucket, error) { - return nil, status.Errorf(codes.Unimplemented, "method LockBucketRetentionPolicy not implemented") -} -func (*UnimplementedStorageServer) GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetIamPolicy not implemented") -} -func (*UnimplementedStorageServer) SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error) { - return nil, status.Errorf(codes.Unimplemented, "method 
SetIamPolicy not implemented") -} -func (*UnimplementedStorageServer) TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method TestIamPermissions not implemented") -} -func (*UnimplementedStorageServer) UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateBucket not implemented") -} -func (*UnimplementedStorageServer) ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) { - return nil, status.Errorf(codes.Unimplemented, "method ComposeObject not implemented") -} -func (*UnimplementedStorageServer) DeleteObject(context.Context, *DeleteObjectRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteObject not implemented") -} -func (*UnimplementedStorageServer) RestoreObject(context.Context, *RestoreObjectRequest) (*Object, error) { - return nil, status.Errorf(codes.Unimplemented, "method RestoreObject not implemented") -} -func (*UnimplementedStorageServer) CancelResumableWrite(context.Context, *CancelResumableWriteRequest) (*CancelResumableWriteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CancelResumableWrite not implemented") -} -func (*UnimplementedStorageServer) GetObject(context.Context, *GetObjectRequest) (*Object, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetObject not implemented") -} -func (*UnimplementedStorageServer) ReadObject(*ReadObjectRequest, Storage_ReadObjectServer) error { - return status.Errorf(codes.Unimplemented, "method ReadObject not implemented") -} -func (*UnimplementedStorageServer) UpdateObject(context.Context, *UpdateObjectRequest) (*Object, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateObject not implemented") -} -func (*UnimplementedStorageServer) WriteObject(Storage_WriteObjectServer) error { - return status.Errorf(codes.Unimplemented, "method WriteObject not implemented") -} -func (*UnimplementedStorageServer) BidiWriteObject(Storage_BidiWriteObjectServer) error { - return status.Errorf(codes.Unimplemented, "method BidiWriteObject not implemented") -} -func (*UnimplementedStorageServer) ListObjects(context.Context, *ListObjectsRequest) (*ListObjectsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListObjects not implemented") -} -func (*UnimplementedStorageServer) RewriteObject(context.Context, *RewriteObjectRequest) (*RewriteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RewriteObject not implemented") -} -func (*UnimplementedStorageServer) StartResumableWrite(context.Context, *StartResumableWriteRequest) (*StartResumableWriteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method StartResumableWrite not implemented") -} -func (*UnimplementedStorageServer) QueryWriteStatus(context.Context, *QueryWriteStatusRequest) (*QueryWriteStatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method QueryWriteStatus not implemented") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func RegisterStorageServer(s *grpc.Server, srv StorageServer) { @@ -14083,7 +10411,6 @@ func (x *storageReadObjectServer) Send(m *ReadObjectResponse) error { return x.ServerStream.SendMsg(m) } -<<<<<<< HEAD func _Storage_BidiReadObject_Handler(srv interface{}, stream grpc.ServerStream) error { return 
srv.(StorageServer).BidiReadObject(&storageBidiReadObjectServer{stream}) } @@ -14110,8 +10437,6 @@ func (x *storageBidiReadObjectServer) Recv() (*BidiReadObjectRequest, error) { return m, nil } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func _Storage_UpdateObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(UpdateObjectRequest) if err := dec(in); err != nil { @@ -14254,7 +10579,6 @@ func _Storage_QueryWriteStatus_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } -<<<<<<< HEAD func _Storage_MoveObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(MoveObjectRequest) if err := dec(in); err != nil { @@ -14273,8 +10597,6 @@ func _Storage_MoveObject_Handler(srv interface{}, ctx context.Context, dec func( return interceptor(ctx, in, info, handler) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _Storage_serviceDesc = grpc.ServiceDesc{ ServiceName: "google.storage.v2.Storage", HandlerType: (*StorageServer)(nil), @@ -14355,13 +10677,10 @@ var _Storage_serviceDesc = grpc.ServiceDesc{ MethodName: "QueryWriteStatus", Handler: _Storage_QueryWriteStatus_Handler, }, -<<<<<<< HEAD { MethodName: "MoveObject", Handler: _Storage_MoveObject_Handler, }, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }, Streams: []grpc.StreamDesc{ { @@ -14370,15 +10689,12 @@ var _Storage_serviceDesc = grpc.ServiceDesc{ ServerStreams: true, }, { -<<<<<<< HEAD StreamName: "BidiReadObject", Handler: _Storage_BidiReadObject_Handler, ServerStreams: true, ClientStreams: true, }, { -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) StreamName: "WriteObject", Handler: _Storage_WriteObject_Handler, ClientStreams: true, diff --git a/vendor/cloud.google.com/go/storage/internal/experimental.go b/vendor/cloud.google.com/go/storage/internal/experimental.go index 1483c46f5c..2fd5111fb3 100644 --- a/vendor/cloud.google.com/go/storage/internal/experimental.go +++ b/vendor/cloud.google.com/go/storage/internal/experimental.go @@ -29,11 +29,8 @@ var ( // WithReadStallTimeout is a function which is implemented by storage package. // It takes ReadStallTimeoutConfig as inputs and returns a option.ClientOption. WithReadStallTimeout any // func (*ReadStallTimeoutConfig) option.ClientOption -<<<<<<< HEAD // WithGRPCBidiReads is a function which is implemented by the storage package. // It sets the gRPC client to use the BidiReadObject API for downloads. WithGRPCBidiReads any // func() option.ClientOption -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) diff --git a/vendor/cloud.google.com/go/storage/internal/version.go b/vendor/cloud.google.com/go/storage/internal/version.go index b923404b74..ba56cacd8e 100644 --- a/vendor/cloud.google.com/go/storage/internal/version.go +++ b/vendor/cloud.google.com/go/storage/internal/version.go @@ -15,8 +15,4 @@ package internal // Version is the current tagged release of the library. 
-<<<<<<< HEAD const Version = "1.50.0" -======= -const Version = "1.48.0" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go index 985310ae82..99783f3df4 100644 --- a/vendor/cloud.google.com/go/storage/invoke.go +++ b/vendor/cloud.google.com/go/storage/invoke.go @@ -58,15 +58,9 @@ func run(ctx context.Context, call func(ctx context.Context) error, retry *retry } bo := gax.Backoff{} if retry.backoff != nil { -<<<<<<< HEAD bo.Multiplier = retry.backoff.Multiplier bo.Initial = retry.backoff.Initial bo.Max = retry.backoff.Max -======= - bo.Multiplier = retry.backoff.GetMultiplier() - bo.Initial = retry.backoff.GetInitial() - bo.Max = retry.backoff.GetMax() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var errorFunc func(err error) bool = ShouldRetry if retry.shouldRetry != nil { diff --git a/vendor/cloud.google.com/go/storage/option.go b/vendor/cloud.google.com/go/storage/option.go index 467719b3a0..16d57644aa 100644 --- a/vendor/cloud.google.com/go/storage/option.go +++ b/vendor/cloud.google.com/go/storage/option.go @@ -40,10 +40,7 @@ func init() { storageinternal.WithMetricExporter = withMetricExporter storageinternal.WithMetricInterval = withMetricInterval storageinternal.WithReadStallTimeout = withReadStallTimeout -<<<<<<< HEAD storageinternal.WithGRPCBidiReads = withGRPCBidiReads -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // getDynamicReadReqIncreaseRateFromEnv returns the value set in the env variable. @@ -83,13 +80,9 @@ type storageConfig struct { disableClientMetrics bool metricExporter *metric.Exporter metricInterval time.Duration -<<<<<<< HEAD manualReader *metric.ManualReader readStallTimeoutConfig *experimental.ReadStallTimeoutConfig grpcBidiReads bool -======= - readStallTimeoutConfig *experimental.ReadStallTimeoutConfig ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // newStorageConfig generates a new storageConfig with all the given @@ -202,7 +195,6 @@ func (w *withMetricExporterConfig) ApplyStorageOpt(c *storageConfig) { c.metricExporter = w.metricExporter } -<<<<<<< HEAD type withTestMetricReaderConfig struct { internaloption.EmbeddableAdapter // reader override @@ -217,8 +209,6 @@ func (w *withTestMetricReaderConfig) ApplyStorageOpt(c *storageConfig) { c.manualReader = w.metricReader } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // WithReadStallTimeout is an option that may be passed to [NewClient]. // It enables the client to retry the stalled read request, happens as part of // storage.Reader creation. 
As the name suggest, timeout is adjusted dynamically @@ -252,7 +242,6 @@ type withReadStallTimeoutConfig struct { func (wrstc *withReadStallTimeoutConfig) ApplyStorageOpt(config *storageConfig) { config.readStallTimeoutConfig = wrstc.readStallTimeoutConfig } -<<<<<<< HEAD func withGRPCBidiReads() option.ClientOption { return &withGRPCBidiReadsConfig{} @@ -265,5 +254,3 @@ type withGRPCBidiReadsConfig struct { func (w *withGRPCBidiReadsConfig) ApplyStorageOpt(config *storageConfig) { config.grpcBidiReads = true } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go index 7bcb66af70..6b14fd1dce 100644 --- a/vendor/cloud.google.com/go/storage/reader.go +++ b/vendor/cloud.google.com/go/storage/reader.go @@ -22,10 +22,7 @@ import ( "io/ioutil" "net/http" "strings" -<<<<<<< HEAD "sync" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "time" "cloud.google.com/go/internal/trace" @@ -144,10 +141,7 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) encryptionKey: o.encryptionKey, conds: o.conds, readCompressed: o.readCompressed, -<<<<<<< HEAD handle: &o.readHandle, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } r, err = o.c.tc.NewRangeReader(ctx, params, opts...) @@ -163,7 +157,6 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) return r, err } -<<<<<<< HEAD // NewMultiRangeDownloader creates a multi-range reader for an object. // Must be called on a gRPC client created using [NewGRPCClient]. // @@ -207,8 +200,6 @@ func (o *ObjectHandle) NewMultiRangeDownloader(ctx context.Context) (mrd *MultiR return r, err } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // decompressiveTranscoding returns true if the request was served decompressed // and different than its original storage form. This happens when the "Content-Encoding" // header is "gzip". @@ -276,23 +267,16 @@ var emptyBody = ioutil.NopCloser(strings.NewReader("")) // the stored CRC, returning an error from Read if there is a mismatch. This integrity check // is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding. type Reader struct { -<<<<<<< HEAD Attrs ReaderObjectAttrs objectMetadata *map[string]string -======= - Attrs ReaderObjectAttrs ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) seen, remain, size int64 checkCRC bool // Did we check the CRC? This is now only used by tests. reader io.ReadCloser ctx context.Context -<<<<<<< HEAD mu sync.Mutex handle *ReadHandle -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Close closes the Reader. It must be called when done reading. @@ -363,7 +347,6 @@ func (r *Reader) CacheControl() string { func (r *Reader) LastModified() (time.Time, error) { return r.Attrs.LastModified, nil } -<<<<<<< HEAD // Metadata returns user-provided metadata, in key/value pairs. 
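The `Metadata` accessor documented above pairs with the new `objectMetadata` field on `Reader`. A sketch, assuming a gRPC-based client where the field gets populated; `client`, `ctx`, and names are hypothetical:

```
// Sketch only: user-provided metadata surfaced on a Reader.
r, err := client.Bucket("example-bucket").Object("example-object").NewReader(ctx)
if err != nil {
    return err
}
defer r.Close()
meta := r.Metadata() // may be nil if the transport does not populate it
_ = meta
```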
// @@ -456,5 +439,3 @@ func (mrd *MultiRangeDownloader) Wait() { func (mrd *MultiRangeDownloader) GetHandle() []byte { return mrd.reader.getHandle() } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go index e43104e4a4..9c40ca1b47 100644 --- a/vendor/cloud.google.com/go/storage/storage.go +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -72,13 +72,8 @@ var ( // errMethodNotSupported indicates that the method called is not currently supported by the client. // TODO: Export this error when launching the transport-agnostic client. errMethodNotSupported = errors.New("storage: method is not currently supported") -<<<<<<< HEAD // errSignedURLMethodNotValid indicates that given HTTP method is not valid. errSignedURLMethodNotValid = fmt.Errorf("storage: HTTP method should be one of %v", reflect.ValueOf(signedURLMethods).MapKeys()) -======= - // errMethodNotValid indicates that given HTTP method is not valid. - errMethodNotValid = fmt.Errorf("storage: HTTP method should be one of %v", reflect.ValueOf(signedURLMethods).MapKeys()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) var userAgent = fmt.Sprintf("gcloud-golang-storage/%s", internal.Version) @@ -694,11 +689,7 @@ func validateOptions(opts *SignedURLOptions, now time.Time) error { } opts.Method = strings.ToUpper(opts.Method) if _, ok := signedURLMethods[opts.Method]; !ok { -<<<<<<< HEAD return errSignedURLMethodNotValid -======= - return errMethodNotValid ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if opts.Expires.IsZero() { return errors.New("storage: missing required expires option") @@ -946,12 +937,9 @@ func signedURLV2(bucket, name string, opts *SignedURLOptions) (string, error) { return u.String(), nil } -<<<<<<< HEAD // ReadHandle associated with the object. This is periodically refreshed. type ReadHandle []byte -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // ObjectHandle provides operations on an object in a Google Cloud Storage bucket. // Use BucketHandle.Object to get a handle. type ObjectHandle struct { @@ -967,7 +955,6 @@ type ObjectHandle struct { retry *retryConfig overrideRetention *bool softDeleted bool -<<<<<<< HEAD readHandle ReadHandle } @@ -985,8 +972,6 @@ func (o *ObjectHandle) ReadHandle(r ReadHandle) *ObjectHandle { o2 := *o o2.readHandle = r return &o2 -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // ACL provides access to the object's access control list. @@ -1191,7 +1176,6 @@ func (o *ObjectHandle) Restore(ctx context.Context, opts *RestoreOptions) (*Obje }, sOpts...) } -<<<<<<< HEAD // Move changes the name of the object to the destination name. // It can only be used to rename an object within the same bucket. The // bucket must have [HierarchicalNamespace] enabled to use this method. @@ -1224,8 +1208,6 @@ type MoveObjectDestination struct { Conditions *Conditions } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // NewWriter returns a storage Writer that writes to the GCS object // associated with this ObjectHandle. // @@ -2125,7 +2107,6 @@ func applyConds(method string, gen int64, conds *Conditions, call interface{}) e return nil } -<<<<<<< HEAD // applySourceConds modifies the provided call using the conditions in conds. // call is something that quacks like a *raw.WhateverCall. 
// This is specifically for calls like Rewrite and Move which have a source and destination @@ -2136,25 +2117,15 @@ func applySourceConds(method string, gen int64, conds *Conditions, call interfac if !setSourceGeneration(cval, gen) { return fmt.Errorf("storage: %s: source generation not supported", method) } -======= -func applySourceConds(gen int64, conds *Conditions, call *raw.ObjectsRewriteCall) error { - if gen >= 0 { - call.SourceGeneration(gen) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if conds == nil { return nil } -<<<<<<< HEAD if err := conds.validate(method); err != nil { -======= - if err := conds.validate("CopyTo source"); err != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err } switch { case conds.GenerationMatch != 0: -<<<<<<< HEAD if !setIfSourceGenerationMatch(cval, conds.GenerationMatch) { return fmt.Errorf("storage: %s: ifSourceGenerationMatch not supported", method) } @@ -2176,24 +2147,10 @@ func applySourceConds(gen int64, conds *Conditions, call *raw.ObjectsRewriteCall if !setIfSourceMetagenerationNotMatch(cval, conds.MetagenerationNotMatch) { return fmt.Errorf("storage: %s: ifSourceMetagenerationNotMatch not supported", method) } -======= - call.IfSourceGenerationMatch(conds.GenerationMatch) - case conds.GenerationNotMatch != 0: - call.IfSourceGenerationNotMatch(conds.GenerationNotMatch) - case conds.DoesNotExist: - call.IfSourceGenerationMatch(0) - } - switch { - case conds.MetagenerationMatch != 0: - call.IfSourceMetagenerationMatch(conds.MetagenerationMatch) - case conds.MetagenerationNotMatch != 0: - call.IfSourceMetagenerationNotMatch(conds.MetagenerationNotMatch) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return nil } -<<<<<<< HEAD // applySourceCondsProto validates and attempts to set the conditions on a protobuf // message using protobuf reflection. This is specifically for RPCs which have separate // preconditions for source and destination objects (e.g. Rewrite and Move). 
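How these source-object preconditions surface in the public API, as a sketch; `client`, `ctx`, `srcGen`, and `srcMetaGen` are hypothetical:

```
// Sketch only: a copy whose *source* must match a known generation and
// metageneration, exercising the IfSource* setters applied above.
src := client.Bucket("example-bucket").Object("src").
    If(storage.Conditions{GenerationMatch: srcGen, MetagenerationMatch: srcMetaGen})
dst := client.Bucket("example-bucket").Object("dst")
attrs, err := dst.CopierFrom(src).Run(ctx)
if err != nil {
    return err
}
_ = attrs
```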
@@ -2204,16 +2161,10 @@ func applySourceCondsProto(method string, gen int64, conds *Conditions, msg prot if !setConditionProtoField(rmsg, "source_generation", gen) { return fmt.Errorf("storage: %s: generation not supported", method) } -======= -func applySourceCondsProto(gen int64, conds *Conditions, call *storagepb.RewriteObjectRequest) error { - if gen >= 0 { - call.SourceGeneration = gen ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if conds == nil { return nil } -<<<<<<< HEAD if err := conds.validate(method); err != nil { return err } @@ -2241,24 +2192,6 @@ func applySourceCondsProto(gen int64, conds *Conditions, call *storagepb.Rewrite if !setConditionProtoField(rmsg, "if_source_metageneration_not_match", conds.MetagenerationNotMatch) { return fmt.Errorf("storage: %s: ifSourceMetagenerationNotMatch not supported", method) } -======= - if err := conds.validate("CopyTo source"); err != nil { - return err - } - switch { - case conds.GenerationMatch != 0: - call.IfSourceGenerationMatch = proto.Int64(conds.GenerationMatch) - case conds.GenerationNotMatch != 0: - call.IfSourceGenerationNotMatch = proto.Int64(conds.GenerationNotMatch) - case conds.DoesNotExist: - call.IfSourceGenerationMatch = proto.Int64(0) - } - switch { - case conds.MetagenerationMatch != 0: - call.IfSourceMetagenerationMatch = proto.Int64(conds.MetagenerationMatch) - case conds.MetagenerationNotMatch != 0: - call.IfSourceMetagenerationNotMatch = proto.Int64(conds.MetagenerationNotMatch) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return nil } @@ -2297,7 +2230,6 @@ func setIfMetagenerationNotMatch(cval reflect.Value, value interface{}) bool { return setCondition(cval.MethodByName("IfMetagenerationNotMatch"), value) } -<<<<<<< HEAD // More methods to set source object precondition fields (used by Rewrite and Move APIs). 
func setSourceGeneration(cval reflect.Value, value interface{}) bool { return setCondition(cval.MethodByName("SourceGeneration"), value) @@ -2319,8 +2251,6 @@ func setIfSourceMetagenerationNotMatch(cval reflect.Value, value interface{}) bo return setCondition(cval.MethodByName("IfSourceMetagenerationNotMatch"), value) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func setCondition(setter reflect.Value, value interface{}) bool { if setter.IsValid() { setter.Call([]reflect.Value{reflect.ValueOf(value)}) @@ -2394,11 +2324,7 @@ type withBackoff struct { } func (wb *withBackoff) apply(config *retryConfig) { -<<<<<<< HEAD config.backoff = &wb.backoff -======= - config.backoff = gaxBackoffFromStruct(&wb.backoff) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // WithMaxAttempts configures the maximum number of times an API call can be made @@ -2489,63 +2415,8 @@ func (wef *withErrorFunc) apply(config *retryConfig) { config.shouldRetry = wef.shouldRetry } -<<<<<<< HEAD type retryConfig struct { backoff *gax.Backoff -======= -type backoff interface { - Pause() time.Duration - - SetInitial(time.Duration) - SetMax(time.Duration) - SetMultiplier(float64) - - GetInitial() time.Duration - GetMax() time.Duration - GetMultiplier() float64 -} - -func gaxBackoffFromStruct(bo *gax.Backoff) *gaxBackoff { - if bo == nil { - return nil - } - b := &gaxBackoff{} - b.Backoff = *bo - return b -} - -// gaxBackoff is a gax.Backoff that implements the backoff interface -type gaxBackoff struct { - gax.Backoff -} - -func (b *gaxBackoff) SetInitial(i time.Duration) { - b.Initial = i -} - -func (b *gaxBackoff) SetMax(m time.Duration) { - b.Max = m -} - -func (b *gaxBackoff) SetMultiplier(m float64) { - b.Multiplier = m -} - -func (b *gaxBackoff) GetInitial() time.Duration { - return b.Initial -} - -func (b *gaxBackoff) GetMax() time.Duration { - return b.Max -} - -func (b *gaxBackoff) GetMultiplier() float64 { - return b.Multiplier -} - -type retryConfig struct { - backoff backoff ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) policy RetryPolicy shouldRetry func(err error) bool maxAttempts *int @@ -2555,7 +2426,6 @@ func (r *retryConfig) clone() *retryConfig { if r == nil { return nil } -<<<<<<< HEAD var bo *gax.Backoff if r.backoff != nil { @@ -2568,27 +2438,10 @@ func (r *retryConfig) clone() *retryConfig { return &retryConfig{ backoff: bo, -======= - newConfig := &retryConfig{ - backoff: nil, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) policy: r.policy, shouldRetry: r.shouldRetry, maxAttempts: r.maxAttempts, } -<<<<<<< HEAD -======= - - if r.backoff != nil { - bo := &gaxBackoff{} - bo.Initial = r.backoff.GetInitial() - bo.Max = r.backoff.GetMax() - bo.Multiplier = r.backoff.GetMultiplier() - newConfig.backoff = bo - } - - return newConfig ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // composeSourceObj wraps a *raw.ComposeRequestSourceObjects, but adds the methods diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go index c0c280693d..ae8f6a6392 100644 --- a/vendor/cloud.google.com/go/storage/writer.go +++ b/vendor/cloud.google.com/go/storage/writer.go @@ -102,7 +102,6 @@ type Writer struct { // is provided, then gax.DetermineContentType is called to sniff the type. ForceEmptyContentType bool -<<<<<<< HEAD // Append is a parameter to indicate whether the writer should use appendable // object semantics for the new object generation. 
Appendable objects are // visible on the first Write() call, and can be appended to until they are @@ -112,8 +111,6 @@ type Writer struct { // yet available for general use. Append bool -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // ProgressFunc can be used to monitor the progress of a large write // operation. If ProgressFunc is not nil and writing requires multiple // calls to the underlying service (see @@ -215,10 +212,7 @@ func (w *Writer) openWriter() (err error) { conds: w.o.conds, encryptionKey: w.o.encryptionKey, sendCRC32C: w.SendCRC32C, -<<<<<<< HEAD append: w.Append, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) donec: w.donec, setError: w.error, progress: w.progress, diff --git a/vendor/filippo.io/edwards25519/LICENSE b/vendor/filippo.io/edwards25519/LICENSE deleted file mode 100644 index 6a66aea5ea..0000000000 --- a/vendor/filippo.io/edwards25519/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/filippo.io/edwards25519/README.md b/vendor/filippo.io/edwards25519/README.md deleted file mode 100644 index 24e2457d87..0000000000 --- a/vendor/filippo.io/edwards25519/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# filippo.io/edwards25519 - -``` -import "filippo.io/edwards25519" -``` - -This library implements the edwards25519 elliptic curve, exposing the necessary APIs to build a wide array of higher-level primitives. -Read the docs at [pkg.go.dev/filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519). - -The code is originally derived from Adam Langley's internal implementation in the Go standard library, and includes George Tankersley's [performance improvements](https://golang.org/cl/71950). It was then further developed by Henry de Valence for use in ristretto255, and was finally [merged back into the Go standard library](https://golang.org/cl/276272) as of Go 1.17. It now tracks the upstream codebase and extends it with additional functionality. 
- -Most users don't need this package, and should instead use `crypto/ed25519` for signatures, `golang.org/x/crypto/curve25519` for Diffie-Hellman, or `github.com/gtank/ristretto255` for prime order group logic. However, for anyone currently using a fork of `crypto/internal/edwards25519`/`crypto/ed25519/internal/edwards25519` or `github.com/agl/edwards25519`, this package should be a safer, faster, and more powerful alternative. - -Since this package is meant to curb proliferation of edwards25519 implementations in the Go ecosystem, it welcomes requests for new APIs or reviewable performance improvements. diff --git a/vendor/filippo.io/edwards25519/doc.go b/vendor/filippo.io/edwards25519/doc.go deleted file mode 100644 index ab6aaebc0f..0000000000 --- a/vendor/filippo.io/edwards25519/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package edwards25519 implements group logic for the twisted Edwards curve -// -// -x^2 + y^2 = 1 + -(121665/121666)*x^2*y^2 -// -// This is better known as the Edwards curve equivalent to Curve25519, and is -// the curve used by the Ed25519 signature scheme. -// -// Most users don't need this package, and should instead use crypto/ed25519 for -// signatures, golang.org/x/crypto/curve25519 for Diffie-Hellman, or -// github.com/gtank/ristretto255 for prime order group logic. -// -// However, developers who do need to interact with low-level edwards25519 -// operations can use this package, which is an extended version of -// crypto/internal/edwards25519 from the standard library repackaged as -// an importable module. -package edwards25519 diff --git a/vendor/filippo.io/edwards25519/edwards25519.go b/vendor/filippo.io/edwards25519/edwards25519.go deleted file mode 100644 index a744da2c6d..0000000000 --- a/vendor/filippo.io/edwards25519/edwards25519.go +++ /dev/null @@ -1,427 +0,0 @@ -// Copyright (c) 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package edwards25519 - -import ( - "errors" - - "filippo.io/edwards25519/field" -) - -// Point types. - -type projP1xP1 struct { - X, Y, Z, T field.Element -} - -type projP2 struct { - X, Y, Z field.Element -} - -// Point represents a point on the edwards25519 curve. -// -// This type works similarly to math/big.Int, and all arguments and receivers -// are allowed to alias. -// -// The zero value is NOT valid, and it may be used only as a receiver. -type Point struct { - // Make the type not comparable (i.e. used with == or as a map key), as - // equivalent points can be represented by different Go values. - _ incomparable - - // The point is internally represented in extended coordinates (X, Y, Z, T) - // where x = X/Z, y = Y/Z, and xy = T/Z per https://eprint.iacr.org/2008/522. - x, y, z, t field.Element -} - -type incomparable [0]func() - -func checkInitialized(points ...*Point) { - for _, p := range points { - if p.x == (field.Element{}) && p.y == (field.Element{}) { - panic("edwards25519: use of uninitialized Point") - } - } -} - -type projCached struct { - YplusX, YminusX, Z, T2d field.Element -} - -type affineCached struct { - YplusX, YminusX, T2d field.Element -} - -// Constructors. - -func (v *projP2) Zero() *projP2 { - v.X.Zero() - v.Y.One() - v.Z.One() - return v -} - -// identity is the point at infinity. 
-var identity, _ = new(Point).SetBytes([]byte{ - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}) - -// NewIdentityPoint returns a new Point set to the identity. -func NewIdentityPoint() *Point { - return new(Point).Set(identity) -} - -// generator is the canonical curve basepoint. See TestGenerator for the -// correspondence of this encoding with the values in RFC 8032. -var generator, _ = new(Point).SetBytes([]byte{ - 0x58, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, - 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, - 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, - 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66}) - -// NewGeneratorPoint returns a new Point set to the canonical generator. -func NewGeneratorPoint() *Point { - return new(Point).Set(generator) -} - -func (v *projCached) Zero() *projCached { - v.YplusX.One() - v.YminusX.One() - v.Z.One() - v.T2d.Zero() - return v -} - -func (v *affineCached) Zero() *affineCached { - v.YplusX.One() - v.YminusX.One() - v.T2d.Zero() - return v -} - -// Assignments. - -// Set sets v = u, and returns v. -func (v *Point) Set(u *Point) *Point { - *v = *u - return v -} - -// Encoding. - -// Bytes returns the canonical 32-byte encoding of v, according to RFC 8032, -// Section 5.1.2. -func (v *Point) Bytes() []byte { - // This function is outlined to make the allocations inline in the caller - // rather than happen on the heap. - var buf [32]byte - return v.bytes(&buf) -} - -func (v *Point) bytes(buf *[32]byte) []byte { - checkInitialized(v) - - var zInv, x, y field.Element - zInv.Invert(&v.z) // zInv = 1 / Z - x.Multiply(&v.x, &zInv) // x = X / Z - y.Multiply(&v.y, &zInv) // y = Y / Z - - out := copyFieldElement(buf, &y) - out[31] |= byte(x.IsNegative() << 7) - return out -} - -var feOne = new(field.Element).One() - -// SetBytes sets v = x, where x is a 32-byte encoding of v. If x does not -// represent a valid point on the curve, SetBytes returns nil and an error and -// the receiver is unchanged. Otherwise, SetBytes returns v. -// -// Note that SetBytes accepts all non-canonical encodings of valid points. -// That is, it follows decoding rules that match most implementations in -// the ecosystem rather than RFC 8032. -func (v *Point) SetBytes(x []byte) (*Point, error) { - // Specifically, the non-canonical encodings that are accepted are - // 1) the ones where the field element is not reduced (see the - // (*field.Element).SetBytes docs) and - // 2) the ones where the x-coordinate is zero and the sign bit is set. - // - // Read more at https://hdevalence.ca/blog/2020-10-04-its-25519am, - // specifically the "Canonical A, R" section. - - y, err := new(field.Element).SetBytes(x) - if err != nil { - return nil, errors.New("edwards25519: invalid point encoding length") - } - - // -x² + y² = 1 + dx²y² - // x² + dx²y² = x²(dy² + 1) = y² - 1 - // x² = (y² - 1) / (dy² + 1) - - // u = y² - 1 - y2 := new(field.Element).Square(y) - u := new(field.Element).Subtract(y2, feOne) - - // v = dy² + 1 - vv := new(field.Element).Multiply(y2, d) - vv = vv.Add(vv, feOne) - - // x = +√(u/v) - xx, wasSquare := new(field.Element).SqrtRatio(u, vv) - if wasSquare == 0 { - return nil, errors.New("edwards25519: invalid point encoding") - } - - // Select the negative square root if the sign bit is set. 
- xxNeg := new(field.Element).Negate(xx) - xx = xx.Select(xxNeg, xx, int(x[31]>>7)) - - v.x.Set(xx) - v.y.Set(y) - v.z.One() - v.t.Multiply(xx, y) // xy = T / Z - - return v, nil -} - -func copyFieldElement(buf *[32]byte, v *field.Element) []byte { - copy(buf[:], v.Bytes()) - return buf[:] -} - -// Conversions. - -func (v *projP2) FromP1xP1(p *projP1xP1) *projP2 { - v.X.Multiply(&p.X, &p.T) - v.Y.Multiply(&p.Y, &p.Z) - v.Z.Multiply(&p.Z, &p.T) - return v -} - -func (v *projP2) FromP3(p *Point) *projP2 { - v.X.Set(&p.x) - v.Y.Set(&p.y) - v.Z.Set(&p.z) - return v -} - -func (v *Point) fromP1xP1(p *projP1xP1) *Point { - v.x.Multiply(&p.X, &p.T) - v.y.Multiply(&p.Y, &p.Z) - v.z.Multiply(&p.Z, &p.T) - v.t.Multiply(&p.X, &p.Y) - return v -} - -func (v *Point) fromP2(p *projP2) *Point { - v.x.Multiply(&p.X, &p.Z) - v.y.Multiply(&p.Y, &p.Z) - v.z.Square(&p.Z) - v.t.Multiply(&p.X, &p.Y) - return v -} - -// d is a constant in the curve equation. -var d, _ = new(field.Element).SetBytes([]byte{ - 0xa3, 0x78, 0x59, 0x13, 0xca, 0x4d, 0xeb, 0x75, - 0xab, 0xd8, 0x41, 0x41, 0x4d, 0x0a, 0x70, 0x00, - 0x98, 0xe8, 0x79, 0x77, 0x79, 0x40, 0xc7, 0x8c, - 0x73, 0xfe, 0x6f, 0x2b, 0xee, 0x6c, 0x03, 0x52}) -var d2 = new(field.Element).Add(d, d) - -func (v *projCached) FromP3(p *Point) *projCached { - v.YplusX.Add(&p.y, &p.x) - v.YminusX.Subtract(&p.y, &p.x) - v.Z.Set(&p.z) - v.T2d.Multiply(&p.t, d2) - return v -} - -func (v *affineCached) FromP3(p *Point) *affineCached { - v.YplusX.Add(&p.y, &p.x) - v.YminusX.Subtract(&p.y, &p.x) - v.T2d.Multiply(&p.t, d2) - - var invZ field.Element - invZ.Invert(&p.z) - v.YplusX.Multiply(&v.YplusX, &invZ) - v.YminusX.Multiply(&v.YminusX, &invZ) - v.T2d.Multiply(&v.T2d, &invZ) - return v -} - -// (Re)addition and subtraction. - -// Add sets v = p + q, and returns v. -func (v *Point) Add(p, q *Point) *Point { - checkInitialized(p, q) - qCached := new(projCached).FromP3(q) - result := new(projP1xP1).Add(p, qCached) - return v.fromP1xP1(result) -} - -// Subtract sets v = p - q, and returns v. 
-func (v *Point) Subtract(p, q *Point) *Point { - checkInitialized(p, q) - qCached := new(projCached).FromP3(q) - result := new(projP1xP1).Sub(p, qCached) - return v.fromP1xP1(result) -} - -func (v *projP1xP1) Add(p *Point, q *projCached) *projP1xP1 { - var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element - - YplusX.Add(&p.y, &p.x) - YminusX.Subtract(&p.y, &p.x) - - PP.Multiply(&YplusX, &q.YplusX) - MM.Multiply(&YminusX, &q.YminusX) - TT2d.Multiply(&p.t, &q.T2d) - ZZ2.Multiply(&p.z, &q.Z) - - ZZ2.Add(&ZZ2, &ZZ2) - - v.X.Subtract(&PP, &MM) - v.Y.Add(&PP, &MM) - v.Z.Add(&ZZ2, &TT2d) - v.T.Subtract(&ZZ2, &TT2d) - return v -} - -func (v *projP1xP1) Sub(p *Point, q *projCached) *projP1xP1 { - var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element - - YplusX.Add(&p.y, &p.x) - YminusX.Subtract(&p.y, &p.x) - - PP.Multiply(&YplusX, &q.YminusX) // flipped sign - MM.Multiply(&YminusX, &q.YplusX) // flipped sign - TT2d.Multiply(&p.t, &q.T2d) - ZZ2.Multiply(&p.z, &q.Z) - - ZZ2.Add(&ZZ2, &ZZ2) - - v.X.Subtract(&PP, &MM) - v.Y.Add(&PP, &MM) - v.Z.Subtract(&ZZ2, &TT2d) // flipped sign - v.T.Add(&ZZ2, &TT2d) // flipped sign - return v -} - -func (v *projP1xP1) AddAffine(p *Point, q *affineCached) *projP1xP1 { - var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element - - YplusX.Add(&p.y, &p.x) - YminusX.Subtract(&p.y, &p.x) - - PP.Multiply(&YplusX, &q.YplusX) - MM.Multiply(&YminusX, &q.YminusX) - TT2d.Multiply(&p.t, &q.T2d) - - Z2.Add(&p.z, &p.z) - - v.X.Subtract(&PP, &MM) - v.Y.Add(&PP, &MM) - v.Z.Add(&Z2, &TT2d) - v.T.Subtract(&Z2, &TT2d) - return v -} - -func (v *projP1xP1) SubAffine(p *Point, q *affineCached) *projP1xP1 { - var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element - - YplusX.Add(&p.y, &p.x) - YminusX.Subtract(&p.y, &p.x) - - PP.Multiply(&YplusX, &q.YminusX) // flipped sign - MM.Multiply(&YminusX, &q.YplusX) // flipped sign - TT2d.Multiply(&p.t, &q.T2d) - - Z2.Add(&p.z, &p.z) - - v.X.Subtract(&PP, &MM) - v.Y.Add(&PP, &MM) - v.Z.Subtract(&Z2, &TT2d) // flipped sign - v.T.Add(&Z2, &TT2d) // flipped sign - return v -} - -// Doubling. - -func (v *projP1xP1) Double(p *projP2) *projP1xP1 { - var XX, YY, ZZ2, XplusYsq field.Element - - XX.Square(&p.X) - YY.Square(&p.Y) - ZZ2.Square(&p.Z) - ZZ2.Add(&ZZ2, &ZZ2) - XplusYsq.Add(&p.X, &p.Y) - XplusYsq.Square(&XplusYsq) - - v.Y.Add(&YY, &XX) - v.Z.Subtract(&YY, &XX) - - v.X.Subtract(&XplusYsq, &v.Y) - v.T.Subtract(&ZZ2, &v.Z) - return v -} - -// Negation. - -// Negate sets v = -p, and returns v. -func (v *Point) Negate(p *Point) *Point { - checkInitialized(p) - v.x.Negate(&p.x) - v.y.Set(&p.y) - v.z.Set(&p.z) - v.t.Negate(&p.t) - return v -} - -// Equal returns 1 if v is equivalent to u, and 0 otherwise. -func (v *Point) Equal(u *Point) int { - checkInitialized(v, u) - - var t1, t2, t3, t4 field.Element - t1.Multiply(&v.x, &u.z) - t2.Multiply(&u.x, &v.z) - t3.Multiply(&v.y, &u.z) - t4.Multiply(&u.y, &v.z) - - return t1.Equal(&t2) & t3.Equal(&t4) -} - -// Constant-time operations - -// Select sets v to a if cond == 1 and to b if cond == 0. -func (v *projCached) Select(a, b *projCached, cond int) *projCached { - v.YplusX.Select(&a.YplusX, &b.YplusX, cond) - v.YminusX.Select(&a.YminusX, &b.YminusX, cond) - v.Z.Select(&a.Z, &b.Z, cond) - v.T2d.Select(&a.T2d, &b.T2d, cond) - return v -} - -// Select sets v to a if cond == 1 and to b if cond == 0. 
-func (v *affineCached) Select(a, b *affineCached, cond int) *affineCached { - v.YplusX.Select(&a.YplusX, &b.YplusX, cond) - v.YminusX.Select(&a.YminusX, &b.YminusX, cond) - v.T2d.Select(&a.T2d, &b.T2d, cond) - return v -} - -// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0. -func (v *projCached) CondNeg(cond int) *projCached { - v.YplusX.Swap(&v.YminusX, cond) - v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond) - return v -} - -// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0. -func (v *affineCached) CondNeg(cond int) *affineCached { - v.YplusX.Swap(&v.YminusX, cond) - v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond) - return v -} diff --git a/vendor/filippo.io/edwards25519/extra.go b/vendor/filippo.io/edwards25519/extra.go deleted file mode 100644 index d152d68ff4..0000000000 --- a/vendor/filippo.io/edwards25519/extra.go +++ /dev/null @@ -1,349 +0,0 @@ -// Copyright (c) 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package edwards25519 - -// This file contains additional functionality that is not included in the -// upstream crypto/internal/edwards25519 package. - -import ( - "errors" - - "filippo.io/edwards25519/field" -) - -// ExtendedCoordinates returns v in extended coordinates (X:Y:Z:T) where -// x = X/Z, y = Y/Z, and xy = T/Z as in https://eprint.iacr.org/2008/522. -func (v *Point) ExtendedCoordinates() (X, Y, Z, T *field.Element) { - // This function is outlined to make the allocations inline in the caller - // rather than happen on the heap. Don't change the style without making - // sure it doesn't increase the inliner cost. - var e [4]field.Element - X, Y, Z, T = v.extendedCoordinates(&e) - return -} - -func (v *Point) extendedCoordinates(e *[4]field.Element) (X, Y, Z, T *field.Element) { - checkInitialized(v) - X = e[0].Set(&v.x) - Y = e[1].Set(&v.y) - Z = e[2].Set(&v.z) - T = e[3].Set(&v.t) - return -} - -// SetExtendedCoordinates sets v = (X:Y:Z:T) in extended coordinates where -// x = X/Z, y = Y/Z, and xy = T/Z as in https://eprint.iacr.org/2008/522. -// -// If the coordinates are invalid or don't represent a valid point on the curve, -// SetExtendedCoordinates returns nil and an error and the receiver is -// unchanged. Otherwise, SetExtendedCoordinates returns v. -func (v *Point) SetExtendedCoordinates(X, Y, Z, T *field.Element) (*Point, error) { - if !isOnCurve(X, Y, Z, T) { - return nil, errors.New("edwards25519: invalid point coordinates") - } - v.x.Set(X) - v.y.Set(Y) - v.z.Set(Z) - v.t.Set(T) - return v, nil -} - -func isOnCurve(X, Y, Z, T *field.Element) bool { - var lhs, rhs field.Element - XX := new(field.Element).Square(X) - YY := new(field.Element).Square(Y) - ZZ := new(field.Element).Square(Z) - TT := new(field.Element).Square(T) - // -x² + y² = 1 + dx²y² - // -(X/Z)² + (Y/Z)² = 1 + d(T/Z)² - // -X² + Y² = Z² + dT² - lhs.Subtract(YY, XX) - rhs.Multiply(d, TT).Add(&rhs, ZZ) - if lhs.Equal(&rhs) != 1 { - return false - } - // xy = T/Z - // XY/Z² = T/Z - // XY = TZ - lhs.Multiply(X, Y) - rhs.Multiply(T, Z) - return lhs.Equal(&rhs) == 1 -} - -// BytesMontgomery converts v to a point on the birationally-equivalent -// Curve25519 Montgomery curve, and returns its canonical 32 bytes encoding -// according to RFC 7748. -// -// Note that BytesMontgomery only encodes the u-coordinate, so v and -v encode -// to the same value. 
If v is the identity point, BytesMontgomery returns 32 -// zero bytes, analogously to the X25519 function. -// -// The lack of an inverse operation (such as SetMontgomeryBytes) is deliberate: -// while every valid edwards25519 point has a unique u-coordinate Montgomery -// encoding, X25519 accepts inputs on the quadratic twist, which don't correspond -// to any edwards25519 point, and every other X25519 input corresponds to two -// edwards25519 points. -func (v *Point) BytesMontgomery() []byte { - // This function is outlined to make the allocations inline in the caller - // rather than happen on the heap. - var buf [32]byte - return v.bytesMontgomery(&buf) -} - -func (v *Point) bytesMontgomery(buf *[32]byte) []byte { - checkInitialized(v) - - // RFC 7748, Section 4.1 provides the bilinear map to calculate the - // Montgomery u-coordinate - // - // u = (1 + y) / (1 - y) - // - // where y = Y / Z. - - var y, recip, u field.Element - - y.Multiply(&v.y, y.Invert(&v.z)) // y = Y / Z - recip.Invert(recip.Subtract(feOne, &y)) // r = 1/(1 - y) - u.Multiply(u.Add(feOne, &y), &recip) // u = (1 + y)*r - - return copyFieldElement(buf, &u) -} - -// MultByCofactor sets v = 8 * p, and returns v. -func (v *Point) MultByCofactor(p *Point) *Point { - checkInitialized(p) - result := projP1xP1{} - pp := (&projP2{}).FromP3(p) - result.Double(pp) - pp.FromP1xP1(&result) - result.Double(pp) - pp.FromP1xP1(&result) - result.Double(pp) - return v.fromP1xP1(&result) -} - -// Given k > 0, set s = s**(2*i). -func (s *Scalar) pow2k(k int) { - for i := 0; i < k; i++ { - s.Multiply(s, s) - } -} - -// Invert sets s to the inverse of a nonzero scalar v, and returns s. -// -// If t is zero, Invert returns zero. -func (s *Scalar) Invert(t *Scalar) *Scalar { - // Uses a hardcoded sliding window of width 4. - var table [8]Scalar - var tt Scalar - tt.Multiply(t, t) - table[0] = *t - for i := 0; i < 7; i++ { - table[i+1].Multiply(&table[i], &tt) - } - // Now table = [t**1, t**3, t**5, t**7, t**9, t**11, t**13, t**15] - // so t**k = t[k/2] for odd k - - // To compute the sliding window digits, use the following Sage script: - - // sage: import itertools - // sage: def sliding_window(w,k): - // ....: digits = [] - // ....: while k > 0: - // ....: if k % 2 == 1: - // ....: kmod = k % (2**w) - // ....: digits.append(kmod) - // ....: k = k - kmod - // ....: else: - // ....: digits.append(0) - // ....: k = k // 2 - // ....: return digits - - // Now we can compute s roughly as follows: - - // sage: s = 1 - // sage: for coeff in reversed(sliding_window(4,l-2)): - // ....: s = s*s - // ....: if coeff > 0 : - // ....: s = s*t**coeff - - // This works on one bit at a time, with many runs of zeros. - // The digits can be collapsed into [(count, coeff)] as follows: - - // sage: [(len(list(group)),d) for d,group in itertools.groupby(sliding_window(4,l-2))] - - // Entries of the form (k, 0) turn into pow2k(k) - // Entries of the form (1, coeff) turn into a squaring and then a table lookup. - // We can fold the squaring into the previous pow2k(k) as pow2k(k+1). 
- - *s = table[1/2] - s.pow2k(127 + 1) - s.Multiply(s, &table[1/2]) - s.pow2k(4 + 1) - s.Multiply(s, &table[9/2]) - s.pow2k(3 + 1) - s.Multiply(s, &table[11/2]) - s.pow2k(3 + 1) - s.Multiply(s, &table[13/2]) - s.pow2k(3 + 1) - s.Multiply(s, &table[15/2]) - s.pow2k(4 + 1) - s.Multiply(s, &table[7/2]) - s.pow2k(4 + 1) - s.Multiply(s, &table[15/2]) - s.pow2k(3 + 1) - s.Multiply(s, &table[5/2]) - s.pow2k(3 + 1) - s.Multiply(s, &table[1/2]) - s.pow2k(4 + 1) - s.Multiply(s, &table[15/2]) - s.pow2k(4 + 1) - s.Multiply(s, &table[15/2]) - s.pow2k(4 + 1) - s.Multiply(s, &table[7/2]) - s.pow2k(3 + 1) - s.Multiply(s, &table[3/2]) - s.pow2k(4 + 1) - s.Multiply(s, &table[11/2]) - s.pow2k(5 + 1) - s.Multiply(s, &table[11/2]) - s.pow2k(9 + 1) - s.Multiply(s, &table[9/2]) - s.pow2k(3 + 1) - s.Multiply(s, &table[3/2]) - s.pow2k(4 + 1) - s.Multiply(s, &table[3/2]) - s.pow2k(4 + 1) - s.Multiply(s, &table[3/2]) - s.pow2k(4 + 1) - s.Multiply(s, &table[9/2]) - s.pow2k(3 + 1) - s.Multiply(s, &table[7/2]) - s.pow2k(3 + 1) - s.Multiply(s, &table[3/2]) - s.pow2k(3 + 1) - s.Multiply(s, &table[13/2]) - s.pow2k(3 + 1) - s.Multiply(s, &table[7/2]) - s.pow2k(4 + 1) - s.Multiply(s, &table[9/2]) - s.pow2k(3 + 1) - s.Multiply(s, &table[15/2]) - s.pow2k(4 + 1) - s.Multiply(s, &table[11/2]) - - return s -} - -// MultiScalarMult sets v = sum(scalars[i] * points[i]), and returns v. -// -// Execution time depends only on the lengths of the two slices, which must match. -func (v *Point) MultiScalarMult(scalars []*Scalar, points []*Point) *Point { - if len(scalars) != len(points) { - panic("edwards25519: called MultiScalarMult with different size inputs") - } - checkInitialized(points...) - - // Proceed as in the single-base case, but share doublings - // between each point in the multiscalar equation. - - // Build lookup tables for each point - tables := make([]projLookupTable, len(points)) - for i := range tables { - tables[i].FromP3(points[i]) - } - // Compute signed radix-16 digits for each scalar - digits := make([][64]int8, len(scalars)) - for i := range digits { - digits[i] = scalars[i].signedRadix16() - } - - // Unwrap first loop iteration to save computing 16*identity - multiple := &projCached{} - tmp1 := &projP1xP1{} - tmp2 := &projP2{} - // Lookup-and-add the appropriate multiple of each input point - for j := range tables { - tables[j].SelectInto(multiple, digits[j][63]) - tmp1.Add(v, multiple) // tmp1 = v + x_(j,63)*Q in P1xP1 coords - v.fromP1xP1(tmp1) // update v - } - tmp2.FromP3(v) // set up tmp2 = v in P2 coords for next iteration - for i := 62; i >= 0; i-- { - tmp1.Double(tmp2) // tmp1 = 2*(prev) in P1xP1 coords - tmp2.FromP1xP1(tmp1) // tmp2 = 2*(prev) in P2 coords - tmp1.Double(tmp2) // tmp1 = 4*(prev) in P1xP1 coords - tmp2.FromP1xP1(tmp1) // tmp2 = 4*(prev) in P2 coords - tmp1.Double(tmp2) // tmp1 = 8*(prev) in P1xP1 coords - tmp2.FromP1xP1(tmp1) // tmp2 = 8*(prev) in P2 coords - tmp1.Double(tmp2) // tmp1 = 16*(prev) in P1xP1 coords - v.fromP1xP1(tmp1) // v = 16*(prev) in P3 coords - // Lookup-and-add the appropriate multiple of each input point - for j := range tables { - tables[j].SelectInto(multiple, digits[j][i]) - tmp1.Add(v, multiple) // tmp1 = v + x_(j,i)*Q in P1xP1 coords - v.fromP1xP1(tmp1) // update v - } - tmp2.FromP3(v) // set up tmp2 = v in P2 coords for next iteration - } - return v -} - -// VarTimeMultiScalarMult sets v = sum(scalars[i] * points[i]), and returns v. -// -// Execution time depends on the inputs. 
-func (v *Point) VarTimeMultiScalarMult(scalars []*Scalar, points []*Point) *Point { - if len(scalars) != len(points) { - panic("edwards25519: called VarTimeMultiScalarMult with different size inputs") - } - checkInitialized(points...) - - // Generalize double-base NAF computation to arbitrary sizes. - // Here all the points are dynamic, so we only use the smaller - // tables. - - // Build lookup tables for each point - tables := make([]nafLookupTable5, len(points)) - for i := range tables { - tables[i].FromP3(points[i]) - } - // Compute a NAF for each scalar - nafs := make([][256]int8, len(scalars)) - for i := range nafs { - nafs[i] = scalars[i].nonAdjacentForm(5) - } - - multiple := &projCached{} - tmp1 := &projP1xP1{} - tmp2 := &projP2{} - tmp2.Zero() - - // Move from high to low bits, doubling the accumulator - // at each iteration and checking whether there is a nonzero - // coefficient to look up a multiple of. - // - // Skip trying to find the first nonzero coefficent, because - // searching might be more work than a few extra doublings. - for i := 255; i >= 0; i-- { - tmp1.Double(tmp2) - - for j := range nafs { - if nafs[j][i] > 0 { - v.fromP1xP1(tmp1) - tables[j].SelectInto(multiple, nafs[j][i]) - tmp1.Add(v, multiple) - } else if nafs[j][i] < 0 { - v.fromP1xP1(tmp1) - tables[j].SelectInto(multiple, -nafs[j][i]) - tmp1.Sub(v, multiple) - } - } - - tmp2.FromP1xP1(tmp1) - } - - v.fromP2(tmp2) - return v -} diff --git a/vendor/filippo.io/edwards25519/field/fe.go b/vendor/filippo.io/edwards25519/field/fe.go deleted file mode 100644 index 5518ef2b90..0000000000 --- a/vendor/filippo.io/edwards25519/field/fe.go +++ /dev/null @@ -1,420 +0,0 @@ -// Copyright (c) 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package field implements fast arithmetic modulo 2^255-19. -package field - -import ( - "crypto/subtle" - "encoding/binary" - "errors" - "math/bits" -) - -// Element represents an element of the field GF(2^255-19). Note that this -// is not a cryptographically secure group, and should only be used to interact -// with edwards25519.Point coordinates. -// -// This type works similarly to math/big.Int, and all arguments and receivers -// are allowed to alias. -// -// The zero value is a valid zero element. -type Element struct { - // An element t represents the integer - // t.l0 + t.l1*2^51 + t.l2*2^102 + t.l3*2^153 + t.l4*2^204 - // - // Between operations, all limbs are expected to be lower than 2^52. - l0 uint64 - l1 uint64 - l2 uint64 - l3 uint64 - l4 uint64 -} - -const maskLow51Bits uint64 = (1 << 51) - 1 - -var feZero = &Element{0, 0, 0, 0, 0} - -// Zero sets v = 0, and returns v. -func (v *Element) Zero() *Element { - *v = *feZero - return v -} - -var feOne = &Element{1, 0, 0, 0, 0} - -// One sets v = 1, and returns v. -func (v *Element) One() *Element { - *v = *feOne - return v -} - -// reduce reduces v modulo 2^255 - 19 and returns it. -func (v *Element) reduce() *Element { - v.carryPropagate() - - // After the light reduction we now have a field element representation - // v < 2^255 + 2^13 * 19, but need v < 2^255 - 19. - - // If v >= 2^255 - 19, then v + 19 >= 2^255, which would overflow 2^255 - 1, - // generating a carry. That is, c will be 0 if v < 2^255 - 19, and 1 otherwise. - c := (v.l0 + 19) >> 51 - c = (v.l1 + c) >> 51 - c = (v.l2 + c) >> 51 - c = (v.l3 + c) >> 51 - c = (v.l4 + c) >> 51 - - // If v < 2^255 - 19 and c = 0, this will be a no-op. 
Otherwise, it's - // effectively applying the reduction identity to the carry. - v.l0 += 19 * c - - v.l1 += v.l0 >> 51 - v.l0 = v.l0 & maskLow51Bits - v.l2 += v.l1 >> 51 - v.l1 = v.l1 & maskLow51Bits - v.l3 += v.l2 >> 51 - v.l2 = v.l2 & maskLow51Bits - v.l4 += v.l3 >> 51 - v.l3 = v.l3 & maskLow51Bits - // no additional carry - v.l4 = v.l4 & maskLow51Bits - - return v -} - -// Add sets v = a + b, and returns v. -func (v *Element) Add(a, b *Element) *Element { - v.l0 = a.l0 + b.l0 - v.l1 = a.l1 + b.l1 - v.l2 = a.l2 + b.l2 - v.l3 = a.l3 + b.l3 - v.l4 = a.l4 + b.l4 - // Using the generic implementation here is actually faster than the - // assembly. Probably because the body of this function is so simple that - // the compiler can figure out better optimizations by inlining the carry - // propagation. - return v.carryPropagateGeneric() -} - -// Subtract sets v = a - b, and returns v. -func (v *Element) Subtract(a, b *Element) *Element { - // We first add 2 * p, to guarantee the subtraction won't underflow, and - // then subtract b (which can be up to 2^255 + 2^13 * 19). - v.l0 = (a.l0 + 0xFFFFFFFFFFFDA) - b.l0 - v.l1 = (a.l1 + 0xFFFFFFFFFFFFE) - b.l1 - v.l2 = (a.l2 + 0xFFFFFFFFFFFFE) - b.l2 - v.l3 = (a.l3 + 0xFFFFFFFFFFFFE) - b.l3 - v.l4 = (a.l4 + 0xFFFFFFFFFFFFE) - b.l4 - return v.carryPropagate() -} - -// Negate sets v = -a, and returns v. -func (v *Element) Negate(a *Element) *Element { - return v.Subtract(feZero, a) -} - -// Invert sets v = 1/z mod p, and returns v. -// -// If z == 0, Invert returns v = 0. -func (v *Element) Invert(z *Element) *Element { - // Inversion is implemented as exponentiation with exponent p − 2. It uses the - // same sequence of 255 squarings and 11 multiplications as [Curve25519]. - var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t Element - - z2.Square(z) // 2 - t.Square(&z2) // 4 - t.Square(&t) // 8 - z9.Multiply(&t, z) // 9 - z11.Multiply(&z9, &z2) // 11 - t.Square(&z11) // 22 - z2_5_0.Multiply(&t, &z9) // 31 = 2^5 - 2^0 - - t.Square(&z2_5_0) // 2^6 - 2^1 - for i := 0; i < 4; i++ { - t.Square(&t) // 2^10 - 2^5 - } - z2_10_0.Multiply(&t, &z2_5_0) // 2^10 - 2^0 - - t.Square(&z2_10_0) // 2^11 - 2^1 - for i := 0; i < 9; i++ { - t.Square(&t) // 2^20 - 2^10 - } - z2_20_0.Multiply(&t, &z2_10_0) // 2^20 - 2^0 - - t.Square(&z2_20_0) // 2^21 - 2^1 - for i := 0; i < 19; i++ { - t.Square(&t) // 2^40 - 2^20 - } - t.Multiply(&t, &z2_20_0) // 2^40 - 2^0 - - t.Square(&t) // 2^41 - 2^1 - for i := 0; i < 9; i++ { - t.Square(&t) // 2^50 - 2^10 - } - z2_50_0.Multiply(&t, &z2_10_0) // 2^50 - 2^0 - - t.Square(&z2_50_0) // 2^51 - 2^1 - for i := 0; i < 49; i++ { - t.Square(&t) // 2^100 - 2^50 - } - z2_100_0.Multiply(&t, &z2_50_0) // 2^100 - 2^0 - - t.Square(&z2_100_0) // 2^101 - 2^1 - for i := 0; i < 99; i++ { - t.Square(&t) // 2^200 - 2^100 - } - t.Multiply(&t, &z2_100_0) // 2^200 - 2^0 - - t.Square(&t) // 2^201 - 2^1 - for i := 0; i < 49; i++ { - t.Square(&t) // 2^250 - 2^50 - } - t.Multiply(&t, &z2_50_0) // 2^250 - 2^0 - - t.Square(&t) // 2^251 - 2^1 - t.Square(&t) // 2^252 - 2^2 - t.Square(&t) // 2^253 - 2^3 - t.Square(&t) // 2^254 - 2^4 - t.Square(&t) // 2^255 - 2^5 - - return v.Multiply(&t, &z11) // 2^255 - 21 -} - -// Set sets v = a, and returns v. -func (v *Element) Set(a *Element) *Element { - *v = *a - return v -} - -// SetBytes sets v to x, where x is a 32-byte little-endian encoding. If x is -// not of the right length, SetBytes returns nil and an error, and the -// receiver is unchanged. 
-// -// Consistent with RFC 7748, the most significant bit (the high bit of the -// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1) -// are accepted. Note that this is laxer than specified by RFC 8032, but -// consistent with most Ed25519 implementations. -func (v *Element) SetBytes(x []byte) (*Element, error) { - if len(x) != 32 { - return nil, errors.New("edwards25519: invalid field element input size") - } - - // Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51). - v.l0 = binary.LittleEndian.Uint64(x[0:8]) - v.l0 &= maskLow51Bits - // Bits 51:102 (bytes 6:14, bits 48:112, shift 3, mask 51). - v.l1 = binary.LittleEndian.Uint64(x[6:14]) >> 3 - v.l1 &= maskLow51Bits - // Bits 102:153 (bytes 12:20, bits 96:160, shift 6, mask 51). - v.l2 = binary.LittleEndian.Uint64(x[12:20]) >> 6 - v.l2 &= maskLow51Bits - // Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51). - v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1 - v.l3 &= maskLow51Bits - // Bits 204:255 (bytes 24:32, bits 192:256, shift 12, mask 51). - // Note: not bytes 25:33, shift 4, to avoid overread. - v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12 - v.l4 &= maskLow51Bits - - return v, nil -} - -// Bytes returns the canonical 32-byte little-endian encoding of v. -func (v *Element) Bytes() []byte { - // This function is outlined to make the allocations inline in the caller - // rather than happen on the heap. - var out [32]byte - return v.bytes(&out) -} - -func (v *Element) bytes(out *[32]byte) []byte { - t := *v - t.reduce() - - var buf [8]byte - for i, l := range [5]uint64{t.l0, t.l1, t.l2, t.l3, t.l4} { - bitsOffset := i * 51 - binary.LittleEndian.PutUint64(buf[:], l<<uint(bitsOffset%8)) - for i, bb := range buf { - off := bitsOffset/8 + i - if off >= len(out) { - break - } - out[off] |= bb - } - } - - return out[:] -} - -// Equal returns 1 if v and u are equal, and 0 otherwise. -func (v *Element) Equal(u *Element) int { - sa, sv := u.Bytes(), v.Bytes() - return subtle.ConstantTimeCompare(sa, sv) -} - -// mask64Bits returns 0xffffffff if cond is 1, and 0 otherwise. -func mask64Bits(cond int) uint64 { return ^(uint64(cond) - 1) } - -// Select sets v to a if cond == 1, and to b if cond == 0. -func (v *Element) Select(a, b *Element, cond int) *Element { - m := mask64Bits(cond) - v.l0 = (m & a.l0) | (^m & b.l0) - v.l1 = (m & a.l1) | (^m & b.l1) - v.l2 = (m & a.l2) | (^m & b.l2) - v.l3 = (m & a.l3) | (^m & b.l3) - v.l4 = (m & a.l4) | (^m & b.l4) - return v -} - -// Swap swaps v and u if cond == 1 or leaves them unchanged if cond == 0, and returns v. -func (v *Element) Swap(u *Element, cond int) { - m := mask64Bits(cond) - t := m & (v.l0 ^ u.l0) - v.l0 ^= t - u.l0 ^= t - t = m & (v.l1 ^ u.l1) - v.l1 ^= t - u.l1 ^= t - t = m & (v.l2 ^ u.l2) - v.l2 ^= t - u.l2 ^= t - t = m & (v.l3 ^ u.l3) - v.l3 ^= t - u.l3 ^= t - t = m & (v.l4 ^ u.l4) - v.l4 ^= t - u.l4 ^= t -} - -// IsNegative returns 1 if v is negative, and 0 otherwise. -func (v *Element) IsNegative() int { - return int(v.Bytes()[0] & 1) -} - -// Absolute sets v to |u|, and returns v. -func (v *Element) Absolute(u *Element) *Element { - return v.Select(new(Element).Negate(u), u, u.IsNegative()) -} - -// Multiply sets v = x * y, and returns v. -func (v *Element) Multiply(x, y *Element) *Element { - feMul(v, x, y) - return v -} - -// Square sets v = x * x, and returns v. -func (v *Element) Square(x *Element) *Element { - feSquare(v, x) - return v -} - -// Mult32 sets v = x * y, and returns v. 
-func (v *Element) Mult32(x *Element, y uint32) *Element { - x0lo, x0hi := mul51(x.l0, y) - x1lo, x1hi := mul51(x.l1, y) - x2lo, x2hi := mul51(x.l2, y) - x3lo, x3hi := mul51(x.l3, y) - x4lo, x4hi := mul51(x.l4, y) - v.l0 = x0lo + 19*x4hi // carried over per the reduction identity - v.l1 = x1lo + x0hi - v.l2 = x2lo + x1hi - v.l3 = x3lo + x2hi - v.l4 = x4lo + x3hi - // The hi portions are going to be only 32 bits, plus any previous excess, - // so we can skip the carry propagation. - return v -} - -// mul51 returns lo + hi * 2⁵¹ = a * b. -func mul51(a uint64, b uint32) (lo uint64, hi uint64) { - mh, ml := bits.Mul64(a, uint64(b)) - lo = ml & maskLow51Bits - hi = (mh << 13) | (ml >> 51) - return -} - -// Pow22523 set v = x^((p-5)/8), and returns v. (p-5)/8 is 2^252-3. -func (v *Element) Pow22523(x *Element) *Element { - var t0, t1, t2 Element - - t0.Square(x) // x^2 - t1.Square(&t0) // x^4 - t1.Square(&t1) // x^8 - t1.Multiply(x, &t1) // x^9 - t0.Multiply(&t0, &t1) // x^11 - t0.Square(&t0) // x^22 - t0.Multiply(&t1, &t0) // x^31 - t1.Square(&t0) // x^62 - for i := 1; i < 5; i++ { // x^992 - t1.Square(&t1) - } - t0.Multiply(&t1, &t0) // x^1023 -> 1023 = 2^10 - 1 - t1.Square(&t0) // 2^11 - 2 - for i := 1; i < 10; i++ { // 2^20 - 2^10 - t1.Square(&t1) - } - t1.Multiply(&t1, &t0) // 2^20 - 1 - t2.Square(&t1) // 2^21 - 2 - for i := 1; i < 20; i++ { // 2^40 - 2^20 - t2.Square(&t2) - } - t1.Multiply(&t2, &t1) // 2^40 - 1 - t1.Square(&t1) // 2^41 - 2 - for i := 1; i < 10; i++ { // 2^50 - 2^10 - t1.Square(&t1) - } - t0.Multiply(&t1, &t0) // 2^50 - 1 - t1.Square(&t0) // 2^51 - 2 - for i := 1; i < 50; i++ { // 2^100 - 2^50 - t1.Square(&t1) - } - t1.Multiply(&t1, &t0) // 2^100 - 1 - t2.Square(&t1) // 2^101 - 2 - for i := 1; i < 100; i++ { // 2^200 - 2^100 - t2.Square(&t2) - } - t1.Multiply(&t2, &t1) // 2^200 - 1 - t1.Square(&t1) // 2^201 - 2 - for i := 1; i < 50; i++ { // 2^250 - 2^50 - t1.Square(&t1) - } - t0.Multiply(&t1, &t0) // 2^250 - 1 - t0.Square(&t0) // 2^251 - 2 - t0.Square(&t0) // 2^252 - 4 - return v.Multiply(&t0, x) // 2^252 - 3 -> x^(2^252-3) -} - -// sqrtM1 is 2^((p-1)/4), which squared is equal to -1 by Euler's Criterion. -var sqrtM1 = &Element{1718705420411056, 234908883556509, - 2233514472574048, 2117202627021982, 765476049583133} - -// SqrtRatio sets r to the non-negative square root of the ratio of u and v. -// -// If u/v is square, SqrtRatio returns r and 1. If u/v is not square, SqrtRatio -// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00, -// and returns r and 0. -func (r *Element) SqrtRatio(u, v *Element) (R *Element, wasSquare int) { - t0 := new(Element) - - // r = (u * v3) * (u * v7)^((p-5)/8) - v2 := new(Element).Square(v) - uv3 := new(Element).Multiply(u, t0.Multiply(v2, v)) - uv7 := new(Element).Multiply(uv3, t0.Square(v2)) - rr := new(Element).Multiply(uv3, t0.Pow22523(uv7)) - - check := new(Element).Multiply(v, t0.Square(rr)) // check = v * r^2 - - uNeg := new(Element).Negate(u) - correctSignSqrt := check.Equal(u) - flippedSignSqrt := check.Equal(uNeg) - flippedSignSqrtI := check.Equal(t0.Multiply(uNeg, sqrtM1)) - - rPrime := new(Element).Multiply(rr, sqrtM1) // r_prime = SQRT_M1 * r - // r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r) - rr.Select(rPrime, rr, flippedSignSqrt|flippedSignSqrtI) - - r.Absolute(rr) // Choose the nonnegative square root. 
- return r, correctSignSqrt | flippedSignSqrt -} diff --git a/vendor/filippo.io/edwards25519/field/fe_amd64.go b/vendor/filippo.io/edwards25519/field/fe_amd64.go deleted file mode 100644 index edcf163c4e..0000000000 --- a/vendor/filippo.io/edwards25519/field/fe_amd64.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. - -//go:build amd64 && gc && !purego -// +build amd64,gc,!purego - -package field - -// feMul sets out = a * b. It works like feMulGeneric. -// -//go:noescape -func feMul(out *Element, a *Element, b *Element) - -// feSquare sets out = a * a. It works like feSquareGeneric. -// -//go:noescape -func feSquare(out *Element, a *Element) diff --git a/vendor/filippo.io/edwards25519/field/fe_amd64.s b/vendor/filippo.io/edwards25519/field/fe_amd64.s deleted file mode 100644 index 293f013c94..0000000000 --- a/vendor/filippo.io/edwards25519/field/fe_amd64.s +++ /dev/null @@ -1,379 +0,0 @@ -// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. - -//go:build amd64 && gc && !purego -// +build amd64,gc,!purego - -#include "textflag.h" - -// func feMul(out *Element, a *Element, b *Element) -TEXT ·feMul(SB), NOSPLIT, $0-24 - MOVQ a+8(FP), CX - MOVQ b+16(FP), BX - - // r0 = a0×b0 - MOVQ (CX), AX - MULQ (BX) - MOVQ AX, DI - MOVQ DX, SI - - // r0 += 19×a1×b4 - MOVQ 8(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(BX) - ADDQ AX, DI - ADCQ DX, SI - - // r0 += 19×a2×b3 - MOVQ 16(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 24(BX) - ADDQ AX, DI - ADCQ DX, SI - - // r0 += 19×a3×b2 - MOVQ 24(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 16(BX) - ADDQ AX, DI - ADCQ DX, SI - - // r0 += 19×a4×b1 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 8(BX) - ADDQ AX, DI - ADCQ DX, SI - - // r1 = a0×b1 - MOVQ (CX), AX - MULQ 8(BX) - MOVQ AX, R9 - MOVQ DX, R8 - - // r1 += a1×b0 - MOVQ 8(CX), AX - MULQ (BX) - ADDQ AX, R9 - ADCQ DX, R8 - - // r1 += 19×a2×b4 - MOVQ 16(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(BX) - ADDQ AX, R9 - ADCQ DX, R8 - - // r1 += 19×a3×b3 - MOVQ 24(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 24(BX) - ADDQ AX, R9 - ADCQ DX, R8 - - // r1 += 19×a4×b2 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 16(BX) - ADDQ AX, R9 - ADCQ DX, R8 - - // r2 = a0×b2 - MOVQ (CX), AX - MULQ 16(BX) - MOVQ AX, R11 - MOVQ DX, R10 - - // r2 += a1×b1 - MOVQ 8(CX), AX - MULQ 8(BX) - ADDQ AX, R11 - ADCQ DX, R10 - - // r2 += a2×b0 - MOVQ 16(CX), AX - MULQ (BX) - ADDQ AX, R11 - ADCQ DX, R10 - - // r2 += 19×a3×b4 - MOVQ 24(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(BX) - ADDQ AX, R11 - ADCQ DX, R10 - - // r2 += 19×a4×b3 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 24(BX) - ADDQ AX, R11 - ADCQ DX, R10 - - // r3 = a0×b3 - MOVQ (CX), AX - MULQ 24(BX) - MOVQ AX, R13 - MOVQ DX, R12 - - // r3 += a1×b2 - MOVQ 8(CX), AX - MULQ 16(BX) - ADDQ AX, R13 - ADCQ DX, R12 - - // r3 += a2×b1 - MOVQ 16(CX), AX - MULQ 8(BX) - ADDQ AX, R13 - ADCQ DX, R12 - - // r3 += a3×b0 - MOVQ 24(CX), AX - MULQ (BX) - ADDQ AX, R13 - ADCQ DX, R12 - - // r3 += 19×a4×b4 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(BX) - ADDQ AX, R13 - ADCQ DX, R12 - - // r4 = a0×b4 - MOVQ (CX), AX - MULQ 32(BX) - MOVQ AX, R15 - MOVQ DX, R14 - - // r4 += a1×b3 - MOVQ 8(CX), AX - MULQ 24(BX) - ADDQ AX, R15 - ADCQ DX, R14 - - // r4 += a2×b2 - MOVQ 16(CX), AX - MULQ 16(BX) - ADDQ AX, R15 - ADCQ DX, R14 - - // r4 += a3×b1 - MOVQ 24(CX), AX - MULQ 8(BX) - ADDQ AX, R15 - ADCQ DX, R14 - - // r4 += a4×b0 - MOVQ 32(CX), AX - MULQ (BX) - ADDQ AX, 
R15 - ADCQ DX, R14 - - // First reduction chain - MOVQ $0x0007ffffffffffff, AX - SHLQ $0x0d, DI, SI - SHLQ $0x0d, R9, R8 - SHLQ $0x0d, R11, R10 - SHLQ $0x0d, R13, R12 - SHLQ $0x0d, R15, R14 - ANDQ AX, DI - IMUL3Q $0x13, R14, R14 - ADDQ R14, DI - ANDQ AX, R9 - ADDQ SI, R9 - ANDQ AX, R11 - ADDQ R8, R11 - ANDQ AX, R13 - ADDQ R10, R13 - ANDQ AX, R15 - ADDQ R12, R15 - - // Second reduction chain (carryPropagate) - MOVQ DI, SI - SHRQ $0x33, SI - MOVQ R9, R8 - SHRQ $0x33, R8 - MOVQ R11, R10 - SHRQ $0x33, R10 - MOVQ R13, R12 - SHRQ $0x33, R12 - MOVQ R15, R14 - SHRQ $0x33, R14 - ANDQ AX, DI - IMUL3Q $0x13, R14, R14 - ADDQ R14, DI - ANDQ AX, R9 - ADDQ SI, R9 - ANDQ AX, R11 - ADDQ R8, R11 - ANDQ AX, R13 - ADDQ R10, R13 - ANDQ AX, R15 - ADDQ R12, R15 - - // Store output - MOVQ out+0(FP), AX - MOVQ DI, (AX) - MOVQ R9, 8(AX) - MOVQ R11, 16(AX) - MOVQ R13, 24(AX) - MOVQ R15, 32(AX) - RET - -// func feSquare(out *Element, a *Element) -TEXT ·feSquare(SB), NOSPLIT, $0-16 - MOVQ a+8(FP), CX - - // r0 = l0×l0 - MOVQ (CX), AX - MULQ (CX) - MOVQ AX, SI - MOVQ DX, BX - - // r0 += 38×l1×l4 - MOVQ 8(CX), AX - IMUL3Q $0x26, AX, AX - MULQ 32(CX) - ADDQ AX, SI - ADCQ DX, BX - - // r0 += 38×l2×l3 - MOVQ 16(CX), AX - IMUL3Q $0x26, AX, AX - MULQ 24(CX) - ADDQ AX, SI - ADCQ DX, BX - - // r1 = 2×l0×l1 - MOVQ (CX), AX - SHLQ $0x01, AX - MULQ 8(CX) - MOVQ AX, R8 - MOVQ DX, DI - - // r1 += 38×l2×l4 - MOVQ 16(CX), AX - IMUL3Q $0x26, AX, AX - MULQ 32(CX) - ADDQ AX, R8 - ADCQ DX, DI - - // r1 += 19×l3×l3 - MOVQ 24(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 24(CX) - ADDQ AX, R8 - ADCQ DX, DI - - // r2 = 2×l0×l2 - MOVQ (CX), AX - SHLQ $0x01, AX - MULQ 16(CX) - MOVQ AX, R10 - MOVQ DX, R9 - - // r2 += l1×l1 - MOVQ 8(CX), AX - MULQ 8(CX) - ADDQ AX, R10 - ADCQ DX, R9 - - // r2 += 38×l3×l4 - MOVQ 24(CX), AX - IMUL3Q $0x26, AX, AX - MULQ 32(CX) - ADDQ AX, R10 - ADCQ DX, R9 - - // r3 = 2×l0×l3 - MOVQ (CX), AX - SHLQ $0x01, AX - MULQ 24(CX) - MOVQ AX, R12 - MOVQ DX, R11 - - // r3 += 2×l1×l2 - MOVQ 8(CX), AX - IMUL3Q $0x02, AX, AX - MULQ 16(CX) - ADDQ AX, R12 - ADCQ DX, R11 - - // r3 += 19×l4×l4 - MOVQ 32(CX), AX - IMUL3Q $0x13, AX, AX - MULQ 32(CX) - ADDQ AX, R12 - ADCQ DX, R11 - - // r4 = 2×l0×l4 - MOVQ (CX), AX - SHLQ $0x01, AX - MULQ 32(CX) - MOVQ AX, R14 - MOVQ DX, R13 - - // r4 += 2×l1×l3 - MOVQ 8(CX), AX - IMUL3Q $0x02, AX, AX - MULQ 24(CX) - ADDQ AX, R14 - ADCQ DX, R13 - - // r4 += l2×l2 - MOVQ 16(CX), AX - MULQ 16(CX) - ADDQ AX, R14 - ADCQ DX, R13 - - // First reduction chain - MOVQ $0x0007ffffffffffff, AX - SHLQ $0x0d, SI, BX - SHLQ $0x0d, R8, DI - SHLQ $0x0d, R10, R9 - SHLQ $0x0d, R12, R11 - SHLQ $0x0d, R14, R13 - ANDQ AX, SI - IMUL3Q $0x13, R13, R13 - ADDQ R13, SI - ANDQ AX, R8 - ADDQ BX, R8 - ANDQ AX, R10 - ADDQ DI, R10 - ANDQ AX, R12 - ADDQ R9, R12 - ANDQ AX, R14 - ADDQ R11, R14 - - // Second reduction chain (carryPropagate) - MOVQ SI, BX - SHRQ $0x33, BX - MOVQ R8, DI - SHRQ $0x33, DI - MOVQ R10, R9 - SHRQ $0x33, R9 - MOVQ R12, R11 - SHRQ $0x33, R11 - MOVQ R14, R13 - SHRQ $0x33, R13 - ANDQ AX, SI - IMUL3Q $0x13, R13, R13 - ADDQ R13, SI - ANDQ AX, R8 - ADDQ BX, R8 - ANDQ AX, R10 - ADDQ DI, R10 - ANDQ AX, R12 - ADDQ R9, R12 - ANDQ AX, R14 - ADDQ R11, R14 - - // Store output - MOVQ out+0(FP), AX - MOVQ SI, (AX) - MOVQ R8, 8(AX) - MOVQ R10, 16(AX) - MOVQ R12, 24(AX) - MOVQ R14, 32(AX) - RET diff --git a/vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go b/vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go deleted file mode 100644 index ddb6c9b8f7..0000000000 --- a/vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go +++ 
/dev/null @@ -1,12 +0,0 @@ -// Copyright (c) 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !amd64 || !gc || purego -// +build !amd64 !gc purego - -package field - -func feMul(v, x, y *Element) { feMulGeneric(v, x, y) } - -func feSquare(v, x *Element) { feSquareGeneric(v, x) } diff --git a/vendor/filippo.io/edwards25519/field/fe_arm64.go b/vendor/filippo.io/edwards25519/field/fe_arm64.go deleted file mode 100644 index af459ef515..0000000000 --- a/vendor/filippo.io/edwards25519/field/fe_arm64.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build arm64 && gc && !purego -// +build arm64,gc,!purego - -package field - -//go:noescape -func carryPropagate(v *Element) - -func (v *Element) carryPropagate() *Element { - carryPropagate(v) - return v -} diff --git a/vendor/filippo.io/edwards25519/field/fe_arm64.s b/vendor/filippo.io/edwards25519/field/fe_arm64.s deleted file mode 100644 index 3126a43419..0000000000 --- a/vendor/filippo.io/edwards25519/field/fe_arm64.s +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (c) 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build arm64 && gc && !purego - -#include "textflag.h" - -// carryPropagate works exactly like carryPropagateGeneric and uses the -// same AND, ADD, and LSR+MADD instructions emitted by the compiler, but -// avoids loading R0-R4 twice and uses LDP and STP. -// -// See https://golang.org/issues/43145 for the main compiler issue. -// -// func carryPropagate(v *Element) -TEXT ·carryPropagate(SB),NOFRAME|NOSPLIT,$0-8 - MOVD v+0(FP), R20 - - LDP 0(R20), (R0, R1) - LDP 16(R20), (R2, R3) - MOVD 32(R20), R4 - - AND $0x7ffffffffffff, R0, R10 - AND $0x7ffffffffffff, R1, R11 - AND $0x7ffffffffffff, R2, R12 - AND $0x7ffffffffffff, R3, R13 - AND $0x7ffffffffffff, R4, R14 - - ADD R0>>51, R11, R11 - ADD R1>>51, R12, R12 - ADD R2>>51, R13, R13 - ADD R3>>51, R14, R14 - // R4>>51 * 19 + R10 -> R10 - LSR $51, R4, R21 - MOVD $19, R22 - MADD R22, R10, R21, R10 - - STP (R10, R11), 0(R20) - STP (R12, R13), 16(R20) - MOVD R14, 32(R20) - - RET diff --git a/vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go b/vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go deleted file mode 100644 index 234a5b2e5d..0000000000 --- a/vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !arm64 || !gc || purego -// +build !arm64 !gc purego - -package field - -func (v *Element) carryPropagate() *Element { - return v.carryPropagateGeneric() -} diff --git a/vendor/filippo.io/edwards25519/field/fe_extra.go b/vendor/filippo.io/edwards25519/field/fe_extra.go deleted file mode 100644 index 1ef503b9a2..0000000000 --- a/vendor/filippo.io/edwards25519/field/fe_extra.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (c) 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package field - -import "errors" - -// This file contains additional functionality that is not included in the -// upstream crypto/ed25519/edwards25519/field package. - -// SetWideBytes sets v to x, where x is a 64-byte little-endian encoding, which -// is reduced modulo the field order. If x is not of the right length, -// SetWideBytes returns nil and an error, and the receiver is unchanged. -// -// SetWideBytes is not necessary to select a uniformly distributed value, and is -// only provided for compatibility: SetBytes can be used instead as the chance -// of bias is less than 2⁻²⁵⁰. -func (v *Element) SetWideBytes(x []byte) (*Element, error) { - if len(x) != 64 { - return nil, errors.New("edwards25519: invalid SetWideBytes input size") - } - - // Split the 64 bytes into two elements, and extract the most significant - // bit of each, which is ignored by SetBytes. - lo, _ := new(Element).SetBytes(x[:32]) - loMSB := uint64(x[31] >> 7) - hi, _ := new(Element).SetBytes(x[32:]) - hiMSB := uint64(x[63] >> 7) - - // The output we want is - // - // v = lo + loMSB * 2²⁵⁵ + hi * 2²⁵⁶ + hiMSB * 2⁵¹¹ - // - // which applying the reduction identity comes out to - // - // v = lo + loMSB * 19 + hi * 2 * 19 + hiMSB * 2 * 19² - // - // l0 will be the sum of a 52 bits value (lo.l0), plus a 5 bits value - // (loMSB * 19), a 6 bits value (hi.l0 * 2 * 19), and a 10 bits value - // (hiMSB * 2 * 19²), so it fits in a uint64. - - v.l0 = lo.l0 + loMSB*19 + hi.l0*2*19 + hiMSB*2*19*19 - v.l1 = lo.l1 + hi.l1*2*19 - v.l2 = lo.l2 + hi.l2*2*19 - v.l3 = lo.l3 + hi.l3*2*19 - v.l4 = lo.l4 + hi.l4*2*19 - - return v.carryPropagate(), nil -} diff --git a/vendor/filippo.io/edwards25519/field/fe_generic.go b/vendor/filippo.io/edwards25519/field/fe_generic.go deleted file mode 100644 index 86f5fd9553..0000000000 --- a/vendor/filippo.io/edwards25519/field/fe_generic.go +++ /dev/null @@ -1,266 +0,0 @@ -// Copyright (c) 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package field - -import "math/bits" - -// uint128 holds a 128-bit number as two 64-bit limbs, for use with the -// bits.Mul64 and bits.Add64 intrinsics. -type uint128 struct { - lo, hi uint64 -} - -// mul64 returns a * b. -func mul64(a, b uint64) uint128 { - hi, lo := bits.Mul64(a, b) - return uint128{lo, hi} -} - -// addMul64 returns v + a * b. -func addMul64(v uint128, a, b uint64) uint128 { - hi, lo := bits.Mul64(a, b) - lo, c := bits.Add64(lo, v.lo, 0) - hi, _ = bits.Add64(hi, v.hi, c) - return uint128{lo, hi} -} - -// shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits. -func shiftRightBy51(a uint128) uint64 { - return (a.hi << (64 - 51)) | (a.lo >> 51) -} - -func feMulGeneric(v, a, b *Element) { - a0 := a.l0 - a1 := a.l1 - a2 := a.l2 - a3 := a.l3 - a4 := a.l4 - - b0 := b.l0 - b1 := b.l1 - b2 := b.l2 - b3 := b.l3 - b4 := b.l4 - - // Limb multiplication works like pen-and-paper columnar multiplication, but - // with 51-bit limbs instead of digits. - // - // a4 a3 a2 a1 a0 x - // b4 b3 b2 b1 b0 = - // ------------------------ - // a4b0 a3b0 a2b0 a1b0 a0b0 + - // a4b1 a3b1 a2b1 a1b1 a0b1 + - // a4b2 a3b2 a2b2 a1b2 a0b2 + - // a4b3 a3b3 a2b3 a1b3 a0b3 + - // a4b4 a3b4 a2b4 a1b4 a0b4 = - // ---------------------------------------------- - // r8 r7 r6 r5 r4 r3 r2 r1 r0 - // - // We can then use the reduction identity (a * 2²⁵⁵ + b = a * 19 + b) to - // reduce the limbs that would overflow 255 bits. 
r5 * 2²⁵⁵ becomes 19 * r5, - // r6 * 2³⁰⁶ becomes 19 * r6 * 2⁵¹, etc. - // - // Reduction can be carried out simultaneously to multiplication. For - // example, we do not compute r5: whenever the result of a multiplication - // belongs to r5, like a1b4, we multiply it by 19 and add the result to r0. - // - // a4b0 a3b0 a2b0 a1b0 a0b0 + - // a3b1 a2b1 a1b1 a0b1 19×a4b1 + - // a2b2 a1b2 a0b2 19×a4b2 19×a3b2 + - // a1b3 a0b3 19×a4b3 19×a3b3 19×a2b3 + - // a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4 = - // -------------------------------------- - // r4 r3 r2 r1 r0 - // - // Finally we add up the columns into wide, overlapping limbs. - - a1_19 := a1 * 19 - a2_19 := a2 * 19 - a3_19 := a3 * 19 - a4_19 := a4 * 19 - - // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1) - r0 := mul64(a0, b0) - r0 = addMul64(r0, a1_19, b4) - r0 = addMul64(r0, a2_19, b3) - r0 = addMul64(r0, a3_19, b2) - r0 = addMul64(r0, a4_19, b1) - - // r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2) - r1 := mul64(a0, b1) - r1 = addMul64(r1, a1, b0) - r1 = addMul64(r1, a2_19, b4) - r1 = addMul64(r1, a3_19, b3) - r1 = addMul64(r1, a4_19, b2) - - // r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3) - r2 := mul64(a0, b2) - r2 = addMul64(r2, a1, b1) - r2 = addMul64(r2, a2, b0) - r2 = addMul64(r2, a3_19, b4) - r2 = addMul64(r2, a4_19, b3) - - // r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4 - r3 := mul64(a0, b3) - r3 = addMul64(r3, a1, b2) - r3 = addMul64(r3, a2, b1) - r3 = addMul64(r3, a3, b0) - r3 = addMul64(r3, a4_19, b4) - - // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0 - r4 := mul64(a0, b4) - r4 = addMul64(r4, a1, b3) - r4 = addMul64(r4, a2, b2) - r4 = addMul64(r4, a3, b1) - r4 = addMul64(r4, a4, b0) - - // After the multiplication, we need to reduce (carry) the five coefficients - // to obtain a result with limbs that are at most slightly larger than 2⁵¹, - // to respect the Element invariant. - // - // Overall, the reduction works the same as carryPropagate, except with - // wider inputs: we take the carry for each coefficient by shifting it right - // by 51, and add it to the limb above it. The top carry is multiplied by 19 - // according to the reduction identity and added to the lowest limb. - // - // The largest coefficient (r0) will be at most 111 bits, which guarantees - // that all carries are at most 111 - 51 = 60 bits, which fits in a uint64. - // - // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1) - // r0 < 2⁵²×2⁵² + 19×(2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵²) - // r0 < (1 + 19 × 4) × 2⁵² × 2⁵² - // r0 < 2⁷ × 2⁵² × 2⁵² - // r0 < 2¹¹¹ - // - // Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most - // 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and - // allows us to easily apply the reduction identity. - // - // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0 - // r4 < 5 × 2⁵² × 2⁵² - // r4 < 2¹⁰⁷ - // - - c0 := shiftRightBy51(r0) - c1 := shiftRightBy51(r1) - c2 := shiftRightBy51(r2) - c3 := shiftRightBy51(r3) - c4 := shiftRightBy51(r4) - - rr0 := r0.lo&maskLow51Bits + c4*19 - rr1 := r1.lo&maskLow51Bits + c0 - rr2 := r2.lo&maskLow51Bits + c1 - rr3 := r3.lo&maskLow51Bits + c2 - rr4 := r4.lo&maskLow51Bits + c3 - - // Now all coefficients fit into 64-bit registers but are still too large to - // be passed around as an Element. We therefore do one last carry chain, - // where the carries will be small enough to fit in the wiggle room above 2⁵¹. 
- *v = Element{rr0, rr1, rr2, rr3, rr4} - v.carryPropagate() -} - -func feSquareGeneric(v, a *Element) { - l0 := a.l0 - l1 := a.l1 - l2 := a.l2 - l3 := a.l3 - l4 := a.l4 - - // Squaring works precisely like multiplication above, but thanks to its - // symmetry we get to group a few terms together. - // - // l4 l3 l2 l1 l0 x - // l4 l3 l2 l1 l0 = - // ------------------------ - // l4l0 l3l0 l2l0 l1l0 l0l0 + - // l4l1 l3l1 l2l1 l1l1 l0l1 + - // l4l2 l3l2 l2l2 l1l2 l0l2 + - // l4l3 l3l3 l2l3 l1l3 l0l3 + - // l4l4 l3l4 l2l4 l1l4 l0l4 = - // ---------------------------------------------- - // r8 r7 r6 r5 r4 r3 r2 r1 r0 - // - // l4l0 l3l0 l2l0 l1l0 l0l0 + - // l3l1 l2l1 l1l1 l0l1 19×l4l1 + - // l2l2 l1l2 l0l2 19×l4l2 19×l3l2 + - // l1l3 l0l3 19×l4l3 19×l3l3 19×l2l3 + - // l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4 = - // -------------------------------------- - // r4 r3 r2 r1 r0 - // - // With precomputed 2×, 19×, and 2×19× terms, we can compute each limb with - // only three Mul64 and four Add64, instead of five and eight. - - l0_2 := l0 * 2 - l1_2 := l1 * 2 - - l1_38 := l1 * 38 - l2_38 := l2 * 38 - l3_38 := l3 * 38 - - l3_19 := l3 * 19 - l4_19 := l4 * 19 - - // r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3) - r0 := mul64(l0, l0) - r0 = addMul64(r0, l1_38, l4) - r0 = addMul64(r0, l2_38, l3) - - // r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3 - r1 := mul64(l0_2, l1) - r1 = addMul64(r1, l2_38, l4) - r1 = addMul64(r1, l3_19, l3) - - // r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4 - r2 := mul64(l0_2, l2) - r2 = addMul64(r2, l1, l1) - r2 = addMul64(r2, l3_38, l4) - - // r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4 - r3 := mul64(l0_2, l3) - r3 = addMul64(r3, l1_2, l2) - r3 = addMul64(r3, l4_19, l4) - - // r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2 - r4 := mul64(l0_2, l4) - r4 = addMul64(r4, l1_2, l3) - r4 = addMul64(r4, l2, l2) - - c0 := shiftRightBy51(r0) - c1 := shiftRightBy51(r1) - c2 := shiftRightBy51(r2) - c3 := shiftRightBy51(r3) - c4 := shiftRightBy51(r4) - - rr0 := r0.lo&maskLow51Bits + c4*19 - rr1 := r1.lo&maskLow51Bits + c0 - rr2 := r2.lo&maskLow51Bits + c1 - rr3 := r3.lo&maskLow51Bits + c2 - rr4 := r4.lo&maskLow51Bits + c3 - - *v = Element{rr0, rr1, rr2, rr3, rr4} - v.carryPropagate() -} - -// carryPropagateGeneric brings the limbs below 52 bits by applying the reduction -// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry. -func (v *Element) carryPropagateGeneric() *Element { - c0 := v.l0 >> 51 - c1 := v.l1 >> 51 - c2 := v.l2 >> 51 - c3 := v.l3 >> 51 - c4 := v.l4 >> 51 - - // c4 is at most 64 - 51 = 13 bits, so c4*19 is at most 18 bits, and - // the final l0 will be at most 52 bits. Similarly for the rest. - v.l0 = v.l0&maskLow51Bits + c4*19 - v.l1 = v.l1&maskLow51Bits + c0 - v.l2 = v.l2&maskLow51Bits + c1 - v.l3 = v.l3&maskLow51Bits + c2 - v.l4 = v.l4&maskLow51Bits + c3 - - return v -} diff --git a/vendor/filippo.io/edwards25519/scalar.go b/vendor/filippo.io/edwards25519/scalar.go deleted file mode 100644 index 3fd1653877..0000000000 --- a/vendor/filippo.io/edwards25519/scalar.go +++ /dev/null @@ -1,343 +0,0 @@ -// Copyright (c) 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package edwards25519 - -import ( - "encoding/binary" - "errors" -) - -// A Scalar is an integer modulo -// -// l = 2^252 + 27742317777372353535851937790883648493 -// -// which is the prime order of the edwards25519 group. -// -// This type works similarly to math/big.Int, and all arguments and -// receivers are allowed to alias. -// -// The zero value is a valid zero element. -type Scalar struct { - // s is the scalar in the Montgomery domain, in the format of the - // fiat-crypto implementation. - s fiatScalarMontgomeryDomainFieldElement -} - -// The field implementation in scalar_fiat.go is generated by the fiat-crypto -// project (https://github.com/mit-plv/fiat-crypto) at version v0.0.9 (23d2dbc) -// from a formally verified model. -// -// fiat-crypto code comes under the following license. -// -// Copyright (c) 2015-2020 The fiat-crypto Authors. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// 1. Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// -// THIS SOFTWARE IS PROVIDED BY the fiat-crypto authors "AS IS" -// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, -// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Berkeley Software Design, -// Inc. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - -// NewScalar returns a new zero Scalar. -func NewScalar() *Scalar { - return &Scalar{} -} - -// MultiplyAdd sets s = x * y + z mod l, and returns s. It is equivalent to -// using Multiply and then Add. -func (s *Scalar) MultiplyAdd(x, y, z *Scalar) *Scalar { - // Make a copy of z in case it aliases s. - zCopy := new(Scalar).Set(z) - return s.Multiply(x, y).Add(s, zCopy) -} - -// Add sets s = x + y mod l, and returns s. -func (s *Scalar) Add(x, y *Scalar) *Scalar { - // s = 1 * x + y mod l - fiatScalarAdd(&s.s, &x.s, &y.s) - return s -} - -// Subtract sets s = x - y mod l, and returns s. -func (s *Scalar) Subtract(x, y *Scalar) *Scalar { - // s = -1 * y + x mod l - fiatScalarSub(&s.s, &x.s, &y.s) - return s -} - -// Negate sets s = -x mod l, and returns s. -func (s *Scalar) Negate(x *Scalar) *Scalar { - // s = -1 * x + 0 mod l - fiatScalarOpp(&s.s, &x.s) - return s -} - -// Multiply sets s = x * y mod l, and returns s. -func (s *Scalar) Multiply(x, y *Scalar) *Scalar { - // s = x * y + 0 mod l - fiatScalarMul(&s.s, &x.s, &y.s) - return s -} - -// Set sets s = x, and returns s. -func (s *Scalar) Set(x *Scalar) *Scalar { - *s = *x - return s -} - -// SetUniformBytes sets s = x mod l, where x is a 64-byte little-endian integer. -// If x is not of the right length, SetUniformBytes returns nil and an error, -// and the receiver is unchanged. -// -// SetUniformBytes can be used to set s to a uniformly distributed value given -// 64 uniformly distributed random bytes. 
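//
// For example, a caller can derive a scalar from a 64-byte hash (a sketch;
// seed is any caller-supplied byte slice):
//
//	h := sha512.Sum512(seed)
//	s, err := edwards25519.NewScalar().SetUniformBytes(h[:])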
-func (s *Scalar) SetUniformBytes(x []byte) (*Scalar, error) { - if len(x) != 64 { - return nil, errors.New("edwards25519: invalid SetUniformBytes input length") - } - - // We have a value x of 512 bits, but our fiatScalarFromBytes function - // expects an input lower than l, which is a little over 252 bits. - // - // Instead of writing a reduction function that operates on wider inputs, we - // can interpret x as the sum of three shorter values a, b, and c. - // - // x = a + b * 2^168 + c * 2^336 mod l - // - // We then precompute 2^168 and 2^336 modulo l, and perform the reduction - // with two multiplications and two additions. - - s.setShortBytes(x[:21]) - t := new(Scalar).setShortBytes(x[21:42]) - s.Add(s, t.Multiply(t, scalarTwo168)) - t.setShortBytes(x[42:]) - s.Add(s, t.Multiply(t, scalarTwo336)) - - return s, nil -} - -// scalarTwo168 and scalarTwo336 are 2^168 and 2^336 modulo l, encoded as a -// fiatScalarMontgomeryDomainFieldElement, which is a little-endian 4-limb value -// in the 2^256 Montgomery domain. -var scalarTwo168 = &Scalar{s: [4]uint64{0x5b8ab432eac74798, 0x38afddd6de59d5d7, - 0xa2c131b399411b7c, 0x6329a7ed9ce5a30}} -var scalarTwo336 = &Scalar{s: [4]uint64{0xbd3d108e2b35ecc5, 0x5c3a3718bdf9c90b, - 0x63aa97a331b4f2ee, 0x3d217f5be65cb5c}} - -// setShortBytes sets s = x mod l, where x is a little-endian integer shorter -// than 32 bytes. -func (s *Scalar) setShortBytes(x []byte) *Scalar { - if len(x) >= 32 { - panic("edwards25519: internal error: setShortBytes called with a long string") - } - var buf [32]byte - copy(buf[:], x) - fiatScalarFromBytes((*[4]uint64)(&s.s), &buf) - fiatScalarToMontgomery(&s.s, (*fiatScalarNonMontgomeryDomainFieldElement)(&s.s)) - return s -} - -// SetCanonicalBytes sets s = x, where x is a 32-byte little-endian encoding of -// s, and returns s. If x is not a canonical encoding of s, SetCanonicalBytes -// returns nil and an error, and the receiver is unchanged. -func (s *Scalar) SetCanonicalBytes(x []byte) (*Scalar, error) { - if len(x) != 32 { - return nil, errors.New("invalid scalar length") - } - if !isReduced(x) { - return nil, errors.New("invalid scalar encoding") - } - - fiatScalarFromBytes((*[4]uint64)(&s.s), (*[32]byte)(x)) - fiatScalarToMontgomery(&s.s, (*fiatScalarNonMontgomeryDomainFieldElement)(&s.s)) - - return s, nil -} - -// scalarMinusOneBytes is l - 1 in little endian. -var scalarMinusOneBytes = [32]byte{236, 211, 245, 92, 26, 99, 18, 88, 214, 156, 247, 162, 222, 249, 222, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16} - -// isReduced returns whether the given scalar in 32-byte little endian encoded -// form is reduced modulo l. -func isReduced(s []byte) bool { - if len(s) != 32 { - return false - } - - for i := len(s) - 1; i >= 0; i-- { - switch { - case s[i] > scalarMinusOneBytes[i]: - return false - case s[i] < scalarMinusOneBytes[i]: - return true - } - } - return true -} - -// SetBytesWithClamping applies the buffer pruning described in RFC 8032, -// Section 5.1.5 (also known as clamping) and sets s to the result. The input -// must be 32 bytes, and it is not modified. If x is not of the right length, -// SetBytesWithClamping returns nil and an error, and the receiver is unchanged. -// -// Note that since Scalar values are always reduced modulo the prime order of -// the curve, the resulting value will not preserve any of the cofactor-clearing -// properties that clamping is meant to provide. It will however work as -// expected as long as it is applied to points on the prime order subgroup, like -// in Ed25519. 
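// (Concretely, clamping clears the three low bits, making the value a
// multiple of the cofactor 8, clears bit 255, and sets bit 254; the code
// below does this with wideBytes[0] &= 248, wideBytes[31] &= 63, and
// wideBytes[31] |= 64.)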
In fact, it is lost to history why RFC 8032 adopted the -// irrelevant RFC 7748 clamping, but it is now required for compatibility. -func (s *Scalar) SetBytesWithClamping(x []byte) (*Scalar, error) { - // The description above omits the purpose of the high bits of the clamping - // for brevity, but those are also lost to reductions, and are also - // irrelevant to edwards25519 as they protect against a specific - // implementation bug that was once observed in a generic Montgomery ladder. - if len(x) != 32 { - return nil, errors.New("edwards25519: invalid SetBytesWithClamping input length") - } - - // We need to use the wide reduction from SetUniformBytes, since clamping - // sets the 2^254 bit, making the value higher than the order. - var wideBytes [64]byte - copy(wideBytes[:], x[:]) - wideBytes[0] &= 248 - wideBytes[31] &= 63 - wideBytes[31] |= 64 - return s.SetUniformBytes(wideBytes[:]) -} - -// Bytes returns the canonical 32-byte little-endian encoding of s. -func (s *Scalar) Bytes() []byte { - // This function is outlined to make the allocations inline in the caller - // rather than happen on the heap. - var encoded [32]byte - return s.bytes(&encoded) -} - -func (s *Scalar) bytes(out *[32]byte) []byte { - var ss fiatScalarNonMontgomeryDomainFieldElement - fiatScalarFromMontgomery(&ss, &s.s) - fiatScalarToBytes(out, (*[4]uint64)(&ss)) - return out[:] -} - -// Equal returns 1 if s and t are equal, and 0 otherwise. -func (s *Scalar) Equal(t *Scalar) int { - var diff fiatScalarMontgomeryDomainFieldElement - fiatScalarSub(&diff, &s.s, &t.s) - var nonzero uint64 - fiatScalarNonzero(&nonzero, (*[4]uint64)(&diff)) - nonzero |= nonzero >> 32 - nonzero |= nonzero >> 16 - nonzero |= nonzero >> 8 - nonzero |= nonzero >> 4 - nonzero |= nonzero >> 2 - nonzero |= nonzero >> 1 - return int(^nonzero) & 1 -} - -// nonAdjacentForm computes a width-w non-adjacent form for this scalar. -// -// w must be between 2 and 8, or nonAdjacentForm will panic. -func (s *Scalar) nonAdjacentForm(w uint) [256]int8 { - // This implementation is adapted from the one - // in curve25519-dalek and is documented there: - // https://github.com/dalek-cryptography/curve25519-dalek/blob/f630041af28e9a405255f98a8a93adca18e4315b/src/scalar.rs#L800-L871 - b := s.Bytes() - if b[31] > 127 { - panic("scalar has high bit set illegally") - } - if w < 2 { - panic("w must be at least 2 by the definition of NAF") - } else if w > 8 { - panic("NAF digits must fit in int8") - } - - var naf [256]int8 - var digits [5]uint64 - - for i := 0; i < 4; i++ { - digits[i] = binary.LittleEndian.Uint64(b[i*8:]) - } - - width := uint64(1 << w) - windowMask := uint64(width - 1) - - pos := uint(0) - carry := uint64(0) - for pos < 256 { - indexU64 := pos / 64 - indexBit := pos % 64 - var bitBuf uint64 - if indexBit < 64-w { - // This window's bits are contained in a single u64 - bitBuf = digits[indexU64] >> indexBit - } else { - // Combine the current 64 bits with bits from the next 64 - bitBuf = (digits[indexU64] >> indexBit) | (digits[1+indexU64] << (64 - indexBit)) - } - - // Add carry into the current window - window := carry + (bitBuf & windowMask) - - if window&1 == 0 { - // If the window value is even, preserve the carry and continue. - // Why is the carry preserved? 
- // If carry == 0 and window & 1 == 0, - // then the next carry should be 0 - // If carry == 1 and window & 1 == 0, - // then bit_buf & 1 == 1 so the next carry should be 1 - pos += 1 - continue - } - - if window < width/2 { - carry = 0 - naf[pos] = int8(window) - } else { - carry = 1 - naf[pos] = int8(window) - int8(width) - } - - pos += w - } - return naf -} - -func (s *Scalar) signedRadix16() [64]int8 { - b := s.Bytes() - if b[31] > 127 { - panic("scalar has high bit set illegally") - } - - var digits [64]int8 - - // Compute unsigned radix-16 digits: - for i := 0; i < 32; i++ { - digits[2*i] = int8(b[i] & 15) - digits[2*i+1] = int8((b[i] >> 4) & 15) - } - - // Recenter coefficients: - for i := 0; i < 63; i++ { - carry := (digits[i] + 8) >> 4 - digits[i] -= carry << 4 - digits[i+1] += carry - } - - return digits -} diff --git a/vendor/filippo.io/edwards25519/scalar_fiat.go b/vendor/filippo.io/edwards25519/scalar_fiat.go deleted file mode 100644 index 2e5782b605..0000000000 --- a/vendor/filippo.io/edwards25519/scalar_fiat.go +++ /dev/null @@ -1,1147 +0,0 @@ -// Code generated by Fiat Cryptography. DO NOT EDIT. -// -// Autogenerated: word_by_word_montgomery --lang Go --cmovznz-by-mul --relax-primitive-carry-to-bitwidth 32,64 --public-function-case camelCase --public-type-case camelCase --private-function-case camelCase --private-type-case camelCase --doc-text-before-function-name '' --doc-newline-before-package-declaration --doc-prepend-header 'Code generated by Fiat Cryptography. DO NOT EDIT.' --package-name edwards25519 Scalar 64 '2^252 + 27742317777372353535851937790883648493' mul add sub opp nonzero from_montgomery to_montgomery to_bytes from_bytes -// -// curve description: Scalar -// -// machine_wordsize = 64 (from "64") -// -// requested operations: mul, add, sub, opp, nonzero, from_montgomery, to_montgomery, to_bytes, from_bytes -// -// m = 0x1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed (from "2^252 + 27742317777372353535851937790883648493") -// -// -// -// NOTE: In addition to the bounds specified above each function, all -// -// functions synthesized for this Montgomery arithmetic require the -// -// input to be strictly less than the prime modulus (m), and also -// -// require the input to be in the unique saturated representation. -// -// All functions also ensure that these two properties are true of -// -// return values. 
-// -// -// -// Computed values: -// -// eval z = z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) -// -// bytes_eval z = z[0] + (z[1] << 8) + (z[2] << 16) + (z[3] << 24) + (z[4] << 32) + (z[5] << 40) + (z[6] << 48) + (z[7] << 56) + (z[8] << 64) + (z[9] << 72) + (z[10] << 80) + (z[11] << 88) + (z[12] << 96) + (z[13] << 104) + (z[14] << 112) + (z[15] << 120) + (z[16] << 128) + (z[17] << 136) + (z[18] << 144) + (z[19] << 152) + (z[20] << 160) + (z[21] << 168) + (z[22] << 176) + (z[23] << 184) + (z[24] << 192) + (z[25] << 200) + (z[26] << 208) + (z[27] << 216) + (z[28] << 224) + (z[29] << 232) + (z[30] << 240) + (z[31] << 248) -// -// twos_complement_eval z = let x1 := z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) in -// -// if x1 & (2^256-1) < 2^255 then x1 & (2^256-1) else (x1 & (2^256-1)) - 2^256 - -package edwards25519 - -import "math/bits" - -type fiatScalarUint1 uint64 // We use uint64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927 -type fiatScalarInt1 int64 // We use uint64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927 - -// The type fiatScalarMontgomeryDomainFieldElement is a field element in the Montgomery domain. -// -// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] -type fiatScalarMontgomeryDomainFieldElement [4]uint64 - -// The type fiatScalarNonMontgomeryDomainFieldElement is a field element NOT in the Montgomery domain. -// -// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] -type fiatScalarNonMontgomeryDomainFieldElement [4]uint64 - -// fiatScalarCmovznzU64 is a single-word conditional move. -// -// Postconditions: -// -// out1 = (if arg1 = 0 then arg2 else arg3) -// -// Input Bounds: -// -// arg1: [0x0 ~> 0x1] -// arg2: [0x0 ~> 0xffffffffffffffff] -// arg3: [0x0 ~> 0xffffffffffffffff] -// -// Output Bounds: -// -// out1: [0x0 ~> 0xffffffffffffffff] -func fiatScalarCmovznzU64(out1 *uint64, arg1 fiatScalarUint1, arg2 uint64, arg3 uint64) { - x1 := (uint64(arg1) * 0xffffffffffffffff) - x2 := ((x1 & arg3) | ((^x1) & arg2)) - *out1 = x2 -} - -// fiatScalarMul multiplies two field elements in the Montgomery domain. 
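// (In the Montgomery domain a value x is represented as x * R mod m with
// R = 2²⁵⁶, and this routine computes arg1 * arg2 * R⁻¹ mod m, so the product
// of two representatives is again a representative.)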
-// -// Preconditions: -// -// 0 ≤ eval arg1 < m -// 0 ≤ eval arg2 < m -// -// Postconditions: -// -// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg2)) mod m -// 0 ≤ eval out1 < m -func fiatScalarMul(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) { - x1 := arg1[1] - x2 := arg1[2] - x3 := arg1[3] - x4 := arg1[0] - var x5 uint64 - var x6 uint64 - x6, x5 = bits.Mul64(x4, arg2[3]) - var x7 uint64 - var x8 uint64 - x8, x7 = bits.Mul64(x4, arg2[2]) - var x9 uint64 - var x10 uint64 - x10, x9 = bits.Mul64(x4, arg2[1]) - var x11 uint64 - var x12 uint64 - x12, x11 = bits.Mul64(x4, arg2[0]) - var x13 uint64 - var x14 uint64 - x13, x14 = bits.Add64(x12, x9, uint64(0x0)) - var x15 uint64 - var x16 uint64 - x15, x16 = bits.Add64(x10, x7, uint64(fiatScalarUint1(x14))) - var x17 uint64 - var x18 uint64 - x17, x18 = bits.Add64(x8, x5, uint64(fiatScalarUint1(x16))) - x19 := (uint64(fiatScalarUint1(x18)) + x6) - var x20 uint64 - _, x20 = bits.Mul64(x11, 0xd2b51da312547e1b) - var x22 uint64 - var x23 uint64 - x23, x22 = bits.Mul64(x20, 0x1000000000000000) - var x24 uint64 - var x25 uint64 - x25, x24 = bits.Mul64(x20, 0x14def9dea2f79cd6) - var x26 uint64 - var x27 uint64 - x27, x26 = bits.Mul64(x20, 0x5812631a5cf5d3ed) - var x28 uint64 - var x29 uint64 - x28, x29 = bits.Add64(x27, x24, uint64(0x0)) - x30 := (uint64(fiatScalarUint1(x29)) + x25) - var x32 uint64 - _, x32 = bits.Add64(x11, x26, uint64(0x0)) - var x33 uint64 - var x34 uint64 - x33, x34 = bits.Add64(x13, x28, uint64(fiatScalarUint1(x32))) - var x35 uint64 - var x36 uint64 - x35, x36 = bits.Add64(x15, x30, uint64(fiatScalarUint1(x34))) - var x37 uint64 - var x38 uint64 - x37, x38 = bits.Add64(x17, x22, uint64(fiatScalarUint1(x36))) - var x39 uint64 - var x40 uint64 - x39, x40 = bits.Add64(x19, x23, uint64(fiatScalarUint1(x38))) - var x41 uint64 - var x42 uint64 - x42, x41 = bits.Mul64(x1, arg2[3]) - var x43 uint64 - var x44 uint64 - x44, x43 = bits.Mul64(x1, arg2[2]) - var x45 uint64 - var x46 uint64 - x46, x45 = bits.Mul64(x1, arg2[1]) - var x47 uint64 - var x48 uint64 - x48, x47 = bits.Mul64(x1, arg2[0]) - var x49 uint64 - var x50 uint64 - x49, x50 = bits.Add64(x48, x45, uint64(0x0)) - var x51 uint64 - var x52 uint64 - x51, x52 = bits.Add64(x46, x43, uint64(fiatScalarUint1(x50))) - var x53 uint64 - var x54 uint64 - x53, x54 = bits.Add64(x44, x41, uint64(fiatScalarUint1(x52))) - x55 := (uint64(fiatScalarUint1(x54)) + x42) - var x56 uint64 - var x57 uint64 - x56, x57 = bits.Add64(x33, x47, uint64(0x0)) - var x58 uint64 - var x59 uint64 - x58, x59 = bits.Add64(x35, x49, uint64(fiatScalarUint1(x57))) - var x60 uint64 - var x61 uint64 - x60, x61 = bits.Add64(x37, x51, uint64(fiatScalarUint1(x59))) - var x62 uint64 - var x63 uint64 - x62, x63 = bits.Add64(x39, x53, uint64(fiatScalarUint1(x61))) - var x64 uint64 - var x65 uint64 - x64, x65 = bits.Add64(uint64(fiatScalarUint1(x40)), x55, uint64(fiatScalarUint1(x63))) - var x66 uint64 - _, x66 = bits.Mul64(x56, 0xd2b51da312547e1b) - var x68 uint64 - var x69 uint64 - x69, x68 = bits.Mul64(x66, 0x1000000000000000) - var x70 uint64 - var x71 uint64 - x71, x70 = bits.Mul64(x66, 0x14def9dea2f79cd6) - var x72 uint64 - var x73 uint64 - x73, x72 = bits.Mul64(x66, 0x5812631a5cf5d3ed) - var x74 uint64 - var x75 uint64 - x74, x75 = bits.Add64(x73, x70, uint64(0x0)) - x76 := (uint64(fiatScalarUint1(x75)) + x71) - var x78 uint64 - _, x78 = bits.Add64(x56, x72, uint64(0x0)) - var x79 
uint64 - var x80 uint64 - x79, x80 = bits.Add64(x58, x74, uint64(fiatScalarUint1(x78))) - var x81 uint64 - var x82 uint64 - x81, x82 = bits.Add64(x60, x76, uint64(fiatScalarUint1(x80))) - var x83 uint64 - var x84 uint64 - x83, x84 = bits.Add64(x62, x68, uint64(fiatScalarUint1(x82))) - var x85 uint64 - var x86 uint64 - x85, x86 = bits.Add64(x64, x69, uint64(fiatScalarUint1(x84))) - x87 := (uint64(fiatScalarUint1(x86)) + uint64(fiatScalarUint1(x65))) - var x88 uint64 - var x89 uint64 - x89, x88 = bits.Mul64(x2, arg2[3]) - var x90 uint64 - var x91 uint64 - x91, x90 = bits.Mul64(x2, arg2[2]) - var x92 uint64 - var x93 uint64 - x93, x92 = bits.Mul64(x2, arg2[1]) - var x94 uint64 - var x95 uint64 - x95, x94 = bits.Mul64(x2, arg2[0]) - var x96 uint64 - var x97 uint64 - x96, x97 = bits.Add64(x95, x92, uint64(0x0)) - var x98 uint64 - var x99 uint64 - x98, x99 = bits.Add64(x93, x90, uint64(fiatScalarUint1(x97))) - var x100 uint64 - var x101 uint64 - x100, x101 = bits.Add64(x91, x88, uint64(fiatScalarUint1(x99))) - x102 := (uint64(fiatScalarUint1(x101)) + x89) - var x103 uint64 - var x104 uint64 - x103, x104 = bits.Add64(x79, x94, uint64(0x0)) - var x105 uint64 - var x106 uint64 - x105, x106 = bits.Add64(x81, x96, uint64(fiatScalarUint1(x104))) - var x107 uint64 - var x108 uint64 - x107, x108 = bits.Add64(x83, x98, uint64(fiatScalarUint1(x106))) - var x109 uint64 - var x110 uint64 - x109, x110 = bits.Add64(x85, x100, uint64(fiatScalarUint1(x108))) - var x111 uint64 - var x112 uint64 - x111, x112 = bits.Add64(x87, x102, uint64(fiatScalarUint1(x110))) - var x113 uint64 - _, x113 = bits.Mul64(x103, 0xd2b51da312547e1b) - var x115 uint64 - var x116 uint64 - x116, x115 = bits.Mul64(x113, 0x1000000000000000) - var x117 uint64 - var x118 uint64 - x118, x117 = bits.Mul64(x113, 0x14def9dea2f79cd6) - var x119 uint64 - var x120 uint64 - x120, x119 = bits.Mul64(x113, 0x5812631a5cf5d3ed) - var x121 uint64 - var x122 uint64 - x121, x122 = bits.Add64(x120, x117, uint64(0x0)) - x123 := (uint64(fiatScalarUint1(x122)) + x118) - var x125 uint64 - _, x125 = bits.Add64(x103, x119, uint64(0x0)) - var x126 uint64 - var x127 uint64 - x126, x127 = bits.Add64(x105, x121, uint64(fiatScalarUint1(x125))) - var x128 uint64 - var x129 uint64 - x128, x129 = bits.Add64(x107, x123, uint64(fiatScalarUint1(x127))) - var x130 uint64 - var x131 uint64 - x130, x131 = bits.Add64(x109, x115, uint64(fiatScalarUint1(x129))) - var x132 uint64 - var x133 uint64 - x132, x133 = bits.Add64(x111, x116, uint64(fiatScalarUint1(x131))) - x134 := (uint64(fiatScalarUint1(x133)) + uint64(fiatScalarUint1(x112))) - var x135 uint64 - var x136 uint64 - x136, x135 = bits.Mul64(x3, arg2[3]) - var x137 uint64 - var x138 uint64 - x138, x137 = bits.Mul64(x3, arg2[2]) - var x139 uint64 - var x140 uint64 - x140, x139 = bits.Mul64(x3, arg2[1]) - var x141 uint64 - var x142 uint64 - x142, x141 = bits.Mul64(x3, arg2[0]) - var x143 uint64 - var x144 uint64 - x143, x144 = bits.Add64(x142, x139, uint64(0x0)) - var x145 uint64 - var x146 uint64 - x145, x146 = bits.Add64(x140, x137, uint64(fiatScalarUint1(x144))) - var x147 uint64 - var x148 uint64 - x147, x148 = bits.Add64(x138, x135, uint64(fiatScalarUint1(x146))) - x149 := (uint64(fiatScalarUint1(x148)) + x136) - var x150 uint64 - var x151 uint64 - x150, x151 = bits.Add64(x126, x141, uint64(0x0)) - var x152 uint64 - var x153 uint64 - x152, x153 = bits.Add64(x128, x143, uint64(fiatScalarUint1(x151))) - var x154 uint64 - var x155 uint64 - x154, x155 = bits.Add64(x130, x145, uint64(fiatScalarUint1(x153))) - var x156 uint64 - 
var x157 uint64 - x156, x157 = bits.Add64(x132, x147, uint64(fiatScalarUint1(x155))) - var x158 uint64 - var x159 uint64 - x158, x159 = bits.Add64(x134, x149, uint64(fiatScalarUint1(x157))) - var x160 uint64 - _, x160 = bits.Mul64(x150, 0xd2b51da312547e1b) - var x162 uint64 - var x163 uint64 - x163, x162 = bits.Mul64(x160, 0x1000000000000000) - var x164 uint64 - var x165 uint64 - x165, x164 = bits.Mul64(x160, 0x14def9dea2f79cd6) - var x166 uint64 - var x167 uint64 - x167, x166 = bits.Mul64(x160, 0x5812631a5cf5d3ed) - var x168 uint64 - var x169 uint64 - x168, x169 = bits.Add64(x167, x164, uint64(0x0)) - x170 := (uint64(fiatScalarUint1(x169)) + x165) - var x172 uint64 - _, x172 = bits.Add64(x150, x166, uint64(0x0)) - var x173 uint64 - var x174 uint64 - x173, x174 = bits.Add64(x152, x168, uint64(fiatScalarUint1(x172))) - var x175 uint64 - var x176 uint64 - x175, x176 = bits.Add64(x154, x170, uint64(fiatScalarUint1(x174))) - var x177 uint64 - var x178 uint64 - x177, x178 = bits.Add64(x156, x162, uint64(fiatScalarUint1(x176))) - var x179 uint64 - var x180 uint64 - x179, x180 = bits.Add64(x158, x163, uint64(fiatScalarUint1(x178))) - x181 := (uint64(fiatScalarUint1(x180)) + uint64(fiatScalarUint1(x159))) - var x182 uint64 - var x183 uint64 - x182, x183 = bits.Sub64(x173, 0x5812631a5cf5d3ed, uint64(0x0)) - var x184 uint64 - var x185 uint64 - x184, x185 = bits.Sub64(x175, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x183))) - var x186 uint64 - var x187 uint64 - x186, x187 = bits.Sub64(x177, uint64(0x0), uint64(fiatScalarUint1(x185))) - var x188 uint64 - var x189 uint64 - x188, x189 = bits.Sub64(x179, 0x1000000000000000, uint64(fiatScalarUint1(x187))) - var x191 uint64 - _, x191 = bits.Sub64(x181, uint64(0x0), uint64(fiatScalarUint1(x189))) - var x192 uint64 - fiatScalarCmovznzU64(&x192, fiatScalarUint1(x191), x182, x173) - var x193 uint64 - fiatScalarCmovznzU64(&x193, fiatScalarUint1(x191), x184, x175) - var x194 uint64 - fiatScalarCmovznzU64(&x194, fiatScalarUint1(x191), x186, x177) - var x195 uint64 - fiatScalarCmovznzU64(&x195, fiatScalarUint1(x191), x188, x179) - out1[0] = x192 - out1[1] = x193 - out1[2] = x194 - out1[3] = x195 -} - -// fiatScalarAdd adds two field elements in the Montgomery domain. 
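// (Addition needs no Montgomery correction, since (a * R) + (b * R) =
// (a + b) * R: a plain add followed by a conditional subtraction of m
// suffices.)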
-// -// Preconditions: -// -// 0 ≤ eval arg1 < m -// 0 ≤ eval arg2 < m -// -// Postconditions: -// -// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) + eval (from_montgomery arg2)) mod m -// 0 ≤ eval out1 < m -func fiatScalarAdd(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) { - var x1 uint64 - var x2 uint64 - x1, x2 = bits.Add64(arg1[0], arg2[0], uint64(0x0)) - var x3 uint64 - var x4 uint64 - x3, x4 = bits.Add64(arg1[1], arg2[1], uint64(fiatScalarUint1(x2))) - var x5 uint64 - var x6 uint64 - x5, x6 = bits.Add64(arg1[2], arg2[2], uint64(fiatScalarUint1(x4))) - var x7 uint64 - var x8 uint64 - x7, x8 = bits.Add64(arg1[3], arg2[3], uint64(fiatScalarUint1(x6))) - var x9 uint64 - var x10 uint64 - x9, x10 = bits.Sub64(x1, 0x5812631a5cf5d3ed, uint64(0x0)) - var x11 uint64 - var x12 uint64 - x11, x12 = bits.Sub64(x3, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x10))) - var x13 uint64 - var x14 uint64 - x13, x14 = bits.Sub64(x5, uint64(0x0), uint64(fiatScalarUint1(x12))) - var x15 uint64 - var x16 uint64 - x15, x16 = bits.Sub64(x7, 0x1000000000000000, uint64(fiatScalarUint1(x14))) - var x18 uint64 - _, x18 = bits.Sub64(uint64(fiatScalarUint1(x8)), uint64(0x0), uint64(fiatScalarUint1(x16))) - var x19 uint64 - fiatScalarCmovznzU64(&x19, fiatScalarUint1(x18), x9, x1) - var x20 uint64 - fiatScalarCmovznzU64(&x20, fiatScalarUint1(x18), x11, x3) - var x21 uint64 - fiatScalarCmovznzU64(&x21, fiatScalarUint1(x18), x13, x5) - var x22 uint64 - fiatScalarCmovznzU64(&x22, fiatScalarUint1(x18), x15, x7) - out1[0] = x19 - out1[1] = x20 - out1[2] = x21 - out1[3] = x22 -} - -// fiatScalarSub subtracts two field elements in the Montgomery domain. -// -// Preconditions: -// -// 0 ≤ eval arg1 < m -// 0 ≤ eval arg2 < m -// -// Postconditions: -// -// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) - eval (from_montgomery arg2)) mod m -// 0 ≤ eval out1 < m -func fiatScalarSub(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) { - var x1 uint64 - var x2 uint64 - x1, x2 = bits.Sub64(arg1[0], arg2[0], uint64(0x0)) - var x3 uint64 - var x4 uint64 - x3, x4 = bits.Sub64(arg1[1], arg2[1], uint64(fiatScalarUint1(x2))) - var x5 uint64 - var x6 uint64 - x5, x6 = bits.Sub64(arg1[2], arg2[2], uint64(fiatScalarUint1(x4))) - var x7 uint64 - var x8 uint64 - x7, x8 = bits.Sub64(arg1[3], arg2[3], uint64(fiatScalarUint1(x6))) - var x9 uint64 - fiatScalarCmovznzU64(&x9, fiatScalarUint1(x8), uint64(0x0), 0xffffffffffffffff) - var x10 uint64 - var x11 uint64 - x10, x11 = bits.Add64(x1, (x9 & 0x5812631a5cf5d3ed), uint64(0x0)) - var x12 uint64 - var x13 uint64 - x12, x13 = bits.Add64(x3, (x9 & 0x14def9dea2f79cd6), uint64(fiatScalarUint1(x11))) - var x14 uint64 - var x15 uint64 - x14, x15 = bits.Add64(x5, uint64(0x0), uint64(fiatScalarUint1(x13))) - var x16 uint64 - x16, _ = bits.Add64(x7, (x9 & 0x1000000000000000), uint64(fiatScalarUint1(x15))) - out1[0] = x10 - out1[1] = x12 - out1[2] = x14 - out1[3] = x16 -} - -// fiatScalarOpp negates a field element in the Montgomery domain. 
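// (Computed as 0 - arg1 limb by limb; when the subtraction borrows, m is
// added back using the masked-constant conditional below.)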
-// -// Preconditions: -// -// 0 ≤ eval arg1 < m -// -// Postconditions: -// -// eval (from_montgomery out1) mod m = -eval (from_montgomery arg1) mod m -// 0 ≤ eval out1 < m -func fiatScalarOpp(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement) { - var x1 uint64 - var x2 uint64 - x1, x2 = bits.Sub64(uint64(0x0), arg1[0], uint64(0x0)) - var x3 uint64 - var x4 uint64 - x3, x4 = bits.Sub64(uint64(0x0), arg1[1], uint64(fiatScalarUint1(x2))) - var x5 uint64 - var x6 uint64 - x5, x6 = bits.Sub64(uint64(0x0), arg1[2], uint64(fiatScalarUint1(x4))) - var x7 uint64 - var x8 uint64 - x7, x8 = bits.Sub64(uint64(0x0), arg1[3], uint64(fiatScalarUint1(x6))) - var x9 uint64 - fiatScalarCmovznzU64(&x9, fiatScalarUint1(x8), uint64(0x0), 0xffffffffffffffff) - var x10 uint64 - var x11 uint64 - x10, x11 = bits.Add64(x1, (x9 & 0x5812631a5cf5d3ed), uint64(0x0)) - var x12 uint64 - var x13 uint64 - x12, x13 = bits.Add64(x3, (x9 & 0x14def9dea2f79cd6), uint64(fiatScalarUint1(x11))) - var x14 uint64 - var x15 uint64 - x14, x15 = bits.Add64(x5, uint64(0x0), uint64(fiatScalarUint1(x13))) - var x16 uint64 - x16, _ = bits.Add64(x7, (x9 & 0x1000000000000000), uint64(fiatScalarUint1(x15))) - out1[0] = x10 - out1[1] = x12 - out1[2] = x14 - out1[3] = x16 -} - -// fiatScalarNonzero outputs a single non-zero word if the input is non-zero and zero otherwise. -// -// Preconditions: -// -// 0 ≤ eval arg1 < m -// -// Postconditions: -// -// out1 = 0 ↔ eval (from_montgomery arg1) mod m = 0 -// -// Input Bounds: -// -// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]] -// -// Output Bounds: -// -// out1: [0x0 ~> 0xffffffffffffffff] -func fiatScalarNonzero(out1 *uint64, arg1 *[4]uint64) { - x1 := (arg1[0] | (arg1[1] | (arg1[2] | arg1[3]))) - *out1 = x1 -} - -// fiatScalarFromMontgomery translates a field element out of the Montgomery domain. 
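// (Each of the four reduction rounds below strips one factor of 2⁶⁴, so
// together they divide by R = 2²⁵⁶, matching the ((2^64)⁻¹ mod m)^4 factor in
// the postcondition.)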
-// -// Preconditions: -// -// 0 ≤ eval arg1 < m -// -// Postconditions: -// -// eval out1 mod m = (eval arg1 * ((2^64)⁻¹ mod m)^4) mod m -// 0 ≤ eval out1 < m -func fiatScalarFromMontgomery(out1 *fiatScalarNonMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement) { - x1 := arg1[0] - var x2 uint64 - _, x2 = bits.Mul64(x1, 0xd2b51da312547e1b) - var x4 uint64 - var x5 uint64 - x5, x4 = bits.Mul64(x2, 0x1000000000000000) - var x6 uint64 - var x7 uint64 - x7, x6 = bits.Mul64(x2, 0x14def9dea2f79cd6) - var x8 uint64 - var x9 uint64 - x9, x8 = bits.Mul64(x2, 0x5812631a5cf5d3ed) - var x10 uint64 - var x11 uint64 - x10, x11 = bits.Add64(x9, x6, uint64(0x0)) - var x13 uint64 - _, x13 = bits.Add64(x1, x8, uint64(0x0)) - var x14 uint64 - var x15 uint64 - x14, x15 = bits.Add64(uint64(0x0), x10, uint64(fiatScalarUint1(x13))) - var x16 uint64 - var x17 uint64 - x16, x17 = bits.Add64(x14, arg1[1], uint64(0x0)) - var x18 uint64 - _, x18 = bits.Mul64(x16, 0xd2b51da312547e1b) - var x20 uint64 - var x21 uint64 - x21, x20 = bits.Mul64(x18, 0x1000000000000000) - var x22 uint64 - var x23 uint64 - x23, x22 = bits.Mul64(x18, 0x14def9dea2f79cd6) - var x24 uint64 - var x25 uint64 - x25, x24 = bits.Mul64(x18, 0x5812631a5cf5d3ed) - var x26 uint64 - var x27 uint64 - x26, x27 = bits.Add64(x25, x22, uint64(0x0)) - var x29 uint64 - _, x29 = bits.Add64(x16, x24, uint64(0x0)) - var x30 uint64 - var x31 uint64 - x30, x31 = bits.Add64((uint64(fiatScalarUint1(x17)) + (uint64(fiatScalarUint1(x15)) + (uint64(fiatScalarUint1(x11)) + x7))), x26, uint64(fiatScalarUint1(x29))) - var x32 uint64 - var x33 uint64 - x32, x33 = bits.Add64(x4, (uint64(fiatScalarUint1(x27)) + x23), uint64(fiatScalarUint1(x31))) - var x34 uint64 - var x35 uint64 - x34, x35 = bits.Add64(x5, x20, uint64(fiatScalarUint1(x33))) - var x36 uint64 - var x37 uint64 - x36, x37 = bits.Add64(x30, arg1[2], uint64(0x0)) - var x38 uint64 - var x39 uint64 - x38, x39 = bits.Add64(x32, uint64(0x0), uint64(fiatScalarUint1(x37))) - var x40 uint64 - var x41 uint64 - x40, x41 = bits.Add64(x34, uint64(0x0), uint64(fiatScalarUint1(x39))) - var x42 uint64 - _, x42 = bits.Mul64(x36, 0xd2b51da312547e1b) - var x44 uint64 - var x45 uint64 - x45, x44 = bits.Mul64(x42, 0x1000000000000000) - var x46 uint64 - var x47 uint64 - x47, x46 = bits.Mul64(x42, 0x14def9dea2f79cd6) - var x48 uint64 - var x49 uint64 - x49, x48 = bits.Mul64(x42, 0x5812631a5cf5d3ed) - var x50 uint64 - var x51 uint64 - x50, x51 = bits.Add64(x49, x46, uint64(0x0)) - var x53 uint64 - _, x53 = bits.Add64(x36, x48, uint64(0x0)) - var x54 uint64 - var x55 uint64 - x54, x55 = bits.Add64(x38, x50, uint64(fiatScalarUint1(x53))) - var x56 uint64 - var x57 uint64 - x56, x57 = bits.Add64(x40, (uint64(fiatScalarUint1(x51)) + x47), uint64(fiatScalarUint1(x55))) - var x58 uint64 - var x59 uint64 - x58, x59 = bits.Add64((uint64(fiatScalarUint1(x41)) + (uint64(fiatScalarUint1(x35)) + x21)), x44, uint64(fiatScalarUint1(x57))) - var x60 uint64 - var x61 uint64 - x60, x61 = bits.Add64(x54, arg1[3], uint64(0x0)) - var x62 uint64 - var x63 uint64 - x62, x63 = bits.Add64(x56, uint64(0x0), uint64(fiatScalarUint1(x61))) - var x64 uint64 - var x65 uint64 - x64, x65 = bits.Add64(x58, uint64(0x0), uint64(fiatScalarUint1(x63))) - var x66 uint64 - _, x66 = bits.Mul64(x60, 0xd2b51da312547e1b) - var x68 uint64 - var x69 uint64 - x69, x68 = bits.Mul64(x66, 0x1000000000000000) - var x70 uint64 - var x71 uint64 - x71, x70 = bits.Mul64(x66, 0x14def9dea2f79cd6) - var x72 uint64 - var x73 uint64 - x73, x72 = bits.Mul64(x66, 
0x5812631a5cf5d3ed) - var x74 uint64 - var x75 uint64 - x74, x75 = bits.Add64(x73, x70, uint64(0x0)) - var x77 uint64 - _, x77 = bits.Add64(x60, x72, uint64(0x0)) - var x78 uint64 - var x79 uint64 - x78, x79 = bits.Add64(x62, x74, uint64(fiatScalarUint1(x77))) - var x80 uint64 - var x81 uint64 - x80, x81 = bits.Add64(x64, (uint64(fiatScalarUint1(x75)) + x71), uint64(fiatScalarUint1(x79))) - var x82 uint64 - var x83 uint64 - x82, x83 = bits.Add64((uint64(fiatScalarUint1(x65)) + (uint64(fiatScalarUint1(x59)) + x45)), x68, uint64(fiatScalarUint1(x81))) - x84 := (uint64(fiatScalarUint1(x83)) + x69) - var x85 uint64 - var x86 uint64 - x85, x86 = bits.Sub64(x78, 0x5812631a5cf5d3ed, uint64(0x0)) - var x87 uint64 - var x88 uint64 - x87, x88 = bits.Sub64(x80, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x86))) - var x89 uint64 - var x90 uint64 - x89, x90 = bits.Sub64(x82, uint64(0x0), uint64(fiatScalarUint1(x88))) - var x91 uint64 - var x92 uint64 - x91, x92 = bits.Sub64(x84, 0x1000000000000000, uint64(fiatScalarUint1(x90))) - var x94 uint64 - _, x94 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(fiatScalarUint1(x92))) - var x95 uint64 - fiatScalarCmovznzU64(&x95, fiatScalarUint1(x94), x85, x78) - var x96 uint64 - fiatScalarCmovznzU64(&x96, fiatScalarUint1(x94), x87, x80) - var x97 uint64 - fiatScalarCmovznzU64(&x97, fiatScalarUint1(x94), x89, x82) - var x98 uint64 - fiatScalarCmovznzU64(&x98, fiatScalarUint1(x94), x91, x84) - out1[0] = x95 - out1[1] = x96 - out1[2] = x97 - out1[3] = x98 -} - -// fiatScalarToMontgomery translates a field element into the Montgomery domain. -// -// Preconditions: -// -// 0 ≤ eval arg1 < m -// -// Postconditions: -// -// eval (from_montgomery out1) mod m = eval arg1 mod m -// 0 ≤ eval out1 < m -func fiatScalarToMontgomery(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarNonMontgomeryDomainFieldElement) { - x1 := arg1[1] - x2 := arg1[2] - x3 := arg1[3] - x4 := arg1[0] - var x5 uint64 - var x6 uint64 - x6, x5 = bits.Mul64(x4, 0x399411b7c309a3d) - var x7 uint64 - var x8 uint64 - x8, x7 = bits.Mul64(x4, 0xceec73d217f5be65) - var x9 uint64 - var x10 uint64 - x10, x9 = bits.Mul64(x4, 0xd00e1ba768859347) - var x11 uint64 - var x12 uint64 - x12, x11 = bits.Mul64(x4, 0xa40611e3449c0f01) - var x13 uint64 - var x14 uint64 - x13, x14 = bits.Add64(x12, x9, uint64(0x0)) - var x15 uint64 - var x16 uint64 - x15, x16 = bits.Add64(x10, x7, uint64(fiatScalarUint1(x14))) - var x17 uint64 - var x18 uint64 - x17, x18 = bits.Add64(x8, x5, uint64(fiatScalarUint1(x16))) - var x19 uint64 - _, x19 = bits.Mul64(x11, 0xd2b51da312547e1b) - var x21 uint64 - var x22 uint64 - x22, x21 = bits.Mul64(x19, 0x1000000000000000) - var x23 uint64 - var x24 uint64 - x24, x23 = bits.Mul64(x19, 0x14def9dea2f79cd6) - var x25 uint64 - var x26 uint64 - x26, x25 = bits.Mul64(x19, 0x5812631a5cf5d3ed) - var x27 uint64 - var x28 uint64 - x27, x28 = bits.Add64(x26, x23, uint64(0x0)) - var x30 uint64 - _, x30 = bits.Add64(x11, x25, uint64(0x0)) - var x31 uint64 - var x32 uint64 - x31, x32 = bits.Add64(x13, x27, uint64(fiatScalarUint1(x30))) - var x33 uint64 - var x34 uint64 - x33, x34 = bits.Add64(x15, (uint64(fiatScalarUint1(x28)) + x24), uint64(fiatScalarUint1(x32))) - var x35 uint64 - var x36 uint64 - x35, x36 = bits.Add64(x17, x21, uint64(fiatScalarUint1(x34))) - var x37 uint64 - var x38 uint64 - x38, x37 = bits.Mul64(x1, 0x399411b7c309a3d) - var x39 uint64 - var x40 uint64 - x40, x39 = bits.Mul64(x1, 0xceec73d217f5be65) - var x41 uint64 - var x42 uint64 - x42, x41 = bits.Mul64(x1, 0xd00e1ba768859347) - 
var x43 uint64 - var x44 uint64 - x44, x43 = bits.Mul64(x1, 0xa40611e3449c0f01) - var x45 uint64 - var x46 uint64 - x45, x46 = bits.Add64(x44, x41, uint64(0x0)) - var x47 uint64 - var x48 uint64 - x47, x48 = bits.Add64(x42, x39, uint64(fiatScalarUint1(x46))) - var x49 uint64 - var x50 uint64 - x49, x50 = bits.Add64(x40, x37, uint64(fiatScalarUint1(x48))) - var x51 uint64 - var x52 uint64 - x51, x52 = bits.Add64(x31, x43, uint64(0x0)) - var x53 uint64 - var x54 uint64 - x53, x54 = bits.Add64(x33, x45, uint64(fiatScalarUint1(x52))) - var x55 uint64 - var x56 uint64 - x55, x56 = bits.Add64(x35, x47, uint64(fiatScalarUint1(x54))) - var x57 uint64 - var x58 uint64 - x57, x58 = bits.Add64(((uint64(fiatScalarUint1(x36)) + (uint64(fiatScalarUint1(x18)) + x6)) + x22), x49, uint64(fiatScalarUint1(x56))) - var x59 uint64 - _, x59 = bits.Mul64(x51, 0xd2b51da312547e1b) - var x61 uint64 - var x62 uint64 - x62, x61 = bits.Mul64(x59, 0x1000000000000000) - var x63 uint64 - var x64 uint64 - x64, x63 = bits.Mul64(x59, 0x14def9dea2f79cd6) - var x65 uint64 - var x66 uint64 - x66, x65 = bits.Mul64(x59, 0x5812631a5cf5d3ed) - var x67 uint64 - var x68 uint64 - x67, x68 = bits.Add64(x66, x63, uint64(0x0)) - var x70 uint64 - _, x70 = bits.Add64(x51, x65, uint64(0x0)) - var x71 uint64 - var x72 uint64 - x71, x72 = bits.Add64(x53, x67, uint64(fiatScalarUint1(x70))) - var x73 uint64 - var x74 uint64 - x73, x74 = bits.Add64(x55, (uint64(fiatScalarUint1(x68)) + x64), uint64(fiatScalarUint1(x72))) - var x75 uint64 - var x76 uint64 - x75, x76 = bits.Add64(x57, x61, uint64(fiatScalarUint1(x74))) - var x77 uint64 - var x78 uint64 - x78, x77 = bits.Mul64(x2, 0x399411b7c309a3d) - var x79 uint64 - var x80 uint64 - x80, x79 = bits.Mul64(x2, 0xceec73d217f5be65) - var x81 uint64 - var x82 uint64 - x82, x81 = bits.Mul64(x2, 0xd00e1ba768859347) - var x83 uint64 - var x84 uint64 - x84, x83 = bits.Mul64(x2, 0xa40611e3449c0f01) - var x85 uint64 - var x86 uint64 - x85, x86 = bits.Add64(x84, x81, uint64(0x0)) - var x87 uint64 - var x88 uint64 - x87, x88 = bits.Add64(x82, x79, uint64(fiatScalarUint1(x86))) - var x89 uint64 - var x90 uint64 - x89, x90 = bits.Add64(x80, x77, uint64(fiatScalarUint1(x88))) - var x91 uint64 - var x92 uint64 - x91, x92 = bits.Add64(x71, x83, uint64(0x0)) - var x93 uint64 - var x94 uint64 - x93, x94 = bits.Add64(x73, x85, uint64(fiatScalarUint1(x92))) - var x95 uint64 - var x96 uint64 - x95, x96 = bits.Add64(x75, x87, uint64(fiatScalarUint1(x94))) - var x97 uint64 - var x98 uint64 - x97, x98 = bits.Add64(((uint64(fiatScalarUint1(x76)) + (uint64(fiatScalarUint1(x58)) + (uint64(fiatScalarUint1(x50)) + x38))) + x62), x89, uint64(fiatScalarUint1(x96))) - var x99 uint64 - _, x99 = bits.Mul64(x91, 0xd2b51da312547e1b) - var x101 uint64 - var x102 uint64 - x102, x101 = bits.Mul64(x99, 0x1000000000000000) - var x103 uint64 - var x104 uint64 - x104, x103 = bits.Mul64(x99, 0x14def9dea2f79cd6) - var x105 uint64 - var x106 uint64 - x106, x105 = bits.Mul64(x99, 0x5812631a5cf5d3ed) - var x107 uint64 - var x108 uint64 - x107, x108 = bits.Add64(x106, x103, uint64(0x0)) - var x110 uint64 - _, x110 = bits.Add64(x91, x105, uint64(0x0)) - var x111 uint64 - var x112 uint64 - x111, x112 = bits.Add64(x93, x107, uint64(fiatScalarUint1(x110))) - var x113 uint64 - var x114 uint64 - x113, x114 = bits.Add64(x95, (uint64(fiatScalarUint1(x108)) + x104), uint64(fiatScalarUint1(x112))) - var x115 uint64 - var x116 uint64 - x115, x116 = bits.Add64(x97, x101, uint64(fiatScalarUint1(x114))) - var x117 uint64 - var x118 uint64 - x118, x117 = 
bits.Mul64(x3, 0x399411b7c309a3d) - var x119 uint64 - var x120 uint64 - x120, x119 = bits.Mul64(x3, 0xceec73d217f5be65) - var x121 uint64 - var x122 uint64 - x122, x121 = bits.Mul64(x3, 0xd00e1ba768859347) - var x123 uint64 - var x124 uint64 - x124, x123 = bits.Mul64(x3, 0xa40611e3449c0f01) - var x125 uint64 - var x126 uint64 - x125, x126 = bits.Add64(x124, x121, uint64(0x0)) - var x127 uint64 - var x128 uint64 - x127, x128 = bits.Add64(x122, x119, uint64(fiatScalarUint1(x126))) - var x129 uint64 - var x130 uint64 - x129, x130 = bits.Add64(x120, x117, uint64(fiatScalarUint1(x128))) - var x131 uint64 - var x132 uint64 - x131, x132 = bits.Add64(x111, x123, uint64(0x0)) - var x133 uint64 - var x134 uint64 - x133, x134 = bits.Add64(x113, x125, uint64(fiatScalarUint1(x132))) - var x135 uint64 - var x136 uint64 - x135, x136 = bits.Add64(x115, x127, uint64(fiatScalarUint1(x134))) - var x137 uint64 - var x138 uint64 - x137, x138 = bits.Add64(((uint64(fiatScalarUint1(x116)) + (uint64(fiatScalarUint1(x98)) + (uint64(fiatScalarUint1(x90)) + x78))) + x102), x129, uint64(fiatScalarUint1(x136))) - var x139 uint64 - _, x139 = bits.Mul64(x131, 0xd2b51da312547e1b) - var x141 uint64 - var x142 uint64 - x142, x141 = bits.Mul64(x139, 0x1000000000000000) - var x143 uint64 - var x144 uint64 - x144, x143 = bits.Mul64(x139, 0x14def9dea2f79cd6) - var x145 uint64 - var x146 uint64 - x146, x145 = bits.Mul64(x139, 0x5812631a5cf5d3ed) - var x147 uint64 - var x148 uint64 - x147, x148 = bits.Add64(x146, x143, uint64(0x0)) - var x150 uint64 - _, x150 = bits.Add64(x131, x145, uint64(0x0)) - var x151 uint64 - var x152 uint64 - x151, x152 = bits.Add64(x133, x147, uint64(fiatScalarUint1(x150))) - var x153 uint64 - var x154 uint64 - x153, x154 = bits.Add64(x135, (uint64(fiatScalarUint1(x148)) + x144), uint64(fiatScalarUint1(x152))) - var x155 uint64 - var x156 uint64 - x155, x156 = bits.Add64(x137, x141, uint64(fiatScalarUint1(x154))) - x157 := ((uint64(fiatScalarUint1(x156)) + (uint64(fiatScalarUint1(x138)) + (uint64(fiatScalarUint1(x130)) + x118))) + x142) - var x158 uint64 - var x159 uint64 - x158, x159 = bits.Sub64(x151, 0x5812631a5cf5d3ed, uint64(0x0)) - var x160 uint64 - var x161 uint64 - x160, x161 = bits.Sub64(x153, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x159))) - var x162 uint64 - var x163 uint64 - x162, x163 = bits.Sub64(x155, uint64(0x0), uint64(fiatScalarUint1(x161))) - var x164 uint64 - var x165 uint64 - x164, x165 = bits.Sub64(x157, 0x1000000000000000, uint64(fiatScalarUint1(x163))) - var x167 uint64 - _, x167 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(fiatScalarUint1(x165))) - var x168 uint64 - fiatScalarCmovznzU64(&x168, fiatScalarUint1(x167), x158, x151) - var x169 uint64 - fiatScalarCmovznzU64(&x169, fiatScalarUint1(x167), x160, x153) - var x170 uint64 - fiatScalarCmovznzU64(&x170, fiatScalarUint1(x167), x162, x155) - var x171 uint64 - fiatScalarCmovznzU64(&x171, fiatScalarUint1(x167), x164, x157) - out1[0] = x168 - out1[1] = x169 - out1[2] = x170 - out1[3] = x171 -} - -// fiatScalarToBytes serializes a field element NOT in the Montgomery domain to bytes in little-endian order. 
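// (Plain little-endian packing: out1[0] is the least significant byte, so for
// example the scalar 1 serializes as 0x01 followed by 31 zero bytes.)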
-// -// Preconditions: -// -// 0 ≤ eval arg1 < m -// -// Postconditions: -// -// out1 = map (λ x, ⌊((eval arg1 mod m) mod 2^(8 * (x + 1))) / 2^(8 * x)⌋) [0..31] -// -// Input Bounds: -// -// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0x1fffffffffffffff]] -// -// Output Bounds: -// -// out1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x1f]] -func fiatScalarToBytes(out1 *[32]uint8, arg1 *[4]uint64) { - x1 := arg1[3] - x2 := arg1[2] - x3 := arg1[1] - x4 := arg1[0] - x5 := (uint8(x4) & 0xff) - x6 := (x4 >> 8) - x7 := (uint8(x6) & 0xff) - x8 := (x6 >> 8) - x9 := (uint8(x8) & 0xff) - x10 := (x8 >> 8) - x11 := (uint8(x10) & 0xff) - x12 := (x10 >> 8) - x13 := (uint8(x12) & 0xff) - x14 := (x12 >> 8) - x15 := (uint8(x14) & 0xff) - x16 := (x14 >> 8) - x17 := (uint8(x16) & 0xff) - x18 := uint8((x16 >> 8)) - x19 := (uint8(x3) & 0xff) - x20 := (x3 >> 8) - x21 := (uint8(x20) & 0xff) - x22 := (x20 >> 8) - x23 := (uint8(x22) & 0xff) - x24 := (x22 >> 8) - x25 := (uint8(x24) & 0xff) - x26 := (x24 >> 8) - x27 := (uint8(x26) & 0xff) - x28 := (x26 >> 8) - x29 := (uint8(x28) & 0xff) - x30 := (x28 >> 8) - x31 := (uint8(x30) & 0xff) - x32 := uint8((x30 >> 8)) - x33 := (uint8(x2) & 0xff) - x34 := (x2 >> 8) - x35 := (uint8(x34) & 0xff) - x36 := (x34 >> 8) - x37 := (uint8(x36) & 0xff) - x38 := (x36 >> 8) - x39 := (uint8(x38) & 0xff) - x40 := (x38 >> 8) - x41 := (uint8(x40) & 0xff) - x42 := (x40 >> 8) - x43 := (uint8(x42) & 0xff) - x44 := (x42 >> 8) - x45 := (uint8(x44) & 0xff) - x46 := uint8((x44 >> 8)) - x47 := (uint8(x1) & 0xff) - x48 := (x1 >> 8) - x49 := (uint8(x48) & 0xff) - x50 := (x48 >> 8) - x51 := (uint8(x50) & 0xff) - x52 := (x50 >> 8) - x53 := (uint8(x52) & 0xff) - x54 := (x52 >> 8) - x55 := (uint8(x54) & 0xff) - x56 := (x54 >> 8) - x57 := (uint8(x56) & 0xff) - x58 := (x56 >> 8) - x59 := (uint8(x58) & 0xff) - x60 := uint8((x58 >> 8)) - out1[0] = x5 - out1[1] = x7 - out1[2] = x9 - out1[3] = x11 - out1[4] = x13 - out1[5] = x15 - out1[6] = x17 - out1[7] = x18 - out1[8] = x19 - out1[9] = x21 - out1[10] = x23 - out1[11] = x25 - out1[12] = x27 - out1[13] = x29 - out1[14] = x31 - out1[15] = x32 - out1[16] = x33 - out1[17] = x35 - out1[18] = x37 - out1[19] = x39 - out1[20] = x41 - out1[21] = x43 - out1[22] = x45 - out1[23] = x46 - out1[24] = x47 - out1[25] = x49 - out1[26] = x51 - out1[27] = x53 - out1[28] = x55 - out1[29] = x57 - out1[30] = x59 - out1[31] = x60 -} - -// fiatScalarFromBytes deserializes a field element NOT in the Montgomery domain from bytes in little-endian order. 
-// -// Preconditions: -// -// 0 ≤ bytes_eval arg1 < m -// -// Postconditions: -// -// eval out1 mod m = bytes_eval arg1 mod m -// 0 ≤ eval out1 < m -// -// Input Bounds: -// -// arg1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x1f]] -// -// Output Bounds: -// -// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0x1fffffffffffffff]] -func fiatScalarFromBytes(out1 *[4]uint64, arg1 *[32]uint8) { - x1 := (uint64(arg1[31]) << 56) - x2 := (uint64(arg1[30]) << 48) - x3 := (uint64(arg1[29]) << 40) - x4 := (uint64(arg1[28]) << 32) - x5 := (uint64(arg1[27]) << 24) - x6 := (uint64(arg1[26]) << 16) - x7 := (uint64(arg1[25]) << 8) - x8 := arg1[24] - x9 := (uint64(arg1[23]) << 56) - x10 := (uint64(arg1[22]) << 48) - x11 := (uint64(arg1[21]) << 40) - x12 := (uint64(arg1[20]) << 32) - x13 := (uint64(arg1[19]) << 24) - x14 := (uint64(arg1[18]) << 16) - x15 := (uint64(arg1[17]) << 8) - x16 := arg1[16] - x17 := (uint64(arg1[15]) << 56) - x18 := (uint64(arg1[14]) << 48) - x19 := (uint64(arg1[13]) << 40) - x20 := (uint64(arg1[12]) << 32) - x21 := (uint64(arg1[11]) << 24) - x22 := (uint64(arg1[10]) << 16) - x23 := (uint64(arg1[9]) << 8) - x24 := arg1[8] - x25 := (uint64(arg1[7]) << 56) - x26 := (uint64(arg1[6]) << 48) - x27 := (uint64(arg1[5]) << 40) - x28 := (uint64(arg1[4]) << 32) - x29 := (uint64(arg1[3]) << 24) - x30 := (uint64(arg1[2]) << 16) - x31 := (uint64(arg1[1]) << 8) - x32 := arg1[0] - x33 := (x31 + uint64(x32)) - x34 := (x30 + x33) - x35 := (x29 + x34) - x36 := (x28 + x35) - x37 := (x27 + x36) - x38 := (x26 + x37) - x39 := (x25 + x38) - x40 := (x23 + uint64(x24)) - x41 := (x22 + x40) - x42 := (x21 + x41) - x43 := (x20 + x42) - x44 := (x19 + x43) - x45 := (x18 + x44) - x46 := (x17 + x45) - x47 := (x15 + uint64(x16)) - x48 := (x14 + x47) - x49 := (x13 + x48) - x50 := (x12 + x49) - x51 := (x11 + x50) - x52 := (x10 + x51) - x53 := (x9 + x52) - x54 := (x7 + uint64(x8)) - x55 := (x6 + x54) - x56 := (x5 + x55) - x57 := (x4 + x56) - x58 := (x3 + x57) - x59 := (x2 + x58) - x60 := (x1 + x59) - out1[0] = x39 - out1[1] = x46 - out1[2] = x53 - out1[3] = x60 -} diff --git a/vendor/filippo.io/edwards25519/scalarmult.go b/vendor/filippo.io/edwards25519/scalarmult.go deleted file mode 100644 index f7ca3cef99..0000000000 --- a/vendor/filippo.io/edwards25519/scalarmult.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright (c) 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package edwards25519 - -import "sync" - -// basepointTable is a set of 32 affineLookupTables, where table i is generated -// from 256i * basepoint. It is precomputed the first time it's used. 
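// Table i therefore holds multiples of 2^(8i) * B = 256^i * B, one table for
// each pair of radix-16 digits consumed by ScalarBaseMult below.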
-func basepointTable() *[32]affineLookupTable { - basepointTablePrecomp.initOnce.Do(func() { - p := NewGeneratorPoint() - for i := 0; i < 32; i++ { - basepointTablePrecomp.table[i].FromP3(p) - for j := 0; j < 8; j++ { - p.Add(p, p) - } - } - }) - return &basepointTablePrecomp.table -} - -var basepointTablePrecomp struct { - table [32]affineLookupTable - initOnce sync.Once -} - -// ScalarBaseMult sets v = x * B, where B is the canonical generator, and -// returns v. -// -// The scalar multiplication is done in constant time. -func (v *Point) ScalarBaseMult(x *Scalar) *Point { - basepointTable := basepointTable() - - // Write x = sum(x_i * 16^i) so x*B = sum( B*x_i*16^i ) - // as described in the Ed25519 paper - // - // Group even and odd coefficients - // x*B = x_0*16^0*B + x_2*16^2*B + ... + x_62*16^62*B - // + x_1*16^1*B + x_3*16^3*B + ... + x_63*16^63*B - // x*B = x_0*16^0*B + x_2*16^2*B + ... + x_62*16^62*B - // + 16*( x_1*16^0*B + x_3*16^2*B + ... + x_63*16^62*B) - // - // We use a lookup table for each i to get x_i*16^(2*i)*B - // and do four doublings to multiply by 16. - digits := x.signedRadix16() - - multiple := &affineCached{} - tmp1 := &projP1xP1{} - tmp2 := &projP2{} - - // Accumulate the odd components first - v.Set(NewIdentityPoint()) - for i := 1; i < 64; i += 2 { - basepointTable[i/2].SelectInto(multiple, digits[i]) - tmp1.AddAffine(v, multiple) - v.fromP1xP1(tmp1) - } - - // Multiply by 16 - tmp2.FromP3(v) // tmp2 = v in P2 coords - tmp1.Double(tmp2) // tmp1 = 2*v in P1xP1 coords - tmp2.FromP1xP1(tmp1) // tmp2 = 2*v in P2 coords - tmp1.Double(tmp2) // tmp1 = 4*v in P1xP1 coords - tmp2.FromP1xP1(tmp1) // tmp2 = 4*v in P2 coords - tmp1.Double(tmp2) // tmp1 = 8*v in P1xP1 coords - tmp2.FromP1xP1(tmp1) // tmp2 = 8*v in P2 coords - tmp1.Double(tmp2) // tmp1 = 16*v in P1xP1 coords - v.fromP1xP1(tmp1) // now v = 16*(odd components) - - // Accumulate the even components - for i := 0; i < 64; i += 2 { - basepointTable[i/2].SelectInto(multiple, digits[i]) - tmp1.AddAffine(v, multiple) - v.fromP1xP1(tmp1) - } - - return v -} - -// ScalarMult sets v = x * q, and returns v. -// -// The scalar multiplication is done in constant time. -func (v *Point) ScalarMult(x *Scalar, q *Point) *Point { - checkInitialized(q) - - var table projLookupTable - table.FromP3(q) - - // Write x = sum(x_i * 16^i) - // so x*Q = sum( Q*x_i*16^i ) - // = Q*x_0 + 16*(Q*x_1 + 16*( ... + Q*x_63) ... 
) - // <------compute inside out--------- - // - // We use the lookup table to get the x_i*Q values - // and do four doublings to compute 16*Q - digits := x.signedRadix16() - - // Unwrap first loop iteration to save computing 16*identity - multiple := &projCached{} - tmp1 := &projP1xP1{} - tmp2 := &projP2{} - table.SelectInto(multiple, digits[63]) - - v.Set(NewIdentityPoint()) - tmp1.Add(v, multiple) // tmp1 = x_63*Q in P1xP1 coords - for i := 62; i >= 0; i-- { - tmp2.FromP1xP1(tmp1) // tmp2 = (prev) in P2 coords - tmp1.Double(tmp2) // tmp1 = 2*(prev) in P1xP1 coords - tmp2.FromP1xP1(tmp1) // tmp2 = 2*(prev) in P2 coords - tmp1.Double(tmp2) // tmp1 = 4*(prev) in P1xP1 coords - tmp2.FromP1xP1(tmp1) // tmp2 = 4*(prev) in P2 coords - tmp1.Double(tmp2) // tmp1 = 8*(prev) in P1xP1 coords - tmp2.FromP1xP1(tmp1) // tmp2 = 8*(prev) in P2 coords - tmp1.Double(tmp2) // tmp1 = 16*(prev) in P1xP1 coords - v.fromP1xP1(tmp1) // v = 16*(prev) in P3 coords - table.SelectInto(multiple, digits[i]) - tmp1.Add(v, multiple) // tmp1 = x_i*Q + 16*(prev) in P1xP1 coords - } - v.fromP1xP1(tmp1) - return v -} - -// basepointNafTable is the nafLookupTable8 for the basepoint. -// It is precomputed the first time it's used. -func basepointNafTable() *nafLookupTable8 { - basepointNafTablePrecomp.initOnce.Do(func() { - basepointNafTablePrecomp.table.FromP3(NewGeneratorPoint()) - }) - return &basepointNafTablePrecomp.table -} - -var basepointNafTablePrecomp struct { - table nafLookupTable8 - initOnce sync.Once -} - -// VarTimeDoubleScalarBaseMult sets v = a * A + b * B, where B is the canonical -// generator, and returns v. -// -// Execution time depends on the inputs. -func (v *Point) VarTimeDoubleScalarBaseMult(a *Scalar, A *Point, b *Scalar) *Point { - checkInitialized(A) - - // Similarly to the single variable-base approach, we compute - // digits and use them with a lookup table. However, because - // we are allowed to do variable-time operations, we don't - // need constant-time lookups or constant-time digit - // computations. - // - // So we use a non-adjacent form of some width w instead of - // radix 16. This is like a binary representation (one digit - // for each binary place) but we allow the digits to grow in - // magnitude up to 2^{w-1} so that the nonzero digits are as - // sparse as possible. Intuitively, this "condenses" the - // "mass" of the scalar onto sparse coefficients (meaning - // fewer additions). - - basepointNafTable := basepointNafTable() - var aTable nafLookupTable5 - aTable.FromP3(A) - // Because the basepoint is fixed, we can use a wider NAF - // corresponding to a bigger table. - aNaf := a.nonAdjacentForm(5) - bNaf := b.nonAdjacentForm(8) - - // Find the first nonzero coefficient. - i := 255 - for j := i; j >= 0; j-- { - if aNaf[j] != 0 || bNaf[j] != 0 { - break - } - } - - multA := &projCached{} - multB := &affineCached{} - tmp1 := &projP1xP1{} - tmp2 := &projP2{} - tmp2.Zero() - - // Move from high to low bits, doubling the accumulator - // at each iteration and checking whether there is a nonzero - // coefficient to look up a multiple of. - for ; i >= 0; i-- { - tmp1.Double(tmp2) - - // Only update v if we have a nonzero coeff to add in. 
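// (These data-dependent branches are what make the function variable-time;
// that is acceptable here because the inputs are typically public, as in
// signature verification.)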
- if aNaf[i] > 0 { - v.fromP1xP1(tmp1) - aTable.SelectInto(multA, aNaf[i]) - tmp1.Add(v, multA) - } else if aNaf[i] < 0 { - v.fromP1xP1(tmp1) - aTable.SelectInto(multA, -aNaf[i]) - tmp1.Sub(v, multA) - } - - if bNaf[i] > 0 { - v.fromP1xP1(tmp1) - basepointNafTable.SelectInto(multB, bNaf[i]) - tmp1.AddAffine(v, multB) - } else if bNaf[i] < 0 { - v.fromP1xP1(tmp1) - basepointNafTable.SelectInto(multB, -bNaf[i]) - tmp1.SubAffine(v, multB) - } - - tmp2.FromP1xP1(tmp1) - } - - v.fromP2(tmp2) - return v -} diff --git a/vendor/filippo.io/edwards25519/tables.go b/vendor/filippo.io/edwards25519/tables.go deleted file mode 100644 index 83234bbc0f..0000000000 --- a/vendor/filippo.io/edwards25519/tables.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright (c) 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package edwards25519 - -import ( - "crypto/subtle" -) - -// A dynamic lookup table for variable-base, constant-time scalar muls. -type projLookupTable struct { - points [8]projCached -} - -// A precomputed lookup table for fixed-base, constant-time scalar muls. -type affineLookupTable struct { - points [8]affineCached -} - -// A dynamic lookup table for variable-base, variable-time scalar muls. -type nafLookupTable5 struct { - points [8]projCached -} - -// A precomputed lookup table for fixed-base, variable-time scalar muls. -type nafLookupTable8 struct { - points [64]affineCached -} - -// Constructors. - -// Builds a lookup table at runtime. Fast. -func (v *projLookupTable) FromP3(q *Point) { - // Goal: v.points[i] = (i+1)*Q, i.e., Q, 2Q, ..., 8Q - // This allows lookup of -8Q, ..., -Q, 0, Q, ..., 8Q - v.points[0].FromP3(q) - tmpP3 := Point{} - tmpP1xP1 := projP1xP1{} - for i := 0; i < 7; i++ { - // Compute (i+1)*Q as Q + i*Q and convert to a projCached - // This is needlessly complicated because the API has explicit - // receivers instead of creating stack objects and relying on RVO - v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(q, &v.points[i]))) - } -} - -// This is not optimised for speed; fixed-base tables should be precomputed. -func (v *affineLookupTable) FromP3(q *Point) { - // Goal: v.points[i] = (i+1)*Q, i.e., Q, 2Q, ..., 8Q - // This allows lookup of -8Q, ..., -Q, 0, Q, ..., 8Q - v.points[0].FromP3(q) - tmpP3 := Point{} - tmpP1xP1 := projP1xP1{} - for i := 0; i < 7; i++ { - // Compute (i+1)*Q as Q + i*Q and convert to affineCached - v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(q, &v.points[i]))) - } -} - -// Builds a lookup table at runtime. Fast. -func (v *nafLookupTable5) FromP3(q *Point) { - // Goal: v.points[i] = (2*i+1)*Q, i.e., Q, 3Q, 5Q, ..., 15Q - // This allows lookup of -15Q, ..., -3Q, -Q, 0, Q, 3Q, ..., 15Q - v.points[0].FromP3(q) - q2 := Point{} - q2.Add(q, q) - tmpP3 := Point{} - tmpP1xP1 := projP1xP1{} - for i := 0; i < 7; i++ { - v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(&q2, &v.points[i]))) - } -} - -// This is not optimised for speed; fixed-base tables should be precomputed. -func (v *nafLookupTable8) FromP3(q *Point) { - v.points[0].FromP3(q) - q2 := Point{} - q2.Add(q, q) - tmpP3 := Point{} - tmpP1xP1 := projP1xP1{} - for i := 0; i < 63; i++ { - v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(&q2, &v.points[i]))) - } -} - -// Selectors. - -// Set dest to x*Q, where -8 <= x <= 8, in constant time. 
-func (v *projLookupTable) SelectInto(dest *projCached, x int8) { - // Compute xabs = |x| - xmask := x >> 7 - xabs := uint8((x + xmask) ^ xmask) - - dest.Zero() - for j := 1; j <= 8; j++ { - // Set dest = j*Q if |x| = j - cond := subtle.ConstantTimeByteEq(xabs, uint8(j)) - dest.Select(&v.points[j-1], dest, cond) - } - // Now dest = |x|*Q, conditionally negate to get x*Q - dest.CondNeg(int(xmask & 1)) -} - -// Set dest to x*Q, where -8 <= x <= 8, in constant time. -func (v *affineLookupTable) SelectInto(dest *affineCached, x int8) { - // Compute xabs = |x| - xmask := x >> 7 - xabs := uint8((x + xmask) ^ xmask) - - dest.Zero() - for j := 1; j <= 8; j++ { - // Set dest = j*Q if |x| = j - cond := subtle.ConstantTimeByteEq(xabs, uint8(j)) - dest.Select(&v.points[j-1], dest, cond) - } - // Now dest = |x|*Q, conditionally negate to get x*Q - dest.CondNeg(int(xmask & 1)) -} - -// Given odd x with 0 < x < 2^4, return x*Q (in variable time). -func (v *nafLookupTable5) SelectInto(dest *projCached, x int8) { - *dest = v.points[x/2] -} - -// Given odd x with 0 < x < 2^7, return x*Q (in variable time). -func (v *nafLookupTable8) SelectInto(dest *affineCached, x int8) { - *dest = v.points[x/2] -} diff --git a/vendor/github.com/4meepo/tagalign/.gitignore b/vendor/github.com/4meepo/tagalign/.gitignore index bdff1c364d..1c6218ee29 100644 --- a/vendor/github.com/4meepo/tagalign/.gitignore +++ b/vendor/github.com/4meepo/tagalign/.gitignore @@ -17,10 +17,7 @@ *.test .vscode -<<<<<<< HEAD .idea/ -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) # Output of the go coverage tool, specifically when used with LiteIDE *.out diff --git a/vendor/github.com/4meepo/tagalign/.goreleaser.yml b/vendor/github.com/4meepo/tagalign/.goreleaser.yml index b413f6eeb5..37dfec7c88 100644 --- a/vendor/github.com/4meepo/tagalign/.goreleaser.yml +++ b/vendor/github.com/4meepo/tagalign/.goreleaser.yml @@ -1,8 +1,4 @@ -<<<<<<< HEAD version: 2 -======= ---- ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) project_name: tagalign release: @@ -33,8 +29,4 @@ builds: goarch: 386 - goos: freebsd goarch: arm64 -<<<<<<< HEAD main: ./cmd/tagalign/ -======= - main: ./cmd/tagalign/ ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/4meepo/tagalign/options.go b/vendor/github.com/4meepo/tagalign/options.go index 8e1fc5dc3a..2a78592465 100644 --- a/vendor/github.com/4meepo/tagalign/options.go +++ b/vendor/github.com/4meepo/tagalign/options.go @@ -2,16 +2,6 @@ package tagalign type Option func(*Helper) -<<<<<<< HEAD -======= -// WithMode specify the mode of tagalign. -func WithMode(mode Mode) Option { - return func(h *Helper) { - h.mode = mode - } -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // WithSort enables tag sorting. // fixedOrder specifies the order of tags; the other tags will be sorted by name. // Sort is disabled by default.
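Note: the deleted edwards25519 lookup-table code above relies on two idioms that are easy to misread when reviewing this patch. SelectInto computes a branch-free absolute value (xmask/xabs) and scans every table slot through crypto/subtle so the memory access pattern never depends on the secret digit, and the scalar-multiplication routines feed it signed radix-16 digits in [-8, 8]. The following is a minimal standalone sketch, not part of the patch, showing both idioms on plain integers standing in for curve points; signedRadix16Digits and constantTimeSelect are illustrative names for this sketch only, not the vendored API.

package main

import (
	"crypto/subtle"
	"fmt"
)

// signedRadix16Digits is an illustrative (not the vendored) decomposition of a
// small non-negative integer into signed base-16 digits in [-8, 8], mirroring
// the recentering that Scalar.signedRadix16 applies to 256-bit scalars.
func signedRadix16Digits(x uint32) []int8 {
	var digits []int8
	carry := int8(0)
	for x > 0 || carry != 0 {
		d := int8(x&15) + carry // raw digit in [0, 15], plus carry
		x >>= 4
		carry = 0
		if d > 8 { // recenter: use d-16 here and carry 1 into the next digit
			d -= 16
			carry = 1
		}
		digits = append(digits, d)
	}
	return digits
}

// constantTimeSelect returns table[|x|-1], negated when x < 0, and zero when
// x == 0, visiting every slot the way projLookupTable.SelectInto does.
func constantTimeSelect(table [8]int32, x int8) int32 {
	xmask := x >> 7                    // 0x00 for x >= 0, 0xff for x < 0
	xabs := uint8((x + xmask) ^ xmask) // branch-free |x|
	var out int32
	for j := 1; j <= 8; j++ {
		cond := subtle.ConstantTimeByteEq(xabs, uint8(j))
		// ConstantTimeSelect(v, a, b) yields a when v == 1, else b.
		out = int32(subtle.ConstantTimeSelect(cond, int(table[j-1]), int(out)))
	}
	// Conditionally negate, again without branching on secret data.
	neg := int32(xmask & 1)
	return (out ^ -neg) + neg
}

func main() {
	// The decomposition satisfies x == sum(d_i * 16^i).
	x := uint32(0xBEEF)
	sum, pow := int64(0), int64(1)
	for _, d := range signedRadix16Digits(x) {
		sum += int64(d) * pow
		pow *= 16
	}
	fmt.Println(sum == int64(x)) // true

	table := [8]int32{10, 20, 30, 40, 50, 60, 70, 80} // stands in for Q..8Q
	fmt.Println(constantTimeSelect(table, 3))  // 30
	fmt.Println(constantTimeSelect(table, -5)) // -50
	fmt.Println(constantTimeSelect(table, 0))  // 0
}

Because every slot is visited and selection happens via ConstantTimeSelect, timing and cache behavior are independent of the secret digit, which is the same property the vendored SelectInto preserves with dest.Select and dest.CondNeg.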
diff --git a/vendor/github.com/4meepo/tagalign/tagalign.go b/vendor/github.com/4meepo/tagalign/tagalign.go index 76b136b399..8161a0aa7f 100644 --- a/vendor/github.com/4meepo/tagalign/tagalign.go +++ b/vendor/github.com/4meepo/tagalign/tagalign.go @@ -1,42 +1,19 @@ package tagalign import ( -<<<<<<< HEAD "cmp" "fmt" "go/ast" "go/token" "reflect" "slices" -======= - "fmt" - "go/ast" - "go/token" - "log" - "reflect" - "sort" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "strconv" "strings" "github.com/fatih/structtag" -<<<<<<< HEAD - "golang.org/x/tools/go/analysis" -) - -======= - "golang.org/x/tools/go/analysis" ) -type Mode int - -const ( - StandaloneMode Mode = iota - GolangciLintMode -) - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type Style int const ( @@ -59,7 +36,6 @@ func NewAnalyzer(options ...Option) *analysis.Analyzer { } } -<<<<<<< HEAD func Run(pass *analysis.Pass, options ...Option) { for _, f := range pass.Files { filename := getFilename(pass.Fset, f) @@ -68,13 +44,6 @@ func Run(pass *analysis.Pass, options ...Option) { } h := &Helper{ -======= -func Run(pass *analysis.Pass, options ...Option) []Issue { - var issues []Issue - for _, f := range pass.Files { - h := &Helper{ - mode: StandaloneMode, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) style: DefaultStyle, align: true, } @@ -89,35 +58,19 @@ func Run(pass *analysis.Pass, options ...Option) []Issue { if !h.align && !h.sort { // do nothing -<<<<<<< HEAD return -======= - return nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } ast.Inspect(f, func(n ast.Node) bool { h.find(pass, n) return true }) -<<<<<<< HEAD h.Process(pass) } } type Helper struct { -======= - h.Process(pass) - issues = append(issues, h.issues...) - } - return issues -} - -type Helper struct { - mode Mode - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) style Style align bool // whether enable tags align. @@ -126,22 +79,6 @@ type Helper struct { singleFields []*ast.Field consecutiveFieldsGroups [][]*ast.Field // fields in this group, must be consecutive in struct. -<<<<<<< HEAD -======= - issues []Issue -} - -// Issue is used to integrate with golangci-lint's inline auto fix. 
-type Issue struct { - Pos token.Position - Message string - InlineFix InlineFix -} -type InlineFix struct { - StartCol int // zero-based - Length int - NewString string ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (w *Helper) find(pass *analysis.Pass, n ast.Node) { @@ -201,7 +138,6 @@ func (w *Helper) find(pass *analysis.Pass, n ast.Node) { split() } -<<<<<<< HEAD func (w *Helper) report(pass *analysis.Pass, field *ast.Field, msg, replaceStr string) { pass.Report(analysis.Diagnostic{ Pos: field.Tag.Pos(), @@ -224,44 +160,6 @@ func (w *Helper) report(pass *analysis.Pass, field *ast.Field, msg, replaceStr s //nolint:gocognit,gocyclo,nestif func (w *Helper) Process(pass *analysis.Pass) { -======= -func (w *Helper) report(pass *analysis.Pass, field *ast.Field, startCol int, msg, replaceStr string) { - if w.mode == GolangciLintMode { - iss := Issue{ - Pos: pass.Fset.Position(field.Tag.Pos()), - Message: msg, - InlineFix: InlineFix{ - StartCol: startCol, - Length: len(field.Tag.Value), - NewString: replaceStr, - }, - } - w.issues = append(w.issues, iss) - } - - if w.mode == StandaloneMode { - pass.Report(analysis.Diagnostic{ - Pos: field.Tag.Pos(), - End: field.Tag.End(), - Message: msg, - SuggestedFixes: []analysis.SuggestedFix{ - { - Message: msg, - TextEdits: []analysis.TextEdit{ - { - Pos: field.Tag.Pos(), - End: field.Tag.End(), - NewText: []byte(replaceStr), - }, - }, - }, - }, - }) - } -} - -func (w *Helper) Process(pass *analysis.Pass) { //nolint:gocognit ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // process grouped fields for _, fields := range w.consecutiveFieldsGroups { offsets := make([]int, len(fields)) @@ -287,11 +185,7 @@ func (w *Helper) Process(pass *analysis.Pass) { //nolint:gocognit tag, err := strconv.Unquote(field.Tag.Value) if err != nil { // if tag value is not a valid string, report it directly -<<<<<<< HEAD w.report(pass, field, errTagValueSyntax, field.Tag.Value) -======= - w.report(pass, field, column, errTagValueSyntax, field.Tag.Value) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) fields = removeField(fields, i) continue } @@ -299,11 +193,7 @@ func (w *Helper) Process(pass *analysis.Pass) { //nolint:gocognit tags, err := structtag.Parse(tag) if err != nil { // if tag value is not a valid struct tag, report it directly -<<<<<<< HEAD w.report(pass, field, err.Error(), field.Tag.Value) -======= - w.report(pass, field, column, err.Error(), field.Tag.Value) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) fields = removeField(fields, i) continue } @@ -316,11 +206,7 @@ func (w *Helper) Process(pass *analysis.Pass) { //nolint:gocognit cp[i] = tag } notSortedTagsGroup = append(notSortedTagsGroup, cp) -<<<<<<< HEAD sortTags(w.fixedTagOrder, tags) -======= - sortBy(w.fixedTagOrder, tags) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } for _, t := range tags.Tags() { addKey(t.Key) @@ -331,11 +217,7 @@ func (w *Helper) Process(pass *analysis.Pass) { //nolint:gocognit } if w.sort && StrictStyle == w.style { -<<<<<<< HEAD sortKeys(w.fixedTagOrder, uniqueKeys) -======= - sortAllKeys(w.fixedTagOrder, uniqueKeys) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) maxTagNum = len(uniqueKeys) } @@ -423,45 +305,26 @@ func (w *Helper) Process(pass *analysis.Pass) { //nolint:gocognit msg := "tag is not aligned, should be: " + unquoteTag -<<<<<<< HEAD w.report(pass, field, msg, newTagValue) -======= - w.report(pass, field, offsets[i], msg, newTagValue) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } 
// process single fields for _, field := range w.singleFields { -<<<<<<< HEAD tag, err := strconv.Unquote(field.Tag.Value) if err != nil { w.report(pass, field, errTagValueSyntax, field.Tag.Value) -======= - column := pass.Fset.Position(field.Tag.Pos()).Column - 1 - tag, err := strconv.Unquote(field.Tag.Value) - if err != nil { - w.report(pass, field, column, errTagValueSyntax, field.Tag.Value) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) continue } tags, err := structtag.Parse(tag) if err != nil { -<<<<<<< HEAD w.report(pass, field, err.Error(), field.Tag.Value) -======= - w.report(pass, field, column, err.Error(), field.Tag.Value) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) continue } originalTags := append([]*structtag.Tag(nil), tags.Tags()...) if w.sort { -<<<<<<< HEAD sortTags(w.fixedTagOrder, tags) -======= - sortBy(w.fixedTagOrder, tags) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } newTagValue := fmt.Sprintf("`%s`", tags.String()) @@ -472,7 +335,6 @@ func (w *Helper) Process(pass *analysis.Pass) { //nolint:gocognit msg := "tag is not aligned , should be: " + tags.String() -<<<<<<< HEAD w.report(pass, field, msg, newTagValue) } } @@ -508,90 +370,12 @@ func compareByFixedOrder(fixedOrder []string) func(a, b string) int { return cmp.Compare(oi, oj) } -======= - w.report(pass, field, column, msg, newTagValue) - } -} - -// Issues returns all issues found by the analyzer. -// It is used to integrate with golangci-lint. -func (w *Helper) Issues() []Issue { - log.Println("tagalign 's Issues() should only be called in golangci-lint mode") - return w.issues -} - -// sortBy sorts tags by fixed order. -// If a tag is not in the fixed order, it will be sorted by name. -func sortBy(fixedOrder []string, tags *structtag.Tags) { - // sort by fixed order - sort.Slice(tags.Tags(), func(i, j int) bool { - ti := tags.Tags()[i] - tj := tags.Tags()[j] - - oi := findIndex(fixedOrder, ti.Key) - oj := findIndex(fixedOrder, tj.Key) - - if oi == -1 && oj == -1 { - return ti.Key < tj.Key - } - - if oi == -1 { - return false - } - - if oj == -1 { - return true - } - - return oi < oj - }) -} - -func sortAllKeys(fixedOrder []string, keys []string) { - sort.Slice(keys, func(i, j int) bool { - oi := findIndex(fixedOrder, keys[i]) - oj := findIndex(fixedOrder, keys[j]) - - if oi == -1 && oj == -1 { - return keys[i] < keys[j] - } - - if oi == -1 { - return false - } - - if oj == -1 { - return true - } - - return oi < oj - }) -} - -func findIndex(s []string, e string) int { - for i, a := range s { - if a == e { - return i - } - } - return -1 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func alignFormat(length int) string { return "%" + fmt.Sprintf("-%ds", length) } -<<<<<<< HEAD -======= -func max(a, b int) int { - if a > b { - return a - } - return b -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func removeField(fields []*ast.Field, index int) []*ast.Field { if index < 0 || index >= len(fields) { return fields @@ -599,7 +383,6 @@ func removeField(fields []*ast.Field, index int) []*ast.Field { return append(fields[:index], fields[index+1:]...) 
} -<<<<<<< HEAD func getFilename(fset *token.FileSet, file *ast.File) string { filename := fset.PositionFor(file.Pos(), true).Filename @@ -609,5 +392,3 @@ func getFilename(fset *token.FileSet, file *ast.File) string { return filename } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/Antonboom/nilnil/pkg/analyzer/analyzer.go b/vendor/github.com/Antonboom/nilnil/pkg/analyzer/analyzer.go index 4adc007698..703cc1c39f 100644 --- a/vendor/github.com/Antonboom/nilnil/pkg/analyzer/analyzer.go +++ b/vendor/github.com/Antonboom/nilnil/pkg/analyzer/analyzer.go @@ -125,11 +125,7 @@ const ( ) func (n *nilNil) isDangerNilType(t types.Type) (bool, zeroValue) { -<<<<<<< HEAD switch v := types.Unalias(t).(type) { -======= - switch v := t.(type) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) case *types.Pointer: return n.checkedTypes.Contains(ptrType), zeroValueNil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md index fc09c2324c..cf422304e7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -1,6 +1,5 @@ # Release History -<<<<<<< HEAD ## 1.17.0 (2025-01-07) ### Features Added @@ -8,8 +7,6 @@ * Added field `OperationLocationResultPath` to `runtime.NewPollerOptions[T]` for LROs that use the `Operation-Location` pattern. * Support `encoding.TextMarshaler` and `encoding.TextUnmarshaler` interfaces in `arm.ResourceID`. -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## 1.16.0 (2024-10-17) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go index f8944266f5..d9a4e36dcc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go @@ -110,7 +110,6 @@ func (id *ResourceID) String() string { return id.stringValue } -<<<<<<< HEAD // MarshalText returns a textual representation of the ResourceID func (id *ResourceID) MarshalText() ([]byte, error) { return []byte(id.String()), nil @@ -126,8 +125,6 @@ func (id *ResourceID) UnmarshalText(text []byte) error { return nil } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func newResourceID(parent *ResourceID, resourceTypeName string, resourceName string) *ResourceID { id := &ResourceID{} id.init(parent, chooseResourceType(resourceTypeName, parent), resourceName, true) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go index 836dc1fb75..f496331893 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go @@ -40,20 +40,13 @@ type Poller[T any] struct { OrigURL string `json:"origURL"` Method string `json:"method"` FinalState pollers.FinalStateVia `json:"finalState"` -<<<<<<< HEAD ResultPath string `json:"resultPath"` -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) CurState string `json:"state"` } // New creates a new Poller from the provided initial response. 
// Pass nil for response to create an empty Poller for rehydration. -<<<<<<< HEAD func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia, resultPath string) (*Poller[T], error) { -======= -func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if resp == nil { log.Write(log.EventLRO, "Resuming Operation-Location poller.") return &Poller[T]{pl: pl}, nil @@ -90,10 +83,7 @@ func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.Fi OrigURL: resp.Request.URL.String(), Method: resp.Request.Method, FinalState: finalState, -<<<<<<< HEAD ResultPath: resultPath, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) CurState: curState, }, nil } @@ -128,13 +118,6 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error { var req *exported.Request var err error -<<<<<<< HEAD -======= - // when the payload is included with the status monitor on - // terminal success it's in the "result" JSON property - payloadPath := "result" - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if p.FinalState == pollers.FinalStateViaLocation && p.LocURL != "" { req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) } else if rl, rlErr := poller.GetResourceLocation(p.resp); rlErr != nil && !errors.Is(rlErr, poller.ErrNoBody) { @@ -153,11 +136,7 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error { // if a final GET request has been created, execute it if req != nil { // no JSON path when making a final GET request -<<<<<<< HEAD p.ResultPath = "" -======= - payloadPath = "" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) resp, err := p.pl.Do(req) if err != nil { return err @@ -165,9 +144,5 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error { p.resp = resp } -<<<<<<< HEAD return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), p.ResultPath, out) -======= - return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), payloadPath, out) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go index 9632da6dc7..44ab00d400 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -40,9 +40,5 @@ const ( Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. -<<<<<<< HEAD Version = "v1.17.0" -======= - Version = "v1.16.0" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go index ac08baf4c8..c66fc0a90a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go @@ -32,10 +32,7 @@ type PagingHandler[T any] struct { } // Pager provides operations for iterating over paged responses. -<<<<<<< HEAD // Methods on this type are not safe for concurrent use. 
-======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type Pager[T any] struct { current *T handler PagingHandler[T] diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go index 7aabe6173e..4f90e44743 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go @@ -50,7 +50,6 @@ const ( // NewPollerOptions contains the optional parameters for NewPoller. type NewPollerOptions[T any] struct { // FinalStateVia contains the final-state-via value for the LRO. -<<<<<<< HEAD // NOTE: used only for Azure-AsyncOperation and Operation-Location LROs. FinalStateVia FinalStateVia @@ -59,10 +58,6 @@ type NewPollerOptions[T any] struct { // NOTE: only used for Operation-Location LROs. OperationLocationResultPath string -======= - FinalStateVia FinalStateVia - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Response contains a preconstructed response type. // The final payload will be unmarshaled into it and returned. Response *T @@ -109,11 +104,7 @@ func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPol opr, err = async.New[T](pl, resp, options.FinalStateVia) } else if op.Applicable(resp) { // op poller must be checked before loc as it can also have a location header -<<<<<<< HEAD opr, err = op.New[T](pl, resp, options.FinalStateVia, options.OperationLocationResultPath) -======= - opr, err = op.New[T](pl, resp, options.FinalStateVia) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } else if loc.Applicable(resp) { opr, err = loc.New[T](pl, resp) } else if body.Applicable(resp) { @@ -187,11 +178,7 @@ func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options } else if loc.CanResume(asJSON) { opr, _ = loc.New[T](pl, nil) } else if op.CanResume(asJSON) { -<<<<<<< HEAD opr, _ = op.New[T](pl, nil, "", "") -======= - opr, _ = op.New[T](pl, nil, "") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } else { return nil, fmt.Errorf("unhandled poller token %s", string(raw)) } @@ -219,10 +206,7 @@ type PollingHandler[T any] interface { } // Poller encapsulates a long-running operation, providing polling facilities until the operation reaches a terminal state. -<<<<<<< HEAD // Methods on this type are not safe for concurrent use. 
-======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type Poller[T any] struct { op PollingHandler[T] resp *http.Response diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md index a5a7421218..567e6975b1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md @@ -1,6 +1,5 @@ # Breaking Changes -<<<<<<< HEAD ## v1.8.0 ### New errors from `NewManagedIdentityCredential` in some environments @@ -11,8 +10,6 @@ * Cloud Shell * Service Fabric -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## v1.6.0 ### Behavioral change to `DefaultAzureCredential` in IMDS managed identity scenarios diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md index 9ea7a6344c..1ffc19a548 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md @@ -1,6 +1,5 @@ # Release History -<<<<<<< HEAD ## 1.8.1 (2025-01-15) ### Bugs Fixed @@ -15,8 +14,6 @@ credential after `ManagedIdentityCredential` receives an unexpected response from IMDS, indicating the response is from something else such as a proxy -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## 1.8.0 (2024-10-08) ### Other Changes diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md index 79c86ede61..c99ce5b2b5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md @@ -54,21 +54,7 @@ The `azidentity` module focuses on OAuth authentication with Microsoft Entra ID. ### DefaultAzureCredential -<<<<<<< HEAD `DefaultAzureCredential` simplifies authentication while developing apps that deploy to Azure by combining credentials used in Azure hosting environments with credentials used in local development. For more information, see [DefaultAzureCredential overview][dac_overview]. -======= -`DefaultAzureCredential` simplifies authentication while developing applications that deploy to Azure by combining credentials used in Azure hosting environments and credentials used in local development. In production, it's better to use a specific credential type so authentication is more predictable and easier to debug. `DefaultAzureCredential` attempts to authenticate via the following mechanisms in this order, stopping when one succeeds: - -![DefaultAzureCredential authentication flow](img/mermaidjs/DefaultAzureCredentialAuthFlow.svg) - -1. **Environment** - `DefaultAzureCredential` will read account information specified via [environment variables](#environment-variables) and use it to authenticate. -1. **Workload Identity** - If the app is deployed on Kubernetes with environment variables set by the workload identity webhook, `DefaultAzureCredential` will authenticate the configured identity. -1. **Managed Identity** - If the app is deployed to an Azure host with managed identity enabled, `DefaultAzureCredential` will authenticate with it. -1. **Azure CLI** - If a user or service principal has authenticated via the Azure CLI `az login` command, `DefaultAzureCredential` will authenticate that identity. -1. 
**Azure Developer CLI** - If the developer has authenticated via the Azure Developer CLI `azd auth login` command, the `DefaultAzureCredential` will authenticate with that account. - -> Note: `DefaultAzureCredential` is intended to simplify getting started with the SDK by handling common scenarios with reasonable default behaviors. Developers who want more control or whose scenario isn't served by the default settings should use other credential types. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## Managed Identity @@ -132,17 +118,10 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) ### Credential chains -<<<<<<< HEAD |Credential|Usage|Reference |-|-|- |[DefaultAzureCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential)|Simplified authentication experience for getting started developing Azure apps|[DefaultAzureCredential overview][dac_overview]| |[ChainedTokenCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ChainedTokenCredential)|Define custom authentication flows, composing multiple credentials|[ChainedTokenCredential overview][ctc_overview]| -======= -|Credential|Usage -|-|- -|[DefaultAzureCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential)|Simplified authentication experience for getting started developing Azure apps -|[ChainedTokenCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ChainedTokenCredential)|Define custom authentication flows, composing multiple credentials ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ### Authenticating Azure-Hosted Applications @@ -271,11 +250,8 @@ For more information, see the or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. -<<<<<<< HEAD [ctc_overview]: https://aka.ms/azsdk/go/identity/credential-chains#chainedtokencredential-overview [dac_overview]: https://aka.ms/azsdk/go/identity/credential-chains#defaultazurecredential-overview -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fazidentity%2FREADME.png) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD index ef1f96dda2..8fc7c64aa3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD @@ -22,7 +22,6 @@ Some credential types support opt-in persistent token caching (see [the below ta Persistent caches are encrypted at rest using a mechanism that depends on the operating system: -<<<<<<< HEAD | Operating system | Encryption facility | | ---------------- | ---------------------------------------------- | | Linux | kernel key retention service (keyctl) | @@ -30,15 +29,6 @@ Persistent caches are encrypted at rest using a mechanism that depends on the op | Windows | Data Protection API (DPAPI) | Persistent caching requires encryption. When the required encryption facility is unuseable, or the application is running on an unsupported OS, the persistent cache constructor returns an error. This doesn't mean that authentication is impossible, only that credentials can't persist authentication data and the application will need to reauthenticate the next time it runs. 
See the package documentation for examples showing how to configure persistent caching and access cached data for [users][user_example] and [service principals][sp_example]. -======= -| Operating system | Encryption facility | -|------------------|---------------------------------------| -| Linux | kernel key retention service (keyctl) | -| macOS | Keychain | -| Windows | Data Protection API (DPAPI) | - -Persistent caching requires encryption. When the required encryption facility is unuseable, or the application is running on an unsupported OS, the persistent cache constructor returns an error. This doesn't mean that authentication is impossible, only that credentials can't persist authentication data and the application will need to reauthenticate the next time it runs. See the [package documentation][example] for example code showing how to configure persistent caching and access cached data. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ### Credentials supporting token caching @@ -47,11 +37,7 @@ The following table indicates the state of in-memory and persistent caching in e **Note:** in-memory caching is enabled by default for every type supporting it. Persistent token caching must be enabled explicitly. See the [package documentation][user_example] for an example showing how to do this for credential types authenticating users. For types that authenticate service principals, set the `Cache` field on the constructor's options as shown in [this example][sp_example]. | Credential | In-memory token caching | Persistent token caching | -<<<<<<< HEAD | ------------------------------ | ------------------------------------------------------------------- | ------------------------ | -======= -|--------------------------------|---------------------------------------------------------------------|--------------------------| ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) | `AzureCLICredential` | Not Supported | Not Supported | | `AzureDeveloperCLICredential` | Not Supported | Not Supported | | `AzurePipelinesCredential` | Supported | Supported | diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md index 4b5fbcbf4c..9c4b1cd71c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md @@ -8,10 +8,7 @@ This troubleshooting guide covers failure investigation techniques, common error - [Permission issues](#permission-issues) - [Find relevant information in errors](#find-relevant-information-in-errors) - [Enable and configure logging](#enable-and-configure-logging) -<<<<<<< HEAD - [Troubleshoot persistent token caching issues](#troubleshoot-persistent-token-caching-issues) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) - [Troubleshoot AzureCLICredential authentication issues](#troubleshoot-azureclicredential-authentication-issues) - [Troubleshoot AzureDeveloperCLICredential authentication issues](#troubleshoot-azuredeveloperclicredential-authentication-issues) - [Troubleshoot AzurePipelinesCredential authentication issues](#troubleshoot-azurepipelinescredential-authentication-issues) @@ -240,7 +237,6 @@ azd auth token --output json --scope https://management.core.windows.net/.defaul | No service connection found with identifier |The `serviceConnectionID` argument to `NewAzurePipelinesCredential` is incorrect| Verify the service connection 
ID. This parameter refers to the `resourceId` of the Azure Service Connection. It can also be found in the query string of the service connection's configuration in Azure DevOps. [Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/library/service-endpoints?view=azure-devops&tabs=yaml) has more information about service connections.| |401 (Unauthorized) response from OIDC endpoint|The `systemAccessToken` argument to `NewAzurePipelinesCredential` is incorrect|Check pipeline configuration. This value comes from the predefined variable `System.AccessToken` [as described in Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#systemaccesstoken).| -<<<<<<< HEAD ## Troubleshoot persistent token caching issues ### macOS @@ -264,8 +260,6 @@ Try `go build` again with `CGO_ENABLED=1`. You may need to install native build macOS prohibits Keychain access from environments without a GUI such as SSH sessions. If your application calls the persistent cache constructor ([cache.New](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache#New)) from an SSH session on a macOS host, you'll see an error like `persistent storage isn't available due to error "User interaction is not allowed. (-25308)"`. This doesn't mean authentication is impossible, only that credentials can't persist data and the application must reauthenticate the next time it runs. -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## Get additional help Additional information on ways to reach out for support can be found in [SUPPORT.md](https://github.com/Azure/azure-sdk-for-go/blob/main/SUPPORT.md). diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go index 3d09bbc60f..40a94154c6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go @@ -42,11 +42,8 @@ const ( developerSignOnClientID = "04b07795-8ddb-461a-bbee-02f9e1bf7b46" defaultSuffix = "/.default" -<<<<<<< HEAD scopeLogFmt = "%s.GetToken() acquired a token for scope %q" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) traceNamespace = "Microsoft.Entra" traceOpGetToken = "GetToken" traceOpAuthenticate = "Authenticate" @@ -108,7 +105,6 @@ func resolveAdditionalTenants(tenants []string) []string { return cp } -<<<<<<< HEAD // resolveTenant returns the correct tenant for a token request, or "" when the calling credential doesn't // have an explicitly configured tenant and the caller didn't specify a tenant for the token request. 
// @@ -119,9 +115,6 @@ func resolveAdditionalTenants(tenants []string) []string { // - credName: name of the calling credential type; for error messages // - additionalTenants: optional allow list of tenants the credential may acquire tokens from in // addition to defaultTenant i.e., the credential's AdditionallyAllowedTenants option -======= -// resolveTenant returns the correct tenant for a token request ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func resolveTenant(defaultTenant, specified, credName string, additionalTenants []string) (string, error) { if specified == "" || specified == defaultTenant { return defaultTenant, nil @@ -137,7 +130,6 @@ func resolveTenant(defaultTenant, specified, credName string, additionalTenants return specified, nil } } -<<<<<<< HEAD if len(additionalTenants) == 0 { switch defaultTenant { case "", organizationsTenantID: @@ -149,8 +141,6 @@ func resolveTenant(defaultTenant, specified, credName string, additionalTenants return specified, nil } } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return "", fmt.Errorf(`%s isn't configured to acquire tokens for tenant %q. To enable acquiring tokens for this tenant add it to the AdditionallyAllowedTenants on the credential options, or add "*" to allow acquiring tokens for any tenant`, credName, specified) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go index bfd5c63dc9..e2f371cfd8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go @@ -30,15 +30,9 @@ type azTokenProvider func(ctx context.Context, scopes []string, tenant, subscrip // AzureCLICredentialOptions contains optional parameters for AzureCLICredential. type AzureCLICredentialOptions struct { -<<<<<<< HEAD // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant. -======= - // AdditionallyAllowedTenants specifies tenants for which the credential may acquire tokens, in addition - // to TenantID. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the - // logged in account can access. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) AdditionallyAllowedTenants []string // Subscription is the name or ID of a subscription. Set this to acquire tokens for an account other diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go index 9a2974c094..46d0b55192 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go @@ -30,15 +30,9 @@ type azdTokenProvider func(ctx context.Context, scopes []string, tenant string) // AzureDeveloperCLICredentialOptions contains optional parameters for AzureDeveloperCLICredential. type AzureDeveloperCLICredentialOptions struct { -<<<<<<< HEAD // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to // TenantID. 
When TenantID is empty, this option has no effect and the credential will authenticate to // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant. -======= - // AdditionallyAllowedTenants specifies tenants for which the credential may acquire tokens, in addition - // to TenantID. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the - // logged in account can access. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) AdditionallyAllowedTenants []string // TenantID identifies the tenant the credential should authenticate in. Defaults to the azd environment, diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go index 27bf43409e..82342a0254 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go @@ -27,14 +27,10 @@ type ChainedTokenCredentialOptions struct { } // ChainedTokenCredential links together multiple credentials and tries them sequentially when authenticating. By default, -<<<<<<< HEAD // it tries all the credentials until one authenticates, after which it always uses that credential. For more information, // see [ChainedTokenCredential overview]. // // [ChainedTokenCredential overview]: https://aka.ms/azsdk/go/identity/credential-chains#chainedtokencredential-overview -======= -// it tries all the credentials until one authenticates, after which it always uses that credential. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type ChainedTokenCredential struct { cond *sync.Cond iterating bool @@ -53,12 +49,9 @@ func NewChainedTokenCredential(sources []azcore.TokenCredential, options *Chaine if source == nil { // cannot have a nil credential in the chain or else the application will panic when GetToken() is called on nil return nil, errors.New("sources cannot contain nil") } -<<<<<<< HEAD if mc, ok := source.(*ManagedIdentityCredential); ok { mc.mic.chained = true } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } cp := make([]azcore.TokenCredential, len(sources)) copy(cp, sources) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml index acc445e053..c3af0cdc2d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml @@ -26,38 +26,16 @@ extends: parameters: CloudConfig: Public: -<<<<<<< HEAD SubscriptionConfigurations: - $(sub-config-identity-test-resources) EnableRaceDetector: true Location: westus2 -======= - ServiceConnection: azure-sdk-tests - SubscriptionConfigurationFilePaths: - - eng/common/TestResources/sub-config/AzurePublicMsft.json - SubscriptionConfigurations: - - $(sub-config-azure-cloud-test-resources) - - $(sub-config-identity-test-resources) - EnableRaceDetector: true ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) RunLiveTests: true ServiceDirectory: azidentity UsePipelineProxy: false ${{ if endsWith(variables['Build.DefinitionName'], 'weekly') }}: -<<<<<<< HEAD PersistOidcToken: true -======= - PreSteps: - - task: AzureCLI@2 - displayName: Set OIDC token - inputs: - addSpnToEnvironment: true - azureSubscription: azure-sdk-tests - inlineScript: Write-Host "##vso[task.setvariable 
variable=OIDC_TOKEN;]$($env:idToken)" - scriptLocation: inlineScript - scriptType: pscore ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) MatrixConfigs: - Name: managed_identity_matrix GenerateVMJobs: true diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go index 386fa08fbf..92f508094d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go @@ -115,11 +115,7 @@ func (c *confidentialClient) GetToken(ctx context.Context, tro policy.TokenReque err = newAuthenticationFailedErrorFromMSAL(c.name, err) } } else { -<<<<<<< HEAD msg := fmt.Sprintf(scopeLogFmt, c.name, strings.Join(ar.GrantedScopes, ", ")) -======= - msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", c.name, strings.Join(ar.GrantedScopes, ", ")) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) log.Write(EventAuthentication, msg) } return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go index 7b4de159e8..14af271f6a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go @@ -23,7 +23,6 @@ type DefaultAzureCredentialOptions struct { // to credential types that authenticate via external tools such as the Azure CLI. azcore.ClientOptions -<<<<<<< HEAD // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant. @@ -31,21 +30,12 @@ type DefaultAzureCredentialOptions struct { // AZURE_ADDITIONALLY_ALLOWED_TENANTS. AdditionallyAllowedTenants []string -======= - // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. Add - // the wildcard value "*" to allow the credential to acquire tokens for any tenant. This value can also be - // set as a semicolon delimited list of tenants in the environment variable AZURE_ADDITIONALLY_ALLOWED_TENANTS. - AdditionallyAllowedTenants []string ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool -<<<<<<< HEAD -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // TenantID sets the default tenant for authentication via the Azure CLI and workload identity. 
TenantID string } @@ -53,11 +43,7 @@ type DefaultAzureCredentialOptions struct { // DefaultAzureCredential simplifies authentication while developing applications that deploy to Azure by // combining credentials used in Azure hosting environments and credentials used in local development. In // production, it's better to use a specific credential type so authentication is more predictable and easier -<<<<<<< HEAD // to debug. For more information, see [DefaultAzureCredential overview]. -======= -// to debug. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // DefaultAzureCredential attempts to authenticate with each of these credential types, in the following order, // stopping when one provides a token: @@ -73,11 +59,8 @@ type DefaultAzureCredentialOptions struct { // Consult the documentation for these credential types for more information on how they authenticate. // Once a credential has successfully authenticated, DefaultAzureCredential will use that credential for // every subsequent authentication. -<<<<<<< HEAD // // [DefaultAzureCredential overview]: https://aka.ms/azsdk/go/identity/credential-chains#defaultazurecredential-overview -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type DefaultAzureCredential struct { chain *ChainedTokenCredential } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go index fe96597bf6..53ae9767f4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go @@ -21,14 +21,9 @@ const credNameDeviceCode = "DeviceCodeCredential" type DeviceCodeCredentialOptions struct { azcore.ClientOptions -<<<<<<< HEAD // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant. -======= - // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire - // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) AdditionallyAllowedTenants []string // AuthenticationRecord returned by a call to a credential's Authenticate method. Set this option diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go index 0147e69d31..ec89de9b5b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go @@ -20,14 +20,9 @@ const credNameBrowser = "InteractiveBrowserCredential" type InteractiveBrowserCredentialOptions struct { azcore.ClientOptions -<<<<<<< HEAD // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant. 
-=======
-	// AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire
-	// tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	AdditionallyAllowedTenants []string

 	// AuthenticationRecord returned by a call to a credential's Authenticate method. Set this option
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go
index 031f2f685a..cc07fd7015 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go
@@ -65,12 +65,9 @@ type managedIdentityClient struct {
 	id        ManagedIDKind
 	msiType   msiType
 	probeIMDS bool
-<<<<<<< HEAD
 	// chained indicates whether the client is part of a credential chain. If true, the client will return
 	// a credentialUnavailableError instead of an AuthenticationFailedError for an unexpected IMDS response.
 	chained bool
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 // arcKeyDirectory returns the directory expected to contain Azure Arc keys
@@ -150,11 +147,7 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag
 	if _, ok := os.LookupEnv(identityHeader); ok {
 		if _, ok := os.LookupEnv(identityServerThumbprint); ok {
 			if options.ID != nil {
-<<<<<<< HEAD
 				return nil, errors.New("the Service Fabric API doesn't support specifying a user-assigned identity at runtime. The identity is determined by cluster resource configuration. See https://aka.ms/servicefabricmi")
-=======
-				return nil, errors.New("the Service Fabric API doesn't support specifying a user-assigned managed identity at runtime")
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			}
 			env = "Service Fabric"
 			c.endpoint = endpoint
@@ -225,10 +218,7 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi
 	// no need to synchronize around this value because it's true only when DefaultAzureCredential constructed the client,
 	// and in that case ChainedTokenCredential.GetToken synchronizes goroutines that would execute this block
 	if c.probeIMDS {
-<<<<<<< HEAD
 		// send a malformed request (no Metadata header) to IMDS to determine whether the endpoint is available
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		cx, cancel := context.WithTimeout(ctx, imdsProbeTimeout)
 		defer cancel()
 		cx = policy.WithRetryOptions(cx, policy.RetryOptions{MaxRetries: -1})
@@ -236,32 +226,14 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi
 		if err != nil {
 			return azcore.AccessToken{}, fmt.Errorf("failed to create IMDS probe request: %s", err)
 		}
-<<<<<<< HEAD
 		if _, err = c.azClient.Pipeline().Do(req); err != nil {
-=======
-		res, err := c.azClient.Pipeline().Do(req)
-		if err != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			msg := err.Error()
 			if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
 				msg = "managed identity timed out. See https://aka.ms/azsdk/go/identity/troubleshoot#dac for more information"
 			}
 			return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, msg)
 		}
-<<<<<<< HEAD
 		// send normal token requests from now on because something responded
-=======
-		// because IMDS always responds with JSON, assume a non-JSON response is from something else, such
-		// as a proxy, and return credentialUnavailableError so DefaultAzureCredential continues iterating
-		b, err := azruntime.Payload(res)
-		if err != nil {
-			return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, fmt.Sprintf("failed to read IMDS probe response: %s", err))
-		}
-		if !json.Valid(b) {
-			return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, "unexpected response to IMDS probe")
-		}
-		// send normal token requests from now on because IMDS responded
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		c.probeIMDS = false
 	}
@@ -276,7 +248,6 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi
 	}

 	if azruntime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) {
-<<<<<<< HEAD
 		tk, err := c.createAccessToken(resp)
 		if err != nil && c.chained && c.msiType == msiTypeIMDS {
 			// failure to unmarshal a 2xx implies the response is from something other than IMDS such as a proxy listening at
@@ -284,20 +255,14 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi
 			err = newCredentialUnavailableError(credNameManagedIdentity, err.Error())
 		}
 		return tk, err
-=======
-		return c.createAccessToken(resp)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}

 	if c.msiType == msiTypeIMDS {
 		switch resp.StatusCode {
 		case http.StatusBadRequest:
 			if id != nil {
-<<<<<<< HEAD
 				// return authenticationFailedError, halting any encompassing credential chain,
 				// because the explicit user-assigned identity implies the developer expected this to work
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 				return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp)
 			}
 			msg := "failed to authenticate a system assigned identity"
@@ -313,7 +278,6 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi
 				return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, fmt.Sprintf("unexpected response %q", string(body)))
 			}
 		}
-<<<<<<< HEAD
 		if c.chained {
 			// the response may be from something other than IMDS, for example a proxy returning
 			// 404. Return credentialUnavailableError so credential chains continue to their
@@ -321,8 +285,6 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi
 			err = newAuthenticationFailedError(credNameManagedIdentity, "", resp)
 			return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, err.Error())
 		}
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}

 	return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "", resp)
@@ -337,11 +299,7 @@ func (c *managedIdentityClient) createAccessToken(res *http.Response) (azcore.Ac
 		ExpiresOn interface{} `json:"expires_on,omitempty"` // the value returned in this field varies between a number and a date string
 	}{}
 	if err := azruntime.UnmarshalAsJSON(res, &value); err != nil {
-<<<<<<< HEAD
 		return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "Unexpected response content", res)
-=======
-		return azcore.AccessToken{}, fmt.Errorf("internal AccessToken: %v", err)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 	if value.ExpiresIn != "" {
 		expiresIn, err := json.Number(value.ExpiresIn).Int64()
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go
index 5493cdbb4f..ef5e4d7212 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go
@@ -154,16 +154,7 @@ func (p *publicClient) GetToken(ctx context.Context, tro policy.TokenRequestOpti
 	if p.opts.DisableAutomaticAuthentication {
 		return azcore.AccessToken{}, newAuthenticationRequiredError(p.name, tro)
 	}
-<<<<<<< HEAD
 	return p.reqToken(ctx, client, tro)
-=======
-	at, err := p.reqToken(ctx, client, tro)
-	if err == nil {
-		msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", p.name, strings.Join(ar.GrantedScopes, ", "))
-		log.Write(EventAuthentication, msg)
-	}
-	return at, err
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 // reqToken requests a token from the MSAL public client. It's separate from GetToken() to enable Authenticate() to bypass the cache.
@@ -246,11 +237,8 @@ func (p *publicClient) newMSALClient(enableCAE bool) (msalPublicClient, error) {
 func (p *publicClient) token(ar public.AuthResult, err error) (azcore.AccessToken, error) {
 	if err == nil {
-<<<<<<< HEAD
 		msg := fmt.Sprintf(scopeLogFmt, p.name, strings.Join(ar.GrantedScopes, ", "))
 		log.Write(EventAuthentication, msg)
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		p.record, err = newAuthenticationRecord(ar)
 	} else {
 		err = newAuthenticationFailedErrorFromMSAL(p.name, err)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1
index 93766f2171..efa8c6d3eb 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1
@@ -7,13 +7,10 @@ param (
     [hashtable] $AdditionalParameters = @{},
     [hashtable] $DeploymentOutputs,
-<<<<<<< HEAD

     [Parameter(Mandatory = $true)]
     [ValidateNotNullOrEmpty()]
     [string] $SubscriptionId,
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
     [Parameter(ParameterSetName = 'Provisioner', Mandatory = $true)]
     [ValidateNotNullOrEmpty()]
     [string] $TenantId,
@@ -22,13 +19,10 @@ param (
     [ValidatePattern('^[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$')]
     [string] $TestApplicationId,
-<<<<<<< HEAD

     [Parameter(Mandatory = $true)]
     [ValidateNotNullOrEmpty()]
     [string] $Environment,
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
     # Captures any arguments from eng/New-TestResources.ps1 not declared here (no parameter errors).
     [Parameter(ValueFromRemainingArguments = $true)]
     $RemainingArguments
@@ -42,14 +36,9 @@ if ($CI) {
         Write-Host "Skipping post-provisioning script because resources weren't deployed"
         return
     }
-<<<<<<< HEAD
     az cloud set -n $Environment
     az login --federated-token $env:ARM_OIDC_TOKEN --service-principal -t $TenantId -u $TestApplicationId
     az account set --subscription $SubscriptionId
-=======
-    az login --federated-token $env:OIDC_TOKEN --service-principal -t $TenantId -u $TestApplicationId
-    az account set --subscription $DeploymentOutputs['AZIDENTITY_SUBSCRIPTION_ID']
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 Write-Host "Building container"
@@ -82,12 +71,9 @@ $aciName = "azidentity-test"
 az container create -g $rg -n $aciName --image $image `
   --acr-identity $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) `
   --assign-identity [system] $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) `
-<<<<<<< HEAD
   --cpu 1 `
   --memory 1.0 `
   --os-type Linux `
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
   --role "Storage Blob Data Reader" `
   --scope $($DeploymentOutputs['AZIDENTITY_STORAGE_ID']) `
   -e AZIDENTITY_STORAGE_NAME=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME']) `
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go
index 5e8d2be4dc..88c1078a72 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go
@@ -14,9 +14,5 @@ const (
 	module = "github.com/Azure/azure-sdk-for-go/sdk/" + component

 	// Version is the semantic version (see http://semver.org) of this module.
-<<<<<<< HEAD
 	version = "v1.8.1"
-=======
-	version = "v1.8.0"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 )
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/CHANGELOG.md
index 63e8b52886..d99490438c 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/CHANGELOG.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/CHANGELOG.md
@@ -1,6 +1,5 @@
 # Release History

-<<<<<<< HEAD
 ## 1.3.0 (2024-11-06)

 ### Features Added
@@ -13,8 +12,6 @@
 * Client requests tokens from the Vault's tenant, overriding any credential default (thanks @francescomari)

-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 ## 1.1.0 (2024-02-13)

 ### Other Changes
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/assets.json
index 908b7c6bb0..27217d4ff7 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/assets.json
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/assets.json
@@ -2,9 +2,5 @@
   "AssetsRepo": "Azure/azure-sdk-assets",
   "AssetsRepoPrefixPath": "go",
   "TagPrefix": "go/security/keyvault/azkeys",
-<<<<<<< HEAD
   "Tag": "go/security/keyvault/azkeys_d53919c433"
-=======
-  "Tag": "go/security/keyvault/azkeys_2d421aec6c"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/ci.yml
index 15ff7bf9da..75586518a6 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/ci.yml
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/ci.yml
@@ -21,19 +21,13 @@ pr:
     include:
       - sdk/security/keyvault/azkeys

-<<<<<<< HEAD
 extends:
   template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml
-=======
-stages:
-- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
   parameters:
     TimeoutInMinutes: 120
     ServiceDirectory: 'security/keyvault/azkeys'
     RunLiveTests: true
     UsePipelineProxy: false
-<<<<<<< HEAD
     SupportedClouds: 'Public,UsGov,China'
     CloudConfig:
       Public:
@@ -51,15 +45,3 @@ stages:
       Path: sdk/security/keyvault/azkeys/platform-matrix.json
       Selection: sparse
       GenerateVMJobs: true
-=======
-    AdditionalMatrixConfigs:
-      - Name: keyvault_test_matrix_addons
-        Path: sdk/security/keyvault/azkeys/platform-matrix.json
-        Selection: sparse
-        GenerateVMJobs: true
-
-    # Due to the high cost of Managed HSMs, we only want to test using them weekly.
-    ${{ if not(contains(variables['Build.DefinitionName'], 'tests-weekly')) }}:
-      MatrixFilters:
-        - ArmTemplateParameters=^(?!.*enableHsm.*true)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/custom_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/custom_client.go
index 60303f1f30..350ef41c42 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/custom_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/custom_client.go
@@ -38,13 +38,10 @@ func NewClient(vaultURL string, credential azcore.TokenCredential, options *Clie
 		},
 	)
 	azcoreClient, err := azcore.NewClient(moduleName, version, runtime.PipelineOptions{
-<<<<<<< HEAD
 		APIVersion: runtime.APIVersionOptions{
 			Location: runtime.APIVersionLocationQueryParam,
 			Name:     "api-version",
 		},
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		PerRetry: []policy.Policy{authPolicy},
 		Tracing: runtime.TracingOptions{
 			Namespace: "Microsoft.KeyVault",
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/platform-matrix.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/platform-matrix.json
index 6ee85a1f5d..a2a6f970f0 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/platform-matrix.json
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/platform-matrix.json
@@ -6,13 +6,8 @@
     {
       "Agent": {
         "ubuntu-20.04": {
-<<<<<<< HEAD
           "OSVmImage": "env:LINUXVMIMAGE",
          "Pool": "env:LINUXPOOL"
-=======
-          "OSVmImage": "MMSUbuntu20.04",
-          "Pool": "azsdk-pool-mms-ubuntu-2004-general"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
         }
       },
      "ArmTemplateParameters": "@{ enableHsm = $true }",
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/version.go
index 881dd80905..ca94011745 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/version.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys/version.go
@@ -8,9 +8,5 @@ package azkeys

 const (
 	moduleName = "github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys"
-<<<<<<< HEAD
 	version = "v1.3.0"
-=======
-	version = "v1.1.0"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 )
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/CHANGELOG.md
index 452110f6b4..873368aa1a 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/CHANGELOG.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/CHANGELOG.md
@@ -1,6 +1,5 @@
 # Release History

-<<<<<<< HEAD
 ## 1.1.0 (2024-10-21)

 ### Features Added
@@ -12,8 +11,6 @@
 ### Other Changes
 * Upgraded dependencies
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 ## 1.0.0 (2023-08-15)

 ### Features Added
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/challenge_policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/challenge_policy.go
index 95899dbc80..408ae052b3 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/challenge_policy.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/challenge_policy.go
@@ -17,10 +17,6 @@ import (
 	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
 	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
 	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
-<<<<<<< HEAD
-=======
-	"github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 )

 const challengeMatchError = `challenge resource "%s" doesn't match the requested domain. Set DisableChallengeResourceVerification to true in your client options to disable. See https://aka.ms/azsdk/blog/vault-uri for more information`
@@ -34,13 +30,7 @@ type KeyVaultChallengePolicyOptions struct {
 type keyVaultAuthorizer struct {
 	// tro is the policy's authentication parameters. These are discovered from an authentication challenge
 	// elicited ahead of the first client request.
-<<<<<<< HEAD
 	tro policy.TokenRequestOptions
-=======
-	tro policy.TokenRequestOptions
-	// TODO: move into tro once it has a tenant field (https://github.com/Azure/azure-sdk-for-go/issues/19841)
-	tenantID string
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	verifyChallengeResource bool
 }
@@ -65,11 +55,7 @@ func NewKeyVaultChallengePolicy(cred azcore.TokenCredential, opts *KeyVaultChall
 }

 func (k *keyVaultAuthorizer) authorize(req *policy.Request, authNZ func(policy.TokenRequestOptions) error) error {
-<<<<<<< HEAD
 	if len(k.tro.Scopes) == 0 || k.tro.TenantID == "" {
-=======
-	if len(k.tro.Scopes) == 0 || k.tenantID == "" {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		if body := req.Body(); body != nil {
 			// We don't know the scope or tenant ID because we haven't seen a challenge yet. We elicit one now by sending
 			// the request without authorization, first removing its body, if any. authorizeOnChallenge will reattach the
@@ -116,36 +102,11 @@ func parseTenant(url string) string {
 	return tenant
 }

-<<<<<<< HEAD
-=======
-type challengePolicyError struct {
-	err error
-}
-
-func (c *challengePolicyError) Error() string {
-	return c.err.Error()
-}
-
-func (*challengePolicyError) NonRetriable() {
-	// marker method
-}
-
-func (c *challengePolicyError) Unwrap() error {
-	return c.err
-}
-
-var _ errorinfo.NonRetriable = (*challengePolicyError)(nil)
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // updateTokenRequestOptions parses authentication parameters from Key Vault's challenge
 func (k *keyVaultAuthorizer) updateTokenRequestOptions(resp *http.Response, req *http.Request) error {
 	authHeader := resp.Header.Get("WWW-Authenticate")
 	if authHeader == "" {
-<<<<<<< HEAD
 		return errors.New("response has no WWW-Authenticate header for challenge authentication")
-=======
-		return &challengePolicyError{err: errors.New("response has no WWW-Authenticate header for challenge authentication")}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}

 	// Strip down to auth and resource
@@ -165,11 +126,7 @@ func (k *keyVaultAuthorizer) updateTokenRequestOptions(resp *http.Response, req
 		}
 	}

-<<<<<<< HEAD
 	k.tro.TenantID = parseTenant(vals["authorization"])
-=======
-	k.tenantID = parseTenant(vals["authorization"])
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	scope := ""
 	if v, ok := vals["scope"]; ok {
 		scope = v
@@ -177,27 +134,16 @@ func (k *keyVaultAuthorizer) updateTokenRequestOptions(resp *http.Response, req
 		scope = v
 	}
 	if scope == "" {
-<<<<<<< HEAD
 		return errors.New("could not find a valid resource in the WWW-Authenticate header")
-=======
-		return &challengePolicyError{err: errors.New("could not find a valid resource in the WWW-Authenticate header")}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 	if k.verifyChallengeResource {
 		// the challenge resource's host must match the requested vault's host
 		parsed, err := url.Parse(scope)
 		if err != nil {
-<<<<<<< HEAD
 			return fmt.Errorf("invalid challenge resource %q: %v", scope, err)
 		}
 		if !strings.HasSuffix(req.URL.Host, "."+parsed.Host) {
 			return fmt.Errorf(challengeMatchError, scope)
-=======
-			return &challengePolicyError{err: fmt.Errorf(`invalid challenge resource "%s": %v`, scope, err)}
-		}
-		if !strings.HasSuffix(req.URL.Host, "."+parsed.Host) {
-			return &challengePolicyError{err: fmt.Errorf(challengeMatchError, scope)}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		}
 	}
 	if !strings.HasSuffix(scope, "/.default") {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/ci.securitykeyvault.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/ci.securitykeyvault.yml
deleted file mode 100644
index 2f8b8e1a87..0000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/ci.securitykeyvault.yml
+++ /dev/null
@@ -1,28 +0,0 @@
-# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file.
-trigger:
-  branches:
-    include:
-      - main
-      - feature/*
-      - hotfix/*
-      - release/*
-  paths:
-    include:
-      - sdk/security/keyvault/internal
-
-pr:
-  branches:
-    include:
-      - main
-      - feature/*
-      - hotfix/*
-      - release/*
-  paths:
-    include:
-      - sdk/security/keyvault/internal
-
-stages:
-- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml
-  parameters:
-    ServiceDirectory: 'security/keyvault/internal'
-    RunLiveTests: false
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/constants.go
index c5b083b854..5a037978fa 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/constants.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/constants.go
@@ -7,9 +7,5 @@
 package internal

 const (
-<<<<<<< HEAD
 	version = "v1.1.0" //nolint
-=======
-	version = "v1.0.0" //nolint
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 )
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go
index 14c7c62239..57d0e2777e 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go
@@ -18,11 +18,8 @@ import (
 	"encoding/pem"
 	"errors"
 	"fmt"
-<<<<<<< HEAD
 	"os"
 	"strings"
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
 	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base"
@@ -320,31 +317,21 @@ func New(authority, clientID string, cred Credential, options ...Option) (Client
 	if err != nil {
 		return Client{}, err
 	}
-<<<<<<< HEAD
 	autoEnabledRegion := os.Getenv("MSAL_FORCE_REGION")
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	opts := clientOptions{
 		authority: authority,
 		// if the caller specified a token provider, it will handle all details of authentication, using Client only as a token cache
 		disableInstanceDiscovery: cred.tokenProvider != nil,
 		httpClient:               shared.DefaultClient,
-<<<<<<< HEAD
 		azureRegion:              autoEnabledRegion,
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 	for _, o := range options {
 		o(&opts)
 	}
-<<<<<<< HEAD
 	if strings.EqualFold(opts.azureRegion, "DisableMsalForceRegion") {
 		opts.azureRegion = ""
 	}
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	baseOpts := []base.Option{
 		base.WithCacheAccessor(opts.accessor),
 		base.WithClientCapabilities(opts.capabilities),
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
index 7a3a03fb6d..e473d1267d 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
@@ -89,7 +89,6 @@ type AuthResult struct {
 	ExpiresOn      time.Time
 	GrantedScopes  []string
 	DeclinedScopes []string
-<<<<<<< HEAD
 	Metadata       AuthResultMetadata
 }
@@ -107,10 +106,6 @@ const (
 	Cache TokenSource = 2
 )

-=======
-}
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // AuthResultFromStorage creates an AuthResult from a storage token response (which is generated from the cache).
 func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResult, error) {
 	if err := storageTokenResponse.AccessToken.Validate(); err != nil {
@@ -129,7 +124,6 @@ func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResu
 			return AuthResult{}, fmt.Errorf("problem decoding JWT token: %w", err)
 		}
 	}
-<<<<<<< HEAD
 	return AuthResult{
 		Account:       account,
 		IDToken:       idToken,
@@ -141,9 +135,6 @@ func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResu
 			TokenSource: Cache,
 		},
 	}, nil
-=======
-	return AuthResult{account, idToken, accessToken, storageTokenResponse.AccessToken.ExpiresOn.T, grantedScopes, nil}, nil
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 // NewAuthResult creates an AuthResult.
@@ -157,12 +148,9 @@ func NewAuthResult(tokenResponse accesstokens.TokenResponse, account shared.Acco
 		AccessToken:   tokenResponse.AccessToken,
 		ExpiresOn:     tokenResponse.ExpiresOn.T,
 		GrantedScopes: tokenResponse.GrantedScopes.Slice,
-<<<<<<< HEAD
 		Metadata: AuthResultMetadata{
 			TokenSource: IdentityProvider,
 		},
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}, nil
 }
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go
index 3ae7ffec84..2134e57c9e 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go
@@ -18,13 +18,6 @@ import (
 )

 const addField = "AdditionalFields"
-<<<<<<< HEAD
-=======
-const (
-	marshalJSON   = "MarshalJSON"
-	unmarshalJSON = "UnmarshalJSON"
-)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 var (
 	leftBrace = []byte("{")[0]
@@ -109,7 +102,6 @@ func delimIs(got json.Token, want rune) bool {
 // hasMarshalJSON will determine if the value or a pointer to this value has
 // the MarshalJSON method.
 func hasMarshalJSON(v reflect.Value) bool {
-<<<<<<< HEAD
 	ok := false
 	if _, ok = v.Interface().(json.Marshaler); !ok {
 		var i any
@@ -121,43 +113,16 @@ func hasMarshalJSON(v reflect.Value) bool {
 		_, ok = i.(json.Marshaler)
 	}
 	return ok
-=======
-	if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid {
-		_, ok := v.Interface().(json.Marshaler)
-		return ok
-	}
-
-	if v.Kind() == reflect.Ptr {
-		v = v.Elem()
-	} else {
-		if !v.CanAddr() {
-			return false
-		}
-		v = v.Addr()
-	}
-
-	if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid {
-		_, ok := v.Interface().(json.Marshaler)
-		return ok
-	}
-	return false
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 // callMarshalJSON will call MarshalJSON() method on the value or a pointer to this value.
 // This will panic if the method is not defined.
 func callMarshalJSON(v reflect.Value) ([]byte, error) {
-<<<<<<< HEAD
 	if marsh, ok := v.Interface().(json.Marshaler); ok {
-=======
-	if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid {
-		marsh := v.Interface().(json.Marshaler)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		return marsh.MarshalJSON()
 	}

 	if v.Kind() == reflect.Ptr {
-<<<<<<< HEAD
 		if marsh, ok := v.Elem().Interface().(json.Marshaler); ok {
 			return marsh.MarshalJSON()
 		}
@@ -169,20 +134,6 @@ func callMarshalJSON(v reflect.Value) ([]byte, error) {
 		}
 	}

-=======
-		v = v.Elem()
-	} else {
-		if v.CanAddr() {
-			v = v.Addr()
-		}
-	}
-
-	if method := v.MethodByName(unmarshalJSON); method.Kind() != reflect.Invalid {
-		marsh := v.Interface().(json.Marshaler)
-		return marsh.MarshalJSON()
-	}
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	panic(fmt.Sprintf("callMarshalJSON called on type %T that does not have MarshalJSON defined", v.Interface()))
 }

@@ -197,17 +148,8 @@ func hasUnmarshalJSON(v reflect.Value) bool {
 		v = v.Addr()
 	}

-<<<<<<< HEAD
 	_, ok := v.Interface().(json.Unmarshaler)
 	return ok
-=======
-	if method := v.MethodByName(unmarshalJSON); method.Kind() != reflect.Invalid {
-		_, ok := v.Interface().(json.Unmarshaler)
-		return ok
-	}
-
-	return false
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 // hasOmitEmpty indicates if the field has instructed us to not output
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go
index f2291a2fe7..fda5d7dd33 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go
@@ -7,10 +7,7 @@ package local
 import (
 	"context"
 	"fmt"
-<<<<<<< HEAD
 	"html"
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"net"
 	"net/http"
 	"strconv"
@@ -145,11 +142,7 @@ func (s *Server) handler(w http.ResponseWriter, r *http.Request) {

 	headerErr := q.Get("error")
 	if headerErr != "" {
-<<<<<<< HEAD
 		desc := html.EscapeString(q.Get("error_description"))
-=======
-		desc := q.Get("error_description")
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		// Note: It is a little weird we handle some errors by not going to the failPage. If they all should,
 		// change this to s.error() and make s.error() write the failPage instead of an error code.
 		_, _ = w.Write([]byte(fmt.Sprintf(failPage, headerErr, desc)))
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go
index c139a64e63..e065313444 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go
@@ -10,11 +10,8 @@ import (
 	"io"
 	"time"

-<<<<<<< HEAD
 	"github.com/google/uuid"

-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
 	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported"
 	internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time"
@@ -23,10 +20,6 @@ import (
 	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
 	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust"
 	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs"
-<<<<<<< HEAD
-=======
-	"github.com/google/uuid"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 )

 // ResolveEndpointer contains the methods for resolving authority endpoints.
@@ -339,11 +332,7 @@ func (t *Client) DeviceCode(ctx context.Context, authParams authority.AuthParams
 func (t *Client) resolveEndpoint(ctx context.Context, authParams *authority.AuthParams, userPrincipalName string) error {
 	endpoints, err := t.Resolver.ResolveEndpoints(ctx, authParams.AuthorityInfo, userPrincipalName)
 	if err != nil {
-<<<<<<< HEAD
 		return fmt.Errorf("unable to resolve an endpoint: %w", err)
-=======
-		return fmt.Errorf("unable to resolve an endpoint: %s", err)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 	authParams.Endpoints = endpoints
 	return nil
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go
index a191ba9f69..c3c4a96fc3 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go
@@ -23,11 +23,7 @@ import (
 const (
 	authorizationEndpoint = "https://%v/%v/oauth2/v2.0/authorize"
-<<<<<<< HEAD
 	aadInstanceDiscoveryEndpoint = "https://%v/common/discovery/instance"
-=======
-	instanceDiscoveryEndpoint = "https://%v/common/discovery/instance"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	tenantDiscoveryEndpointWithRegion = "https://%s.%s/%s/v2.0/.well-known/openid-configuration"
 	regionName = "REGION_NAME"
 	defaultAPIVersion = "2021-10-01"
@@ -51,22 +47,12 @@ type jsonCaller interface {
 }

 var aadTrustedHostList = map[string]bool{
-<<<<<<< HEAD
 	"login.windows.net":                true, // Microsoft Azure Worldwide - Used in validation scenarios where host is not this list
 	"login.partner.microsoftonline.cn": true, // Microsoft Azure China
 	"login.microsoftonline.de":         true, // Microsoft Azure Blackforest
 	"login-us.microsoftonline.com":     true, // Microsoft Azure US Government - Legacy
 	"login.microsoftonline.us":         true, // Microsoft Azure US Government
 	"login.microsoftonline.com":        true, // Microsoft Azure Worldwide
-=======
-	"login.windows.net":            true, // Microsoft Azure Worldwide - Used in validation scenarios where host is not this list
-	"login.chinacloudapi.cn":       true, // Microsoft Azure China
-	"login.microsoftonline.de":     true, // Microsoft Azure Blackforest
-	"login-us.microsoftonline.com": true, // Microsoft Azure US Government - Legacy
-	"login.microsoftonline.us":     true, // Microsoft Azure US Government
-	"login.microsoftonline.com":    true, // Microsoft Azure Worldwide
-	"login.cloudgovapi.us":         true, // Microsoft Azure US Government
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 // TrustedHost checks if an AAD host is trusted/valid.
@@ -150,17 +136,12 @@ const (
 const (
 	AAD  = "MSSTS"
 	ADFS = "ADFS"
-<<<<<<< HEAD
 	DSTS = "DSTS"
 )

 // DSTSTenant is referenced throughout multiple files, let us use a const in case we ever need to change it.
 const DSTSTenant = "7a433bfc-2514-4697-b467-e0933190487f"
-=======
-)
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // AuthenticationScheme is an extensibility mechanism designed to be used only by Azure Arc for proof of possession access tokens.
 type AuthenticationScheme interface {
 	// Extra parameters that are added to the request to the /token endpoint.
@@ -258,7 +239,6 @@ func NewAuthParams(clientID string, authorityInfo Info) AuthParams {
 //   - the client is configured to authenticate only Microsoft accounts via the "consumers" endpoint
 //   - the resulting authority URL is invalid
 func (p AuthParams) WithTenant(ID string) (AuthParams, error) {
-<<<<<<< HEAD
 	if ID == "" || ID == p.AuthorityInfo.Tenant {
 		return p, nil
 	}
@@ -279,25 +259,6 @@ func (p AuthParams) WithTenant(ID string) (AuthParams, error) {
 		return p, errors.New("dSTS authority doesn't support tenants")
 	}

-=======
-	switch ID {
-	case "", p.AuthorityInfo.Tenant:
-		// keep the default tenant because the caller didn't override it
-		return p, nil
-	case "common", "consumers", "organizations":
-		if p.AuthorityInfo.AuthorityType == AAD {
-			return p, fmt.Errorf(`tenant ID must be a specific tenant, not "%s"`, ID)
-		}
-		// else we'll return a better error below
-	}
-	if p.AuthorityInfo.AuthorityType != AAD {
-		return p, errors.New("the authority doesn't support tenants")
-	}
-	if p.AuthorityInfo.Tenant == "consumers" {
-		return p, errors.New(`client is configured to authenticate only personal Microsoft accounts, via the "consumers" endpoint`)
-	}
-	authority := "https://" + path.Join(p.AuthorityInfo.Host, ID)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	info, err := NewInfoFromAuthorityURI(authority, p.AuthorityInfo.ValidateAuthority, p.AuthorityInfo.InstanceDiscoveryDisabled)
 	if err == nil {
 		info.Region = p.AuthorityInfo.Region
@@ -389,17 +350,12 @@ type Info struct {
 	Host                      string
 	CanonicalAuthorityURI     string
 	AuthorityType             string
-<<<<<<< HEAD
-=======
-	UserRealmURIPrefix        string
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	ValidateAuthority         bool
 	Tenant                    string
 	Region                    string
 	InstanceDiscoveryDisabled bool
 }

-<<<<<<< HEAD
 // NewInfoFromAuthorityURI creates an AuthorityInfo instance from the authority URL provided.
 func NewInfoFromAuthorityURI(authority string, validateAuthority bool, instanceDiscoveryDisabled bool) (Info, error) {
@@ -438,44 +394,13 @@ func NewInfoFromAuthorityURI(authority string, validateAuthority bool, instanceD
 		}
 		authorityType = DSTS
 		tenant = DSTSTenant
-=======
-func firstPathSegment(u *url.URL) (string, error) {
-	pathParts := strings.Split(u.EscapedPath(), "/")
-	if len(pathParts) >= 2 {
-		return pathParts[1], nil
-	}
-
-	return "", errors.New(`authority must be an https URL such as "https://login.microsoftonline.com/"`)
-}
-
-// NewInfoFromAuthorityURI creates an AuthorityInfo instance from the authority URL provided.
-func NewInfoFromAuthorityURI(authority string, validateAuthority bool, instanceDiscoveryDisabled bool) (Info, error) {
-	u, err := url.Parse(strings.ToLower(authority))
-	if err != nil || u.Scheme != "https" {
-		return Info{}, errors.New(`authority must be an https URL such as "https://login.microsoftonline.com/"`)
-	}
-
-	tenant, err := firstPathSegment(u)
-	if err != nil {
-		return Info{}, err
-	}
-	authorityType := AAD
-	if tenant == "adfs" {
-		authorityType = ADFS
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}

 	// u.Host includes the port, if any, which is required for private cloud deployments
 	return Info{
 		Host: u.Host,
-<<<<<<< HEAD
 		CanonicalAuthorityURI: cannonicalAuthority,
 		AuthorityType:         authorityType,
-=======
-		CanonicalAuthorityURI: fmt.Sprintf("https://%v/%v/", u.Host, tenant),
-		AuthorityType:         authorityType,
-		UserRealmURIPrefix:    fmt.Sprintf("https://%v/common/userrealm/", u.Hostname()),
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ValidateAuthority:         validateAuthority,
 		Tenant:                    tenant,
 		InstanceDiscoveryDisabled: instanceDiscoveryDisabled,
@@ -619,11 +544,7 @@ func (c Client) AADInstanceDiscovery(ctx context.Context, authorityInfo Info) (I
 		discoveryHost = authorityInfo.Host
 	}

-<<<<<<< HEAD
 	endpoint := fmt.Sprintf(aadInstanceDiscoveryEndpoint, discoveryHost)
-=======
-	endpoint := fmt.Sprintf(instanceDiscoveryEndpoint, discoveryHost)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		err = c.Comm.JSONCall(ctx, endpoint, http.Header{}, qv, nil, &resp)
 	}
 	return resp, err
@@ -641,7 +562,6 @@ func detectRegion(ctx context.Context) string {
 	client := http.Client{
 		Timeout: time.Duration(2 * time.Second),
 	}
-<<<<<<< HEAD
 	req, _ := http.NewRequestWithContext(ctx, http.MethodGet, imdsEndpoint, nil)
 	req.Header.Set("Metadata", "true")
 	resp, err := client.Do(req)
@@ -655,19 +575,6 @@ func detectRegion(ctx context.Context) string {
 			return ""
 		}
 	}
-=======
-	req, _ := http.NewRequest("GET", imdsEndpoint, nil)
-	req.Header.Set("Metadata", "true")
-	resp, err := client.Do(req)
-	// If the request times out or there is an error, it is retried once
-	if err != nil || resp.StatusCode != 200 {
-		resp, err = client.Do(req)
-		if err != nil || resp.StatusCode != 200 {
-			return ""
-		}
-	}
-	defer resp.Body.Close()
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	response, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return ""
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go
index d431ea97f4..d62aac74eb 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go
@@ -18,18 +18,11 @@ import (
 	"strings"
 	"time"

-<<<<<<< HEAD
 	"github.com/google/uuid"

 	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
 	customJSON "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json"
 	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version"
-=======
-	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
-	customJSON "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json"
-	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version"
-	"github.com/google/uuid"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 )

 // HTTPClient represents an HTTP client.
@@ -78,7 +71,6 @@ func (c *Client) JSONCall(ctx context.Context, endpoint string, headers http.Hea
 		unmarshal = customJSON.Unmarshal
 	}

-<<<<<<< HEAD
 	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("%s?%s", endpoint, qv.Encode()), nil)
 	if err != nil {
 		return fmt.Errorf("could not create request: %w", err)
@@ -86,17 +78,6 @@ func (c *Client) JSONCall(ctx context.Context, endpoint string, headers http.Hea
 	addStdHeaders(headers)
 	req.Header = headers
-=======
-	u, err := url.Parse(endpoint)
-	if err != nil {
-		return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err)
-	}
-	u.RawQuery = qv.Encode()
-
-	addStdHeaders(headers)
-
-	req := &http.Request{Method: http.MethodGet, URL: u, Header: headers}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 	if body != nil {
 		// Note: In case your wondering why we are not gzip encoding....
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go
index 812d17accb..4030ec8d8f 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go
@@ -18,12 +18,6 @@ import (
 	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
 )

-<<<<<<< HEAD
-=======
-// ADFS is an active directory federation service authority type.
-const ADFS = "ADFS"
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 type cacheEntry struct {
 	Endpoints             authority.Endpoints
 	ValidForDomainsInList map[string]bool
@@ -54,11 +48,7 @@ func (m *authorityEndpoint) ResolveEndpoints(ctx context.Context, authorityInfo
 		return endpoints, nil
 	}

-<<<<<<< HEAD
 	endpoint, err := m.openIDConfigurationEndpoint(ctx, authorityInfo)
-=======
-	endpoint, err := m.openIDConfigurationEndpoint(ctx, authorityInfo, userPrincipalName)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return authority.Endpoints{}, err
 	}
@@ -90,11 +80,7 @@ func (m *authorityEndpoint) cachedEndpoints(authorityInfo authority.Info, userPr
 	defer m.mu.Unlock()

 	if cacheEntry, ok := m.cache[authorityInfo.CanonicalAuthorityURI]; ok {
-<<<<<<< HEAD
 		if authorityInfo.AuthorityType == authority.ADFS {
-=======
-		if authorityInfo.AuthorityType == ADFS {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			domain, err := adfsDomainFromUpn(userPrincipalName)
 			if err == nil {
 				if _, ok := cacheEntry.ValidForDomainsInList[domain]; ok {
@@ -113,11 +99,7 @@ func (m *authorityEndpoint) addCachedEndpoints(authorityInfo authority.Info, use

 	updatedCacheEntry := createcacheEntry(endpoints)

-<<<<<<< HEAD
 	if authorityInfo.AuthorityType == authority.ADFS {
-=======
-	if authorityInfo.AuthorityType == ADFS {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		// Since we're here, we've made a call to the backend.  We want to ensure we're caching
 		// the latest values from the server.
 		if cacheEntry, ok := m.cache[authorityInfo.CanonicalAuthorityURI]; ok {
@@ -134,18 +116,12 @@ func (m *authorityEndpoint) addCachedEndpoints(authorityInfo authority.Info, use
 	m.cache[authorityInfo.CanonicalAuthorityURI] = updatedCacheEntry
 }

-<<<<<<< HEAD
 func (m *authorityEndpoint) openIDConfigurationEndpoint(ctx context.Context, authorityInfo authority.Info) (string, error) {
 	if authorityInfo.AuthorityType == authority.ADFS {
 		return fmt.Sprintf("https://%s/adfs/.well-known/openid-configuration", authorityInfo.Host), nil
 	} else if authorityInfo.AuthorityType == authority.DSTS {
 		return fmt.Sprintf("https://%s/dstsv2/%s/v2.0/.well-known/openid-configuration", authorityInfo.Host, authority.DSTSTenant), nil
-=======
-func (m *authorityEndpoint) openIDConfigurationEndpoint(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (string, error) {
-	if authorityInfo.Tenant == "adfs" {
-		return fmt.Sprintf("https://%s/adfs/.well-known/openid-configuration", authorityInfo.Host), nil
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	} else if authorityInfo.ValidateAuthority && !authority.TrustedHost(authorityInfo.Host) {
 		resp, err := m.rest.Authority().AADInstanceDiscovery(ctx, authorityInfo)
 		if err != nil {
@@ -158,10 +134,6 @@ func (m *authorityEndpoint) openIDConfigurationEndpoint(ctx context.Context, aut
 			return "", err
 		}
 		return resp.TenantDiscoveryEndpoint, nil
-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}

 	return authorityInfo.CanonicalAuthorityURI + "v2.0/.well-known/openid-configuration", nil
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/app_engine.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/app_engine.go
index 73d3ecca45..9ce7d96fe4 100644
--- a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/app_engine.go
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/app_engine.go
@@ -14,11 +14,8 @@
 package gcp

-<<<<<<< HEAD
 import "context"

-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 const (
 	// See https://cloud.google.com/appengine/docs/flexible/python/migrating#modules
 	// for the environment variables available in GAE environments.
@@ -72,11 +69,7 @@ func (d *Detector) AppEngineFlexAvailabilityZoneAndRegion() (string, string, err

 // AppEngineStandardAvailabilityZone returns the zone the app engine service is running in.
 func (d *Detector) AppEngineStandardAvailabilityZone() (string, error) {
-<<<<<<< HEAD
 	return d.metadata.ZoneWithContext(context.TODO())
-=======
-	return d.metadata.Zone()
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 // AppEngineStandardCloudRegion returns the region the app engine service is running in.
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go
index 6b0affdbd9..4eac3c74b6 100644
--- a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go
@@ -15,15 +15,10 @@
 package gcp

 import (
-<<<<<<< HEAD
 	"context"
 	"errors"
 	"os"
 	"strings"
-=======
-	"errors"
-	"os"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 	"cloud.google.com/go/compute/metadata"
 )
@@ -75,7 +70,6 @@ func (d *Detector) CloudPlatform() Platform {

 // ProjectID returns the ID of the project in which this program is running.
 func (d *Detector) ProjectID() (string, error) {
-<<<<<<< HEAD
 	// N.B. d.metadata.ProjectIDWithContext(context.TODO()) is cached globally, so if we use it here it's untestable.
 	s, err := d.metadata.GetWithContext(context.TODO(), "project/project-id")
 	return strings.TrimSpace(s), err
@@ -86,36 +80,14 @@ func (d *Detector) instanceID() (string, error) {
 	// N.B. d.metadata.InstanceIDWithContext(context.TODO()) is cached globally, so if we use it here it's untestable.
 	s, err := d.metadata.GetWithContext(context.TODO(), "instance/id")
 	return strings.TrimSpace(s), err
-=======
-	return d.metadata.ProjectID()
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 // Detector collects resource information for all GCP platforms.
 type Detector struct {
-<<<<<<< HEAD
 	metadata *metadata.Client
 	os       osProvider
 }
-=======
-	metadata metadataProvider
-	os       osProvider
-}
-
-// metadataProvider contains the subset of the metadata.Client functions used
-// by this resource Detector to allow testing with a fake implementation.
-type metadataProvider interface {
-	ProjectID() (string, error)
-	InstanceID() (string, error)
-	Get(string) (string, error)
-	InstanceName() (string, error)
-	Hostname() (string, error)
-	Zone() (string, error)
-	InstanceAttributeValue(string) (string, error)
-}
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // osProvider contains the subset of the os package functions used by.
 type osProvider interface {
 	LookupEnv(string) (string, bool)
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/faas.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/faas.go
index f5c9f408de..f137b1fae6 100644
--- a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/faas.go
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/faas.go
@@ -15,10 +15,7 @@
 package gcp

 import (
-<<<<<<< HEAD
 	"context"
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"strings"
 )
@@ -93,11 +90,7 @@ func (d *Detector) CloudRunJobTaskIndex() (string, error) {

 // FaaSID returns the instance id of the Cloud Run or Cloud Function.
 func (d *Detector) FaaSID() (string, error) {
-<<<<<<< HEAD
 	return d.instanceID()
-=======
-	return d.metadata.InstanceID()
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 // FaaSCloudRegion detects region from the metadata server.
@@ -105,11 +98,7 @@ func (d *Detector) FaaSID() (string, error) {
 //
 // https://cloud.google.com/run/docs/reference/container-contract#metadata-server
 func (d *Detector) FaaSCloudRegion() (string, error) {
-<<<<<<< HEAD
 	region, err := d.metadata.GetWithContext(context.TODO(), regionMetadataAttr)
-=======
-	region, err := d.metadata.Get(regionMetadataAttr)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return "", err
 	}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gce.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gce.go
index 27ffa71b31..794cfdf036 100644
--- a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gce.go
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gce.go
@@ -15,7 +15,6 @@
 package gcp

 import (
-<<<<<<< HEAD
 	"context"
 	"fmt"
 	"regexp"
@@ -33,78 +32,42 @@
 const createdByInstanceAttr = "created-by"

 func (d *Detector) onGCE() bool {
 	_, err := d.metadata.GetWithContext(context.TODO(), machineTypeMetadataAttr)
-=======
-	"fmt"
-	"strings"
-)
-
-// See the available GCE instance metadata:
-// https://cloud.google.com/compute/docs/metadata/default-metadata-values#vm_instance_metadata
-const machineTypeMetadataAttr = "instance/machine-type"
-
-func (d *Detector) onGCE() bool {
-	_, err := d.metadata.Get(machineTypeMetadataAttr)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return err == nil
 }

 // GCEHostType returns the machine type of the instance on which this program is running.
 func (d *Detector) GCEHostType() (string, error) {
-<<<<<<< HEAD
 	return d.metadata.GetWithContext(context.TODO(), machineTypeMetadataAttr)
-=======
-	return d.metadata.Get(machineTypeMetadataAttr)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 // GCEHostID returns the instance ID of the instance on which this program is running.
 func (d *Detector) GCEHostID() (string, error) {
-<<<<<<< HEAD
 	return d.instanceID()
-=======
-	return d.metadata.InstanceID()
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 // GCEHostName returns the instance name of the instance on which this program is running.
 // Recommended to use GCEInstanceName() or GCEInstanceHostname() to more accurately reflect which
 // value is returned.
 func (d *Detector) GCEHostName() (string, error) {
-<<<<<<< HEAD
 	return d.metadata.InstanceNameWithContext(context.TODO())
-=======
-	return d.metadata.InstanceName()
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 // GCEInstanceName returns the instance name of the instance on which this program is running.
 // This is the value visible in the Cloud Console UI, and the prefix for the default hostname
 // of the instance as defined by the default internal DNS name (see https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
 func (d *Detector) GCEInstanceName() (string, error) {
-<<<<<<< HEAD
 	return d.metadata.InstanceNameWithContext(context.TODO())
-=======
-	return d.metadata.InstanceName()
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 // GCEInstanceHostname returns the full value of the default or custom hostname of the instance
 // on which this program is running. See https://cloud.google.com/compute/docs/instances/custom-hostname-vm.
 func (d *Detector) GCEInstanceHostname() (string, error) {
-<<<<<<< HEAD
 	return d.metadata.HostnameWithContext(context.TODO())
-=======
-	return d.metadata.Hostname()
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 // GCEAvailabilityZoneAndRegion returns the zone and region in which this program is running.
 func (d *Detector) GCEAvailabilityZoneAndRegion() (string, string, error) {
-<<<<<<< HEAD
 	zone, err := d.metadata.ZoneWithContext(context.TODO())
-=======
-	zone, err := d.metadata.Zone()
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return "", "", err
 	}
@@ -117,7 +80,6 @@ func (d *Detector) GCEAvailabilityZoneAndRegion() (string, string, error) {
 	}
 	return zone, strings.Join(splitZone[0:2], "-"), nil
 }
-<<<<<<< HEAD

 type ManagedInstanceGroup struct {
 	Name     string
@@ -153,5 +115,3 @@ func (d *Detector) GCEManagedInstanceGroup() (ManagedInstanceGroup, error) {
 	}
 	return mig, nil
 }
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gke.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gke.go
index 1b3cc5f14e..734d44cc03 100644
--- a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gke.go
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gke.go
@@ -15,10 +15,7 @@
 package gcp

 import (
-<<<<<<< HEAD
 	"context"
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"fmt"
 	"strings"
 )
@@ -35,7 +32,6 @@ const (
 )

 func (d *Detector) onGKE() bool {
-<<<<<<< HEAD
 	// Check if we are on k8s first
 	_, found := d.os.LookupEnv(k8sServiceHostEnv)
 	if !found {
@@ -45,10 +41,6 @@ func (d *Detector) onGKE() bool {
 	// different managed k8s platform.
 	_, err := d.metadata.InstanceAttributeValueWithContext(context.TODO(), clusterLocationMetadataAttr)
 	return err == nil
-=======
-	_, found := d.os.LookupEnv(k8sServiceHostEnv)
-	return found
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 // GKEHostID returns the instance ID of the instance on which this program is running.
@@ -58,11 +50,7 @@ func (d *Detector) GKEHostID() (string, error) {

 // GKEClusterName returns the name if the GKE cluster in which this program is running.
 func (d *Detector) GKEClusterName() (string, error) {
-<<<<<<< HEAD
 	return d.metadata.InstanceAttributeValueWithContext(context.TODO(), clusterNameMetadataAttr)
-=======
-	return d.metadata.InstanceAttributeValue(clusterNameMetadataAttr)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 type LocationType int64
@@ -75,11 +63,7 @@ const (
 )

 // GKEAvailabilityZoneOrRegion returns the location of the cluster and whether the cluster is zonal or regional.
 func (d *Detector) GKEAvailabilityZoneOrRegion() (string, LocationType, error) {
-<<<<<<< HEAD
 	clusterLocation, err := d.metadata.InstanceAttributeValueWithContext(context.TODO(), clusterLocationMetadataAttr)
-=======
-	clusterLocation, err := d.metadata.InstanceAttributeValue(clusterLocationMetadataAttr)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return "", UndefinedLocation, err
 	}
diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go
index b82475c75a..304edc3422 100644
--- a/vendor/github.com/Masterminds/semver/v3/version.go
+++ b/vendor/github.com/Masterminds/semver/v3/version.go
@@ -39,17 +39,11 @@ var (
 )

 // semVerRegex is the regular expression used to parse a semantic version.
-<<<<<<< HEAD
 // This is not the official regex from the semver spec. It has been modified to allow for loose handling
 // where versions like 2.1 are detected.
 const semVerRegex string = `v?(0|[1-9]\d*)(?:\.(0|[1-9]\d*))?(?:\.(0|[1-9]\d*))?` +
 	`(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?` +
 	`(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?`
-=======
-const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
-	`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
-	`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 // Version represents a single semantic version.
 type Version struct {
@@ -154,13 +148,8 @@ func NewVersion(v string) (*Version, error) {
 	}

 	sv := &Version{
-<<<<<<< HEAD
 		metadata: m[5],
 		pre:      m[4],
-=======
-		metadata: m[8],
-		pre:      m[5],
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		original: v,
 	}

@@ -171,11 +160,7 @@ func NewVersion(v string) (*Version, error) {
 	}

 	if m[2] != "" {
-<<<<<<< HEAD
 		sv.minor, err = strconv.ParseUint(m[2], 10, 64)
-=======
-		sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		if err != nil {
 			return nil, fmt.Errorf("Error parsing version segment: %s", err)
 		}
@@ -184,11 +169,7 @@ func NewVersion(v string) (*Version, error) {
 	}

 	if m[3] != "" {
-<<<<<<< HEAD
 		sv.patch, err = strconv.ParseUint(m[3], 10, 64)
-=======
-		sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		if err != nil {
 			return nil, fmt.Errorf("Error parsing version segment: %s", err)
 		}
@@ -633,13 +614,9 @@ func containsOnly(s string, comp string) bool {
 func validatePrerelease(p string) error {
 	eparts := strings.Split(p, ".")
 	for _, p := range eparts {
-<<<<<<< HEAD
 		if p == "" {
 			return ErrInvalidMetadata
 		} else if containsOnly(p, num) {
-=======
-		if containsOnly(p, num) {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			if len(p) > 1 && p[0] == '0' {
 				return ErrSegmentStartsZero
 			}
@@ -658,13 +635,9 @@ func validatePrerelease(p string) error {
 func validateMetadata(m string) error {
 	eparts := strings.Split(m, ".")
 	for _, p := range eparts {
-<<<<<<< HEAD
 		if p == "" {
 			return ErrInvalidMetadata
 		} else if !containsOnly(p, allowed) {
-=======
-		if !containsOnly(p, allowed) {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			return ErrInvalidMetadata
 		}
 	}
diff --git a/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go b/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go
index 3bd95a182f..d558b9bd82 100644
--- a/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go
+++ b/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go
@@ -49,28 +49,16 @@ func ShiftNBytesLeft(dst, x []byte, n int) {
 	dst = append(dst, make([]byte, n/8)...)
 }

-<<<<<<< HEAD
 // XorBytesMut replaces X with X XOR Y. len(X) must be >= len(Y).
 func XorBytesMut(X, Y []byte) {
 	for i := 0; i < len(Y); i++ {
-=======
-// XorBytesMut assumes equal input length, replaces X with X XOR Y
-func XorBytesMut(X, Y []byte) {
-	for i := 0; i < len(X); i++ {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		X[i] ^= Y[i]
 	}
 }

-<<<<<<< HEAD
 // XorBytes puts X XOR Y into Z. len(Z) and len(X) must be >= len(Y).
 func XorBytes(Z, X, Y []byte) {
 	for i := 0; i < len(Y); i++ {
-=======
-// XorBytes assumes equal input length, puts X XOR Y into Z
-func XorBytes(Z, X, Y []byte) {
-	for i := 0; i < len(X); i++ {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		Z[i] = X[i] ^ Y[i]
 	}
 }
diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go b/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go
index f48d80b06e..24f893017b 100644
--- a/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go
+++ b/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go
@@ -18,14 +18,9 @@ import (
 	"crypto/cipher"
 	"crypto/subtle"
 	"errors"
-<<<<<<< HEAD
 	"math/bits"

 	"github.com/ProtonMail/go-crypto/internal/byteutil"
-=======
-	"github.com/ProtonMail/go-crypto/internal/byteutil"
-	"math/bits"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 )

 type ocb struct {
@@ -114,15 +109,10 @@ func (o *ocb) Seal(dst, nonce, plaintext, adata []byte) []byte {
 	if len(nonce) > o.nonceSize {
 		panic("crypto/ocb: Incorrect nonce length given to OCB")
 	}
-<<<<<<< HEAD
 	sep := len(plaintext)
 	ret, out := byteutil.SliceForAppend(dst, sep+o.tagSize)
 	tag := o.crypt(enc, out[:sep], nonce, adata, plaintext)
 	copy(out[sep:], tag)
-=======
-	ret, out := byteutil.SliceForAppend(dst, len(plaintext)+o.tagSize)
-	o.crypt(enc, out, nonce, adata, plaintext)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return ret
 }

@@ -134,19 +124,10 @@ func (o *ocb) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) {
 		return nil, ocbError("Ciphertext shorter than tag length")
 	}
 	sep := len(ciphertext) - o.tagSize
-<<<<<<< HEAD
 	ret, out := byteutil.SliceForAppend(dst, sep)
 	ciphertextData := ciphertext[:sep]
 	tag := o.crypt(dec, out, nonce, adata, ciphertextData)
 	if subtle.ConstantTimeCompare(tag, ciphertext[sep:]) == 1 {
-=======
-	ret, out := byteutil.SliceForAppend(dst, len(ciphertext))
-	ciphertextData := ciphertext[:sep]
-	tag := ciphertext[sep:]
-	o.crypt(dec, out, nonce, adata, ciphertextData)
-	if subtle.ConstantTimeCompare(ret[sep:], tag) == 1 {
-		ret = ret[:sep]
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		return ret, nil
 	}
 	for i := range out {
@@ -156,12 +137,8 @@ func (o *ocb) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) {
 }

 // On instruction enc (resp. dec), crypt is the encrypt (resp. decrypt)
-<<<<<<< HEAD
 // function. It writes the resulting plain/ciphertext into Y and returns
 // the tag.
-=======
-// function. It returns the resulting plain/ciphertext with the tag appended.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte { // // Consider X as a sequence of 128-bit blocks @@ -178,11 +155,7 @@ func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte { truncatedNonce := make([]byte, len(nonce)) copy(truncatedNonce, nonce) truncatedNonce[len(truncatedNonce)-1] &= 192 -<<<<<<< HEAD var Ktop []byte -======= - Ktop := make([]byte, blockSize) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if bytes.Equal(truncatedNonce, o.reusableKtop.noncePrefix) { Ktop = o.reusableKtop.Ktop } else { @@ -222,7 +195,6 @@ func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte { byteutil.XorBytesMut(offset, o.mask.L[bits.TrailingZeros(uint(i+1))]) blockX := X[i*blockSize : (i+1)*blockSize] blockY := Y[i*blockSize : (i+1)*blockSize] -<<<<<<< HEAD switch instruction { case enc: byteutil.XorBytesMut(checksum, blockX) @@ -231,15 +203,6 @@ func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte { byteutil.XorBytesMut(blockY, offset) case dec: byteutil.XorBytes(blockY, blockX, offset) -======= - byteutil.XorBytes(blockY, blockX, offset) - switch instruction { - case enc: - o.block.Encrypt(blockY, blockY) - byteutil.XorBytesMut(blockY, offset) - byteutil.XorBytesMut(checksum, blockX) - case dec: ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) o.block.Decrypt(blockY, blockY) byteutil.XorBytesMut(blockY, offset) byteutil.XorBytesMut(checksum, blockY) @@ -255,7 +218,6 @@ func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte { o.block.Encrypt(pad, offset) chunkX := X[blockSize*m:] chunkY := Y[blockSize*m : len(X)] -<<<<<<< HEAD switch instruction { case enc: byteutil.XorBytesMut(checksum, chunkX) @@ -274,33 +236,6 @@ func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte { o.block.Encrypt(tag, tag) byteutil.XorBytesMut(tag, o.hash(adata)) return tag[:o.tagSize] -======= - byteutil.XorBytes(chunkY, chunkX, pad[:len(chunkX)]) - // P_* || bit(1) || zeroes(127) - len(P_*) - switch instruction { - case enc: - paddedY := append(chunkX, byte(128)) - paddedY = append(paddedY, make([]byte, blockSize-len(chunkX)-1)...) - byteutil.XorBytesMut(checksum, paddedY) - case dec: - paddedX := append(chunkY, byte(128)) - paddedX = append(paddedX, make([]byte, blockSize-len(chunkY)-1)...) - byteutil.XorBytesMut(checksum, paddedX) - } - byteutil.XorBytes(tag, checksum, offset) - byteutil.XorBytesMut(tag, o.mask.lDol) - o.block.Encrypt(tag, tag) - byteutil.XorBytesMut(tag, o.hash(adata)) - copy(Y[blockSize*m+len(chunkY):], tag[:o.tagSize]) - } else { - byteutil.XorBytes(tag, checksum, offset) - byteutil.XorBytesMut(tag, o.mask.lDol) - o.block.Encrypt(tag, tag) - byteutil.XorBytesMut(tag, o.hash(adata)) - copy(Y[blockSize*m:], tag[:o.tagSize]) - } - return Y ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // This hash function is used to compute the tag. 
Per design, on empty input it diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go index dd7b4793ff..e0a677f284 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go @@ -23,11 +23,7 @@ import ( // Headers // // base64-encoded Bytes -<<<<<<< HEAD // '=' base64 encoded checksum (optional) not checked anymore -======= -// '=' base64 encoded checksum ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // -----END Type----- // // where Headers is a possibly empty sequence of Key: Value lines. @@ -44,47 +40,15 @@ type Block struct { var ArmorCorrupt error = errors.StructuralError("armor invalid") -<<<<<<< HEAD -======= -const crc24Init = 0xb704ce -const crc24Poly = 0x1864cfb -const crc24Mask = 0xffffff - -// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1 -func crc24(crc uint32, d []byte) uint32 { - for _, b := range d { - crc ^= uint32(b) << 16 - for i := 0; i < 8; i++ { - crc <<= 1 - if crc&0x1000000 != 0 { - crc ^= crc24Poly - } - } - } - return crc -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var armorStart = []byte("-----BEGIN ") var armorEnd = []byte("-----END ") var armorEndOfLine = []byte("-----") -<<<<<<< HEAD // lineReader wraps a line based reader. It watches for the end of an armor block type lineReader struct { in *bufio.Reader buf []byte eof bool -======= -// lineReader wraps a line based reader. It watches for the end of an armor -// block and records the expected CRC value. -type lineReader struct { - in *bufio.Reader - buf []byte - eof bool - crc uint32 - crcSet bool ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (l *lineReader) Read(p []byte) (n int, err error) { @@ -113,32 +77,9 @@ func (l *lineReader) Read(p []byte) (n int, err error) { if len(line) == 5 && line[0] == '=' { // This is the checksum line -<<<<<<< HEAD // Don't check the checksum l.eof = true -======= - var expectedBytes [3]byte - var m int - m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:]) - if m != 3 || err != nil { - return - } - l.crc = uint32(expectedBytes[0])<<16 | - uint32(expectedBytes[1])<<8 | - uint32(expectedBytes[2]) - - line, _, err = l.in.ReadLine() - if err != nil && err != io.EOF { - return - } - if !bytes.HasPrefix(line, armorEnd) { - return 0, ArmorCorrupt - } - - l.eof = true - l.crcSet = true ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return 0, io.EOF } @@ -159,33 +100,14 @@ func (l *lineReader) Read(p []byte) (n int, err error) { return } -<<<<<<< HEAD // openpgpReader passes Read calls to the underlying base64 decoder. type openpgpReader struct { lReader *lineReader b64Reader io.Reader -======= -// openpgpReader passes Read calls to the underlying base64 decoder, but keeps -// a running CRC of the resulting data and checks the CRC against the value -// found by the lineReader at EOF. 
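The deletions in this armor hunk move checksum handling entirely to the encoder: the decoder now treats the optional '=' CRC-24 line as end-of-body and no longer verifies it. For reference, the checksum itself (the same constants the encoder keeps, see the encode.go hunk below) is computed as:

    package main

    import "fmt"

    const (
        crc24Init = 0xb704ce
        crc24Poly = 0x1864cfb
        crc24Mask = 0xffffff
    )

    // crc24 is the OpenPGP armor checksum from RFC 4880, section 6.1.
    func crc24(crc uint32, d []byte) uint32 {
        for _, b := range d {
            crc ^= uint32(b) << 16
            for i := 0; i < 8; i++ {
                crc <<= 1
                if crc&0x1000000 != 0 {
                    crc ^= crc24Poly
                }
            }
        }
        return crc
    }

    func main() {
        fmt.Printf("%06x\n", crc24(crc24Init, []byte("hello"))&crc24Mask)
    }

A caller that still wants to verify legacy armors can run this loop over the decoded body and compare against the value on the '=' line.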
-type openpgpReader struct { - lReader *lineReader - b64Reader io.Reader - currentCRC uint32 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (r *openpgpReader) Read(p []byte) (n int, err error) { n, err = r.b64Reader.Read(p) -<<<<<<< HEAD -======= - r.currentCRC = crc24(r.currentCRC, p[:n]) - - if err == io.EOF && r.lReader.crcSet && r.lReader.crc != uint32(r.currentCRC&crc24Mask) { - return 0, ArmorCorrupt - } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return } @@ -253,10 +175,6 @@ TryNextBlock: } p.lReader.in = r -<<<<<<< HEAD -======= - p.oReader.currentCRC = crc24Init ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.oReader.lReader = &p.lReader p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader) p.Body = &p.oReader diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go index fb34c055cc..550efddf05 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go @@ -7,10 +7,7 @@ package armor import ( "encoding/base64" "io" -<<<<<<< HEAD "sort" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) var armorHeaderSep = []byte(": ") @@ -18,7 +15,6 @@ var blockEnd = []byte("\n=") var newline = []byte("\n") var armorEndOfLineOut = []byte("-----\n") -<<<<<<< HEAD const crc24Init = 0xb704ce const crc24Poly = 0x1864cfb @@ -36,8 +32,6 @@ func crc24(crc uint32, d []byte) uint32 { return crc } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // writeSlices writes its arguments to the given Writer. func writeSlices(out io.Writer, slices ...[]byte) (err error) { for _, s := range slices { @@ -123,7 +117,6 @@ func (l *lineBreaker) Close() (err error) { // // encoding -> base64 encoder -> lineBreaker -> out type encoding struct { -<<<<<<< HEAD out io.Writer breaker *lineBreaker b64 io.WriteCloser @@ -136,17 +129,6 @@ func (e *encoding) Write(data []byte) (n int, err error) { if e.crcEnabled { e.crc = crc24(e.crc, data) } -======= - out io.Writer - breaker *lineBreaker - b64 io.WriteCloser - crc uint32 - blockType []byte -} - -func (e *encoding) Write(data []byte) (n int, err error) { - e.crc = crc24(e.crc, data) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return e.b64.Write(data) } @@ -157,7 +139,6 @@ func (e *encoding) Close() (err error) { } e.breaker.Close() -<<<<<<< HEAD if e.crcEnabled { var checksumBytes [3]byte checksumBytes[0] = byte(e.crc >> 16) @@ -173,29 +154,12 @@ func (e *encoding) Close() (err error) { } func encode(out io.Writer, blockType string, headers map[string]string, checksum bool) (w io.WriteCloser, err error) { -======= - var checksumBytes [3]byte - checksumBytes[0] = byte(e.crc >> 16) - checksumBytes[1] = byte(e.crc >> 8) - checksumBytes[2] = byte(e.crc) - - var b64ChecksumBytes [4]byte - base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:]) - - return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine) -} - -// Encode returns a WriteCloser which will encode the data written to it in -// OpenPGP armor. 
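The unconditional Encode removed just below is reintroduced later in this file as a thin wrapper; new code can opt out of the legacy trailer through EncodeWithChecksumOption, whose signature appears in the following hunks. A hedged usage sketch, assuming only the module path of the vendor tree:

    package main

    import (
        "bytes"
        "fmt"

        "github.com/ProtonMail/go-crypto/openpgp/armor"
    )

    func main() {
        var buf bytes.Buffer
        // false: emit the armored block without the CRC-24 "=" trailer.
        w, err := armor.EncodeWithChecksumOption(&buf, "PGP MESSAGE",
            map[string]string{"Comment": "example"}, false)
        if err != nil {
            panic(err)
        }
        if _, err := w.Write([]byte("hello")); err != nil {
            panic(err)
        }
        if err := w.Close(); err != nil { // Close finishes the block
            panic(err)
        }
        fmt.Println(buf.String())
    }

Headers are also emitted in sorted key order now, so the armored output is deterministic across runs.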
-func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) bType := []byte(blockType) err = writeSlices(out, armorStart, bType, armorEndOfLineOut) if err != nil { return } -<<<<<<< HEAD keys := make([]string, len(headers)) i := 0 for k := range headers { @@ -205,10 +169,6 @@ func Encode(out io.Writer, blockType string, headers map[string]string) (w io.Wr sort.Strings(keys) for _, k := range keys { err = writeSlices(out, []byte(k), armorHeaderSep, []byte(headers[k]), newline) -======= - for k, v := range headers { - err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return } @@ -220,23 +180,15 @@ func Encode(out io.Writer, blockType string, headers map[string]string) (w io.Wr } e := &encoding{ -<<<<<<< HEAD out: out, breaker: newLineBreaker(out, 64), blockType: bType, crc: crc24Init, crcEnabled: checksum, -======= - out: out, - breaker: newLineBreaker(out, 64), - crc: crc24Init, - blockType: bType, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker) return e, nil } -<<<<<<< HEAD // Encode returns a WriteCloser which will encode the data written to it in // OpenPGP armor. @@ -252,5 +204,3 @@ func Encode(out io.Writer, blockType string, headers map[string]string) (w io.Wr func EncodeWithChecksumOption(out io.Writer, blockType string, headers map[string]string, doChecksum bool) (w io.WriteCloser, err error) { return encode(out, blockType, headers, doChecksum) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go index 3a5f7a6ae4..5b40e1375d 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go @@ -30,17 +30,12 @@ func writeCanonical(cw io.Writer, buf []byte, s *int) (int, error) { if c == '\r' { *s = 1 } else if c == '\n' { -<<<<<<< HEAD if _, err := cw.Write(buf[start:i]); err != nil { return 0, err } if _, err := cw.Write(newline); err != nil { return 0, err } -======= - cw.Write(buf[start:i]) - cw.Write(newline) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) start = i + 1 } case 1: @@ -48,13 +43,9 @@ func writeCanonical(cw io.Writer, buf []byte, s *int) (int, error) { } } -<<<<<<< HEAD if _, err := cw.Write(buf[start:]); err != nil { return 0, err } -======= - cw.Write(buf[start:]) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return len(buf), nil } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go index b59e9e3ce8..db8fb163b6 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go @@ -163,19 +163,9 @@ func buildKey(pub *PublicKey, zb []byte, curveOID, fingerprint []byte, stripLead if _, err := param.Write([]byte("Anonymous Sender ")); err != nil { return nil, err } -<<<<<<< HEAD if _, err := param.Write(fingerprint[:]); err != nil { return nil, err } -======= - // For v5 keys, the 20 leftmost octets of the fingerprint are used. 
- if _, err := param.Write(fingerprint[:20]); err != nil { - return nil, err - } - if param.Len()-len(curveOID) != 45 { - return nil, errors.New("ecdh: malformed KDF Param") - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // MB = Hash ( 00 || 00 || 00 || 01 || ZB || Param ); h := pub.KDF.Hash.New() diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go index b978e62223..0eb3937b39 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go @@ -9,7 +9,6 @@ import ( "strconv" ) -<<<<<<< HEAD var ( // ErrDecryptSessionKeyParsing is a generic error message for parsing errors in decrypted data // to reduce the risk of oracle attacks. @@ -22,8 +21,6 @@ var ( ErrMDCMissing error = SignatureError("MDC packet not found") ) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // A StructuralError is returned when OpenPGP data is found to be syntactically // invalid. type StructuralError string @@ -32,7 +29,6 @@ func (s StructuralError) Error() string { return "openpgp: invalid data: " + string(s) } -<<<<<<< HEAD // A DecryptWithSessionKeyError is returned when a failure occurs when reading from symmetrically decrypted data or // an authentication tag verification fails. // Such an error indicates that the supplied session key is likely wrong or the data got corrupted. @@ -61,8 +57,6 @@ func HandleSensitiveParsingError(err error, decrypted bool) error { return ErrDecryptSessionKeyParsing } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // UnsupportedError indicates that, although the OpenPGP data is valid, it // makes use of currently unimplemented features. 
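The errors.go additions keep to the package's convention of one named string type per failure mode, which lets callers branch with errors.As instead of matching message text. A minimal sketch of the pattern with a local stand-in type (not the library's own):

    package main

    import (
        "errors"
        "fmt"
    )

    // structuralError mimics the string-backed error types used above.
    type structuralError string

    func (s structuralError) Error() string { return "invalid data: " + string(s) }

    func parse() error {
        return fmt.Errorf("read failed: %w", structuralError("truncated packet"))
    }

    func main() {
        var serr structuralError
        if err := parse(); errors.As(err, &serr) {
            fmt.Println("structural problem:", serr)
        }
    }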
type UnsupportedError string @@ -87,12 +81,6 @@ func (b SignatureError) Error() string { return "openpgp: invalid signature: " + string(b) } -<<<<<<< HEAD -======= -var ErrMDCHashMismatch error = SignatureError("MDC hash mismatch") -var ErrMDCMissing error = SignatureError("MDC packet not found") - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type signatureExpiredError int func (se signatureExpiredError) Error() string { @@ -107,7 +95,6 @@ func (ke keyExpiredError) Error() string { return "openpgp: key expired" } -<<<<<<< HEAD var ErrSignatureOlderThanKey error = signatureOlderThanKeyError(0) type signatureOlderThanKeyError int @@ -116,8 +103,6 @@ func (ske signatureOlderThanKeyError) Error() string { return "openpgp: signature is older than the key" } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var ErrKeyExpired error = keyExpiredError(0) type keyIncorrectError int @@ -152,30 +137,24 @@ func (keyRevokedError) Error() string { var ErrKeyRevoked error = keyRevokedError(0) -<<<<<<< HEAD type WeakAlgorithmError string func (e WeakAlgorithmError) Error() string { return "openpgp: weak algorithms are rejected: " + string(e) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type UnknownPacketTypeError uint8 func (upte UnknownPacketTypeError) Error() string { return "openpgp: unknown packet type: " + strconv.Itoa(int(upte)) } -<<<<<<< HEAD type CriticalUnknownPacketTypeError uint8 func (upte CriticalUnknownPacketTypeError) Error() string { return "openpgp: unknown critical packet type: " + strconv.Itoa(int(upte)) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // AEADError indicates that there is a problem when initializing or using a // AEAD instance, configuration struct, nonces or index values. type AEADError string @@ -192,7 +171,6 @@ type ErrDummyPrivateKey string func (dke ErrDummyPrivateKey) Error() string { return "openpgp: s2k GNU dummy key: " + string(dke) } -<<<<<<< HEAD // ErrMalformedMessage results when the packet sequence is incorrect type ErrMalformedMessage string @@ -200,5 +178,3 @@ type ErrMalformedMessage string func (dke ErrMalformedMessage) Error() string { return "openpgp: malformed message " + string(dke) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go index a19f26e8b5..c76a75bcda 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go @@ -51,34 +51,14 @@ func (sk CipherFunction) Id() uint8 { return uint8(sk) } -<<<<<<< HEAD // KeySize returns the key size, in bytes, of cipher. func (cipher CipherFunction) KeySize() int { switch cipher { -======= -var keySizeByID = map[uint8]int{ - TripleDES.Id(): 24, - CAST5.Id(): cast5.KeySize, - AES128.Id(): 16, - AES192.Id(): 24, - AES256.Id(): 32, -} - -// KeySize returns the key size, in bytes, of cipher. 
-func (cipher CipherFunction) KeySize() int { - switch cipher { - case TripleDES: - return 24 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) case CAST5: return cast5.KeySize case AES128: return 16 -<<<<<<< HEAD case AES192, TripleDES: -======= - case AES192: ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return 24 case AES256: return 32 diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go index 59afefc085..0da2d0d852 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go @@ -4,20 +4,14 @@ package ecc import ( "bytes" "crypto/elliptic" -<<<<<<< HEAD -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/ProtonMail/go-crypto/bitcurves" "github.com/ProtonMail/go-crypto/brainpool" "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" ) -<<<<<<< HEAD const Curve25519GenName = "Curve25519" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type CurveInfo struct { GenName string Oid *encoding.OID @@ -51,31 +45,19 @@ var Curves = []CurveInfo{ }, { // Curve25519 -<<<<<<< HEAD GenName: Curve25519GenName, -======= - GenName: "Curve25519", ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0x97, 0x55, 0x01, 0x05, 0x01}), Curve: NewCurve25519(), }, { -<<<<<<< HEAD // x448 -======= - // X448 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) GenName: "Curve448", Oid: encoding.NewOID([]byte{0x2B, 0x65, 0x6F}), Curve: NewX448(), }, { // Ed25519 -<<<<<<< HEAD GenName: Curve25519GenName, -======= - GenName: "Curve25519", ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0xDA, 0x47, 0x0F, 0x01}), Curve: NewEd25519(), }, diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go index 9e67a05ddd..5a4c3a8596 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go @@ -2,10 +2,7 @@ package ecc import ( -<<<<<<< HEAD "bytes" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "crypto/subtle" "io" @@ -94,7 +91,6 @@ func (c *ed25519) GenerateEdDSA(rand io.Reader) (pub, priv []byte, err error) { } func getEd25519Sk(publicKey, privateKey []byte) ed25519lib.PrivateKey { -<<<<<<< HEAD privateKeyCap, privateKeyLen, publicKeyLen := cap(privateKey), len(privateKey), len(publicKey) if privateKeyCap >= privateKeyLen+publicKeyLen && @@ -103,9 +99,6 @@ func getEd25519Sk(publicKey, privateKey []byte) ed25519lib.PrivateKey { } return append(privateKey[:privateKeyLen:privateKeyLen], publicKey...) -======= - return append(privateKey, publicKey...) 
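The one-line append being dropped here could alias the caller's backing array whenever privateKey had spare capacity; the replacement first checks whether the key is already laid out as private||public and otherwise uses a full slice expression so append is forced to copy. The underlying Go semantics in a standalone sketch:

    package main

    import "fmt"

    func main() {
        backing := make([]byte, 2, 4)
        backing[0], backing[1] = 1, 2

        // Plain append may write into backing's spare capacity.
        _ = append(backing, 9, 9)
        fmt.Println(backing[:cap(backing)]) // [1 2 9 9]: caller's array mutated

        // A full slice expression pins capacity, forcing a fresh allocation.
        safe := append(backing[:2:2], 7, 7)
        fmt.Println(backing[:cap(backing)], safe) // [1 2 9 9] [1 2 7 7]
    }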
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (c *ed25519) Sign(publicKey, privateKey, message []byte) (sig []byte, err error) { diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go index 9f0df9efc3..b6edda7480 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go @@ -2,10 +2,7 @@ package ecc import ( -<<<<<<< HEAD "bytes" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "crypto/subtle" "io" @@ -88,7 +85,6 @@ func (c *ed448) GenerateEdDSA(rand io.Reader) (pub, priv []byte, err error) { } func getEd448Sk(publicKey, privateKey []byte) ed448lib.PrivateKey { -<<<<<<< HEAD privateKeyCap, privateKeyLen, publicKeyLen := cap(privateKey), len(privateKey), len(publicKey) if privateKeyCap >= privateKeyLen+publicKeyLen && @@ -97,9 +93,6 @@ func getEd448Sk(publicKey, privateKey []byte) ed448lib.PrivateKey { } return append(privateKey[:privateKeyLen:privateKeyLen], publicKey...) -======= - return append(privateKey, publicKey...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (c *ed448) Sign(publicKey, privateKey, message []byte) (sig []byte, err error) { diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go index 0c53a366c9..77213f66be 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go @@ -15,21 +15,15 @@ import ( "github.com/ProtonMail/go-crypto/openpgp/ecdh" "github.com/ProtonMail/go-crypto/openpgp/ecdsa" -<<<<<<< HEAD "github.com/ProtonMail/go-crypto/openpgp/ed25519" "github.com/ProtonMail/go-crypto/openpgp/ed448" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/ProtonMail/go-crypto/openpgp/eddsa" "github.com/ProtonMail/go-crypto/openpgp/errors" "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" "github.com/ProtonMail/go-crypto/openpgp/packet" -<<<<<<< HEAD "github.com/ProtonMail/go-crypto/openpgp/x25519" "github.com/ProtonMail/go-crypto/openpgp/x448" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a @@ -46,15 +40,10 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err return nil, err } primary := packet.NewSignerPrivateKey(creationTime, primaryPrivRaw) -<<<<<<< HEAD if config.V6() { if err := primary.UpgradeToV6(); err != nil { return nil, err } -======= - if config != nil && config.V5Keys { - primary.UpgradeToV5() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } e := &Entity{ @@ -62,7 +51,6 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err PrivateKey: primary, Identities: make(map[string]*Identity), Subkeys: []Subkey{}, -<<<<<<< HEAD Signatures: []*packet.Signature{}, } @@ -82,11 +70,6 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err } err = e.addUserId(name, comment, email, config, creationTime, keyLifetimeSecs, !config.V6()) -======= - } - - err = e.addUserId(name, comment, email, config, creationTime, keyLifetimeSecs) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -104,7 +87,6 @@ func NewEntity(name, comment, email 
string, config *packet.Config) (*Entity, err func (t *Entity) AddUserId(name, comment, email string, config *packet.Config) error { creationTime := config.Now() keyLifetimeSecs := config.KeyLifetime() -<<<<<<< HEAD return t.addUserId(name, comment, email, config, creationTime, keyLifetimeSecs, !config.V6()) } @@ -113,38 +95,11 @@ func writeKeyProperties(selfSignature *packet.Signature, creationTime time.Time, selfSignature.CreationTime = creationTime selfSignature.KeyLifetimeSecs = &keyLifetimeSecs -======= - return t.addUserId(name, comment, email, config, creationTime, keyLifetimeSecs) -} - -func (t *Entity) addUserId(name, comment, email string, config *packet.Config, creationTime time.Time, keyLifetimeSecs uint32) error { - uid := packet.NewUserId(name, comment, email) - if uid == nil { - return errors.InvalidArgumentError("user id field contained invalid characters") - } - - if _, ok := t.Identities[uid.Id]; ok { - return errors.InvalidArgumentError("user id exist") - } - - primary := t.PrivateKey - - isPrimaryId := len(t.Identities) == 0 - - selfSignature := createSignaturePacket(&primary.PublicKey, packet.SigTypePositiveCert, config) - selfSignature.CreationTime = creationTime - selfSignature.KeyLifetimeSecs = &keyLifetimeSecs - selfSignature.IsPrimaryId = &isPrimaryId ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) selfSignature.FlagsValid = true selfSignature.FlagSign = true selfSignature.FlagCertify = true selfSignature.SEIPDv1 = true // true by default, see 5.8 vs. 5.14 -<<<<<<< HEAD selfSignature.SEIPDv2 = advertiseAead -======= - selfSignature.SEIPDv2 = config.AEAD() != nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Set the PreferredHash for the SelfSignature from the packet.Config. // If it is not the must-implement algorithm from rfc4880bis, append that. @@ -173,7 +128,6 @@ func (t *Entity) addUserId(name, comment, email string, config *packet.Config, c selfSignature.PreferredCompression = append(selfSignature.PreferredCompression, uint8(config.Compression())) } -<<<<<<< HEAD if advertiseAead { // Get the preferred AEAD mode from the packet.Config. // If it is not the must-implement algorithm from rfc9580, append that. @@ -212,20 +166,6 @@ func (t *Entity) addUserId(name, comment, email string, config *packet.Config, c } } selfSignature.IsPrimaryId = &isPrimaryId -======= - // And for DefaultMode. 
- modes := []uint8{uint8(config.AEAD().Mode())} - if config.AEAD().Mode() != packet.AEADModeOCB { - modes = append(modes, uint8(packet.AEADModeOCB)) - } - - // For preferred (AES256, GCM), we'll generate (AES256, GCM), (AES256, OCB), (AES128, GCM), (AES128, OCB) - for _, cipher := range selfSignature.PreferredSymmetric { - for _, mode := range modes { - selfSignature.PreferredCipherSuites = append(selfSignature.PreferredCipherSuites, [2]uint8{cipher, mode}) - } - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // User ID binding signature err := selfSignature.SignUserId(uid.Id, &primary.PublicKey, primary, config) @@ -253,15 +193,10 @@ func (e *Entity) AddSigningSubkey(config *packet.Config) error { } sub := packet.NewSignerPrivateKey(creationTime, subPrivRaw) sub.IsSubkey = true -<<<<<<< HEAD if config.V6() { if err := sub.UpgradeToV6(); err != nil { return err } -======= - if config != nil && config.V5Keys { - sub.UpgradeToV5() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } subkey := Subkey{ @@ -305,15 +240,10 @@ func (e *Entity) addEncryptionSubkey(config *packet.Config, creationTime time.Ti } sub := packet.NewDecrypterPrivateKey(creationTime, subPrivRaw) sub.IsSubkey = true -<<<<<<< HEAD if config.V6() { if err := sub.UpgradeToV6(); err != nil { return err } -======= - if config != nil && config.V5Keys { - sub.UpgradeToV5() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } subkey := Subkey{ @@ -351,14 +281,11 @@ func newSigner(config *packet.Config) (signer interface{}, err error) { } return rsa.GenerateKey(config.Random(), bits) case packet.PubKeyAlgoEdDSA: -<<<<<<< HEAD if config.V6() { // Implementations MUST NOT accept or generate v6 key material // using the deprecated OIDs. return nil, errors.InvalidArgumentError("EdDSALegacy cannot be used for v6 keys") } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) curve := ecc.FindEdDSAByGenName(string(config.CurveName())) if curve == nil { return nil, errors.InvalidArgumentError("unsupported curve") @@ -380,7 +307,6 @@ func newSigner(config *packet.Config) (signer interface{}, err error) { return nil, err } return priv, nil -<<<<<<< HEAD case packet.PubKeyAlgoEd25519: priv, err := ed25519.GenerateKey(config.Random()) if err != nil { @@ -393,8 +319,6 @@ func newSigner(config *packet.Config) (signer interface{}, err error) { return nil, err } return priv, nil -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: return nil, errors.InvalidArgumentError("unsupported public key algorithm") } @@ -417,7 +341,6 @@ func newDecrypter(config *packet.Config) (decrypter interface{}, err error) { case packet.PubKeyAlgoEdDSA, packet.PubKeyAlgoECDSA: fallthrough // When passing EdDSA or ECDSA, we generate an ECDH subkey case packet.PubKeyAlgoECDH: -<<<<<<< HEAD if config.V6() && (config.CurveName() == packet.Curve25519 || config.CurveName() == packet.Curve448) { @@ -425,8 +348,6 @@ func newDecrypter(config *packet.Config) (decrypter interface{}, err error) { // using the deprecated OIDs. 
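These guards reject the deprecated legacy OIDs whenever version 6 key material is requested, so v6 generation only ever pairs with the dedicated Ed25519/Ed448/X25519/X448 algorithm IDs. A hedged generation sketch using the V6Keys switch from the config.go hunk above (module path assumed from the vendor tree; PrimaryKey.Version assumed exported as elsewhere in the packet package):

    package main

    import (
        "fmt"

        "github.com/ProtonMail/go-crypto/openpgp"
        "github.com/ProtonMail/go-crypto/openpgp/packet"
    )

    func main() {
        // With V6Keys unset, the same call keeps producing version 4 keys.
        cfg := &packet.Config{V6Keys: true}
        e, err := openpgp.NewEntity("Alice", "", "alice@example.com", cfg)
        if err != nil {
            panic(err)
        }
        fmt.Println("primary key version:", e.PrimaryKey.Version)
    }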
return nil, errors.InvalidArgumentError("ECDH with Curve25519/448 legacy cannot be used for v6 keys") } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var kdf = ecdh.KDF{ Hash: algorithm.SHA512, Cipher: algorithm.AES256, @@ -436,13 +357,10 @@ func newDecrypter(config *packet.Config) (decrypter interface{}, err error) { return nil, errors.InvalidArgumentError("unsupported curve") } return ecdh.GenerateKey(config.Random(), curve, kdf) -<<<<<<< HEAD case packet.PubKeyAlgoEd25519, packet.PubKeyAlgoX25519: // When passing Ed25519, we generate an x25519 subkey return x25519.GenerateKey(config.Random()) case packet.PubKeyAlgoEd448, packet.PubKeyAlgoX448: // When passing Ed448, we generate an x448 subkey return x448.GenerateKey(config.Random()) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: return nil, errors.InvalidArgumentError("unsupported public key algorithm") } @@ -451,11 +369,7 @@ func newDecrypter(config *packet.Config) (decrypter interface{}, err error) { var bigOne = big.NewInt(1) // generateRSAKeyWithPrimes generates a multi-prime RSA keypair of the -<<<<<<< HEAD // given bit size, using the given random source and pre-populated primes. -======= -// given bit size, using the given random source and prepopulated primes. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func generateRSAKeyWithPrimes(random io.Reader, nprimes int, bits int, prepopulatedPrimes []*big.Int) (*rsa.PrivateKey, error) { priv := new(rsa.PrivateKey) priv.E = 65537 diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go index fc35909aae..a071353e2e 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go @@ -6,10 +6,7 @@ package openpgp import ( goerrors "errors" -<<<<<<< HEAD "fmt" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "io" "time" @@ -28,7 +25,6 @@ var PrivateKeyType = "PGP PRIVATE KEY BLOCK" // (which must be a signing key), one or more identities claimed by that key, // and zero or more subkeys, which may be encryption keys. type Entity struct { -<<<<<<< HEAD PrimaryKey *packet.PublicKey PrivateKey *packet.PrivateKey Identities map[string]*Identity // indexed by Identity.Name @@ -36,13 +32,6 @@ type Entity struct { Subkeys []Subkey SelfSignature *packet.Signature // Direct-key self signature of the PrimaryKey (contains primary key properties in v6) Signatures []*packet.Signature // all (potentially unverified) self-signatures, revocations, and third-party signatures -======= - PrimaryKey *packet.PublicKey - PrivateKey *packet.PrivateKey - Identities map[string]*Identity // indexed by Identity.Name - Revocations []*packet.Signature - Subkeys []Subkey ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // An Identity represents an identity claimed by an Entity and zero or more @@ -134,21 +123,12 @@ func shouldPreferIdentity(existingId, potentialNewId *Identity) bool { // given Entity. func (e *Entity) EncryptionKey(now time.Time) (Key, bool) { // Fail to find any encryption key if the... 
-<<<<<<< HEAD primarySelfSignature, primaryIdentity := e.PrimarySelfSignature() if primarySelfSignature == nil || // no self-signature found e.PrimaryKey.KeyExpired(primarySelfSignature, now) || // primary key has expired e.Revoked(now) || // primary key has been revoked primarySelfSignature.SigExpired(now) || // user ID or direct self-signature has expired (primaryIdentity != nil && primaryIdentity.Revoked(now)) { // user ID has been revoked (for v4 keys) -======= - i := e.PrimaryIdentity() - if e.PrimaryKey.KeyExpired(i.SelfSignature, now) || // primary key has expired - i.SelfSignature == nil || // user ID has no self-signature - i.SelfSignature.SigExpired(now) || // user ID self-signature has expired - e.Revoked(now) || // primary key has been revoked - i.Revoked(now) { // user ID has been revoked ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return Key{}, false } @@ -175,15 +155,9 @@ func (e *Entity) EncryptionKey(now time.Time) (Key, bool) { // If we don't have any subkeys for encryption and the primary key // is marked as OK to encrypt with, then we can use it. -<<<<<<< HEAD if primarySelfSignature.FlagsValid && primarySelfSignature.FlagEncryptCommunications && e.PrimaryKey.PubKeyAlgo.CanEncrypt() { return Key{e, e.PrimaryKey, e.PrivateKey, primarySelfSignature, e.Revocations}, true -======= - if i.SelfSignature.FlagsValid && i.SelfSignature.FlagEncryptCommunications && - e.PrimaryKey.PubKeyAlgo.CanEncrypt() { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, e.Revocations}, true ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return Key{}, false @@ -215,21 +189,12 @@ func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) { func (e *Entity) signingKeyByIdUsage(now time.Time, id uint64, flags int) (Key, bool) { // Fail to find any signing key if the... -<<<<<<< HEAD primarySelfSignature, primaryIdentity := e.PrimarySelfSignature() if primarySelfSignature == nil || // no self-signature found e.PrimaryKey.KeyExpired(primarySelfSignature, now) || // primary key has expired e.Revoked(now) || // primary key has been revoked primarySelfSignature.SigExpired(now) || // user ID or direct self-signature has expired (primaryIdentity != nil && primaryIdentity.Revoked(now)) { // user ID has been revoked (for v4 keys) -======= - i := e.PrimaryIdentity() - if e.PrimaryKey.KeyExpired(i.SelfSignature, now) || // primary key has expired - i.SelfSignature == nil || // user ID has no self-signature - i.SelfSignature.SigExpired(now) || // user ID self-signature has expired - e.Revoked(now) || // primary key has been revoked - i.Revoked(now) { // user ID has been revoked ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return Key{}, false } @@ -258,21 +223,12 @@ func (e *Entity) signingKeyByIdUsage(now time.Time, id uint64, flags int) (Key, // If we don't have any subkeys for signing and the primary key // is marked as OK to sign with, then we can use it.
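With the checks rewritten around PrimarySelfSignature(), v4 keys (user-ID self-signature) and v6 keys (direct-key signature) flow through the same eligibility logic, and callers keep the existing selection API:

    package main

    import (
        "fmt"
        "time"

        "github.com/ProtonMail/go-crypto/openpgp"
    )

    func main() {
        e, err := openpgp.NewEntity("Bob", "", "bob@example.com", nil)
        if err != nil {
            panic(err)
        }
        // The entity decides internally which self-signature applies.
        if key, ok := e.EncryptionKey(time.Now()); ok {
            fmt.Printf("encrypt to key %x\n", key.PublicKey.Fingerprint)
        } else {
            fmt.Println("no usable encryption key (expired or revoked)")
        }
    }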
-<<<<<<< HEAD if primarySelfSignature.FlagsValid && (flags&packet.KeyFlagCertify == 0 || primarySelfSignature.FlagCertify) && (flags&packet.KeyFlagSign == 0 || primarySelfSignature.FlagSign) && e.PrimaryKey.PubKeyAlgo.CanSign() && (id == 0 || e.PrimaryKey.KeyId == id) { return Key{e, e.PrimaryKey, e.PrivateKey, primarySelfSignature, e.Revocations}, true -======= - if i.SelfSignature.FlagsValid && - (flags&packet.KeyFlagCertify == 0 || i.SelfSignature.FlagCertify) && - (flags&packet.KeyFlagSign == 0 || i.SelfSignature.FlagSign) && - e.PrimaryKey.PubKeyAlgo.CanSign() && - (id == 0 || e.PrimaryKey.KeyId == id) { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, e.Revocations}, true ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // No keys with a valid Signing Flag or no keys matched the id passed in @@ -306,11 +262,7 @@ func (e *Entity) EncryptPrivateKeys(passphrase []byte, config *packet.Config) er var keysToEncrypt []*packet.PrivateKey // Add entity private key to encrypt. if e.PrivateKey != nil && !e.PrivateKey.Dummy() && !e.PrivateKey.Encrypted { -<<<<<<< HEAD keysToEncrypt = append(keysToEncrypt, e.PrivateKey) -======= - keysToEncrypt = append(keysToEncrypt, e.PrivateKey) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Add subkeys to encrypt. @@ -322,11 +274,7 @@ func (e *Entity) EncryptPrivateKeys(passphrase []byte, config *packet.Config) er return packet.EncryptPrivateKeys(keysToEncrypt, passphrase, config) } -<<<<<<< HEAD // DecryptPrivateKeys decrypts all encrypted keys in the entity with the given passphrase. -======= -// DecryptPrivateKeys decrypts all encrypted keys in the entitiy with the given passphrase. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Avoids recomputation of similar s2k key derivations. Public keys and dummy keys are ignored, // and don't cause an error to be returned. func (e *Entity) DecryptPrivateKeys(passphrase []byte) error { @@ -339,11 +287,7 @@ func (e *Entity) DecryptPrivateKeys(passphrase []byte) error { // Add subkeys to decrypt. 
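DecryptPrivateKeys exists so the s2k derivation is shared across the primary key and every subkey: one passphrase, one derivation. A caller-side sketch, assuming an armored secret keyring on disk:

    package main

    import (
        "os"

        "github.com/ProtonMail/go-crypto/openpgp"
    )

    func main() {
        f, err := os.Open("secring.asc") // hypothetical input file
        if err != nil {
            panic(err)
        }
        defer f.Close()

        ring, err := openpgp.ReadArmoredKeyRing(f)
        if err != nil {
            panic(err)
        }
        for _, e := range ring {
            // Decrypts the primary key and all encrypted subkeys in one
            // pass; dummy and public-only keys are skipped, not errors.
            if err := e.DecryptPrivateKeys([]byte("passphrase")); err != nil {
                panic(err)
            }
        }
    }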
for _, sub := range e.Subkeys { if sub.PrivateKey != nil && !sub.PrivateKey.Dummy() && sub.PrivateKey.Encrypted { -<<<<<<< HEAD keysToDecrypt = append(keysToDecrypt, sub.PrivateKey) -======= - keysToDecrypt = append(keysToDecrypt, sub.PrivateKey) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } return packet.DecryptPrivateKeys(keysToDecrypt, passphrase) @@ -377,12 +321,7 @@ type EntityList []*Entity func (el EntityList) KeysById(id uint64) (keys []Key) { for _, e := range el { if e.PrimaryKey.KeyId == id { -<<<<<<< HEAD selfSig, _ := e.PrimarySelfSignature() -======= - ident := e.PrimaryIdentity() - selfSig := ident.SelfSignature ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig, e.Revocations}) } @@ -504,10 +443,6 @@ func readToNextPublicKey(packets *packet.Reader) (err error) { return } else if err != nil { if _, ok := err.(errors.UnsupportedError); ok { -<<<<<<< HEAD -======= - err = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) continue } return @@ -545,10 +480,7 @@ func ReadEntity(packets *packet.Reader) (*Entity, error) { } var revocations []*packet.Signature -<<<<<<< HEAD var directSignatures []*packet.Signature -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) EachPacket: for { p, err := packets.Next() @@ -567,13 +499,7 @@ EachPacket: if pkt.SigType == packet.SigTypeKeyRevocation { revocations = append(revocations, pkt) } else if pkt.SigType == packet.SigTypeDirectSignature { -<<<<<<< HEAD directSignatures = append(directSignatures, pkt) -======= - // TODO: RFC4880 5.2.1 permits signatures - // directly on keys (eg. to bind additional - // revocation keys). ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Else, ignoring the signature as it does not follow anything // we would know to attach it to. @@ -596,7 +522,6 @@ EachPacket: return nil, err } default: -<<<<<<< HEAD // we ignore unknown packets. 
} } @@ -630,14 +555,6 @@ EachPacket: } e.SelfSignature = mainDirectKeySelfSignature e.Signatures = directSignatures -======= - // we ignore unknown packets - } - } - - if len(e.Identities) == 0 { - return nil, errors.StructuralError("entity without any identities") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } for _, revocation := range revocations { @@ -782,15 +699,12 @@ func (e *Entity) serializePrivate(w io.Writer, config *packet.Config, reSign boo return err } } -<<<<<<< HEAD for _, directSignature := range e.Signatures { err := directSignature.Serialize(w) if err != nil { return err } } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, ident := range e.Identities { err = ident.UserId.Serialize(w) if err != nil { @@ -857,15 +771,12 @@ func (e *Entity) Serialize(w io.Writer) error { return err } } -<<<<<<< HEAD for _, directSignature := range e.Signatures { err := directSignature.Serialize(w) if err != nil { return err } } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, ident := range e.Identities { err = ident.UserId.Serialize(w) if err != nil { @@ -968,7 +879,6 @@ func (e *Entity) RevokeSubkey(sk *Subkey, reason packet.ReasonForRevocation, rea sk.Revocations = append(sk.Revocations, revSig) return nil } -<<<<<<< HEAD func (e *Entity) primaryDirectSignature() *packet.Signature { return e.SelfSignature @@ -989,5 +899,3 @@ func (e *Entity) PrimarySelfSignature() (*packet.Signature, *Identity) { } return primaryIdentity.SelfSignature, primaryIdentity } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go index de15427397..5e46046563 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go @@ -3,10 +3,6 @@ package packet import ( -<<<<<<< HEAD -======= - "bytes" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "crypto/cipher" "encoding/binary" "io" @@ -18,19 +14,11 @@ import ( type aeadCrypter struct { aead cipher.AEAD chunkSize int -<<<<<<< HEAD nonce []byte -======= - initialNonce []byte ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) associatedData []byte // Chunk-independent associated data chunkIndex []byte // Chunk counter packetTag packetType // SEIP packet (v2) or AEAD Encrypted Data packet bytesProcessed int // Amount of plaintext bytes encrypted/decrypted -<<<<<<< HEAD -======= - buffer bytes.Buffer // Buffered bytes across chunks ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // computeNonce takes the incremental index and computes an eXclusive OR with @@ -38,21 +26,12 @@ type aeadCrypter struct { // 5.16.1 and 5.16.2). It returns the resulting nonce. func (wo *aeadCrypter) computeNextNonce() (nonce []byte) { if wo.packetTag == packetTypeSymmetricallyEncryptedIntegrityProtected { -<<<<<<< HEAD return wo.nonce } nonce = make([]byte, len(wo.nonce)) copy(nonce, wo.nonce) offset := len(wo.nonce) - 8 -======= - return append(wo.initialNonce, wo.chunkIndex...) 
- } - - nonce = make([]byte, len(wo.initialNonce)) - copy(nonce, wo.initialNonce) - offset := len(wo.initialNonce) - 8 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for i := 0; i < 8; i++ { nonce[i+offset] ^= wo.chunkIndex[i] } @@ -81,14 +60,9 @@ func (wo *aeadCrypter) incrementIndex() error { type aeadDecrypter struct { aeadCrypter // Embedded ciphertext opener reader io.Reader // 'reader' is a partialLengthReader -<<<<<<< HEAD chunkBytes []byte peekedBytes []byte // Used to detect last chunk buffer []byte // Buffered decrypted bytes -======= - peekedBytes []byte // Used to detect last chunk - eof bool ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Read decrypts bytes and reads them into dst. It decrypts when necessary and @@ -96,25 +70,14 @@ type aeadDecrypter struct { // and an error. func (ar *aeadDecrypter) Read(dst []byte) (n int, err error) { // Return buffered plaintext bytes from previous calls -<<<<<<< HEAD if len(ar.buffer) > 0 { n = copy(dst, ar.buffer) ar.buffer = ar.buffer[n:] return -======= - if ar.buffer.Len() > 0 { - return ar.buffer.Read(dst) - } - - // Return EOF if we've previously validated the final tag - if ar.eof { - return 0, io.EOF ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Read a chunk tagLen := ar.aead.Overhead() -<<<<<<< HEAD copy(ar.chunkBytes, ar.peekedBytes) // Copy bytes peeked in previous chunk or in initialization bytesRead, errRead := io.ReadFull(ar.reader, ar.chunkBytes[tagLen:]) if errRead != nil && errRead != io.EOF && errRead != io.ErrUnexpectedEOF { @@ -146,42 +109,6 @@ func (ar *aeadDecrypter) Close() (err error) { if errChunk != nil { return errChunk } -======= - cipherChunkBuf := new(bytes.Buffer) - _, errRead := io.CopyN(cipherChunkBuf, ar.reader, int64(ar.chunkSize+tagLen)) - cipherChunk := cipherChunkBuf.Bytes() - if errRead != nil && errRead != io.EOF { - return 0, errRead - } - decrypted, errChunk := ar.openChunk(cipherChunk) - if errChunk != nil { - return 0, errChunk - } - - // Return decrypted bytes, buffering if necessary - if len(dst) < len(decrypted) { - n = copy(dst, decrypted[:len(dst)]) - ar.buffer.Write(decrypted[len(dst):]) - } else { - n = copy(dst, decrypted) - } - - // Check final authentication tag - if errRead == io.EOF { - errChunk := ar.validateFinalTag(ar.peekedBytes) - if errChunk != nil { - return n, errChunk - } - ar.eof = true // Mark EOF for when we've returned all buffered data - } - return -} - -// Close is noOp. The final authentication tag of the stream was already -// checked in the last Read call. In the future, this function could be used to -// wipe the reader and peeked, decrypted bytes, if necessary. -func (ar *aeadDecrypter) Close() (err error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } @@ -189,31 +116,15 @@ func (ar *aeadDecrypter) Close() (err error) { // the underlying plaintext and an error. It accesses peeked bytes from next // chunk, to identify the last chunk and decrypt/validate accordingly. func (ar *aeadDecrypter) openChunk(data []byte) ([]byte, error) { -<<<<<<< HEAD -======= - tagLen := ar.aead.Overhead() - // Restore carried bytes from last call - chunkExtra := append(ar.peekedBytes, data...) - // 'chunk' contains encrypted bytes, followed by an authentication tag. 
- chunk := chunkExtra[:len(chunkExtra)-tagLen] - ar.peekedBytes = chunkExtra[len(chunkExtra)-tagLen:] - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) adata := ar.associatedData if ar.aeadCrypter.packetTag == packetTypeAEADEncrypted { adata = append(ar.associatedData, ar.chunkIndex...) } nonce := ar.computeNextNonce() -<<<<<<< HEAD plainChunk, err := ar.aead.Open(data[:0:len(data)], nonce, data, adata) if err != nil { return nil, errors.ErrAEADTagVerification -======= - plainChunk, err := ar.aead.Open(nil, nonce, chunk, adata) - if err != nil { - return nil, err ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } ar.bytesProcessed += len(plainChunk) if err = ar.aeadCrypter.incrementIndex(); err != nil { @@ -238,14 +149,8 @@ func (ar *aeadDecrypter) validateFinalTag(tag []byte) error { // ... and total number of encrypted octets adata = append(adata, amountBytes...) nonce := ar.computeNextNonce() -<<<<<<< HEAD if _, err := ar.aead.Open(nil, nonce, tag, adata); err != nil { return errors.ErrAEADTagVerification -======= - _, err := ar.aead.Open(nil, nonce, tag, adata) - if err != nil { - return err ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return nil } @@ -255,18 +160,14 @@ func (ar *aeadDecrypter) validateFinalTag(tag []byte) error { type aeadEncrypter struct { aeadCrypter // Embedded plaintext sealer writer io.WriteCloser // 'writer' is a partialLengthWriter -<<<<<<< HEAD chunkBytes []byte offset int -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Write encrypts and writes bytes. It encrypts when necessary and buffers extra // plaintext bytes for next call. When the stream is finished, Close() MUST be // called to append the final tag. func (aw *aeadEncrypter) Write(plaintextBytes []byte) (n int, err error) { -<<<<<<< HEAD for n != len(plaintextBytes) { copied := copy(aw.chunkBytes[aw.offset:aw.chunkSize], plaintextBytes[n:]) n += copied @@ -282,23 +183,6 @@ func (aw *aeadEncrypter) Write(plaintextBytes []byte) (n int, err error) { return n, err } aw.offset = 0 -======= - // Append plaintextBytes to existing buffered bytes - n, err = aw.buffer.Write(plaintextBytes) - if err != nil { - return n, err - } - // Encrypt and write chunks - for aw.buffer.Len() >= aw.chunkSize { - plainChunk := aw.buffer.Next(aw.chunkSize) - encryptedChunk, err := aw.sealChunk(plainChunk) - if err != nil { - return n, err - } - _, err = aw.writer.Write(encryptedChunk) - if err != nil { - return n, err ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } return @@ -310,14 +194,8 @@ func (aw *aeadEncrypter) Write(plaintextBytes []byte) (n int, err error) { func (aw *aeadEncrypter) Close() (err error) { // Encrypt and write a chunk if there's buffered data left, or if we haven't // written any chunks yet. 
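Close() has to run even for an empty plaintext: the comment above is the reason the encrypter always emits at least one (possibly empty) sealed chunk before the final tag. The nonce schedule that pairs with this, for the v1 AEAD packet, XORs a big-endian chunk counter into the nonce tail; a standalone sketch with stdlib AES-GCM standing in for the configured AEAD:

    package main

    import (
        "crypto/aes"
        "crypto/cipher"
        "encoding/binary"
        "fmt"
    )

    // chunkNonce XORs the 8-byte chunk counter into the tail of the base
    // nonce, mirroring computeNextNonce above.
    func chunkNonce(base []byte, index uint64) []byte {
        nonce := make([]byte, len(base))
        copy(nonce, base)
        var ctr [8]byte
        binary.BigEndian.PutUint64(ctr[:], index)
        off := len(nonce) - 8
        for i := 0; i < 8; i++ {
            nonce[off+i] ^= ctr[i]
        }
        return nonce
    }

    func main() {
        block, err := aes.NewCipher(make([]byte, 32))
        if err != nil {
            panic(err)
        }
        aead, err := cipher.NewGCM(block)
        if err != nil {
            panic(err)
        }
        base := make([]byte, aead.NonceSize())

        plaintext := []byte("two chunks of data, sealed separately")
        const chunkSize = 16
        for i := 0; i*chunkSize < len(plaintext); i++ {
            end := (i + 1) * chunkSize
            if end > len(plaintext) {
                end = len(plaintext)
            }
            sealed := aead.Seal(nil, chunkNonce(base, uint64(i)),
                plaintext[i*chunkSize:end], nil)
            fmt.Printf("chunk %d: %d bytes\n", i, len(sealed))
        }
    }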
-<<<<<<< HEAD if aw.offset > 0 || aw.bytesProcessed == 0 { lastEncryptedChunk, err := aw.sealChunk(aw.chunkBytes[:aw.offset]) -======= - if aw.buffer.Len() > 0 || aw.bytesProcessed == 0 { - plainChunk := aw.buffer.Bytes() - lastEncryptedChunk, err := aw.sealChunk(plainChunk) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } @@ -363,11 +241,7 @@ func (aw *aeadEncrypter) sealChunk(data []byte) ([]byte, error) { } nonce := aw.computeNextNonce() -<<<<<<< HEAD encrypted := aw.aead.Seal(data[:0], nonce, data, adata) -======= - encrypted := aw.aead.Seal(nil, nonce, data, adata) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) aw.bytesProcessed += len(data) if err := aw.aeadCrypter.incrementIndex(); err != nil { return nil, err diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go index e05b86192c..583765d87c 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go @@ -65,45 +65,28 @@ func (ae *AEADEncrypted) decrypt(key []byte) (io.ReadCloser, error) { blockCipher := ae.cipher.new(key) aead := ae.mode.new(blockCipher) // Carry the first tagLen bytes -<<<<<<< HEAD chunkSize := decodeAEADChunkSize(ae.chunkSizeByte) tagLen := ae.mode.TagLength() chunkBytes := make([]byte, chunkSize+tagLen*2) peekedBytes := chunkBytes[chunkSize+tagLen:] -======= - tagLen := ae.mode.TagLength() - peekedBytes := make([]byte, tagLen) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) n, err := io.ReadFull(ae.Contents, peekedBytes) if n < tagLen || (err != nil && err != io.EOF) { return nil, errors.AEADError("Not enough data to decrypt:" + err.Error()) } -<<<<<<< HEAD -======= - chunkSize := decodeAEADChunkSize(ae.chunkSizeByte) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return &aeadDecrypter{ aeadCrypter: aeadCrypter{ aead: aead, chunkSize: chunkSize, -<<<<<<< HEAD nonce: ae.initialNonce, -======= - initialNonce: ae.initialNonce, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) associatedData: ae.associatedData(), chunkIndex: make([]byte, 8), packetTag: packetTypeAEADEncrypted, }, reader: ae.Contents, -<<<<<<< HEAD chunkBytes: chunkBytes, peekedBytes: peekedBytes, }, nil -======= - peekedBytes: peekedBytes}, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // associatedData for chunks: tag, version, cipher, mode, chunk size byte diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go index 8f09f3fcf8..0bcb38caca 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go @@ -8,16 +8,10 @@ import ( "compress/bzip2" "compress/flate" "compress/zlib" -<<<<<<< HEAD "io" "strconv" "github.com/ProtonMail/go-crypto/openpgp/errors" -======= - "github.com/ProtonMail/go-crypto/openpgp/errors" - "io" - "strconv" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // Compressed represents a compressed OpenPGP packet. The decompressed contents @@ -46,7 +40,6 @@ type CompressionConfig struct { Level int } -<<<<<<< HEAD // decompressionReader ensures that the whole compression packet is read. 
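The decompressionReader introduced here addresses a stdlib behaviour: flate/zlib/bzip2 streams signal EOF at their own end marker, which may be before the enclosing OpenPGP packet is fully consumed, so the wrapper keeps draining the packet afterwards. The behaviour it compensates for, sketched:

    package main

    import (
        "bytes"
        "compress/zlib"
        "fmt"
        "io"
    )

    func main() {
        var stream bytes.Buffer
        zw := zlib.NewWriter(&stream)
        zw.Write([]byte("payload"))
        zw.Close()
        stream.WriteString("TRAILER") // bytes after the compressed data

        zr, err := zlib.NewReader(&stream)
        if err != nil {
            panic(err)
        }
        out, _ := io.ReadAll(zr) // stops at the zlib end-of-stream marker
        zr.Close()

        // Whatever follows the compressed data is still unread; in OpenPGP
        // that residue is what the wrapper drains on behalf of the caller.
        rest, _ := io.ReadAll(&stream)
        fmt.Printf("decompressed %q, undrained %q\n", out, rest)
    }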
type decompressionReader struct { compressed io.Reader @@ -78,8 +71,6 @@ func (dr *decompressionReader) Read(data []byte) (n int, err error) { return n, err } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (c *Compressed) parse(r io.Reader) error { var buf [1]byte _, err := readFull(r, buf[:]) @@ -91,7 +82,6 @@ func (c *Compressed) parse(r io.Reader) error { case 0: c.Body = r case 1: -<<<<<<< HEAD c.Body = newDecompressionReader(r, flate.NewReader(r)) case 2: decompressor, err := zlib.NewReader(r) @@ -101,13 +91,6 @@ func (c *Compressed) parse(r io.Reader) error { c.Body = newDecompressionReader(r, decompressor) case 3: c.Body = newDecompressionReader(r, io.NopCloser(bzip2.NewReader(r))) -======= - c.Body = flate.NewReader(r) - case 2: - c.Body, err = zlib.NewReader(r) - case 3: - c.Body = bzip2.NewReader(r) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0]))) } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go index 801e8b13b5..8bf8e6e51f 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go @@ -14,7 +14,6 @@ import ( "github.com/ProtonMail/go-crypto/openpgp/s2k" ) -<<<<<<< HEAD var ( defaultRejectPublicKeyAlgorithms = map[PublicKeyAlgorithm]bool{ PubKeyAlgoElGamal: true, @@ -43,8 +42,6 @@ var ( // by v6 keys, v6 signatures and SEIPDv2 encrypted data, respectively. var V5Disabled = false -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Config collects a number of parameters along with sensible defaults. // A nil *Config is valid and results in all default values. type Config struct { @@ -104,7 +101,6 @@ type Config struct { // **Note: using this option may break compatibility with other OpenPGP // implementations, as well as future versions of this library.** AEADConfig *AEADConfig -<<<<<<< HEAD // V6Keys configures version 6 key generation. If false, this package still // supports version 6 keys, but produces version 4 keys. V6Keys bool @@ -115,11 +111,6 @@ type Config struct { RejectHashAlgorithms map[crypto.Hash]bool RejectMessageHashAlgorithms map[crypto.Hash]bool RejectCurves map[Curve]bool -======= - // V5Keys configures version 5 key generation. If false, this package still - // supports version 5 keys, but produces version 4 keys. - V5Keys bool ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // "The validity period of the key. This is the number of seconds after // the key creation time that the key expires. If this is not present // or has a value of zero, the key never expires. This is found only on @@ -148,21 +139,17 @@ type Config struct { // might be no other way than to tolerate the missing MDC. Setting this flag, allows this // mode of operation. It should be considered a measure of last resort. InsecureAllowUnauthenticatedMessages bool -<<<<<<< HEAD // InsecureAllowDecryptionWithSigningKeys allows decryption with keys marked as signing keys in the v2 API. // This setting is potentially insecure, but it is needed as some libraries // ignored key flags when selecting a key for encryption. // Not relevant for the v1 API, as all keys were allowed in decryption. 
InsecureAllowDecryptionWithSigningKeys bool -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // KnownNotations is a map of Notation Data names to bools, which controls // the notation names that are allowed to be present in critical Notation Data // signature subpackets. KnownNotations map[string]bool // SignatureNotations is a list of Notations to be added to any signatures. SignatureNotations []*Notation -<<<<<<< HEAD // CheckIntendedRecipients controls, whether the OpenPGP Intended Recipient Fingerprint feature // should be enabled for encryption and decryption. // (See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-12.html#name-intended-recipient-fingerpr). @@ -186,8 +173,6 @@ type Config struct { // weaknesses in the hash algo, potentially hindering e.g. some chosen-prefix attacks. // The default behavior, when the config or flag is nil, is to enable the feature. NonDeterministicSignaturesViaNotation *bool -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (c *Config) Random() io.Reader { @@ -275,11 +260,7 @@ func (c *Config) S2K() *s2k.Config { return nil } // for backwards compatibility -<<<<<<< HEAD if c.S2KCount > 0 && c.S2KConfig == nil { -======= - if c != nil && c.S2KCount > 0 && c.S2KConfig == nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return &s2k.Config{ S2KCount: c.S2KCount, } @@ -315,7 +296,6 @@ func (c *Config) AllowUnauthenticatedMessages() bool { return c.InsecureAllowUnauthenticatedMessages } -<<<<<<< HEAD func (c *Config) AllowDecryptionWithSigningKeys() bool { if c == nil { return false @@ -323,8 +303,6 @@ func (c *Config) AllowDecryptionWithSigningKeys() bool { return c.InsecureAllowDecryptionWithSigningKeys } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (c *Config) KnownNotation(notationName string) bool { if c == nil { return false @@ -338,7 +316,6 @@ func (c *Config) Notations() []*Notation { } return c.SignatureNotations } -<<<<<<< HEAD func (c *Config) V6() bool { if c == nil { @@ -431,5 +408,3 @@ func (c *Config) RandomizeSignaturesViaNotation() bool { func BoolPointer(value bool) *bool { return &value } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go index 4d5c8b4fe2..b90bb28911 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go @@ -5,17 +5,11 @@ package packet import ( -<<<<<<< HEAD "bytes" "crypto" "crypto/rsa" "encoding/binary" "encoding/hex" -======= - "crypto" - "crypto/rsa" - "encoding/binary" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "io" "math/big" "strconv" @@ -24,7 +18,6 @@ import ( "github.com/ProtonMail/go-crypto/openpgp/elgamal" "github.com/ProtonMail/go-crypto/openpgp/errors" "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" -<<<<<<< HEAD "github.com/ProtonMail/go-crypto/openpgp/x25519" "github.com/ProtonMail/go-crypto/openpgp/x448" ) @@ -104,34 +97,6 @@ func (e *EncryptedKey) parse(r io.Reader) (err error) { } e.Algo = PublicKeyAlgorithm(buf[0]) var cipherFunction byte -======= -) - -const encryptedKeyVersion = 3 - -// EncryptedKey represents a public-key encrypted session key. See RFC 4880, -// section 5.1. 
-type EncryptedKey struct {
-	KeyId uint64
-	Algo PublicKeyAlgorithm
-	CipherFunc CipherFunction // only valid after a successful Decrypt for a v3 packet
-	Key []byte // only valid after a successful Decrypt
-
-	encryptedMPI1, encryptedMPI2 encoding.Field
-}
-
-func (e *EncryptedKey) parse(r io.Reader) (err error) {
-	var buf [10]byte
-	_, err = readFull(r, buf[:])
-	if err != nil {
-		return
-	}
-	if buf[0] != encryptedKeyVersion {
-		return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0])))
-	}
-	e.KeyId = binary.BigEndian.Uint64(buf[1:9])
-	e.Algo = PublicKeyAlgorithm(buf[9])
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	switch e.Algo {
 	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
 		e.encryptedMPI1 = new(encoding.MPI)
@@ -158,7 +123,6 @@ func (e *EncryptedKey) parse(r io.Reader) (err error) {
 		if _, err = e.encryptedMPI2.ReadFrom(r); err != nil {
 			return
 		}
-<<<<<<< HEAD
 	case PubKeyAlgoX25519:
 		e.ephemeralPublicX25519, e.encryptedSession, cipherFunction, err = x25519.DecodeFields(r, e.Version == 6)
 		if err != nil {
@@ -178,40 +142,20 @@ func (e *EncryptedKey) parse(r io.Reader) (err error) {
 		}
 	}
-=======
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	_, err = consumeAll(r)
 	return
 }
 
-<<<<<<< HEAD
-=======
-func checksumKeyMaterial(key []byte) uint16 {
-	var checksum uint16
-	for _, v := range key {
-		checksum += uint16(v)
-	}
-	return checksum
-}
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // Decrypt decrypts an encrypted session key with the given private key. The
 // private key must have been decrypted first.
 // If config is nil, sensible defaults will be used.
 func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error {
-<<<<<<< HEAD
 	if e.Version < 6 && e.KeyId != 0 && e.KeyId != priv.KeyId {
 		return errors.InvalidArgumentError("cannot decrypt encrypted session key for key id " + strconv.FormatUint(e.KeyId, 16) + " with private key id " + strconv.FormatUint(priv.KeyId, 16))
 	}
 	if e.Version == 6 && e.KeyVersion != 0 && !bytes.Equal(e.KeyFingerprint, priv.Fingerprint) {
 		return errors.InvalidArgumentError("cannot decrypt encrypted session key for key fingerprint " + hex.EncodeToString(e.KeyFingerprint) + " with private key fingerprint " + hex.EncodeToString(priv.Fingerprint))
 	}
-=======
-	if e.KeyId != 0 && e.KeyId != priv.KeyId {
-		return errors.InvalidArgumentError("cannot decrypt encrypted session key for key id " + strconv.FormatUint(e.KeyId, 16) + " with private key id " + strconv.FormatUint(priv.KeyId, 16))
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if e.Algo != priv.PubKeyAlgo {
 		return errors.InvalidArgumentError("cannot decrypt encrypted session key of type " + strconv.Itoa(int(e.Algo)) + " with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo)))
 	}
@@ -237,7 +181,6 @@ func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error {
 		vsG := e.encryptedMPI1.Bytes()
 		m := e.encryptedMPI2.Bytes()
 		oid := priv.PublicKey.oid.EncodedBytes()
-<<<<<<< HEAD
 		fp := priv.PublicKey.Fingerprint[:]
 		if priv.PublicKey.Version == 5 {
 			// For v5 the, the fingerprint must be restricted to 20 bytes
@@ -251,18 +194,10 @@ func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error {
 	default:
 		err = errors.InvalidArgumentError("cannot decrypt encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo)))
 	}
-=======
-		b, err = ecdh.Decrypt(priv.PrivateKey.(*ecdh.PrivateKey), vsG, m, oid, priv.PublicKey.Fingerprint[:])
-	default:
-		err = errors.InvalidArgumentError("cannot decrypt encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo)))
-	}
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return err
 	}
 
-<<<<<<< HEAD
 	var key []byte
 	switch priv.PubKeyAlgo {
 	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH:
@@ -292,26 +227,11 @@ func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error {
 		return errors.UnsupportedError("unsupported algorithm for decryption")
 	}
 	e.Key = key
-=======
-	e.CipherFunc = CipherFunction(b[0])
-	if !e.CipherFunc.IsSupported() {
-		return errors.UnsupportedError("unsupported encryption function")
-	}
-
-	e.Key = b[1 : len(b)-2]
-	expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1])
-	checksum := checksumKeyMaterial(e.Key)
-	if checksum != expectedChecksum {
-		return errors.StructuralError("EncryptedKey checksum incorrect")
-	}
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return nil
 }
 
 // Serialize writes the encrypted key packet, e, to w.
 func (e *EncryptedKey) Serialize(w io.Writer) error {
-<<<<<<< HEAD
 	var encodedLength int
 	switch e.Algo {
 	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
@@ -324,21 +244,10 @@ func (e *EncryptedKey) Serialize(w io.Writer) error {
 		encodedLength = x25519.EncodedFieldsLength(e.encryptedSession, e.Version == 6)
 	case PubKeyAlgoX448:
 		encodedLength = x448.EncodedFieldsLength(e.encryptedSession, e.Version == 6)
-=======
-	var mpiLen int
-	switch e.Algo {
-	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
-		mpiLen = int(e.encryptedMPI1.EncodedLength())
-	case PubKeyAlgoElGamal:
-		mpiLen = int(e.encryptedMPI1.EncodedLength()) + int(e.encryptedMPI2.EncodedLength())
-	case PubKeyAlgoECDH:
-		mpiLen = int(e.encryptedMPI1.EncodedLength()) + int(e.encryptedMPI2.EncodedLength())
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	default:
 		return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo)))
 	}
-<<<<<<< HEAD
 	packetLen := versionSize /* version */ + keyIdSize /* key id */ + algorithmSize /* algo */ + encodedLength
 	if e.Version == 6 {
 		packetLen = versionSize /* version */ + algorithmSize /* algo */ + encodedLength + keyVersionSize /* key version */
@@ -350,14 +259,10 @@ func (e *EncryptedKey) Serialize(w io.Writer) error {
 	}
 
 	err := serializeHeader(w, packetTypeEncryptedKey, packetLen)
-=======
-	err := serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return err
 	}
 
-<<<<<<< HEAD
 	_, err = w.Write([]byte{byte(e.Version)})
 	if err != nil {
 		return err
@@ -386,11 +291,6 @@ func (e *EncryptedKey) Serialize(w io.Writer) error {
 	if err != nil {
 		return err
 	}
-=======
-	w.Write([]byte{encryptedKeyVersion})
-	binary.Write(w, binary.BigEndian, e.KeyId)
-	w.Write([]byte{byte(e.Algo)})
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	switch e.Algo {
 	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
@@ -408,21 +308,17 @@ func (e *EncryptedKey) Serialize(w io.Writer) error {
 		}
 		_, err := w.Write(e.encryptedMPI2.EncodedBytes())
 		return err
-<<<<<<< HEAD
 	case PubKeyAlgoX25519:
 		err := x25519.EncodeFields(w, e.ephemeralPublicX25519, e.encryptedSession, byte(e.CipherFunc), e.Version == 6)
 		return err
 	case PubKeyAlgoX448:
 		err := x448.EncodeFields(w, e.ephemeralPublicX448, e.encryptedSession, byte(e.CipherFunc), e.Version == 6)
 		return err
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
default: panic("internal error") } } -<<<<<<< HEAD // SerializeEncryptedKeyAEAD serializes an encrypted key packet to w that contains // key, encrypted to pub. // If aeadSupported is set, PKESK v6 is used, otherwise v3. @@ -521,31 +417,6 @@ func SerializeEncryptedKeyAEADwithHiddenOption(w io.Writer, pub *PublicKey, ciph return serializeEncryptedKeyX25519(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*x25519.PublicKey), keyBlock, byte(cipherFunc), version) case PubKeyAlgoX448: return serializeEncryptedKeyX448(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*x448.PublicKey), keyBlock, byte(cipherFunc), version) -======= -// SerializeEncryptedKey serializes an encrypted key packet to w that contains -// key, encrypted to pub. -// If config is nil, sensible defaults will be used. -func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error { - var buf [10]byte - buf[0] = encryptedKeyVersion - binary.BigEndian.PutUint64(buf[1:9], pub.KeyId) - buf[9] = byte(pub.PubKeyAlgo) - - keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */) - keyBlock[0] = byte(cipherFunc) - copy(keyBlock[1:], key) - checksum := checksumKeyMaterial(key) - keyBlock[1+len(key)] = byte(checksum >> 8) - keyBlock[1+len(key)+1] = byte(checksum) - - switch pub.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock) - case PubKeyAlgoElGamal: - return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock) - case PubKeyAlgoECDH: - return serializeEncryptedKeyECDH(w, config.Random(), buf, pub.PublicKey.(*ecdh.PublicKey), keyBlock, pub.oid, pub.Fingerprint) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly: return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) } @@ -553,7 +424,6 @@ func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunctio return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) } -<<<<<<< HEAD // SerializeEncryptedKey serializes an encrypted key packet to w that contains // key, encrypted to pub. // PKESKv6 is used if config.AEAD() is not nil. 
@@ -573,20 +443,13 @@ func SerializeEncryptedKeyWithHiddenOption(w io.Writer, pub *PublicKey, cipherFu
 }
 
 func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header []byte, pub *rsa.PublicKey, keyBlock []byte) error {
-=======
-func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock)
 	if err != nil {
 		return errors.InvalidArgumentError("RSA encryption failed: " + err.Error())
 	}
 
 	cipherMPI := encoding.NewMPI(cipherText)
-<<<<<<< HEAD
 	packetLen := len(header) /* header length */ + int(cipherMPI.EncodedLength())
-=======
-	packetLen := 10 /* header length */ + int(cipherMPI.EncodedLength())
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
 	if err != nil {
@@ -600,21 +463,13 @@ func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub
 	return err
 }
 
-<<<<<<< HEAD
 func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header []byte, pub *elgamal.PublicKey, keyBlock []byte) error {
-=======
-func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock)
 	if err != nil {
 		return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error())
 	}
 
-<<<<<<< HEAD
 	packetLen := len(header) /* header length */
-=======
-	packetLen := 10 /* header length */
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8
 	packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8
 
@@ -633,11 +488,7 @@ func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte,
 	return err
 }
 
-<<<<<<< HEAD
 func serializeEncryptedKeyECDH(w io.Writer, rand io.Reader, header []byte, pub *ecdh.PublicKey, keyBlock []byte, oid encoding.Field, fingerprint []byte) error {
-=======
-func serializeEncryptedKeyECDH(w io.Writer, rand io.Reader, header [10]byte, pub *ecdh.PublicKey, keyBlock []byte, oid encoding.Field, fingerprint []byte) error {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	vsG, c, err := ecdh.Encrypt(rand, pub, keyBlock, oid.EncodedBytes(), fingerprint)
 	if err != nil {
 		return errors.InvalidArgumentError("ECDH encryption failed: " + err.Error())
 	}
@@ -646,11 +497,7 @@ func serializeEncryptedKeyECDH(w io.Writer, rand io.Reader, header [10]byte, pub
 	g := encoding.NewMPI(vsG)
 	m := encoding.NewOID(c)
 
-<<<<<<< HEAD
 	packetLen := len(header) /* header length */
-=======
-	packetLen := 10 /* header length */
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	packetLen += int(g.EncodedLength()) + int(m.EncodedLength())
 
 	err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
@@ -668,7 +515,6 @@ func serializeEncryptedKeyECDH(w io.Writer, rand io.Reader, header [10]byte, pub
 	_, err = w.Write(m.EncodedBytes())
 	return err
 }
-<<<<<<< HEAD
 
 func serializeEncryptedKeyX25519(w io.Writer, rand io.Reader, header []byte, pub *x25519.PublicKey, keyBlock []byte, cipherFunc byte, version int) error {
 	ephemeralPublicX25519, ciphertext, err := x25519.Encrypt(rand, pub, keyBlock)
@@ -736,5 +582,3 @@ func encodeChecksumKey(buffer []byte, key []byte) {
 	buffer[len(key)] = byte(checksum >> 8)
 	buffer[len(key)+1] = byte(checksum)
 }
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go
index 7ebb7ecbb0..8a028c8a17 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go
@@ -58,15 +58,9 @@ func (l *LiteralData) parse(r io.Reader) (err error) {
 // on completion. The fileName is truncated to 255 bytes.
 func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) {
 	var buf [4]byte
-<<<<<<< HEAD
 	buf[0] = 'b'
 	if !isBinary {
 		buf[0] = 'u'
-=======
-	buf[0] = 't'
-	if isBinary {
-		buf[0] = 'b'
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 	if len(fileName) > 255 {
 		fileName = fileName[:255]
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go
index 79f3d9d797..f393c4063b 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go
@@ -7,24 +7,16 @@ package packet
 import (
 	"crypto"
 	"encoding/binary"
-<<<<<<< HEAD
 	"io"
 	"strconv"
 
 	"github.com/ProtonMail/go-crypto/openpgp/errors"
 	"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
-=======
-	"github.com/ProtonMail/go-crypto/openpgp/errors"
-	"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
-	"io"
-	"strconv"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 )
 
 // OnePassSignature represents a one-pass signature packet. See RFC 4880,
 // section 5.4.
 type OnePassSignature struct {
-<<<<<<< HEAD
 	Version int
 	SigType SignatureType
 	Hash crypto.Hash
@@ -46,27 +38,6 @@ func (ops *OnePassSignature) parse(r io.Reader) (err error) {
 		return errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0])))
 	}
 	ops.Version = int(buf[0])
-=======
-	SigType SignatureType
-	Hash crypto.Hash
-	PubKeyAlgo PublicKeyAlgorithm
-	KeyId uint64
-	IsLast bool
-}
-
-const onePassSignatureVersion = 3
-
-func (ops *OnePassSignature) parse(r io.Reader) (err error) {
-	var buf [13]byte
-
-	_, err = readFull(r, buf[:])
-	if err != nil {
-		return
-	}
-	if buf[0] != onePassSignatureVersion {
-		err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0])))
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	var ok bool
 	ops.Hash, ok = algorithm.HashIdToHashWithSha1(buf[2])
@@ -76,7 +47,6 @@ func (ops *OnePassSignature) parse(r io.Reader) (err error) {
 
 	ops.SigType = SignatureType(buf[1])
 	ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3])
-<<<<<<< HEAD
 
 	if ops.Version == 6 {
 		// Only for v6, a variable-length field containing the salt
@@ -122,16 +92,11 @@ func (ops *OnePassSignature) parse(r io.Reader) (err error) {
 			return
 		}
 		ops.IsLast = buf[0] != 0
-=======
-	ops.KeyId = binary.BigEndian.Uint64(buf[4:12])
-	ops.IsLast = buf[12] != 0
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return
 }
 
 // Serialize marshals the given OnePassSignature to w.
 func (ops *OnePassSignature) Serialize(w io.Writer) error {
-<<<<<<< HEAD
 	//v3 length 1+1+1+1+8+1 =
 	packetLength := 13
 	if ops.Version == 6 {
@@ -145,10 +110,6 @@ func (ops *OnePassSignature) Serialize(w io.Writer) error {
 
 	var buf [8]byte
 	buf[0] = byte(ops.Version)
-=======
-	var buf [13]byte
-	buf[0] = onePassSignatureVersion
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	buf[1] = uint8(ops.SigType)
 	var ok bool
 	buf[2], ok = algorithm.HashToHashIdWithSha1(ops.Hash)
@@ -156,7 +117,6 @@ func (ops *OnePassSignature) Serialize(w io.Writer) error {
 		return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash)))
 	}
 	buf[3] = uint8(ops.PubKeyAlgo)
-<<<<<<< HEAD
 
 	_, err := w.Write(buf[:4])
 	if err != nil {
@@ -193,16 +153,5 @@ func (ops *OnePassSignature) Serialize(w io.Writer) error {
 	}
 
 	_, err = w.Write(isLast)
-=======
-	binary.BigEndian.PutUint64(buf[4:12], ops.KeyId)
-	if ops.IsLast {
-		buf[12] = 1
-	}
-
-	if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil {
-		return err
-	}
-	_, err := w.Write(buf[:])
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return err
 }
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go
index b05c5d1641..cef7c661d3 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go
@@ -7,10 +7,6 @@ package packet
 import (
 	"bytes"
 	"io"
-<<<<<<< HEAD
-=======
-	"io/ioutil"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	"github.com/ProtonMail/go-crypto/openpgp/errors"
 )
@@ -29,11 +25,7 @@ type OpaquePacket struct {
 }
 
 func (op *OpaquePacket) parse(r io.Reader) (err error) {
-<<<<<<< HEAD
 	op.Contents, err = io.ReadAll(r)
-=======
-	op.Contents, err = ioutil.ReadAll(r)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return
 }
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go
index 26121d3fc1..1e92e22c97 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go
@@ -311,22 +311,15 @@ const (
 	packetTypePrivateSubkey packetType = 7
 	packetTypeCompressed packetType = 8
 	packetTypeSymmetricallyEncrypted packetType = 9
-<<<<<<< HEAD
 	packetTypeMarker packetType = 10
 	packetTypeLiteralData packetType = 11
 	packetTypeTrust packetType = 12
-=======
-	packetTypeLiteralData packetType = 11
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	packetTypeUserId packetType = 13
 	packetTypePublicSubkey packetType = 14
 	packetTypeUserAttribute packetType = 17
 	packetTypeSymmetricallyEncryptedIntegrityProtected packetType = 18
 	packetTypeAEADEncrypted packetType = 20
-<<<<<<< HEAD
 	packetPadding packetType = 21
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 )
 
 // EncryptedDataPacket holds encrypted data. It is currently implemented by
@@ -338,11 +331,7 @@ type EncryptedDataPacket interface {
 // Read reads a single OpenPGP packet from the given io.Reader. If there is an
 // error parsing a packet, the whole packet is consumed from the input.
 func Read(r io.Reader) (p Packet, err error) {
-<<<<<<< HEAD
 	tag, len, contents, err := readHeader(r)
-=======
-	tag, _, contents, err := readHeader(r)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return
 	}
@@ -381,7 +370,6 @@ func Read(r io.Reader) (p Packet, err error) {
 		p = se
 	case packetTypeAEADEncrypted:
 		p = new(AEADEncrypted)
-<<<<<<< HEAD
 	case packetPadding:
 		p = Padding(len)
 	case packetTypeMarker:
@@ -469,10 +457,6 @@ func ReadWithCheck(r io.Reader, sequence *SequenceVerifier) (p Packet, msgErr er
 	} else {
 		err = errors.UnknownPacketTypeError(tag)
 	}
-=======
-	default:
-		err = errors.UnknownPacketTypeError(tag)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 	if p != nil {
 		err = p.parse(contents)
@@ -489,7 +473,6 @@ type SignatureType uint8
 
 const (
 	SigTypeBinary SignatureType = 0x00
-<<<<<<< HEAD
 	SigTypeText SignatureType = 0x01
 	SigTypeGenericCert SignatureType = 0x10
 	SigTypePersonaCert SignatureType = 0x11
@@ -501,19 +484,6 @@ const (
 	SigTypeKeyRevocation SignatureType = 0x20
 	SigTypeSubkeyRevocation SignatureType = 0x28
 	SigTypeCertificationRevocation SignatureType = 0x30
-=======
-	SigTypeText = 0x01
-	SigTypeGenericCert = 0x10
-	SigTypePersonaCert = 0x11
-	SigTypeCasualCert = 0x12
-	SigTypePositiveCert = 0x13
-	SigTypeSubkeyBinding = 0x18
-	SigTypePrimaryKeyBinding = 0x19
-	SigTypeDirectSignature = 0x1F
-	SigTypeKeyRevocation = 0x20
-	SigTypeSubkeyRevocation = 0x28
-	SigTypeCertificationRevocation = 0x30
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 )
 
 // PublicKeyAlgorithm represents the different public key system specified for
@@ -530,14 +500,11 @@ const (
 	PubKeyAlgoECDSA PublicKeyAlgorithm = 19
 	// https://www.ietf.org/archive/id/draft-koch-eddsa-for-openpgp-04.txt
 	PubKeyAlgoEdDSA PublicKeyAlgorithm = 22
-<<<<<<< HEAD
 	// https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh
 	PubKeyAlgoX25519 PublicKeyAlgorithm = 25
 	PubKeyAlgoX448 PublicKeyAlgorithm = 26
 	PubKeyAlgoEd25519 PublicKeyAlgorithm = 27
 	PubKeyAlgoEd448 PublicKeyAlgorithm = 28
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	// Deprecated in RFC 4880, Section 13.5. Use key flags instead.
 	PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2
@@ -548,11 +515,7 @@ const (
 // key of the given type.
 func (pka PublicKeyAlgorithm) CanEncrypt() bool {
 	switch pka {
-<<<<<<< HEAD
 	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH, PubKeyAlgoX25519, PubKeyAlgoX448:
-=======
-	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH:
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		return true
 	}
 	return false
@@ -562,11 +525,7 @@ func (pka PublicKeyAlgorithm) CanEncrypt() bool {
 // sign a message.
 func (pka PublicKeyAlgorithm) CanSign() bool {
 	switch pka {
-<<<<<<< HEAD
 	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA, PubKeyAlgoEd25519, PubKeyAlgoEd448:
-=======
-	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA:
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		return true
 	}
 	return false
@@ -646,14 +605,11 @@ func (mode AEADMode) TagLength() int {
 	return algorithm.AEADMode(mode).TagLength()
 }
 
-<<<<<<< HEAD
 // IsSupported returns true if the aead mode is supported from the library
 func (mode AEADMode) IsSupported() bool {
 	return algorithm.AEADMode(mode).TagLength() > 0
 }
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // new returns a fresh instance of the given mode.
 func (mode AEADMode) new(block cipher.Block) cipher.AEAD {
 	return algorithm.AEADMode(mode).New(block)
@@ -668,7 +624,6 @@ const (
 	KeySuperseded ReasonForRevocation = 1
 	KeyCompromised ReasonForRevocation = 2
 	KeyRetired ReasonForRevocation = 3
-<<<<<<< HEAD
 	UserIDNotValid ReasonForRevocation = 32
 	Unknown ReasonForRevocation = 200
 )
@@ -680,10 +635,6 @@ func NewReasonForRevocation(value byte) ReasonForRevocation {
 	return Unknown
 }
 
-=======
-)
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // Curve is a mapping to supported ECC curves for key generation.
 // See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-06.html#name-curve-specific-wire-formats
 type Curve string
@@ -705,7 +656,6 @@ type TrustLevel uint8
 // TrustAmount represents a trust amount per RFC4880 5.2.3.13
 type TrustAmount uint8
-<<<<<<< HEAD
 
 const (
 	// versionSize is the length in bytes of the version value.
@@ -723,5 +673,3 @@ const (
 	// fingerprintSize is the length in bytes of the key fingerprint.
 	fingerprintSize = 20
 )
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go
index d2dae47441..f04e6c6b87 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go
@@ -9,42 +9,28 @@ import (
 	"crypto"
 	"crypto/cipher"
 	"crypto/dsa"
-<<<<<<< HEAD
 	"crypto/rsa"
 	"crypto/sha1"
 	"crypto/sha256"
 	"crypto/subtle"
 	"fmt"
 	"io"
-=======
-	"crypto/rand"
-	"crypto/rsa"
-	"crypto/sha1"
-	"io"
-	"io/ioutil"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"math/big"
 	"strconv"
 	"time"
 
 	"github.com/ProtonMail/go-crypto/openpgp/ecdh"
 	"github.com/ProtonMail/go-crypto/openpgp/ecdsa"
-<<<<<<< HEAD
 	"github.com/ProtonMail/go-crypto/openpgp/ed25519"
 	"github.com/ProtonMail/go-crypto/openpgp/ed448"
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"github.com/ProtonMail/go-crypto/openpgp/eddsa"
 	"github.com/ProtonMail/go-crypto/openpgp/elgamal"
 	"github.com/ProtonMail/go-crypto/openpgp/errors"
 	"github.com/ProtonMail/go-crypto/openpgp/internal/encoding"
 	"github.com/ProtonMail/go-crypto/openpgp/s2k"
-<<<<<<< HEAD
 	"github.com/ProtonMail/go-crypto/openpgp/x25519"
 	"github.com/ProtonMail/go-crypto/openpgp/x448"
 	"golang.org/x/crypto/hkdf"
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 )
 
 // PrivateKey represents a possibly encrypted private key. See RFC 4880,
@@ -55,7 +41,6 @@ type PrivateKey struct {
 	encryptedData []byte
 	cipher CipherFunction
 	s2k func(out, in []byte)
-<<<<<<< HEAD
 	aead AEADMode // only relevant if S2KAEAD is enabled
 	// An *{rsa|dsa|elgamal|ecdh|ecdsa|ed25519|ed448}.PrivateKey or
 	// crypto.Signer/crypto.Decrypter (Decryptor RSA only).
@@ -64,16 +49,6 @@ type PrivateKey struct {
 
 	// Type of encryption of the S2K packet
 	// Allowed values are 0 (Not encrypted), 253 (AEAD), 254 (SHA1), or
-=======
-	// An *{rsa|dsa|elgamal|ecdh|ecdsa|ed25519}.PrivateKey or
-	// crypto.Signer/crypto.Decrypter (Decryptor RSA only).
-	PrivateKey interface{}
-	sha1Checksum bool
-	iv []byte
-
-	// Type of encryption of the S2K packet
-	// Allowed values are 0 (Not encrypted), 254 (SHA1), or
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	// 255 (2-byte checksum)
 	s2kType S2KType
 	// Full parameters of the S2K packet
@@ -86,11 +61,8 @@ type S2KType uint8
 const (
 	// S2KNON unencrypt
 	S2KNON S2KType = 0
-<<<<<<< HEAD
 	// S2KAEAD use authenticated encryption
 	S2KAEAD S2KType = 253
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	// S2KSHA1 sha1 sum check
 	S2KSHA1 S2KType = 254
 	// S2KCHECKSUM sum check
@@ -139,7 +111,6 @@ func NewECDHPrivateKey(creationTime time.Time, priv *ecdh.PrivateKey) *PrivateKe
 	return pk
 }
 
-<<<<<<< HEAD
 func NewX25519PrivateKey(creationTime time.Time, priv *x25519.PrivateKey) *PrivateKey {
 	pk := new(PrivateKey)
 	pk.PublicKey = *NewX25519PublicKey(creationTime, &priv.PublicKey)
@@ -168,8 +139,6 @@ func NewEd448PrivateKey(creationTime time.Time, priv *ed448.PrivateKey) *Private
 	return pk
 }
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that
 // implements RSA, ECDSA or EdDSA.
 func NewSignerPrivateKey(creationTime time.Time, signer interface{}) *PrivateKey {
@@ -189,7 +158,6 @@ func NewSignerPrivateKey(creationTime time.Time, signer interface{}) *PrivateKey
 		pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pubkey.PublicKey)
 	case eddsa.PrivateKey:
 		pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pubkey.PublicKey)
-<<<<<<< HEAD
 	case *ed25519.PrivateKey:
 		pk.PublicKey = *NewEd25519PublicKey(creationTime, &pubkey.PublicKey)
 	case ed25519.PrivateKey:
@@ -198,8 +166,6 @@ func NewSignerPrivateKey(creationTime time.Time, signer interface{}) *PrivateKey
 		pk.PublicKey = *NewEd448PublicKey(creationTime, &pubkey.PublicKey)
 	case ed448.PrivateKey:
 		pk.PublicKey = *NewEd448PublicKey(creationTime, &pubkey.PublicKey)
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	default:
 		panic("openpgp: unknown signer type in NewSignerPrivateKey")
 	}
@@ -207,11 +173,7 @@ func NewSignerPrivateKey(creationTime time.Time, signer interface{}) *PrivateKey
 	return pk
 }
 
-<<<<<<< HEAD
 // NewDecrypterPrivateKey creates a PrivateKey from a *{rsa|elgamal|ecdh|x25519|x448}.PrivateKey.
-=======
-// NewDecrypterPrivateKey creates a PrivateKey from a *{rsa|elgamal|ecdh}.PrivateKey.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func NewDecrypterPrivateKey(creationTime time.Time, decrypter interface{}) *PrivateKey {
 	pk := new(PrivateKey)
 	switch priv := decrypter.(type) {
@@ -221,13 +183,10 @@ func NewDecrypterPrivateKey(creationTime time.Time, decrypter interface{}) *Priv
 		pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey)
 	case *ecdh.PrivateKey:
 		pk.PublicKey = *NewECDHPublicKey(creationTime, &priv.PublicKey)
-<<<<<<< HEAD
 	case *x25519.PrivateKey:
 		pk.PublicKey = *NewX25519PublicKey(creationTime, &priv.PublicKey)
 	case *x448.PrivateKey:
 		pk.PublicKey = *NewX448PublicKey(creationTime, &priv.PublicKey)
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	default:
 		panic("openpgp: unknown decrypter type in NewDecrypterPrivateKey")
 	}
@@ -241,14 +200,11 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) {
 		return
 	}
 	v5 := pk.PublicKey.Version == 5
-<<<<<<< HEAD
 	v6 := pk.PublicKey.Version == 6
 
 	if V5Disabled && v5 {
 		return errors.UnsupportedError("support for parsing v5 entities is disabled; build with `-tags v5` if needed")
 	}
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	var buf [1]byte
 	_, err = readFull(r, buf[:])
@@ -257,11 +213,7 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) {
 	}
 	pk.s2kType = S2KType(buf[0])
 	var optCount [1]byte
-<<<<<<< HEAD
 	if v5 || (v6 && pk.s2kType != S2KNON) {
-=======
-	if v5 {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		if _, err = readFull(r, optCount[:]); err != nil {
 			return
 		}
@@ -271,15 +223,9 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) {
 	case S2KNON:
 		pk.s2k = nil
 		pk.Encrypted = false
-<<<<<<< HEAD
 	case S2KSHA1, S2KCHECKSUM, S2KAEAD:
 		if (v5 || v6) && pk.s2kType == S2KCHECKSUM {
 			return errors.StructuralError(fmt.Sprintf("wrong s2k identifier for version %d", pk.Version))
-=======
-	case S2KSHA1, S2KCHECKSUM:
-		if v5 && pk.s2kType == S2KCHECKSUM {
-			return errors.StructuralError("wrong s2k identifier for version 5")
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		}
 		_, err = readFull(r, buf[:])
 		if err != nil {
@@ -289,7 +235,6 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) {
 		if pk.cipher != 0 && !pk.cipher.IsSupported() {
 			return errors.UnsupportedError("unsupported cipher function in private key")
 		}
-<<<<<<< HEAD
 		// [Optional] If string-to-key usage octet was 253,
 		// a one-octet AEAD algorithm.
 		if pk.s2kType == S2KAEAD {
@@ -313,8 +258,6 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) {
 			}
 		}
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		pk.s2kParams, err = s2k.ParseIntoParams(r)
 		if err != nil {
 			return
@@ -322,32 +265,22 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) {
 		if pk.s2kParams.Dummy() {
 			return
 		}
-<<<<<<< HEAD
 		if pk.s2kParams.Mode() == s2k.Argon2S2K && pk.s2kType != S2KAEAD {
 			return errors.StructuralError("using Argon2 S2K without AEAD is not allowed")
 		}
 		if pk.s2kParams.Mode() == s2k.SimpleS2K && pk.Version == 6 {
 			return errors.StructuralError("using Simple S2K with version 6 keys is not allowed")
 		}
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		pk.s2k, err = pk.s2kParams.Function()
 		if err != nil {
 			return
 		}
 		pk.Encrypted = true
-<<<<<<< HEAD
-=======
-		if pk.s2kType == S2KSHA1 {
-			pk.sha1Checksum = true
-		}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	default:
 		return errors.UnsupportedError("deprecated s2k function in private key")
 	}
 
 	if pk.Encrypted {
-<<<<<<< HEAD
 		var ivSize int
 		// If the S2K usage octet was 253, the IV is of the size expected by the AEAD mode,
 		// unless it's a version 5 key, in which case it's the size of the symmetric cipher's block size.
@@ -362,23 +295,13 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) {
 			return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher)))
 		}
 		pk.iv = make([]byte, ivSize)
-=======
-		blockSize := pk.cipher.blockSize()
-		if blockSize == 0 {
-			return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher)))
-		}
-		pk.iv = make([]byte, blockSize)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		_, err = readFull(r, pk.iv)
 		if err != nil {
 			return
 		}
-<<<<<<< HEAD
 		if v5 && pk.s2kType == S2KAEAD {
 			pk.iv = pk.iv[:pk.aead.IvLength()]
 		}
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 
 	var privateKeyData []byte
@@ -398,11 +321,7 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) {
 			return
 		}
 	} else {
-<<<<<<< HEAD
 		privateKeyData, err = io.ReadAll(r)
-=======
-		privateKeyData, err = ioutil.ReadAll(r)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		if err != nil {
 			return
 		}
@@ -411,7 +330,6 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) {
 		if len(privateKeyData) < 2 {
 			return errors.StructuralError("truncated private key data")
 		}
-<<<<<<< HEAD
 		if pk.Version != 6 {
 			// checksum
 			var sum uint16
@@ -428,18 +346,6 @@ func (pk *PrivateKey) parse(r io.Reader) (err error) {
 			// No checksum
 			return pk.parsePrivateKey(privateKeyData)
 		}
-=======
-		var sum uint16
-		for i := 0; i < len(privateKeyData)-2; i++ {
-			sum += uint16(privateKeyData[i])
-		}
-		if privateKeyData[len(privateKeyData)-2] != uint8(sum>>8) ||
-			privateKeyData[len(privateKeyData)-1] != uint8(sum) {
-			return errors.StructuralError("private key checksum failure")
-		}
-		privateKeyData = privateKeyData[:len(privateKeyData)-2]
-		return pk.parsePrivateKey(privateKeyData)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 
 	pk.encryptedData = privateKeyData
@@ -471,7 +377,6 @@ func (pk *PrivateKey) Serialize(w io.Writer) (err error) {
 
 	optional := bytes.NewBuffer(nil)
 	if pk.Encrypted || pk.Dummy() {
-<<<<<<< HEAD
 		// [Optional] If string-to-key usage octet was 255, 254, or 253,
 		// a one-octet symmetric encryption algorithm.
 		if _, err = optional.Write([]byte{uint8(pk.cipher)}); err != nil {
@@ -525,20 +430,6 @@ func (pk *PrivateKey) Serialize(w io.Writer) (err error) {
 	if _, err := io.Copy(contents, optional); err != nil {
 		return err
 	}
-=======
-		optional.Write([]byte{uint8(pk.cipher)})
-		if err := pk.s2kParams.Serialize(optional); err != nil {
-			return err
-		}
-		if pk.Encrypted {
-			optional.Write(pk.iv)
-		}
-	}
-	if pk.Version == 5 {
-		contents.Write([]byte{uint8(optional.Len())})
-	}
-	io.Copy(contents, optional)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	if !pk.Dummy() {
 		l := 0
@@ -550,15 +441,10 @@ func (pk *PrivateKey) Serialize(w io.Writer) (err error) {
 			return err
 		}
 		l = buf.Len()
-<<<<<<< HEAD
 		if pk.Version != 6 {
 			checksum := mod64kHash(buf.Bytes())
 			buf.Write([]byte{byte(checksum >> 8), byte(checksum)})
 		}
-=======
-		checksum := mod64kHash(buf.Bytes())
-		buf.Write([]byte{byte(checksum >> 8), byte(checksum)})
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		priv = buf.Bytes()
 	} else {
 		priv, l = pk.encryptedData, len(pk.encryptedData)
@@ -624,7 +510,6 @@ func serializeECDHPrivateKey(w io.Writer, priv *ecdh.PrivateKey) error {
 	return err
 }
 
-<<<<<<< HEAD
 func serializeX25519PrivateKey(w io.Writer, priv *x25519.PrivateKey) error {
 	_, err := w.Write(priv.Secret)
 	return err
}
@@ -645,8 +530,6 @@ func serializeEd448PrivateKey(w io.Writer, priv *ed448.PrivateKey) error {
 	return err
 }
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // decrypt decrypts an encrypted private key using a decryption key.
 func (pk *PrivateKey) decrypt(decryptionKey []byte) error {
 	if pk.Dummy() {
@@ -655,7 +538,6 @@ func (pk *PrivateKey) decrypt(decryptionKey []byte) error {
 	if !pk.Encrypted {
 		return nil
 	}
-<<<<<<< HEAD
 	block := pk.cipher.new(decryptionKey)
 	var data []byte
 	switch pk.s2kType {
@@ -701,39 +583,6 @@ func (pk *PrivateKey) decrypt(decryptionKey []byte) error {
 		}
 	default:
 		return errors.InvalidArgumentError("invalid s2k type")
-=======
-
-	block := pk.cipher.new(decryptionKey)
-	cfb := cipher.NewCFBDecrypter(block, pk.iv)
-
-	data := make([]byte, len(pk.encryptedData))
-	cfb.XORKeyStream(data, pk.encryptedData)
-
-	if pk.sha1Checksum {
-		if len(data) < sha1.Size {
-			return errors.StructuralError("truncated private key data")
-		}
-		h := sha1.New()
-		h.Write(data[:len(data)-sha1.Size])
-		sum := h.Sum(nil)
-		if !bytes.Equal(sum, data[len(data)-sha1.Size:]) {
-			return errors.StructuralError("private key checksum failure")
-		}
-		data = data[:len(data)-sha1.Size]
-	} else {
-		if len(data) < 2 {
-			return errors.StructuralError("truncated private key data")
-		}
-		var sum uint16
-		for i := 0; i < len(data)-2; i++ {
-			sum += uint16(data[i])
-		}
-		if data[len(data)-2] != uint8(sum>>8) ||
-			data[len(data)-1] != uint8(sum) {
-			return errors.StructuralError("private key checksum failure")
-		}
-		data = data[:len(data)-2]
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 
 	err := pk.parsePrivateKey(data)
@@ -749,10 +598,6 @@ func (pk *PrivateKey) decrypt(decryptionKey []byte) error {
 	pk.s2k = nil
 	pk.Encrypted = false
 	pk.encryptedData = nil
-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return nil
 }
 
@@ -768,12 +613,9 @@ func (pk *PrivateKey) decryptWithCache(passphrase []byte, keyCache *s2k.Cache) e
 	if err != nil {
 		return err
 	}
-<<<<<<< HEAD
 	if pk.s2kType == S2KAEAD {
 		key = pk.applyHKDF(key)
 	}
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return pk.decrypt(key)
 }
 
@@ -788,21 +630,14 @@ func (pk *PrivateKey) Decrypt(passphrase []byte) error {
 	key := make([]byte, pk.cipher.KeySize())
 	pk.s2k(key, passphrase)
-<<<<<<< HEAD
 	if pk.s2kType == S2KAEAD {
 		key = pk.applyHKDF(key)
 	}
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return pk.decrypt(key)
 }
 
 // DecryptPrivateKeys decrypts all encrypted keys with the given config and passphrase.
-<<<<<<< HEAD
 // Avoids recomputation of similar s2k key derivations.
-=======
-// Avoids recomputation of similar s2k key derivations.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func DecryptPrivateKeys(keys []*PrivateKey, passphrase []byte) error {
 	// Create a cache to avoid recomputation of key derviations for the same passphrase.
 	s2kCache := &s2k.Cache{}
@@ -818,11 +653,7 @@ func DecryptPrivateKeys(keys []*PrivateKey, passphrase []byte) error {
 }
 
 // encrypt encrypts an unencrypted private key.
-<<<<<<< HEAD
 func (pk *PrivateKey) encrypt(key []byte, params *s2k.Params, s2kType S2KType, cipherFunction CipherFunction, rand io.Reader) error {
-=======
-func (pk *PrivateKey) encrypt(key []byte, params *s2k.Params, cipherFunction CipherFunction) error {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if pk.Dummy() {
 		return errors.ErrDummyPrivateKey("dummy key found")
 	}
@@ -833,7 +664,6 @@ func (pk *PrivateKey) encrypt(key []byte, params *s2k.Params, cipherFunction Cip
 	if len(key) != cipherFunction.KeySize() {
 		return errors.InvalidArgumentError("supplied encryption key has the wrong size")
 	}
-<<<<<<< HEAD
 
 	if params.Mode() == s2k.Argon2S2K && s2kType != S2KAEAD {
 		return errors.InvalidArgumentError("using Argon2 S2K without AEAD is not allowed")
@@ -843,9 +673,6 @@ func (pk *PrivateKey) encrypt(key []byte, params *s2k.Params, cipherFunction Cip
 		return errors.InvalidArgumentError("insecure S2K mode")
 	}
 
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	priv := bytes.NewBuffer(nil)
 	err := pk.serializePrivateKey(priv)
 	if err != nil {
@@ -857,7 +684,6 @@ func (pk *PrivateKey) encrypt(key []byte, params *s2k.Params, cipherFunction Cip
 	pk.s2k, err = pk.s2kParams.Function()
 	if err != nil {
 		return err
-<<<<<<< HEAD
 	}
 
 	privateKeyBytes := priv.Bytes()
@@ -905,37 +731,6 @@ func (pk *PrivateKey) encrypt(key []byte, params *s2k.Params, cipherFunction Cip
 		return errors.InvalidArgumentError("invalid s2k type for encryption")
 	}
 
-=======
-	}
-
-	privateKeyBytes := priv.Bytes()
-	pk.sha1Checksum = true
-	block := pk.cipher.new(key)
-	pk.iv = make([]byte, pk.cipher.blockSize())
-	_, err = rand.Read(pk.iv)
-	if err != nil {
-		return err
-	}
-	cfb := cipher.NewCFBEncrypter(block, pk.iv)
-
-	if pk.sha1Checksum {
-		pk.s2kType = S2KSHA1
-		h := sha1.New()
-		h.Write(privateKeyBytes)
-		sum := h.Sum(nil)
-		privateKeyBytes = append(privateKeyBytes, sum...)
-	} else {
-		pk.s2kType = S2KCHECKSUM
-		var sum uint16
-		for _, b := range privateKeyBytes {
-			sum += uint16(b)
-		}
-		priv.Write([]byte{uint8(sum >> 8), uint8(sum)})
-	}
-
-	pk.encryptedData = make([]byte, len(privateKeyBytes))
-	cfb.XORKeyStream(pk.encryptedData, privateKeyBytes)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	pk.Encrypted = true
 	pk.PrivateKey = nil
 	return err
@@ -954,7 +749,6 @@ func (pk *PrivateKey) EncryptWithConfig(passphrase []byte, config *Config) error
 		return err
 	}
 	s2k(key, passphrase)
-<<<<<<< HEAD
 	s2kType := S2KSHA1
 	if config.AEAD() != nil {
 		s2kType = S2KAEAD
@@ -964,10 +758,6 @@ func (pk *PrivateKey) EncryptWithConfig(passphrase []byte, config *Config) error
 	}
 
 	// Encrypt the private key with the derived encryption key.
 	return pk.encrypt(key, params, s2kType, config.Cipher(), config.Random())
-=======
-	// Encrypt the private key with the derived encryption key.
-	return pk.encrypt(key, params, config.Cipher())
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 // EncryptPrivateKeys encrypts all unencrypted keys with the given config and passphrase.
@@ -986,7 +776,6 @@ func EncryptPrivateKeys(keys []*PrivateKey, passphrase []byte, config *Config) e
 	s2k(encryptionKey, passphrase)
 	for _, key := range keys {
 		if key != nil && !key.Dummy() && !key.Encrypted {
-<<<<<<< HEAD
 			s2kType := S2KSHA1
 			if config.AEAD() != nil {
 				s2kType = S2KAEAD
@@ -997,9 +786,6 @@ func EncryptPrivateKeys(keys []*PrivateKey, passphrase []byte, config *Config) e
 			} else {
 				err = key.encrypt(encryptionKey, params, s2kType, config.Cipher(), config.Random())
 			}
-=======
-			err = key.encrypt(encryptionKey, params, config.Cipher())
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			if err != nil {
 				return err
 			}
@@ -1016,11 +802,7 @@ func (pk *PrivateKey) Encrypt(passphrase []byte) error {
 			S2KMode: s2k.IteratedSaltedS2K,
 			S2KCount: 65536,
 			Hash: crypto.SHA256,
-<<<<<<< HEAD
 		},
-=======
-		} ,
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		DefaultCipher: CipherAES256,
 	}
 	return pk.EncryptWithConfig(passphrase, config)
@@ -1040,7 +822,6 @@ func (pk *PrivateKey) serializePrivateKey(w io.Writer) (err error) {
 		err = serializeEdDSAPrivateKey(w, priv)
 	case *ecdh.PrivateKey:
 		err = serializeECDHPrivateKey(w, priv)
-<<<<<<< HEAD
 	case *x25519.PrivateKey:
 		err = serializeX25519PrivateKey(w, priv)
 	case *x448.PrivateKey:
@@ -1049,8 +830,6 @@ func (pk *PrivateKey) serializePrivateKey(w io.Writer) (err error) {
 		err = serializeEd25519PrivateKey(w, priv)
 	case *ed448.PrivateKey:
 		err = serializeEd448PrivateKey(w, priv)
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	default:
 		err = errors.InvalidArgumentError("unknown private key type")
 	}
@@ -1071,7 +850,6 @@ func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) {
 		return pk.parseECDHPrivateKey(data)
 	case PubKeyAlgoEdDSA:
 		return pk.parseEdDSAPrivateKey(data)
-<<<<<<< HEAD
 	case PubKeyAlgoX25519:
 		return pk.parseX25519PrivateKey(data)
 	case PubKeyAlgoX448:
@@ -1084,10 +862,6 @@ func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) {
 		err = errors.StructuralError("unknown private key type")
 		return
 	}
-=======
-	}
-	panic("impossible")
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) {
@@ -1208,7 +982,6 @@ func (pk *PrivateKey) parseECDHPrivateKey(data []byte) (err error) {
 	return nil
 }
 
-<<<<<<< HEAD
 func (pk *PrivateKey) parseX25519PrivateKey(data []byte) (err error) {
 	publicKey := pk.PublicKey.PublicKey.(*x25519.PublicKey)
 	privateKey := x25519.NewPrivateKey(*publicKey)
@@ -1289,8 +1062,6 @@ func (pk *PrivateKey) parseEd448PrivateKey(data []byte) (err error) {
 	return nil
 }
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (pk *PrivateKey) parseEdDSAPrivateKey(data []byte) (err error) {
 	eddsaPub := pk.PublicKey.PublicKey.(*eddsa.PublicKey)
 	eddsaPriv := eddsa.NewPrivateKey(*eddsaPub)
@@ -1315,7 +1086,6 @@ func (pk *PrivateKey) parseEdDSAPrivateKey(data []byte) (err error) {
 	return nil
 }
 
-<<<<<<< HEAD
 func (pk *PrivateKey) additionalData() ([]byte, error) {
 	additionalData := bytes.NewBuffer(nil)
 	// Write additional data prefix based on packet type
@@ -1351,8 +1121,6 @@ func (pk *PrivateKey) applyHKDF(inputKey []byte) []byte {
 	return encryptionKey
 }
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func validateDSAParameters(priv *dsa.PrivateKey) error {
 	p := priv.P // group prime
 	q := priv.Q // subgroup order
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go
index 8ef023ab3b..f8da781bbe 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go
@@ -5,10 +5,6 @@
 package packet
 
 import (
-<<<<<<< HEAD
-=======
-	"crypto"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"crypto/dsa"
 	"crypto/rsa"
 	"crypto/sha1"
@@ -24,39 +20,24 @@ import (
 	"github.com/ProtonMail/go-crypto/openpgp/ecdh"
 	"github.com/ProtonMail/go-crypto/openpgp/ecdsa"
-<<<<<<< HEAD
 	"github.com/ProtonMail/go-crypto/openpgp/ed25519"
 	"github.com/ProtonMail/go-crypto/openpgp/ed448"
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"github.com/ProtonMail/go-crypto/openpgp/eddsa"
 	"github.com/ProtonMail/go-crypto/openpgp/elgamal"
 	"github.com/ProtonMail/go-crypto/openpgp/errors"
 	"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
 	"github.com/ProtonMail/go-crypto/openpgp/internal/ecc"
 	"github.com/ProtonMail/go-crypto/openpgp/internal/encoding"
-<<<<<<< HEAD
 	"github.com/ProtonMail/go-crypto/openpgp/x25519"
 	"github.com/ProtonMail/go-crypto/openpgp/x448"
 )
-=======
-)
-
-type kdfHashFunction byte
-type kdfAlgorithm byte
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 // PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2.
 type PublicKey struct {
 	Version int
 	CreationTime time.Time
 	PubKeyAlgo PublicKeyAlgorithm
-<<<<<<< HEAD
 	PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey or *eddsa.PublicKey, *x25519.PublicKey, *x448.PublicKey, *ed25519.PublicKey, *ed448.PublicKey
-=======
-	PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey or *eddsa.PublicKey
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	Fingerprint []byte
 	KeyId uint64
 	IsSubkey bool
@@ -80,7 +61,6 @@ func (pk *PublicKey) UpgradeToV5() {
 	pk.setFingerprintAndKeyId()
 }
 
-<<<<<<< HEAD
 // UpgradeToV6 updates the version of the key to v6, and updates all necessary
 // fields.
 func (pk *PublicKey) UpgradeToV6() error {
@@ -89,17 +69,11 @@ func (pk *PublicKey) UpgradeToV6() error {
 	return pk.checkV6Compatibility()
 }
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // signingKey provides a convenient abstraction over signature verification
 // for v3 and v4 public keys.
 type signingKey interface {
 	SerializeForHash(io.Writer) error
-<<<<<<< HEAD
 	SerializeSignaturePrefix(io.Writer) error
-=======
-	SerializeSignaturePrefix(io.Writer)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	serializeWithoutHeaders(io.Writer) error
 }
@@ -208,7 +182,6 @@ func NewEdDSAPublicKey(creationTime time.Time, pub *eddsa.PublicKey) *PublicKey
 	return pk
 }
 
-<<<<<<< HEAD
 func NewX25519PublicKey(creationTime time.Time, pub *x25519.PublicKey) *PublicKey {
 	pk := &PublicKey{
 		Version: 4,
@@ -257,8 +230,6 @@ func NewEd448PublicKey(creationTime time.Time, pub *ed448.PublicKey) *PublicKey
 	return pk
 }
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (pk *PublicKey) parse(r io.Reader) (err error) {
 	// RFC 4880, section 5.5.2
 	var buf [6]byte
@@ -266,7 +237,6 @@ func (pk *PublicKey) parse(r io.Reader) (err error) {
 	if err != nil {
 		return
 	}
-<<<<<<< HEAD
 	pk.Version = int(buf[0])
 
 	if pk.Version != 4 && pk.Version != 5 && pk.Version != 6 {
@@ -280,14 +250,6 @@ func (pk *PublicKey) parse(r io.Reader) (err error) {
 	if pk.Version >= 5 {
 		// Read the four-octet scalar octet count
 		// The count is not used in this implementation
-=======
-	if buf[0] != 4 && buf[0] != 5 {
-		return errors.UnsupportedError("public key version " + strconv.Itoa(int(buf[0])))
-	}
-
-	pk.Version = int(buf[0])
-	if pk.Version == 5 {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		var n [4]byte
 		_, err = readFull(r, n[:])
 		if err != nil {
@@ -296,10 +258,7 @@ func (pk *PublicKey) parse(r io.Reader) (err error) {
 	}
 	pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0)
 	pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5])
-<<<<<<< HEAD
 	// Ignore four-ocet length
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	switch pk.PubKeyAlgo {
 	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
 		err = pk.parseRSA(r)
@@ -313,7 +272,6 @@ func (pk *PublicKey) parse(r io.Reader) (err error) {
 		err = pk.parseECDH(r)
 	case PubKeyAlgoEdDSA:
 		err = pk.parseEdDSA(r)
-<<<<<<< HEAD
 	case PubKeyAlgoX25519:
 		err = pk.parseX25519(r)
 	case PubKeyAlgoX448:
@@ -322,8 +280,6 @@ func (pk *PublicKey) parse(r io.Reader) (err error) {
 		err = pk.parseEd25519(r)
 	case PubKeyAlgoEd448:
 		err = pk.parseEd448(r)
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	default:
 		err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo)))
 	}
@@ -337,38 +293,27 @@ func (pk *PublicKey) parse(r io.Reader) (err error) {
 
 func (pk *PublicKey) setFingerprintAndKeyId() {
 	// RFC 4880, section 12.2
-<<<<<<< HEAD
 	if pk.Version >= 5 {
 		fingerprint := sha256.New()
 		if err := pk.SerializeForHash(fingerprint); err != nil {
 			// Should not happen for a hash.
 			panic(err)
 		}
-=======
-	if pk.Version == 5 {
-		fingerprint := sha256.New()
-		pk.SerializeForHash(fingerprint)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		pk.Fingerprint = make([]byte, 32)
 		copy(pk.Fingerprint, fingerprint.Sum(nil))
 		pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[:8])
 	} else {
 		fingerprint := sha1.New()
-<<<<<<< HEAD
 		if err := pk.SerializeForHash(fingerprint); err != nil {
 			// Should not happen for a hash.
 			panic(err)
 		}
-=======
-		pk.SerializeForHash(fingerprint)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		pk.Fingerprint = make([]byte, 20)
 		copy(pk.Fingerprint, fingerprint.Sum(nil))
 		pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20])
 	}
 }
 
-<<<<<<< HEAD
 func (pk *PublicKey) checkV6Compatibility() error {
 	// Implementations MUST NOT accept or generate version 6 key material using the deprecated OIDs.
 	switch pk.PubKeyAlgo {
@@ -386,8 +331,6 @@ func (pk *PublicKey) checkV6Compatibility() error {
 	return nil
 }
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // parseRSA parses RSA public key material from the given Reader. See RFC 4880,
 // section 5.5.2.
 func (pk *PublicKey) parseRSA(r io.Reader) (err error) {
@@ -476,27 +419,17 @@ func (pk *PublicKey) parseECDSA(r io.Reader) (err error) {
 	if _, err = pk.oid.ReadFrom(r); err != nil {
 		return
 	}
-<<<<<<< HEAD
-=======
-	pk.p = new(encoding.MPI)
-	if _, err = pk.p.ReadFrom(r); err != nil {
-		return
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	curveInfo := ecc.FindByOid(pk.oid)
 	if curveInfo == nil {
 		return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid))
 	}
 
-<<<<<<< HEAD
 	pk.p = new(encoding.MPI)
 	if _, err = pk.p.ReadFrom(r); err != nil {
 		return
 	}
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	c, ok := curveInfo.Curve.(ecc.ECDSACurve)
 	if !ok {
 		return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid))
@@ -516,7 +449,6 @@ func (pk *PublicKey) parseECDH(r io.Reader) (err error) {
 	if _, err = pk.oid.ReadFrom(r); err != nil {
 		return
 	}
-<<<<<<< HEAD
 
 	curveInfo := ecc.FindByOid(pk.oid)
 
 	if curveInfo == nil {
@@ -528,8 +460,6 @@ func (pk *PublicKey) parseECDH(r io.Reader) (err error) {
 		return errors.StructuralError("cannot read v6 key with deprecated OID: Curve25519Legacy")
 	}
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	pk.p = new(encoding.MPI)
 	if _, err = pk.p.ReadFrom(r); err != nil {
 		return
@@ -539,15 +469,6 @@ func (pk *PublicKey) parseECDH(r io.Reader) (err error) {
 		return
 	}
 
-<<<<<<< HEAD
-=======
-	curveInfo := ecc.FindByOid(pk.oid)
-
-	if curveInfo == nil {
-		return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid))
-	}
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	c, ok := curveInfo.Curve.(ecc.ECDHCurve)
 	if !ok {
 		return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid))
@@ -576,22 +497,16 @@ func (pk *PublicKey) parseECDH(r io.Reader) (err error) {
 }
 
 func (pk *PublicKey) parseEdDSA(r io.Reader) (err error) {
-<<<<<<< HEAD
 	if pk.Version == 6 {
 		// Implementations MUST NOT accept or generate version 6 key material using the deprecated OIDs.
return errors.StructuralError("cannot generate v6 key with deprecated algorithm: EdDSALegacy") } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) pk.oid = new(encoding.OID) if _, err = pk.oid.ReadFrom(r); err != nil { return } -<<<<<<< HEAD -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) curveInfo := ecc.FindByOid(pk.oid) if curveInfo == nil { return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid)) @@ -627,7 +542,6 @@ func (pk *PublicKey) parseEdDSA(r io.Reader) (err error) { return } -<<<<<<< HEAD func (pk *PublicKey) parseX25519(r io.Reader) (err error) { point := make([]byte, x25519.KeySize) _, err = io.ReadFull(r, point) @@ -686,19 +600,12 @@ func (pk *PublicKey) SerializeForHash(w io.Writer) error { if err := pk.SerializeSignaturePrefix(w); err != nil { return err } -======= -// SerializeForHash serializes the PublicKey to w with the special packet -// header format needed for hashing. -func (pk *PublicKey) SerializeForHash(w io.Writer) error { - pk.SerializeSignaturePrefix(w) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return pk.serializeWithoutHeaders(w) } // SerializeSignaturePrefix writes the prefix for this public key to the given Writer. // The prefix is used when calculating a signature over this public key. See // RFC 4880, section 5.2.4. -<<<<<<< HEAD func (pk *PublicKey) SerializeSignaturePrefix(w io.Writer) error { var pLength = pk.algorithmSpecificByteCount() // version, timestamp, algorithm @@ -711,20 +618,11 @@ func (pk *PublicKey) SerializeSignaturePrefix(w io.Writer) error { // of the key, and then the body of the key packet. When a v6 signature is made over a key, the hash data starts // with the salt, then octet 0x9B, followed by a four-octet length of the key, and then the body of the key packet. 0x95 + byte(pk.Version), -======= -func (pk *PublicKey) SerializeSignaturePrefix(w io.Writer) { - var pLength = pk.algorithmSpecificByteCount() - if pk.Version == 5 { - pLength += 10 // version, timestamp (4), algorithm, key octet count (4). 
-		w.Write([]byte{
-			0x9A,
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			byte(pLength >> 24),
 			byte(pLength >> 16),
 			byte(pLength >> 8),
 			byte(pLength),
 		})
-<<<<<<< HEAD
 		return err
 	}
 	if _, err := w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}); err != nil {
@@ -737,36 +635,19 @@ func (pk *PublicKey) Serialize(w io.Writer) (err error) {
 	length := uint32(versionSize + timestampSize + algorithmSize) // 6 byte header
 	length += pk.algorithmSpecificByteCount()
 	if pk.Version >= 5 {
-=======
-		return
-	}
-	pLength += 6
-	w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)})
-}
-
-func (pk *PublicKey) Serialize(w io.Writer) (err error) {
-	length := 6 // 6 byte header
-	length += pk.algorithmSpecificByteCount()
-	if pk.Version == 5 {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		length += 4 // octet key count
 	}
 	packetType := packetTypePublicKey
 	if pk.IsSubkey {
 		packetType = packetTypePublicSubkey
 	}
-<<<<<<< HEAD
 	err = serializeHeader(w, packetType, int(length))
-=======
-	err = serializeHeader(w, packetType, length)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return
 	}
 	return pk.serializeWithoutHeaders(w)
 }
 
-<<<<<<< HEAD
 func (pk *PublicKey) algorithmSpecificByteCount() uint32 {
 	length := uint32(0)
 	switch pk.PubKeyAlgo {
@@ -800,33 +681,6 @@ func (pk *PublicKey) algorithmSpecificByteCount() uint32 {
 		length += ed25519.PublicKeySize
 	case PubKeyAlgoEd448:
 		length += ed448.PublicKeySize
-=======
-func (pk *PublicKey) algorithmSpecificByteCount() int {
-	length := 0
-	switch pk.PubKeyAlgo {
-	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
-		length += int(pk.n.EncodedLength())
-		length += int(pk.e.EncodedLength())
-	case PubKeyAlgoDSA:
-		length += int(pk.p.EncodedLength())
-		length += int(pk.q.EncodedLength())
-		length += int(pk.g.EncodedLength())
-		length += int(pk.y.EncodedLength())
-	case PubKeyAlgoElGamal:
-		length += int(pk.p.EncodedLength())
-		length += int(pk.g.EncodedLength())
-		length += int(pk.y.EncodedLength())
-	case PubKeyAlgoECDSA:
-		length += int(pk.oid.EncodedLength())
-		length += int(pk.p.EncodedLength())
-	case PubKeyAlgoECDH:
-		length += int(pk.oid.EncodedLength())
-		length += int(pk.p.EncodedLength())
-		length += int(pk.kdf.EncodedLength())
-	case PubKeyAlgoEdDSA:
-		length += int(pk.oid.EncodedLength())
-		length += int(pk.p.EncodedLength())
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	default:
 		panic("unknown public key algorithm")
 	}
@@ -845,11 +699,7 @@ func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) {
 		return
 	}
 
-<<<<<<< HEAD
 	if pk.Version >= 5 {
-=======
-	if pk.Version == 5 {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		n := pk.algorithmSpecificByteCount()
 		if _, err = w.Write([]byte{
 			byte(n >> 24), byte(n >> 16), byte(n >> 8), byte(n),
@@ -907,7 +757,6 @@ func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) {
 		}
 		_, err = w.Write(pk.p.EncodedBytes())
 		return
-<<<<<<< HEAD
 	case PubKeyAlgoX25519:
 		publicKey := pk.PublicKey.(*x25519.PublicKey)
 		_, err = w.Write(publicKey.Point)
@@ -924,8 +773,6 @@ func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) {
 		publicKey := pk.PublicKey.(*ed448.PublicKey)
 		_, err = w.Write(publicKey.Point)
 		return
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 	return errors.InvalidArgumentError("bad public-key algorithm")
 }
@@ -935,7 +782,6 @@ func (pk *PublicKey) CanSign() bool {
 	return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal && pk.PubKeyAlgo != PubKeyAlgoECDH
 }
 
-<<<<<<< HEAD
 // VerifyHashTag returns nil iff sig appears to be a plausible signature of the data
 // hashed into signed, based solely on its HashTag. signed is mutated by this call.
 func VerifyHashTag(signed hash.Hash, sig *Signature) (err error) {
@@ -950,8 +796,6 @@ func VerifyHashTag(signed hash.Hash, sig *Signature) (err error) {
 	return nil
 }
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // VerifySignature returns nil iff sig is a valid signature, made by this
 // public key, of the data hashed into signed. signed is mutated by this call.
 func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) {
@@ -963,12 +807,8 @@ func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err erro
 	}
 	signed.Write(sig.HashSuffix)
 	hashBytes := signed.Sum(nil)
-<<<<<<< HEAD
 	// see discussion https://github.com/ProtonMail/go-crypto/issues/107
 	if sig.Version >= 5 && (hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1]) {
-=======
-	if sig.Version == 5 && (hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1]) {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		return errors.SignatureError("hash tag doesn't match")
 	}
 
@@ -1007,7 +847,6 @@ func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err erro
 		return errors.SignatureError("EdDSA verification failure")
 	}
 	return nil
-<<<<<<< HEAD
 	case PubKeyAlgoEd25519:
 		ed25519PublicKey := pk.PublicKey.(*ed25519.PublicKey)
 		if !ed25519.Verify(ed25519PublicKey, hashBytes, sig.EdSig) {
@@ -1020,8 +859,6 @@ func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err erro
 			return errors.SignatureError("ed448 verification failure")
 		}
 		return nil
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	default:
 		return errors.SignatureError("Unsupported public key algorithm used in signature")
 	}
@@ -1029,16 +866,8 @@ func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err erro
 
 // keySignatureHash returns a Hash of the message that needs to be signed for
 // pk to assert a subkey relationship to signed.
-<<<<<<< HEAD
 func keySignatureHash(pk, signed signingKey, hashFunc hash.Hash) (h hash.Hash, err error) {
 	h = hashFunc
-=======
-func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) {
-	if !hashFunc.Available() {
-		return nil, errors.UnsupportedError("hash function")
-	}
-	h = hashFunc.New()
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	// RFC 4880, section 5.2.4
 	err = pk.SerializeForHash(h)
@@ -1050,7 +879,6 @@ func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash,
 	return
 }
 
-<<<<<<< HEAD
 // VerifyKeyHashTag returns nil iff sig appears to be a plausible signature over this
 // primary key and subkey, based solely on its HashTag.
 func (pk *PublicKey) VerifyKeyHashTag(signed *PublicKey, sig *Signature) error {
@@ -1073,12 +901,6 @@ func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error
 		return err
 	}
 	h, err := keySignatureHash(pk, signed, preparedHash)
-=======
-// VerifyKeySignature returns nil iff sig is a valid signature, made by this
-// public key, of signed.
-func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error {
-	h, err := keySignatureHash(pk, signed, sig.Hash)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return err
 	}
@@ -1092,7 +914,6 @@ func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error
 		if sig.EmbeddedSignature == nil {
 			return errors.StructuralError("signing subkey is missing cross-signature")
 		}
-<<<<<<< HEAD
 		preparedHashEmbedded, err := sig.EmbeddedSignature.PrepareVerify()
 		if err != nil {
 			return err
 		}
@@ -1101,12 +922,6 @@ func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error
 		// data as the main signature, so we cannot just recursively
 		// call signed.VerifyKeySignature(...)
 		if h, err = keySignatureHash(pk, signed, preparedHashEmbedded); err != nil {
-=======
-		// Verify the cross-signature. This is calculated over the same
-		// data as the main signature, so we cannot just recursively
-		// call signed.VerifyKeySignature(...)
-		if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			return errors.StructuralError("error while hashing for cross-signature: " + err.Error())
 		}
 		if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil {
@@ -1117,7 +932,6 @@ func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error
 	return nil
 }
 
-<<<<<<< HEAD
 func keyRevocationHash(pk signingKey, hashFunc hash.Hash) (err error) {
 	return pk.SerializeForHash(hashFunc)
 }
@@ -1133,24 +947,11 @@ func (pk *PublicKey) VerifyRevocationHashTag(sig *Signature) (err error) {
 		return err
 	}
 	return VerifyHashTag(preparedHash, sig)
-=======
-func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) {
-	if !hashFunc.Available() {
-		return nil, errors.UnsupportedError("hash function")
-	}
-	h = hashFunc.New()
-
-	// RFC 4880, section 5.2.4
-	err = pk.SerializeForHash(h)
-
-	return
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 // VerifyRevocationSignature returns nil iff sig is a valid signature, made by this
 // public key.
 func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) {
-<<<<<<< HEAD
 	preparedHash, err := sig.PrepareVerify()
 	if err != nil {
 		return err
	}
@@ -1159,27 +960,16 @@ func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) {
 		return err
 	}
 	return pk.VerifySignature(preparedHash, sig)
-=======
-	h, err := keyRevocationHash(pk, sig.Hash)
-	if err != nil {
-		return err
-	}
-	return pk.VerifySignature(h, sig)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 // VerifySubkeyRevocationSignature returns nil iff sig is a valid subkey revocation signature,
 // made by this public key, of signed.
 func (pk *PublicKey) VerifySubkeyRevocationSignature(sig *Signature, signed *PublicKey) (err error) {
-<<<<<<< HEAD
 	preparedHash, err := sig.PrepareVerify()
 	if err != nil {
 		return err
 	}
 	h, err := keySignatureHash(pk, signed, preparedHash)
-=======
-	h, err := keySignatureHash(pk, signed, sig.Hash)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return err
 	}
@@ -1188,7 +978,6 @@ func (pk *PublicKey) VerifySubkeyRevocationSignature(sig *Signature, signed *Pub
 
 // userIdSignatureHash returns a Hash of the message that needs to be signed
 // to assert that pk is a valid key for id.
-<<<<<<< HEAD
 func userIdSignatureHash(id string, pk *PublicKey, h hash.Hash) (err error) {
 
 	// RFC 4880, section 5.2.4
@@ -1198,17 +987,6 @@ func userIdSignatureHash(id string, pk *PublicKey, h hash.Hash) (err error) {
 	if err := pk.serializeWithoutHeaders(h); err != nil {
 		return err
 	}
-=======
-func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) {
-	if !hashFunc.Available() {
-		return nil, errors.UnsupportedError("hash function")
-	}
-	h = hashFunc.New()
-
-	// RFC 4880, section 5.2.4
-	pk.SerializeSignaturePrefix(h)
-	pk.serializeWithoutHeaders(h)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	var buf [5]byte
 	buf[0] = 0xb4
@@ -1219,7 +997,6 @@ func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash
 	h.Write(buf[:])
 	h.Write([]byte(id))
 
-<<<<<<< HEAD
 	return nil
 }
 
@@ -1240,15 +1017,11 @@ func (pk *PublicKey) VerifyUserIdHashTag(id string, sig *Signature) (err error)
 		return err
 	}
 	return VerifyHashTag(preparedHash, sig)
-=======
-	return
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 // VerifyUserIdSignature returns nil iff sig is a valid signature, made by this
 // public key, that id is the identity of pub.
 func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) {
-<<<<<<< HEAD
 	h, err := sig.PrepareVerify()
 	if err != nil {
 		return err
 	}
@@ -1269,12 +1042,6 @@ func (pk *PublicKey) VerifyDirectKeySignature(sig *Signature) (err error) {
 	if err := directKeySignatureHash(pk, h); err != nil {
 		return err
 	}
-=======
-	h, err := userIdSignatureHash(id, pub, sig.Hash)
-	if err != nil {
-		return err
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return pk.VerifySignature(h, sig)
 }
 
@@ -1305,7 +1072,6 @@ func (pk *PublicKey) BitLength() (bitLength uint16, err error) {
 		bitLength = pk.p.BitLength()
 	case PubKeyAlgoEdDSA:
 		bitLength = pk.p.BitLength()
-<<<<<<< HEAD
 	case PubKeyAlgoX25519:
 		bitLength = x25519.KeySize * 8
 	case PubKeyAlgoX448:
@@ -1314,15 +1080,12 @@
 		bitLength = ed25519.PublicKeySize * 8
 	case PubKeyAlgoEd448:
 		bitLength = ed448.PublicKeySize * 8
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	default:
 		err = errors.InvalidArgumentError("bad public-key algorithm")
 	}
 	return
 }
 
-<<<<<<< HEAD
 // Curve returns the used elliptic curve of this public key.
 // Returns an error if no elliptic curve is used.
 func (pk *PublicKey) Curve() (curve Curve, err error) {
@@ -1347,21 +1110,11 @@ func (pk *PublicKey) Curve() (curve Curve, err error) {
 // expired or is created in the future.
 func (pk *PublicKey) KeyExpired(sig *Signature, currentTime time.Time) bool {
 	if pk.CreationTime.Unix() > currentTime.Unix() {
-=======
-// KeyExpired returns whether sig is a self-signature of a key that has
-// expired or is created in the future.
-func (pk *PublicKey) KeyExpired(sig *Signature, currentTime time.Time) bool {
-	if pk.CreationTime.After(currentTime) {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		return true
 	}
 	if sig.KeyLifetimeSecs == nil || *sig.KeyLifetimeSecs == 0 {
 		return false
 	}
 	expiry := pk.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second)
-<<<<<<< HEAD
 	return currentTime.Unix() > expiry.Unix()
-=======
-	return currentTime.After(expiry)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go
index 3739a44f5b..dd84092392 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go
@@ -10,15 +10,12 @@ import (
 	"github.com/ProtonMail/go-crypto/openpgp/errors"
 )
 
-<<<<<<< HEAD
 type PacketReader interface {
 	Next() (p Packet, err error)
 	Push(reader io.Reader) (err error)
 	Unread(p Packet)
 }
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // Reader reads packets from an io.Reader and allows packets to be 'unread' so
 // that they result from the next call to Next.
 type Reader struct {
@@ -35,7 +32,6 @@ type Reader struct {
 const maxReaders = 32
 
 // Next returns the most recently unread Packet, or reads another packet from
-<<<<<<< HEAD
 // the top-most io.Reader. Unknown/unsupported/Marker packet types are skipped.
 func (r *Reader) Next() (p Packet, err error) {
 	for {
@@ -98,49 +94,19 @@ func (r *Reader) NextWithUnsupported() (p Packet, err error) {
 }
 
 func (r *Reader) read() (p Packet, err error) {
-=======
-// the top-most io.Reader. Unknown packet types are skipped.
-func (r *Reader) Next() (p Packet, err error) {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if len(r.q) > 0 {
 		p = r.q[len(r.q)-1]
 		r.q = r.q[:len(r.q)-1]
 		return
 	}
-<<<<<<< HEAD
-	for len(r.readers) > 0 {
-		p, err = Read(r.readers[len(r.readers)-1])
-=======
-
 	for len(r.readers) > 0 {
 		p, err = Read(r.readers[len(r.readers)-1])
-		if err == nil {
-			return
-		}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		if err == io.EOF {
 			r.readers = r.readers[:len(r.readers)-1]
 			continue
 		}
-<<<<<<< HEAD
 		return p, err
 	}
-=======
-		// TODO: Add strict mode that rejects unknown packets, instead of ignoring them.
-		if _, ok := err.(errors.UnknownPacketTypeError); ok {
-			continue
-		}
-		if _, ok := err.(errors.UnsupportedError); ok {
-			switch p.(type) {
-			case *SymmetricallyEncrypted, *AEADEncrypted, *Compressed, *LiteralData:
-				return nil, err
-			}
-			continue
-		}
-		return nil, err
-	}
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return nil, io.EOF
 }
 
@@ -168,7 +134,6 @@ func NewReader(r io.Reader) *Reader {
 		readers: []io.Reader{r},
 	}
 }
-<<<<<<< HEAD
 
 // CheckReader is similar to Reader but additionally
 // uses the pushdown automata to verify the read packet sequence.
@@ -242,5 +207,3 @@ func NewCheckReader(r io.Reader) *CheckReader {
 		fullyRead: false,
 	}
 }
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go
index c69b41fc0a..3a4b366d87 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go
@@ -8,26 +8,17 @@ import (
 	"bytes"
 	"crypto"
 	"crypto/dsa"
-<<<<<<< HEAD
 	"encoding/asn1"
 	"encoding/binary"
 	"hash"
 	"io"
 	"math/big"
-=======
-	"encoding/binary"
-	"hash"
-	"io"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"strconv"
 	"time"
 
 	"github.com/ProtonMail/go-crypto/openpgp/ecdsa"
-<<<<<<< HEAD
 	"github.com/ProtonMail/go-crypto/openpgp/ed25519"
 	"github.com/ProtonMail/go-crypto/openpgp/ed448"
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"github.com/ProtonMail/go-crypto/openpgp/eddsa"
 	"github.com/ProtonMail/go-crypto/openpgp/errors"
 	"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
@@ -35,12 +26,8 @@ import (
 )
 
 const (
-<<<<<<< HEAD
 	// First octet of key flags.
 	// See RFC 9580, section 5.2.3.29 for details.
-=======
-	// See RFC 4880, section 5.2.3.21 for details.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	KeyFlagCertify = 1 << iota
 	KeyFlagSign
 	KeyFlagEncryptCommunications
@@ -51,7 +38,6 @@ const (
 	KeyFlagGroupKey
 )
 
-<<<<<<< HEAD
 const (
 	// First octet of keyserver preference flags.
 	// See RFC 9580, section 5.2.3.25 for details.
@@ -68,20 +54,14 @@ const (
 const SaltNotationName = "salt@notations.openpgpjs.org"
 
 // Signature represents a signature. See RFC 9580, section 5.2.
-=======
-// Signature represents a signature. See RFC 4880, section 5.2.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 type Signature struct {
 	Version    int
 	SigType    SignatureType
 	PubKeyAlgo PublicKeyAlgorithm
 	Hash       crypto.Hash
-<<<<<<< HEAD
 	// salt contains a random salt value for v6 signatures
 	// See RFC 9580 Section 5.2.4.
 	salt []byte
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	// HashSuffix is extra data that is hashed in after the signed data.
 	HashSuffix []byte
@@ -100,10 +80,7 @@ type Signature struct {
 	DSASigR, DSASigS     encoding.Field
 	ECDSASigR, ECDSASigS encoding.Field
 	EdDSASigR, EdDSASigS encoding.Field
-<<<<<<< HEAD
 	EdSig                []byte
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	// rawSubpackets contains the unparsed subpackets, in order.
 	rawSubpackets []outputSubpacket
@@ -119,25 +96,17 @@ type Signature struct {
 	SignerUserId       *string
 	IsPrimaryId        *bool
 	Notations          []*Notation
-<<<<<<< HEAD
 	IntendedRecipients []*Recipient
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	// TrustLevel and TrustAmount can be set by the signer to assert that
 	// the key is not only valid but also trustworthy at the specified
 	// level.
-<<<<<<< HEAD
 	// See RFC 9580, section 5.2.3.21 for details.
-=======
-	// See RFC 4880, section 5.2.3.13 for details.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	TrustLevel  TrustLevel
 	TrustAmount TrustAmount
 
 	// TrustRegularExpression can be used in conjunction with trust Signature
 	// packets to limit the scope of the trust that is extended.
-<<<<<<< HEAD
 	// See RFC 9580, section 5.2.3.22 for details.
 	TrustRegularExpression *string
 
@@ -158,27 +127,11 @@ type Signature struct {
 	// FlagsValid is set if any flags were given. See RFC 9580, section
 	// 5.2.3.29 for details.
-=======
-	// See RFC 4880, section 5.2.3.14 for details.
-	TrustRegularExpression *string
-
-	// PolicyURI can be set to the URI of a document that describes the
-	// policy under which the signature was issued. See RFC 4880, section
-	// 5.2.3.20 for details.
-	PolicyURI string
-
-	// FlagsValid is set if any flags were given. See RFC 4880, section
-	// 5.2.3.21 for details.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	FlagsValid bool
 	FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage,
 	FlagSplitKey, FlagAuthenticate, FlagGroupKey bool
 
 	// RevocationReason is set if this signature has been revoked.
-<<<<<<< HEAD
 	// See RFC 9580, section 5.2.3.31 for details.
-=======
-	// See RFC 4880, section 5.2.3.23 for details.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	RevocationReason     *ReasonForRevocation
 	RevocationReasonText string
 
@@ -195,7 +148,6 @@ type Signature struct {
 	outSubpackets []outputSubpacket
 }
 
-<<<<<<< HEAD
 // VerifiableSignature internally keeps state if the
 // the signature has been verified before.
 type VerifiableSignature struct {
@@ -221,16 +173,10 @@ func (sig *Signature) Salt() []byte {
 func (sig *Signature) parse(r io.Reader) (err error) {
 	// RFC 9580, section 5.2.3
 	var buf [7]byte
-=======
-func (sig *Signature) parse(r io.Reader) (err error) {
-	// RFC 4880, section 5.2.3
-	var buf [5]byte
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	_, err = readFull(r, buf[:1])
 	if err != nil {
 		return
 	}
-<<<<<<< HEAD
 	sig.Version = int(buf[0])
 	if sig.Version != 4 && sig.Version != 5 && sig.Version != 6 {
 		err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0])))
@@ -246,25 +192,13 @@ func (sig *Signature) parse(r io.Reader) (err error) {
 	} else {
 		_, err = readFull(r, buf[:5])
 	}
-=======
-	if buf[0] != 4 && buf[0] != 5 {
-		err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0])))
-		return
-	}
-	sig.Version = int(buf[0])
-	_, err = readFull(r, buf[:5])
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return
 	}
 	sig.SigType = SignatureType(buf[0])
 	sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1])
 	switch sig.PubKeyAlgo {
-<<<<<<< HEAD
 	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA, PubKeyAlgoEd25519, PubKeyAlgoEd448:
-=======
-	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA:
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	default:
 		err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo)))
 		return
@@ -282,7 +216,6 @@ func (sig *Signature) parse(r io.Reader) (err error) {
 		return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2])))
 	}
 
-<<<<<<< HEAD
 	var hashedSubpacketsLength int
 	if sig.Version == 6 {
 		// For a v6 signature, a four-octet length is used.
@@ -294,9 +227,6 @@ func (sig *Signature) parse(r io.Reader) (err error) {
 	} else {
 		hashedSubpacketsLength = int(buf[3])<<8 | int(buf[4])
 	}
-=======
-	hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4])
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	hashedSubpackets := make([]byte, hashedSubpacketsLength)
 	_, err = readFull(r, hashedSubpackets)
 	if err != nil {
@@ -312,7 +242,6 @@ func (sig *Signature) parse(r io.Reader) (err error) {
 		return
 	}
 
-<<<<<<< HEAD
 	if sig.Version == 6 {
 		_, err = readFull(r, buf[:4])
 	} else {
@@ -328,13 +257,6 @@ func (sig *Signature) parse(r io.Reader) (err error) {
 	} else {
 		unhashedSubpacketsLength = uint32(buf[0])<<8 | uint32(buf[1])
 	}
-=======
-	_, err = readFull(r, buf[:2])
-	if err != nil {
-		return
-	}
-	unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1])
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	unhashedSubpackets := make([]byte, unhashedSubpacketsLength)
 	_, err = readFull(r, unhashedSubpackets)
 	if err != nil {
@@ -350,7 +272,6 @@ func (sig *Signature) parse(r io.Reader) (err error) {
 		return
 	}
 
-<<<<<<< HEAD
 	if sig.Version == 6 {
 		// Only for v6 signatures, a variable-length field containing the salt
 		_, err = readFull(r, buf[:1])
@@ -375,8 +296,6 @@ func (sig *Signature) parse(r io.Reader) (err error) {
 		sig.salt = salt
 	}
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	switch sig.PubKeyAlgo {
 	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
 		sig.RSASignature = new(encoding.MPI)
@@ -407,7 +326,6 @@ func (sig *Signature) parse(r io.Reader) (err error) {
 		if _, err = sig.EdDSASigS.ReadFrom(r); err != nil {
 			return
 		}
-<<<<<<< HEAD
 	case PubKeyAlgoEd25519:
 		sig.EdSig, err = ed25519.ReadSignature(r)
 		if err != nil {
@@ -418,8 +336,6 @@ func (sig *Signature) parse(r io.Reader) (err error) {
 		if err != nil {
 			return
 		}
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	default:
 		panic("unreachable")
 	}
@@ -427,11 +343,7 @@ func (sig *Signature) parse(r io.Reader) (err error) {
 }
 
 // parseSignatureSubpackets parses subpackets of the main signature packet. See
-<<<<<<< HEAD
 // RFC 9580, section 5.2.3.1.
-=======
-// RFC 4880, section 5.2.3.1.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) {
 	for len(subpackets) > 0 {
 		subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed)
@@ -452,10 +364,7 @@ type signatureSubpacketType uint8
 const (
 	creationTimeSubpacket        signatureSubpacketType = 2
 	signatureExpirationSubpacket signatureSubpacketType = 3
-<<<<<<< HEAD
 	exportableCertSubpacket      signatureSubpacketType = 4
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	trustSubpacket               signatureSubpacketType = 5
 	regularExpressionSubpacket   signatureSubpacketType = 6
 	keyExpirationSubpacket       signatureSubpacketType = 9
@@ -464,11 +373,8 @@ const (
 	notationDataSubpacket        signatureSubpacketType = 20
 	prefHashAlgosSubpacket       signatureSubpacketType = 21
 	prefCompressionSubpacket     signatureSubpacketType = 22
-<<<<<<< HEAD
 	keyserverPrefsSubpacket      signatureSubpacketType = 23
 	prefKeyserverSubpacket       signatureSubpacketType = 24
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	primaryUserIdSubpacket       signatureSubpacketType = 25
 	policyUriSubpacket           signatureSubpacketType = 26
 	keyFlagsSubpacket            signatureSubpacketType = 27
@@ -477,20 +383,13 @@ const (
 	featuresSubpacket            signatureSubpacketType = 30
 	embeddedSignatureSubpacket   signatureSubpacketType = 32
 	issuerFingerprintSubpacket   signatureSubpacketType = 33
-<<<<<<< HEAD
 	intendedRecipientSubpacket   signatureSubpacketType = 35
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	prefCipherSuitesSubpacket    signatureSubpacketType = 39
 )
 
 // parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1.
 func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) {
-<<<<<<< HEAD
 	// RFC 9580, section 5.2.3.7
-=======
-	// RFC 4880, section 5.2.3.1
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	var (
 		length     uint32
 		packetType signatureSubpacketType
@@ -548,35 +447,24 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
 		t := binary.BigEndian.Uint32(subpacket)
 		sig.CreationTime = time.Unix(int64(t), 0)
 	case signatureExpirationSubpacket:
-<<<<<<< HEAD
 		// Signature expiration time, section 5.2.3.18
-=======
-		// Signature expiration time, section 5.2.3.10
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		if len(subpacket) != 4 {
 			err = errors.StructuralError("expiration subpacket with bad length")
 			return
 		}
 		sig.SigLifetimeSecs = new(uint32)
 		*sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket)
-<<<<<<< HEAD
 	case exportableCertSubpacket:
 		if subpacket[0] == 0 {
 			err = errors.UnsupportedError("signature with non-exportable certification")
 			return
 		}
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	case trustSubpacket:
 		if len(subpacket) != 2 {
 			err = errors.StructuralError("trust subpacket with bad length")
 			return
 		}
-<<<<<<< HEAD
 		// Trust level and amount, section 5.2.3.21
-=======
-		// Trust level and amount, section 5.2.3.13
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		sig.TrustLevel = TrustLevel(subpacket[0])
 		sig.TrustAmount = TrustAmount(subpacket[1])
 	case regularExpressionSubpacket:
@@ -584,11 +472,7 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
 			err = errors.StructuralError("regexp subpacket with bad length")
 			return
 		}
-<<<<<<< HEAD
 		// Trust regular expression, section 5.2.3.22
-=======
-		// Trust regular expression, section 5.2.3.14
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		// RFC specifies the string should be null-terminated; remove a null byte from the end
 		if subpacket[len(subpacket)-1] != 0x00 {
 			err = errors.StructuralError("expected regular expression to be null-terminated")
 			return
 		}
		trustRegularExpression := string(subpacket[:len(subpacket)-1])
 		sig.TrustRegularExpression = &trustRegularExpression
 	case keyExpirationSubpacket:
-<<<<<<< HEAD
 		// Key expiration time, section 5.2.3.13
-=======
-		// Key expiration time, section 5.2.3.6
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		if len(subpacket) != 4 {
 			err = errors.StructuralError("key expiration subpacket with bad length")
 			return
 		}
 		sig.KeyLifetimeSecs = new(uint32)
 		*sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket)
 	case prefSymmetricAlgosSubpacket:
-<<<<<<< HEAD
 		// Preferred symmetric algorithms, section 5.2.3.14
 		sig.PreferredSymmetric = make([]byte, len(subpacket))
 		copy(sig.PreferredSymmetric, subpacket)
@@ -617,34 +496,18 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
 		// Issuer, section 5.2.3.12
 		if sig.Version > 4 && isHashed {
 			err = errors.StructuralError("issuer subpacket found in v6 key")
-=======
-		// Preferred symmetric algorithms, section 5.2.3.7
-		sig.PreferredSymmetric = make([]byte, len(subpacket))
-		copy(sig.PreferredSymmetric, subpacket)
-	case issuerSubpacket:
-		// Issuer, section 5.2.3.5
-		if sig.Version > 4 {
-			err = errors.StructuralError("issuer subpacket found in v5 key")
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			return
 		}
 		if len(subpacket) != 8 {
 			err = errors.StructuralError("issuer subpacket with bad length")
 			return
 		}
-<<<<<<< HEAD
 		if sig.Version <= 4 {
 			sig.IssuerKeyId = new(uint64)
 			*sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket)
 		}
 	case notationDataSubpacket:
 		// Notation data, section 5.2.3.24
-=======
-		sig.IssuerKeyId = new(uint64)
-		*sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket)
-	case notationDataSubpacket:
-		// Notation data, section 5.2.3.16
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		if len(subpacket) < 8 {
 			err = errors.StructuralError("notation data subpacket with bad length")
 			return
 		}
@@ -666,7 +529,6 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
 
 		sig.Notations = append(sig.Notations, &notation)
 	case prefHashAlgosSubpacket:
-<<<<<<< HEAD
 		// Preferred hash algorithms, section 5.2.3.16
 		sig.PreferredHash = make([]byte, len(subpacket))
 		copy(sig.PreferredHash, subpacket)
@@ -688,17 +550,6 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
 		sig.PreferredKeyserver = string(subpacket)
 	case primaryUserIdSubpacket:
 		// Primary User ID, section 5.2.3.27
-=======
-		// Preferred hash algorithms, section 5.2.3.8
-		sig.PreferredHash = make([]byte, len(subpacket))
-		copy(sig.PreferredHash, subpacket)
-	case prefCompressionSubpacket:
-		// Preferred compression algorithms, section 5.2.3.9
-		sig.PreferredCompression = make([]byte, len(subpacket))
-		copy(sig.PreferredCompression, subpacket)
-	case primaryUserIdSubpacket:
-		// Primary User ID, section 5.2.3.19
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		if len(subpacket) != 1 {
 			err = errors.StructuralError("primary user id subpacket with bad length")
 			return
 		}
@@ -708,20 +559,11 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
 			*sig.IsPrimaryId = true
 		}
 	case keyFlagsSubpacket:
-<<<<<<< HEAD
 		// Key flags, section 5.2.3.29
 		sig.FlagsValid = true
 		if len(subpacket) == 0 {
 			return
 		}
-=======
-		// Key flags, section 5.2.3.21
-		if len(subpacket) == 0 {
-			err = errors.StructuralError("empty key flags subpacket")
-			return
-		}
-		sig.FlagsValid = true
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		if subpacket[0]&KeyFlagCertify != 0 {
 			sig.FlagCertify = true
 		}
@@ -747,27 +589,16 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
 		userId := string(subpacket)
 		sig.SignerUserId = &userId
 	case reasonForRevocationSubpacket:
-<<<<<<< HEAD
 		// Reason For Revocation, section 5.2.3.31
-=======
-		// Reason For Revocation, section 5.2.3.23
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		if len(subpacket) == 0 {
 			err = errors.StructuralError("empty revocation reason subpacket")
 			return
 		}
 		sig.RevocationReason = new(ReasonForRevocation)
-<<<<<<< HEAD
 		*sig.RevocationReason = NewReasonForRevocation(subpacket[0])
 		sig.RevocationReasonText = string(subpacket[1:])
 	case featuresSubpacket:
 		// Features subpacket, section 5.2.3.32 specifies a very general
-=======
-		*sig.RevocationReason = ReasonForRevocation(subpacket[0])
-		sig.RevocationReasonText = string(subpacket[1:])
-	case featuresSubpacket:
-		// Features subpacket, section 5.2.3.24 specifies a very general
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		// mechanism for OpenPGP implementations to signal support for new
 		// features.
 		if len(subpacket) > 0 {
@@ -781,23 +612,13 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
 		}
 	case embeddedSignatureSubpacket:
 		// Only usage is in signatures that cross-certify
-<<<<<<< HEAD
 		// signing subkeys. section 5.2.3.34 describes the
-=======
-		// signing subkeys. section 5.2.3.26 describes the
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		// format, with its usage described in section 11.1
 		if sig.EmbeddedSignature != nil {
 			err = errors.StructuralError("Cannot have multiple embedded signatures")
 			return
 		}
 		sig.EmbeddedSignature = new(Signature)
-<<<<<<< HEAD
-=======
-		// Embedded signatures are required to be v4 signatures see
-		// section 12.1. However, we only parse v4 signatures in this
-		// file anyway.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil {
 			return nil, err
 		}
@@ -805,11 +626,7 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
 			return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType)))
 		}
 	case policyUriSubpacket:
-<<<<<<< HEAD
 		// Policy URI, section 5.2.3.28
-=======
-		// Policy URI, section 5.2.3.20
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		sig.PolicyURI = string(subpacket)
 	case issuerFingerprintSubpacket:
 		if len(subpacket) == 0 {
@@ -817,26 +634,17 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
 			return
 		}
 		v, l := subpacket[0], len(subpacket[1:])
-<<<<<<< HEAD
 		if v >= 5 && l != 32 || v < 5 && l != 20 {
-=======
-		if v == 5 && l != 32 || v != 5 && l != 20 {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			return nil, errors.StructuralError("bad fingerprint length")
 		}
 		sig.IssuerFingerprint = make([]byte, l)
 		copy(sig.IssuerFingerprint, subpacket[1:])
 		sig.IssuerKeyId = new(uint64)
-<<<<<<< HEAD
 		if v >= 5 {
-=======
-		if v == 5 {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			*sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket[1:9])
 		} else {
 			*sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket[13:21])
 		}
-<<<<<<< HEAD
 	case intendedRecipientSubpacket:
 		// Intended Recipient Fingerprint, section 5.2.3.36
 		if len(subpacket) < 1 {
@@ -851,11 +659,6 @@ func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (r
 		sig.IntendedRecipients = append(sig.IntendedRecipients, &Recipient{int(version), fingerprint})
 	case prefCipherSuitesSubpacket:
 		// Preferred AEAD cipher suites, section 5.2.3.15
-=======
-	case prefCipherSuitesSubpacket:
-		// Preferred AEAD cipher suites
-		// See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#name-preferred-aead-ciphersuites
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		if len(subpacket)%2 != 0 {
 			err = errors.StructuralError("invalid aead cipher suite length")
 			return
 		}
@@ -897,7 +700,6 @@ func (sig *Signature) CheckKeyIdOrFingerprint(pk *PublicKey) bool {
 	return sig.IssuerKeyId != nil && *sig.IssuerKeyId == pk.KeyId
 }
 
-<<<<<<< HEAD
 func (sig *Signature) CheckKeyIdOrFingerprintExplicit(fingerprint []byte, keyId uint64) bool {
 	if sig.IssuerFingerprint != nil && len(sig.IssuerFingerprint) >= 20 && fingerprint != nil {
 		return bytes.Equal(sig.IssuerFingerprint, fingerprint)
@@ -908,11 +710,6 @@ func (sig *Signature) CheckKeyIdOrFingerprintExplicit(fingerprint []byte, keyId
 
 // serializeSubpacketLength marshals the given length into to.
 func serializeSubpacketLength(to []byte, length int) int {
 	// RFC 9580, Section 4.2.1.
-=======
-// serializeSubpacketLength marshals the given length into to.
-func serializeSubpacketLength(to []byte, length int) int {
-	// RFC 4880, Section 4.2.2.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if length < 192 {
 		to[0] = byte(length)
 		return 1
 	}
@@ -958,31 +755,19 @@ func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) {
 			to = to[n:]
 		}
 	}
-<<<<<<< HEAD
-=======
-	return
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 // SigExpired returns whether sig is a signature that has expired or is created
 // in the future.
 func (sig *Signature) SigExpired(currentTime time.Time) bool {
-<<<<<<< HEAD
 	if sig.CreationTime.Unix() > currentTime.Unix() {
-=======
-	if sig.CreationTime.After(currentTime) {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		return true
 	}
 	if sig.SigLifetimeSecs == nil || *sig.SigLifetimeSecs == 0 {
 		return false
 	}
 	expiry := sig.CreationTime.Add(time.Duration(*sig.SigLifetimeSecs) * time.Second)
-<<<<<<< HEAD
 	return currentTime.Unix() > expiry.Unix()
-=======
-	return currentTime.After(expiry)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 // buildHashSuffix constructs the HashSuffix member of sig in preparation for signing.
@@ -1006,7 +791,6 @@ func (sig *Signature) buildHashSuffix(hashedSubpackets []byte) (err error) {
 		uint8(sig.SigType),
 		uint8(sig.PubKeyAlgo),
 		uint8(hashId),
-<<<<<<< HEAD
 	})
 	hashedSubpacketsLength := len(hashedSubpackets)
 	if sig.Version == 6 {
@@ -1029,27 +813,14 @@ func (sig *Signature) buildHashSuffix(hashedSubpackets []byte) (err error) {
 	var l uint64 = uint64(lenPrefix + len(hashedSubpackets))
 	if sig.Version == 5 {
 		// v5 case
-=======
-		uint8(len(hashedSubpackets) >> 8),
-		uint8(len(hashedSubpackets)),
-	})
-	hashedFields.Write(hashedSubpackets)
-
-	var l uint64 = uint64(6 + len(hashedSubpackets))
-	if sig.Version == 5 {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		hashedFields.Write([]byte{0x05, 0xff})
 		hashedFields.Write([]byte{
 			uint8(l >> 56), uint8(l >> 48), uint8(l >> 40), uint8(l >> 32),
 			uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l),
 		})
 	} else {
-<<<<<<< HEAD
 		// v4 and v6 case
 		hashedFields.Write([]byte{byte(sig.Version), 0xff})
-=======
-		hashedFields.Write([]byte{0x04, 0xff})
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		hashedFields.Write([]byte{
 			uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l),
 		})
@@ -1077,7 +848,6 @@ func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) {
 	return
 }
 
-<<<<<<< HEAD
 // PrepareSign must be called to create a hash object before Sign for v6 signatures.
 // The created hash object initially hashes a randomly generated salt
 // as required by v6 signatures. The generated salt is stored in sig. If the signature is not v6,
@@ -1139,8 +909,6 @@ func (sig *Signature) PrepareVerify() (hash.Hash, error) {
 	return hasher, nil
 }
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // Sign signs a message with a private key. The hash, h, must contain
 // the hash of the message to be signed and will be mutated by this function.
 // On success, the signature is stored in sig. Call Serialize to write it out.
@@ -1151,7 +919,6 @@ func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err e
 	}
 	sig.Version = priv.PublicKey.Version
 	sig.IssuerFingerprint = priv.PublicKey.Fingerprint
-<<<<<<< HEAD
 	if sig.Version < 6 && config.RandomizeSignaturesViaNotation() {
 		sig.removeNotationsWithName(SaltNotationName)
 		salt, err := SignatureSaltForHash(sig.Hash, config.Random())
@@ -1166,8 +933,6 @@ func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err e
 		}
 		sig.Notations = append(sig.Notations, &notation)
 	}
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	sig.outSubpackets, err = sig.buildSubpackets(priv.PublicKey)
 	if err != nil {
 		return err
 	}
@@ -1197,7 +962,6 @@ func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err e
 			sig.DSASigS = new(encoding.MPI).SetBig(s)
 		}
 	case PubKeyAlgoECDSA:
-<<<<<<< HEAD
 		var r, s *big.Int
 		if sk, ok := priv.PrivateKey.(*ecdsa.PrivateKey); ok {
 			r, s, err = ecdsa.Sign(config.Random(), sk, digest)
 		} else {
@@ -1208,10 +972,6 @@ func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err e
				r, s, err = unwrapECDSASig(b)
 			}
 		}
-=======
-		sk := priv.PrivateKey.(*ecdsa.PrivateKey)
-		r, s, err := ecdsa.Sign(config.Random(), sk, digest)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 		if err == nil {
 			sig.ECDSASigR = new(encoding.MPI).SetBig(r)
@@ -1224,7 +984,6 @@ func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err e
 			sig.EdDSASigR = encoding.NewMPI(r)
 			sig.EdDSASigS = encoding.NewMPI(s)
 		}
-<<<<<<< HEAD
 	case PubKeyAlgoEd25519:
 		sk := priv.PrivateKey.(*ed25519.PrivateKey)
 		signature, err := ed25519.Sign(sk, digest)
@@ -1237,8 +996,6 @@ func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err e
 		if err == nil {
 			sig.EdSig = signature
 		}
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	default:
 		err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo)))
 	}
@@ -1246,7 +1003,6 @@ func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err e
 	return
 }
 
-<<<<<<< HEAD
 // unwrapECDSASig parses the two integer components of an ASN.1-encoded ECDSA signature.
 func unwrapECDSASig(b []byte) (r, s *big.Int, err error) {
 	var ecsdaSig struct {
@@ -1259,8 +1015,6 @@ func unwrapECDSASig(b []byte) (r, s *big.Int, err error) {
 	return ecsdaSig.R, ecsdaSig.S, nil
 }
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // SignUserId computes a signature from priv, asserting that pub is a valid
 // key for the identity id. On success, the signature is stored in sig. Call
 // Serialize to write it out.
@@ -1269,7 +1023,6 @@ func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, co
 	if priv.Dummy() {
 		return errors.ErrDummyPrivateKey("dummy key found")
 	}
-<<<<<<< HEAD
 	prepareHash, err := sig.PrepareSign(config)
 	if err != nil {
 		return err
 	}
@@ -1296,13 +1049,6 @@ func (sig *Signature) SignDirectKeyBinding(pub *PublicKey, priv *PrivateKey, con
 		return err
 	}
 	return sig.Sign(prepareHash, priv, config)
-=======
-	h, err := userIdSignatureHash(id, pub, sig.Hash)
-	if err != nil {
-		return err
-	}
-	return sig.Sign(h, priv, config)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 // CrossSignKey computes a signature from signingKey on pub hashed using hashKey. On success,
@@ -1310,15 +1056,11 @@ func (sig *Signature) SignDirectKeyBinding(pub *PublicKey, priv *PrivateKey, con
 // If config is nil, sensible defaults will be used.
 func (sig *Signature) CrossSignKey(pub *PublicKey, hashKey *PublicKey, signingKey *PrivateKey,
 	config *Config) error {
-<<<<<<< HEAD
 	prepareHash, err := sig.PrepareSign(config)
 	if err != nil {
 		return err
 	}
 	h, err := keySignatureHash(hashKey, pub, prepareHash)
-=======
-	h, err := keySignatureHash(hashKey, pub, sig.Hash)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return err
 	}
@@ -1332,15 +1074,11 @@ func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config)
 	if priv.Dummy() {
 		return errors.ErrDummyPrivateKey("dummy key found")
 	}
-<<<<<<< HEAD
 	prepareHash, err := sig.PrepareSign(config)
 	if err != nil {
 		return err
 	}
 	h, err := keySignatureHash(&priv.PublicKey, pub, prepareHash)
-=======
-	h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return err
 	}
@@ -1351,7 +1089,6 @@ func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config)
 // stored in sig. Call Serialize to write it out.
 // If config is nil, sensible defaults will be used.
 func (sig *Signature) RevokeKey(pub *PublicKey, priv *PrivateKey, config *Config) error {
-<<<<<<< HEAD
 	prepareHash, err := sig.PrepareSign(config)
 	if err != nil {
 		return err
 	}
@@ -1360,13 +1097,6 @@ func (sig *Signature) RevokeKey(pub *PublicKey, priv *PrivateKey, config *Config
 		return err
 	}
 	return sig.Sign(prepareHash, priv, config)
-=======
-	h, err := keyRevocationHash(pub, sig.Hash)
-	if err != nil {
-		return err
-	}
-	return sig.Sign(h, priv, config)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 // RevokeSubkey computes a subkey revocation signature of pub using priv.
@@ -1383,11 +1113,7 @@ func (sig *Signature) Serialize(w io.Writer) (err error) {
 	if len(sig.outSubpackets) == 0 {
 		sig.outSubpackets = sig.rawSubpackets
 	}
-<<<<<<< HEAD
 	if sig.RSASignature == nil && sig.DSASigR == nil && sig.ECDSASigR == nil && sig.EdDSASigR == nil && sig.EdSig == nil {
-=======
-	if sig.RSASignature == nil && sig.DSASigR == nil && sig.ECDSASigR == nil && sig.EdDSASigR == nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize")
 	}
 
@@ -1404,18 +1130,14 @@ func (sig *Signature) Serialize(w io.Writer) (err error) {
 	case PubKeyAlgoEdDSA:
 		sigLength = int(sig.EdDSASigR.EncodedLength())
 		sigLength += int(sig.EdDSASigS.EncodedLength())
-<<<<<<< HEAD
 	case PubKeyAlgoEd25519:
 		sigLength = ed25519.SignatureSize
 	case PubKeyAlgoEd448:
 		sigLength = ed448.SignatureSize
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	default:
 		panic("impossible")
 	}
 
-<<<<<<< HEAD
 	hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true)
 	unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false)
 	length := 4 + /* length of version|signature type|public-key algorithm|hash algorithm */
@@ -1426,14 +1148,6 @@ func (sig *Signature) Serialize(w io.Writer) (err error) {
 		length += 4 + /* the two length fields are four-octet instead of two */
 			1 + /* salt length */
 			len(sig.salt) /* length salt */
-=======
-	unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false)
-	length := len(sig.HashSuffix) - 6 /* trailer not included */ +
-		2 /* length of unhashed subpackets */ + unhashedSubpacketsLen +
-		2 /* hash tag */ + sigLength
-	if sig.Version == 5 {
-		length -= 4 // eight-octet instead of four-octet big endian
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 	err = serializeHeader(w, packetTypeSignature, length)
 	if err != nil {
@@ -1447,7 +1161,6 @@ func (sig *Signature) Serialize(w io.Writer) (err error) {
 }
 
 func (sig *Signature) serializeBody(w io.Writer) (err error) {
-<<<<<<< HEAD
 	var fields []byte
 	if sig.Version == 6 {
 		// v6 signatures use 4 octets for length
@@ -1463,17 +1176,12 @@ func (sig *Signature) serializeBody(w io.Writer) (err error) {
 		fields = sig.HashSuffix[:6+hashedSubpacketsLen]
 	}
 
-=======
-	hashedSubpacketsLen := uint16(uint16(sig.HashSuffix[4])<<8) | uint16(sig.HashSuffix[5])
-	fields := sig.HashSuffix[:6+hashedSubpacketsLen]
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	_, err = w.Write(fields)
 	if err != nil {
 		return
 	}
 
 	unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false)
-<<<<<<< HEAD
 	var unhashedSubpackets []byte
 	if sig.Version == 6 {
 		unhashedSubpackets = make([]byte, 4+unhashedSubpacketsLen)
@@ -1488,12 +1196,6 @@ func (sig *Signature) serializeBody(w io.Writer) (err error) {
 		unhashedSubpackets[1] = byte(unhashedSubpacketsLen)
 		serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false)
 	}
-=======
-	unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen)
-	unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8)
-	unhashedSubpackets[1] = byte(unhashedSubpacketsLen)
-	serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	_, err = w.Write(unhashedSubpackets)
 	if err != nil {
@@ -1504,7 +1206,6 @@ func (sig *Signature) serializeBody(w io.Writer) (err error) {
 		return
 	}
 
-<<<<<<< HEAD
 	if sig.Version == 6 {
 		// write salt for v6 signatures
 		_, err = w.Write([]byte{uint8(len(sig.salt))})
@@ -1517,8 +1218,6 @@ func (sig *Signature) serializeBody(w io.Writer) (err error) {
 		}
 	}
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	switch sig.PubKeyAlgo {
 	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
 		_, err = w.Write(sig.RSASignature.EncodedBytes())
@@ -1537,13 +1236,10 @@ func (sig *Signature) serializeBody(w io.Writer) (err error) {
 			return
 		}
 		_, err = w.Write(sig.EdDSASigS.EncodedBytes())
-<<<<<<< HEAD
 	case PubKeyAlgoEd25519:
 		err = ed25519.WriteSignature(w, sig.EdSig)
 	case PubKeyAlgoEd448:
 		err = ed448.WriteSignature(w, sig.EdSig)
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	default:
 		panic("impossible")
 	}
@@ -1561,32 +1257,14 @@ type outputSubpacket struct {
 func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubpacket, err error) {
 	creationTime := make([]byte, 4)
 	binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix()))
-<<<<<<< HEAD
 	// Signature Creation Time
 	subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, true, creationTime})
 	// Signature Expiration Time
-=======
-	subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime})
-
-	if sig.IssuerKeyId != nil && sig.Version == 4 {
-		keyId := make([]byte, 8)
-		binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId)
-		subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId})
-	}
-	if sig.IssuerFingerprint != nil {
-		contents := append([]uint8{uint8(issuer.Version)}, sig.IssuerFingerprint...)
-		subpackets = append(subpackets, outputSubpacket{true, issuerFingerprintSubpacket, sig.Version == 5, contents})
-	}
-	if sig.SignerUserId != nil {
-		subpackets = append(subpackets, outputSubpacket{true, signerUserIdSubpacket, false, []byte(*sig.SignerUserId)})
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 {
 		sigLifetime := make([]byte, 4)
 		binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs)
 		subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime})
 	}
-<<<<<<< HEAD
 	// Trust Signature
 	if sig.TrustLevel != 0 {
 		subpackets = append(subpackets, outputSubpacket{true, trustSubpacket, true, []byte{byte(sig.TrustLevel), byte(sig.TrustAmount)}})
@@ -1654,11 +1332,6 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp
 	}
 	// Key Flags
 	// Key flags may only appear in self-signatures or certification signatures.
-=======
-
-	// Key flags may only appear in self-signatures or certification signatures.
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if sig.FlagsValid {
 		var flags byte
 		if sig.FlagCertify {
@@ -1682,7 +1355,6 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp
 		if sig.FlagGroupKey {
 			flags |= KeyFlagGroupKey
 		}
-<<<<<<< HEAD
 		subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, true, []byte{flags}})
 	}
 	// Signer's User ID
@@ -1696,24 +1368,6 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp
 			append([]uint8{uint8(*sig.RevocationReason)}, []uint8(sig.RevocationReasonText)...)})
 	}
 	// Features
-=======
-		subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}})
-	}
-
-	for _, notation := range sig.Notations {
-		subpackets = append(
-			subpackets,
-			outputSubpacket{
-				true,
-				notationDataSubpacket,
-				notation.IsCritical,
-				notation.getData(),
-			})
-	}
-
-	// The following subpackets may only appear in self-signatures.
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	var features = byte(0x00)
 	if sig.SEIPDv1 {
 		features |= 0x01
	}
@@ -1721,7 +1375,6 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp
 	if sig.SEIPDv2 {
 		features |= 0x08
 	}
-<<<<<<< HEAD
 	if features != 0x00 {
 		subpackets = append(subpackets, outputSubpacket{true, featuresSubpacket, false, []byte{features}})
 	}
@@ -1752,48 +1405,6 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp
 		})
 	}
 	// Preferred AEAD Ciphersuites
-=======
-
-	if features != 0x00 {
-		subpackets = append(subpackets, outputSubpacket{true, featuresSubpacket, false, []byte{features}})
-	}
-
-	if sig.TrustLevel != 0 {
-		subpackets = append(subpackets, outputSubpacket{true, trustSubpacket, true, []byte{byte(sig.TrustLevel), byte(sig.TrustAmount)}})
-	}
-
-	if sig.TrustRegularExpression != nil {
-		// RFC specifies the string should be null-terminated; add a null byte to the end
-		subpackets = append(subpackets, outputSubpacket{true, regularExpressionSubpacket, true, []byte(*sig.TrustRegularExpression + "\000")})
-	}
-
-	if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 {
-		keyLifetime := make([]byte, 4)
-		binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs)
-		subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime})
-	}
-
-	if sig.IsPrimaryId != nil && *sig.IsPrimaryId {
-		subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}})
-	}
-
-	if len(sig.PreferredSymmetric) > 0 {
-		subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric})
-	}
-
-	if len(sig.PreferredHash) > 0 {
-		subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash})
-	}
-
-	if len(sig.PreferredCompression) > 0 {
-		subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression})
-	}
-
-	if len(sig.PolicyURI) > 0 {
-		subpackets = append(subpackets, outputSubpacket{true, policyUriSubpacket, false, []uint8(sig.PolicyURI)})
-	}
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if len(sig.PreferredCipherSuites) > 0 {
 		serialized := make([]byte, len(sig.PreferredCipherSuites)*2)
 		for i, cipherSuite := range sig.PreferredCipherSuites {
@@ -1802,26 +1413,6 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp
 		}
 		subpackets = append(subpackets, outputSubpacket{true, prefCipherSuitesSubpacket, false, serialized})
 	}
-<<<<<<< HEAD
-=======
-
-	// Revocation reason appears only in revocation signatures and is serialized as per section 5.2.3.23.
-	if sig.RevocationReason != nil {
-		subpackets = append(subpackets, outputSubpacket{true, reasonForRevocationSubpacket, true,
-			append([]uint8{uint8(*sig.RevocationReason)}, []uint8(sig.RevocationReasonText)...)})
-	}
-
-	// EmbeddedSignature appears only in subkeys capable of signing and is serialized as per section 5.2.3.26.
-	if sig.EmbeddedSignature != nil {
-		var buf bytes.Buffer
-		err = sig.EmbeddedSignature.serializeBody(&buf)
-		if err != nil {
-			return
-		}
-		subpackets = append(subpackets, outputSubpacket{true, embeddedSignatureSubpacket, true, buf.Bytes()})
-	}
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return
 }
 
@@ -1863,11 +1454,6 @@ func (sig *Signature) AddMetadataToHashSuffix() {
 	binary.BigEndian.PutUint32(buf[:], lit.Time)
 	suffix.Write(buf[:])
 
-<<<<<<< HEAD
-=======
-	// Update the counter and restore trailing bytes
-	l = uint64(suffix.Len())
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	suffix.Write([]byte{0x05, 0xff})
 	suffix.Write([]byte{
 		uint8(l >> 56), uint8(l >> 48), uint8(l >> 40), uint8(l >> 32),
@@ -1875,7 +1461,6 @@ func (sig *Signature) AddMetadataToHashSuffix() {
 	})
 	sig.HashSuffix = suffix.Bytes()
 }
-<<<<<<< HEAD
 
 // SaltLengthForHash selects the required salt length for the given hash algorithm,
 // as per Table 23 (Hash algorithm registry) of the crypto refresh.
@@ -1922,5 +1507,3 @@ func (sig *Signature) removeNotationsWithName(name string) {
 	}
 	sig.Notations = updatedNotations
 }
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go
index e05c417dda..2812a1db88 100644
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go
@@ -7,19 +7,13 @@ package packet
 import (
 	"bytes"
 	"crypto/cipher"
-<<<<<<< HEAD
 	"crypto/sha256"
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"io"
 	"strconv"
 
 	"github.com/ProtonMail/go-crypto/openpgp/errors"
 	"github.com/ProtonMail/go-crypto/openpgp/s2k"
-<<<<<<< HEAD
 	"golang.org/x/crypto/hkdf"
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 )
 
 // This is the largest session key that we'll support. Since at most 256-bit cipher
@@ -47,7 +41,6 @@ func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error {
 		return err
 	}
 	ske.Version = int(buf[0])
-<<<<<<< HEAD
 	if ske.Version != 4 && ske.Version != 5 && ske.Version != 6 {
 		return errors.UnsupportedError("unknown SymmetricKeyEncrypted version")
 	}
@@ -63,12 +56,6 @@ func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error {
 		}
 	}
 
-=======
-	if ske.Version != 4 && ske.Version != 5 {
-		return errors.UnsupportedError("unknown SymmetricKeyEncrypted version")
-	}
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	// Cipher function
 	if _, err := readFull(r, buf[:]); err != nil {
 		return err
 	}
@@ -78,11 +65,7 @@ func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error {
 		return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[0])))
 	}
 
-<<<<<<< HEAD
 	if ske.Version >= 5 {
-=======
-	if ske.Version == 5 {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		// AEAD mode
 		if _, err := readFull(r, buf[:]); err != nil {
 			return errors.StructuralError("cannot read AEAD octet from packet")
 		}
@@ -90,7 +73,6 @@ func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error {
 		ske.Mode = AEADMode(buf[0])
 	}
 
-<<<<<<< HEAD
 	if ske.Version > 5 {
 		// Scalar octet count
 		if _, err := readFull(r, buf[:]); err != nil {
@@ -98,8 +80,6 @@ func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error {
 		}
 	}
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	var err error
 	if ske.s2k, err = s2k.Parse(r); err != nil {
 		if _, ok := err.(errors.ErrDummyPrivateKey); ok {
@@ -108,11 +88,7 @@ func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error {
 		return err
 	}
 
-<<<<<<< HEAD
 	if ske.Version >= 5 {
-=======
-	if ske.Version == 5 {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		// AEAD IV
 		iv := make([]byte, ske.Mode.IvLength())
 		_, err := readFull(r, iv)
@@ -153,13 +129,8 @@ func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunc
 	case 4:
 		plaintextKey, cipherFunc, err := ske.decryptV4(key)
 		return plaintextKey, cipherFunc, err
-<<<<<<< HEAD
 	case 5, 6:
 		plaintextKey, err := ske.aeadDecrypt(ske.Version, key)
-=======
-	case 5:
-		plaintextKey, err := ske.decryptV5(key)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		return plaintextKey, CipherFunction(0), err
 	}
 	err := errors.UnsupportedError("unknown SymmetricKeyEncrypted version")
@@ -185,15 +156,9 @@ func (ske *SymmetricKeyEncrypted) decryptV4(key []byte) ([]byte, CipherFunction,
 	return plaintextKey, cipherFunc, nil
 }
 
-<<<<<<< HEAD
 func (ske *SymmetricKeyEncrypted) aeadDecrypt(version int, key []byte) ([]byte, error) {
 	adata := []byte{0xc3, byte(version), byte(ske.CipherFunc), byte(ske.Mode)}
 	aead := getEncryptedKeyAeadInstance(ske.CipherFunc, ske.Mode, key, adata, version)
-=======
-func (ske *SymmetricKeyEncrypted) decryptV5(key []byte) ([]byte, error) {
-	adata := []byte{0xc3, byte(5), byte(ske.CipherFunc), byte(ske.Mode)}
-	aead := getEncryptedKeyAeadInstance(ske.CipherFunc, ske.Mode, key, adata)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	plaintextKey, err := aead.Open(nil, ske.iv, ske.encryptedKey, adata)
 	if err != nil {
@@ -230,7 +195,6 @@ func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Conf
 // the given passphrase. The returned session key must be passed to
 // SerializeSymmetricallyEncrypted.
 // If config is nil, sensible defaults will be used.
-<<<<<<< HEAD
 // Deprecated: Use SerializeSymmetricKeyEncryptedAEADReuseKey instead.
func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, passphrase []byte, config *Config) (err error) { return SerializeSymmetricKeyEncryptedAEADReuseKey(w, sessionKey, passphrase, config.AEAD() != nil, config) @@ -247,12 +211,6 @@ func SerializeSymmetricKeyEncryptedAEADReuseKey(w io.Writer, sessionKey []byte, var version int if aeadSupported { version = 6 -======= -func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, passphrase []byte, config *Config) (err error) { - var version int - if config.AEAD() != nil { - version = 5 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } else { version = 4 } @@ -277,22 +235,15 @@ func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, pass switch version { case 4: packetLength = 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize -<<<<<<< HEAD case 5, 6: -======= - case 5: ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ivLen := config.AEAD().Mode().IvLength() tagLen := config.AEAD().Mode().TagLength() packetLength = 3 + len(s2kBytes) + ivLen + keySize + tagLen } -<<<<<<< HEAD if version > 5 { packetLength += 2 // additional octet count fields } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength) if err != nil { return @@ -301,7 +252,6 @@ func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, pass // Symmetric Key Encrypted Version buf := []byte{byte(version)} -<<<<<<< HEAD if version > 5 { // Scalar octet count buf = append(buf, byte(3+len(s2kBytes)+config.AEAD().Mode().IvLength())) @@ -318,15 +268,6 @@ func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, pass // Scalar octet count buf = append(buf, byte(len(s2kBytes))) } -======= - // Cipher function - buf = append(buf, byte(cipherFunc)) - - if version == 5 { - // AEAD mode - buf = append(buf, byte(config.AEAD().Mode())) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) _, err = w.Write(buf) if err != nil { return @@ -347,17 +288,10 @@ func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, pass if err != nil { return } -<<<<<<< HEAD case 5, 6: mode := config.AEAD().Mode() adata := []byte{0xc3, byte(version), byte(cipherFunc), byte(mode)} aead := getEncryptedKeyAeadInstance(cipherFunc, mode, keyEncryptingKey, adata, version) -======= - case 5: - mode := config.AEAD().Mode() - adata := []byte{0xc3, byte(5), byte(cipherFunc), byte(mode)} - aead := getEncryptedKeyAeadInstance(cipherFunc, mode, keyEncryptingKey, adata) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Sample iv using random reader iv := make([]byte, config.AEAD().Mode().IvLength()) @@ -381,7 +315,6 @@ func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, pass return } -<<<<<<< HEAD func getEncryptedKeyAeadInstance(c CipherFunction, mode AEADMode, inputKey, associatedData []byte, version int) (aead cipher.AEAD) { var blockCipher cipher.Block if version > 5 { @@ -394,9 +327,5 @@ func getEncryptedKeyAeadInstance(c CipherFunction, mode AEADMode, inputKey, asso } else { blockCipher = c.new(inputKey) } -======= -func getEncryptedKeyAeadInstance(c CipherFunction, mode AEADMode, inputKey, associatedData []byte) (aead cipher.AEAD) { - blockCipher := c.new(inputKey) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return mode.new(blockCipher) } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go 
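In the retained version of getEncryptedKeyAeadInstance, v6 packets no longer feed the S2K output directly into the cipher: the key-encryption key is first derived with HKDF-SHA256, keyed by the S2K output and bound to the same associated data. A rough sketch of that branch under those assumptions (the helper name is hypothetical):

package sketch

import (
	"crypto/sha256"
	"io"

	"golang.org/x/crypto/hkdf"
)

// v6KEK derives the key-encryption key for a v6 SKESK packet: HKDF-SHA256
// keyed by the S2K-derived key, no salt, with the AEAD associated data as
// the info string. v4/v5 packets use the S2K output directly instead.
func v6KEK(s2kKey, adata []byte, keySize int) ([]byte, error) {
	r := hkdf.New(sha256.New, s2kKey, nil, adata)
	kek := make([]byte, keySize)
	if _, err := io.ReadFull(r, kek); err != nil {
		return nil, err
	}
	return kek, nil
}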
b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go index 60e4017e3e..0e898742cf 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go @@ -74,13 +74,10 @@ func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.Read // SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet // to w and returns a WriteCloser to which the to-be-encrypted packets can be // written. -<<<<<<< HEAD // If aeadSupported is set to true, SEIPDv2 is used with the indicated CipherSuite. // Otherwise, SEIPDv1 is used with the indicated CipherFunction. // Note: aeadSupported MUST match the value passed to SerializeEncryptedKeyAEAD // and/or SerializeSymmetricKeyEncryptedAEADReuseKey. -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // If config is nil, sensible defaults will be used. func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, aeadSupported bool, cipherSuite CipherSuite, key []byte, config *Config) (Contents io.WriteCloser, err error) { writeCloser := noOpCloser{w} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go index 92b2dd7b5b..3ddc4fe4a9 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go @@ -7,13 +7,9 @@ package packet import ( "crypto/cipher" "crypto/sha256" -<<<<<<< HEAD "fmt" "io" "strconv" -======= - "io" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/ProtonMail/go-crypto/openpgp/errors" "golang.org/x/crypto/hkdf" @@ -31,31 +27,19 @@ func (se *SymmetricallyEncrypted) parseAead(r io.Reader) error { se.Cipher = CipherFunction(headerData[0]) // cipherFunc must have block size 16 to use AEAD if se.Cipher.blockSize() != 16 { -<<<<<<< HEAD return errors.UnsupportedError("invalid aead cipher: " + strconv.Itoa(int(se.Cipher))) -======= - return errors.UnsupportedError("invalid aead cipher: " + string(se.Cipher)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Mode se.Mode = AEADMode(headerData[1]) if se.Mode.TagLength() == 0 { -<<<<<<< HEAD return errors.UnsupportedError("unknown aead mode: " + strconv.Itoa(int(se.Mode))) -======= - return errors.UnsupportedError("unknown aead mode: " + string(se.Mode)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Chunk size se.ChunkSizeByte = headerData[2] if se.ChunkSizeByte > 16 { -<<<<<<< HEAD return errors.UnsupportedError("invalid aead chunk size byte: " + strconv.Itoa(int(se.ChunkSizeByte))) -======= - return errors.UnsupportedError("invalid aead chunk size byte: " + string(se.ChunkSizeByte)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Salt @@ -80,7 +64,6 @@ func (se *SymmetricallyEncrypted) associatedData() []byte { // decryptAead decrypts a V2 SEIPD packet (AEAD) as specified in // https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2 func (se *SymmetricallyEncrypted) decryptAead(inputKey []byte) (io.ReadCloser, error) { -<<<<<<< HEAD if se.Cipher.KeySize() != len(inputKey) { return nil, errors.StructuralError(fmt.Sprintf("invalid session key length for cipher: got %d bytes, but expected %d bytes", len(inputKey), se.Cipher.KeySize())) } @@ -91,13 +74,6 @@ func 
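The chunk size byte validated in parseAead is an exponent, not a byte count. A small sketch of the decoding rule that decodeAEADChunkSize applies, assuming the SEIPDv2 convention from the crypto refresh:

package sketch

// decodeChunkSize mirrors the SEIPDv2 rule used by decodeAEADChunkSize:
// the packet stores an exponent c (validated above to be <= 16), and the
// plaintext is processed in chunks of 2^(c+6) bytes, i.e. 64 B up to 4 MiB.
func decodeChunkSize(c uint8) int {
	return 1 << (uint(c) + 6)
}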
(se *SymmetricallyEncrypted) decryptAead(inputKey []byte) (io.ReadCloser, e tagLen := se.Mode.TagLength() chunkBytes := make([]byte, chunkSize+tagLen*2) peekedBytes := chunkBytes[chunkSize+tagLen:] -======= - aead, nonce := getSymmetricallyEncryptedAeadInstance(se.Cipher, se.Mode, inputKey, se.Salt[:], se.associatedData()) - - // Carry the first tagLen bytes - tagLen := se.Mode.TagLength() - peekedBytes := make([]byte, tagLen) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) n, err := io.ReadFull(se.Contents, peekedBytes) if n < tagLen || (err != nil && err != io.EOF) { return nil, errors.StructuralError("not enough data to decrypt:" + err.Error()) @@ -107,7 +83,6 @@ func (se *SymmetricallyEncrypted) decryptAead(inputKey []byte) (io.ReadCloser, e aeadCrypter: aeadCrypter{ aead: aead, chunkSize: decodeAEADChunkSize(se.ChunkSizeByte), -<<<<<<< HEAD nonce: nonce, associatedData: se.associatedData(), chunkIndex: nonce[len(nonce)-8:], @@ -115,14 +90,6 @@ func (se *SymmetricallyEncrypted) decryptAead(inputKey []byte) (io.ReadCloser, e }, reader: se.Contents, chunkBytes: chunkBytes, -======= - initialNonce: nonce, - associatedData: se.associatedData(), - chunkIndex: make([]byte, 8), - packetTag: packetTypeSymmetricallyEncryptedIntegrityProtected, - }, - reader: se.Contents, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) peekedBytes: peekedBytes, }, nil } @@ -156,11 +123,7 @@ func serializeSymmetricallyEncryptedAead(ciphertext io.WriteCloser, cipherSuite // Random salt salt := make([]byte, aeadSaltSize) -<<<<<<< HEAD if _, err := io.ReadFull(rand, salt); err != nil { -======= - if _, err := rand.Read(salt); err != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil, err } @@ -170,7 +133,6 @@ func serializeSymmetricallyEncryptedAead(ciphertext io.WriteCloser, cipherSuite aead, nonce := getSymmetricallyEncryptedAeadInstance(cipherSuite.Cipher, cipherSuite.Mode, inputKey, salt, prefix) -<<<<<<< HEAD chunkSize := decodeAEADChunkSize(chunkSizeByte) tagLen := aead.Overhead() chunkBytes := make([]byte, chunkSize+tagLen) @@ -185,18 +147,6 @@ func serializeSymmetricallyEncryptedAead(ciphertext io.WriteCloser, cipherSuite }, writer: ciphertext, chunkBytes: chunkBytes, -======= - return &aeadEncrypter{ - aeadCrypter: aeadCrypter{ - aead: aead, - chunkSize: decodeAEADChunkSize(chunkSizeByte), - associatedData: prefix, - chunkIndex: make([]byte, 8), - initialNonce: nonce, - packetTag: packetTypeSymmetricallyEncryptedIntegrityProtected, - }, - writer: ciphertext, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }, nil } @@ -206,17 +156,10 @@ func getSymmetricallyEncryptedAeadInstance(c CipherFunction, mode AEADMode, inpu encryptionKey := make([]byte, c.KeySize()) _, _ = readFull(hkdfReader, encryptionKey) -<<<<<<< HEAD nonce = make([]byte, mode.IvLength()) // Last 64 bits of nonce are the counter _, _ = readFull(hkdfReader, nonce[:len(nonce)-8]) -======= - // Last 64 bits of nonce are the counter - nonce = make([]byte, mode.IvLength()-8) - - _, _ = readFull(hkdfReader, nonce) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) blockCipher := c.new(encryptionKey) aead = mode.new(blockCipher) diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go index c1c35c6b8f..8b18623684 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go +++ 
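The getSymmetricallyEncryptedAeadInstance change above derives both the message key and the nonce from one HKDF stream, zeroing the trailing counter bytes rather than shortening the nonce. A sketch of that derivation, with names chosen for illustration:

package sketch

import (
	"crypto/sha256"
	"io"

	"golang.org/x/crypto/hkdf"
)

// deriveMessageKey sketches getSymmetricallyEncryptedAeadInstance: HKDF-SHA256
// keyed by the session key, salted with the packet salt, and bound to the
// packet header via info, yields the message key plus the static nonce prefix.
// The last 8 nonce bytes stay zero; they act as the big-endian chunk counter.
func deriveMessageKey(sessionKey, salt, info []byte, keySize, ivLen int) (key, nonce []byte, err error) {
	r := hkdf.New(sha256.New, sessionKey, salt, info)
	key = make([]byte, keySize)
	nonce = make([]byte, ivLen)
	if _, err = io.ReadFull(r, key); err != nil {
		return nil, nil, err
	}
	if _, err = io.ReadFull(r, nonce[:ivLen-8]); err != nil {
		return nil, nil, err
	}
	return key, nonce, nil
}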
b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go @@ -148,11 +148,7 @@ const mdcPacketTagByte = byte(0x80) | 0x40 | 19 func (ser *seMDCReader) Close() error { if ser.error { -<<<<<<< HEAD return errors.ErrMDCHashMismatch -======= - return errors.ErrMDCMissing ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } for !ser.eof { @@ -163,11 +159,7 @@ func (ser *seMDCReader) Close() error { break } if err != nil { -<<<<<<< HEAD return errors.ErrMDCHashMismatch -======= - return errors.ErrMDCMissing ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -180,11 +172,7 @@ func (ser *seMDCReader) Close() error { // The hash already includes the MDC header, but we still check its value // to confirm encryption correctness if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size { -<<<<<<< HEAD return errors.ErrMDCHashMismatch -======= - return errors.ErrMDCMissing ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return nil } @@ -249,15 +237,9 @@ func serializeSymmetricallyEncryptedMdc(ciphertext io.WriteCloser, c CipherFunct block := c.new(key) blockSize := block.BlockSize() iv := make([]byte, blockSize) -<<<<<<< HEAD _, err = io.ReadFull(config.Random(), iv) if err != nil { return nil, err -======= - _, err = config.Random().Read(iv) - if err != nil { - return ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync) _, err = ciphertext.Write(prefix) diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go index 113ba00d12..63814ed132 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go @@ -9,10 +9,6 @@ import ( "image" "image/jpeg" "io" -<<<<<<< HEAD -======= - "io/ioutil" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const UserAttrImageSubpacket = 1 @@ -66,11 +62,7 @@ func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute { func (uat *UserAttribute) parse(r io.Reader) (err error) { // RFC 4880, section 5.13 -<<<<<<< HEAD b, err := io.ReadAll(r) -======= - b, err := ioutil.ReadAll(r) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go index 676909a4ca..3c7451a3c3 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go @@ -6,10 +6,6 @@ package packet import ( "io" -<<<<<<< HEAD -======= - "io/ioutil" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "strings" ) @@ -69,11 +65,7 @@ func NewUserId(name, comment, email string) *UserId { func (uid *UserId) parse(r io.Reader) (err error) { // RFC 4880, section 5.11 -<<<<<<< HEAD b, err := io.ReadAll(r) -======= - b, err := ioutil.ReadAll(r) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go index b821bb519a..e6dd9b5fd3 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go @@ -46,10 +46,7 @@ type MessageDetails struct { DecryptedWith Key // the private key used to decrypt the message, if any. 
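The seMDCReader.Close hunks above all funnel into one structural test on the decrypted trailer. A sketch of that check, using the tag byte defined in the same file:

package sketch

import "crypto/sha1"

// mdcTrailerOK sketches the final check in seMDCReader.Close: the decrypted
// stream must end with the MDC packet header (new-format tag 19, i.e. 0xD3,
// and a body length of sha1.Size) followed by the SHA-1 digest itself;
// anything else surfaces as ErrMDCHashMismatch in the retained error taxonomy.
func mdcTrailerOK(trailer []byte) bool {
	const mdcPacketTagByte = byte(0x80) | 0x40 | 19 // 0xD3
	return len(trailer) >= 2 && trailer[0] == mdcPacketTagByte && trailer[1] == sha1.Size
}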
IsSigned bool // true if the message is signed. SignedByKeyId uint64 // the key id of the signer, if any. -<<<<<<< HEAD SignedByFingerprint []byte // the key fingerprint of the signer, if any. -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) SignedBy *Key // the key of the signer, if available. LiteralData *packet.LiteralData // the metadata of the contents UnverifiedBody io.Reader // the contents of the message. @@ -121,11 +118,7 @@ ParsePackets: // This packet contains the decryption key encrypted to a public key. md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId) switch p.Algo { -<<<<<<< HEAD case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal, packet.PubKeyAlgoECDH, packet.PubKeyAlgoX25519, packet.PubKeyAlgoX448: -======= - case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal, packet.PubKeyAlgoECDH: ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) break default: continue @@ -240,11 +233,7 @@ FindKey: } mdFinal, sensitiveParsingErr := readSignedMessage(packets, md, keyring, config) if sensitiveParsingErr != nil { -<<<<<<< HEAD return nil, errors.HandleSensitiveParsingError(sensitiveParsingErr, md.decrypted != nil) -======= - return nil, errors.StructuralError("parsing error") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return mdFinal, nil } @@ -282,25 +271,17 @@ FindLiteralData: prevLast = true } -<<<<<<< HEAD h, wrappedHash, err = hashForSignature(p.Hash, p.SigType, p.Salt) -======= - h, wrappedHash, err = hashForSignature(p.Hash, p.SigType) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { md.SignatureError = err } md.IsSigned = true -<<<<<<< HEAD if p.Version == 6 { md.SignedByFingerprint = p.KeyFingerprint } md.SignedByKeyId = p.KeyId -======= - md.SignedByKeyId = p.KeyId ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if keyring != nil { keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign) if len(keys) > 0 { @@ -316,11 +297,7 @@ FindLiteralData: if md.IsSigned && md.SignatureError == nil { md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md, config} } else if md.decrypted != nil { -<<<<<<< HEAD md.UnverifiedBody = &checkReader{md, false} -======= - md.UnverifiedBody = checkReader{md} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } else { md.UnverifiedBody = md.LiteralData.Body } @@ -328,7 +305,6 @@ FindLiteralData: return md, nil } -<<<<<<< HEAD func wrapHashForSignature(hashFunc hash.Hash, sigType packet.SignatureType) (hash.Hash, error) { switch sigType { case packet.SigTypeBinary: @@ -339,18 +315,12 @@ func wrapHashForSignature(hashFunc hash.Hash, sigType packet.SignatureType) (has return nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // hashForSignature returns a pair of hashes that can be used to verify a // signature. The signature may specify that the contents of the signed message // should be preprocessed (i.e. to normalize line endings). Thus this function // returns two hashes. The second should be used to hash the message itself and // performs any needed preprocessing. 
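The new sigSalt parameter threaded through hashForSignature changes what gets digested first. A sketch of the v6 rule, assuming the Table 23 salt sizes (16/24/32 bytes for SHA-256/384/512) referenced elsewhere in this patch:

package sketch

import (
	"crypto"
	_ "crypto/sha256" // register SHA-256 so hashFunc.New does not panic
)

// saltedDigest sketches the v6 rule added to hashForSignature: the signature
// salt is hashed before any message data; v4 signatures pass a nil salt.
// Text signatures would additionally canonicalize line endings through the
// wrapped hash before the message bytes reach the digest.
func saltedDigest(hashFunc crypto.Hash, sigSalt, message []byte) []byte {
	h := hashFunc.New()
	h.Write(sigSalt)
	h.Write(message)
	return h.Sum(nil)
}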
-<<<<<<< HEAD func hashForSignature(hashFunc crypto.Hash, sigType packet.SignatureType, sigSalt []byte) (hash.Hash, hash.Hash, error) { -======= -func hashForSignature(hashFunc crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if _, ok := algorithm.HashToHashIdWithSha1(hashFunc); !ok { return nil, nil, errors.UnsupportedError("unsupported hash function") } @@ -358,7 +328,6 @@ func hashForSignature(hashFunc crypto.Hash, sigType packet.SignatureType) (hash. return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashFunc))) } h := hashFunc.New() -<<<<<<< HEAD if sigSalt != nil { h.Write(sigSalt) } @@ -372,16 +341,6 @@ func hashForSignature(hashFunc crypto.Hash, sigType packet.SignatureType) (hash. case packet.SigTypeText: return h, wrappedHash, nil } -======= - - switch sigType { - case packet.SigTypeBinary: - return h, h, nil - case packet.SigTypeText: - return h, NewCanonicalTextHash(h), nil - } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) } @@ -389,7 +348,6 @@ func hashForSignature(hashFunc crypto.Hash, sigType packet.SignatureType) (hash. // it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger // MDC checks. type checkReader struct { -<<<<<<< HEAD md *MessageDetails checked bool } @@ -401,31 +359,16 @@ func (cr *checkReader) Read(buf []byte) (int, error) { // Only check once return n, io.EOF } -======= - md *MessageDetails -} - -func (cr checkReader) Read(buf []byte) (int, error) { - n, sensitiveParsingError := cr.md.LiteralData.Body.Read(buf) - if sensitiveParsingError == io.EOF { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) mdcErr := cr.md.decrypted.Close() if mdcErr != nil { return n, mdcErr } -<<<<<<< HEAD cr.checked = true -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return n, io.EOF } if sensitiveParsingError != nil { -<<<<<<< HEAD return n, errors.HandleSensitiveParsingError(sensitiveParsingError, true) -======= - return n, errors.StructuralError("parsing error") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return n, nil @@ -449,10 +392,7 @@ func (scr *signatureCheckReader) Read(buf []byte) (int, error) { scr.wrappedHash.Write(buf[:n]) } -<<<<<<< HEAD readsDecryptedData := scr.md.decrypted != nil -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if sensitiveParsingError == io.EOF { var p packet.Packet var readError error @@ -471,11 +411,7 @@ func (scr *signatureCheckReader) Read(buf []byte) (int, error) { key := scr.md.SignedBy signatureError := key.PublicKey.VerifySignature(scr.h, sig) if signatureError == nil { -<<<<<<< HEAD signatureError = checkMessageSignatureDetails(key, sig, scr.config) -======= - signatureError = checkSignatureDetails(key, sig, scr.config) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } scr.md.Signature = sig scr.md.SignatureError = signatureError @@ -499,25 +435,15 @@ func (scr *signatureCheckReader) Read(buf []byte) (int, error) { // unsigned hash of its own. In order to check this we need to // close that Reader. 
if scr.md.decrypted != nil { -<<<<<<< HEAD if sensitiveParsingError := scr.md.decrypted.Close(); sensitiveParsingError != nil { return n, errors.HandleSensitiveParsingError(sensitiveParsingError, true) -======= - mdcErr := scr.md.decrypted.Close() - if mdcErr != nil { - return n, mdcErr ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } return n, io.EOF } if sensitiveParsingError != nil { -<<<<<<< HEAD return n, errors.HandleSensitiveParsingError(sensitiveParsingError, readsDecryptedData) -======= - return n, errors.StructuralError("parsing error") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return n, nil @@ -528,22 +454,13 @@ func (scr *signatureCheckReader) Read(buf []byte) (int, error) { // if any, and a possible signature verification error. // If the signer isn't known, ErrUnknownIssuer is returned. func VerifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { -<<<<<<< HEAD return verifyDetachedSignature(keyring, signed, signature, nil, false, config) -======= - var expectedHashes []crypto.Hash - return verifyDetachedSignature(keyring, signed, signature, expectedHashes, config) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // VerifyDetachedSignatureAndHash performs the same actions as // VerifyDetachedSignature and checks that the expected hash functions were used. func VerifyDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { -<<<<<<< HEAD return verifyDetachedSignature(keyring, signed, signature, expectedHashes, true, config) -======= - return verifyDetachedSignature(keyring, signed, signature, expectedHashes, config) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // CheckDetachedSignature takes a signed file and a detached signature and @@ -551,41 +468,24 @@ func VerifyDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader // signature verification error. If the signer isn't known, // ErrUnknownIssuer is returned. func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (signer *Entity, err error) { -<<<<<<< HEAD _, signer, err = verifyDetachedSignature(keyring, signed, signature, nil, false, config) return -======= - var expectedHashes []crypto.Hash - return CheckDetachedSignatureAndHash(keyring, signed, signature, expectedHashes, config) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // CheckDetachedSignatureAndHash performs the same actions as // CheckDetachedSignature and checks that the expected hash functions were used. 
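For orientation, a hypothetical call site for the updated detached-verification API: passing a nil config and no expected hashes exercises the nil/false path of verifyDetachedSignature shown above.

package sketch

import (
	"fmt"
	"io"

	"github.com/ProtonMail/go-crypto/openpgp"
)

// verifyDetached verifies a detached signature without pinning expected
// hash algorithms; keyring, signed and sig are supplied by the caller.
func verifyDetached(keyring openpgp.EntityList, signed, sig io.Reader) error {
	_, signer, err := openpgp.VerifyDetachedSignature(keyring, signed, sig, nil)
	if err != nil {
		return err
	}
	fmt.Printf("good signature from key %X\n", signer.PrimaryKey.Fingerprint)
	return nil
}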
func CheckDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (signer *Entity, err error) { -<<<<<<< HEAD _, signer, err = verifyDetachedSignature(keyring, signed, signature, expectedHashes, true, config) return } func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, checkHashes bool, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { -======= - _, signer, err = verifyDetachedSignature(keyring, signed, signature, expectedHashes, config) - return -} - -func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var issuerKeyId uint64 var hashFunc crypto.Hash var sigType packet.SignatureType var keys []Key var p packet.Packet -<<<<<<< HEAD -======= - expectedHashesLen := len(expectedHashes) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) packets := packet.NewReader(signature) for { p, err = packets.Next() @@ -607,7 +507,6 @@ func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expec issuerKeyId = *sig.IssuerKeyId hashFunc = sig.Hash sigType = sig.SigType -<<<<<<< HEAD if checkHashes { matchFound := false // check for hashes @@ -621,18 +520,6 @@ func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expec return nil, nil, errors.StructuralError("hash algorithm or salt mismatch with cleartext message headers") } } -======= - - for i, expectedHash := range expectedHashes { - if hashFunc == expectedHash { - break - } - if i+1 == expectedHashesLen { - return nil, nil, errors.StructuralError("hash algorithm mismatch with cleartext message headers") - } - } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign) if len(keys) > 0 { break @@ -643,15 +530,11 @@ func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expec panic("unreachable") } -<<<<<<< HEAD h, err := sig.PrepareVerify() if err != nil { return nil, nil, err } wrappedHash, err := wrapHashForSignature(h, sigType) -======= - h, wrappedHash, err := hashForSignature(hashFunc, sigType) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, nil, err } @@ -663,11 +546,7 @@ func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expec for _, key := range keys { err = key.PublicKey.VerifySignature(h, sig) if err == nil { -<<<<<<< HEAD return sig, key.Entity, checkMessageSignatureDetails(&key, sig, config) -======= - return sig, key.Entity, checkSignatureDetails(&key, sig, config) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -685,11 +564,7 @@ func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader, return CheckDetachedSignature(keyring, signed, body, config) } -<<<<<<< HEAD // checkMessageSignatureDetails returns an error if: -======= -// checkSignatureDetails returns an error if: ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // - The signature (or one of the binding signatures mentioned below) // has a unknown critical notation data subpacket // - The primary key of the signing entity is revoked @@ -707,23 +582,11 @@ func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader, // NOTE: The order of these checks is important, as the caller may choose to // ignore 
ErrSignatureExpired or ErrKeyExpired errors, but should never // ignore any other errors. -<<<<<<< HEAD func checkMessageSignatureDetails(key *Key, signature *packet.Signature, config *packet.Config) error { now := config.Now() primarySelfSignature, primaryIdentity := key.Entity.PrimarySelfSignature() signedBySubKey := key.PublicKey != key.Entity.PrimaryKey sigsToCheck := []*packet.Signature{signature, primarySelfSignature} -======= -// -// TODO: Also return an error if: -// - The primary key is expired according to a direct-key signature -// - (For V5 keys only:) The direct-key signature (exists and) is expired -func checkSignatureDetails(key *Key, signature *packet.Signature, config *packet.Config) error { - now := config.Now() - primaryIdentity := key.Entity.PrimaryIdentity() - signedBySubKey := key.PublicKey != key.Entity.PrimaryKey - sigsToCheck := []*packet.Signature{signature, primaryIdentity.SelfSignature} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if signedBySubKey { sigsToCheck = append(sigsToCheck, key.SelfSignature, key.SelfSignature.EmbeddedSignature) } @@ -736,17 +599,10 @@ func checkSignatureDetails(key *Key, signature *packet.Signature, config *packet } if key.Entity.Revoked(now) || // primary key is revoked (signedBySubKey && key.Revoked(now)) || // subkey is revoked -<<<<<<< HEAD (primaryIdentity != nil && primaryIdentity.Revoked(now)) { // primary identity is revoked for v4 return errors.ErrKeyRevoked } if key.Entity.PrimaryKey.KeyExpired(primarySelfSignature, now) { // primary key is expired -======= - primaryIdentity.Revoked(now) { // primary identity is revoked - return errors.ErrKeyRevoked - } - if key.Entity.PrimaryKey.KeyExpired(primaryIdentity.SelfSignature, now) { // primary key is expired ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return errors.ErrKeyExpired } if signedBySubKey { diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go index bc36863355..670d60226a 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go @@ -26,11 +26,8 @@ const testKeys1And2PrivateHex = "9501d8044d3c5c10010400b1d13382944bd5aba23a43129 const dsaElGamalTestKeysHex = 
"9501e1044dfcb16a110400aa3e5c1a1f43dd28c2ffae8abf5cfce555ee874134d8ba0a0f7b868ce2214beddc74e5e1e21ded354a95d18acdaf69e5e342371a71fbb9093162e0c5f3427de413a7f2c157d83f5cd2f9d791256dc4f6f0e13f13c3302af27f2384075ab3021dff7a050e14854bbde0a1094174855fc02f0bae8e00a340d94a1f22b32e48485700a0cec672ac21258fb95f61de2ce1af74b2c4fa3e6703ff698edc9be22c02ae4d916e4fa223f819d46582c0516235848a77b577ea49018dcd5e9e15cff9dbb4663a1ae6dd7580fa40946d40c05f72814b0f88481207e6c0832c3bded4853ebba0a7e3bd8e8c66df33d5a537cd4acf946d1080e7a3dcea679cb2b11a72a33a2b6a9dc85f466ad2ddf4c3db6283fa645343286971e3dd700703fc0c4e290d45767f370831a90187e74e9972aae5bff488eeff7d620af0362bfb95c1a6c3413ab5d15a2e4139e5d07a54d72583914661ed6a87cce810be28a0aa8879a2dd39e52fb6fe800f4f181ac7e328f740cde3d09a05cecf9483e4cca4253e60d4429ffd679d9996a520012aad119878c941e3cf151459873bdfc2a9563472fe0303027a728f9feb3b864260a1babe83925ce794710cfd642ee4ae0e5b9d74cee49e9c67b6cd0ea5dfbb582132195a121356a1513e1bca73e5b80c58c7ccb4164453412f456c47616d616c2054657374204b65792031886204131102002205024dfcb16a021b03060b090807030206150802090a0b0416020301021e01021780000a091033af447ccd759b09fadd00a0b8fd6f5a790bad7e9f2dbb7632046dc4493588db009c087c6a9ba9f7f49fab221587a74788c00db4889ab00200009d0157044dfcb16a1004008dec3f9291205255ccff8c532318133a6840739dd68b03ba942676f9038612071447bf07d00d559c5c0875724ea16a4c774f80d8338b55fca691a0522e530e604215b467bbc9ccfd483a1da99d7bc2648b4318fdbd27766fc8bfad3fddb37c62b8ae7ccfe9577e9b8d1e77c1d417ed2c2ef02d52f4da11600d85d3229607943700030503ff506c94c87c8cab778e963b76cf63770f0a79bf48fb49d3b4e52234620fc9f7657f9f8d56c96a2b7c7826ae6b57ebb2221a3fe154b03b6637cea7e6d98e3e45d87cf8dc432f723d3d71f89c5192ac8d7290684d2c25ce55846a80c9a7823f6acd9bb29fa6cd71f20bc90eccfca20451d0c976e460e672b000df49466408d527affe0303027a728f9feb3b864260abd761730327bca2aaa4ea0525c175e92bf240682a0e83b226f97ecb2e935b62c9a133858ce31b271fa8eb41f6a1b3cd72a63025ce1a75ee4180dcc284884904181102000905024dfcb16a021b0c000a091033af447ccd759b09dd0b009e3c3e7296092c81bee5a19929462caaf2fff3ae26009e218c437a2340e7ea628149af1ec98ec091a43992b00200009501e1044dfcb1be1104009f61faa61aa43df75d128cbe53de528c4aec49ce9360c992e70c77072ad5623de0a3a6212771b66b39a30dad6781799e92608316900518ec01184a85d872365b7d2ba4bacfb5882ea3c2473d3750dc6178cc1cf82147fb58caa28b28e9f12f6d1efcb0534abed644156c91cca4ab78834268495160b2400bc422beb37d237c2300a0cac94911b6d493bda1e1fbc6feeca7cb7421d34b03fe22cec6ccb39675bb7b94a335c2b7be888fd3906a1125f33301d8aa6ec6ee6878f46f73961c8d57a3e9544d8ef2a2cbfd4d52da665b1266928cfe4cb347a58c412815f3b2d2369dec04b41ac9a71cc9547426d5ab941cccf3b18575637ccfb42df1a802df3cfe0a999f9e7109331170e3a221991bf868543960f8c816c28097e503fe319db10fb98049f3a57d7c80c420da66d56f3644371631fad3f0ff4040a19a4fedc2d07727a1b27576f75a4d28c47d8246f27071e12d7a8de62aad216ddbae6aa02efd6b8a3e2818cda48526549791ab277e447b3a36c57cefe9b592f5eab73959743fcc8e83cbefec03a329b55018b53eec196765ae40ef9e20521a603c551efe0303020950d53a146bf9c66034d00c23130cce95576a2ff78016ca471276e8227fb30b1ffbd92e61804fb0c3eff9e30b1a826ee8f3e4730b4d86273ca977b4164453412f456c47616d616c2054657374204b65792032886204131102002205024dfcb1be021b03060b090807030206150802090a0b0416020301021e01021780000a0910a86bf526325b21b22bd9009e34511620415c974750a20df5cb56b182f3b48e6600a0a9466cb1a1305a84953445f77d461593f1d42bc1b00200009d0157044dfcb1be1004009565a951da1ee87119d600c077198f1c1bceb0f7aa54552489298e41ff788fa8f0d43a69871f0f6f77ebdfb14a4260cf9fbeb65d5844b4272a1904dd95136d06c3da745dc46327dd44a0f16f60135914368c8039a34033862261806bb2c5ce1152e2840254697872c85441ccb7321431d75a747a4b
fb1d2c66362b51ce76311700030503fc0ea76601c196768070b7365a200e6ddb09307f262d5f39eec467b5f5784e22abdf1aa49226f59ab37cb49969d8f5230ea65caf56015abda62604544ed526c5c522bf92bed178a078789f6c807b6d34885688024a5bed9e9f8c58d11d4b82487b44c5f470c5606806a0443b79cadb45e0f897a561a53f724e5349b9267c75ca17fe0303020950d53a146bf9c660bc5f4ce8f072465e2d2466434320c1e712272fafc20e342fe7608101580fa1a1a367e60486a7cd1246b7ef5586cf5e10b32762b710a30144f12dd17dd4884904181102000905024dfcb1be021b0c000a0910a86bf526325b21b2904c00a0b2b66b4b39ccffda1d10f3ea8d58f827e30a8b8e009f4255b2d8112a184e40cde43a34e8655ca7809370b0020000" -<<<<<<< HEAD const ed25519wX25519Key = "c54b0663877fe31b00000020f94da7bb48d60a61e567706a6587d0331999bb9d891a08242ead84543df895a3001972817b12be707e8d5f586ce61361201d344eb266a2c82fde6835762b65b0b7c2b1061f1b0a00000042058263877fe3030b090705150a0e080c021600029b03021e09222106cb186c4f0609a697e4d52dfa6c722b0c1f1e27c18a56708f6525ec27bad9acc905270902070200000000ad2820103e2d7d227ec0e6d7ce4471db36bfc97083253690271498a7ef0576c07faae14585b3b903b0127ec4fda2f023045a2ec76bcb4f9571a9651e14aee1137a1d668442c88f951e33c4ffd33fb9a17d511eed758fc6d9cc50cb5fd793b2039d5804c74b0663877fe319000000208693248367f9e5015db922f8f48095dda784987f2d5985b12fbad16caf5e4435004d600a4f794d44775c57a26e0feefed558e9afffd6ad0d582d57fb2ba2dcedb8c29b06181b0a0000002c050263877fe322a106cb186c4f0609a697e4d52dfa6c722b0c1f1e27c18a56708f6525ec27bad9acc9021b0c00000000defa20a6e9186d9d5935fc8fe56314cdb527486a5a5120f9b762a235a729f039010a56b89c658568341fbef3b894e9834ad9bc72afae2f4c9c47a43855e65f1cb0a3f77bbc5f61085c1f8249fe4e7ca59af5f0bcee9398e0fa8d76e522e1d8ab42bb0d" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) const signedMessageHex = "a3019bc0cbccc0c4b8d8b74ee2108fe16ec6d3ca490cbe362d3f8333d3f352531472538b8b13d353b97232f352158c20943157c71c16064626063656269052062e4e01987e9b6fccff4b7df3a34c534b23e679cbec3bc0f8f6e64dfb4b55fe3f8efa9ce110ddb5cd79faf1d753c51aecfa669f7e7aa043436596cccc3359cb7dd6bbe9ecaa69e5989d9e57209571edc0b2fa7f57b9b79a64ee6e99ce1371395fee92fec2796f7b15a77c386ff668ee27f6d38f0baa6c438b561657377bf6acff3c5947befd7bf4c196252f1d6e5c524d0300" const signedTextMessageHex = "a3019bc0cbccc8c4b8d8b74ee2108fe16ec6d36a250cbece0c178233d3f352531472538b8b13d35379b97232f352158ca0b4312f57c71c1646462606365626906a062e4e019811591798ff99bf8afee860b0d8a8c2a85c3387e3bcf0bb3b17987f2bbcfab2aa526d930cbfd3d98757184df3995c9f3e7790e36e3e9779f06089d4c64e9e47dd6202cb6e9bc73c5d11bb59fbaf89d22d8dc7cf199ddf17af96e77c5f65f9bbed56f427bd8db7af37f6c9984bf9385efaf5f184f986fb3e6adb0ecfe35bbf92d16a7aa2a344fb0bc52fb7624f0200" @@ -165,7 +162,6 @@ TcIYl5/Uyoi+FOvPLcNw4hOv2nwUzSSVAw== =IiS2 -----END PGP PRIVATE KEY BLOCK-----` -<<<<<<< HEAD // See OpenPGP crypto refresh Section A.3. 
const v6PrivKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- @@ -238,20 +234,6 @@ aU71tdtNBQ== =e7jT -----END PGP PRIVATE KEY BLOCK-----` -======= -// Generated with the above private key -const v5PrivKeyMsg = `-----BEGIN PGP MESSAGE----- -Version: OpenPGP.js v4.10.7 -Comment: https://openpgpjs.org - -xA0DAQoWGTR7yYckZAIByxF1B21zZy50eHRfbIGSdGVzdMJ3BQEWCgAGBQJf -bIGSACMiIQUZNHvJhyRkAl+Z3z7C4AAO2YhIkuH3s+pMlACRWVabVDQvAP9G -y29VPonFXqi2zKkpZrvyvZxg+n5e8Nt9wNbuxeCd3QD/TtO2s+JvjrE4Siwv -UQdl5MlBka1QSNbMq2Bz7XwNPg4= -=6lbM ------END PGP MESSAGE-----` - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) const keyWithExpiredCrossSig = `-----BEGIN PGP PUBLIC KEY BLOCK----- xsDNBF2lnPIBDAC5cL9PQoQLTMuhjbYvb4Ncuuo0bfmgPRFywX53jPhoFf4Zg6mv @@ -352,7 +334,6 @@ AtNTq6ihLMD5v1d82ZC7tNatdlDMGWnIdvEMCv2GZcuIqDQ9rXWs49e7tq1NncLY hz3tYjKhoFTKEIq3y3Pp =h/aX -----END PGP PUBLIC KEY BLOCK-----` -<<<<<<< HEAD const keyv5Test = `-----BEGIN PGP PRIVATE KEY BLOCK----- Comment: Bob's OpenPGP Transferable Secret Key @@ -474,5 +455,3 @@ byVJHvLO/XErtC+GNIJeMg== =liRq -----END PGP MESSAGE----- ` -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go index 1e109b337c..6871b84fc9 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go @@ -87,17 +87,10 @@ func decodeCount(c uint8) int { // encodeMemory converts the Argon2 "memory" in the range parallelism*8 to // 2**31, inclusive, to an encoded memory. The return value is the // octet that is actually stored in the GPG file. encodeMemory panics -<<<<<<< HEAD // if is not in the above range // See OpenPGP crypto refresh Section 3.7.1.4. func encodeMemory(memory uint32, parallelism uint8) uint8 { if memory < (8*uint32(parallelism)) || memory > uint32(2147483648) { -======= -// if is not in the above range -// See OpenPGP crypto refresh Section 3.7.1.4. -func encodeMemory(memory uint32, parallelism uint8) uint8 { - if memory < (8 * uint32(parallelism)) || memory > uint32(2147483648) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) panic("Memory argument memory is outside the required range") } @@ -206,13 +199,8 @@ func Generate(rand io.Reader, c *Config) (*Params, error) { } params = &Params{ -<<<<<<< HEAD mode: SaltedS2K, hashId: hashId, -======= - mode: SaltedS2K, - hashId: hashId, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } else { // Enforce IteratedSaltedS2K method otherwise hashId, ok := algorithm.HashToHashId(c.hash()) @@ -223,11 +211,7 @@ func Generate(rand io.Reader, c *Config) (*Params, error) { c.S2KMode = IteratedSaltedS2K } params = &Params{ -<<<<<<< HEAD mode: IteratedSaltedS2K, -======= - mode: IteratedSaltedS2K, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) hashId: hashId, countByte: c.EncodedCount(), } @@ -299,12 +283,9 @@ func ParseIntoParams(r io.Reader) (params *Params, err error) { params.passes = buf[Argon2SaltSize] params.parallelism = buf[Argon2SaltSize+1] params.memoryExp = buf[Argon2SaltSize+2] -<<<<<<< HEAD if err := validateArgon2Params(params); err != nil { return nil, err } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return params, nil case GnuS2K: // This is a GNU extension. 
See @@ -322,31 +303,22 @@ func ParseIntoParams(r io.Reader) (params *Params, err error) { return nil, errors.UnsupportedError("S2K function") } -<<<<<<< HEAD func (params *Params) Mode() Mode { return params.mode } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (params *Params) Dummy() bool { return params != nil && params.mode == GnuS2K } func (params *Params) salt() []byte { switch params.mode { -<<<<<<< HEAD case SaltedS2K, IteratedSaltedS2K: return params.saltBytes[:8] case Argon2S2K: return params.saltBytes[:Argon2SaltSize] default: return nil -======= - case SaltedS2K, IteratedSaltedS2K: return params.saltBytes[:8] - case Argon2S2K: return params.saltBytes[:Argon2SaltSize] - default: return nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -443,7 +415,6 @@ func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Co f(key, passphrase) return nil } -<<<<<<< HEAD // validateArgon2Params checks that the argon2 parameters are valid according to RFC9580. func validateArgon2Params(params *Params) error { @@ -463,5 +434,3 @@ func validateArgon2Params(params *Params) error { return nil } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go index 9e0748796b..616e0d12c6 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go @@ -5,11 +5,7 @@ package s2k // the same parameters. type Cache map[Params][]byte -<<<<<<< HEAD // GetOrComputeDerivedKey tries to retrieve the key -======= -// GetOrComputeDerivedKey tries to retrieve the key ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // for the given s2k parameters from the cache. // If there is no hit, it derives the key with the s2k function from the passphrase, // updates the cache, and returns the key. diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go index e9ce286032..b93db1ab85 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go @@ -50,15 +50,9 @@ type Config struct { type Argon2Config struct { NumberOfPasses uint8 DegreeOfParallelism uint8 -<<<<<<< HEAD // Memory specifies the desired Argon2 memory usage in kibibytes. // For example memory=64*1024 sets the memory cost to ~64 MB. Memory uint32 -======= - // The memory parameter for Argon2 specifies desired memory usage in kibibytes. - // For example memory=64*1024 sets the memory cost to ~64 MB. 
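The encodeMemory hunk above stores the Argon2 memory cost as a single octet. A hypothetical stand-in showing the encoding it implies, with the bounds the retained code enforces:

package sketch

import "fmt"

// memoryExponent illustrates encodeMemory: the stored octet is the
// power-of-two exponent of the Argon2 memory cost in KiB, and the cost must
// lie in [8*parallelism, 2^31].
func memoryExponent(memory uint32, parallelism uint8) (uint8, error) {
	if memory < 8*uint32(parallelism) || memory > 1<<31 {
		return 0, fmt.Errorf("argon2 memory %d KiB out of range", memory)
	}
	e := uint8(0)
	for uint32(1)<<e < memory {
		e++
	}
	return e, nil // e.g. 64*1024 KiB (64 MiB) encodes as 16
}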
- Memory uint32 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (c *Config) Mode() Mode { @@ -121,11 +115,7 @@ func (c *Argon2Config) EncodedMemory() uint8 { } memory := c.Memory -<<<<<<< HEAD lowerBound := uint32(c.Parallelism()) * 8 -======= - lowerBound := uint32(c.Parallelism())*8 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) upperBound := uint32(2147483648) switch { diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go index 18a3d2c526..b0f6ef7b09 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go @@ -76,15 +76,11 @@ func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.S sig := createSignaturePacket(signingKey.PublicKey, sigType, config) -<<<<<<< HEAD h, err := sig.PrepareSign(config) if err != nil { return } wrappedHash, err := wrapHashForSignature(h, sig.SigType) -======= - h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return } @@ -283,7 +279,6 @@ func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entit return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)") } -<<<<<<< HEAD var salt []byte if signer != nil { var opsVersion = 3 @@ -292,17 +287,12 @@ func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entit } ops := &packet.OnePassSignature{ Version: opsVersion, -======= - if signer != nil { - ops := &packet.OnePassSignature{ ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) SigType: sigType, Hash: hash, PubKeyAlgo: signer.PubKeyAlgo, KeyId: signer.KeyId, IsLast: true, } -<<<<<<< HEAD if opsVersion == 6 { ops.KeyFingerprint = signer.Fingerprint salt, err = packet.SignatureSaltForHash(hash, config.Random()) @@ -311,8 +301,6 @@ func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entit } ops.Salt = salt } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err := ops.Serialize(payload); err != nil { return nil, err } @@ -340,31 +328,19 @@ func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entit } if signer != nil { -<<<<<<< HEAD h, wrappedHash, err := hashForSignature(hash, sigType, salt) -======= - h, wrappedHash, err := hashForSignature(hash, sigType) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } metadata := &packet.LiteralData{ -<<<<<<< HEAD Format: 'u', -======= - Format: 't', ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) FileName: hints.FileName, Time: epochSeconds, } if hints.IsBinary { metadata.Format = 'b' } -<<<<<<< HEAD return signatureWriter{payload, literalData, hash, wrappedHash, h, salt, signer, sigType, config, metadata}, nil -======= - return signatureWriter{payload, literalData, hash, wrappedHash, h, signer, sigType, config, metadata}, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return literalData, nil } @@ -422,7 +398,6 @@ func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *En return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no valid encryption keys") } -<<<<<<< HEAD primarySelfSignature, _ := to[i].PrimarySelfSignature() if primarySelfSignature == nil { return nil, 
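To make the Memory field's KiB unit concrete, here is an illustrative wiring of the Argon2 S2K configuration; the values are examples, not recommendations, and the exact field set is assumed from the hunks above:

package sketch

import "github.com/ProtonMail/go-crypto/openpgp/s2k"

// argonConfig requests ~64 MiB of memory, 4 lanes and 3 passes.
var argonConfig = &s2k.Config{
	S2KMode: s2k.Argon2S2K,
	Argon2Config: &s2k.Argon2Config{
		NumberOfPasses:      3,
		DegreeOfParallelism: 4,
		Memory:              64 * 1024, // KiB
	},
}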
errors.InvalidArgumentError("entity without a self-signature") @@ -436,17 +411,6 @@ func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *En candidateHashes = intersectPreferences(candidateHashes, primarySelfSignature.PreferredHash) candidateCipherSuites = intersectCipherSuites(candidateCipherSuites, primarySelfSignature.PreferredCipherSuites) candidateCompression = intersectPreferences(candidateCompression, primarySelfSignature.PreferredCompression) -======= - sig := to[i].PrimaryIdentity().SelfSignature - if !sig.SEIPDv2 { - aeadSupported = false - } - - candidateCiphers = intersectPreferences(candidateCiphers, sig.PreferredSymmetric) - candidateHashes = intersectPreferences(candidateHashes, sig.PreferredHash) - candidateCipherSuites = intersectCipherSuites(candidateCipherSuites, sig.PreferredCipherSuites) - candidateCompression = intersectPreferences(candidateCompression, sig.PreferredCompression) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // In the event that the intersection of supported algorithms is empty we use the ones @@ -480,7 +444,6 @@ func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *En } } -<<<<<<< HEAD var symKey []byte if aeadSupported { symKey = make([]byte, aeadCipherSuite.Cipher.KeySize()) @@ -488,19 +451,12 @@ func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *En symKey = make([]byte, cipher.KeySize()) } -======= - symKey := make([]byte, cipher.KeySize()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if _, err := io.ReadFull(config.Random(), symKey); err != nil { return nil, err } for _, key := range encryptKeys { -<<<<<<< HEAD if err := packet.SerializeEncryptedKeyAEAD(keyWriter, key.PublicKey, cipher, aeadSupported, symKey, config); err != nil { -======= - if err := packet.SerializeEncryptedKey(keyWriter, key.PublicKey, cipher, symKey, config); err != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil, err } } @@ -537,25 +493,17 @@ func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Con hashToHashId(crypto.SHA3_512), } defaultHashes := candidateHashes[0:1] -<<<<<<< HEAD primarySelfSignature, _ := signed.PrimarySelfSignature() if primarySelfSignature == nil { return nil, errors.StructuralError("signed entity has no self-signature") } preferredHashes := primarySelfSignature.PreferredHash -======= - preferredHashes := signed.PrimaryIdentity().SelfSignature.PreferredHash ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if len(preferredHashes) == 0 { preferredHashes = defaultHashes } candidateHashes = intersectPreferences(candidateHashes, preferredHashes) if len(candidateHashes) == 0 { -<<<<<<< HEAD return nil, errors.StructuralError("cannot sign because signing key shares no common algorithms with candidate hashes") -======= - return nil, errors.InvalidArgumentError("cannot sign because signing key shares no common algorithms with candidate hashes") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return writeAndSign(noOpCloser{output}, candidateHashes, signed, hints, packet.SigTypeBinary, config) @@ -570,10 +518,7 @@ type signatureWriter struct { hashType crypto.Hash wrappedHash hash.Hash h hash.Hash -<<<<<<< HEAD salt []byte // v6 only -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) signer *packet.PrivateKey sigType packet.SignatureType config *packet.Config @@ -597,13 +542,10 @@ func (s signatureWriter) Close() error { sig.Hash = s.hashType sig.Metadata = s.metadata -<<<<<<< 
HEAD if err := sig.SetSalt(s.salt); err != nil { return err } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err := sig.Sign(s.h, s.signer, s.config); err != nil { return err } diff --git a/vendor/github.com/alecthomas/go-check-sumtype/README.md b/vendor/github.com/alecthomas/go-check-sumtype/README.md index 5a458db386..287aa68b7f 100644 --- a/vendor/github.com/alecthomas/go-check-sumtype/README.md +++ b/vendor/github.com/alecthomas/go-check-sumtype/README.md @@ -92,15 +92,12 @@ passing checks, set the `-default-signifies-exhasutive=false` flag. As a special case, if the type switch statement contains a `default` clause that always panics, then exhaustiveness checks are still performed. -<<<<<<< HEAD By default, `go-check-sumtype` will not include shared interfaces in the exhaustiviness check. This can be changed by setting the `-include-shared-interfaces=true` flag. When this flag is set, `go-check-sumtype` will not require that all concrete structs are listed in the switch statement, as long as the switch statement is exhaustive with respect to interfaces the structs implement. -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## Details and motivation Sum types are otherwise known as discriminated unions. That is, a sum type is diff --git a/vendor/github.com/alecthomas/go-check-sumtype/check.go b/vendor/github.com/alecthomas/go-check-sumtype/check.go index c286c653a8..ff7fec728a 100644 --- a/vendor/github.com/alecthomas/go-check-sumtype/check.go +++ b/vendor/github.com/alecthomas/go-check-sumtype/check.go @@ -29,11 +29,7 @@ func (e inexhaustiveError) Error() string { // Names returns a sorted list of names corresponding to the missing variant // cases. func (e inexhaustiveError) Names() []string { -<<<<<<< HEAD list := make([]string, 0, len(e.Missing)) -======= - var list []string ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, o := range e.Missing { list = append(list, o.Name()) } @@ -96,13 +92,10 @@ func missingVariantsInSwitch( ) (*sumTypeDef, []types.Object) { asserted := findTypeAssertExpr(swtch) ty := pkg.TypesInfo.TypeOf(asserted) -<<<<<<< HEAD if ty == nil { panic(fmt.Sprintf("no type found for asserted expression: %v", asserted)) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) def := findDef(defs, ty) if def == nil { // We couldn't find a corresponding sum type, so there's @@ -114,19 +107,11 @@ func missingVariantsInSwitch( // A catch-all case defeats all exhaustiveness checks. return def, nil } -<<<<<<< HEAD variantTypes := make([]types.Type, 0, len(variantExprs)) for _, expr := range variantExprs { variantTypes = append(variantTypes, pkg.TypesInfo.TypeOf(expr)) } return def, def.missing(variantTypes, config.IncludeSharedInterfaces) -======= - var variantTypes []types.Type - for _, expr := range variantExprs { - variantTypes = append(variantTypes, pkg.TypesInfo.TypeOf(expr)) - } - return def, def.missing(variantTypes) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // switchVariants returns all case expressions found in a type switch. This diff --git a/vendor/github.com/alecthomas/go-check-sumtype/config.go b/vendor/github.com/alecthomas/go-check-sumtype/config.go index bb09a184c5..5c722b75c4 100644 --- a/vendor/github.com/alecthomas/go-check-sumtype/config.go +++ b/vendor/github.com/alecthomas/go-check-sumtype/config.go @@ -2,10 +2,7 @@ package gochecksumtype type Config struct { DefaultSignifiesExhaustive bool -<<<<<<< HEAD // IncludeSharedInterfaces in the exhaustiviness check. 
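To illustrate the shared-interface behavior the README change describes, a hypothetical sum type in the style go-check-sumtype expects. With -include-shared-interfaces=true, a switch case listing Quadrilateral can stand in for every struct that implements it; with the flag off, each concrete variant must appear for the switch to count as exhaustive.

package sketch

//sumtype:decl
type Shape interface{ isShape() }

type Quadrilateral interface {
	Shape
	isQuadrilateral()
}

type Circle struct{}

func (Circle) isShape() {}

type Square struct{}

func (Square) isShape()         {}
func (Square) isQuadrilateral() {}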
If true, we do not need to list all concrete structs, as long // as the switch statement is exhaustive with respect to interfaces the structs implement. IncludeSharedInterfaces bool -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/alecthomas/go-check-sumtype/def.go b/vendor/github.com/alecthomas/go-check-sumtype/def.go index 01f02cffc5..71bdf2f72d 100644 --- a/vendor/github.com/alecthomas/go-check-sumtype/def.go +++ b/vendor/github.com/alecthomas/go-check-sumtype/def.go @@ -71,11 +71,7 @@ type sumTypeDef struct { // sum type declarations. If no such sum type definition could be found for // any of the given declarations, then an error is returned. func findSumTypeDefs(decls []sumTypeDecl) ([]sumTypeDef, []error) { -<<<<<<< HEAD defs := make([]sumTypeDef, 0, len(decls)) -======= - var defs []sumTypeDef ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var errs []error for _, decl := range decls { def, err := newSumTypeDef(decl.Package.Types, decl) @@ -108,11 +104,7 @@ func newSumTypeDef(pkg *types.Package, decl sumTypeDecl) (*sumTypeDef, error) { return nil, notInterfaceError{decl} } hasUnexported := false -<<<<<<< HEAD for i := range iface.NumMethods() { -======= - for i := 0; i < iface.NumMethods(); i++ { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !iface.Method(i).Exported() { hasUnexported = true break @@ -153,11 +145,7 @@ func (def *sumTypeDef) String() string { // missing returns a list of variants in this sum type that are not in the // given list of types. -<<<<<<< HEAD func (def *sumTypeDef) missing(tys []types.Type, includeSharedInterfaces bool) []types.Object { -======= -func (def *sumTypeDef) missing(tys []types.Type) []types.Object { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // TODO(ag): This is O(n^2). Fix that. /shrug var missing []types.Object for _, v := range def.Variants { @@ -167,7 +155,6 @@ func (def *sumTypeDef) missing(tys []types.Type) []types.Object { ty = indirect(ty) if types.Identical(varty, ty) { found = true -<<<<<<< HEAD break } if includeSharedInterfaces && implements(varty, ty) { @@ -179,26 +166,18 @@ func (def *sumTypeDef) missing(tys []types.Type) []types.Object { // we do not include interfaces extending the sumtype, as the // all implementations of those interfaces are already covered // by the sumtype. -======= - } - } - if !found { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) missing = append(missing, v) } } return missing } -<<<<<<< HEAD func isInterface(ty types.Type) bool { underlying := indirect(ty).Underlying() _, ok := underlying.(*types.Interface) return ok } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // indirect dereferences through an arbitrary number of pointer types. 
func indirect(ty types.Type) types.Type { if ty, ok := ty.(*types.Pointer); ok { @@ -206,7 +185,6 @@ func indirect(ty types.Type) types.Type { } return ty } -<<<<<<< HEAD func implements(varty, interfaceType types.Type) bool { underlying := interfaceType.Underlying() @@ -215,5 +193,3 @@ func implements(varty, interfaceType types.Type) bool { } return false } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go index ba4aa81b5e..a015cc5b20 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go @@ -165,7 +165,6 @@ type Config struct { // Controls how a resolved AWS account ID is handled for endpoint routing. AccountIDEndpointMode AccountIDEndpointMode -<<<<<<< HEAD // RequestChecksumCalculation determines when request checksum calculation is performed. // @@ -193,8 +192,6 @@ type Config struct { // This variable is sourced from environment variable AWS_RESPONSE_CHECKSUM_VALIDATION or // the shared config profile attribute "response_checksum_validation". ResponseChecksumValidation ResponseChecksumValidation -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // NewConfig returns a new Config pointer that can be chained with builder diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go index b11e09ef33..57d5c02ee5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go @@ -3,8 +3,4 @@ package aws // goModuleVersion is the tagged release for this module -<<<<<<< HEAD const goModuleVersion = "1.34.0" -======= -const goModuleVersion = "1.32.4" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go index bd8e4b3c92..01d758d5ff 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go @@ -34,12 +34,9 @@ const ( FeatureMetadata2 ) -<<<<<<< HEAD // Hardcoded value to specify which version of the user agent we're using const uaMetadata = "ua/2.1" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (k SDKAgentKeyType) string() string { switch k { case APIMetadata: @@ -79,7 +76,6 @@ type UserAgentFeature string // Enumerates UserAgentFeature. 
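The two checksum fields added to aws.Config above are plain enum knobs. A hypothetical opt-in, assuming the WhenSupported/WhenRequired constant names; as the field comments note, the same settings can also come from AWS_REQUEST_CHECKSUM_CALCULATION / AWS_RESPONSE_CHECKSUM_VALIDATION or the shared config profile:

package sketch

import "github.com/aws/aws-sdk-go-v2/aws"

// cfg computes request checksums only when the operation requires them,
// but still validates response checksums whenever the service supports it.
var cfg = aws.Config{
	RequestChecksumCalculation: aws.RequestChecksumCalculationWhenRequired,
	ResponseChecksumValidation: aws.ResponseChecksumValidationWhenSupported,
}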
 const (
-<<<<<<< HEAD
     UserAgentFeatureResourceModel UserAgentFeature = "A" // n/a (we don't generate separate resource types)
     UserAgentFeatureWaiter = "B"
     UserAgentFeaturePaginator = "C"
@@ -102,21 +98,6 @@ const (
     UserAgentFeatureRequestChecksumWhenRequired = "a"
     UserAgentFeatureResponseChecksumWhenSupported = "b"
     UserAgentFeatureResponseChecksumWhenRequired = "c"
-=======
-    UserAgentFeatureResourceModel UserAgentFeature = "A" // n/a (we don't generate separate resource types)
-    UserAgentFeatureWaiter = "B"
-    UserAgentFeaturePaginator = "C"
-    UserAgentFeatureRetryModeLegacy = "D" // n/a (equivalent to standard)
-    UserAgentFeatureRetryModeStandard = "E"
-    UserAgentFeatureRetryModeAdaptive = "F"
-    UserAgentFeatureS3Transfer = "G"
-    UserAgentFeatureS3CryptoV1N = "H" // n/a (crypto client is external)
-    UserAgentFeatureS3CryptoV2 = "I" // n/a
-    UserAgentFeatureS3ExpressBucket = "J"
-    UserAgentFeatureS3AccessGrants = "K" // not yet implemented
-    UserAgentFeatureGZIPRequestCompression = "L"
-    UserAgentFeatureProtocolRPCV2CBOR = "M"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 )
 // RequestUserAgent is a build middleware that set the User-Agent for the request.
@@ -138,10 +119,7 @@ type RequestUserAgent struct {
 func NewRequestUserAgent() *RequestUserAgent {
     userAgent, sdkAgent := smithyhttp.NewUserAgentBuilder(), smithyhttp.NewUserAgentBuilder()
     addProductName(userAgent)
-<<<<<<< HEAD
     addUserAgentMetadata(userAgent)
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
     addProductName(sdkAgent)
     r := &RequestUserAgent{
@@ -169,13 +147,10 @@ func addProductName(builder *smithyhttp.UserAgentBuilder) {
     builder.AddKeyValue(aws.SDKName, aws.SDKVersion)
 }
-<<<<<<< HEAD
 func addUserAgentMetadata(builder *smithyhttp.UserAgentBuilder) {
     builder.AddKey(uaMetadata)
 }
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // AddUserAgentKey retrieves a requestUserAgent from the provided stack, or initializes one.
 func AddUserAgentKey(key string) func(*middleware.Stack) error {
     return func(stack *middleware.Stack) error {
@@ -300,17 +275,10 @@ func (u *RequestUserAgent) HandleBuild(ctx context.Context, in middleware.BuildI
 func (u *RequestUserAgent) addHTTPUserAgent(request *smithyhttp.Request) {
     const userAgent = "User-Agent"
-<<<<<<< HEAD
     if len(u.features) > 0 {
         updateHTTPHeader(request, userAgent, buildFeatureMetrics(u.features))
     }
     updateHTTPHeader(request, userAgent, u.userAgent.Build())
-=======
-    updateHTTPHeader(request, userAgent, u.userAgent.Build())
-    if len(u.features) > 0 {
-        updateHTTPHeader(request, userAgent, buildFeatureMetrics(u.features))
-    }
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 func (u *RequestUserAgent) addHTTPSDKAgent(request *smithyhttp.Request) {
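Note on the addHTTPUserAgent hunk above: the HEAD side writes the feature-metrics segment to the User-Agent header before the base product string rather than after it. A minimal, self-contained sketch of that ordering follows; buildUserAgent and the "m/" prefix are illustrative stand-ins, not the vendored SDK API.

package main

import (
    "fmt"
    "strings"
)

// buildUserAgent mirrors the ordering in the patched addHTTPUserAgent:
// when feature metrics are present, the "m/<codes>" segment is emitted
// ahead of the SDK product token.
func buildUserAgent(product string, features []string) string {
    var parts []string
    if len(features) > 0 {
        parts = append(parts, "m/"+strings.Join(features, ","))
    }
    parts = append(parts, product)
    return strings.Join(parts, " ")
}

func main() {
    // "E" and "J" stand in for two of the UserAgentFeature codes above.
    fmt.Println(buildUserAgent("aws-sdk-go-v2/1.34.0", []string{"E", "J"}))
    // Output: m/E,J aws-sdk-go-v2/1.34.0
}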
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go
index 68b63709d4..6669a3ddfd 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go
@@ -1,13 +1,8 @@
 package query
 import (
-<<<<<<< HEAD
     "net/url"
     "strconv"
-=======
-    "fmt"
-    "net/url"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 )
 // Array represents the encoding of Query lists and sets. A Query array is a
@@ -26,24 +21,8 @@ type Array struct {
     // keys for each element in the list. For example, an entry might have the
     // key "ParentStructure.ListName.member.MemberName.1".
     //
-<<<<<<< HEAD
     // When the array is not flat the prefix will contain the memberName otherwise the memberName is ignored
     prefix string
-=======
-    // While this is currently represented as a string that gets added to, it
-    // could also be represented as a stack that only gets condensed into a
-    // string when a finalized key is created. This could potentially reduce
-    // allocations.
-    prefix string
-    // Whether the list is flat or not. A list that is not flat will produce the
-    // following entry to the url.Values for a given entry:
-    // ListName.MemberName.1=value
-    // A list that is flat will produce the following:
-    // ListName.1=value
-    flat bool
-    // The location name of the member. In most cases this should be "member".
-    memberName string
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
     // Elements are stored in values, so we keep track of the list size here.
     size int32
     // Empty lists are encoded as "=", if we add a value later we will
@@ -55,7 +34,6 @@ func newArray(values url.Values, prefix string, flat bool, memberName string) *A
     emptyValue := newValue(values, prefix, flat)
     emptyValue.String("")
-<<<<<<< HEAD
     if !flat {
         // This uses string concatenation in place of fmt.Sprintf as fmt.Sprintf has a much higher resource overhead
         prefix = prefix + keySeparator + memberName
     }
     return &Array{
         values: values,
         prefix: prefix,
-=======
-    return &Array{
-        values: values,
-        prefix: prefix,
-        flat: flat,
-        memberName: memberName,
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
         emptyValue: emptyValue,
     }
 }
@@ -84,16 +55,7 @@ func (a *Array) Value() Value {
     // Query lists start a 1, so adjust the size first
     a.size++
-<<<<<<< HEAD
     // Lists can't have flat members
     // This uses string concatenation in place of fmt.Sprintf as fmt.Sprintf has a much higher resource overhead
     return newValue(a.values, a.prefix+keySeparator+strconv.FormatInt(int64(a.size), 10), false)
-=======
-    prefix := a.prefix
-    if !a.flat {
-        prefix = fmt.Sprintf("%s.%s", prefix, a.memberName)
-    }
-    // Lists can't have flat members
-    return newValue(a.values, fmt.Sprintf("%s.%d", prefix, a.size), false)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go
index f722e25e89..305a8ace30 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go
@@ -1,13 +1,6 @@
 package query
-<<<<<<< HEAD
 import "net/url"
-=======
-import (
-    "fmt"
-    "net/url"
-)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // Object represents the encoding of Query structures and unions. A Query
 // object is a representation of a mapping of string keys to arbitrary
@@ -60,24 +53,16 @@ func (o *Object) FlatKey(name string) Value {
 func (o *Object) key(name string, flatValue bool) Value {
     if o.prefix != "" {
-<<<<<<< HEAD
         // This uses string concatenation in place of fmt.Sprintf as fmt.Sprintf has a much higher resource overhead
         return newValue(o.values, o.prefix+keySeparator+name, flatValue)
-=======
-        return newValue(o.values, fmt.Sprintf("%s.%s", o.prefix, name), flatValue)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
     }
     return newValue(o.values, name, flatValue)
 }
 func (o *Object) keyWithValues(name string, flatValue bool) Value {
     if o.prefix != "" {
-<<<<<<< HEAD
         // This uses string concatenation in place of fmt.Sprintf as fmt.Sprintf has a much higher resource overhead
         return newAppendValue(o.values, o.prefix+keySeparator+name, flatValue)
-=======
-        return newAppendValue(o.values, fmt.Sprintf("%s.%s", o.prefix, name), flatValue)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
     }
     return newAppendValue(o.values, name, flatValue)
 }
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go
index d7b463f04b..8063c592dd 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go
@@ -7,11 +7,8 @@ import (
     "github.com/aws/smithy-go/encoding/httpbinding"
 )
-<<<<<<< HEAD
 const keySeparator = "."
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // Value represents a Query Value type.
 type Value struct {
     // The query values to add the value to.
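The query/array.go and query/object.go hunks above repeatedly trade fmt.Sprintf for plain concatenation plus strconv when building serialized keys, per the in-code rationale about fmt's overhead. A rough, self-contained sketch of that key scheme; addListValue is a hypothetical helper for illustration, not the vendored Array type.

package main

import (
    "fmt"
    "net/url"
    "strconv"
)

const keySeparator = "."

// addListValue encodes one list element the way the patched Array.Value does:
// the key is built with plain concatenation and strconv.FormatInt rather than
// fmt.Sprintf, avoiding fmt's reflection overhead on this hot path.
func addListValue(values url.Values, prefix string, index int32, v string) {
    key := prefix + keySeparator + strconv.FormatInt(int64(index), 10)
    values.Set(key, v)
}

func main() {
    values := url.Values{}
    addListValue(values, "ParentStructure.ListName.member", 1, "foo")
    fmt.Println(values.Encode()) // ParentStructure.ListName.member.1=foo
}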
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go
index 29531ff294..1b485f9988 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go
@@ -116,7 +116,6 @@ func (r RetryableConnectionError) IsErrorRetryable(err error) aws.Ternary {
     case errors.As(err, &conErr) && conErr.ConnectionError():
         retryable = true
-<<<<<<< HEAD
     case strings.Contains(err.Error(), "use of closed network connection"):
         fallthrough
     case strings.Contains(err.Error(), "connection reset"):
@@ -124,9 +123,6 @@ func (r RetryableConnectionError) IsErrorRetryable(err error) aws.Ternary {
         // are effectively the same. It appears to be the difference between
         // sync and async read of TCP RST in the stdlib's net.Conn read loop.
         // see #2737
-=======
-    case strings.Contains(err.Error(), "connection reset"):
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
         retryable = true
     case errors.As(err, &urlErr):
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
index f7bad1cf85..3cb7d14ef4 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
@@ -1,4 +1,3 @@
-<<<<<<< HEAD
 # v1.29.2 (2025-01-24)
 * **Bug Fix**: Fix env config naming and usage of deprecated ioutil
@@ -48,8 +47,6 @@
 * **Dependency Update**: Updated to the latest SDK module versions
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 # v1.28.3 (2024-11-07)
 * **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go
index 2e5b7a52de..09d9b63116 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go
@@ -83,15 +83,12 @@ var defaultAWSConfigResolvers = []awsConfigResolver{
     // Sets the AccountIDEndpointMode if present in env var or shared config profile
     resolveAccountIDEndpointMode,
-<<<<<<< HEAD
     // Sets the RequestChecksumCalculation if present in env var or shared config profile
     resolveRequestChecksumCalculation,
     // Sets the ResponseChecksumValidation if present in env var or shared config profile
     resolveResponseChecksumValidation,
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 // A Config represents a generic configuration value or set of values. This type
@@ -221,11 +218,7 @@ func resolveConfigLoaders(options *LoadOptions) []loader {
     loaders[0] = loadEnvConfig
     // specification of a profile should cause a load failure if it doesn't exist
-<<<<<<< HEAD
     if os.Getenv(awsProfileEnv) != "" || options.SharedConfigProfile != "" {
-=======
-    if os.Getenv(awsProfileEnvVar) != "" || options.SharedConfigProfile != "" {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
         loaders[1] = loadSharedConfig
     } else {
         loaders[1] = loadSharedConfigIgnoreNotExist
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go
index 0783f9eedf..9db507e38e 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go
@@ -5,10 +5,6 @@ import (
     "context"
     "fmt"
     "io"
-<<<<<<< HEAD
-=======
-    "io/ioutil"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
     "os"
     "strconv"
     "strings"
@@ -24,7 +20,6 @@ const CredentialsSourceName = "EnvConfigCredentials"
 // Environment variables that will be read for configuration values.
 const (
-<<<<<<< HEAD
     awsAccessKeyIDEnv = "AWS_ACCESS_KEY_ID"
     awsAccessKeyEnv = "AWS_ACCESS_KEY"
@@ -82,81 +77,18 @@ const (
     awsDisableRequestCompressionEnv = "AWS_DISABLE_REQUEST_COMPRESSION"
     awsRequestMinCompressionSizeBytesEnv = "AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES"
-=======
-    awsAccessKeyIDEnvVar = "AWS_ACCESS_KEY_ID"
-    awsAccessKeyEnvVar = "AWS_ACCESS_KEY"
-
-    awsSecretAccessKeyEnvVar = "AWS_SECRET_ACCESS_KEY"
-    awsSecretKeyEnvVar = "AWS_SECRET_KEY"
-
-    awsSessionTokenEnvVar = "AWS_SESSION_TOKEN"
-
-    awsContainerCredentialsEndpointEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
-    awsContainerCredentialsRelativePathEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
-    awsContainerPProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
-
-    awsRegionEnvVar = "AWS_REGION"
-    awsDefaultRegionEnvVar = "AWS_DEFAULT_REGION"
-
-    awsProfileEnvVar = "AWS_PROFILE"
-    awsDefaultProfileEnvVar = "AWS_DEFAULT_PROFILE"
-
-    awsSharedCredentialsFileEnvVar = "AWS_SHARED_CREDENTIALS_FILE"
-
-    awsConfigFileEnvVar = "AWS_CONFIG_FILE"
-
-    awsCustomCABundleEnvVar = "AWS_CA_BUNDLE"
-
-    awsWebIdentityTokenFilePathEnvVar = "AWS_WEB_IDENTITY_TOKEN_FILE"
-
-    awsRoleARNEnvVar = "AWS_ROLE_ARN"
-    awsRoleSessionNameEnvVar = "AWS_ROLE_SESSION_NAME"
-
-    awsEnableEndpointDiscoveryEnvVar = "AWS_ENABLE_ENDPOINT_DISCOVERY"
-
-    awsS3UseARNRegionEnvVar = "AWS_S3_USE_ARN_REGION"
-
-    awsEc2MetadataServiceEndpointModeEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE"
-
-    awsEc2MetadataServiceEndpointEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT"
-
-    awsEc2MetadataDisabled = "AWS_EC2_METADATA_DISABLED"
-    awsEc2MetadataV1DisabledEnvVar = "AWS_EC2_METADATA_V1_DISABLED"
-
-    awsS3DisableMultiRegionAccessPointEnvVar = "AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS"
-
-    awsUseDualStackEndpoint = "AWS_USE_DUALSTACK_ENDPOINT"
-
-    awsUseFIPSEndpoint = "AWS_USE_FIPS_ENDPOINT"
-
-    awsDefaultMode = "AWS_DEFAULTS_MODE"
-
-    awsRetryMaxAttempts = "AWS_MAX_ATTEMPTS"
-    awsRetryMode = "AWS_RETRY_MODE"
-    awsSdkAppID = "AWS_SDK_UA_APP_ID"
-
-    awsIgnoreConfiguredEndpoints = "AWS_IGNORE_CONFIGURED_ENDPOINT_URLS"
-    awsEndpointURL = "AWS_ENDPOINT_URL"
-
-    awsDisableRequestCompression = "AWS_DISABLE_REQUEST_COMPRESSION"
-    awsRequestMinCompressionSizeBytes = "AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
     awsS3DisableExpressSessionAuthEnv = "AWS_S3_DISABLE_EXPRESS_SESSION_AUTH"
     awsAccountIDEnv = "AWS_ACCOUNT_ID"
     awsAccountIDEndpointModeEnv = "AWS_ACCOUNT_ID_ENDPOINT_MODE"
-<<<<<<< HEAD
     awsRequestChecksumCalculation = "AWS_REQUEST_CHECKSUM_CALCULATION"
     awsResponseChecksumValidation = "AWS_RESPONSE_CHECKSUM_VALIDATION"
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 )
 var (
     credAccessEnvKeys = []string{
-<<<<<<< HEAD
         awsAccessKeyIDEnv,
         awsAccessKeyEnv,
     }
@@ -171,22 +103,6 @@ var (
     profileEnvKeys = []string{
         awsProfileEnv,
         awsDefaultProfileEnv,
-=======
-        awsAccessKeyIDEnvVar,
-        awsAccessKeyEnvVar,
-    }
-    credSecretEnvKeys = []string{
-        awsSecretAccessKeyEnvVar,
-        awsSecretKeyEnvVar,
-    }
-    regionEnvKeys = []string{
-        awsRegionEnvVar,
-        awsDefaultRegionEnvVar,
-    }
-    profileEnvKeys = []string{
-        awsProfileEnvVar,
-        awsDefaultProfileEnvVar,
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
     }
 )
@@ -382,15 +298,12 @@ type EnvConfig struct {
     // Indicates whether account ID will be required/ignored in endpoint2.0 routing
     AccountIDEndpointMode aws.AccountIDEndpointMode
-<<<<<<< HEAD
     // Indicates whether request checksum should be calculated
     RequestChecksumCalculation aws.RequestChecksumCalculation
     // Indicates whether response checksum should be validated
     ResponseChecksumValidation aws.ResponseChecksumValidation
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 // loadEnvConfig reads configuration values from the OS's environment variables.
@@ -411,7 +324,6 @@ func NewEnvConfig() (EnvConfig, error) {
     setStringFromEnvVal(&creds.SecretAccessKey, credSecretEnvKeys)
     if creds.HasKeys() {
         creds.AccountID = os.Getenv(awsAccountIDEnv)
-<<<<<<< HEAD
         creds.SessionToken = os.Getenv(awsSessionTokenEnv)
         cfg.Credentials = creds
     }
@@ -419,20 +331,10 @@
     cfg.ContainerCredentialsEndpoint = os.Getenv(awsContainerCredentialsFullURIEnv)
     cfg.ContainerCredentialsRelativePath = os.Getenv(awsContainerCredentialsRelativeURIEnv)
     cfg.ContainerAuthorizationToken = os.Getenv(awsContainerAuthorizationTokenEnv)
-=======
-        creds.SessionToken = os.Getenv(awsSessionTokenEnvVar)
-        cfg.Credentials = creds
-    }
-
-    cfg.ContainerCredentialsEndpoint = os.Getenv(awsContainerCredentialsEndpointEnvVar)
-    cfg.ContainerCredentialsRelativePath = os.Getenv(awsContainerCredentialsRelativePathEnvVar)
-    cfg.ContainerAuthorizationToken = os.Getenv(awsContainerPProviderAuthorizationEnvVar)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
     setStringFromEnvVal(&cfg.Region, regionEnvKeys)
     setStringFromEnvVal(&cfg.SharedConfigProfile, profileEnvKeys)
-<<<<<<< HEAD
     cfg.SharedCredentialsFile = os.Getenv(awsSharedCredentialsFileEnv)
     cfg.SharedConfigFile = os.Getenv(awsConfigFileEnv)
@@ -495,70 +397,6 @@
     setStringFromEnvVal(&cfg.BaseEndpoint, []string{awsEndpointURLEnv})
     if err := setBoolPtrFromEnvVal(&cfg.IgnoreConfiguredEndpoints, []string{awsIgnoreConfiguredEndpointURLEnv}); err != nil {
-=======
-    cfg.SharedCredentialsFile = os.Getenv(awsSharedCredentialsFileEnvVar)
-    cfg.SharedConfigFile = os.Getenv(awsConfigFileEnvVar)
-
-    cfg.CustomCABundle = os.Getenv(awsCustomCABundleEnvVar)
-
-    cfg.WebIdentityTokenFilePath = os.Getenv(awsWebIdentityTokenFilePathEnvVar)
-
-    cfg.RoleARN = os.Getenv(awsRoleARNEnvVar)
-    cfg.RoleSessionName = os.Getenv(awsRoleSessionNameEnvVar)
-
-    cfg.AppID = os.Getenv(awsSdkAppID)
-
-    if err := setBoolPtrFromEnvVal(&cfg.DisableRequestCompression, []string{awsDisableRequestCompression}); err != nil {
-        return cfg, err
-    }
-    if err := setInt64PtrFromEnvVal(&cfg.RequestMinCompressSizeBytes, []string{awsRequestMinCompressionSizeBytes}, smithyrequestcompression.MaxRequestMinCompressSizeBytes); err != nil {
-        return cfg, err
-    }
-
-    if err := setEndpointDiscoveryTypeFromEnvVal(&cfg.EnableEndpointDiscovery, []string{awsEnableEndpointDiscoveryEnvVar}); err != nil {
-        return cfg, err
-    }
-
-    if err := setBoolPtrFromEnvVal(&cfg.S3UseARNRegion, []string{awsS3UseARNRegionEnvVar}); err != nil {
-        return cfg, err
-    }
-
-    setEC2IMDSClientEnableState(&cfg.EC2IMDSClientEnableState, []string{awsEc2MetadataDisabled})
-    if err := setEC2IMDSEndpointMode(&cfg.EC2IMDSEndpointMode, []string{awsEc2MetadataServiceEndpointModeEnvVar}); err != nil {
-        return cfg, err
-    }
-    cfg.EC2IMDSEndpoint = os.Getenv(awsEc2MetadataServiceEndpointEnvVar)
-    if err := setBoolPtrFromEnvVal(&cfg.EC2IMDSv1Disabled, []string{awsEc2MetadataV1DisabledEnvVar}); err != nil {
-        return cfg, err
-    }
-
-    if err := setBoolPtrFromEnvVal(&cfg.S3DisableMultiRegionAccessPoints, []string{awsS3DisableMultiRegionAccessPointEnvVar}); err != nil {
-        return cfg, err
-    }
-
-    if err := setUseDualStackEndpointFromEnvVal(&cfg.UseDualStackEndpoint, []string{awsUseDualStackEndpoint}); err != nil {
-        return cfg, err
-    }
-
-    if err := setUseFIPSEndpointFromEnvVal(&cfg.UseFIPSEndpoint, []string{awsUseFIPSEndpoint}); err != nil {
-        return cfg, err
-    }
-
-    if err := setDefaultsModeFromEnvVal(&cfg.DefaultsMode, []string{awsDefaultMode}); err != nil {
-        return cfg, err
-    }
-
-    if err := setIntFromEnvVal(&cfg.RetryMaxAttempts, []string{awsRetryMaxAttempts}); err != nil {
-        return cfg, err
-    }
-    if err := setRetryModeFromEnvVal(&cfg.RetryMode, []string{awsRetryMode}); err != nil {
-        return cfg, err
-    }
-
-    setStringFromEnvVal(&cfg.BaseEndpoint, []string{awsEndpointURL})
-
-    if err := setBoolPtrFromEnvVal(&cfg.IgnoreConfiguredEndpoints, []string{awsIgnoreConfiguredEndpoints}); err != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
         return cfg, err
     }
@@ -570,7 +408,6 @@ func NewEnvConfig() (EnvConfig, error) {
         return cfg, err
     }
-<<<<<<< HEAD
     if err := setRequestChecksumCalculationFromEnvVal(&cfg.RequestChecksumCalculation, []string{awsRequestChecksumCalculation}); err != nil {
         return cfg, err
     }
@@ -578,8 +415,6 @@
         return cfg, err
     }
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
     return cfg, nil
 }
@@ -612,7 +447,6 @@ func (c EnvConfig) getAccountIDEndpointMode(context.Context) (aws.AccountIDEndpo
     return c.AccountIDEndpointMode, len(c.AccountIDEndpointMode) > 0, nil
 }
-<<<<<<< HEAD
 func (c EnvConfig) getRequestChecksumCalculation(context.Context) (aws.RequestChecksumCalculation, bool, error) {
     return c.RequestChecksumCalculation, c.RequestChecksumCalculation > 0, nil
 }
@@ -621,8 +455,6 @@ func (c EnvConfig) getResponseChecksumValidation(context.Context) (aws.ResponseC
     return c.ResponseChecksumValidation, c.ResponseChecksumValidation > 0, nil
 }
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // GetRetryMaxAttempts returns the value of AWS_MAX_ATTEMPTS if was specified,
 // and not 0.
 func (c EnvConfig) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) {
@@ -719,7 +551,6 @@ func setAIDEndPointModeFromEnvVal(m *aws.AccountIDEndpointMode, keys []string) e
     return nil
 }
-<<<<<<< HEAD
 func setRequestChecksumCalculationFromEnvVal(m *aws.RequestChecksumCalculation, keys []string) error {
     for _, k := range keys {
         value := os.Getenv(k)
@@ -759,8 +590,6 @@ func setResponseChecksumValidationFromEnvVal(m *aws.ResponseChecksumValidation,
     return nil
 }
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // GetRegion returns the AWS Region if set in the environment. Returns an empty
 // string if not set.
 func (c EnvConfig) getRegion(ctx context.Context) (string, bool, error) {
@@ -817,11 +646,7 @@ func (c EnvConfig) getCustomCABundle(context.Context) (io.Reader, bool, error) {
         return nil, false, nil
     }
-<<<<<<< HEAD
     b, err := os.ReadFile(c.CustomCABundle)
-=======
-    b, err := ioutil.ReadFile(c.CustomCABundle)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
     if err != nil {
         return nil, false, err
     }
@@ -845,11 +670,7 @@ func (c EnvConfig) getBaseEndpoint(context.Context) (string, bool, error) {
 // GetServiceBaseEndpoint is used to retrieve a normalized SDK ID for use
 // with configured endpoints.
 func (c EnvConfig) GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) {
-<<<<<<< HEAD
     if endpt := os.Getenv(fmt.Sprintf("%s_%s", awsEndpointURLEnv, normalizeEnv(sdkID))); endpt != "" {
-=======
-    if endpt := os.Getenv(fmt.Sprintf("%s_%s", awsEndpointURL, normalizeEnv(sdkID))); endpt != "" {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
         return endpt, true, nil
     }
     return "", false, nil
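On the env_config.go hunks above: the renamed key lists (regionEnvKeys, profileEnvKeys, and friends) keep their ordering, and the setStringFromEnvVal-style loaders take the first non-empty variable, which is why AWS_REGION wins over AWS_DEFAULT_REGION. A small sketch of that first-wins lookup; firstEnv is illustrative, the real loader writes into EnvConfig fields.

package main

import (
    "fmt"
    "os"
)

// firstEnv mirrors the loader's pattern: keys are consulted in order
// and the first non-empty value is returned.
func firstEnv(keys ...string) string {
    for _, k := range keys {
        if v := os.Getenv(k); v != "" {
            return v
        }
    }
    return ""
}

func main() {
    os.Setenv("AWS_DEFAULT_REGION", "us-west-2")
    os.Setenv("AWS_REGION", "eu-central-1")
    // AWS_REGION is listed first, so it takes precedence.
    fmt.Println(firstEnv("AWS_REGION", "AWS_DEFAULT_REGION")) // eu-central-1
}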
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
index e14a79ef64..1859fe9316 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
@@ -3,8 +3,4 @@ package config
 // goModuleVersion is the tagged release for this module
-<<<<<<< HEAD
 const goModuleVersion = "1.29.2"
-=======
-const goModuleVersion = "1.28.3"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go
index 3156c563da..0810ecf16a 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go
@@ -216,7 +216,6 @@ type LoadOptions struct {
     // Whether S3 Express auth is disabled.
     S3DisableExpressAuth *bool
-<<<<<<< HEAD
     // Whether account id should be built into endpoint resolution
     AccountIDEndpointMode aws.AccountIDEndpointMode
@@ -226,10 +225,6 @@ type LoadOptions struct {
     // Specifies if response checksum should be validated
     ResponseChecksumValidation aws.ResponseChecksumValidation
-=======
-    AccountIDEndpointMode aws.AccountIDEndpointMode
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
     // Service endpoint override. This value is not necessarily final and is
     // passed to the service's EndpointResolverV2 for further delegation.
     BaseEndpoint string
@@ -300,7 +295,6 @@ func (o LoadOptions) getAccountIDEndpointMode(ctx context.Context) (aws.AccountI
     return o.AccountIDEndpointMode, len(o.AccountIDEndpointMode) > 0, nil
 }
-<<<<<<< HEAD
 func (o LoadOptions) getRequestChecksumCalculation(ctx context.Context) (aws.RequestChecksumCalculation, bool, error) {
     return o.RequestChecksumCalculation, o.RequestChecksumCalculation > 0, nil
 }
@@ -309,8 +303,6 @@ func (o LoadOptions) getResponseChecksumValidation(ctx context.Context) (aws.Res
     return o.ResponseChecksumValidation, o.ResponseChecksumValidation > 0, nil
 }
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (o LoadOptions) getBaseEndpoint(context.Context) (string, bool, error) {
     return o.BaseEndpoint, o.BaseEndpoint != "", nil
 }
@@ -380,7 +372,6 @@ func WithAccountIDEndpointMode(m aws.AccountIDEndpointMode) LoadOptionsFunc {
     }
 }
-<<<<<<< HEAD
 // WithRequestChecksumCalculation is a helper function to construct functional options
 // that sets RequestChecksumCalculation on config's LoadOptions
 func WithRequestChecksumCalculation(c aws.RequestChecksumCalculation) LoadOptionsFunc {
@@ -401,8 +392,6 @@ func WithResponseChecksumValidation(v aws.ResponseChecksumValidation) LoadOption
     }
 }
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // getDefaultRegion returns DefaultRegion from config's LoadOptions
 func (o LoadOptions) getDefaultRegion(ctx context.Context) (string, bool, error) {
     if len(o.DefaultRegion) == 0 {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go
index bb739f84be..a8ff40d846 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go
@@ -242,7 +242,6 @@ func getAccountIDEndpointMode(ctx context.Context, configs configs) (value aws.A
     return
 }
-<<<<<<< HEAD
 // requestChecksumCalculationProvider provides access to the RequestChecksumCalculation
 type requestChecksumCalculationProvider interface {
     getRequestChecksumCalculation(context.Context) (aws.RequestChecksumCalculation, bool, error)
@@ -277,8 +276,6 @@ func getResponseChecksumValidation(ctx context.Context, configs configs) (value
     return
 }
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // ec2IMDSRegionProvider provides access to the ec2 imds region
 // configuration value
 type ec2IMDSRegionProvider interface {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go
index c2df1b398e..a68bd0993f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go
@@ -182,7 +182,6 @@ func resolveAccountIDEndpointMode(ctx context.Context, cfg *aws.Config, configs
     return nil
 }
-<<<<<<< HEAD
 // resolveRequestChecksumCalculation extracts the RequestChecksumCalculation from the configs slice's
 // SharedConfig or EnvConfig
 func resolveRequestChecksumCalculation(ctx context.Context, cfg *aws.Config, configs configs) error {
@@ -213,8 +212,6 @@ func resolveResponseChecksumValidation(ctx context.Context, cfg *aws.Config, con
     return nil
 }
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // resolveDefaultRegion extracts the first instance of a default region and sets `aws.Config.Region` to the default
 // region if region had not been resolved from other sources.
 func resolveDefaultRegion(ctx context.Context, cfg *aws.Config, configs configs) error {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go
index 4ee4a4fec3..00b071fe6f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go
@@ -118,14 +118,11 @@ const (
     accountIDKey = "aws_account_id"
     accountIDEndpointMode = "account_id_endpoint_mode"
-<<<<<<< HEAD
     requestChecksumCalculationKey = "request_checksum_calculation"
     responseChecksumValidationKey = "response_checksum_validation"
     checksumWhenSupported = "when_supported"
     checksumWhenRequired = "when_required"
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 )
 // defaultSharedConfigProfile allows for swapping the default profile for testing
@@ -354,15 +351,12 @@ type SharedConfig struct {
     S3DisableExpressAuth *bool
     AccountIDEndpointMode aws.AccountIDEndpointMode
-<<<<<<< HEAD
     // RequestChecksumCalculation indicates if the request checksum should be calculated
     RequestChecksumCalculation aws.RequestChecksumCalculation
     // ResponseChecksumValidation indicates if the response checksum should be validated
     ResponseChecksumValidation aws.ResponseChecksumValidation
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 func (c SharedConfig) getDefaultsMode(ctx context.Context) (value aws.DefaultsMode, ok bool, err error) {
@@ -1150,7 +1144,6 @@ func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) er
         return fmt.Errorf("failed to load %s from shared config, %w", accountIDEndpointMode, err)
     }
-<<<<<<< HEAD
     if err := updateRequestChecksumCalculation(&c.RequestChecksumCalculation, section, requestChecksumCalculationKey); err != nil {
         return fmt.Errorf("failed to load %s from shared config, %w", requestChecksumCalculationKey, err)
     }
@@ -1158,8 +1151,6 @@
         return fmt.Errorf("failed to load %s from shared config, %w", responseChecksumValidationKey, err)
     }
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
     // Shared Credentials
     creds := aws.Credentials{
         AccessKeyID: section.String(accessKeyIDKey),
@@ -1234,7 +1225,6 @@ func updateAIDEndpointMode(m *aws.AccountIDEndpointMode, sec ini.Section, key st
     return nil
 }
-<<<<<<< HEAD
 func updateRequestChecksumCalculation(m *aws.RequestChecksumCalculation, sec ini.Section, key string) error {
     if !sec.Has(key) {
         return nil
@@ -1271,8 +1261,6 @@ func updateResponseChecksumValidation(m *aws.ResponseChecksumValidation, sec ini
     return nil
 }
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (c SharedConfig) getRequestMinCompressSizeBytes(ctx context.Context) (int64, bool, error) {
     if c.RequestMinCompressSizeBytes == nil {
         return 0, false, nil
@@ -1291,7 +1279,6 @@ func (c SharedConfig) getAccountIDEndpointMode(ctx context.Context) (aws.Account
     return c.AccountIDEndpointMode, len(c.AccountIDEndpointMode) > 0, nil
 }
-<<<<<<< HEAD
 func (c SharedConfig) getRequestChecksumCalculation(ctx context.Context) (aws.RequestChecksumCalculation, bool, error) {
     return c.RequestChecksumCalculation, c.RequestChecksumCalculation > 0, nil
 }
@@ -1300,8 +1287,6 @@ func (c SharedConfig) getResponseChecksumValidation(ctx context.Context) (aws.Re
     return c.ResponseChecksumValidation, c.ResponseChecksumValidation > 0, nil
 }
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func updateDefaultsMode(mode *aws.DefaultsMode, section ini.Section, key string) error {
     if !section.Has(key) {
         return nil
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
index 517cacec7a..8a3ed35bed 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
@@ -1,4 +1,3 @@
-<<<<<<< HEAD
 # v1.17.55 (2025-01-24)
 * **Dependency Update**: Updated to the latest SDK module versions
@@ -46,8 +45,6 @@
 * **Dependency Update**: Updated to the latest SDK module versions
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 # v1.17.44 (2024-11-07)
 * **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
index bb44597712..fd852ab0c3 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
@@ -3,8 +3,4 @@ package credentials
 // goModuleVersion is the tagged release for this module
-<<<<<<< HEAD
 const goModuleVersion = "1.17.55"
-=======
-const goModuleVersion = "1.17.44"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
index fdb7ff883c..3154dfa30f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
@@ -1,4 +1,3 @@
-<<<<<<< HEAD
 # v1.16.25 (2025-01-24)
 * **Dependency Update**: Updated to the latest SDK module versions
@@ -25,8 +24,6 @@
 * **Dependency Update**: Update to smithy-go v1.22.1.
 * **Dependency Update**: Updated to the latest SDK module versions
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 # v1.16.19 (2024-11-06)
 * **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
index 5e91ba28ab..cb091ba334 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
@@ -3,8 +3,4 @@ package imds
 // goModuleVersion is the tagged release for this module
-<<<<<<< HEAD
 const goModuleVersion = "1.16.25"
-=======
-const goModuleVersion = "1.16.19"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
index 9489ff2a5e..9818ebc724 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
@@ -1,4 +1,3 @@
-<<<<<<< HEAD
 # v1.3.29 (2025-01-24)
 * **Dependency Update**: Updated to the latest SDK module versions
@@ -25,8 +24,6 @@
 * **Dependency Update**: Update to smithy-go v1.22.1.
 * **Dependency Update**: Updated to the latest SDK module versions
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 # v1.3.23 (2024-11-06)
 * **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
index 313517304c..3976533036 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
@@ -3,8 +3,4 @@ package configsources
 // goModuleVersion is the tagged release for this module
-<<<<<<< HEAD
 const goModuleVersion = "1.3.29"
-=======
-const goModuleVersion = "1.3.23"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json
index 71a945af89..43f6449be3 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json
@@ -47,12 +47,9 @@
     "ap-southeast-5" : {
         "description" : "Asia Pacific (Malaysia)"
     },
-<<<<<<< HEAD
     "ap-southeast-7" : {
         "description" : "Asia Pacific (Thailand)"
     },
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
     "aws-global" : {
         "description" : "AWS Standard global region"
     },
@@ -95,12 +92,9 @@
     "me-south-1" : {
         "description" : "Middle East (Bahrain)"
     },
-<<<<<<< HEAD
     "mx-central-1" : {
         "description" : "Mexico (Central)"
     },
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
     "sa-east-1" : {
         "description" : "South America (Sao Paulo)"
     },
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
index 92195c00bd..d30a3176b5 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
@@ -1,4 +1,3 @@
-<<<<<<< HEAD
 # v2.6.29 (2025-01-24)
 * **Dependency Update**: Updated to the latest SDK module versions
@@ -26,8 +25,6 @@
 * **Dependency Update**: Update to smithy-go v1.22.1.
 * **Dependency Update**: Updated to the latest SDK module versions
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 # v2.6.23 (2024-11-06)
 * **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go
index d94e914833..994cb44cf8 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go
@@ -3,8 +3,4 @@ package endpoints
 // goModuleVersion is the tagged release for this module
-<<<<<<< HEAD
 const goModuleVersion = "2.6.29"
-=======
-const goModuleVersion = "2.6.23"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md
index 465a27c6ea..1d23b9be22 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md
@@ -1,10 +1,7 @@
-<<<<<<< HEAD
 # v1.8.2 (2025-01-24)
 * **Bug Fix**: Refactor filepath.Walk to filepath.WalkDir
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 # v1.8.1 (2024-08-15)
 * **Dependency Update**: Bump minimum Go version to 1.21.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go
index cb5dc76a60..355ed39e11 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go
@@ -3,8 +3,4 @@ package ini
 // goModuleVersion is the tagged release for this module
-<<<<<<< HEAD
 const goModuleVersion = "1.8.2"
-=======
-const goModuleVersion = "1.8.1"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md
index 432c4d0a13..ef78753a92 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md
@@ -1,4 +1,3 @@
-<<<<<<< HEAD
 # v1.12.2 (2025-01-24)
 * **Dependency Update**: Upgrade to smithy-go v1.22.2.
@@ -7,8 +6,6 @@
 * **Dependency Update**: Update to smithy-go v1.22.1.
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 # v1.12.0 (2024-10-04)
 * **Feature**: Add support for HTTP client metrics.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go
index d7ed9406be..cbf79b401d 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go
@@ -3,8 +3,4 @@ package acceptencoding
 // goModuleVersion is the tagged release for this module
-<<<<<<< HEAD
 const goModuleVersion = "1.12.2"
-=======
-const goModuleVersion = "1.12.0"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md
index 077ab4ecde..715587f519 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md
@@ -1,4 +1,3 @@
-<<<<<<< HEAD
 # v1.12.10 (2025-01-24)
 * **Dependency Update**: Updated to the latest SDK module versions
@@ -25,8 +24,6 @@
 * **Dependency Update**: Update to smithy-go v1.22.1.
 * **Dependency Update**: Updated to the latest SDK module versions
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 # v1.12.4 (2024-11-06)
 * **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go
index 0c99c6f414..8d8a4c9faa 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go
@@ -3,8 +3,4 @@ package presignedurl
 // goModuleVersion is the tagged release for this module
-<<<<<<< HEAD
 const goModuleVersion = "1.12.10"
-=======
-const goModuleVersion = "1.12.4"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md
index 4685f65fb0..bc175840da 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md
@@ -1,4 +1,3 @@
-<<<<<<< HEAD
 # v1.37.14 (2025-01-24)
 * **Dependency Update**: Updated to the latest SDK module versions
@@ -37,8 +36,6 @@
 * **Dependency Update**: Update to smithy-go v1.22.1.
 * **Dependency Update**: Updated to the latest SDK module versions
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 # v1.37.5 (2024-11-07)
 * **Bug Fix**: Adds case-insensitive handling of error message fields in service responses
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go
index ce77e2ac3e..320afdf250 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go
@@ -685,11 +685,7 @@ func addRetry(stack *middleware.Stack, o Options) error {
         m.LogAttempts = o.ClientLogMode.IsRetries()
         m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/kms")
     })
-<<<<<<< HEAD
     if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil {
-=======
-    if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
         return err
     }
     if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go
index 88d2cb9d1c..a57e7a29f8 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go
@@ -3,8 +3,4 @@ package kms
 // goModuleVersion is the tagged release for this module
-<<<<<<< HEAD
 const goModuleVersion = "1.37.14"
-=======
-const goModuleVersion = "1.37.5"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints/endpoints.go
index 461f371f0d..706d1dd418 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints/endpoints.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints/endpoints.go
@@ -365,7 +365,6 @@ var defaultPartitions = endpoints.Partitions{
         Deprecated: aws.TrueTernary,
     },
     endpoints.EndpointKey{
-<<<<<<< HEAD
         Region: "ap-southeast-7",
     }: endpoints.Endpoint{},
     endpoints.EndpointKey{
@@ -384,8 +383,6 @@ var defaultPartitions = endpoints.Partitions{
         Deprecated: aws.TrueTernary,
     },
     endpoints.EndpointKey{
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
         Region: "ca-central-1",
     }: endpoints.Endpoint{},
     endpoints.EndpointKey{
@@ -620,7 +617,6 @@ var defaultPartitions = endpoints.Partitions{
         Deprecated: aws.TrueTernary,
     },
     endpoints.EndpointKey{
-<<<<<<< HEAD
         Region: "mx-central-1",
     }: endpoints.Endpoint{},
     endpoints.EndpointKey{
@@ -639,8 +635,6 @@ var defaultPartitions = endpoints.Partitions{
         Deprecated: aws.TrueTernary,
     },
     endpoints.EndpointKey{
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
         Region: "sa-east-1",
     }: endpoints.Endpoint{},
     endpoints.EndpointKey{
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md
index bd100f7037..d8f0d5d1ef 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md
@@ -1,4 +1,3 @@
-<<<<<<< HEAD
 # v1.24.12 (2025-01-24)
 * **Dependency Update**: Updated to the latest SDK module versions
@@ -29,8 +28,6 @@
 * **Dependency Update**: Update to smithy-go v1.22.1.
 * **Dependency Update**: Updated to the latest SDK module versions
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 # v1.24.5 (2024-11-07)
 * **Bug Fix**: Adds case-insensitive handling of error message fields in service responses
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go
index cf9812ea3c..0b244f142c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go
@@ -684,11 +684,7 @@ func addRetry(stack *middleware.Stack, o Options) error {
         m.LogAttempts = o.ClientLogMode.IsRetries()
         m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sso")
     })
-<<<<<<< HEAD
     if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil {
-=======
-    if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
         return err
     }
     if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
index 3855509558..3bdb12089c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
@@ -3,8 +3,4 @@ package sso
 // goModuleVersion is the tagged release for this module
-<<<<<<< HEAD
 const goModuleVersion = "1.24.12"
-=======
-const goModuleVersion = "1.24.5"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md
index 6d591c068c..2e65069572 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md
@@ -1,4 +1,3 @@
-<<<<<<< HEAD
 # v1.28.11 (2025-01-24)
 * **Documentation**: Fixed typos in the descriptions.
@@ -30,8 +29,6 @@
 * **Dependency Update**: Update to smithy-go v1.22.1.
 * **Dependency Update**: Updated to the latest SDK module versions
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 # v1.28.4 (2024-11-06)
 * **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go
index 3c54315d42..9b7f4acc84 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go
@@ -684,11 +684,7 @@ func addRetry(stack *middleware.Stack, o Options) error {
         m.LogAttempts = o.ClientLogMode.IsRetries()
         m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/ssooidc")
     })
-<<<<<<< HEAD
     if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil {
-=======
-    if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
         return err
     }
     if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go
index 4fc50898fb..2ab3524479 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go
@@ -12,11 +12,7 @@ import (
 // Creates and returns access and refresh tokens for clients that are
 // authenticated using client secrets. The access token can be used to fetch
-<<<<<<< HEAD
 // short-lived credentials for the assigned AWS accounts or to access application
-=======
-// short-term credentials for the assigned AWS accounts or to access application
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // APIs using bearer authentication.
 func (c *Client) CreateToken(ctx context.Context, params *CreateTokenInput, optFns ...func(*Options)) (*CreateTokenOutput, error) {
     if params == nil {
@@ -47,37 +43,21 @@ type CreateTokenInput struct {
     // This member is required.
     ClientSecret *string
-<<<<<<< HEAD
     // Supports the following OAuth grant types: Authorization Code, Device Code, and
     // Refresh Token. Specify one of the following values, depending on the grant type
     // that you want:
     //
     // * Authorization Code - authorization_code
-=======
-    // Supports the following OAuth grant types: Device Code and Refresh Token.
-    // Specify either of the following values, depending on the grant type that you
-    // want:
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
     //
     // * Device Code - urn:ietf:params:oauth:grant-type:device_code
     //
     // * Refresh Token - refresh_token
     //
-<<<<<<< HEAD
-=======
-    // For information about how to obtain the device code, see the StartDeviceAuthorization topic.
-    //
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
     // This member is required.
     GrantType *string
     // Used only when calling this API for the Authorization Code grant type. The
-<<<<<<< HEAD
     // short-lived code is used to identify this authorization request.
-=======
-    // short-term code is used to identify this authorization request. This grant type
-    // is currently unsupported for the CreateTokenAPI.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
     Code *string
     // Used only when calling this API for the Authorization Code grant type. This
@@ -85,15 +65,9 @@ type CreateTokenInput struct {
     // challenge value the client passed at authorization time.
     CodeVerifier *string
-<<<<<<< HEAD
     // Used only when calling this API for the Device Code grant type. This
     // short-lived code is used to identify this authorization request. This comes from
     // the result of the StartDeviceAuthorizationAPI.
-=======
-    // Used only when calling this API for the Device Code grant type. This short-term
-    // code is used to identify this authorization request. This comes from the result
-    // of the StartDeviceAuthorizationAPI.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
     DeviceCode *string
     // Used only when calling this API for the Authorization Code grant type. This
@@ -102,11 +76,7 @@ type CreateTokenInput struct {
     RedirectUri *string
     // Used only when calling this API for the Refresh Token grant type. This token is
-<<<<<<< HEAD
     // used to refresh short-lived tokens, such as the access token, that might expire.
-=======
-    // used to refresh short-term tokens, such as the access token, that might expire.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
     //
     // For more information about the features and limitations of the current IAM
     // Identity Center OIDC implementation, see Considerations for Using this Guide in
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go
index ff02cdab36..e5253ce884 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go
@@ -12,11 +12,7 @@ import (
 // Creates and returns access and refresh tokens for clients and applications that
 // are authenticated using IAM entities. The access token can be used to fetch
-<<<<<<< HEAD
 // short-lived credentials for the assigned Amazon Web Services accounts or to
-=======
-// short-term credentials for the assigned Amazon Web Services accounts or to
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // access application APIs using bearer authentication.
 func (c *Client) CreateTokenWithIAM(ctx context.Context, params *CreateTokenWithIAMInput, optFns ...func(*Options)) (*CreateTokenWithIAMOutput, error) {
     if params == nil {
@@ -63,11 +59,7 @@ type CreateTokenWithIAMInput struct {
     Assertion *string
     // Used only when calling this API for the Authorization Code grant type. This
-<<<<<<< HEAD
     // short-lived code is used to identify this authorization request. The code is
-=======
-    // short-term code is used to identify this authorization request. The code is
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
     // obtained through a redirect from IAM Identity Center to a redirect URI persisted
     // in the Authorization Code GrantOptions for the application.
     Code *string
@@ -83,11 +75,7 @@ type CreateTokenWithIAMInput struct {
     RedirectUri *string
     // Used only when calling this API for the Refresh Token grant type. This token is
-<<<<<<< HEAD
     // used to refresh short-lived tokens, such as the access token, that might expire.
-=======
-    // used to refresh short-term tokens, such as the access token, that might expire.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
     //
     // For more information about the features and limitations of the current IAM
     // Identity Center OIDC implementation, see Considerations for Using this Guide in
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go
index 604ccfce8a..2022270db2 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go
@@ -10,15 +10,9 @@ import (
     smithyhttp "github.com/aws/smithy-go/transport/http"
 )
-<<<<<<< HEAD
 // Registers a public client with IAM Identity Center. This allows clients to
 // perform authorization using the authorization code grant with Proof Key for Code
 // Exchange (PKCE) or the device code grant.
-=======
-// Registers a client with IAM Identity Center. This allows clients to initiate
-// device authorization. The output should be persisted for reuse through many
-// authentication requests.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (c *Client) RegisterClient(ctx context.Context, params *RegisterClientInput, optFns ...func(*Options)) (*RegisterClientOutput, error) {
     if params == nil {
         params = &RegisterClientInput{}
@@ -54,7 +48,6 @@ type RegisterClientInput struct {
     EntitledApplicationArn *string
     // The list of OAuth 2.0 grant types that are defined by the client. This list is
-<<<<<<< HEAD
     // used to restrict the token granting flows available to the client. Supports the
     // following OAuth 2.0 grant types: Authorization Code, Device Code, and Refresh
     // Token.
@@ -64,9 +57,6 @@ type RegisterClientInput struct {
     // * Device Code - urn:ietf:params:oauth:grant-type:device_code
     //
     // * Refresh Token - refresh_token
-=======
-    // used to restrict the token granting flows available to the client.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
     GrantTypes []string
     // The IAM Identity Center Issuer URL associated with an instance of IAM Identity
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go
index 4d0f8b31ef..f3510b18c5 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go
@@ -8,18 +8,12 @@
 // Center. The service also enables the client to fetch the user’s access token
 // upon successful authentication and authorization with IAM Identity Center.
 //
-<<<<<<< HEAD
 // # API namespaces
 //
 // IAM Identity Center uses the sso and identitystore API namespaces. IAM Identity
 // Center OpenID Connect uses the sso-oidc namespace.
 //
 // # Considerations for using this guide
-=======
-// IAM Identity Center uses the sso and identitystore API namespaces.
-// -// # Considerations for Using This Guide ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // Before you begin using this guide, we recommend that you first review the // following important information about how the IAM Identity Center OIDC service diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go index d85a80c1aa..9f78e8f741 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go @@ -3,8 +3,4 @@ package ssooidc // goModuleVersion is the tagged release for this module -<<<<<<< HEAD const goModuleVersion = "1.28.11" -======= -const goModuleVersion = "1.28.4" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md index 0acf9392ec..4a95edf92f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md @@ -1,4 +1,3 @@ -<<<<<<< HEAD # v1.33.10 (2025-01-24) * **Dependency Update**: Updated to the latest SDK module versions @@ -45,8 +44,6 @@ * **Feature**: This release introduces the new API 'AssumeRoot', which returns short-term credentials that you can use to perform privileged tasks. -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) # v1.32.4 (2024-11-06) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go index 1d957a1193..25787325f2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go @@ -688,11 +688,7 @@ func addRetry(stack *middleware.Stack, o Options) error { m.LogAttempts = o.ClientLogMode.IsRetries() m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sts") }) -<<<<<<< HEAD if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil { -======= - if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err } if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go index f47b34cfe8..d056327746 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go @@ -16,11 +16,7 @@ import ( // Amazon Web Services resources. These temporary credentials consist of an access // key ID, a secret access key, and a security token. Typically, you use AssumeRole // within your account or for cross-account access. 
For a comparison of AssumeRole -<<<<<<< HEAD // with other API operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the -======= -// with other API operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Comparing the Amazon Web Services STS API operations] in the ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // IAM User Guide. // // # Permissions @@ -30,7 +26,6 @@ import ( // cannot call the Amazon Web Services STS GetFederationToken or GetSessionToken // API operations. // -<<<<<<< HEAD // (Optional) You can pass inline or managed session policies to this operation. // You can pass a single JSON policy document to use as an inline session policy. // You can also specify up to 10 managed policy Amazon Resource Names (ARNs) to use @@ -41,18 +36,6 @@ import ( // policies. You can use the role's temporary credentials in subsequent Amazon Web // Services API calls to access resources in the account that owns the role. You // cannot use session policies to grant more permissions than those allowed by the -======= -// (Optional) You can pass inline or managed [session policies] to this operation. You can pass a -// single JSON policy document to use as an inline session policy. You can also -// specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed -// session policies. The plaintext that you use for both inline and managed session -// policies can't exceed 2,048 characters. Passing policies to this operation -// returns new temporary credentials. The resulting session's permissions are the -// intersection of the role's identity-based policy and the session policies. You -// can use the role's temporary credentials in subsequent Amazon Web Services API -// calls to access resources in the account that owns the role. You cannot use -// session policies to grant more permissions than those allowed by the ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // identity-based policy of the role that is being assumed. For more information, // see [Session Policies]in the IAM User Guide. 
// @@ -121,16 +104,9 @@ import ( // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session // [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html // [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining -<<<<<<< HEAD // [IAM Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html // [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html // [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html -======= -// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison -// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session -// [IAM Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html -// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html func (c *Client) AssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*Options)) (*AssumeRoleOutput, error) { if params == nil { @@ -164,24 +140,18 @@ type AssumeRoleInput struct { // the temporary security credentials will expose the role session name to the // external account in their CloudTrail logs. // -<<<<<<< HEAD // For security purposes, administrators can view this field in [CloudTrail logs] to help identify // who performed an action in Amazon Web Services. Your administrator might require // that you specify your user name as the session name when you assume the role. // For more information, see [sts:RoleSessionName]sts:RoleSessionName . // -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The regex used to validate this parameter is a string of characters consisting // of upper- and lower-case alphanumeric characters with no spaces. You can also // include underscores or any of the following characters: =,.@- // -<<<<<<< HEAD // [CloudTrail logs]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html#cloudtrail-integration_signin-tempcreds // [sts:RoleSessionName]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html#ck_rolesessionname // -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // This member is required. RoleSessionName *string @@ -200,11 +170,7 @@ type AssumeRoleInput struct { // 43200 seconds (12 hours), depending on the maximum session duration setting for // your role. However, if you assume a role using role chaining and provide a // DurationSeconds parameter value greater than one hour, the operation fails. To -<<<<<<< HEAD // learn how to view the maximum value for your role, see [Update the maximum session duration for a role]. -======= - // learn how to view the maximum value for your role, see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // By default, the value is set to 3600 seconds. 
// @@ -214,11 +180,7 @@ type AssumeRoleInput struct { // parameter that specifies the maximum length of the console session. For more // information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]in the IAM User Guide. // -<<<<<<< HEAD // [Update the maximum session duration for a role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_update-role-settings.html#id_roles_update-session-duration -======= - // [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html DurationSeconds *int32 @@ -263,14 +225,10 @@ type AssumeRoleInput struct { // by percentage how close the policies and tags for your request are to the upper // size limit. // -<<<<<<< HEAD // For more information about role session permissions, see [Session policies]. // // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session // [Session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session -======= - // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Policy *string // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to @@ -325,17 +283,10 @@ type AssumeRoleInput struct { SerialNumber *string // The source identity specified by the principal that is calling the AssumeRole -<<<<<<< HEAD // operation. The source identity value persists across [chained role]sessions. // // You can require users to specify a source identity when they assume a role. You // do this by using the [sts:SourceIdentity]sts:SourceIdentity condition key in a role trust policy. -======= - // operation. - // - // You can require users to specify a source identity when they assume a role. You - // do this by using the sts:SourceIdentity condition key in a role trust policy. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // You can use source identity information in CloudTrail logs to determine who took // actions with a role. You can use the aws:SourceIdentity condition key to // further control access to Amazon Web Services resources based on the value of @@ -344,7 +295,6 @@ type AssumeRoleInput struct { // // The regex used to validate this parameter is a string of characters consisting // of upper- and lower-case alphanumeric characters with no spaces. You can also -<<<<<<< HEAD // include underscores or any of the following characters: +=,.@-. You cannot use a // value that begins with the text aws: . This prefix is reserved for Amazon Web // Services internal use. @@ -352,13 +302,6 @@ type AssumeRoleInput struct { // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html#iam-term-role-chaining // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html // [sts:SourceIdentity]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-sourceidentity -======= - // include underscores or any of the following characters: =,.@-. 
You cannot use a - // value that begins with the text aws: . This prefix is reserved for Amazon Web - // Services internal use. - // - // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) SourceIdentity *string // A list of session tags that you want to pass. Each session tag consists of a @@ -411,13 +354,8 @@ type AssumeRoleInput struct { // a tag key as transitive, the corresponding key and value passes to subsequent // sessions in a role chain. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide. // -<<<<<<< HEAD // This parameter is optional. The transitive status of a session tag does not // impact its packed binary size. -======= - // This parameter is optional. When you set session tags as transitive, the - // session policy and session tags packed binary limit is not affected. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // If you choose not to specify a transitive tag key, then no tags are passed from // this session to any subsequent sessions. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go index 42c71f5c24..d0e117ac92 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go @@ -16,11 +16,7 @@ import ( // mechanism for tying an enterprise identity store or directory to role-based // Amazon Web Services access without user-specific credentials or configuration. // For a comparison of AssumeRoleWithSAML with the other API operations that -<<<<<<< HEAD // produce temporary credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the IAM User Guide. -======= -// produce temporary credentials, see [Requesting Temporary Security Credentials]and [Comparing the Amazon Web Services STS API operations] in the IAM User Guide. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // The temporary security credentials returned by this operation consist of an // access key ID, a secret access key, and a security token. 
Applications can use @@ -134,17 +130,10 @@ import ( // [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session // [Creating a Role for SAML 2.0 Federation]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length -<<<<<<< HEAD // [Creating SAML Identity Providers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html // [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session // [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html // [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html -======= -// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison -// [Creating SAML Identity Providers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html -// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session -// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html // [Configuring a Relying Party and Claims]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html // [Role chaining]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining @@ -230,11 +219,8 @@ type AssumeRoleWithSAMLInput struct { // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage // return (\u000D) characters. // -<<<<<<< HEAD // For more information about role session permissions, see [Session policies]. // -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // An Amazon Web Services conversion compresses the passed inline session policy, // managed policy ARNs, and session tags into a packed binary format that has a // separate limit. Your request can fail for this limit even if your plaintext @@ -243,10 +229,7 @@ type AssumeRoleWithSAMLInput struct { // size limit. // // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session -<<<<<<< HEAD // [Session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Policy *string // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to @@ -324,12 +307,8 @@ type AssumeRoleWithSAMLOutput struct { // allowed space. PackedPolicySize *int32 -<<<<<<< HEAD // The value in the SourceIdentity attribute in the SAML assertion. The source // identity value persists across [chained role]sessions. -======= - // The value in the SourceIdentity attribute in the SAML assertion. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // You can require users to set a source identity value when they assume a role. 
// You do this by using the sts:SourceIdentity condition key in a role trust @@ -346,11 +325,7 @@ type AssumeRoleWithSAMLOutput struct { // of upper- and lower-case alphanumeric characters with no spaces. You can also // include underscores or any of the following characters: =,.@- // -<<<<<<< HEAD // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html#id_roles_terms-and-concepts -======= - // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html SourceIdentity *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go index 1099b87ce3..0ae4bc173e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go @@ -31,11 +31,7 @@ import ( // Services credentials. Instead, the identity of the caller is validated by using // a token from the web identity provider. For a comparison of // AssumeRoleWithWebIdentity with the other API operations that produce temporary -<<<<<<< HEAD // credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the IAM User Guide. -======= -// credentials, see [Requesting Temporary Security Credentials]and [Comparing the Amazon Web Services STS API operations] in the IAM User Guide. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // The temporary security credentials returned by this API consist of an access // key ID, a secret access key, and a security token. Applications can use these @@ -49,11 +45,7 @@ import ( // DurationSeconds parameter to specify the duration of your session. You can // provide a value from 900 seconds (15 minutes) up to the maximum session duration // setting for the role. This setting can have a value from 1 hour to 12 hours. To -<<<<<<< HEAD // learn how to view the maximum value for your role, see [Update the maximum session duration for a role]in the IAM User Guide. -======= -// learn how to view the maximum value for your role, see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The maximum session duration limit applies when you use the AssumeRole* API // operations or the assume-role* CLI commands. However the limit does not apply // when you use those operations to create a console URL. For more information, see @@ -119,50 +111,23 @@ import ( // that you avoid using any personally identifiable information (PII) in this // field. For example, you could instead use a GUID or a pairwise identifier, as [suggested in the OIDC specification]. // -<<<<<<< HEAD // For more information about how to use OIDC federation and the -======= -// For more information about how to use web identity federation and the ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // AssumeRoleWithWebIdentity API, see the following resources: // // [Using Web Identity Federation API Operations for Mobile Apps] // - and [Federation Through a Web-based Identity Provider]. // -<<<<<<< HEAD -======= -// [Web Identity Federation Playground] -// - . 
Walk through the process of authenticating through Login with Amazon, -// Facebook, or Google, getting temporary security credentials, and then using -// those credentials to make a request to Amazon Web Services. -// ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // [Amazon Web Services SDK for iOS Developer Guide] // - and [Amazon Web Services SDK for Android Developer Guide]. These toolkits contain sample apps that show how to invoke the // identity providers. The toolkits then show how to use the information from these // providers to get and use temporary security credentials. // -<<<<<<< HEAD // [Amazon Web Services SDK for iOS Developer Guide]: http://aws.amazon.com/sdkforios/ // [Amazon Web Services SDK for Android Developer Guide]: http://aws.amazon.com/sdkforandroid/ // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length // [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session // [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html // [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html -======= -// [Web Identity Federation with Mobile Applications] -// - . This article discusses web identity federation and shows an example of -// how to use web identity federation to get access to content in Amazon S3. -// -// [Amazon Web Services SDK for iOS Developer Guide]: http://aws.amazon.com/sdkforios/ -// [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session -// [Web Identity Federation Playground]: https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/ -// [Amazon Web Services SDK for Android Developer Guide]: http://aws.amazon.com/sdkforandroid/ -// [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length -// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison -// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session -// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // [Subject]: http://openid.net/specs/openid-connect-core-1_0.html#Claims // [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html // [Amazon Cognito identity pools]: https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html @@ -172,11 +137,7 @@ import ( // [Amazon Cognito federated identities]: https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html // [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html // [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining -<<<<<<< HEAD // [Update the maximum session duration for a role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_update-role-settings.html#id_roles_update-session-duration -======= -// [Web Identity 
Federation with Mobile Applications]: http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // [Using Web Identity Federation API Operations for Mobile Apps]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html // [suggested in the OIDC specification]: http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes func (c *Client) AssumeRoleWithWebIdentity(ctx context.Context, params *AssumeRoleWithWebIdentityInput, optFns ...func(*Options)) (*AssumeRoleWithWebIdentityOutput, error) { @@ -198,7 +159,6 @@ type AssumeRoleWithWebIdentityInput struct { // The Amazon Resource Name (ARN) of the role that the caller is assuming. // -<<<<<<< HEAD // Additional considerations apply to Amazon Cognito identity pools that assume [cross-account IAM roles]. // The trust policies of these roles must accept the cognito-identity.amazonaws.com // service principal and must contain the cognito-identity.amazonaws.com:aud @@ -210,8 +170,6 @@ type AssumeRoleWithWebIdentityInput struct { // [cross-account IAM roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies-cross-account-resource-access.html // [Trust policies for IAM roles in Basic (Classic) authentication]: https://docs.aws.amazon.com/cognito/latest/developerguide/iam-roles.html#trust-policies // -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // This member is required. RoleArn *string @@ -221,39 +179,28 @@ type AssumeRoleWithWebIdentityInput struct { // associated with that user. This session name is included as part of the ARN and // assumed role ID in the AssumedRoleUser response element. // -<<<<<<< HEAD // For security purposes, administrators can view this field in [CloudTrail logs] to help identify // who performed an action in Amazon Web Services. Your administrator might require // that you specify your user name as the session name when you assume the role. // For more information, see [sts:RoleSessionName]sts:RoleSessionName . // -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The regex used to validate this parameter is a string of characters consisting // of upper- and lower-case alphanumeric characters with no spaces. You can also // include underscores or any of the following characters: =,.@- // -<<<<<<< HEAD // [CloudTrail logs]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html#cloudtrail-integration_signin-tempcreds // [sts:RoleSessionName]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html#ck_rolesessionname // -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // This member is required. RoleSessionName *string // The OAuth 2.0 access token or OpenID Connect ID token that is provided by the // identity provider. Your application must get this token by authenticating the // user who is using your application with a web identity provider before the -<<<<<<< HEAD // application makes an AssumeRoleWithWebIdentity call. Timestamps in the token // must be formatted as either an integer or a long integer. Tokens must be signed // using either RSA keys (RS256, RS384, or RS512) or ECDSA keys (ES256, ES384, or // ES512). -======= - // application makes an AssumeRoleWithWebIdentity call. Only tokens with RSA - // algorithms (RS256) are supported. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // This member is required. 
WebIdentityToken *string @@ -295,11 +242,8 @@ type AssumeRoleWithWebIdentityInput struct { // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage // return (\u000D) characters. // -<<<<<<< HEAD // For more information about role session permissions, see [Session policies]. // -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // An Amazon Web Services conversion compresses the passed inline session policy, // managed policy ARNs, and session tags into a packed binary format that has a // separate limit. Your request can fail for this limit even if your plaintext @@ -308,10 +252,7 @@ type AssumeRoleWithWebIdentityInput struct { // size limit. // // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session -<<<<<<< HEAD // [Session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Policy *string // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to @@ -409,11 +350,7 @@ type AssumeRoleWithWebIdentityOutput struct { // of upper- and lower-case alphanumeric characters with no spaces. You can also // include underscores or any of the following characters: =,.@- // -<<<<<<< HEAD // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html#id_roles_terms-and-concepts -======= - // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html // [Using Tokens with User Pools]: https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html SourceIdentity *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go index 6380eaaae2..e2ecc792ac 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go @@ -20,11 +20,7 @@ import ( // credentials of an IAM user. As a result, this call is appropriate in contexts // where those credentials can be safeguarded, usually in a server-based // application. For a comparison of GetFederationToken with the other API -<<<<<<< HEAD // operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the IAM User Guide. -======= -// operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Comparing the Amazon Web Services STS API operations] in the IAM User Guide. 
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // Although it is possible to call GetFederationToken using the security // credentials of an Amazon Web Services account root user rather than an IAM user @@ -108,15 +104,9 @@ import ( // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session // [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html // [GetFederationToken—Federation Through a Custom Identity Broker]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken -<<<<<<< HEAD // [Safeguard your root user credentials and don't use them for everyday tasks]: https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials // [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html // [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html -======= -// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison -// [Safeguard your root user credentials and don't use them for everyday tasks]: https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials -// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html func (c *Client) GetFederationToken(ctx context.Context, params *GetFederationTokenInput, optFns ...func(*Options)) (*GetFederationTokenOutput, error) { if params == nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go index f0096ab3d0..fdc451117b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go @@ -22,11 +22,7 @@ import ( // the call returns, IAM users can then make programmatic calls to API operations // that require MFA authentication. An incorrect MFA code causes the API to return // an access denied error. For a comparison of GetSessionToken with the other API -<<<<<<< HEAD // operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the IAM User Guide. -======= -// operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Comparing the Amazon Web Services STS API operations] in the IAM User Guide. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // No permissions are required for users to perform this operation. The purpose of // the sts:GetSessionToken operation is to authenticate the user using MFA. You @@ -67,17 +63,10 @@ import ( // credentials, see [Temporary Credentials for Users in Untrusted Environments]in the IAM User Guide. 
// // [Permissions for GetSessionToken]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html -<<<<<<< HEAD // [Temporary Credentials for Users in Untrusted Environments]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken // [Safeguard your root user credentials and don't use them for everyday tasks]: https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials // [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html // [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html -======= -// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison -// [Temporary Credentials for Users in Untrusted Environments]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken -// [Safeguard your root user credentials and don't use them for everyday tasks]: https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials -// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (c *Client) GetSessionToken(ctx context.Context, params *GetSessionTokenInput, optFns ...func(*Options)) (*GetSessionTokenOutput, error) { if params == nil { params = &GetSessionTokenInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go index 8ccfba7ba3..59349890f6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go @@ -410,7 +410,6 @@ func awsAwsquery_deserializeOpErrorAssumeRoleWithWebIdentity(response *smithyhtt } } -<<<<<<< HEAD type awsAwsquery_deserializeOpAssumeRoot struct { } @@ -526,8 +525,6 @@ func awsAwsquery_deserializeOpErrorAssumeRoot(response *smithyhttp.Response, met } } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type awsAwsquery_deserializeOpDecodeAuthorizationMessage struct { } @@ -2386,7 +2383,6 @@ func awsAwsquery_deserializeOpDocumentAssumeRoleWithWebIdentityOutput(v **Assume return nil } -<<<<<<< HEAD func awsAwsquery_deserializeOpDocumentAssumeRootOutput(v **AssumeRootOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -2442,8 +2438,6 @@ func awsAwsquery_deserializeOpDocumentAssumeRootOutput(v **AssumeRootOutput, dec return nil } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func awsAwsquery_deserializeOpDocumentDecodeAuthorizationMessageOutput(v **DecodeAuthorizationMessageOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json index dcf8d7d773..70a88452ee 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json @@ -13,10 +13,7 @@ "api_op_AssumeRole.go", "api_op_AssumeRoleWithSAML.go", "api_op_AssumeRoleWithWebIdentity.go", -<<<<<<< HEAD "api_op_AssumeRoot.go", -======= 
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "api_op_DecodeAuthorizationMessage.go", "api_op_GetAccessKeyInfo.go", "api_op_GetCallerIdentity.go", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go index 6bf48e4be2..6db8f82fb5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go @@ -3,8 +3,4 @@ package sts // goModuleVersion is the tagged release for this module -<<<<<<< HEAD const goModuleVersion = "1.33.10" -======= -const goModuleVersion = "1.32.4" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go index bb68d8fb95..4e07994d04 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go @@ -176,12 +176,9 @@ var defaultPartitions = endpoints.Partitions{ Region: "ap-southeast-5", }: endpoints.Endpoint{}, endpoints.EndpointKey{ -<<<<<<< HEAD Region: "ap-southeast-7", }: endpoints.Endpoint{}, endpoints.EndpointKey{ -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Region: "aws-global", }: endpoints.Endpoint{ Hostname: "sts.amazonaws.com", @@ -229,12 +226,9 @@ var defaultPartitions = endpoints.Partitions{ Region: "me-south-1", }: endpoints.Endpoint{}, endpoints.EndpointKey{ -<<<<<<< HEAD Region: "mx-central-1", }: endpoints.Endpoint{}, endpoints.EndpointKey{ -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Region: "sa-east-1", }: endpoints.Endpoint{}, endpoints.EndpointKey{ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go index 94e32462a2..96b222136b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go @@ -226,7 +226,6 @@ func (m *awsAwsquery_serializeOpAssumeRoleWithWebIdentity) HandleSerialize(ctx c return next.HandleSerialize(ctx, in) } -<<<<<<< HEAD type awsAwsquery_serializeOpAssumeRoot struct { } @@ -297,8 +296,6 @@ func (m *awsAwsquery_serializeOpAssumeRoot) HandleSerialize(ctx context.Context, return next.HandleSerialize(ctx, in) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type awsAwsquery_serializeOpDecodeAuthorizationMessage struct { } @@ -894,7 +891,6 @@ func awsAwsquery_serializeOpDocumentAssumeRoleWithWebIdentityInput(v *AssumeRole return nil } -<<<<<<< HEAD func awsAwsquery_serializeOpDocumentAssumeRootInput(v *AssumeRootInput, value query.Value) error { object := value.Object() _ = object @@ -919,8 +915,6 @@ func awsAwsquery_serializeOpDocumentAssumeRootInput(v *AssumeRootInput, value qu return nil } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func awsAwsquery_serializeOpDocumentDecodeAuthorizationMessageInput(v *DecodeAuthorizationMessageInput, value query.Value) error { object := value.Object() _ = object diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go index 76b5392d36..041629bba2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go @@ -95,13 +95,8 @@ func (e *IDPRejectedClaimException) ErrorCode() string { func (e *IDPRejectedClaimException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // The error returned if the message passed to DecodeAuthorizationMessage was -<<<<<<< HEAD // invalid. This can happen if the token contains invalid characters, such as line // breaks, or if the message has expired. -======= -// invalid. This can happen if the token contains invalid characters, such as -// linebreaks. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type InvalidAuthorizationMessageException struct { Message *string @@ -223,17 +218,10 @@ func (e *PackedPolicyTooLargeException) ErrorFault() smithy.ErrorFault { return // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM -<<<<<<< HEAD // console to activate STS in that region. For more information, see [Activating and Deactivating STS in an Amazon Web Services Region]in the IAM // User Guide. // // [Activating and Deactivating STS in an Amazon Web Services Region]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html -======= -// console to activate STS in that region. For more information, see [Activating and Deactivating Amazon Web Services STS in an Amazon Web Services Region]in the IAM -// User Guide. -// -// [Activating and Deactivating Amazon Web Services STS in an Amazon Web Services Region]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type RegionDisabledException struct { Message *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go index 723d773c9d..1026e22118 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go @@ -70,7 +70,6 @@ func (m *validateOpAssumeRoleWithWebIdentity) HandleInitialize(ctx context.Conte return next.HandleInitialize(ctx, in) } -<<<<<<< HEAD type validateOpAssumeRoot struct { } @@ -91,8 +90,6 @@ func (m *validateOpAssumeRoot) HandleInitialize(ctx context.Context, in middlewa return next.HandleInitialize(ctx, in) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type validateOpDecodeAuthorizationMessage struct { } @@ -165,13 +162,10 @@ func addOpAssumeRoleWithWebIdentityValidationMiddleware(stack *middleware.Stack) return stack.Initialize.Add(&validateOpAssumeRoleWithWebIdentity{}, middleware.After) } -<<<<<<< HEAD func addOpAssumeRootValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpAssumeRoot{}, middleware.After) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func addOpDecodeAuthorizationMessageValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpDecodeAuthorizationMessage{}, middleware.After) } @@ -284,7 +278,6 @@ func validateOpAssumeRoleWithWebIdentityInput(v *AssumeRoleWithWebIdentityInput) } } -<<<<<<< HEAD func validateOpAssumeRootInput(v *AssumeRootInput) error { if v == nil { return nil @@ -303,8 +296,6 @@ func validateOpAssumeRootInput(v *AssumeRootInput) error { } } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func validateOpDecodeAuthorizationMessageInput(v 
*DecodeAuthorizationMessageInput) error { if v == nil { return nil diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index 4da18aa2c8..2945185b0b 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -349,11 +349,7 @@ func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile s if cfg.hasSSOTokenProviderConfiguration() { skippedFiles = 0 for _, f := range files { -<<<<<<< HEAD section, ok := f.IniData.GetSection(ssoSectionPrefix + strings.TrimSpace(cfg.SSOSessionName)) -======= - section, ok := f.IniData.GetSection(fmt.Sprintf(ssoSectionPrefix + strings.TrimSpace(cfg.SSOSessionName))) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if ok { var ssoSession ssoSession ssoSession.setFromIniSection(section) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 4761481d6b..7ab65bae79 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,8 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -<<<<<<< HEAD const SDKVersion = "1.55.6" -======= -const SDKVersion = "1.55.5" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter.go index 8a54b52ebd..0c54d99494 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter.go @@ -3,10 +3,7 @@ package dynamodbattribute import ( "bytes" "encoding/json" -<<<<<<< HEAD "errors" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "fmt" "reflect" "runtime" @@ -29,11 +26,7 @@ func ConvertToMap(in interface{}) (item map[string]*dynamodb.AttributeValue, err if e, ok := r.(runtime.Error); ok { err = e } else if s, ok := r.(string); ok { -<<<<<<< HEAD err = errors.New(s) -======= - err = fmt.Errorf(s) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } else { err = r.(error) } @@ -81,11 +74,7 @@ func ConvertFromMap(item map[string]*dynamodb.AttributeValue, v interface{}) (er if e, ok := r.(runtime.Error); ok { err = e } else if s, ok := r.(string); ok { -<<<<<<< HEAD err = errors.New(s) -======= - err = fmt.Errorf(s) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } else { err = r.(error) } @@ -139,11 +128,7 @@ func ConvertToList(in interface{}) (item []*dynamodb.AttributeValue, err error) if e, ok := r.(runtime.Error); ok { err = e } else if s, ok := r.(string); ok { -<<<<<<< HEAD err = errors.New(s) -======= - err = fmt.Errorf(s) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } else { err = r.(error) } @@ -192,11 +177,7 @@ func ConvertFromList(item []*dynamodb.AttributeValue, v interface{}) (err error) if e, ok := r.(runtime.Error); ok { err = e } else if s, ok := r.(string); ok { -<<<<<<< HEAD err = errors.New(s) -======= - err = fmt.Errorf(s) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } else { err = r.(error) } @@ -244,11 +225,7 @@ func ConvertTo(in interface{}) (item *dynamodb.AttributeValue, err error) { if e, ok := r.(runtime.Error); ok { err = e } else if s, ok := r.(string); ok { -<<<<<<< HEAD err = errors.New(s) -======= - err = 
fmt.Errorf(s) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } else { err = r.(error) } @@ -278,11 +255,7 @@ func ConvertFrom(item *dynamodb.AttributeValue, v interface{}) (err error) { if e, ok := r.(runtime.Error); ok { err = e } else if s, ok := r.(string); ok { -<<<<<<< HEAD err = errors.New(s) -======= - err = fmt.Errorf(s) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } else { err = r.(error) } diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md index da44f788cf..de39171cf0 100644 --- a/vendor/github.com/aws/smithy-go/CHANGELOG.md +++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md @@ -1,4 +1,3 @@ -<<<<<<< HEAD # Release (2025-01-21) ## General Highlights @@ -18,8 +17,6 @@ * `github.com/aws/smithy-go`: v1.22.1 * **Bug Fix**: Fix failure to replace URI path segments when their names overlap. -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) # Release (2024-10-03) ## General Highlights diff --git a/vendor/github.com/aws/smithy-go/CONTRIBUTING.md b/vendor/github.com/aws/smithy-go/CONTRIBUTING.md index 106fea678a..1f8d01ff6a 100644 --- a/vendor/github.com/aws/smithy-go/CONTRIBUTING.md +++ b/vendor/github.com/aws/smithy-go/CONTRIBUTING.md @@ -39,7 +39,6 @@ To send us a pull request, please: GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). -<<<<<<< HEAD ### Changelog Documents (You can SKIP this step if you are only changing the code generator, and not the runtime). @@ -71,8 +70,6 @@ These are used to generate the content `CHANGELOG.md` and Release Notes. The for * description: Description of this change. Most of the times is the same as the title of the PR * modules: which Go modules does this change impact. The root module is expressed as "." -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## Finding contributions to work on Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. 
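The dynamodbattribute converter hunks above swap fmt.Errorf(s) for errors.New(s) when re-raising a panic that was recovered as a plain string. A minimal, self-contained sketch of the difference (illustrative only, not part of the vendored code; the message text is made up): fmt.Errorf treats its first argument as a format string, so any '%' in the recovered message is parsed as a verb and go vet rejects the non-constant format string, whereas errors.New stores the message verbatim.

package main

import (
	"errors"
	"fmt"
)

func main() {
	// A recovered panic message that happens to contain a '%'.
	s := "capacity at 100% for the requested table"

	// Interpreted as a format string: the "% f" sequence is parsed as a
	// verb with a missing operand, so the message comes out mangled (and
	// go vet's printf check flags the non-constant format string).
	fmt.Println(fmt.Errorf(s))

	// Stored verbatim -- the behavior the hunks above switch to.
	fmt.Println(errors.New(s))
}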
diff --git a/vendor/github.com/aws/smithy-go/Makefile b/vendor/github.com/aws/smithy-go/Makefile index ef77695f40..a3c2cf173d 100644 --- a/vendor/github.com/aws/smithy-go/Makefile +++ b/vendor/github.com/aws/smithy-go/Makefile @@ -98,7 +98,6 @@ module-version: ############## .PHONY: install-changelog -<<<<<<< HEAD external-changelog: mkdir -p .changelog cp changelog-template.json .changelog/00000000-0000-0000-0000-000000000000.json @@ -106,7 +105,5 @@ external-changelog: @echo "Make sure to rename the file with your new id, like .changelog/12345678-1234-1234-1234-123456789012.json" @echo "See CONTRIBUTING.md 'Changelog Documents' and an example at https://github.com/aws/smithy-go/pull/543/files" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) install-changelog: go install ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION} diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go index 7b4e035f15..9ae308540c 100644 --- a/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go +++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go @@ -22,7 +22,6 @@ func bufCap(b []byte, n int) []byte { // replacePathElement replaces a single element in the path []byte. // Escape is used to control whether the value will be escaped using Amazon path escape style. func replacePathElement(path, fieldBuf []byte, key, val string, escape bool) ([]byte, []byte, error) { -<<<<<<< HEAD // search for "{}". If not found, search for the greedy version "{+}". If none are found, return error fieldBuf = bufCap(fieldBuf, len(key)+2) // { } fieldBuf = append(fieldBuf, uriTokenStart) @@ -45,38 +44,11 @@ func replacePathElement(path, fieldBuf []byte, key, val string, escape bool) ([] encodeSep = false } end := start + len(fieldBuf) -======= - fieldBuf = bufCap(fieldBuf, len(key)+3) // { [+] } - fieldBuf = append(fieldBuf, uriTokenStart) - fieldBuf = append(fieldBuf, key...) - - start := bytes.Index(path, fieldBuf) - end := start + len(fieldBuf) - if start < 0 || len(path[end:]) == 0 { - // TODO what to do about error? - return path, fieldBuf, fmt.Errorf("invalid path index, start=%d,end=%d. %s", start, end, path) - } - - encodeSep := true - if path[end] == uriTokenSkip { - // '+' token means do not escape slashes - encodeSep = false - end++ - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if escape { val = EscapePath(val, encodeSep) } -<<<<<<< HEAD -======= - if path[end] != uriTokenStop { - return path, fieldBuf, fmt.Errorf("invalid path element, does not contain token stop, %s", path) - } - end++ - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) fieldBuf = bufCap(fieldBuf, len(val)) fieldBuf = append(fieldBuf, val...) 
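The path_replace.go hunk above is the fix behind the smithy-go changelog entry quoted earlier ("Fix failure to replace URI path segments when their names overlap"): the removed code searched for the open-ended prefix "{label" without its closing brace, so one label could match inside a longer label beginning with the same characters, while the new code looks for the complete token "{label}" first and only then for the greedy "{label+}" form. A minimal sketch of the overlap, assuming hypothetical {foo} and {foobar} segments:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	path := []byte("/{foobar}/{foo}")

	// Old search: the open-ended prefix "{foo" first matches inside
	// "{foobar}" -- the wrong segment.
	fmt.Println(bytes.Index(path, []byte("{foo"))) // 1

	// Fixed search: the closed token "{foo}" (with "{foo+}" as the greedy
	// fallback in the real code) can only match the intended segment.
	fmt.Println(bytes.Index(path, []byte("{foo}"))) // 10
}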
diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go index a5cf9948b1..a51ceca4ce 100644 --- a/vendor/github.com/aws/smithy-go/go_module_metadata.go +++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go @@ -3,8 +3,4 @@ package smithy // goModuleVersion is the tagged release for this module -<<<<<<< HEAD const goModuleVersion = "1.22.2" -======= -const goModuleVersion = "1.22.0" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/aws/smithy-go/transport/http/host.go b/vendor/github.com/aws/smithy-go/transport/http/host.go index 8e7794c6cd..db9801bea5 100644 --- a/vendor/github.com/aws/smithy-go/transport/http/host.go +++ b/vendor/github.com/aws/smithy-go/transport/http/host.go @@ -69,11 +69,7 @@ func ValidPortNumber(port string) bool { return true } -<<<<<<< HEAD // ValidHostLabel returns whether the label is a valid RFC 3986 host label. -======= -// ValidHostLabel returns whether the label is a valid RFC 3986 host abel. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func ValidHostLabel(label string) bool { if l := len(label); l == 0 || l > 63 { return false diff --git a/vendor/github.com/aws/smithy-go/transport/http/metrics.go b/vendor/github.com/aws/smithy-go/transport/http/metrics.go index f5757282cc..d1beaa595d 100644 --- a/vendor/github.com/aws/smithy-go/transport/http/metrics.go +++ b/vendor/github.com/aws/smithy-go/transport/http/metrics.go @@ -5,10 +5,7 @@ import ( "crypto/tls" "net/http" "net/http/httptrace" -<<<<<<< HEAD "sync/atomic" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "time" "github.com/aws/smithy-go/metrics" @@ -46,17 +43,10 @@ type timedClientDo struct { } func (c *timedClientDo) Do(r *http.Request) (*http.Response, error) { -<<<<<<< HEAD c.hm.doStart.Store(now()) resp, err := c.ClientDo.Do(r) c.hm.DoRequestDuration.Record(r.Context(), c.hm.doStart.Elapsed()) -======= - c.hm.doStart = now() - resp, err := c.ClientDo.Do(r) - - c.hm.DoRequestDuration.Record(r.Context(), elapsed(c.hm.doStart)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return resp, err } @@ -69,17 +59,10 @@ type httpMetrics struct { DoRequestDuration metrics.Float64Histogram // client.http.do_request_duration TimeToFirstByte metrics.Float64Histogram // client.http.time_to_first_byte -<<<<<<< HEAD doStart safeTime dnsStart safeTime connectStart safeTime tlsStart safeTime -======= - doStart time.Time - dnsStart time.Time - connectStart time.Time - tlsStart time.Time ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func newHTTPMetrics(meter metrics.Meter) (*httpMetrics, error) { @@ -133,7 +116,6 @@ func newHTTPMetrics(meter metrics.Meter) (*httpMetrics, error) { } func (m *httpMetrics) DNSStart(httptrace.DNSStartInfo) { -<<<<<<< HEAD m.dnsStart.Store(now()) } @@ -143,17 +125,6 @@ func (m *httpMetrics) ConnectStart(string, string) { func (m *httpMetrics) TLSHandshakeStart() { m.tlsStart.Store(now()) -======= - m.dnsStart = now() -} - -func (m *httpMetrics) ConnectStart(string, string) { - m.connectStart = now() -} - -func (m *httpMetrics) TLSHandshakeStart() { - m.tlsStart = now() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (m *httpMetrics) GotConn(ctx context.Context) func(httptrace.GotConnInfo) { @@ -170,41 +141,25 @@ func (m *httpMetrics) PutIdleConn(ctx context.Context) func(error) { func (m *httpMetrics) DNSDone(ctx context.Context) func(httptrace.DNSDoneInfo) { return func(httptrace.DNSDoneInfo) { -<<<<<<< HEAD 
m.DNSLookupDuration.Record(ctx, m.dnsStart.Elapsed()) -======= - m.DNSLookupDuration.Record(ctx, elapsed(m.dnsStart)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } func (m *httpMetrics) ConnectDone(ctx context.Context) func(string, string, error) { return func(string, string, error) { -<<<<<<< HEAD m.ConnectDuration.Record(ctx, m.connectStart.Elapsed()) -======= - m.ConnectDuration.Record(ctx, elapsed(m.connectStart)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } func (m *httpMetrics) TLSHandshakeDone(ctx context.Context) func(tls.ConnectionState, error) { return func(tls.ConnectionState, error) { -<<<<<<< HEAD m.TLSHandshakeDuration.Record(ctx, m.tlsStart.Elapsed()) -======= - m.TLSHandshakeDuration.Record(ctx, elapsed(m.tlsStart)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } func (m *httpMetrics) GotFirstResponseByte(ctx context.Context) func() { return func() { -<<<<<<< HEAD m.TimeToFirstByte.Record(ctx, m.doStart.Elapsed()) -======= - m.TimeToFirstByte.Record(ctx, elapsed(m.doStart)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -223,7 +178,6 @@ func (m *httpMetrics) addConnIdle(ctx context.Context, incr int64) { }) } -<<<<<<< HEAD type safeTime struct { atomic.Value // time.Time } @@ -240,10 +194,5 @@ func (st *safeTime) Load() time.Time { func (st *safeTime) Elapsed() float64 { end := now() elapsed := end.Sub(st.Load()) -======= -func elapsed(start time.Time) float64 { - end := now() - elapsed := end.Sub(start) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return float64(elapsed) / 1e9 } diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go index ab03f84c5d..914338f2e7 100644 --- a/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go +++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go @@ -2,17 +2,10 @@ package http import ( "context" -<<<<<<< HEAD "io" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/middleware" -======= - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" - "io" - "io/ioutil" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // AddErrorCloseResponseBodyMiddleware adds the middleware to automatically @@ -37,11 +30,7 @@ func (m *errorCloseResponseBodyMiddleware) HandleDeserialize( if err != nil { if resp, ok := out.RawResponse.(*Response); ok && resp != nil && resp.Body != nil { // Consume the full body to prevent TCP connection resets on some platforms -<<<<<<< HEAD _, _ = io.Copy(io.Discard, resp.Body) -======= - _, _ = io.Copy(ioutil.Discard, resp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Do not validate that the response closes successfully. 
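The metrics.go hunks above settle on the safeTime wrapper because httptrace callbacks can fire on goroutines other than the one that stored a start time, so each start time lives behind an atomic.Value instead of a bare time.Time field. A trimmed, runnable reconstruction of that pattern:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// safeTime guards a time.Time with an atomic.Value so it can be written by
// one goroutine (the httptrace hook) and read by another.
type safeTime struct {
	atomic.Value // holds a time.Time
}

func (st *safeTime) Store(v time.Time) { st.Value.Store(v) }

func (st *safeTime) Load() time.Time {
	t, _ := st.Value.Load().(time.Time) // zero time if never stored
	return t
}

// Elapsed reports seconds since the stored start, matching the float64
// seconds unit the Float64Histogram instruments above record.
func (st *safeTime) Elapsed() float64 {
	return float64(time.Since(st.Load())) / 1e9
}

func main() {
	var start safeTime
	start.Store(time.Now())
	time.Sleep(10 * time.Millisecond)
	fmt.Printf("%.3fs\n", start.Elapsed())
}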
resp.Body.Close() } @@ -75,11 +64,7 @@ func (m *closeResponseBody) HandleDeserialize( if resp, ok := out.RawResponse.(*Response); ok { // Consume the full body to prevent TCP connection resets on some platforms -<<<<<<< HEAD _, copyErr := io.Copy(io.Discard, resp.Body) -======= - _, copyErr := io.Copy(ioutil.Discard, resp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if copyErr != nil { middleware.GetLogger(ctx).Logf(logging.Warn, "failed to discard remaining HTTP response body, this may affect connection reuse") } diff --git a/vendor/github.com/aws/smithy-go/transport/http/request.go b/vendor/github.com/aws/smithy-go/transport/http/request.go index 6f8fff497f..5cbf6f10ac 100644 --- a/vendor/github.com/aws/smithy-go/transport/http/request.go +++ b/vendor/github.com/aws/smithy-go/transport/http/request.go @@ -4,10 +4,6 @@ import ( "context" "fmt" "io" -<<<<<<< HEAD -======= - "io/ioutil" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "net/http" "net/url" "strings" @@ -170,11 +166,7 @@ func (r *Request) Build(ctx context.Context) *http.Request { switch stream := r.stream.(type) { case *io.PipeReader: -<<<<<<< HEAD req.Body = io.NopCloser(stream) -======= - req.Body = ioutil.NopCloser(stream) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) req.ContentLength = -1 default: // HTTP Client Request must only have a non-nil body if the @@ -182,11 +174,7 @@ func (r *Request) Build(ctx context.Context) *http.Request { // Client will interpret a non-nil body and ContentLength 0 as // "unknown". This is unwanted behavior. if req.ContentLength != 0 && r.stream != nil { -<<<<<<< HEAD req.Body = iointernal.NewSafeReadCloser(io.NopCloser(stream)) -======= - req.Body = iointernal.NewSafeReadCloser(ioutil.NopCloser(stream)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } diff --git a/vendor/github.com/bombsimon/wsl/v4/.golangci.yml b/vendor/github.com/bombsimon/wsl/v4/.golangci.yml index a5bb92c6b0..bc79b83961 100644 --- a/vendor/github.com/bombsimon/wsl/v4/.golangci.yml +++ b/vendor/github.com/bombsimon/wsl/v4/.golangci.yml @@ -39,27 +39,17 @@ linters: enable-all: true disable: - cyclop -<<<<<<< HEAD - depguard - dupl - dupword - exhaustruct - exportloopref -======= - - deadcode - - depguard - - dupl - - dupword - - exhaustivestruct - - exhaustruct ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) - forbidigo - funlen - gci - gocognit - gocyclo - godox -<<<<<<< HEAD - mnd - lll - maintidx @@ -71,26 +61,6 @@ linters: - rowserrcheck - testpackage - tparallel -======= - - golint - - gomnd - - ifshort - - interfacer - - lll - - maintidx - - maligned - - nakedret - - nestif - - nlreturn - - nosnakecase - - paralleltest - - prealloc - - rowserrcheck - - scopelint - - structcheck - - testpackage - - varcheck ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) - varnamelen - wastedassign @@ -98,8 +68,4 @@ issues: exclude-use-default: true max-issues-per-linter: 0 max-same-issues: 0 -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) # vim: set sw=2 ts=2 et: diff --git a/vendor/github.com/bombsimon/wsl/v4/analyzer.go b/vendor/github.com/bombsimon/wsl/v4/analyzer.go index 3048bf0db5..e51df89c6c 100644 --- a/vendor/github.com/bombsimon/wsl/v4/analyzer.go +++ b/vendor/github.com/bombsimon/wsl/v4/analyzer.go @@ -3,10 +3,7 @@ package wsl import ( "flag" "go/ast" -<<<<<<< HEAD "go/token" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "strings" "golang.org/x/tools/go/analysis" @@ -82,7 +79,6 @@ func (wa 
*wslAnalyzer) flags() flag.FlagSet { func (wa *wslAnalyzer) run(pass *analysis.Pass) (interface{}, error) { for _, file := range pass.Files { -<<<<<<< HEAD filename := getFilename(pass.Fset, file) if !strings.HasSuffix(filename, ".go") { continue @@ -94,14 +90,6 @@ func (wa *wslAnalyzer) run(pass *analysis.Pass) (interface{}, error) { // The file is skipped if the "unadjusted" file is a Go file, and it's a generated file (ex: "_test.go" file). // The other non-Go files are skipped by the first 'if' with the adjusted position. if !wa.config.IncludeGenerated && ast.IsGenerated(file) && strings.HasSuffix(fn, ".go") { -======= - if !wa.config.IncludeGenerated && ast.IsGenerated(file) { - continue - } - - filename := pass.Fset.PositionFor(file.Pos(), false).Filename - if !strings.HasSuffix(filename, ".go") { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) continue } @@ -145,11 +133,7 @@ type multiStringValue struct { // Set implements the flag.Value interface and will overwrite the pointer to the // slice with a new pointer after splitting the flag by comma. func (m *multiStringValue) Set(value string) error { -<<<<<<< HEAD var s []string -======= - s := []string{} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, v := range strings.Split(value, ",") { s = append(s, strings.TrimSpace(v)) @@ -168,7 +152,6 @@ func (m *multiStringValue) String() string { return strings.Join(*m.slicePtr, ", ") } -<<<<<<< HEAD func getFilename(fset *token.FileSet, file *ast.File) string { filename := fset.PositionFor(file.Pos(), true).Filename @@ -178,5 +161,3 @@ func getFilename(fset *token.FileSet, file *ast.File) string { return filename } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/bombsimon/wsl/v4/wsl.go b/vendor/github.com/bombsimon/wsl/v4/wsl.go index 8ddd9f771d..44c7abe219 100644 --- a/vendor/github.com/bombsimon/wsl/v4/wsl.go +++ b/vendor/github.com/bombsimon/wsl/v4/wsl.go @@ -353,11 +353,7 @@ func (p *processor) parseBlockStatements(statements []ast.Stmt) { return false } -<<<<<<< HEAD for j := range n { -======= - for j := 0; j < n; j++ { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) s1 := statements[i+j] s2 := statements[i+j+1] @@ -1117,13 +1113,8 @@ func (p *processor) findLeadingAndTrailingWhitespaces(ident *ast.Ident, stmt, ne return } -<<<<<<< HEAD blockStartLine = p.fileSet.Position(blockStartPos).Line blockEndLine = p.fileSet.Position(blockEndPos).Line -======= - blockStartLine = p.fileSet.PositionFor(blockStartPos, false).Line - blockEndLine = p.fileSet.PositionFor(blockEndPos, false).Line ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // No whitespace possible if LBrace and RBrace is on the same line. 
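The multiStringValue type in the wsl analyzer hunks above is a textbook flag.Value implementation: Set re-splits the raw flag on commas and replaces the slice wholesale, and the HEAD side prefers a nil slice over an eagerly allocated empty one. A runnable sketch of the same pattern under assumed names:

package main

import (
	"flag"
	"fmt"
	"strings"
)

// multiString parses a comma-separated flag into the slice it points at.
type multiString struct{ slicePtr *[]string }

// Set overwrites the target slice each time the flag is parsed.
func (m *multiString) Set(value string) error {
	var s []string
	for _, v := range strings.Split(value, ",") {
		s = append(s, strings.TrimSpace(v))
	}
	*m.slicePtr = s
	return nil
}

func (m *multiString) String() string {
	if m.slicePtr == nil {
		return ""
	}
	return strings.Join(*m.slicePtr, ", ")
}

func main() {
	var allow []string
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	fs.Var(&multiString{slicePtr: &allow}, "allow", "comma-separated list")
	_ = fs.Parse([]string{"-allow", "a, b ,c"})
	fmt.Println(allow) // [a b c]
}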
if blockStartLine == blockEndLine { @@ -1371,7 +1362,6 @@ func isExampleFunc(ident *ast.Ident) bool { } func (p *processor) nodeStart(node ast.Node) int { -<<<<<<< HEAD return p.fileSet.Position(node.Pos()).Line } @@ -1380,16 +1370,6 @@ func (p *processor) nodeEnd(node ast.Node) int { if isEmptyLabeledStmt(node) { return p.fileSet.Position(node.Pos()).Line -======= - return p.fileSet.PositionFor(node.Pos(), false).Line -} - -func (p *processor) nodeEnd(node ast.Node) int { - line := p.fileSet.PositionFor(node.End(), false).Line - - if isEmptyLabeledStmt(node) { - return p.fileSet.PositionFor(node.Pos(), false).Line ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return line @@ -1428,11 +1408,7 @@ func (p *processor) addErrorRange(reportAt, start, end token.Pos, reason string) } func (p *processor) addWarning(w string, pos token.Pos, t interface{}) { -<<<<<<< HEAD position := p.fileSet.Position(pos) -======= - position := p.fileSet.PositionFor(pos, false) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.warnings = append(p.warnings, fmt.Sprintf("%s:%d: %s (%T)", position.Filename, position.Line, w, t), diff --git a/vendor/github.com/buildkite/agent/v3/api/artifacts.go b/vendor/github.com/buildkite/agent/v3/api/artifacts.go index 3d61432f9f..351345de9d 100644 --- a/vendor/github.com/buildkite/agent/v3/api/artifacts.go +++ b/vendor/github.com/buildkite/agent/v3/api/artifacts.go @@ -51,7 +51,6 @@ type Artifact struct { } type ArtifactBatch struct { -<<<<<<< HEAD ID string `json:"id"` Artifacts []*Artifact `json:"artifacts"` UploadDestination string `json:"upload_destination"` @@ -93,27 +92,6 @@ type ArtifactBatchCreateResponse struct { // uploads. It overrides InstructionTemplate and should not contain // interpolations. Map: artifact ID -> instructions for that artifact. PerArtifactInstructions map[string]*ArtifactUploadInstructions `json:"per_artifact_instructions"` -======= - ID string `json:"id"` - Artifacts []*Artifact `json:"artifacts"` - UploadDestination string `json:"upload_destination"` -} - -type ArtifactUploadInstructions struct { - Data map[string]string `json:"data"` - Action struct { - URL string `json:"url,omitempty"` - Method string `json:"method"` - Path string `json:"path"` - FileInput string `json:"file_input"` - } -} - -type ArtifactBatchCreateResponse struct { - ID string `json:"id"` - ArtifactIDs []string `json:"artifact_ids"` - UploadInstructions *ArtifactUploadInstructions `json:"upload_instructions"` ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // ArtifactSearchOptions specifies the optional parameters to the @@ -126,7 +104,6 @@ type ArtifactSearchOptions struct { IncludeDuplicates bool `url:"include_duplicates,omitempty"` } -<<<<<<< HEAD // ArtifactState represents the state of a single artifact, when calling UpdateArtifacts. type ArtifactState struct { ID string `json:"id"` @@ -150,20 +127,6 @@ type ArtifactBatchUpdateRequest struct { // CreateArtifacts takes a slice of artifacts, and creates them on Buildkite as a batch. func (c *Client) CreateArtifacts(ctx context.Context, jobID string, batch *ArtifactBatch) (*ArtifactBatchCreateResponse, *Response, error) { u := fmt.Sprintf("jobs/%s/artifacts", railsPathEscape(jobID)) -======= -type ArtifactBatchUpdateArtifact struct { - ID string `json:"id"` - State string `json:"state"` -} - -type ArtifactBatchUpdateRequest struct { - Artifacts []*ArtifactBatchUpdateArtifact `json:"artifacts"` -} - -// CreateArtifacts takes a slice of artifacts, and creates them on Buildkite as a batch. 
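For context on the buildkite-agent artifacts.go rewrite in the surrounding hunks: the updated API takes an ordered []ArtifactState instead of the old map[string]string, so the PUT payload is deterministic and each entry can grow extra fields later. A sketch of the resulting payload shape, using local stand-ins rather than the vendored types:

package main

import (
	"encoding/json"
	"fmt"
)

// ArtifactState mirrors the shape the hunks above introduce; it is a local
// stand-in, not the vendored type.
type ArtifactState struct {
	ID    string `json:"id"`
	State string `json:"state"`
}

type ArtifactBatchUpdateRequest struct {
	Artifacts []ArtifactState `json:"artifacts"`
}

func main() {
	payload := ArtifactBatchUpdateRequest{
		Artifacts: []ArtifactState{
			{ID: "artifact-1", State: "finished"},
			{ID: "artifact-2", State: "error"},
		},
	}
	b, _ := json.Marshal(payload)
	fmt.Println(string(b)) // entries serialize in slice order
}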
-func (c *Client) CreateArtifacts(ctx context.Context, jobId string, batch *ArtifactBatch) (*ArtifactBatchCreateResponse, *Response, error) { - u := fmt.Sprintf("jobs/%s/artifacts", railsPathEscape(jobId)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) req, err := c.newRequest(ctx, "POST", u, batch) if err != nil { @@ -179,21 +142,11 @@ func (c *Client) CreateArtifacts(ctx context.Context, jobId string, batch *Artif return createResponse, resp, err } -<<<<<<< HEAD // UpdateArtifacts updates Buildkite with one or more artifact states. func (c *Client) UpdateArtifacts(ctx context.Context, jobID string, artifactStates []ArtifactState) (*Response, error) { u := fmt.Sprintf("jobs/%s/artifacts", railsPathEscape(jobID)) payload := ArtifactBatchUpdateRequest{ Artifacts: artifactStates, -======= -// Updates a particular artifact -func (c *Client) UpdateArtifacts(ctx context.Context, jobId string, artifactStates map[string]string) (*Response, error) { - u := fmt.Sprintf("jobs/%s/artifacts", railsPathEscape(jobId)) - payload := ArtifactBatchUpdateRequest{} - - for id, state := range artifactStates { - payload.Artifacts = append(payload.Artifacts, &ArtifactBatchUpdateArtifact{id, state}) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } req, err := c.newRequest(ctx, "PUT", u, payload) @@ -201,26 +154,12 @@ func (c *Client) UpdateArtifacts(ctx context.Context, jobId string, artifactStat return nil, err } -<<<<<<< HEAD return c.doRequest(req, nil) } // SearchArtifacts searches Buildkite for a set of artifacts func (c *Client) SearchArtifacts(ctx context.Context, buildID string, opt *ArtifactSearchOptions) ([]*Artifact, *Response, error) { u := fmt.Sprintf("builds/%s/artifacts/search", railsPathEscape(buildID)) -======= - resp, err := c.doRequest(req, nil) - if err != nil { - return resp, err - } - - return resp, err -} - -// SearchArtifacts searches Buildkite for a set of artifacts -func (c *Client) SearchArtifacts(ctx context.Context, buildId string, opt *ArtifactSearchOptions) ([]*Artifact, *Response, error) { - u := fmt.Sprintf("builds/%s/artifacts/search", railsPathEscape(buildId)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) u, err := addOptions(u, opt) if err != nil { return nil, nil, err diff --git a/vendor/github.com/buildkite/agent/v3/api/auth.go b/vendor/github.com/buildkite/agent/v3/api/auth.go deleted file mode 100644 index 1fb28da103..0000000000 --- a/vendor/github.com/buildkite/agent/v3/api/auth.go +++ /dev/null @@ -1,37 +0,0 @@ -package api - -import ( - "fmt" - "net/http" -) - -type canceler interface { - CancelRequest(*http.Request) -} - -// authenticatedTransport manages injection of the API token -type authenticatedTransport struct { - // The Token used for authentication. This can either the be - // organizations registration token, or the agents access token. - Token string - - // Delegate is the underlying HTTP transport - Delegate http.RoundTripper -} - -// RoundTrip invoked each time a request is made -func (t authenticatedTransport) RoundTrip(req *http.Request) (*http.Response, error) { - if t.Token == "" { - return nil, fmt.Errorf("Invalid token, empty string supplied") - } - - req.Header.Set("Authorization", fmt.Sprintf("Token %s", t.Token)) - - return t.Delegate.RoundTrip(req) -} - -// CancelRequest cancels an in-flight request by closing its connection. 
-func (t *authenticatedTransport) CancelRequest(req *http.Request) { - cancelableTransport := t.Delegate.(canceler) - cancelableTransport.CancelRequest(req) -} diff --git a/vendor/github.com/buildkite/agent/v3/api/client.go b/vendor/github.com/buildkite/agent/v3/api/client.go index fd0a336949..9803437b2d 100644 --- a/vendor/github.com/buildkite/agent/v3/api/client.go +++ b/vendor/github.com/buildkite/agent/v3/api/client.go @@ -11,20 +11,13 @@ import ( "fmt" "io" "net/http" -<<<<<<< HEAD -======= - "net/http/httputil" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "net/url" "reflect" "strconv" "strings" "time" -<<<<<<< HEAD "github.com/buildkite/agent/v3/internal/agenthttp" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/buildkite/agent/v3/logger" "github.com/google/go-querystring/query" ) @@ -52,12 +45,9 @@ type Config struct { // If true, requests and responses will be dumped and set to the logger DebugHTTP bool -<<<<<<< HEAD // If true timings for each request will be logged TraceHTTP bool -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The http client used, leave nil for the default HTTPClient *http.Client @@ -87,55 +77,22 @@ func NewClient(l logger.Logger, conf Config) *Client { conf.UserAgent = defaultUserAgent } -<<<<<<< HEAD if conf.HTTPClient != nil { return &Client{ logger: l, client: conf.HTTPClient, conf: conf, -======= - httpClient := conf.HTTPClient - if conf.HTTPClient == nil { - - // use the default transport as it is optimized and configured for http2 - // and will avoid accidents in the future - tr := http.DefaultTransport.(*http.Transport).Clone() - - if conf.DisableHTTP2 { - tr.ForceAttemptHTTP2 = false - tr.TLSNextProto = make(map[string]func(authority string, c *tls.Conn) http.RoundTripper) - // The default TLSClientConfig has h2 in NextProtos, so the negotiated TLS connection will assume h2 support. - // see https://github.com/golang/go/issues/50571 - tr.TLSClientConfig.NextProtos = []string{"http/1.1"} - } - - if conf.TLSConfig != nil { - tr.TLSClientConfig = conf.TLSConfig - } - - httpClient = &http.Client{ - Timeout: 60 * time.Second, - Transport: &authenticatedTransport{ - Token: conf.Token, - Delegate: tr, - }, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } return &Client{ logger: l, -<<<<<<< HEAD client: agenthttp.NewClient( agenthttp.WithAuthToken(conf.Token), agenthttp.WithAllowHTTP2(!conf.DisableHTTP2), agenthttp.WithTLSConfig(conf.TLSConfig), ), conf: conf, -======= - client: httpClient, - conf: conf, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -262,7 +219,6 @@ func newResponse(r *http.Response) *Response { // interface, the raw response body will be written to v, without attempting to // first decode it. 
func (c *Client) doRequest(req *http.Request, v any) (*Response, error) { -<<<<<<< HEAD resp, err := agenthttp.Do(c.logger, c.client, req, agenthttp.WithDebugHTTP(c.conf.DebugHTTP), @@ -271,64 +227,12 @@ func (c *Client) doRequest(req *http.Request, v any) (*Response, error) { if err != nil { return nil, err } -======= - var err error - - if c.conf.DebugHTTP { - // If the request is a multi-part form, then it's probably a - // file upload, in which case we don't want to spewing out the - // file contents into the debug log (especially if it's been - // gzipped) - var requestDump []byte - if strings.Contains(req.Header.Get("Content-Type"), "multipart/form-data") { - requestDump, err = httputil.DumpRequestOut(req, false) - } else { - requestDump, err = httputil.DumpRequestOut(req, true) - } - - if err != nil { - c.logger.Debug("ERR: %s\n%s", err, string(requestDump)) - } else { - c.logger.Debug("%s", string(requestDump)) - } - } - - ts := time.Now() - - c.logger.Debug("%s %s", req.Method, req.URL) - - resp, err := c.client.Do(req) - if err != nil { - return nil, err - } - - c.logger.WithFields( - logger.StringField("proto", resp.Proto), - logger.IntField("status", resp.StatusCode), - logger.DurationField("Δ", time.Since(ts)), - ).Debug("↳ %s %s", req.Method, req.URL) - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) defer resp.Body.Close() defer io.Copy(io.Discard, resp.Body) response := newResponse(resp) -<<<<<<< HEAD if err := checkResponse(resp); err != nil { -======= - if c.conf.DebugHTTP { - responseDump, err := httputil.DumpResponse(resp, true) - if err != nil { - c.logger.Debug("\nERR: %s\n%s", err, string(responseDump)) - } else { - c.logger.Debug("\n%s", string(responseDump)) - } - } - - err = checkResponse(resp) - if err != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // even though there was an error, we still return the response // in case the caller wants to inspect it further return response, err @@ -348,11 +252,7 @@ func (c *Client) doRequest(req *http.Request, v any) (*Response, error) { } } -<<<<<<< HEAD return response, nil -======= - return response, err ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // ErrorResponse provides a message. 
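The debug-dump code removed from doRequest above (now owned by agenthttp) followed a rule worth noting: never dump multipart/form-data request bodies, since they are typically large or gzipped file uploads. A standalone sketch of that check:

package main

import (
	"fmt"
	"net/http"
	"net/http/httputil"
	"strings"
)

// dumpRequest logs an outgoing request, skipping the body for multipart
// uploads so file contents never land in the debug log.
func dumpRequest(req *http.Request) {
	withBody := !strings.Contains(req.Header.Get("Content-Type"), "multipart/form-data")
	dump, err := httputil.DumpRequestOut(req, withBody)
	if err != nil {
		fmt.Printf("ERR: %s\n", err)
		return
	}
	fmt.Printf("%s\n", dump)
}

func main() {
	req, _ := http.NewRequest("GET", "https://agent.buildkite.com/v3/ping", nil)
	dumpRequest(req)
}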
diff --git a/vendor/github.com/buildkite/agent/v3/api/oidc.go b/vendor/github.com/buildkite/agent/v3/api/oidc.go index 8a54f3947c..b28378894e 100644 --- a/vendor/github.com/buildkite/agent/v3/api/oidc.go +++ b/vendor/github.com/buildkite/agent/v3/api/oidc.go @@ -10,23 +10,15 @@ type OIDCToken struct { } type OIDCTokenRequest struct { -<<<<<<< HEAD Job string Audience string Lifetime int Claims []string AWSSessionTags []string -======= - Job string - Audience string - Lifetime int - Claims []string ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (c *Client) OIDCToken(ctx context.Context, methodReq *OIDCTokenRequest) (*OIDCToken, *Response, error) { m := &struct { -<<<<<<< HEAD Audience string `json:"audience,omitempty"` Lifetime int `json:"lifetime,omitempty"` Claims []string `json:"claims,omitempty"` @@ -36,15 +28,6 @@ func (c *Client) OIDCToken(ctx context.Context, methodReq *OIDCTokenRequest) (*O Lifetime: methodReq.Lifetime, Claims: methodReq.Claims, AWSSessionTags: methodReq.AWSSessionTags, -======= - Audience string `json:"audience,omitempty"` - Lifetime int `json:"lifetime,omitempty"` - Claims []string `json:"claims,omitempty"` - }{ - Audience: methodReq.Audience, - Lifetime: methodReq.Lifetime, - Claims: methodReq.Claims, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } u := fmt.Sprintf("jobs/%s/oidc/tokens", railsPathEscape(methodReq.Job)) diff --git a/vendor/github.com/buildkite/agent/v3/api/steps.go b/vendor/github.com/buildkite/agent/v3/api/steps.go index e0c0712a52..dcf758478b 100644 --- a/vendor/github.com/buildkite/agent/v3/api/steps.go +++ b/vendor/github.com/buildkite/agent/v3/api/steps.go @@ -54,7 +54,6 @@ func (c *Client) StepUpdate(ctx context.Context, stepIdOrKey string, stepUpdate return c.doRequest(req, nil) } -<<<<<<< HEAD type StepCancel struct { Build string `json:"build_id"` @@ -83,5 +82,3 @@ func (c *Client) StepCancel(ctx context.Context, stepIdOrKey string, stepCancel return stepCancelResponse, resp, nil } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/buildkite/agent/v3/version/VERSION b/vendor/github.com/buildkite/agent/v3/version/VERSION index dde8778627..7ca8b9836e 100644 --- a/vendor/github.com/buildkite/agent/v3/version/VERSION +++ b/vendor/github.com/buildkite/agent/v3/version/VERSION @@ -1,5 +1 @@ -<<<<<<< HEAD 3.91.0 -======= -3.81.0 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/buildkite/agent/v3/version/version.go b/vendor/github.com/buildkite/agent/v3/version/version.go index 0b34eea618..ac515e225a 100644 --- a/vendor/github.com/buildkite/agent/v3/version/version.go +++ b/vendor/github.com/buildkite/agent/v3/version/version.go @@ -38,13 +38,10 @@ func BuildNumber() string { return buildNumber } -<<<<<<< HEAD func IsDevelopmentBuild() bool { return buildNumber == "x" } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // commitInfo returns a string consisting of the commit hash and whether the the build was made in a // `dirty` working directory or not. A dirty working directory is one that has uncommitted changes // to files that git would track. 
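The oidc.go hunk earlier in this group adds AWSSessionTags to a request body that is shaped by an anonymous struct with omitempty tags, so unset fields disappear from the JSON entirely. A sketch of that pattern; the aws_session_tags tag name is an assumption, since the hunk does not show it.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Anonymous struct keeps the wire shape next to the call site.
	payload := &struct {
		Audience       string   `json:"audience,omitempty"`
		Lifetime       int      `json:"lifetime,omitempty"`
		Claims         []string `json:"claims,omitempty"`
		AWSSessionTags []string `json:"aws_session_tags,omitempty"` // assumed tag name
	}{
		Audience:       "sigstore",
		Lifetime:       300,
		AWSSessionTags: []string{"team"},
	}
	b, _ := json.Marshal(payload)
	fmt.Println(string(b)) // Claims is omitted because it was never set
}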
diff --git a/vendor/github.com/buildkite/go-pipeline/README.md b/vendor/github.com/buildkite/go-pipeline/README.md index 17fb550fd7..0d7a10ea24 100644 --- a/vendor/github.com/buildkite/go-pipeline/README.md +++ b/vendor/github.com/buildkite/go-pipeline/README.md @@ -3,11 +3,7 @@ [![Build status](https://badge.buildkite.com/1fad7fb9610283e4955ea4ec4c88faca52162b637fea61821e.svg)](https://buildkite.com/buildkite/go-pipeline) [![Go Reference](https://pkg.go.dev/badge/github.com/buildkite/go-pipeline.svg)](https://pkg.go.dev/github.com/buildkite/go-pipeline) -<<<<<<< HEAD `go-pipeline` is a Go library used for building and modifying Buildkite pipelines in golang. It's used internally by the [Buildkite Agent](https://github.com/buildkite/agent) to inspect and sign pipelines prior to uploading them, but is also useful for building tools that generate pipelines. -======= -`go-pipeline` is a Go libary used for building and modifying Buildkite pipelines in golang. It's used internally by the [Buildkite Agent](https://github.com/buildkite/agent) to inspect and sign pipelines prior to uploading them, but is also useful for building tools that generate pipelines. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## Installation diff --git a/vendor/github.com/buildkite/go-pipeline/ordered/unmarshal.go b/vendor/github.com/buildkite/go-pipeline/ordered/unmarshal.go index 2746c08b55..d528f348d2 100644 --- a/vendor/github.com/buildkite/go-pipeline/ordered/unmarshal.go +++ b/vendor/github.com/buildkite/go-pipeline/ordered/unmarshal.go @@ -16,10 +16,7 @@ import ( var ( ErrIntoNonPointer = errors.New("cannot unmarshal into non-pointer") ErrIntoNil = errors.New("cannot unmarshal into nil") -<<<<<<< HEAD ErrNotSettable = errors.New("target value not settable") -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ErrIncompatibleTypes = errors.New("incompatible types") ErrUnsupportedSrc = errors.New("cannot unmarshal from src") ErrMultipleInlineFields = errors.New(`multiple fields tagged with yaml:",inline"`) @@ -167,7 +164,6 @@ func Unmarshal(src, dst any) error { if sdst.Kind() != reflect.Slice { return fmt.Errorf("%w: cannot unmarshal []any into %T", ErrIncompatibleTypes, dst) } -<<<<<<< HEAD stype := sdst.Type() // stype = []E = the type of the slice etype := stype.Elem() // etype = E = Type of the slice's elements if sdst.IsNil() { @@ -178,12 +174,6 @@ func Unmarshal(src, dst any) error { var warns []error for i, a := range tsrc { x := reflect.New(etype) // x := new(E) (type *E) -======= - etype := sdst.Type().Elem() // E = Type of the slice's elements - var warns []error - for i, a := range tsrc { - x := reflect.New(etype) // *E ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) err := Unmarshal(a, x.Interface()) if w := warning.As(err); w != nil { warns = append(warns, w.Wrapf("while unmarshaling item at index %d of %d", i, len(tsrc))) @@ -253,15 +243,11 @@ func (m *Map[K, V]) decodeInto(target any) error { if !ok { return fmt.Errorf("%w: cannot unmarshal from %T, want K=string, V=any", ErrIncompatibleTypes, m) } -<<<<<<< HEAD // Note: m, and therefore tm, can be nil at this moment. -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Work out the kind of target being used. // Dereference the target to find the inner value, if needed. targetValue := reflect.ValueOf(target) -<<<<<<< HEAD switch targetValue.Kind() { case reflect.Pointer: // Passed a pointer to something. 
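The Unmarshal hunk above keeps the reflection walk that grows a typed slice from a []any: allocate a new *E per element, fill it, append. A simplified runnable version of just that walk, with direct assignment standing in for the recursive Unmarshal call:

package main

import (
	"fmt"
	"reflect"
)

// fillSlice copies src into the slice dst points at; dst must be *[]E.
func fillSlice(src []any, dst any) error {
	sdst := reflect.ValueOf(dst).Elem()
	etype := sdst.Type().Elem() // etype = E = the slice's element type
	out := reflect.MakeSlice(sdst.Type(), 0, len(src))
	for _, a := range src {
		x := reflect.New(etype) // x := new(E) (type *E)
		av := reflect.ValueOf(a)
		if !av.Type().AssignableTo(etype) {
			return fmt.Errorf("cannot assign %T to %s", a, etype)
		}
		x.Elem().Set(av) // real code would recurse into Unmarshal here
		out = reflect.Append(out, x.Elem())
	}
	sdst.Set(out)
	return nil
}

func main() {
	var ints []int
	_ = fillSlice([]any{1, 2, 3}, &ints)
	fmt.Println(ints) // [1 2 3]
}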
@@ -282,45 +268,20 @@ func (m *Map[K, V]) decodeInto(target any) error { case reflect.Map: // Continue below. -======= - var innerValue reflect.Value - switch targetValue.Kind() { - case reflect.Pointer: - // Passed a pointer to something. - if targetValue.IsNil() { - return ErrIntoNil - } - innerValue = targetValue.Elem() - - case reflect.Map: - // Passed a map directly. - innerValue = targetValue - if innerValue.IsNil() { - return ErrIntoNil - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: return fmt.Errorf("%w: cannot unmarshal %T into %T, want map or *struct{...}", ErrIncompatibleTypes, m, target) } -<<<<<<< HEAD switch targetValue.Kind() { case reflect.Map: // Process the map directly. mapType := targetValue.Type() -======= - switch innerValue.Kind() { - case reflect.Map: - // Process the map directly. - mapType := innerValue.Type() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // For simplicity, require the key type to be string. if keyType := mapType.Key(); keyType.Kind() != reflect.String { return fmt.Errorf("%w for map key: cannot unmarshal %T into %T", ErrIncompatibleTypes, m, target) } -<<<<<<< HEAD // If tm is nil, then set the target to nil. if tm == nil { if targetValue.IsNil() { @@ -339,11 +300,6 @@ func (m *Map[K, V]) decodeInto(target any) error { return ErrNotSettable } targetValue.Set(reflect.MakeMapWithSize(mapType, tm.Len())) -======= - // If target is a pointer to a nil map (with type), create a new map. - if innerValue.IsNil() { - innerValue.Set(reflect.MakeMapWithSize(mapType, tm.Len())) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } valueType := mapType.Elem() @@ -357,11 +313,7 @@ func (m *Map[K, V]) decodeInto(target any) error { return fmt.Errorf("unmarshaling value for key %q: %w", k, err) } -<<<<<<< HEAD targetValue.SetMapIndex(reflect.ValueOf(k), nv.Elem()) -======= - innerValue.SetMapIndex(reflect.ValueOf(k), nv.Elem()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil }); err != nil { return err @@ -376,11 +328,7 @@ func (m *Map[K, V]) decodeInto(target any) error { // These are the (accessible by reflection) fields it has. // This includes non-exported fields. -<<<<<<< HEAD fields := reflect.VisibleFields(targetValue.Type()) -======= - fields := reflect.VisibleFields(innerValue.Type()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var inlineField reflect.StructField outlineKeys := make(map[string]struct{}) @@ -442,11 +390,7 @@ func (m *Map[K, V]) decodeInto(target any) error { // Now load value into the field recursively. // Get a pointer to the field. This works because target is a pointer. -<<<<<<< HEAD ptrToField := targetValue.FieldByIndex(field.Index).Addr() -======= - ptrToField := innerValue.FieldByIndex(field.Index).Addr() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) err := Unmarshal(value, ptrToField.Interface()) if w := warning.As(err); w != nil { warns = append(warns, w.Wrapf("while unmarshaling the value for key %q into struct field %q", key, field.Name)) @@ -461,11 +405,7 @@ func (m *Map[K, V]) decodeInto(target any) error { // The rest is handling the ",inline" field. // We support any field that Unmarshal can unmarshal tm into. -<<<<<<< HEAD inlinePtr := targetValue.FieldByIndex(inlineField.Index).Addr() -======= - inlinePtr := innerValue.FieldByIndex(inlineField.Index).Addr() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Copy all values that weren't non-inline fields into a temporary map. // This is just to avoid mutating tm. 
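The decodeInto map branch resolved above allocates the typed map only once the target is known to be settable (hence the new ErrNotSettable), then copies entries in with SetMapIndex. A companion sketch of that branch, again substituting direct assignment for the recursive Unmarshal:

package main

import (
	"fmt"
	"reflect"
)

// fillMap copies src into the map dst points at; dst must be *map[string]V.
func fillMap(src map[string]any, dst any) error {
	v := reflect.ValueOf(dst).Elem()
	if !v.CanSet() {
		return fmt.Errorf("target value not settable")
	}
	mt := v.Type()
	// For simplicity, require the key type to be string, as decodeInto does.
	if mt.Key().Kind() != reflect.String {
		return fmt.Errorf("map key must be string, got %s", mt.Key())
	}
	v.Set(reflect.MakeMapWithSize(mt, len(src)))
	for k, val := range src {
		vv := reflect.ValueOf(val)
		if !vv.Type().AssignableTo(mt.Elem()) {
			return fmt.Errorf("cannot assign %T to %s", val, mt.Elem())
		}
		v.SetMapIndex(reflect.ValueOf(k), vv)
	}
	return nil
}

func main() {
	var m map[string]int
	_ = fillMap(map[string]any{"a": 1}, &m)
	fmt.Println(m) // map[a:1]
}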
diff --git a/vendor/github.com/buildkite/go-pipeline/step_command_matrix.go b/vendor/github.com/buildkite/go-pipeline/step_command_matrix.go index e4e4f3e7fe..ba75e74f18 100644 --- a/vendor/github.com/buildkite/go-pipeline/step_command_matrix.go +++ b/vendor/github.com/buildkite/go-pipeline/step_command_matrix.go @@ -147,13 +147,9 @@ func (m *Matrix) validatePermutation(p MatrixPermutation) error { // Check that the dimensions in the permutation are unique and defined in // the matrix setup. for dim := range p { -<<<<<<< HEAD // An empty but non-nil setup dimension is valid (all values may be // given by adjustment tuples). if m.Setup[dim] == nil { -======= - if len(m.Setup[dim]) == 0 { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return fmt.Errorf("%w: %q", errPermutationUnknownDimension, dim) } } @@ -186,13 +182,9 @@ func (m *Matrix) validatePermutation(p MatrixPermutation) error { return fmt.Errorf("%w: %d != %d", errAdjustmentLengthMismatch, len(adj.With), len(m.Setup)) } for dim := range adj.With { -<<<<<<< HEAD // An empty but non-nil setup dimension is valid (all values may be // given by adjustment tuples). if m.Setup[dim] == nil { -======= - if len(m.Setup[dim]) == 0 { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return fmt.Errorf("%w: %q", errAdjustmentUnknownDimension, dim) } } diff --git a/vendor/github.com/buildkite/interpolate/interpolate.go b/vendor/github.com/buildkite/interpolate/interpolate.go index 69de810f3c..307956491b 100644 --- a/vendor/github.com/buildkite/interpolate/interpolate.go +++ b/vendor/github.com/buildkite/interpolate/interpolate.go @@ -1,13 +1,8 @@ package interpolate import ( -<<<<<<< HEAD "fmt" "strings" -======= - "bytes" - "fmt" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // Interpolate takes a set of environment and interpolates it into the provided string using shell script expansions @@ -33,7 +28,6 @@ func Identifiers(str string) ([]string, error) { // An expansion is something that takes in ENV and returns a string or an error type Expansion interface { -<<<<<<< HEAD // Expand expands the expansion using variables from env. Expand(env Env) (string, error) @@ -41,9 +35,6 @@ type Expansion interface { // Escaped expansions do something special and return identifiers // (starting with $) that *would* become referenced after a round of // unescaping. -======= - Expand(env Env) (string, error) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Identifiers() []string } @@ -99,7 +90,6 @@ func (e UnsetValueExpansion) Expand(env Env) (string, error) { // EscapedExpansion is an expansion that is delayed until later on (usually by a later process) type EscapedExpansion struct { -<<<<<<< HEAD // PotentialIdentifier is an identifier for the purpose of Identifiers, // but not for the purpose of Expand. 
PotentialIdentifier string @@ -111,17 +101,6 @@ func (e EscapedExpansion) Identifiers() []string { func (e EscapedExpansion) Expand(Env) (string, error) { return "$", nil -======= - Identifier string -} - -func (e EscapedExpansion) Identifiers() []string { - return []string{"$" + e.Identifier} -} - -func (e EscapedExpansion) Expand(Env) (string, error) { - return "$" + e.Identifier, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // SubstringExpansion returns a substring (or slice) of the env @@ -222,11 +201,7 @@ func (e Expression) Identifiers() []string { } func (e Expression) Expand(env Env) (string, error) { -<<<<<<< HEAD var buf strings.Builder -======= - buf := &bytes.Buffer{} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, item := range e { if item.Expansion != nil { @@ -234,15 +209,9 @@ func (e Expression) Expand(env Env) (string, error) { if err != nil { return "", err } -<<<<<<< HEAD buf.WriteString(result) } else { buf.WriteString(item.Text) -======= - _, _ = buf.WriteString(result) - } else { - _, _ = buf.WriteString(item.Text) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } diff --git a/vendor/github.com/buildkite/interpolate/parser.go b/vendor/github.com/buildkite/interpolate/parser.go index f5f6207f47..e28511b23e 100644 --- a/vendor/github.com/buildkite/interpolate/parser.go +++ b/vendor/github.com/buildkite/interpolate/parser.go @@ -87,11 +87,7 @@ func (p *Parser) parseExpression(stop ...rune) (Expression, error) { return nil, err } -<<<<<<< HEAD expr = append(expr, ExpressionItem{Expansion: ee}) -======= - expr = append(expr, ee) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) continue } @@ -104,20 +100,12 @@ func (p *Parser) parseExpression(stop ...rune) (Expression, error) { // If we run into a dollar sign and it's not the last char, it's an expansion if c == '$' && p.pos < (len(p.input)-1) { -<<<<<<< HEAD expressionItem, err := p.parseExpansion() if err != nil { return nil, err } expr = append(expr, expressionItem) -======= - expansion, err := p.parseExpansion() - if err != nil { - return nil, err - } - expr = append(expr, ExpressionItem{Expansion: expansion}) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) continue } @@ -135,7 +123,6 @@ func (p *Parser) parseExpression(stop ...rune) (Expression, error) { return expr, nil } -<<<<<<< HEAD // parseEscapedExpansion attempts to extract a *potential* identifier or brace // expression from the text following the escaped dollarsign. 
func (p *Parser) parseEscapedExpansion() (EscapedExpansion, error) { @@ -204,49 +191,6 @@ func (p *Parser) parseExpansion() (ExpressionItem, error) { return ExpressionItem{Expansion: VariableExpansion{ Identifier: identifier, }}, nil -======= -func (p *Parser) parseEscapedExpansion() (ExpressionItem, error) { - next := p.peekRune() - switch { - case next == '{': - // if it's an escaped brace expansion, (eg $${MY_COOL_VAR:-5}) consume text until the close brace - id := p.scanUntil(func(r rune) bool { return r == '}' }) - id = id + string(p.nextRune()) // we know that the next rune is a close brace, chuck it on the end - return ExpressionItem{Expansion: EscapedExpansion{Identifier: id}}, nil - - case unicode.IsLetter(next): - // it's an escaped identifier (eg $$MY_COOL_VAR) - id, err := p.scanIdentifier() - if err != nil { - return ExpressionItem{}, err - } - - return ExpressionItem{Expansion: EscapedExpansion{Identifier: id}}, nil - - default: - // there's no identifier or brace afterward, so it's probably a literal escaped dollar sign - // just return a text item with the dollar sign - return ExpressionItem{Text: "$"}, nil - } -} - -func (p *Parser) parseExpansion() (Expansion, error) { - if c := p.nextRune(); c != '$' { - return nil, fmt.Errorf("Expected expansion to start with $, got %c", c) - } - - // if we have an open brace, this is a brace expansion - if c := p.peekRune(); c == '{' { - return p.parseBraceExpansion() - } - - identifier, err := p.scanIdentifier() - if err != nil { - return nil, err - } - - return VariableExpansion{Identifier: identifier}, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (p *Parser) parseBraceExpansion() (Expansion, error) { @@ -261,13 +205,9 @@ func (p *Parser) parseBraceExpansion() (Expansion, error) { if c := p.peekRune(); c == '}' { _ = p.nextRune() -<<<<<<< HEAD return VariableExpansion{ Identifier: identifier, }, nil -======= - return VariableExpansion{Identifier: identifier}, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var operator string @@ -388,13 +328,8 @@ func (p *Parser) scanIdentifier() (string, error) { if c := p.peekRune(); !unicode.IsLetter(c) { return "", fmt.Errorf("Expected identifier to start with a letter, got %c", c) } -<<<<<<< HEAD notIdentifierChar := func(r rune) bool { return !(unicode.IsLetter(r) || unicode.IsNumber(r) || r == '_') -======= - var notIdentifierChar = func(r rune) bool { - return (!unicode.IsLetter(r) && !unicode.IsNumber(r) && r != '_') ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return p.scanUntil(notIdentifierChar), nil } diff --git a/vendor/github.com/buildkite/roko/retrier.go b/vendor/github.com/buildkite/roko/retrier.go index 7c0a699fab..17213ab43d 100644 --- a/vendor/github.com/buildkite/roko/retrier.go +++ b/vendor/github.com/buildkite/roko/retrier.go @@ -10,20 +10,13 @@ import ( var defaultRandom = rand.New(rand.NewSource(time.Now().UnixNano())) -<<<<<<< HEAD const defaultJitterInterval = 1000 * time.Millisecond -======= -const jitterInterval = 1000 * time.Millisecond ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type Retrier struct { maxAttempts int attemptCount int jitter bool -<<<<<<< HEAD jitterRange jitterRange -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) forever bool rand *rand.Rand @@ -32,17 +25,11 @@ type Retrier struct { intervalCalculator Strategy strategyType string -<<<<<<< HEAD nextInterval time.Duration } type jitterRange struct{ min, max time.Duration } -======= - manualInterval *time.Duration -} - 
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type Strategy func(*Retrier) time.Duration const ( @@ -135,7 +122,6 @@ func WithStrategy(strategy Strategy, strategyType string) retrierOpt { func WithJitter() retrierOpt { return func(r *Retrier) { r.jitter = true -<<<<<<< HEAD r.jitterRange = jitterRange{min: 0, max: defaultJitterInterval} } } @@ -156,8 +142,6 @@ func WithJitterRange(min, max time.Duration) retrierOpt { min: min, max: max, } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -200,11 +184,7 @@ func NewRetrier(opts ...retrierOpt) *Retrier { oldJitter := r.jitter r.jitter = false // Temporarily turn off jitter while we check if the interval is 0 -<<<<<<< HEAD if r.forever && r.strategyType == constantStrategy && r.intervalCalculator(r) == 0 { -======= - if r.forever && r.strategyType == constantStrategy && r.NextInterval() == 0 { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) panic("retriers using the constant strategy that run forever must have an interval") } r.jitter = oldJitter // and now set it back to what it was previously @@ -212,24 +192,16 @@ func NewRetrier(opts ...retrierOpt) *Retrier { return r } -<<<<<<< HEAD // Jitter returns a duration in the interval in the range [0, r.jitterRange.max - r.jitterRange.min). When no jitter range // is defined, the default range is [0, 1 second). The jitter is recalculated for each retry. // If jitter is disabled, this method will always return 0. -======= -// Jitter returns a duration in the interval (0, 1] s if jitter is enabled, or 0 s if it's not ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (r *Retrier) Jitter() time.Duration { if !r.jitter { return 0 } -<<<<<<< HEAD min, max := float64(r.jitterRange.min), float64(r.jitterRange.max) return time.Duration(min + (max-min)*rand.Float64()) -======= - return time.Duration((1.0 - r.rand.Float64()) * float64(jitterInterval)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // MarkAttempt increments the attempt count for the retrier. This affects ShouldGiveUp, and also affects the retry interval @@ -245,11 +217,7 @@ func (r *Retrier) Break() { // SetNextInterval overrides the strategy for the interval before the next try func (r *Retrier) SetNextInterval(d time.Duration) { -<<<<<<< HEAD r.nextInterval = d -======= - r.manualInterval = &d ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // ShouldGiveUp returns whether the retrier should stop trying do do the thing it's been asked to do @@ -267,20 +235,9 @@ func (r *Retrier) ShouldGiveUp() bool { return r.attemptCount >= r.maxAttempts } -<<<<<<< HEAD // NextInterval returns the length of time that the retrier will wait before the next retry func (r *Retrier) NextInterval() time.Duration { return r.nextInterval -======= -// NextInterval returns the next interval that the retrier will use. 
Behind the scenes, it calls the function generated -// by either retrier's strategy -func (r *Retrier) NextInterval() time.Duration { - if r.manualInterval != nil { - return *r.manualInterval - } - - return r.intervalCalculator(r) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (r *Retrier) String() string { @@ -296,14 +253,8 @@ func (r *Retrier) String() string { return str } -<<<<<<< HEAD if r.nextInterval > 0 { str = str + fmt.Sprintf(" Retrying in %s", r.nextInterval) -======= - nextInterval := r.NextInterval() - if nextInterval > 0 { - str = str + fmt.Sprintf(" Retrying in %s", nextInterval) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } else { str = str + " Retrying immediately" } @@ -325,31 +276,16 @@ func (r *Retrier) Do(callback func(*Retrier) error) error { // DoWithContext is a context-aware variant of Do. func (r *Retrier) DoWithContext(ctx context.Context, callback func(*Retrier) error) error { for { -<<<<<<< HEAD // Calculate the next interval before we do work - this way, the calls to r.NextInterval() in the callback will be // accurate and include the calculated jitter, if present r.nextInterval = r.intervalCalculator(r) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Perform the action the user has requested we retry err := callback(r) if err == nil { return nil } -<<<<<<< HEAD -======= - // Calculate the next interval before we increment the attempt count - // In the exponential case, if we didn't do this, we'd skip the first interval - // ie, we would wait 2^1, 2^2, 2^3, ..., 2^n+1 seconds (bad) - // instead of 2^0, 2^1, 2^2, ..., 2^n seconds (good) - nextInterval := r.NextInterval() - - // Reset the manualInterval now that the nextInterval has been acquired. - r.manualInterval = nil - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) r.MarkAttempt() // If the last callback called r.Break(), or if we've hit our call limit, bail out and return the last error we got @@ -357,11 +293,7 @@ func (r *Retrier) DoWithContext(ctx context.Context, callback func(*Retrier) err return err } -<<<<<<< HEAD if err := r.sleepOrDone(ctx, r.nextInterval); err != nil { -======= - if err := r.sleepOrDone(ctx, nextInterval); err != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err } } diff --git a/vendor/github.com/butuzov/ireturn/analyzer/analyzer.go b/vendor/github.com/butuzov/ireturn/analyzer/analyzer.go index ed272661dc..ebf2a0dbea 100644 --- a/vendor/github.com/butuzov/ireturn/analyzer/analyzer.go +++ b/vendor/github.com/butuzov/ireturn/analyzer/analyzer.go @@ -8,21 +8,12 @@ import ( "strings" "sync" -<<<<<<< HEAD "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" "github.com/butuzov/ireturn/analyzer/internal/config" "github.com/butuzov/ireturn/analyzer/internal/types" -======= - "github.com/butuzov/ireturn/analyzer/internal/config" - "github.com/butuzov/ireturn/analyzer/internal/types" - - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/ast/inspector" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const name string = "ireturn" // linter name @@ -32,19 +23,11 @@ type validator interface { } type analyzer struct { -<<<<<<< HEAD once sync.Once mu sync.RWMutex handler validator err error disabledNolint bool -======= - once sync.Once - mu sync.RWMutex - handler validator - err error - diabledNolint bool ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) found 
[]analysis.Diagnostic } @@ -80,11 +63,7 @@ func (a *analyzer) run(pass *analysis.Pass) (interface{}, error) { } // 003. Is it allowed to be checked? -<<<<<<< HEAD if !a.disabledNolint && hasDisallowDirective(f.Doc) { -======= - if !a.diabledNolint && hasDisallowDirective(f.Doc) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return } @@ -136,11 +115,7 @@ func (a *analyzer) readConfiguration(fs *flag.FlagSet) { // First: checking nonolint directive val := fs.Lookup("nonolint") if val != nil { -<<<<<<< HEAD a.disabledNolint = fs.Lookup("nonolint").Value.String() == "true" -======= - a.diabledNolint = fs.Lookup("nonolint").Value.String() == "true" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Second: validators implementation next @@ -153,11 +128,7 @@ func (a *analyzer) readConfiguration(fs *flag.FlagSet) { } func NewAnalyzer() *analysis.Analyzer { -<<<<<<< HEAD a := analyzer{} -======= - a := analyzer{} //nolint: exhaustivestruct ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return &analysis.Analyzer{ Name: name, @@ -225,11 +196,7 @@ func filterInterfaces(p *analysis.Pass, ft *ast.FuncType, di map[string]struct{} typeParams := val.String() prefix, suffix := "interface{", "}" -<<<<<<< HEAD if strings.HasPrefix(typeParams, prefix) { //nolint:gosimple -======= - if strings.HasPrefix(typeParams, prefix) { // nolint: gosimple ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) typeParams = typeParams[len(prefix):] } if strings.HasSuffix(typeParams, suffix) { diff --git a/vendor/github.com/butuzov/ireturn/analyzer/internal/config/allow.go b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/allow.go index ed8e3eb8c3..da101c7862 100644 --- a/vendor/github.com/butuzov/ireturn/analyzer/internal/config/allow.go +++ b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/allow.go @@ -2,11 +2,7 @@ package config import "github.com/butuzov/ireturn/analyzer/internal/types" -<<<<<<< HEAD // allowConfig specifies a list of interfaces (keywords, patterns and regular expressions) -======= -// allowConfig specifies a list of interfaces (keywords, patters and regular expressions) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // that are allowed by ireturn as valid to return, any non listed interface are rejected. 
type allowConfig struct { *defaultConfig diff --git a/vendor/github.com/butuzov/ireturn/analyzer/internal/config/new.go b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/new.go index 0d0d9f4503..d6914af862 100644 --- a/vendor/github.com/butuzov/ireturn/analyzer/internal/config/new.go +++ b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/new.go @@ -10,10 +10,6 @@ import ( var ErrCollisionOfInterests = errors.New("can't have both `-accept` and `-reject` specified at same time") -<<<<<<< HEAD -======= -// nolint: exhaustivestruct ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func DefaultValidatorConfig() *allowConfig { return allowAll([]string{ types.NameEmpty, // "empty": empty interfaces (interface{}) diff --git a/vendor/github.com/butuzov/ireturn/analyzer/internal/config/reject.go b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/reject.go index 7cb9f3bfae..b2cde910ce 100644 --- a/vendor/github.com/butuzov/ireturn/analyzer/internal/config/reject.go +++ b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/reject.go @@ -2,11 +2,7 @@ package config import "github.com/butuzov/ireturn/analyzer/internal/types" -<<<<<<< HEAD // rejectConfig specifies a list of interfaces (keywords, patterns and regular expressions) -======= -// rejectConfig specifies a list of interfaces (keywords, patters and regular expressions) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // that are rejected by ireturn as valid to return, any non listed interface are allowed. type rejectConfig struct { *defaultConfig diff --git a/vendor/github.com/butuzov/ireturn/analyzer/internal/types/iface.go b/vendor/github.com/butuzov/ireturn/analyzer/internal/types/iface.go index 52463de983..0f4286515f 100644 --- a/vendor/github.com/butuzov/ireturn/analyzer/internal/types/iface.go +++ b/vendor/github.com/butuzov/ireturn/analyzer/internal/types/iface.go @@ -47,11 +47,7 @@ func (i IFace) HashString() string { } func (i IFace) ExportDiagnostic() analysis.Diagnostic { -<<<<<<< HEAD return analysis.Diagnostic{ -======= - return analysis.Diagnostic{ //nolint: exhaustivestruct ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Pos: i.Pos, Message: i.String(), } diff --git a/vendor/github.com/butuzov/mirror/MIRROR_FUNCS.md b/vendor/github.com/butuzov/mirror/MIRROR_FUNCS.md index d2288f08b4..da30c8e00f 100644 --- a/vendor/github.com/butuzov/mirror/MIRROR_FUNCS.md +++ b/vendor/github.com/butuzov/mirror/MIRROR_FUNCS.md @@ -1,4 +1,3 @@ -<<<<<<< HEAD | Function | Mirror | @@ -54,206 +53,3 @@ | `func utf8.FullRune([]byte) bool` | `func utf8.FullRuneInString(string) bool` | | `func utf8.RuneCount([]byte) int` | `func utf8.RuneCountInString(string) int` | | `func utf8.Valid([]byte) bool` | `func utf8.ValidString(string) bool` | -======= - -func (*bufio.Writer) Write([]byte) (int, error) -func (*bufio.Writer) WriteString(string) (int, error) - - -func (*bufio.Writer) WriteRune(rune) (int, error) -func (*bufio.Writer) WriteString(string) (int, error) - - -func (*bytes.Buffer) Write([]byte) (int, error) -func (*bytes.Buffer) WriteString(string) (int, error) - - -func (*bytes.Buffer) WriteRune(rune) (int, error) -func (*bytes.Buffer) WriteString(string) (int, error) - - -func bytes.Compare([]byte, []byte) int -func strings.Compare(string, string) int - - -func bytes.Contains([]byte, []byte) bool -func strings.Contains(string, string) bool - - -func bytes.ContainsAny([]byte, string) bool -func strings.ContainsAny(string, string) bool - - -func bytes.ContainsRune([]byte, byte) bool 
-func strings.ContainsRune(string, byte) bool
-
-
-func bytes.Count([]byte, []byte) int
-func strings.Count(string, string) int
-
-
-func bytes.EqualFold([]byte, []byte) bool
-func strings.EqualFold(string, string) bool
-
-
-func bytes.HasPrefix([]byte, []byte) bool
-func strings.HasPrefix(string, string) bool
-
-
-func bytes.HasSuffix([]byte, []byte) bool
-func strings.HasSuffix(string, string) bool
-
-
-func bytes.Index([]byte, []byte) int
-func strings.Index(string, string) int
-
-
-func bytes.IndexAny([]byte, string) int
-func strings.IndexAny(string, string) int
-
-
-func bytes.IndexByte([]byte, byte) int
-func strings.IndexByte(string, byte) int
-
-
-func bytes.IndexFunc([]byte, func(rune) bool) int
-func strings.IndexFunc(string, func(rune) bool) int
-
-
-func bytes.IndexRune([]byte, rune) int
-func strings.IndexRune(string, rune) int
-
-
-func bytes.LastIndex([]byte, []byte) int
-func strings.LastIndex(string, string) int
-
-
-func bytes.LastIndexAny([]byte, string) int
-func strings.LastIndexAny(string, string) int
-
-
-func bytes.LastIndexByte([]byte, byte) int
-func strings.LastIndexByte(string, byte) int
-
-
-func bytes.LastIndexFunc([]byte, func(rune) bool) int
-func strings.LastIndexFunc(string, func(rune) bool) int
-
-
-func bytes.NewBuffer([]byte) *bytes.Buffer
-func bytes.NewBufferString(string) *bytes.Buffer
-
-
-func (*httptest.ResponseRecorder) Write([]byte) (int, error)
-func (*httptest.ResponseRecorder) WriteString(string) (int, error)
-
-
-func (*maphash.Hash) Write([]byte) (int, error)
-func (*maphash.Hash) WriteString(string) (int, error)
-
-
-func (*os.File) Write([]byte) (int, error)
-func (*os.File) WriteString(string) (int, error)
-
-
-func regexp.Match(string, []byte) (bool, error)
-func regexp.MatchString(string, string) (bool, error)
-
-
-func (*regexp.Regexp) FindAllIndex([]byte, int) [][]int
-func (*regexp.Regexp) FindAllStringIndex(string, int) [][]int
-
-
-func (*regexp.Regexp) FindAllSubmatchIndex([]byte, int) [][]int
-func (*regexp.Regexp) FindAllStringSubmatchIndex(string, int) [][]int
-
-
-func (*regexp.Regexp) FindIndex([]byte) []int
-func (*regexp.Regexp) FindStringIndex(string) []int
-
-
-func (*regexp.Regexp) FindSubmatchIndex([]byte) []int
-func (*regexp.Regexp) FindStringSubmatchIndex(string) []int
-
-
-func (*regexp.Regexp) Match([]byte) bool
-func (*regexp.Regexp) MatchString(string) bool
-
-
-func (*strings.Builder) Write([]byte) (int, error)
-func (*strings.Builder) WriteString(string) (int, error)
-
-
-func (*strings.Builder) WriteRune(rune) (int, error)
-func (*strings.Builder) WriteString(string) (int, error)
-
-
-func strings.Compare(string) int
-func bytes.Compare([]byte) int
-
-
-func strings.Contains(string) bool
-func bytes.Contains([]byte) bool
-
-
-func strings.ContainsAny(string) bool
-func bytes.ContainsAny([]byte) bool
-
-
-func strings.ContainsRune(string) bool
-func bytes.ContainsRune([]byte) bool
-
-
-func strings.EqualFold(string) bool
-func bytes.EqualFold([]byte) bool
-
-
-func strings.HasPrefix(string) bool
-func bytes.HasPrefix([]byte) bool
-
-
-func strings.HasSuffix(string) bool
-func bytes.HasSuffix([]byte) bool
-
-
-func strings.Index(string) int
-func bytes.Index([]byte) int
-
-
-func strings.IndexFunc(string, func(r rune) bool) int
-func bytes.IndexFunc([]byte, func(r rune) bool) int
-
-
-func strings.LastIndex(string) int
-func bytes.LastIndex([]byte) int
-
-
-func strings.LastIndexAny(string) int
-func bytes.LastIndexAny([]byte) int
-
-
-func strings.LastIndexFunc(string, func(r rune) bool) int
-func bytes.LastIndexFunc([]byte, func(r rune) bool) int
-
-
-func utf8.DecodeLastRune([]byte) (rune, int)
-func utf8.DecodeLastRuneInString(string) (rune, int)
-
-
-func utf8.DecodeRune([]byte) (rune, int)
-func utf8.DecodeRuneInString(string) (rune, int)
-
-
-func utf8.FullRune([]byte) bool
-func utf8.FullRuneInString(string) bool
-
-
-func utf8.RuneCount([]byte) int
-func utf8.RuneCountInString(string) int
-
-
-func utf8.Valid([]byte) bool
-func utf8.ValidString(string) bool
-
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/github.com/butuzov/mirror/Makefile b/vendor/github.com/butuzov/mirror/Makefile
index a27bc8a5bf..dab6f160ae 100644
--- a/vendor/github.com/butuzov/mirror/Makefile
+++ b/vendor/github.com/butuzov/mirror/Makefile
@@ -10,12 +10,8 @@ endef

 # Generate Artifacts ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 generate: ## Generate Assets
-<<<<<<< HEAD
 	$(MAKE) generate-tests
 	$(MAKE) generate-mirror-table
-=======
-	$(MAKE)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 generate-tests: ## Generates Assets at testdata
 	go run ./cmd/internal/tests/ "$(PWD)/testdata"
@@ -57,11 +53,7 @@ tests-summary: bin/tparse
 lints: ## Run golangci-lint
 lints: bin/golangci-lint
 lints:
-<<<<<<< HEAD
 	golangci-lint run --no-config ./... --exclude-dirs "^(cmd|testdata)"
-=======
-	golangci-lint run --no-config ./... --skip-dirs "^(cmd|testdata)"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)


 cover: ## Run Coverage
@@ -80,13 +72,8 @@ bin/tparse: INSTALL_URL=github.com/mfridman/tparse@v0.13.2
 bin/tparse:
 	$(call install_go_bin, tparse, $(INSTALL_URL))

-<<<<<<< HEAD
 bin/golangci-lint: ## Installs golangci-lint@v1.62.0 (if not exists)
 bin/golangci-lint: INSTALL_URL=github.com/golangci/golangci-lint@v1.62.0
-=======
-bin/golangci-lint: ## Installs golangci-lint@v1.55.2 (if not exists)
-bin/golangci-lint: INSTALL_URL=github.com/golangci/golangci-lint@v1.55.2
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 bin/golangci-lint:
 	$(call install_go_bin, golangci-lint, $(INSTALL_URL))
@@ -113,11 +100,7 @@ help: dep-gawk
 	@ echo ""

-<<<<<<< HEAD
# Helper Methods ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-=======
-# Helper Mehtods ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 dep-gawk:
 	@ if [ -z "$(shell command -v gawk)" ]; then \
 		if [ -x /usr/local/bin/brew ]; then $(MAKE) _brew_gawk_install; exit 0; fi; \
@@ -129,37 +112,21 @@ dep-gawk:
 	fi

 _brew_gawk_install:
-<<<<<<< HEAD
 	@ echo "Installing gawk using brew... "
-=======
-	@ echo "Instaling gawk using brew... "
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	@ brew install gawk --quiet
 	@ echo "done"

 _ubuntu_gawk_install:
-<<<<<<< HEAD
 	@ echo "Installing gawk using apt-get... "
-=======
-	@ echo "Instaling gawk using apt-get... "
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	@ apt-get -q install gawk -y
 	@ echo "done"

 _alpine_gawk_install:
-<<<<<<< HEAD
 	@ echo "Installing gawk using yum... "
-=======
-	@ echo "Instaling gawk using yum... "
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	@ apk add --update --no-cache gawk
 	@ echo "done"

 _centos_gawk_install:
-<<<<<<< HEAD
 	@ echo "Installing gawk using yum... "
-=======
-	@ echo "Instaling gawk using yum... "
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	@ yum install -q -y gawk;
 	@ echo "done"

diff --git a/vendor/github.com/butuzov/mirror/analyzer.go b/vendor/github.com/butuzov/mirror/analyzer.go
index 42c59b7618..b15019ce1f 100644
--- a/vendor/github.com/butuzov/mirror/analyzer.go
+++ b/vendor/github.com/butuzov/mirror/analyzer.go
@@ -44,15 +44,9 @@ func Run(pass *analysis.Pass, withTests bool) []*checker.Violation {
 		BytesFunctions, BytesBufferMethods,
 		RegexpFunctions, RegexpRegexpMethods,
 		StringFunctions, StringsBuilderMethods,
-<<<<<<< HEAD
 		MaphashMethods, MaphashFunctions,
 		BufioMethods, HTTPTestMethods,
 		OsFileMethods, UTF8Functions,
-=======
-		BufioMethods, HTTPTestMethods,
-		OsFileMethods, MaphashMethods,
-		UTF8Functions,
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	)

 	check.Type = checker.WrapType(pass.TypesInfo)
diff --git a/vendor/github.com/butuzov/mirror/checkers_maphash.go b/vendor/github.com/butuzov/mirror/checkers_maphash.go
index 03bf490515..345a64123e 100644
--- a/vendor/github.com/butuzov/mirror/checkers_maphash.go
+++ b/vendor/github.com/butuzov/mirror/checkers_maphash.go
@@ -2,7 +2,6 @@ package mirror

 import "github.com/butuzov/mirror/internal/checker"

-<<<<<<< HEAD
 var (
 	MaphashFunctions = []checker.Violation{
 		{ // maphash.Bytes
@@ -66,37 +65,3 @@ var (
 		},
 	}
 )
-=======
-var MaphashMethods = []checker.Violation{
-	{ // (*hash/maphash).Write
-		Targets:   checker.Bytes,
-		Type:      checker.Method,
-		Package:   "hash/maphash",
-		Struct:    "Hash",
-		Caller:    "Write",
-		Args:      []int{0},
-		AltCaller: "WriteString",
-
-		Generate: &checker.Generate{
-			PreCondition: `h := maphash.Hash{}`,
-			Pattern:      `Write($0)`,
-			Returns:      []string{"int", "error"},
-		},
-	},
-	{ // (*hash/maphash).WriteString
-		Targets:   checker.Strings,
-		Type:      checker.Method,
-		Package:   "hash/maphash",
-		Struct:    "Hash",
-		Caller:    "WriteString",
-		Args:      []int{0},
-		AltCaller: "Write",
-
-		Generate: &checker.Generate{
-			PreCondition: `h := maphash.Hash{}`,
-			Pattern:      `WriteString($0)`,
-			Returns:      []string{"int", "error"},
-		},
-	},
-}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/github.com/butuzov/mirror/internal/checker/checker.go b/vendor/github.com/butuzov/mirror/internal/checker/checker.go
index cddacf12ea..fb9ba41729 100644
--- a/vendor/github.com/butuzov/mirror/internal/checker/checker.go
+++ b/vendor/github.com/butuzov/mirror/internal/checker/checker.go
@@ -9,20 +9,12 @@ import (
 	"strings"
 )

-<<<<<<< HEAD
 // Checker will perform standard check on package and its methods.
-=======
-// Checker will perform standart check on package and its methods.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 type Checker struct {
 	Violations []Violation           // List of available violations
 	Packages   map[string][]int      // Storing indexes of Violations per pkg/kg.Struct
 	Type       func(ast.Expr) string // Type Checker closure.
-<<<<<<< HEAD
 	Print      func(ast.Node) []byte // String representation of the expression.
-=======
-	Print      func(ast.Node) []byte // String representation of the expresion.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 func New(violations ...[]Violation) Checker {
@@ -84,11 +76,7 @@ func (c *Checker) Handle(v *Violation, ce *ast.CallExpr) (map[int]ast.Expr, bool
 			continue
 		}

-<<<<<<< HEAD
 		// is it conversion call
-=======
-		// is it convertsion call
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		if !c.callConverts(call) {
 			continue
 		}
diff --git a/vendor/github.com/butuzov/mirror/internal/checker/violation.go b/vendor/github.com/butuzov/mirror/internal/checker/violation.go
index 1827c30472..c2c1492086 100644
--- a/vendor/github.com/butuzov/mirror/internal/checker/violation.go
+++ b/vendor/github.com/butuzov/mirror/internal/checker/violation.go
@@ -28,11 +28,7 @@ const (
 	UntypedRune string = "untyped rune"
 )

-<<<<<<< HEAD
 // Violation describes what message we going to give to a particular code violation
-=======
-// Violation describs what message we going to give to a particular code violation
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 type Violation struct {
 	Type ViolationType //
 	Args []int         // Indexes of the arguments needs to be checked
@@ -147,11 +143,7 @@ func (v *Violation) Diagnostic(fSet *token.FileSet) analysis.Diagnostic {
 		v.AltPackage = v.Package
 	}

-<<<<<<< HEAD
 	// Hooray! we don't need to change package and redo imports.
-=======
-	// Hooray! we dont need to change package and redo imports.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if v.Type == Function && v.AltPackage == v.Package && noNl {
 		diagnostic.SuggestedFixes = []analysis.SuggestedFix{{
 			Message: "Fix Issue With",
@@ -174,11 +166,7 @@ type GolangIssue struct {
 	Original string
 }

-<<<<<<< HEAD
 // Issue intended to be used only within `golangci-lint`, but you can use it
-=======
-// Issue intended to be used only within `golangci-lint`, bu you can use use it
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // alongside Diagnostic if you wish.
 func (v *Violation) Issue(fSet *token.FileSet) GolangIssue {
 	issue := GolangIssue{
diff --git a/vendor/github.com/butuzov/mirror/readme.md b/vendor/github.com/butuzov/mirror/readme.md
index c604d5e6cb..f5cfa47a68 100644
--- a/vendor/github.com/butuzov/mirror/readme.md
+++ b/vendor/github.com/butuzov/mirror/readme.md
@@ -2,7 +2,6 @@

 `mirror` suggests use of alternative functions/methods in order to gain performance boosts by avoiding unnecessary `[]byte/string` conversion calls. See [MIRROR_FUNCS.md](MIRROR_FUNCS.md) list of mirror functions you can use in go's stdlib.

-<<<<<<< HEAD
 ---

 [![United 24](https://raw.githubusercontent.com/vshymanskyy/StandWithUkraine/main/banner-personal-page.svg)](https://u24.gov.ua/)
@@ -10,8 +9,6 @@

 ---

-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 ## Linter Use Cases

 ### `github.com/argoproj/argo-cd`
@@ -96,21 +93,13 @@ util/cert/cert.go:82:10: avoid allocations with (*regexp.Regexp).MatchString (mi
   - flag `--tests` (e.g. `--tests=false`)
   - flag `--skip-files` (e.g. `--skip-files="_test.go"`)
-<<<<<<< HEAD
   - yaml configuration `run.skip-files`:
-=======
-  - yaml confguration `run.skip-files`:
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
     ```yaml
     run:
       skip-files:
        - '(.+)_test\.go'
     ```
-<<<<<<< HEAD
   - yaml configuration `issues.exclude-rules`:
-=======
-  - yaml confguration `issues.exclude-rules`:
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
     ```yaml
     issues:
       exclude-rules:
@@ -124,11 +113,7 @@ util/cert/cert.go:82:10: avoid allocations with (*regexp.Regexp).MatchString (mi

 ```shell
 # Update Assets (testdata/(strings|bytes|os|utf8|maphash|regexp|bufio).go)
-<<<<<<< HEAD
 (task|make) generate
-=======
-(task|make) generated
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 # Run Tests
 (task|make) tests
 # Lint Code
diff --git a/vendor/github.com/ckaznocha/intrange/.golangci.yml b/vendor/github.com/ckaznocha/intrange/.golangci.yml
index f78118874e..b240f85ce9 100644
--- a/vendor/github.com/ckaznocha/intrange/.golangci.yml
+++ b/vendor/github.com/ckaznocha/intrange/.golangci.yml
@@ -1,13 +1,9 @@
 linters-settings:
   gci:
-<<<<<<< HEAD
     sections:
       - standard
      - default
      - localmodule
-=======
-    local-prefixes: github.com/ckaznocha/intrange
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
   gocritic:
     enabled-tags:
      - diagnostic
@@ -17,14 +13,7 @@
      - style
   goimports:
     local-prefixes: github.com/ckaznocha/intrange
-<<<<<<< HEAD
-  govet:
-=======
-  golint:
-    min-confidence: 0
   govet:
-    check-shadowing: true
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
     enable:
      - asmdecl
      - assign
@@ -35,10 +24,7 @@
      - cgocall
      - composite
      - copylock
-<<<<<<< HEAD
      - copyloopvar
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
      - deepequalerrors
      - errorsas
      - fieldalignment
@@ -72,28 +58,16 @@ linters:
   - dupl
   - errcheck
   - errorlint
-<<<<<<< HEAD
-=======
-  - exportloopref
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
   - gci
   - gochecknoinits
   - goconst
   - gocritic
   - godot
   - godox
-<<<<<<< HEAD
   - err113
   - gofmt
   - gofumpt
   - goimports
-=======
-  - goerr113
-  - gofmt
-  - gofumpt
-  - goimports
-  - gomnd
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
   - goprintffuncname
   - gosec
   - gosimple
@@ -119,11 +93,6 @@ linters:
   - wastedassign
   - whitespace
   - wsl
-<<<<<<< HEAD
issues:
  exclude-dirs:
-=======
-run:
-  skip-dirs:
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
   - testdata/
diff --git a/vendor/github.com/ckaznocha/intrange/intrange.go b/vendor/github.com/ckaznocha/intrange/intrange.go
index 56c3569273..229c847d5a 100644
--- a/vendor/github.com/ckaznocha/intrange/intrange.go
+++ b/vendor/github.com/ckaznocha/intrange/intrange.go
@@ -79,11 +79,8 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) {
 		return
 	}

-<<<<<<< HEAD
 	initAssign := init.Tok == token.ASSIGN
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if len(init.Lhs) != 1 || len(init.Rhs) != 1 {
 		return
 	}
@@ -102,7 +99,6 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) {
 		return
 	}

-<<<<<<< HEAD
 	var (
 		operand               ast.Expr
 		hasEquivalentOperator bool
 	)

 	switch cond.Op {
 	case token.LSS, token.LEQ: // ;i < n; || ;i <= n;
-=======
-	var nExpr ast.Expr
-
-	switch cond.Op {
-	case token.LSS: // ;i < n;
-		if isBenchmark(cond.Y) {
-			return
-		}
-
-		nExpr = findNExpr(cond.Y)
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		x, ok := cond.X.(*ast.Ident)
 		if !ok {
 			return
 		}
@@ -130,20 +114,10 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) {
 		if x.Name != initIdent.Name {
 			return
 		}
-<<<<<<< HEAD

 		hasEquivalentOperator = cond.Op == token.LEQ
 		operand = cond.Y
 	case token.GTR, token.GEQ: // ;n > i; || ;n >= i;
-=======
-	case token.GTR: // ;n > i;
-		if isBenchmark(cond.X) {
-			return
-		}
-
-		nExpr = findNExpr(cond.X)
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		y, ok := cond.Y.(*ast.Ident)
 		if !ok {
 			return
 		}
@@ -152,12 +126,9 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) {
 		if y.Name != initIdent.Name {
 			return
 		}
-<<<<<<< HEAD

 		hasEquivalentOperator = cond.Op == token.GEQ
 		operand = cond.X
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	default:
 		return
 	}
@@ -256,11 +227,7 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) {

 	bc := &bodyChecker{
 		initIdent: initIdent,
-<<<<<<< HEAD
 		nExpr:     findNExpr(operand),
-=======
-		nExpr:     nExpr,
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}

 	ast.Inspect(forStmt.Body, bc.check)
@@ -269,7 +236,6 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) {
 		return
 	}

-<<<<<<< HEAD
 	if initAssign {
 		pass.Report(analysis.Diagnostic{
 			Pos:     forStmt.Pos(),
@@ -314,11 +280,6 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) {
 				},
 			},
 		},
-=======
-	pass.Report(analysis.Diagnostic{
-		Pos:     forStmt.Pos(),
-		Message: msg,
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	})
 }

@@ -442,7 +403,6 @@ func findNExpr(expr ast.Expr) ast.Expr {
 	}
 }

-<<<<<<< HEAD
 func recursiveOperandToString(
 	expr ast.Expr,
 	incrementInt bool,
@@ -482,28 +442,6 @@ func recursiveOperandToString(
 	default:
 		return ""
 	}
-=======
-func isBenchmark(expr ast.Expr) bool {
-	selectorExpr, ok := expr.(*ast.SelectorExpr)
-	if !ok {
-		return false
-	}
-
-	if selectorExpr.Sel.Name != "N" {
-		return false
-	}
-
-	ident, ok := selectorExpr.X.(*ast.Ident)
-	if !ok {
-		return false
-	}
-
-	if ident.Name == "b" {
-		return true
-	}
-
-	return false
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 func identEqual(a, b ast.Expr) bool {
@@ -549,10 +487,7 @@ type bodyChecker struct {
 	initIdent *ast.Ident
 	nExpr     ast.Expr
 	modified  bool
-<<<<<<< HEAD
 	accessed  bool
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 func (b *bodyChecker) check(n ast.Node) bool {
@@ -571,19 +506,15 @@ func (b *bodyChecker) check(n ast.Node) bool {

 			return false
 		}
-<<<<<<< HEAD

 	case *ast.Ident:
 		if identEqual(stmt, b.initIdent) {
 			b.accessed = true
 		}
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}

 	return true
 }

-<<<<<<< HEAD
 func isNumberLit(exp ast.Expr) bool {
 	switch lit := exp.(type) {
 	case *ast.BasicLit:
@@ -624,8 +555,6 @@ func isNumberLit(exp ast.Expr) bool {
 	}
 }

-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func compareNumberLit(exp ast.Expr, val int) bool {
 	switch lit := exp.(type) {
 	case *ast.BasicLit:
@@ -672,7 +601,6 @@ func compareNumberLit(exp ast.Expr, val int) bool {
 		return false
 	}
 }
-<<<<<<< HEAD

 func operandToString(
 	pass *analysis.Pass,
@@ -697,5 +625,3 @@ func operandToString(

 	return t.String() + "(" + s + ")"
 }
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
index 972c725ca4..6aba0ef1f6 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
@@ -436,14 +436,8 @@ func importTar(in io.ReaderAt) (*tarFile, error) {
 		if err != nil {
 			if err == io.EOF {
 				break
-<<<<<<< HEAD
 			}
 			return nil, fmt.Errorf("failed to parse tar file, %w", err)
-=======
-			} else {
-				return nil, fmt.Errorf("failed to parse tar file, %w", err)
-			}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		}
 		switch cleanEntryName(h.Name) {
 		case PrefetchLandmark, NoPrefetchLandmark:
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
index 6476d5f02a..ba650b4d1d 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
@@ -26,20 +26,13 @@ import (
 	"archive/tar"
 	"bytes"
 	"compress/gzip"
-<<<<<<< HEAD
 	"crypto/rand"
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"crypto/sha256"
 	"encoding/json"
 	"errors"
 	"fmt"
 	"io"
-<<<<<<< HEAD
 	"math/big"
-=======
-	"math/rand"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"os"
 	"path/filepath"
 	"reflect"
@@ -53,13 +46,6 @@ import (
 	digest "github.com/opencontainers/go-digest"
 )

-<<<<<<< HEAD
-=======
-func init() {
-	rand.Seed(time.Now().UnixNano())
-}
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // TestingController is Compression with some helper methods necessary for testing.
 type TestingController interface {
 	Compression
@@ -931,17 +917,11 @@ func checkVerifyInvalidTOCEntryFail(filename string) check {
 			}
 			if sampleEntry == nil {
 				t.Fatalf("TOC must contain at least one regfile or chunk entry other than the rewrite target")
-<<<<<<< HEAD
 				return
 			}
 			if targetEntry == nil {
 				t.Fatalf("rewrite target not found")
 				return
-=======
-			}
-			if targetEntry == nil {
-				t.Fatalf("rewrite target not found")
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			}
 			targetEntry.Offset = sampleEntry.Offset
 		},
@@ -2310,15 +2290,11 @@ var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWX
 func randomContents(n int) string {
 	b := make([]rune, n)
 	for i := range b {
-<<<<<<< HEAD
 		bi, err := rand.Int(rand.Reader, big.NewInt(int64(len(runes))))
 		if err != nil {
 			panic(err)
 		}
 		b[i] = runes[int(bi.Int64())]
-=======
-		b[i] = runes[rand.Intn(len(runes))]
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 	return string(b)
 }
diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go b/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go
index dae389f98d..f6a7ea8a58 100644
--- a/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go
+++ b/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go
@@ -154,7 +154,6 @@ var supportedAlgorithms = map[string]bool{
 	EdDSA: true,
 }

-<<<<<<< HEAD
 // ProviderConfig allows direct creation of a [Provider] from metadata
 // configuration. This is intended for interop with providers that don't support
 // discovery, or host the JSON discovery document at an off-spec path.
@@ -179,15 +178,10 @@ var supportedAlgorithms = map[string]bool{
 // For providers that implement discovery, use [NewProvider] instead.
 //
 // See: https://openid.net/specs/openid-connect-discovery-1_0.html
-=======
-// ProviderConfig allows creating providers when discovery isn't supported. It's
-// generally easier to use NewProvider directly.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 type ProviderConfig struct {
 	// IssuerURL is the identity of the provider, and the string it uses to sign
 	// ID tokens with. For example "https://accounts.google.com". This value MUST
 	// match ID tokens exactly.
-<<<<<<< HEAD
 	IssuerURL string `json:"issuer"`
 	// AuthURL is the endpoint used by the provider to support the OAuth 2.0
 	// authorization endpoint.
 	AuthURL string `json:"authorization_endpoint"`
 	// TokenURL is the endpoint used by the provider to support the OAuth 2.0
 	// token endpoint.
 	TokenURL string `json:"token_endpoint"`
@@ -198,54 +192,27 @@ type ProviderConfig struct {
 	// DeviceAuthURL is the endpoint used by the provider to support the OAuth 2.0
 	// device authorization endpoint.
 	DeviceAuthURL string `json:"device_authorization_endpoint"`
-=======
-	IssuerURL string
-	// AuthURL is the endpoint used by the provider to support the OAuth 2.0
-	// authorization endpoint.
-	AuthURL string
-	// TokenURL is the endpoint used by the provider to support the OAuth 2.0
-	// token endpoint.
-	TokenURL string
-	// DeviceAuthURL is the endpoint used by the provider to support the OAuth 2.0
-	// device authorization endpoint.
-	DeviceAuthURL string
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	// UserInfoURL is the endpoint used by the provider to support the OpenID
 	// Connect UserInfo flow.
 	//
 	// https://openid.net/specs/openid-connect-core-1_0.html#UserInfo
-<<<<<<< HEAD
 	UserInfoURL string `json:"userinfo_endpoint"`
 	// JWKSURL is the endpoint used by the provider to advertise public keys to
 	// verify issued ID tokens. This endpoint is polled as new keys are made
 	// available.
 	JWKSURL string `json:"jwks_uri"`
-=======
-	UserInfoURL string
-	// JWKSURL is the endpoint used by the provider to advertise public keys to
-	// verify issued ID tokens. This endpoint is polled as new keys are made
-	// available.
-	JWKSURL string
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	// Algorithms, if provided, indicate a list of JWT algorithms allowed to sign
 	// ID tokens. If not provided, this defaults to the algorithms advertised by
 	// the JWK endpoint, then the set of algorithms supported by this package.
-<<<<<<< HEAD
 	Algorithms []string `json:"id_token_signing_alg_values_supported"`
-=======
-	Algorithms []string
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 // NewProvider initializes a provider from a set of endpoints, rather than
 // through discovery.
-<<<<<<< HEAD
 //
 // The provided context is only used for [http.Client] configuration through
 // [ClientContext], not cancelation.
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (p *ProviderConfig) NewProvider(ctx context.Context) *Provider {
 	return &Provider{
 		issuer: p.IssuerURL,
@@ -260,7 +227,6 @@ func (p *ProviderConfig) NewProvider(ctx context.Context) *Provider {
 }

 // NewProvider uses the OpenID Connect discovery mechanism to construct a Provider.
-<<<<<<< HEAD
 // The issuer is the URL identifier for the service. For example: "https://accounts.google.com"
 // or "https://login.salesforce.com".
 //
@@ -269,11 +235,6 @@ func (p *ProviderConfig) NewProvider(ctx context.Context) *Provider {
 // should use [ProviderConfig] instead.
 //
 // See: https://openid.net/specs/openid-connect-discovery-1_0.html
-=======
-//
-// The issuer is the URL identifier for the service. For example: "https://accounts.google.com"
-// or "https://login.salesforce.com".
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func NewProvider(ctx context.Context, issuer string) (*Provider, error) {
 	wellKnown := strings.TrimSuffix(issuer, "/") + "/.well-known/openid-configuration"
 	req, err := http.NewRequest("GET", wellKnown, nil)
diff --git a/vendor/github.com/curioswitch/go-reassign/.golangci.yml b/vendor/github.com/curioswitch/go-reassign/.golangci.yml
index 0354537103..fdf0bb2f22 100644
--- a/vendor/github.com/curioswitch/go-reassign/.golangci.yml
+++ b/vendor/github.com/curioswitch/go-reassign/.golangci.yml
@@ -5,23 +5,12 @@ linters:
     - bodyclose
     - decorder
    - durationcheck
-<<<<<<< HEAD
    - err113
    - errchkjson
    - errname
    - errorlint
    - exhaustive
    - gocritic
-=======
-    - errchkjson
-    - errname
-    - errorlint
-    - execinquery
-    - exhaustive
-    - exportloopref
-    - gocritic
-    - goerr113
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
    - gofmt
    - goimports
    - goprintffuncname
@@ -29,10 +18,6 @@ linters:
    - importas
    - misspell
    - nolintlint
-<<<<<<< HEAD
-=======
-    - nosnakecase
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
    - prealloc
    - predeclared
    - promlinter
diff --git a/vendor/github.com/curioswitch/go-reassign/README.md b/vendor/github.com/curioswitch/go-reassign/README.md
index 6e01aea4fd..190756f928 100644
--- a/vendor/github.com/curioswitch/go-reassign/README.md
+++ b/vendor/github.com/curioswitch/go-reassign/README.md
@@ -47,15 +47,8 @@ Package variable reassignment is generally confusing, though, and we recommend a

 The `pattern` flag can be set to a regular expression to define what variables cannot be reassigned, and `.*` is recommended if it works with your code.

-<<<<<<< HEAD
 ## Development

 [mage](https://magefile.org/) is used for development. Run `go run mage.go -l` to see available targets. For example,
 to run checks before sending a PR, run `go run mage.go check`.
-=======
-## Limitations
-
-If a variable shadows the name of an import, an assignment of a field in the variable will trigger the linter. Shadowing
-can be confusing, so it's recommended to rename the variable.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/github.com/curioswitch/go-reassign/internal/analyzer/analyzer.go b/vendor/github.com/curioswitch/go-reassign/internal/analyzer/analyzer.go
index 21b22579a6..c2a29c5299 100644
--- a/vendor/github.com/curioswitch/go-reassign/internal/analyzer/analyzer.go
+++ b/vendor/github.com/curioswitch/go-reassign/internal/analyzer/analyzer.go
@@ -48,19 +48,11 @@ func run(pass *analysis.Pass) (interface{}, error) {
 func reportImported(pass *analysis.Pass, expr ast.Expr, checkRE *regexp.Regexp, prefix string) {
 	switch x := expr.(type) {
 	case *ast.SelectorExpr:
-<<<<<<< HEAD
-=======
-		if !checkRE.MatchString(x.Sel.Name) {
-			return
-		}
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		selectIdent, ok := x.X.(*ast.Ident)
 		if !ok {
 			return
 		}

-<<<<<<< HEAD
 		var pkgPath string
 		if selectObj, ok := pass.TypesInfo.Uses[selectIdent]; ok {
 			pkg, ok := selectObj.(*types.PkgName)
@@ -85,16 +77,6 @@ func reportImported(pass *analysis.Pass, expr ast.Expr, checkRE *regexp.Regexp,
 		if matches {
 			pass.Reportf(expr.Pos(), "%s variable %s in other package %s", prefix, x.Sel.Name, selectIdent.Name)
 		}
-=======
-		if selectObj, ok := pass.TypesInfo.Uses[selectIdent]; ok {
-			if pkg, ok := selectObj.(*types.PkgName); !ok || pkg.Imported() == pass.Pkg {
-				return
-			}
-		}
-
-		pass.Reportf(expr.Pos(), "%s variable %s in other package %s", prefix, x.Sel.Name, selectIdent.Name)
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	case *ast.Ident:
 		use, ok := pass.TypesInfo.Uses[x].(*types.Var)
 		if !ok {
diff --git a/vendor/github.com/docker/cli/cli/config/credentials/file_store.go b/vendor/github.com/docker/cli/cli/config/credentials/file_store.go
index 578051b86d..9540628150 100644
--- a/vendor/github.com/docker/cli/cli/config/credentials/file_store.go
+++ b/vendor/github.com/docker/cli/cli/config/credentials/file_store.go
@@ -25,7 +25,6 @@ func NewFileStore(file store) Store {
 	return &fileStore{file: file}
 }

-<<<<<<< HEAD
 // Erase removes the given credentials from the file store.This function is
 // idempotent and does not update the file if credentials did not change.
 func (c *fileStore) Erase(serverAddress string) error {
@@ -33,10 +32,6 @@ func (c *fileStore) Erase(serverAddress string) error {
 		// nothing to do; no credentials found for the given serverAddress
 		return nil
 	}
-=======
-// Erase removes the given credentials from the file store.
-func (c *fileStore) Erase(serverAddress string) error {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	delete(c.file.GetAuthConfigs(), serverAddress)
 	return c.file.Save()
 }
@@ -62,7 +57,6 @@ func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) {
 	return c.file.GetAuthConfigs(), nil
 }

-<<<<<<< HEAD
 // Store saves the given credentials in the file store. This function is
 // idempotent and does not update the file if credentials did not change.
 func (c *fileStore) Store(authConfig types.AuthConfig) error {
@@ -71,11 +65,6 @@ func (c *fileStore) Store(authConfig types.AuthConfig) error {
 		// Credentials didn't change, so skip updating the configuration file.
 		return nil
 	}
-=======
-// Store saves the given credentials in the file store.
-func (c *fileStore) Store(authConfig types.AuthConfig) error {
-	authConfigs := c.file.GetAuthConfigs()
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	authConfigs[authConfig.ServerAddress] = authConfig
 	return c.file.Save()
 }
diff --git a/vendor/github.com/docker/docker-credential-helpers/client/client.go b/vendor/github.com/docker/docker-credential-helpers/client/client.go
index 7d0c70584a..7ca5ab7222 100644
--- a/vendor/github.com/docker/docker-credential-helpers/client/client.go
+++ b/vendor/github.com/docker/docker-credential-helpers/client/client.go
@@ -16,17 +16,9 @@ func isValidCredsMessage(msg string) error {
 	if credentials.IsCredentialsMissingServerURLMessage(msg) {
 		return credentials.NewErrCredentialsMissingServerURL()
 	}
-<<<<<<< HEAD
 	if credentials.IsCredentialsMissingUsernameMessage(msg) {
 		return credentials.NewErrCredentialsMissingUsername()
 	}
-=======
-
-	if credentials.IsCredentialsMissingUsernameMessage(msg) {
-		return credentials.NewErrCredentialsMissingUsername()
-	}
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return nil
 }

@@ -42,20 +34,10 @@ func Store(program ProgramFunc, creds *credentials.Credentials) error {

 	out, err := cmd.Output()
 	if err != nil {
-<<<<<<< HEAD
 		if isValidErr := isValidCredsMessage(string(out)); isValidErr != nil {
 			err = isValidErr
 		}
 		return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, strings.TrimSpace(string(out)))
-=======
-		t := strings.TrimSpace(string(out))
-
-		if isValidErr := isValidCredsMessage(t); isValidErr != nil {
-			err = isValidErr
-		}
-
-		return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, t)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}

 	return nil
@@ -68,7 +50,6 @@ func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error

 	out, err := cmd.Output()
 	if err != nil {
-<<<<<<< HEAD
 		if credentials.IsErrCredentialsNotFoundMessage(string(out)) {
 			return nil, credentials.NewErrCredentialsNotFound()
 		}
@@ -78,19 +59,6 @@ func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error
 		}

 		return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, strings.TrimSpace(string(out)))
-=======
-		t := strings.TrimSpace(string(out))
-
-		if credentials.IsErrCredentialsNotFoundMessage(t) {
-			return nil, credentials.NewErrCredentialsNotFound()
-		}
-
-		if isValidErr := isValidCredsMessage(t); isValidErr != nil {
-			err = isValidErr
-		}
-
-		return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, t)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}

 	resp := &credentials.Credentials{
diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go
index f710703480..2283d5a44c 100644
--- a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go
+++ b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go
@@ -1,13 +1,9 @@
 package credentials

-<<<<<<< HEAD
 import (
 	"errors"
 	"strings"
 )
-=======
-import "errors"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 const (
 	// ErrCredentialsNotFound standardizes the not found error, so every helper returns
@@ -54,11 +50,7 @@ func IsErrCredentialsNotFound(err error) bool {
 // This function helps to check messages returned by an
 // external program via its standard output.
 func IsErrCredentialsNotFoundMessage(err string) bool {
-<<<<<<< HEAD
 	return strings.TrimSpace(err) == errCredentialsNotFoundMessage
-=======
-	return err == errCredentialsNotFoundMessage
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 // errCredentialsMissingServerURL represents an error raised
@@ -115,11 +107,7 @@ func IsCredentialsMissingServerURL(err error) bool {
 // IsCredentialsMissingServerURLMessage checks for an
 // errCredentialsMissingServerURL in the error message.
 func IsCredentialsMissingServerURLMessage(err string) bool {
-<<<<<<< HEAD
 	return strings.TrimSpace(err) == errCredentialsMissingServerURLMessage
-=======
-	return err == errCredentialsMissingServerURLMessage
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 // IsCredentialsMissingUsername returns true if the error
@@ -132,9 +120,5 @@ func IsCredentialsMissingUsername(err error) bool {
 // IsCredentialsMissingUsernameMessage checks for an
 // errCredentialsMissingUsername in the error message.
 func IsCredentialsMissingUsernameMessage(err string) bool {
-<<<<<<< HEAD
 	return strings.TrimSpace(err) == errCredentialsMissingUsernameMessage
-=======
-	return err == errCredentialsMissingUsernameMessage
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.go
index 8ae9c536d0..b82b06506a 100644
--- a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.go
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.go
@@ -672,11 +672,7 @@ func (x *ClusterCollection) GetEntries() *v3.CollectionEntry {
 }

 // Configuration for a single upstream cluster.
-<<<<<<< HEAD
 // [#next-free-field: 59]
-=======
-// [#next-free-field: 58]
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 type Cluster struct {
 	state         protoimpl.MessageState
 	sizeCache     protoimpl.SizeCache
@@ -863,7 +859,6 @@ type Cluster struct {
 	// and :ref:`LOGICAL_DNS`
 	// this setting is ignored.
 	DnsRefreshRate *durationpb.Duration `protobuf:"bytes,16,opt,name=dns_refresh_rate,json=dnsRefreshRate,proto3" json:"dns_refresh_rate,omitempty"`
-<<<<<<< HEAD
 	// DNS jitter can be optionally specified if the cluster type is either
 	// :ref:`STRICT_DNS`,
 	// or :ref:`LOGICAL_DNS`.
 	// DNS jitter causes the cluster to refresh DNS entries at a random time between
@@ -874,8 +869,6 @@ type Cluster struct {
 	// and :ref:`LOGICAL_DNS`
 	// this setting is ignored.
 	DnsJitter *durationpb.Duration `protobuf:"bytes,58,opt,name=dns_jitter,json=dnsJitter,proto3" json:"dns_jitter,omitempty"`
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	// If the DNS failure refresh rate is specified and the cluster type is either
 	// :ref:`STRICT_DNS`,
 	// or :ref:`LOGICAL_DNS`,
@@ -1045,7 +1038,6 @@ type Cluster struct {
 	// maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation
 	// from the LRS stream here.]
 	LrsServer *v32.ConfigSource `protobuf:"bytes,42,opt,name=lrs_server,json=lrsServer,proto3" json:"lrs_server,omitempty"`
-<<<<<<< HEAD
 	// A list of metric names from :ref:`ORCA load reports ` to propagate to LRS.
 	//
 	// If not specified, then ORCA load reports will not be propagated to LRS.
 	//
 	// For map fields in the ORCA proto, the string will be of the form “.“.
 	// For example, the string “named_metrics.foo“ will mean to look for the key “foo“ in the ORCA
 	// :ref:`named_metrics ` field.
-=======
-	// [#not-implemented-hide:]
-	// A list of metric names from ORCA load reports to propagate to LRS.
-	//
-	// For map fields in the ORCA proto, the string will be of the form “.“.
-	// For example, the string “named_metrics.foo“ will mean to look for the key “foo“ in the ORCA
-	// “named_metrics“ field.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	//
 	// The special map key “*“ means to report all entries in the map (e.g., “named_metrics.*“ means to
 	// report all entries in the ORCA named_metrics field). Note that this should be used only with trusted
@@ -1288,7 +1272,6 @@ func (x *Cluster) GetDnsRefreshRate() *durationpb.Duration {
 	return nil
 }

-<<<<<<< HEAD
 func (x *Cluster) GetDnsJitter() *durationpb.Duration {
 	if x != nil {
 		return x.DnsJitter
@@ -1296,8 +1279,6 @@ func (x *Cluster) GetDnsJitter() *durationpb.Duration {
 	return nil
 }

-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (x *Cluster) GetDnsFailureRefreshRate() *Cluster_RefreshRate {
 	if x != nil {
 		return x.DnsFailureRefreshRate
@@ -3408,11 +3389,7 @@ var file_envoy_config_cluster_v3_cluster_proto_rawDesc = []byte{
 	0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
 	0x1c, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f,
 	0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65,
-<<<<<<< HEAD
 	0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x22, 0x90, 0x54, 0x0a, 0x07, 0x43, 0x6c, 0x75, 0x73, 0x74,
-=======
-	0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x22, 0xd6, 0x53, 0x0a, 0x07, 0x43, 0x6c, 0x75, 0x73, 0x74,
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	0x65, 0x72, 0x12, 0x6f, 0x0a, 0x18, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f,
 	0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x18, 0x2b,
 	0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e,
@@ -3526,7 +3503,6 @@ var file_envoy_config_cluster_v3_cluster_proto_rawDesc = []byte{
 	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69,
 	0x6f, 0x6e, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0xaa, 0x01, 0x06, 0x2a, 0x04, 0x10, 0xc0, 0x84, 0x3d,
 	0x52, 0x0e, 0x64, 0x6e, 0x73, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65,
-<<<<<<< HEAD
 	0x12, 0x38, 0x0a, 0x0a, 0x64, 0x6e, 0x73, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x3a,
 	0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
 	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
@@ -3863,328 +3839,10 @@ var file_envoy_config_cluster_v3_cluster_proto_rawDesc = []byte{
 	0x52, 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42,
 	0x69, 0x61, 0x73, 0x12, 0x5c, 0x0a, 0x11, 0x73, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72,
 	0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30,
-=======
[several hundred removed lines of the superseded machine-generated rawDesc byte literals; this hunk's removals continue beyond the end of this excerpt]
0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x62, 0x53, 0x75, 0x62, 0x73, 0x65, - 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xe3, 0x01, 0x0a, 0x0f, 0x53, 0x6c, 0x6f, 0x77, - 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x45, 0x0a, 0x11, 0x73, - 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x0f, 0x73, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x57, 0x69, 0x6e, 0x64, - 0x6f, 0x77, 0x12, 0x43, 0x0a, 0x0a, 0x61, 0x67, 0x67, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, - 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x0a, 0x61, 0x67, 0x67, - 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, 0x12, 0x6d, 0x69, 0x6e, 0x5f, 0x77, - 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x10, 0x6d, 0x69, 0x6e, - 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x1a, 0x72, 0x0a, - 0x12, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x5c, 0x0a, 0x11, 0x73, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72, - 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x53, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x73, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, -<<<<<<< HEAD 0x67, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, @@ -4490,331 +4148,6 @@ var file_envoy_config_cluster_v3_cluster_proto_rawDesc = []byte{ 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -======= - 0x67, 0x1a, 0xc5, 0x02, 0x0a, 0x14, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x48, 0x0a, 0x0c, 0x63, 0x68, - 0x6f, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, - 0xfa, 
0x42, 0x04, 0x2a, 0x02, 0x28, 0x02, 0x52, 0x0b, 0x63, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x53, 0x0a, 0x13, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, - 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x69, 0x61, 0x73, 0x12, 0x5c, 0x0a, 0x11, 0x73, 0x6c, 0x6f, - 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x53, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x73, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, - 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x91, 0x03, 0x0a, 0x10, 0x52, 0x69, - 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x54, - 0x0a, 0x11, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, - 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, - 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x32, 0x05, 0x18, 0x80, - 0x80, 0x80, 0x04, 0x52, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x69, 0x6e, 0x67, - 0x53, 0x69, 0x7a, 0x65, 0x12, 0x6d, 0x0a, 0x0d, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x66, 0x75, 0x6e, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x52, 0x69, - 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, - 0x61, 0x73, 0x68, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, - 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x68, 0x61, 0x73, 0x68, 0x46, 0x75, 0x6e, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x54, 0x0a, 0x11, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x72, - 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0a, 0xfa, 0x42, - 0x07, 0x32, 0x05, 0x18, 0x80, 0x80, 0x80, 0x04, 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, - 0x6d, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x2e, 0x0a, 0x0c, 0x48, 0x61, 0x73, - 0x68, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a, 0x07, 0x58, 0x58, 0x5f, - 0x48, 0x41, 0x53, 0x48, 0x10, 
0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4d, 0x55, 0x52, 0x4d, 0x55, 0x52, - 0x5f, 0x48, 0x41, 0x53, 0x48, 0x5f, 0x32, 0x10, 0x01, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, - 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x4c, - 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x1a, 0x59, 0x0a, - 0x0e, 0x4d, 0x61, 0x67, 0x6c, 0x65, 0x76, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x47, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x32, 0x05, 0x18, 0xcb, 0x96, 0xb1, 0x02, 0x52, 0x09, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x1a, 0xbf, 0x02, 0x0a, 0x13, 0x4f, 0x72, 0x69, - 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x44, 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x26, 0x0a, 0x0f, 0x75, 0x73, 0x65, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x68, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x75, 0x73, 0x65, 0x48, 0x74, - 0x74, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x10, 0x68, 0x74, 0x74, 0x70, - 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0e, 0x68, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x5d, 0x0a, 0x16, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x70, - 0x6f, 0x72, 0x74, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x42, 0x09, 0xfa, 0x42, 0x06, 0x2a, 0x04, 0x18, 0xff, 0xff, 0x03, 0x52, 0x14, 0x75, 0x70, 0x73, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x6f, 0x72, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, - 0x65, 0x12, 0x46, 0x0a, 0x0c, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6b, 0x65, - 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x33, - 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x52, 0x0b, 0x6d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, - 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x44, - 0x73, 0x74, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xd5, 0x0b, 0x0a, 0x0e, 0x43, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4e, 0x0a, - 0x17, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x70, 0x61, 0x6e, 0x69, 0x63, 0x5f, 0x74, - 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, - 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x15, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x50, - 0x61, 0x6e, 0x69, 0x63, 0x54, 0x68, 0x72, 0x65, 0x73, 
0x68, 0x6f, 0x6c, 0x64, 0x12, 0x74, 0x0a, - 0x14, 0x7a, 0x6f, 0x6e, 0x65, 0x5f, 0x61, 0x77, 0x61, 0x72, 0x65, 0x5f, 0x6c, 0x62, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x5a, 0x6f, 0x6e, - 0x65, 0x41, 0x77, 0x61, 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, - 0x52, 0x11, 0x7a, 0x6f, 0x6e, 0x65, 0x41, 0x77, 0x61, 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x89, 0x01, 0x0a, 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, - 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, - 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, - 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x49, 0x0a, 0x13, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x5f, - 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, - 0x65, 0x72, 0x67, 0x65, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x43, 0x0a, 0x1f, 0x69, 0x67, - 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x5f, 0x75, - 0x6e, 0x74, 0x69, 0x6c, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x68, 0x63, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x1a, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x4e, 0x65, 0x77, 0x48, 0x6f, - 0x73, 0x74, 0x73, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x46, 0x69, 0x72, 0x73, 0x74, 0x48, 0x63, 0x12, - 0x4d, 0x0a, 0x24, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x6f, 0x6e, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x74, - 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1f, 0x63, - 0x6c, 0x6f, 0x73, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4f, - 0x6e, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x65, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x8a, - 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x61, - 0x73, 0x68, 0x69, 0x6e, 0x67, 0x5f, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 
0x74, 0x65, 0x6e, - 0x74, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x52, 0x19, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, - 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x57, 0x0a, 0x14, 0x6f, - 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, - 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x53, 0x65, 0x74, - 0x52, 0x12, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x1a, 0x8d, 0x02, 0x0a, 0x11, 0x5a, 0x6f, 0x6e, 0x65, 0x41, 0x77, 0x61, - 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3f, 0x0a, 0x0f, 0x72, 0x6f, - 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x6f, 0x75, - 0x74, 0x69, 0x6e, 0x67, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x46, 0x0a, 0x10, 0x6d, - 0x69, 0x6e, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, - 0x69, 0x7a, 0x65, 0x12, 0x31, 0x0a, 0x15, 0x66, 0x61, 0x69, 0x6c, 0x5f, 0x74, 0x72, 0x61, 0x66, - 0x66, 0x69, 0x63, 0x5f, 0x6f, 0x6e, 0x5f, 0x70, 0x61, 0x6e, 0x69, 0x63, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x12, 0x66, 0x61, 0x69, 0x6c, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x4f, - 0x6e, 0x50, 0x61, 0x6e, 0x69, 0x63, 0x3a, 0x3c, 0x9a, 0xc5, 0x88, 0x1e, 0x37, 0x0a, 0x35, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x5a, 0x6f, 0x6e, 0x65, 0x41, 0x77, 0x61, 0x72, 0x65, 0x4c, 0x62, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x5f, 0x0a, 0x18, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, - 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x3a, 0x43, 0x9a, 0xc5, 0x88, 0x1e, 0x3e, 0x0a, 0x3c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x63, - 0x61, 0x6c, 0x69, 0x74, 0x79, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x4c, 0x62, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xf1, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, - 0x74, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x37, 0x0a, 0x18, 0x75, 0x73, 0x65, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, - 0x61, 0x6d, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x75, 0x73, 0x65, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, - 0x6d, 
0x65, 0x46, 0x6f, 0x72, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x55, 0x0a, 0x13, - 0x68, 0x61, 0x73, 0x68, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, 0x61, 0x63, - 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, - 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x64, - 0x52, 0x11, 0x68, 0x61, 0x73, 0x68, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x61, 0x63, - 0x74, 0x6f, 0x72, 0x3a, 0x44, 0x9a, 0xc5, 0x88, 0x1e, 0x3f, 0x0a, 0x3d, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x69, 0x6e, - 0x67, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, - 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x62, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x1b, 0x0a, 0x19, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, - 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, - 0x65, 0x72, 0x1a, 0xd2, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x52, 0x61, - 0x74, 0x65, 0x12, 0x4e, 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0xaa, 0x01, 0x08, 0x08, 0x01, 0x2a, 0x04, - 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, - 0x61, 0x6c, 0x12, 0x4a, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, - 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0xaa, 0x01, 0x06, 0x2a, 0x04, 0x10, 0xc0, 0x84, - 0x3d, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x3a, 0x27, - 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x66, 0x72, - 0x65, 0x73, 0x68, 0x52, 0x61, 0x74, 0x65, 0x1a, 0x83, 0x02, 0x0a, 0x10, 0x50, 0x72, 0x65, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x78, 0x0a, 0x1d, - 0x70, 0x65, 0x72, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x70, 0x72, 0x65, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x42, 0x17, 0xfa, 0x42, 0x14, 0x12, 0x12, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, - 0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x52, 0x1a, 0x70, 0x65, 0x72, 0x55, - 0x70, 0x73, 0x74, 0x72, 0x65, 
0x61, 0x6d, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x75, 0x0a, 0x1b, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, - 0x74, 0x69, 0x76, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x6f, - 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x17, 0xfa, 0x42, 0x14, 0x12, 0x12, - 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xf0, 0x3f, 0x52, 0x19, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x74, 0x69, 0x76, 0x65, 0x50, 0x72, - 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x1a, 0x66, 0x0a, - 0x22, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x57, 0x0a, 0x0d, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, - 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x41, 0x54, 0x49, 0x43, - 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x53, 0x54, 0x52, 0x49, 0x43, 0x54, 0x5f, 0x44, 0x4e, 0x53, - 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4c, 0x4f, 0x47, 0x49, 0x43, 0x41, 0x4c, 0x5f, 0x44, 0x4e, - 0x53, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x45, 0x44, 0x53, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, - 0x4f, 0x52, 0x49, 0x47, 0x49, 0x4e, 0x41, 0x4c, 0x5f, 0x44, 0x53, 0x54, 0x10, 0x04, 0x22, 0xa4, - 0x01, 0x0a, 0x08, 0x4c, 0x62, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x0f, 0x0a, 0x0b, 0x52, - 0x4f, 0x55, 0x4e, 0x44, 0x5f, 0x52, 0x4f, 0x42, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, - 0x4c, 0x45, 0x41, 0x53, 0x54, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0x01, 0x12, - 0x0d, 0x0a, 0x09, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x10, 0x02, 0x12, 0x0a, - 0x0a, 0x06, 0x52, 0x41, 0x4e, 0x44, 0x4f, 0x4d, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x41, - 0x47, 0x4c, 0x45, 0x56, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4c, 0x55, 0x53, 0x54, 0x45, - 0x52, 0x5f, 0x50, 0x52, 0x4f, 0x56, 0x49, 0x44, 0x45, 0x44, 0x10, 0x06, 0x12, 0x20, 0x0a, 0x1c, - 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x42, 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x49, 0x4e, 0x47, 0x5f, 0x50, - 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x10, 0x07, 0x22, 0x04, - 0x08, 0x04, 0x10, 0x04, 0x2a, 0x0f, 0x4f, 0x52, 0x49, 0x47, 0x49, 0x4e, 0x41, 0x4c, 0x5f, 0x44, - 0x53, 0x54, 0x5f, 0x4c, 0x42, 0x22, 0x50, 0x0a, 0x0f, 0x44, 0x6e, 0x73, 0x4c, 0x6f, 0x6f, 0x6b, - 0x75, 0x70, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x55, 0x54, 0x4f, - 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x56, 0x34, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, - 0x0b, 0x0a, 0x07, 0x56, 0x36, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, - 0x56, 0x34, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x45, 0x52, 0x52, 0x45, 0x44, 0x10, 0x03, 0x12, 0x07, - 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x04, 0x22, 0x54, 
0x0a, 0x18, 0x43, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x53, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, - 0x47, 0x55, 0x52, 0x45, 0x44, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0x00, - 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x53, 0x45, 0x5f, 0x44, 0x4f, 0x57, 0x4e, 0x53, 0x54, 0x52, 0x45, - 0x41, 0x4d, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0x01, 0x3a, 0x1b, 0x9a, - 0xc5, 0x88, 0x1e, 0x16, 0x0a, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x42, 0x18, 0x0a, 0x16, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x42, 0x0b, 0x0a, 0x09, 0x6c, 0x62, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08, 0x0f, 0x10, 0x10, 0x4a, 0x04, 0x08, - 0x07, 0x10, 0x08, 0x4a, 0x04, 0x08, 0x0b, 0x10, 0x0c, 0x4a, 0x04, 0x08, 0x23, 0x10, 0x24, 0x52, - 0x05, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x52, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x74, - 0x65, 0x78, 0x74, 0x52, 0x1a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, - 0xda, 0x02, 0x0a, 0x13, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, - 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4f, 0x0a, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, - 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x08, - 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x1a, 0xc8, 0x01, 0x0a, 0x06, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x12, 0x60, 0x0a, 0x16, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x65, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, - 0x14, 0x74, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x01, 0x10, - 0x02, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x52, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, - 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 
0xbb, 0x05, 0x0a, - 0x19, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x47, 0x0a, 0x0d, 0x74, 0x63, - 0x70, 0x5f, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x63, 0x70, 0x4b, 0x65, 0x65, 0x70, - 0x61, 0x6c, 0x69, 0x76, 0x65, 0x52, 0x0c, 0x74, 0x63, 0x70, 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, - 0x69, 0x76, 0x65, 0x12, 0x64, 0x0a, 0x30, 0x73, 0x65, 0x74, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, - 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, - 0x6f, 0x6e, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x2a, 0x73, - 0x65, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, - 0x4e, 0x61, 0x6d, 0x65, 0x4f, 0x6e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x7a, 0x0a, 0x15, 0x68, 0x61, 0x70, - 0x70, 0x79, 0x5f, 0x65, 0x79, 0x65, 0x62, 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, - 0x76, 0x33, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x61, 0x70, - 0x70, 0x79, 0x45, 0x79, 0x65, 0x62, 0x61, 0x6c, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x52, 0x13, 0x68, 0x61, 0x70, 0x70, 0x79, 0x45, 0x79, 0x65, 0x62, 0x61, 0x6c, 0x6c, 0x73, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x89, 0x02, 0x0a, 0x13, 0x48, 0x61, 0x70, 0x70, 0x79, 0x45, - 0x79, 0x65, 0x62, 0x61, 0x6c, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x8d, 0x01, - 0x0a, 0x1c, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, - 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x4c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x55, - 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x69, 0x72, 0x73, 0x74, 0x41, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x19, 0x66, 0x69, 0x72, 0x73, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, - 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x62, 0x0a, - 0x1a, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x66, - 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, - 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x17, 0x66, 0x69, 0x72, 0x73, 0x74, 0x41, - 0x64, 
0x64, 0x72, 0x65, 0x73, 0x73, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x22, 0x38, 0x0a, 0x19, 0x46, 0x69, 0x72, 0x73, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0b, - 0x0a, 0x07, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x56, - 0x34, 0x10, 0x01, 0x12, 0x06, 0x0a, 0x02, 0x56, 0x36, 0x10, 0x02, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, - 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa0, 0x01, 0x0a, 0x11, 0x54, - 0x72, 0x61, 0x63, 0x6b, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, - 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x75, 0x64, 0x67, - 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x6f, - 0x75, 0x74, 0x42, 0x75, 0x64, 0x67, 0x65, 0x74, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x73, 0x69, - 0x7a, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x12, - 0x2c, 0x0a, 0x12, 0x70, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, - 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x65, 0x72, - 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x42, 0x89, 0x01, - 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x25, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, - 0x0c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x48, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var ( @@ -4915,7 +4248,6 @@ var file_envoy_config_cluster_v3_cluster_proto_depIdxs = []int32{ 43, // 15: envoy.config.cluster.v3.Cluster.http2_protocol_options:type_name -> envoy.config.core.v3.Http2ProtocolOptions 27, // 16: envoy.config.cluster.v3.Cluster.typed_extension_protocol_options:type_name -> envoy.config.cluster.v3.Cluster.TypedExtensionProtocolOptionsEntry 35, // 17: envoy.config.cluster.v3.Cluster.dns_refresh_rate:type_name -> google.protobuf.Duration -<<<<<<< HEAD 35, // 18: envoy.config.cluster.v3.Cluster.dns_jitter:type_name -> google.protobuf.Duration 25, // 19: envoy.config.cluster.v3.Cluster.dns_failure_refresh_rate:type_name -> envoy.config.cluster.v3.Cluster.RefreshRate 2, // 20: envoy.config.cluster.v3.Cluster.dns_lookup_family:type_name -> envoy.config.cluster.v3.Cluster.DnsLookupFamily @@ -4990,81 +4322,6 @@ var 
file_envoy_config_cluster_v3_cluster_proto_depIdxs = []int32{
 	87, // [87:87] is the sub-list for extension type_name
 	87, // [87:87] is the sub-list for extension extendee
 	0,  // [0:87] is the sub-list for field type_name
-=======
-	25, // 18: envoy.config.cluster.v3.Cluster.dns_failure_refresh_rate:type_name -> envoy.config.cluster.v3.Cluster.RefreshRate
-	2,  // 19: envoy.config.cluster.v3.Cluster.dns_lookup_family:type_name -> envoy.config.cluster.v3.Cluster.DnsLookupFamily
-	44, // 20: envoy.config.cluster.v3.Cluster.dns_resolvers:type_name -> envoy.config.core.v3.Address
-	45, // 21: envoy.config.cluster.v3.Cluster.dns_resolution_config:type_name -> envoy.config.core.v3.DnsResolutionConfig
-	46, // 22: envoy.config.cluster.v3.Cluster.typed_dns_resolver_config:type_name -> envoy.config.core.v3.TypedExtensionConfig
-	47, // 23: envoy.config.cluster.v3.Cluster.wait_for_warm_on_init:type_name -> google.protobuf.BoolValue
-	48, // 24: envoy.config.cluster.v3.Cluster.outlier_detection:type_name -> envoy.config.cluster.v3.OutlierDetection
-	35, // 25: envoy.config.cluster.v3.Cluster.cleanup_interval:type_name -> google.protobuf.Duration
-	49, // 26: envoy.config.cluster.v3.Cluster.upstream_bind_config:type_name -> envoy.config.core.v3.BindConfig
-	17, // 27: envoy.config.cluster.v3.Cluster.lb_subset_config:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig
-	21, // 28: envoy.config.cluster.v3.Cluster.ring_hash_lb_config:type_name -> envoy.config.cluster.v3.Cluster.RingHashLbConfig
-	22, // 29: envoy.config.cluster.v3.Cluster.maglev_lb_config:type_name -> envoy.config.cluster.v3.Cluster.MaglevLbConfig
-	23, // 30: envoy.config.cluster.v3.Cluster.original_dst_lb_config:type_name -> envoy.config.cluster.v3.Cluster.OriginalDstLbConfig
-	20, // 31: envoy.config.cluster.v3.Cluster.least_request_lb_config:type_name -> envoy.config.cluster.v3.Cluster.LeastRequestLbConfig
-	19, // 32: envoy.config.cluster.v3.Cluster.round_robin_lb_config:type_name -> envoy.config.cluster.v3.Cluster.RoundRobinLbConfig
-	24, // 33: envoy.config.cluster.v3.Cluster.common_lb_config:type_name -> envoy.config.cluster.v3.Cluster.CommonLbConfig
-	50, // 34: envoy.config.cluster.v3.Cluster.transport_socket:type_name -> envoy.config.core.v3.TransportSocket
-	51, // 35: envoy.config.cluster.v3.Cluster.metadata:type_name -> envoy.config.core.v3.Metadata
-	3,  // 36: envoy.config.cluster.v3.Cluster.protocol_selection:type_name -> envoy.config.cluster.v3.Cluster.ClusterProtocolSelection
-	12, // 37: envoy.config.cluster.v3.Cluster.upstream_connection_options:type_name -> envoy.config.cluster.v3.UpstreamConnectionOptions
-	52, // 38: envoy.config.cluster.v3.Cluster.filters:type_name -> envoy.config.cluster.v3.Filter
-	11, // 39: envoy.config.cluster.v3.Cluster.load_balancing_policy:type_name -> envoy.config.cluster.v3.LoadBalancingPolicy
-	53, // 40: envoy.config.cluster.v3.Cluster.lrs_server:type_name -> envoy.config.core.v3.ConfigSource
-	46, // 41: envoy.config.cluster.v3.Cluster.upstream_config:type_name -> envoy.config.core.v3.TypedExtensionConfig
-	13, // 42: envoy.config.cluster.v3.Cluster.track_cluster_stats:type_name -> envoy.config.cluster.v3.TrackClusterStats
-	26, // 43: envoy.config.cluster.v3.Cluster.preconnect_policy:type_name -> envoy.config.cluster.v3.Cluster.PreconnectPolicy
-	32, // 44: envoy.config.cluster.v3.LoadBalancingPolicy.policies:type_name -> envoy.config.cluster.v3.LoadBalancingPolicy.Policy
-	54, // 45: envoy.config.cluster.v3.UpstreamConnectionOptions.tcp_keepalive:type_name -> envoy.config.core.v3.TcpKeepalive
-	33, // 46: envoy.config.cluster.v3.UpstreamConnectionOptions.happy_eyeballs_config:type_name -> envoy.config.cluster.v3.UpstreamConnectionOptions.HappyEyeballsConfig
-	55, // 47: envoy.config.cluster.v3.Cluster.TransportSocketMatch.match:type_name -> google.protobuf.Struct
-	50, // 48: envoy.config.cluster.v3.Cluster.TransportSocketMatch.transport_socket:type_name -> envoy.config.core.v3.TransportSocket
-	56, // 49: envoy.config.cluster.v3.Cluster.CustomClusterType.typed_config:type_name -> google.protobuf.Any
-	53, // 50: envoy.config.cluster.v3.Cluster.EdsClusterConfig.eds_config:type_name -> envoy.config.core.v3.ConfigSource
-	4,  // 51: envoy.config.cluster.v3.Cluster.LbSubsetConfig.fallback_policy:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetFallbackPolicy
-	55, // 52: envoy.config.cluster.v3.Cluster.LbSubsetConfig.default_subset:type_name -> google.protobuf.Struct
-	28, // 53: envoy.config.cluster.v3.Cluster.LbSubsetConfig.subset_selectors:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector
-	5,  // 54: envoy.config.cluster.v3.Cluster.LbSubsetConfig.metadata_fallback_policy:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetMetadataFallbackPolicy
-	35, // 55: envoy.config.cluster.v3.Cluster.SlowStartConfig.slow_start_window:type_name -> google.protobuf.Duration
-	57, // 56: envoy.config.cluster.v3.Cluster.SlowStartConfig.aggression:type_name -> envoy.config.core.v3.RuntimeDouble
-	58, // 57: envoy.config.cluster.v3.Cluster.SlowStartConfig.min_weight_percent:type_name -> envoy.type.v3.Percent
-	18, // 58: envoy.config.cluster.v3.Cluster.RoundRobinLbConfig.slow_start_config:type_name -> envoy.config.cluster.v3.Cluster.SlowStartConfig
-	36, // 59: envoy.config.cluster.v3.Cluster.LeastRequestLbConfig.choice_count:type_name -> google.protobuf.UInt32Value
-	57, // 60: envoy.config.cluster.v3.Cluster.LeastRequestLbConfig.active_request_bias:type_name -> envoy.config.core.v3.RuntimeDouble
-	18, // 61: envoy.config.cluster.v3.Cluster.LeastRequestLbConfig.slow_start_config:type_name -> envoy.config.cluster.v3.Cluster.SlowStartConfig
-	59, // 62: envoy.config.cluster.v3.Cluster.RingHashLbConfig.minimum_ring_size:type_name -> google.protobuf.UInt64Value
-	7,  // 63: envoy.config.cluster.v3.Cluster.RingHashLbConfig.hash_function:type_name -> envoy.config.cluster.v3.Cluster.RingHashLbConfig.HashFunction
-	59, // 64: envoy.config.cluster.v3.Cluster.RingHashLbConfig.maximum_ring_size:type_name -> google.protobuf.UInt64Value
-	59, // 65: envoy.config.cluster.v3.Cluster.MaglevLbConfig.table_size:type_name -> google.protobuf.UInt64Value
-	36, // 66: envoy.config.cluster.v3.Cluster.OriginalDstLbConfig.upstream_port_override:type_name -> google.protobuf.UInt32Value
-	60, // 67: envoy.config.cluster.v3.Cluster.OriginalDstLbConfig.metadata_key:type_name -> envoy.type.metadata.v3.MetadataKey
-	58, // 68: envoy.config.cluster.v3.Cluster.CommonLbConfig.healthy_panic_threshold:type_name -> envoy.type.v3.Percent
-	29, // 69: envoy.config.cluster.v3.Cluster.CommonLbConfig.zone_aware_lb_config:type_name -> envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig
-	30, // 70: envoy.config.cluster.v3.Cluster.CommonLbConfig.locality_weighted_lb_config:type_name -> envoy.config.cluster.v3.Cluster.CommonLbConfig.LocalityWeightedLbConfig
-	35, // 71: envoy.config.cluster.v3.Cluster.CommonLbConfig.update_merge_window:type_name -> google.protobuf.Duration
-	31, // 72: envoy.config.cluster.v3.Cluster.CommonLbConfig.consistent_hashing_lb_config:type_name -> envoy.config.cluster.v3.Cluster.CommonLbConfig.ConsistentHashingLbConfig
-	61, // 73: envoy.config.cluster.v3.Cluster.CommonLbConfig.override_host_status:type_name -> envoy.config.core.v3.HealthStatusSet
-	35, // 74: envoy.config.cluster.v3.Cluster.RefreshRate.base_interval:type_name -> google.protobuf.Duration
-	35, // 75: envoy.config.cluster.v3.Cluster.RefreshRate.max_interval:type_name -> google.protobuf.Duration
-	62, // 76: envoy.config.cluster.v3.Cluster.PreconnectPolicy.per_upstream_preconnect_ratio:type_name -> google.protobuf.DoubleValue
-	62, // 77: envoy.config.cluster.v3.Cluster.PreconnectPolicy.predictive_preconnect_ratio:type_name -> google.protobuf.DoubleValue
-	56, // 78: envoy.config.cluster.v3.Cluster.TypedExtensionProtocolOptionsEntry.value:type_name -> google.protobuf.Any
-	6,  // 79: envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.fallback_policy:type_name -> envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.LbSubsetSelectorFallbackPolicy
-	58, // 80: envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig.routing_enabled:type_name -> envoy.type.v3.Percent
-	59, // 81: envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig.min_cluster_size:type_name -> google.protobuf.UInt64Value
-	36, // 82: envoy.config.cluster.v3.Cluster.CommonLbConfig.ConsistentHashingLbConfig.hash_balance_factor:type_name -> google.protobuf.UInt32Value
-	46, // 83: envoy.config.cluster.v3.LoadBalancingPolicy.Policy.typed_extension_config:type_name -> envoy.config.core.v3.TypedExtensionConfig
-	8,  // 84: envoy.config.cluster.v3.UpstreamConnectionOptions.HappyEyeballsConfig.first_address_family_version:type_name -> envoy.config.cluster.v3.UpstreamConnectionOptions.FirstAddressFamilyVersion
-	36, // 85: envoy.config.cluster.v3.UpstreamConnectionOptions.HappyEyeballsConfig.first_address_family_count:type_name -> google.protobuf.UInt32Value
-	86, // [86:86] is the sub-list for method output_type
-	86, // [86:86] is the sub-list for method input_type
-	86, // [86:86] is the sub-list for extension type_name
-	86, // [86:86] is the sub-list for extension extendee
-	0,  // [0:86] is the sub-list for field type_name
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func init() { file_envoy_config_cluster_v3_cluster_proto_init() }
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.validate.go
index 1cd718c9d3..e651f5bd99 100644
--- a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.validate.go
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.validate.go
@@ -648,7 +648,6 @@ func (m *Cluster) validate(all bool) error {
 	}
 
 	if all {
-<<<<<<< HEAD
 		switch v := interface{}(m.GetDnsJitter()).(type) {
 		case interface{ ValidateAll() error }:
 			if err := v.ValidateAll(); err != nil {
@@ -678,8 +677,6 @@ func (m *Cluster) validate(all bool) error {
 	}
 
 	if all {
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		switch v := interface{}(m.GetDnsFailureRefreshRate()).(type) {
 		case interface{ ValidateAll() error }:
 			if err := v.ValidateAll(); err != nil {
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster_vtproto.pb.go
index 0a6399ee2a..c877196656 100644
--- a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster_vtproto.pb.go
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster_vtproto.pb.go
@@ -1295,7 +1295,6 @@ func (m *Cluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
 		i -= len(m.unknownFields)
 		copy(dAtA[i:], m.unknownFields)
 	}
-<<<<<<< HEAD
 	if m.DnsJitter != nil {
 		size, err := (*durationpb.Duration)(m.DnsJitter).MarshalToSizedBufferVTStrict(dAtA[:i])
 		if err != nil {
@@ -1308,8 +1307,6 @@ func (m *Cluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
 		i--
 		dAtA[i] = 0xd2
 	}
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if len(m.LrsReportEndpointMetrics) > 0 {
 		for iNdEx := len(m.LrsReportEndpointMetrics) - 1; iNdEx >= 0; iNdEx-- {
 			i -= len(m.LrsReportEndpointMetrics[iNdEx])
@@ -3257,13 +3254,10 @@ func (m *Cluster) SizeVT() (n int) {
 			n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
 		}
 	}
-<<<<<<< HEAD
 	if m.DnsJitter != nil {
 		l = (*durationpb.Duration)(m.DnsJitter).SizeVT()
 		n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
 	}
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	n += len(m.unknownFields)
 	return n
 }
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.go
index 20ee6a6ed4..87af0321f9 100644
--- a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.go
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.go
@@ -197,11 +197,7 @@ func (x *QuicKeepAliveSettings) GetInitialInterval() *durationpb.Duration {
 }
 
 // QUIC protocol options which apply to both downstream and upstream connections.
-<<<<<<< HEAD
 // [#next-free-field: 10]
-=======
-// [#next-free-field: 9]
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 type QuicProtocolOptions struct {
 	state protoimpl.MessageState
 	sizeCache protoimpl.SizeCache
@@ -247,12 +243,9 @@ type QuicProtocolOptions struct {
 	// For internal corporate network, a long timeout is often fine.
 	// But for client facing network, 30s is usually a good choice.
 	IdleNetworkTimeout *durationpb.Duration `protobuf:"bytes,8,opt,name=idle_network_timeout,json=idleNetworkTimeout,proto3" json:"idle_network_timeout,omitempty"`
-<<<<<<< HEAD
 	// Maximum packet length for QUIC connections. It refers to the largest size of a QUIC packet that can be transmitted over the connection.
 	// If not specified, one of the `default values in QUICHE `_ is used.
 	MaxPacketLength *wrapperspb.UInt64Value `protobuf:"bytes,9,opt,name=max_packet_length,json=maxPacketLength,proto3" json:"max_packet_length,omitempty"`
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *QuicProtocolOptions) Reset() {
@@ -343,7 +336,6 @@ func (x *QuicProtocolOptions) GetIdleNetworkTimeout() *durationpb.Duration {
 	return nil
 }
 
-<<<<<<< HEAD
 func (x *QuicProtocolOptions) GetMaxPacketLength() *wrapperspb.UInt64Value {
 	if x != nil {
 		return x.MaxPacketLength
@@ -351,8 +343,6 @@ func (x *QuicProtocolOptions) GetMaxPacketLength() *wrapperspb.UInt64Value {
 	return nil
 }
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 type UpstreamHttpProtocolOptions struct {
 	state protoimpl.MessageState
 	sizeCache protoimpl.SizeCache
@@ -545,11 +535,7 @@ func (x *AlternateProtocolsCacheOptions) GetCanonicalSuffixes() []string {
 	return nil
 }
 
-<<<<<<< HEAD
 // [#next-free-field: 8]
-=======
-// [#next-free-field: 7]
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 type HttpProtocolOptions struct {
 	state protoimpl.MessageState
 	sizeCache protoimpl.SizeCache
@@ -579,7 +565,6 @@ type HttpProtocolOptions struct {
 	// if there are no active streams. See :ref:`drain_timeout
 	// `.
 	MaxConnectionDuration *durationpb.Duration `protobuf:"bytes,3,opt,name=max_connection_duration,json=maxConnectionDuration,proto3" json:"max_connection_duration,omitempty"`
-<<<<<<< HEAD
 	// The maximum number of headers (request headers if configured on HttpConnectionManager,
 	// response headers when configured on a cluster).
 	// If unconfigured, the default maximum number of headers allowed is 100.
@@ -603,12 +588,6 @@ type HttpProtocolOptions struct {
 	// HTTP/2 (when using nghttp2) limits a single header to around 100kb.
 	// HTTP/3 limits a single header to around 1024kb.
 	MaxResponseHeadersKb *wrapperspb.UInt32Value `protobuf:"bytes,7,opt,name=max_response_headers_kb,json=maxResponseHeadersKb,proto3" json:"max_response_headers_kb,omitempty"`
-=======
-	// The maximum number of headers. If unconfigured, the default
-	// maximum number of request headers allowed is 100. Requests that exceed this limit will receive
-	// a 431 response for HTTP/1.x and cause a stream reset for HTTP/2.
-	MaxHeadersCount *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=max_headers_count,json=maxHeadersCount,proto3" json:"max_headers_count,omitempty"`
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	// Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be
 	// reset independent of any other timeouts. If not specified, this value is not set.
 	MaxStreamDuration *durationpb.Duration `protobuf:"bytes,4,opt,name=max_stream_duration,json=maxStreamDuration,proto3" json:"max_stream_duration,omitempty"`
@@ -678,7 +657,6 @@ func (x *HttpProtocolOptions) GetMaxHeadersCount() *wrapperspb.UInt32Value {
 	return nil
 }
 
-<<<<<<< HEAD
 func (x *HttpProtocolOptions) GetMaxResponseHeadersKb() *wrapperspb.UInt32Value {
 	if x != nil {
 		return x.MaxResponseHeadersKb
@@ -686,8 +664,6 @@ func (x *HttpProtocolOptions) GetMaxResponseHeadersKb() *wrapperspb.UInt32Value
 	return nil
 }
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (x *HttpProtocolOptions) GetMaxStreamDuration() *durationpb.Duration {
 	if x != nil {
 		return x.MaxStreamDuration
@@ -1791,11 +1767,7 @@ var file_envoy_config_core_v3_protocol_proto_rawDesc = []byte{
 	0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
 	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa,
 	0x42, 0x09, 0xaa, 0x01, 0x06, 0x22, 0x00, 0x32, 0x02, 0x08, 0x01, 0x52, 0x0f, 0x69, 0x6e, 0x69,
-<<<<<<< HEAD
 	0x74, 0x69, 0x61, 0x6c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0xbb, 0x06, 0x0a,
-=======
-	0x74, 0x69, 0x61, 0x6c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0xf1, 0x05, 0x0a,
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	0x13, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74,
 	0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5b, 0x0a, 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x63,
 	0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x01,
@@ -1843,7 +1815,6 @@ var file_envoy_config_core_v3_protocol_proto_rawDesc = []byte{
 	0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0f, 0xfa, 0x42, 0x0c,
 	0xaa, 0x01, 0x09, 0x22, 0x03, 0x08, 0xd8, 0x04, 0x32, 0x02, 0x08, 0x01, 0x52, 0x12, 0x69, 0x64,
 	0x6c, 0x65, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74,
-<<<<<<< HEAD
 	0x12, 0x48, 0x0a, 0x11, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6c,
 	0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f,
 	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49,
@@ -2207,360 +2178,6 @@ var file_envoy_config_core_v3_protocol_proto_rawDesc = []byte{
 	0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
 	0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0x62,
 	0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-=======
0xd0, 0x01, 0x01, 0xc0, 0x01, 0x01, 0x52, 0x15, 0x6f, 0x76, 0x65, 0x72, - 0x72, 0x69, 0x64, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x53, 0x6e, 0x69, 0x48, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x55, 0x70, 0x73, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x86, 0x04, 0x0a, 0x1e, 0x41, 0x6c, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x43, 0x61, - 0x63, 0x68, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, - 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x5f, 0x65, - 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, - 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, - 0x02, 0x20, 0x00, 0x52, 0x0a, 0x6d, 0x61, 0x78, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, - 0x5f, 0x0a, 0x16, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x73, 0x74, 0x6f, - 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x6b, 0x65, 0x79, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x84, 0x01, 0x0a, 0x14, 0x70, 0x72, 0x65, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, - 0x64, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x51, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x13, 0x70, 0x72, 0x65, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, - 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x12, 0x63, 0x61, 0x6e, 0x6f, 0x6e, - 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x65, 0x73, 0x18, 0x05, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x11, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x75, - 0x66, 0x66, 0x69, 0x78, 0x65, 0x73, 0x1a, 0x68, 0x0a, 0x1c, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x43, 0x61, 0x63, 0x68, - 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x27, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xd0, - 0x01, 0x01, 0xc0, 0x01, 0x01, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x1f, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 
0x02, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x0b, 0xfa, - 0x42, 0x08, 0x2a, 0x06, 0x10, 0xff, 0xff, 0x03, 0x20, 0x00, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, - 0x22, 0xaf, 0x05, 0x0a, 0x13, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x0c, 0x69, 0x64, 0x6c, 0x65, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x69, 0x64, 0x6c, 0x65, 0x54, - 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x51, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x51, 0x0a, 0x11, 0x6d, 0x61, 0x78, - 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x0f, 0x6d, 0x61, 0x78, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x49, 0x0a, 0x13, - 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x8d, 0x01, 0x0a, 0x1f, 0x68, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x73, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x75, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, - 0x6f, 0x72, 0x65, 0x73, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x46, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x73, 0x57, 0x69, 0x74, 0x68, 0x55, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, - 0x72, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1c, 0x68, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x73, 0x57, 0x69, 0x74, 0x68, 0x55, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, - 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, - 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x43, 0x6f, 
0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4e, 0x0a, 0x1c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x57, - 0x69, 0x74, 0x68, 0x55, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x41, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x00, 0x12, - 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, - 0x54, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x52, 0x4f, 0x50, 0x5f, 0x48, 0x45, 0x41, 0x44, - 0x45, 0x52, 0x10, 0x02, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, - 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x22, 0x93, 0x09, 0x0a, 0x14, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x48, 0x0a, 0x12, 0x61, - 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x62, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x65, 0x5f, 0x75, 0x72, - 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x62, 0x73, 0x6f, 0x6c, 0x75, - 0x74, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x5f, - 0x68, 0x74, 0x74, 0x70, 0x5f, 0x31, 0x30, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, - 0x63, 0x63, 0x65, 0x70, 0x74, 0x48, 0x74, 0x74, 0x70, 0x31, 0x30, 0x12, 0x36, 0x0a, 0x18, 0x64, - 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x5f, - 0x68, 0x74, 0x74, 0x70, 0x5f, 0x31, 0x30, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x64, - 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x46, 0x6f, 0x72, 0x48, 0x74, 0x74, - 0x70, 0x31, 0x30, 0x12, 0x66, 0x0a, 0x11, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6b, 0x65, - 0x79, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x0f, 0x68, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x65, - 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x72, 0x61, 0x69, - 0x6c, 0x65, 0x72, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x68, - 0x75, 0x6e, 0x6b, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x65, 0x64, - 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x7a, 0x0a, 0x2d, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, - 0x64, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, - 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x27, 0x6f, 0x76, 0x65, 0x72, 0x72, - 0x69, 0x64, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, - 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x73, 0x65, 0x6e, 0x64, 0x5f, 0x66, 0x75, 0x6c, 0x6c, 0x79, - 0x5f, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x73, 0x65, 0x6e, 0x64, 0x46, 0x75, 0x6c, 0x6c, 0x79, 0x51, - 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x64, 0x55, 0x72, 0x6c, 0x12, 0x4e, 0x0a, 0x10, 0x75, - 0x73, 0x65, 0x5f, 0x62, 0x61, 0x6c, 0x73, 0x61, 0x5f, 0x70, 0x61, 0x72, 0x73, 0x65, 0x72, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x0e, 0x75, 0x73, 0x65, - 0x42, 0x61, 0x6c, 0x73, 0x61, 0x50, 0x61, 0x72, 0x73, 0x65, 0x72, 0x12, 0x3a, 0x0a, 0x14, 0x61, - 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6d, 0x65, 0x74, 0x68, - 0x6f, 0x64, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, - 0x02, 0x08, 0x01, 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x1a, 0x9f, 0x03, 0x0a, 0x0f, 0x48, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x78, 0x0a, 0x11, 0x70, - 0x72, 0x6f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x73, 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x64, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, - 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, - 0x61, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, 0x57, 0x6f, 0x72, - 0x64, 0x73, 0x48, 0x00, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, - 0x57, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x5b, 0x0a, 0x12, 0x73, 0x74, 0x61, 0x74, 0x65, 0x66, 0x75, - 0x6c, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x74, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, - 0x11, 0x73, 0x74, 0x61, 0x74, 0x65, 0x66, 0x75, 0x6c, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x74, - 0x65, 0x72, 0x1a, 0x60, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, - 0x57, 0x6f, 0x72, 0x64, 0x73, 0x3a, 0x4d, 0x9a, 0xc5, 0x88, 0x1e, 0x48, 0x0a, 0x46, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, - 0x72, 0x6d, 0x61, 0x74, 
0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, 0x57, - 0x6f, 0x72, 0x64, 0x73, 0x3a, 0x3d, 0x9a, 0xc5, 0x88, 0x1e, 0x38, 0x0a, 0x36, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, - 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x42, 0x14, 0x0a, 0x0d, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x66, 0x6f, - 0x72, 0x6d, 0x61, 0x74, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, - 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xc1, 0x02, 0x0a, 0x11, 0x4b, 0x65, 0x65, - 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, - 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa, 0x42, 0x09, - 0xaa, 0x01, 0x06, 0x32, 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x76, 0x61, 0x6c, 0x12, 0x43, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, - 0x0e, 0xfa, 0x42, 0x0b, 0xaa, 0x01, 0x08, 0x08, 0x01, 0x32, 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52, - 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3f, 0x0a, 0x0f, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, - 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x76, 0x61, 0x6c, 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x12, 0x61, 0x0a, 0x18, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0xaa, 0x01, 0x06, 0x32, 0x04, - 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x16, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x49, 0x64, 0x6c, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0xd0, 0x0e, 0x0a, - 0x14, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x46, 0x0a, 0x10, 0x68, 0x70, 0x61, 0x63, 0x6b, 0x5f, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x68, - 0x70, 0x61, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x61, 0x0a, - 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 
0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, - 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0d, 0xfa, 0x42, 0x0a, - 0x2a, 0x08, 0x18, 0xff, 0xff, 0xff, 0xff, 0x07, 0x28, 0x01, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x43, - 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, - 0x12, 0x6a, 0x0a, 0x1a, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x42, 0x0f, 0xfa, 0x42, 0x0c, 0x2a, 0x0a, 0x18, 0xff, 0xff, 0xff, 0xff, 0x07, 0x28, - 0xff, 0xff, 0x03, 0x52, 0x17, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x72, 0x0a, 0x1e, - 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x42, 0x0f, 0xfa, 0x42, 0x0c, 0x2a, 0x0a, 0x18, 0xff, 0xff, 0xff, 0xff, 0x07, 0x28, - 0xff, 0xff, 0x03, 0x52, 0x1b, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x53, 0x69, 0x7a, 0x65, - 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, - 0x6c, 0x6c, 0x6f, 0x77, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x55, 0x0a, 0x13, - 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x66, 0x72, 0x61, - 0x6d, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, - 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, - 0x52, 0x11, 0x6d, 0x61, 0x78, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x46, 0x72, 0x61, - 0x6d, 0x65, 0x73, 0x12, 0x64, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x75, 0x74, 0x62, 0x6f, - 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x5f, 0x66, 0x72, 0x61, 0x6d, - 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, - 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, - 0x18, 0x6d, 0x61, 0x78, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x84, 0x01, 
0x0a, 0x31, 0x6d, 0x61, - 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, - 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x77, 0x69, 0x74, - 0x68, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x2b, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x76, 0x65, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, - 0x57, 0x69, 0x74, 0x68, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, - 0x12, 0x6f, 0x0a, 0x26, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, - 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x5f, - 0x70, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x21, - 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, - 0x74, 0x79, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x12, 0x91, 0x01, 0x0a, 0x34, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, - 0x64, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, - 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, - 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, - 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x2c, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x62, 0x6f, - 0x75, 0x6e, 0x64, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, - 0x72, 0x61, 0x6d, 0x65, 0x73, 0x50, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x46, 0x72, 0x61, 0x6d, - 0x65, 0x53, 0x65, 0x6e, 0x74, 0x12, 0x5e, 0x0a, 0x26, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x18, - 0x0c, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, - 0x18, 0x01, 0x52, 0x21, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, - 0x6e, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x7a, 0x0a, 0x2d, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, - 0x65, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6f, - 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, - 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x27, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, - 
0x64, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, 0x49, - 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x12, 0x7a, 0x0a, 0x1a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x73, 0x65, 0x74, 0x74, - 0x69, 0x6e, 0x67, 0x73, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, - 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, - 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, - 0x74, 0x65, 0x72, 0x52, 0x18, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x5a, 0x0a, - 0x14, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x65, 0x70, - 0x61, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x76, 0x33, 0x2e, 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x53, 0x65, 0x74, 0x74, - 0x69, 0x6e, 0x67, 0x73, 0x52, 0x13, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x12, 0x50, 0x0a, 0x11, 0x75, 0x73, 0x65, - 0x5f, 0x6f, 0x67, 0x68, 0x74, 0x74, 0x70, 0x32, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x63, 0x18, 0x10, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x0f, 0x75, 0x73, 0x65, 0x4f, - 0x67, 0x68, 0x74, 0x74, 0x70, 0x32, 0x43, 0x6f, 0x64, 0x65, 0x63, 0x1a, 0xe2, 0x01, 0x0a, 0x11, - 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, - 0x72, 0x12, 0x4e, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x2a, 0x06, 0x18, - 0xff, 0xff, 0x03, 0x28, 0x00, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, - 0x72, 0x12, 0x3c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, - 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x3f, 0x9a, 0xc5, 0x88, 0x1e, 0x3a, 0x0a, 0x38, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, - 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 
0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x32, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, - 0xa5, 0x01, 0x0a, 0x13, 0x47, 0x72, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x60, 0x0a, 0x16, 0x68, 0x74, 0x74, 0x70, 0x32, - 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, - 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x14, 0x68, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, - 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, - 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xd8, 0x02, 0x0a, 0x14, 0x48, 0x74, 0x74, 0x70, - 0x33, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x5d, 0x0a, 0x15, 0x71, 0x75, 0x69, 0x63, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x71, 0x75, 0x69, 0x63, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, - 0x7a, 0x0a, 0x2d, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x27, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x53, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3e, 0x0a, 0x16, 0x61, - 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x5f, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x42, 0x08, 0xd2, 0xc6, 0xa4, - 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x74, 0x65, - 0x6e, 0x64, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x61, - 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x22, 0x9b, 0x01, 0x0a, 0x1a, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x44, 0x0a, 0x13, 0x73, 0x63, 0x68, 
0x65, 0x6d, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x6f, - 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x12, - 0xfa, 0x42, 0x0f, 0x72, 0x0d, 0x52, 0x04, 0x68, 0x74, 0x74, 0x70, 0x52, 0x05, 0x68, 0x74, 0x74, - 0x70, 0x73, 0x48, 0x00, 0x52, 0x11, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x54, 0x6f, 0x4f, 0x76, - 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x42, 0x10, - 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x42, 0x81, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x22, 0x69, 0x6f, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, - 0x0d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, - 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, - 0x72, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var ( @@ -2597,16 +2214,10 @@ var file_envoy_config_core_v3_protocol_proto_goTypes = []interface{}{ (*Http2ProtocolOptions_SettingsParameter)(nil), // 16: envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter (*durationpb.Duration)(nil), // 17: google.protobuf.Duration (*wrapperspb.UInt32Value)(nil), // 18: google.protobuf.UInt32Value -<<<<<<< HEAD (*wrapperspb.UInt64Value)(nil), // 19: google.protobuf.UInt64Value (*TypedExtensionConfig)(nil), // 20: envoy.config.core.v3.TypedExtensionConfig (*wrapperspb.BoolValue)(nil), // 21: google.protobuf.BoolValue (*v3.Percent)(nil), // 22: envoy.type.v3.Percent -======= - (*TypedExtensionConfig)(nil), // 19: envoy.config.core.v3.TypedExtensionConfig - (*wrapperspb.BoolValue)(nil), // 20: google.protobuf.BoolValue - (*v3.Percent)(nil), // 21: envoy.type.v3.Percent ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var file_envoy_config_core_v3_protocol_proto_depIdxs = []int32{ 17, // 0: envoy.config.core.v3.QuicKeepAliveSettings.max_interval:type_name -> google.protobuf.Duration @@ -2617,7 +2228,6 @@ var file_envoy_config_core_v3_protocol_proto_depIdxs = []int32{ 18, // 5: envoy.config.core.v3.QuicProtocolOptions.num_timeouts_to_trigger_port_migration:type_name -> google.protobuf.UInt32Value 2, // 6: envoy.config.core.v3.QuicProtocolOptions.connection_keepalive:type_name -> envoy.config.core.v3.QuicKeepAliveSettings 17, // 7: envoy.config.core.v3.QuicProtocolOptions.idle_network_timeout:type_name -> google.protobuf.Duration -<<<<<<< HEAD 19, // 8: envoy.config.core.v3.QuicProtocolOptions.max_packet_length:type_name -> google.protobuf.UInt64Value 18, // 9: envoy.config.core.v3.AlternateProtocolsCacheOptions.max_entries:type_name -> google.protobuf.UInt32Value 20, // 10: envoy.config.core.v3.AlternateProtocolsCacheOptions.key_value_store_config:type_name -> envoy.config.core.v3.TypedExtensionConfig @@ -2662,50 +2272,6 
@@ var file_envoy_config_core_v3_protocol_proto_depIdxs = []int32{
 	47, // [47:47] is the sub-list for extension type_name
 	47, // [47:47] is the sub-list for extension extendee
 	0,  // [0:47] is the sub-list for field type_name
-=======
-	18, // 8: envoy.config.core.v3.AlternateProtocolsCacheOptions.max_entries:type_name -> google.protobuf.UInt32Value
-	19, // 9: envoy.config.core.v3.AlternateProtocolsCacheOptions.key_value_store_config:type_name -> envoy.config.core.v3.TypedExtensionConfig
-	13, // 10: envoy.config.core.v3.AlternateProtocolsCacheOptions.prepopulated_entries:type_name -> envoy.config.core.v3.AlternateProtocolsCacheOptions.AlternateProtocolsCacheEntry
-	17, // 11: envoy.config.core.v3.HttpProtocolOptions.idle_timeout:type_name -> google.protobuf.Duration
-	17, // 12: envoy.config.core.v3.HttpProtocolOptions.max_connection_duration:type_name -> google.protobuf.Duration
-	18, // 13: envoy.config.core.v3.HttpProtocolOptions.max_headers_count:type_name -> google.protobuf.UInt32Value
-	17, // 14: envoy.config.core.v3.HttpProtocolOptions.max_stream_duration:type_name -> google.protobuf.Duration
-	0, // 15: envoy.config.core.v3.HttpProtocolOptions.headers_with_underscores_action:type_name -> envoy.config.core.v3.HttpProtocolOptions.HeadersWithUnderscoresAction
-	18, // 16: envoy.config.core.v3.HttpProtocolOptions.max_requests_per_connection:type_name -> google.protobuf.UInt32Value
-	20, // 17: envoy.config.core.v3.Http1ProtocolOptions.allow_absolute_url:type_name -> google.protobuf.BoolValue
-	14, // 18: envoy.config.core.v3.Http1ProtocolOptions.header_key_format:type_name -> envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat
-	20, // 19: envoy.config.core.v3.Http1ProtocolOptions.override_stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue
-	20, // 20: envoy.config.core.v3.Http1ProtocolOptions.use_balsa_parser:type_name -> google.protobuf.BoolValue
-	17, // 21: envoy.config.core.v3.KeepaliveSettings.interval:type_name -> google.protobuf.Duration
-	17, // 22: envoy.config.core.v3.KeepaliveSettings.timeout:type_name -> google.protobuf.Duration
-	21, // 23: envoy.config.core.v3.KeepaliveSettings.interval_jitter:type_name -> envoy.type.v3.Percent
-	17, // 24: envoy.config.core.v3.KeepaliveSettings.connection_idle_interval:type_name -> google.protobuf.Duration
-	18, // 25: envoy.config.core.v3.Http2ProtocolOptions.hpack_table_size:type_name -> google.protobuf.UInt32Value
-	18, // 26: envoy.config.core.v3.Http2ProtocolOptions.max_concurrent_streams:type_name -> google.protobuf.UInt32Value
-	18, // 27: envoy.config.core.v3.Http2ProtocolOptions.initial_stream_window_size:type_name -> google.protobuf.UInt32Value
-	18, // 28: envoy.config.core.v3.Http2ProtocolOptions.initial_connection_window_size:type_name -> google.protobuf.UInt32Value
-	18, // 29: envoy.config.core.v3.Http2ProtocolOptions.max_outbound_frames:type_name -> google.protobuf.UInt32Value
-	18, // 30: envoy.config.core.v3.Http2ProtocolOptions.max_outbound_control_frames:type_name -> google.protobuf.UInt32Value
-	18, // 31: envoy.config.core.v3.Http2ProtocolOptions.max_consecutive_inbound_frames_with_empty_payload:type_name -> google.protobuf.UInt32Value
-	18, // 32: envoy.config.core.v3.Http2ProtocolOptions.max_inbound_priority_frames_per_stream:type_name -> google.protobuf.UInt32Value
-	18, // 33: envoy.config.core.v3.Http2ProtocolOptions.max_inbound_window_update_frames_per_data_frame_sent:type_name -> google.protobuf.UInt32Value
-	20, // 34: envoy.config.core.v3.Http2ProtocolOptions.override_stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue
-	16, // 35: envoy.config.core.v3.Http2ProtocolOptions.custom_settings_parameters:type_name -> envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter
-	8, // 36: envoy.config.core.v3.Http2ProtocolOptions.connection_keepalive:type_name -> envoy.config.core.v3.KeepaliveSettings
-	20, // 37: envoy.config.core.v3.Http2ProtocolOptions.use_oghttp2_codec:type_name -> google.protobuf.BoolValue
-	9, // 38: envoy.config.core.v3.GrpcProtocolOptions.http2_protocol_options:type_name -> envoy.config.core.v3.Http2ProtocolOptions
-	3, // 39: envoy.config.core.v3.Http3ProtocolOptions.quic_protocol_options:type_name -> envoy.config.core.v3.QuicProtocolOptions
-	20, // 40: envoy.config.core.v3.Http3ProtocolOptions.override_stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue
-	15, // 41: envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.proper_case_words:type_name -> envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords
-	19, // 42: envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.stateful_formatter:type_name -> envoy.config.core.v3.TypedExtensionConfig
-	18, // 43: envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter.identifier:type_name -> google.protobuf.UInt32Value
-	18, // 44: envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter.value:type_name -> google.protobuf.UInt32Value
-	45, // [45:45] is the sub-list for method output_type
-	45, // [45:45] is the sub-list for method input_type
-	45, // [45:45] is the sub-list for extension type_name
-	45, // [45:45] is the sub-list for extension extendee
-	0, // [0:45] is the sub-list for field type_name
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func init() { file_envoy_config_core_v3_protocol_proto_init() }
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.validate.go
index bb1f07eae9..1b7d8342dd 100644
--- a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.validate.go
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol.pb.validate.go
@@ -448,7 +448,6 @@ func (m *QuicProtocolOptions) validate(all bool) error {
 		}
 	}
 
-<<<<<<< HEAD
 	if all {
 		switch v := interface{}(m.GetMaxPacketLength()).(type) {
 		case interface{ ValidateAll() error }:
@@ -478,8 +477,6 @@ func (m *QuicProtocolOptions) validate(all bool) error {
 		}
 	}
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if len(errors) > 0 {
 		return QuicProtocolOptionsMultiError(errors)
 	}
@@ -971,7 +968,6 @@ func (m *HttpProtocolOptions) validate(all bool) error {
 
 	}
 
-<<<<<<< HEAD
 	if wrapper := m.GetMaxResponseHeadersKb(); wrapper != nil {
 
 		if val := wrapper.GetValue(); val <= 0 || val > 8192 {
@@ -987,8 +983,6 @@ func (m *HttpProtocolOptions) validate(all bool) error {
 
 	}
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if all {
 		switch v := interface{}(m.GetMaxStreamDuration()).(type) {
 		case interface{ ValidateAll() error }:
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol_vtproto.pb.go
index 129807ef95..85f4d3e4eb 100644
--- a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol_vtproto.pb.go
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/core/v3/protocol_vtproto.pb.go
@@ -137,7 +136,6 @@ func (m *QuicProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, er
 		i -= len(m.unknownFields)
 		copy(dAtA[i:], m.unknownFields)
 	}
-<<<<<<< HEAD
 	if m.MaxPacketLength != nil {
 		size, err := (*wrapperspb.UInt64Value)(m.MaxPacketLength).MarshalToSizedBufferVTStrict(dAtA[:i])
 		if err != nil {
@@ -148,8 +147,6 @@ func (m *QuicProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, er
 		i--
 		dAtA[i] = 0x4a
 	}
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if m.IdleNetworkTimeout != nil {
 		size, err := (*durationpb.Duration)(m.IdleNetworkTimeout).MarshalToSizedBufferVTStrict(dAtA[:i])
 		if err != nil {
@@ -443,7 +440,6 @@ func (m *HttpProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, er
 		i -= len(m.unknownFields)
 		copy(dAtA[i:], m.unknownFields)
 	}
-<<<<<<< HEAD
 	if m.MaxResponseHeadersKb != nil {
 		size, err := (*wrapperspb.UInt32Value)(m.MaxResponseHeadersKb).MarshalToSizedBufferVTStrict(dAtA[:i])
 		if err != nil {
@@ -454,8 +450,6 @@ func (m *HttpProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, er
 		i--
 		dAtA[i] = 0x3a
 	}
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if m.MaxRequestsPerConnection != nil {
 		size, err := (*wrapperspb.UInt32Value)(m.MaxRequestsPerConnection).MarshalToSizedBufferVTStrict(dAtA[:i])
 		if err != nil {
@@ -1351,13 +1345,10 @@ func (m *QuicProtocolOptions) SizeVT() (n int) {
 		l = (*durationpb.Duration)(m.IdleNetworkTimeout).SizeVT()
 		n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
 	}
-<<<<<<< HEAD
 	if m.MaxPacketLength != nil {
 		l = (*wrapperspb.UInt64Value)(m.MaxPacketLength).SizeVT()
 		n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
 	}
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	n += len(m.unknownFields)
 	return n
 }
@@ -1462,13 +1453,10 @@ func (m *HttpProtocolOptions) SizeVT() (n int) {
 		l = (*wrapperspb.UInt32Value)(m.MaxRequestsPerConnection).SizeVT()
 		n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
 	}
-<<<<<<< HEAD
 	if m.MaxResponseHeadersKb != nil {
 		l = (*wrapperspb.UInt32Value)(m.MaxResponseHeadersKb).SizeVT()
 		n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
 	}
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	n += len(m.unknownFields)
 	return n
 }
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.go
index 1760301f09..ea97b5987e 100644
--- a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.go
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.go
@@ -27,11 +27,7 @@ const (
 )
 
 // Configuration specific to the UDP QUIC listener.
-<<<<<<< HEAD
 // [#next-free-field: 14]
-=======
-// [#next-free-field: 12]
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 type QuicProtocolOptions struct {
 	state         protoimpl.MessageState
 	sizeCache     protoimpl.SizeCache
@@ -82,7 +78,6 @@ type QuicProtocolOptions struct {
 	// If not specified, no debug visitor will be attached to connections.
 	// [#extension-category: envoy.quic.connection_debug_visitor]
 	ConnectionDebugVisitorConfig *v3.TypedExtensionConfig `protobuf:"bytes,11,opt,name=connection_debug_visitor_config,json=connectionDebugVisitorConfig,proto3" json:"connection_debug_visitor_config,omitempty"`
-<<<<<<< HEAD
 	// Configure a type of UDP cmsg to pass to listener filters via QuicReceivedPacket.
 	// Both level and type must be specified for cmsg to be saved.
 	// Cmsg may be truncated or omitted if expected size is not set.
@@ -92,8 +87,6 @@ type QuicProtocolOptions struct {
 	// QUIC layer by replying with an empty version negotiation packet to the
 	// client.
 	RejectNewConnections bool `protobuf:"varint,13,opt,name=reject_new_connections,json=rejectNewConnections,proto3" json:"reject_new_connections,omitempty"`
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *QuicProtocolOptions) Reset() {
@@ -205,7 +198,6 @@ func (x *QuicProtocolOptions) GetConnectionDebugVisitorConfig() *v3.TypedExtensi
 	return nil
 }
 
-<<<<<<< HEAD
 func (x *QuicProtocolOptions) GetSaveCmsgConfig() []*v3.SocketCmsgHeaders {
 	if x != nil {
 		return x.SaveCmsgConfig
@@ -220,8 +212,6 @@ func (x *QuicProtocolOptions) GetRejectNewConnections() bool {
 	return false
 }
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 var File_envoy_config_listener_v3_quic_config_proto protoreflect.FileDescriptor
 
 var file_envoy_config_listener_v3_quic_config_proto_rawDesc = []byte{
@@ -236,12 +226,9 @@ var file_envoy_config_listener_v3_quic_config_proto_rawDesc = []byte{
 	0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65,
 	0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65,
 	0x2f, 0x76, 0x33, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2e, 0x70, 0x72, 0x6f,
-<<<<<<< HEAD
 	0x74, 0x6f, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
 	0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f,
 	0x63, 0x6d, 0x73, 0x67, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f,
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
 	0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f,
 	0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
@@ -253,13 +240,8 @@ var file_envoy_config_listener_v3_quic_config_proto_rawDesc = []byte{
 	0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
 	0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e,
 	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f,
-<<<<<<< HEAD
 	0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x89,
 	0x0a, 0x0a, 0x13, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f,
-=======
-	0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf6,
-	0x08, 0x0a, 0x13, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f,
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x15, 0x71, 0x75, 0x69, 0x63, 0x5f, 0x70,
 	0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
 	0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
@@ -327,7 +309,6 @@ var file_envoy_config_listener_v3_quic_config_proto_rawDesc = []byte{
 	0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
 	0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1c, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74,
 	0x69, 0x6f, 0x6e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x56, 0x69, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x43,
-<<<<<<< HEAD
 	0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5b, 0x0a, 0x10, 0x73, 0x61, 0x76, 0x65, 0x5f, 0x63, 0x6d,
 	0x73, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32,
 	0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63,
@@ -351,22 +332,6 @@ var file_envoy_config_listener_v3_quic_config_proto_rawDesc = []byte{
 	0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76,
 	0x33, 0x3b, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72,
 	0x6f, 0x74, 0x6f, 0x33,
-=======
-	0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e,
-	0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65,
-	0x6e, 0x65, 0x72, 0x2e, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c,
-	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x8f, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02,
-	0x10, 0x02, 0x0a, 0x26, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78,
-	0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c,
-	0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x51, 0x75, 0x69, 0x63,
-	0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4a, 0x67,
-	0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70,
-	0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d,
-	0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66,
-	0x69, 0x67, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x3b, 0x6c,
-	0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x33,
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 var (
@@ -390,10 +355,7 @@ var file_envoy_config_listener_v3_quic_config_proto_goTypes = []interface{}{
 	(*wrapperspb.UInt32Value)(nil),  // 4: google.protobuf.UInt32Value
 	(*v3.TypedExtensionConfig)(nil), // 5: envoy.config.core.v3.TypedExtensionConfig
 	(*wrapperspb.BoolValue)(nil),    // 6: google.protobuf.BoolValue
-<<<<<<< HEAD
 	(*v3.SocketCmsgHeaders)(nil),    // 7: envoy.config.core.v3.SocketCmsgHeaders
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 var file_envoy_config_listener_v3_quic_config_proto_depIdxs = []int32{
 	1, // 0: envoy.config.listener.v3.QuicProtocolOptions.quic_protocol_options:type_name -> envoy.config.core.v3.QuicProtocolOptions
@@ -407,20 +369,12 @@ var file_envoy_config_listener_v3_quic_config_proto_depIdxs = []int32{
 	5, // 8: envoy.config.listener.v3.QuicProtocolOptions.server_preferred_address_config:type_name -> envoy.config.core.v3.TypedExtensionConfig
 	6, // 9: envoy.config.listener.v3.QuicProtocolOptions.send_disable_active_migration:type_name -> google.protobuf.BoolValue
 	5, // 10: envoy.config.listener.v3.QuicProtocolOptions.connection_debug_visitor_config:type_name -> envoy.config.core.v3.TypedExtensionConfig
-<<<<<<< HEAD
 	7, // 11: envoy.config.listener.v3.QuicProtocolOptions.save_cmsg_config:type_name -> envoy.config.core.v3.SocketCmsgHeaders
 	12, // [12:12] is the sub-list for method output_type
 	12, // [12:12] is the sub-list for method input_type
 	12, // [12:12] is the sub-list for extension type_name
 	12, // [12:12] is the sub-list for extension extendee
 	0, // [0:12] is the sub-list for field type_name
-=======
-	11, // [11:11] is the sub-list for method output_type
-	11, // [11:11] is the sub-list for method input_type
-	11, // [11:11] is the sub-list for extension type_name
-	11, // [11:11] is the sub-list for extension extendee
-	0, // [0:11] is the sub-list for field type_name
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func init() { file_envoy_config_listener_v3_quic_config_proto_init() }
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.validate.go
index cc25c3418a..efd3db9c46 100644
--- a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.validate.go
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config.pb.validate.go
@@ -363,7 +363,6 @@ func (m *QuicProtocolOptions) validate(all bool) error {
 		}
 	}
 
-<<<<<<< HEAD
 	if len(m.GetSaveCmsgConfig()) > 1 {
 		err := QuicProtocolOptionsValidationError{
 			field: "SaveCmsgConfig",
@@ -411,8 +410,6 @@ func (m *QuicProtocolOptions) validate(all bool) error {
 
 	// no validation rules for RejectNewConnections
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if len(errors) > 0 {
 		return QuicProtocolOptionsMultiError(errors)
 	}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config_vtproto.pb.go
index bf1d4e0c99..7dba379882 100644
--- a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config_vtproto.pb.go
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/listener/v3/quic_config_vtproto.pb.go
@@ -51,7 +51,6 @@ func (m *QuicProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, er
 		i -= len(m.unknownFields)
 		copy(dAtA[i:], m.unknownFields)
 	}
-<<<<<<< HEAD
 	if m.RejectNewConnections {
 		i--
 		if m.RejectNewConnections {
@@ -86,8 +85,6 @@ func (m *QuicProtocolOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, er
 			dAtA[i] = 0x62
 		}
 	}
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if m.ConnectionDebugVisitorConfig != nil {
 		if vtmsg, ok := interface{}(m.ConnectionDebugVisitorConfig).(interface {
 			MarshalToSizedBufferVTStrict([]byte) (int, error)
@@ -377,7 +374,6 @@ func (m *QuicProtocolOptions) SizeVT() (n int) {
 		}
 		n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
 	}
-<<<<<<< HEAD
 	if len(m.SaveCmsgConfig) > 0 {
 		for _, e := range m.SaveCmsgConfig {
 			if size, ok := interface{}(e).(interface {
@@ -393,8 +389,6 @@ func (m *QuicProtocolOptions) SizeVT() (n int) {
 	if m.RejectNewConnections {
 		n += 2
 	}
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	n += len(m.unknownFields)
 	return n
 }
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.go
index 90e524f450..300c39a128 100644
--- a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.go
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/route/v3/route_components.pb.go
@@ -8,10 +8,6 @@ package routev3
 
 import (
 	_ "github.com/cncf/xds/go/udpa/annotations"
-<<<<<<< HEAD
-=======
-	_ "github.com/cncf/xds/go/xds/annotations/v3"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	v3 "github.com/cncf/xds/go/xds/type/matcher/v3"
 	_ "github.com/envoyproxy/go-control-plane/envoy/annotations"
 	v31 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
@@ -383,10 +379,6 @@ type VirtualHost struct {
 	// The first route that matches will be used.
 	// Only one of this and ``matcher`` can be specified.
 	Routes []*Route `protobuf:"bytes,3,rep,name=routes,proto3" json:"routes,omitempty"`
-<<<<<<< HEAD
-=======
-	// [#next-major-version: This should be included in a oneof with routes wrapped in a message.]
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	// The match tree to use when resolving route actions for incoming requests. Only one of this and ``routes``
 	// can be specified.
 	Matcher *v3.Matcher `protobuf:"bytes,21,opt,name=matcher,proto3" json:"matcher,omitempty"`
@@ -6620,7 +6612,6 @@ var file_envoy_config_route_v3_route_components_proto_rawDesc = []byte{
 	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
 	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
 	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73,
-<<<<<<< HEAD
 	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65,
 	0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x61, 0x74, 0x63,
 	0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79,
@@ -8039,1426 +8030,6 @@ var file_envoy_config_route_v3_route_components_proto_rawDesc = []byte{
 	0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
 	0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x76,
 	0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-=======
-	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f,
-	0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75,
-	0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70,
-	0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x61, 0x74,
-	0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f,
-	0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65,
-	0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
-	0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
-	0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
-	0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
-	0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21,
-	0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
-	0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69,
-	0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x95, 0x0f, 0x0a, 0x0b, 0x56,
-	0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61,
-	0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10,
-	0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
0x2c, 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, - 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x0f, 0x92, 0x01, 0x0c, - 0x08, 0x01, 0x22, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x07, 0x64, 0x6f, - 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x34, 0x0a, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, - 0x75, 0x74, 0x65, 0x52, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x07, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x78, - 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, - 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, - 0x06, 0x02, 0x08, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x60, 0x0a, - 0x0b, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x35, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x69, 0x72, 0x74, 0x75, - 0x61, 0x6c, 0x48, 0x6f, 0x73, 0x74, 0x2e, 0x54, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, - 0x02, 0x10, 0x01, 0x52, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x54, 0x6c, 0x73, 0x12, - 0x50, 0x0a, 0x10, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, - 0x33, 0x2e, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x52, 0x0f, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x73, 0x12, 0x41, 0x0a, 0x0b, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x73, - 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, - 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x0a, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, - 0x6d, 0x69, 0x74, 0x73, 0x12, 0x67, 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, - 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x07, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, - 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4d, 0x0a, - 0x19, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, - 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x09, - 0x42, 0x12, 0xfa, 0x42, 0x0f, 0x92, 0x01, 0x0c, 0x22, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, - 0x00, 0xc0, 0x01, 0x01, 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x69, 0x0a, 0x17, - 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, - 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, - 0x07, 0x52, 0x14, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4f, 0x0a, 0x1a, 0x72, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, - 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x0f, - 0x92, 0x01, 0x0c, 0x22, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, - 0x17, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, - 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x42, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, - 0x6f, 0x72, 0x73, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, - 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, 0x73, 0x0a, 0x17, - 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, - 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x48, 0x6f, 0x73, - 0x74, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x74, 0x79, 0x70, - 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x41, 0x0a, 0x1d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x5f, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x48, 0x0a, 0x21, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, - 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x6e, - 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x1d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, - 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x10, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, - 
0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4f, 0x0a, 0x19, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x16, - 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x54, 0x79, 0x70, 0x65, 0x64, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x45, 0x0a, 0x0c, 0x68, 0x65, 0x64, 0x67, 0x65, 0x5f, - 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x52, 0x0b, 0x68, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x44, 0x0a, - 0x1f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x69, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1b, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x49, - 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x52, 0x65, 0x74, 0x72, 0x79, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x12, 0x60, 0x0a, 0x1e, 0x70, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x5f, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, - 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, - 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x70, 0x65, 0x72, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x75, 0x66, 0x66, 0x65, 0x72, 0x4c, 0x69, 0x6d, 0x69, 0x74, - 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x6e, 0x0a, 0x17, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x5f, 0x6d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, - 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, - 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x15, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x1a, 0x5d, 0x0a, 0x19, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x14, 0x2e, 0x67, 0x6f, 
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x22, 0x3a, 0x0a, 0x12, 0x54, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, - 0x12, 0x11, 0x0a, 0x0d, 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x4f, 0x4e, 0x4c, - 0x59, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x02, 0x3a, 0x25, 0x9a, 0xc5, - 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x48, - 0x6f, 0x73, 0x74, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x52, - 0x11, 0x70, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x22, 0x64, 0x0a, 0x0c, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x41, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x46, 0x69, 0x6c, 0x74, - 0x65, 0x72, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x41, 0x0a, 0x09, 0x52, 0x6f, 0x75, 0x74, - 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, - 0x75, 0x74, 0x65, 0x52, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x22, 0xaf, 0x0b, 0x0a, 0x05, - 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0e, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x05, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, - 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x08, 0xfa, 0x42, 0x05, - 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x3a, 0x0a, 0x05, - 0x72, 0x6f, 0x75, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, - 0x00, 0x52, 0x05, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x72, 0x65, 0x64, 0x69, - 0x72, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, - 0x76, 0x33, 0x2e, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x56, 0x0a, - 0x0f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 
0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, - 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x41, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0e, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0d, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, - 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x48, 0x00, 0x52, 0x0c, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x60, 0x0a, 0x15, 0x6e, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, - 0x69, 0x6e, 0x67, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x77, - 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x13, - 0x6e, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, - 0x3e, 0x0a, 0x09, 0x64, 0x65, 0x63, 0x6f, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x63, 0x6f, 0x72, - 0x61, 0x74, 0x6f, 0x72, 0x52, 0x09, 0x64, 0x65, 0x63, 0x6f, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, - 0x6d, 0x0a, 0x17, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x54, - 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x74, 0x79, 0x70, 0x65, 0x64, 0x50, - 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x67, - 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, - 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4d, 0x0a, 0x19, 0x72, 
0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, - 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x0f, 0x92, - 0x01, 0x0c, 0x22, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x16, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, - 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x69, 0x0a, 0x17, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, - 0x64, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x14, 0x72, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, - 0x64, 0x12, 0x4f, 0x0a, 0x1a, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, - 0x0b, 0x20, 0x03, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x0f, 0x92, 0x01, 0x0c, 0x22, 0x0a, 0x72, - 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x17, 0x72, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x18, 0x0f, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x61, 0x63, - 0x69, 0x6e, 0x67, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x12, 0x60, 0x0a, 0x1e, - 0x70, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x75, 0x66, 0x66, - 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x10, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x1a, 0x70, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x75, - 0x66, 0x66, 0x65, 0x72, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1f, - 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x13, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x1a, - 0x5d, 0x0a, 0x19, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, - 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x1f, - 0x9a, 0xc5, 0x88, 0x1e, 0x1a, 0x0a, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x42, - 
0x0d, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, 0x04, - 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x52, 0x11, 0x70, 0x65, 0x72, 0x5f, - 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xf3, 0x0a, - 0x0a, 0x0f, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x12, 0x5a, 0x0a, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x65, 0x69, 0x67, - 0x68, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, - 0x02, 0x08, 0x01, 0x52, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x4c, 0x0a, - 0x0c, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x0b, - 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x72, - 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, - 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, - 0x4b, 0x65, 0x79, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2e, 0x0a, 0x0b, 0x68, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, - 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x68, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x92, 0x08, 0x0a, 0x0d, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x2d, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x19, 0xf2, 0x98, 0xfe, 0x8f, 0x05, - 0x13, 0x12, 0x11, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, - 0x66, 0x69, 0x65, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x0e, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x24, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0xf2, - 0x98, 0xfe, 0x8f, 0x05, 0x13, 0x12, 0x11, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, - 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x34, 0x0a, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, - 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x45, 0x0a, - 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 
0x61, 0x52, 0x0d, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x12, 0x67, 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, - 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x04, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, - 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4b, 0x0a, - 0x19, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, - 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, - 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x92, 0x01, 0x0a, 0x22, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, - 0x01, 0x01, 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x69, 0x0a, 0x17, 0x72, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, - 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, - 0x14, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, - 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4d, 0x0a, 0x1a, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, - 0x6f, 0x76, 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x92, 0x01, - 0x0a, 0x22, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x17, 0x72, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, - 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x85, 0x01, 0x0a, 0x17, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x70, - 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x57, - 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x2e, 0x54, 0x79, 0x70, - 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x74, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, - 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3f, 0x0a, 0x14, - 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x74, - 0x65, 0x72, 0x61, 0x6c, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, - 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x48, 0x00, 0x52, 0x12, 0x68, 0x6f, 0x73, 0x74, 0x52, - 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4c, 0x69, 
0x74, 0x65, 0x72, 0x61, 0x6c, 0x1a, 0x5d, 0x0a, - 0x19, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, - 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x37, 0x9a, 0xc5, - 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x57, - 0x65, 0x69, 0x67, 0x68, 0x74, 0x42, 0x18, 0x0a, 0x16, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, - 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, - 0x04, 0x08, 0x07, 0x10, 0x08, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x52, 0x11, 0x70, 0x65, 0x72, - 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x29, - 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, - 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x42, 0x18, 0x0a, 0x16, 0x72, 0x61, 0x6e, - 0x64, 0x6f, 0x6d, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, - 0x69, 0x65, 0x72, 0x22, 0x8d, 0x01, 0x0a, 0x16, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, - 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x52, - 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0xfa, - 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, - 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x61, 0x6c, 0x22, 0xc5, 0x0a, 0x0a, 0x0a, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x12, 0x18, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x14, 0x0a, 0x04, - 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x12, 0x4e, 0x0a, 0x0a, 0x73, 0x61, 0x66, 0x65, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, - 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, - 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, - 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, - 0x8a, 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x09, 0x73, 0x61, 0x66, 0x65, 0x52, 0x65, 0x67, - 0x65, 0x78, 0x12, 0x5b, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 
0x74, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x43, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, - 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, - 0x4b, 0x0a, 0x15, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x73, 0x65, 0x70, 0x61, 0x72, 0x61, 0x74, 0x65, - 0x64, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x42, 0x15, - 0xfa, 0x42, 0x12, 0x72, 0x10, 0x32, 0x0e, 0x5e, 0x5b, 0x5e, 0x3f, 0x23, 0x5d, 0x2b, 0x5b, 0x5e, - 0x3f, 0x23, 0x2f, 0x5d, 0x24, 0x48, 0x00, 0x52, 0x13, 0x70, 0x61, 0x74, 0x68, 0x53, 0x65, 0x70, - 0x61, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x58, 0x0a, 0x11, - 0x70, 0x61, 0x74, 0x68, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, - 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0f, 0x70, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0e, 0x63, 0x61, 0x73, 0x65, 0x5f, 0x73, - 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0d, 0x63, 0x61, 0x73, 0x65, - 0x53, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x59, 0x0a, 0x10, 0x72, 0x75, 0x6e, - 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x66, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, - 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, - 0x65, 0x6e, 0x74, 0x52, 0x0f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3e, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, - 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, - 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x65, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0f, 0x71, 0x75, - 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x4b, 0x0a, - 0x04, 0x67, 0x72, 0x70, 0x63, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x65, 0x6e, - 
0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x47, - 0x72, 0x70, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x04, 0x67, 0x72, 0x70, 0x63, 0x12, 0x59, 0x0a, 0x0b, 0x74, 0x6c, - 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, - 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x2e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0a, 0x74, 0x6c, 0x73, 0x43, 0x6f, - 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x51, 0x0a, 0x10, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, - 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x53, 0x0a, 0x15, 0x47, 0x72, 0x70, 0x63, - 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 0x1e, 0x35, 0x0a, 0x33, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, - 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x52, 0x6f, 0x75, 0x74, - 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xc9, 0x01, - 0x0a, 0x16, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x73, - 0x65, 0x6e, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, - 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, - 0x65, 0x64, 0x12, 0x38, 0x0a, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x64, 0x3a, 0x3b, 0x9a, 0xc5, - 0x88, 0x1e, 0x36, 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x2e, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x10, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x3a, 0x24, 0x9a, 0xc5, 0x88, - 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x42, 0x15, 0x0a, 
0x0e, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, - 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, - 0x08, 0x03, 0x10, 0x04, 0x52, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x22, 0xa8, 0x06, 0x0a, 0x0a, - 0x43, 0x6f, 0x72, 0x73, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x5f, 0x0a, 0x19, 0x61, 0x6c, - 0x6c, 0x6f, 0x77, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, - 0x67, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x72, 0x52, 0x16, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, - 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x23, 0x0a, 0x0d, 0x61, - 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, - 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x48, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x5f, - 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, - 0x78, 0x70, 0x6f, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x17, 0x0a, 0x07, - 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, - 0x61, 0x78, 0x41, 0x67, 0x65, 0x12, 0x47, 0x0a, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, - 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x61, 0x6c, - 0x6c, 0x6f, 0x77, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x57, - 0x0a, 0x0e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, - 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, - 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, - 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0d, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x55, 0x0a, 0x0e, 0x73, 0x68, 0x61, 0x64, 0x6f, - 0x77, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, - 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, - 0x0d, 0x73, 0x68, 0x61, 0x64, 0x6f, 0x77, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x5b, - 0x0a, 0x1c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, - 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x0c, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4e, 0x65, - 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x61, 0x0a, 0x1f, 0x66, - 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x73, 0x18, 0x0d, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x52, 0x1c, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x65, 0x66, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x73, 0x3a, 0x24, - 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x43, 0x6f, 0x72, 0x73, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x42, 0x13, 0x0a, 0x11, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x5f, - 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, - 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, - 0x6f, 0x77, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, - 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x52, 0x07, 0x65, - 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0xa6, 0x2d, 0x0a, 0x0b, 0x52, 0x6f, 0x75, 0x74, 0x65, - 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, - 0x48, 0x00, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x36, 0x0a, 0x0e, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, - 0x01, 0x01, 0x48, 0x00, 0x52, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x11, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, - 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x10, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, - 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x3a, 0x0a, 0x18, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x16, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, - 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x76, 0x0a, 0x1f, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, - 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, - 0x65, 0x72, 0x5f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x27, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 
0x69, 0x67, 0x2e, 0x72, - 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, - 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x48, 0x00, - 0x52, 0x1c, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, - 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x8e, - 0x01, 0x0a, 0x1f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x66, - 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, - 0x64, 0x65, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, - 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, - 0x10, 0x01, 0x52, 0x1b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x74, 0x46, 0x6f, - 0x75, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, - 0x45, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0d, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x32, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, - 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, - 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x0d, 0x70, 0x72, 0x65, - 0x66, 0x69, 0x78, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x53, 0x0a, 0x0d, 0x72, 0x65, - 0x67, 0x65, 0x78, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x20, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, - 0x65, 0x52, 0x0c, 0x72, 0x65, 0x67, 0x65, 0x78, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, - 0x5a, 0x0a, 0x13, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, - 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x29, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x70, 0x61, 0x74, 0x68, 0x52, 0x65, - 0x77, 0x72, 0x69, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x3f, 0x0a, 0x14, 0x68, - 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x74, 0x65, - 0x72, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, - 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x48, 0x01, 0x52, 0x12, 0x68, 0x6f, 0x73, 0x74, 0x52, 0x65, - 0x77, 0x72, 0x69, 0x74, 0x65, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x12, 0x48, 0x0a, 0x11, - 
0x61, 0x75, 0x74, 0x6f, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, - 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x48, 0x01, 0x52, 0x0f, 0x61, 0x75, 0x74, 0x6f, 0x48, 0x6f, 0x73, 0x74, 0x52, - 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x3d, 0x0a, 0x13, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, - 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x1d, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, - 0x48, 0x01, 0x52, 0x11, 0x68, 0x6f, 0x73, 0x74, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x48, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x67, 0x0a, 0x17, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, - 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, - 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, - 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, - 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, 0x62, 0x73, - 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x48, 0x01, 0x52, 0x14, 0x68, 0x6f, 0x73, 0x74, 0x52, 0x65, - 0x77, 0x72, 0x69, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x67, 0x65, 0x78, 0x12, 0x35, - 0x0a, 0x17, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f, 0x78, 0x5f, 0x66, 0x6f, 0x72, 0x77, 0x61, - 0x72, 0x64, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x26, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x14, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x58, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, - 0x64, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x33, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3c, 0x0a, 0x0c, 0x69, 0x64, - 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x69, 0x64, 0x6c, - 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x56, 0x0a, 0x11, 0x65, 0x61, 0x72, 0x6c, - 0x79, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x28, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, - 0x0f, 0x65, 0x61, 0x72, 0x6c, 0x79, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x12, 0x45, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, - 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72, - 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4f, 0x0a, 0x19, 0x72, 0x65, 0x74, 0x72, 0x79, - 0x5f, 0x70, 0x6f, 0x6c, 
0x69, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x18, 0x21, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, - 0x52, 0x16, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x54, 0x79, 0x70, - 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6e, 0x0a, 0x17, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x69, 0x65, 0x73, 0x18, 0x1e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, - 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x52, 0x15, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, - 0x72, 0x69, 0x74, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, - 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, - 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x08, 0x70, 0x72, 0x69, - 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x41, 0x0a, 0x0b, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, - 0x6d, 0x69, 0x74, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, - 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x0a, 0x72, 0x61, - 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x5c, 0x0a, 0x16, 0x69, 0x6e, 0x63, 0x6c, - 0x75, 0x64, 0x65, 0x5f, 0x76, 0x68, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, - 0x74, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, - 0x01, 0x52, 0x13, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x68, 0x52, 0x61, 0x74, 0x65, - 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x4e, 0x0a, 0x0b, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x70, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0a, 0x68, 0x61, 0x73, 0x68, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x42, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18, 0x11, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x72, - 0x73, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, - 0x2e, 0x30, 0x18, 0x01, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, 0x50, 0x0a, 0x10, 0x6d, 0x61, - 0x78, 0x5f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x74, 
0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x17, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, - 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x0e, 0x6d, 0x61, - 0x78, 0x47, 0x72, 0x70, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x56, 0x0a, 0x13, - 0x67, 0x72, 0x70, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x6f, 0x66, 0x66, - 0x73, 0x65, 0x74, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, - 0x01, 0x52, 0x11, 0x67, 0x72, 0x70, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x4f, 0x66, - 0x66, 0x73, 0x65, 0x74, 0x12, 0x59, 0x0a, 0x0f, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x19, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, - 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, - 0x0e, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, - 0x67, 0x0a, 0x18, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x64, 0x69, - 0x72, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x22, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x52, 0x16, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x80, 0x01, 0x0a, 0x18, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x39, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, - 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, - 0x30, 0x18, 0x01, 0x52, 0x16, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, - 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5f, 0x0a, 0x16, 0x6d, - 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x64, 0x69, - 0x72, 0x65, 0x63, 0x74, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, - 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, - 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 
0x12, 0x45, 0x0a, 0x0c, - 0x68, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x1b, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x64, 0x67, 0x65, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x68, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x12, 0x64, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xcb, 0x03, 0x0a, 0x13, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x12, 0x33, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x19, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x13, 0x12, 0x11, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x07, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x4b, 0x0a, 0x0e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, - 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0xf2, 0x98, 0xfe, 0x8f, 0x05, - 0x13, 0x12, 0x11, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, - 0x66, 0x69, 0x65, 0x72, 0x52, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x12, 0x59, 0x0a, 0x10, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x66, - 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0f, 0x72, - 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, - 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x64, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x64, 0x12, - 0x48, 0x0a, 0x21, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x64, 0x6f, - 0x77, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x5f, 0x61, 0x70, - 0x70, 0x65, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1d, 0x64, 0x69, 0x73, 0x61, - 0x62, 0x6c, 0x65, 0x53, 0x68, 0x61, 0x64, 0x6f, 0x77, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x75, 0x66, - 0x66, 0x69, 0x78, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x3a, 0x39, 0x9a, 0xc5, 0x88, 0x1e, 0x34, - 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, - 
0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x0b, 0x72, 0x75, 0x6e, 0x74, - 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x1a, 0xd6, 0x0b, 0x0a, 0x0a, 0x48, 0x61, 0x73, 0x68, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4e, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, - 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, - 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x06, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, - 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x48, 0x00, 0x52, 0x06, - 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x12, 0x79, 0x0a, 0x15, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, - 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, - 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x48, 0x00, 0x52, 0x14, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, - 0x73, 0x12, 0x67, 0x0a, 0x0f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, - 0x65, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, - 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, - 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0e, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x5e, 0x0a, 0x0c, 0x66, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, - 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x66, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x74, 0x65, - 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x74, 0x65, - 0x72, 0x6d, 0x69, 0x6e, 
0x61, 0x6c, 0x1a, 0xc6, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x12, 0x2e, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, - 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x53, 0x0a, 0x0d, 0x72, 0x65, 0x67, 0x65, 0x78, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, - 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, - 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, - 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x67, 0x65, 0x78, 0x52, - 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, - 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, - 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, - 0x5f, 0x0a, 0x0f, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, - 0x74, 0x65, 0x12, 0x25, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x11, 0xfa, 0x42, 0x0e, 0x72, 0x0c, 0x10, 0x01, 0x28, 0x80, 0x80, 0x01, 0xc8, 0x01, 0x00, - 0xc0, 0x01, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0f, 0xfa, 0x42, 0x0c, 0x72, 0x0a, 0x28, - 0x80, 0x80, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x1a, 0xfe, 0x01, 0x0a, 0x06, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x12, 0x1b, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, - 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x5d, 0x0a, 0x0a, 0x61, 0x74, 0x74, - 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, - 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6f, - 0x6b, 0x69, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x52, 0x0a, 0x61, 0x74, - 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, - 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, - 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6f, 0x6b, 0x69, - 0x65, 0x1a, 0x7a, 0x0a, 0x14, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, - 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 
0x73, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x49, 0x70, 0x3a, 0x45, 0x9a, 0xc5, 0x88, 0x1e, 0x40, 0x0a, 0x3e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, - 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, - 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x1a, 0x6e, 0x0a, - 0x0e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, - 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, - 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x3f, 0x9a, 0xc5, - 0x88, 0x1e, 0x3a, 0x0a, 0x38, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x51, - 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x1a, 0x66, 0x0a, - 0x0b, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x19, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, - 0x10, 0x01, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x3a, 0x3c, 0x9a, 0xc5, 0x88, 0x1e, 0x37, 0x0a, 0x35, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, - 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, - 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, - 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x61, 0x73, - 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x17, 0x0a, 0x10, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, - 0x1a, 0xa3, 0x03, 0x0a, 0x0d, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x30, 0x0a, 0x0c, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, - 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x0b, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x65, 0x0a, 0x0e, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, - 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 
0x64, 0x65, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x1a, 0x8d, 0x01, 0x0a, 0x0d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x5d, 0x0a, 0x15, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x70, - 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x6f, 0x73, 0x74, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x6f, 0x73, - 0x74, 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x6f, 0x75, - 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x88, 0x02, 0x0a, 0x11, 0x4d, 0x61, 0x78, 0x53, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x13, - 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x50, 0x0a, 0x17, 0x67, 0x72, 0x70, 0x63, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6d, - 0x61, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x67, 0x72, 0x70, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x78, 0x12, 0x56, 0x0a, 0x1a, 0x67, 0x72, 0x70, - 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x17, 0x67, 0x72, 0x70, 0x63, 0x54, 0x69, - 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4f, 0x66, 0x66, 0x73, 0x65, - 0x74, 0x22, 0x60, 0x0a, 0x1b, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x74, 0x46, - 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, - 0x12, 0x17, 0x0a, 0x13, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x41, 0x56, - 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, - 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x49, 0x4e, 0x54, 0x45, - 
0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, - 0x52, 0x10, 0x02, 0x22, 0x5e, 0x0a, 0x16, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, - 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, - 0x1e, 0x50, 0x41, 0x53, 0x53, 0x5f, 0x54, 0x48, 0x52, 0x4f, 0x55, 0x47, 0x48, 0x5f, 0x49, 0x4e, - 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, - 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x48, 0x41, 0x4e, 0x44, 0x4c, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x45, - 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, 0x01, 0x1a, - 0x02, 0x18, 0x01, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, - 0x6f, 0x75, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x18, 0x0a, 0x11, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, - 0x03, 0xf8, 0x42, 0x01, 0x42, 0x18, 0x0a, 0x16, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x77, - 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, - 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x4a, 0x04, 0x08, 0x13, 0x10, 0x14, - 0x4a, 0x04, 0x08, 0x10, 0x10, 0x11, 0x4a, 0x04, 0x08, 0x16, 0x10, 0x17, 0x4a, 0x04, 0x08, 0x15, - 0x10, 0x16, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x52, 0x15, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x5f, 0x6d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, - 0xbf, 0x10, 0x0a, 0x0b, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, - 0x19, 0x0a, 0x08, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x72, 0x65, 0x74, 0x72, 0x79, 0x4f, 0x6e, 0x12, 0x52, 0x0a, 0x0b, 0x6e, 0x75, - 0x6d, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x13, 0xf2, - 0x98, 0xfe, 0x8f, 0x05, 0x0d, 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, - 0x65, 0x73, 0x52, 0x0a, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x41, - 0x0a, 0x0f, 0x70, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, - 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x54, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, - 0x74, 0x12, 0x4a, 0x0a, 0x14, 0x70, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x79, 0x5f, 0x69, 0x64, 0x6c, - 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x70, 0x65, 0x72, 0x54, - 0x72, 0x79, 0x49, 0x64, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x57, 0x0a, - 0x0e, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 
0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, - 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, - 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x72, - 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x67, 0x0a, 0x14, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, - 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, - 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x48, 0x6f, - 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x12, 0x72, 0x65, 0x74, - 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, - 0x64, 0x0a, 0x18, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x16, 0x72, - 0x65, 0x74, 0x72, 0x79, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x65, 0x64, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x21, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x65, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6d, 0x61, - 0x78, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x1d, 0x68, 0x6f, 0x73, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x74, 0x72, 0x79, 0x4d, 0x61, 0x78, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x12, - 0x34, 0x0a, 0x16, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0d, 0x52, - 0x14, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x43, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x62, - 0x61, 0x63, 0x6b, 0x5f, 0x6f, 0x66, 0x66, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, - 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x52, 0x0c, - 0x72, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x78, 0x0a, 0x1b, - 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x74, - 0x72, 0x79, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x6f, 0x66, 0x66, 0x18, 0x0b, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x65, - 0x64, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x52, 0x17, 0x72, - 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 
0x65, 0x64, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, - 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x51, 0x0a, 0x11, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, - 0x62, 0x6c, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x10, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, - 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x60, 0x0a, 0x19, 0x72, 0x65, 0x74, - 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x65, 0x72, 0x52, 0x17, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x1a, 0xb9, 0x01, 0x0a, 0x0d, - 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, - 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, - 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, - 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x74, - 0x72, 0x79, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, - 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xc3, 0x01, 0x0a, 0x12, 0x52, 0x65, 0x74, 0x72, - 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1b, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, - 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, - 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, - 0x65, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, - 0x74, 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, - 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 
0x67, 0x1a, 0xd6, 0x01, - 0x0a, 0x0c, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x4a, - 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x42, 0x0a, 0xfa, 0x42, 0x07, 0xaa, 0x01, 0x04, 0x08, 0x01, 0x2a, 0x00, 0x52, 0x0c, 0x62, 0x61, - 0x73, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x46, 0x0a, 0x0c, 0x6d, 0x61, - 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, - 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, - 0x61, 0x6c, 0x3a, 0x32, 0x9a, 0xc5, 0x88, 0x1e, 0x2d, 0x0a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x65, - 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, - 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x1a, 0x88, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x73, 0x65, 0x74, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, - 0xc0, 0x01, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x56, 0x0a, 0x06, 0x66, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, - 0x33, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, - 0x73, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, - 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, - 0x74, 0x1a, 0xc0, 0x01, 0x0a, 0x17, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x65, - 0x64, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x5d, 0x0a, - 0x0d, 0x72, 0x65, 0x73, 0x65, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x74, - 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x48, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x0c, - 0x72, 0x65, 0x73, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x46, 0x0a, 0x0c, - 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, - 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, - 0x72, 0x76, 0x61, 0x6c, 0x22, 0x34, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x65, 0x74, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x45, 0x43, - 
0x4f, 0x4e, 0x44, 0x53, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x55, 0x4e, 0x49, 0x58, 0x5f, 0x54, - 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x10, 0x01, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, - 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, - 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x22, 0x9c, 0x02, 0x0a, 0x0b, 0x48, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x12, 0x50, 0x0a, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, - 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, - 0x28, 0x01, 0x52, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x73, 0x12, 0x5c, 0x0a, 0x19, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, - 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x63, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, - 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, - 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x17, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x63, - 0x65, 0x12, 0x36, 0x0a, 0x18, 0x68, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x6f, 0x6e, 0x5f, 0x70, 0x65, - 0x72, 0x5f, 0x74, 0x72, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x14, 0x68, 0x65, 0x64, 0x67, 0x65, 0x4f, 0x6e, 0x50, 0x65, 0x72, 0x54, - 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, - 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, - 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x48, 0x65, 0x64, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x22, 0xe1, 0x05, 0x0a, 0x0e, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0e, 0x68, 0x74, 0x74, 0x70, 0x73, 0x5f, 0x72, 0x65, 0x64, - 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0d, 0x68, - 0x74, 0x74, 0x70, 0x73, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x29, 0x0a, 0x0f, - 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x52, - 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x30, 0x0a, 0x0d, 0x68, 0x6f, 0x73, 0x74, 0x5f, - 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, - 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x52, 0x0c, 0x68, 0x6f, 0x73, - 0x74, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x6f, 0x72, - 0x74, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x0c, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x32, - 0x0a, 0x0d, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, - 0x01, 0x02, 0x48, 0x01, 
0x52, 0x0c, 0x70, 0x61, 0x74, 0x68, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x12, 0x34, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x72, 0x65, 0x77, - 0x72, 0x69, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, - 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x02, 0x48, 0x01, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x66, 0x69, - 0x78, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x55, 0x0a, 0x0d, 0x72, 0x65, 0x67, 0x65, - 0x78, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x41, 0x6e, 0x64, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x65, 0x48, - 0x01, 0x52, 0x0c, 0x72, 0x65, 0x67, 0x65, 0x78, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, - 0x69, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, - 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, - 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, - 0x64, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x72, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, - 0x72, 0x69, 0x70, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0a, 0x73, 0x74, 0x72, 0x69, 0x70, 0x51, 0x75, 0x65, 0x72, 0x79, 0x22, 0x77, 0x0a, 0x14, 0x52, - 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x43, - 0x6f, 0x64, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x4d, 0x4f, 0x56, 0x45, 0x44, 0x5f, 0x50, 0x45, 0x52, - 0x4d, 0x41, 0x4e, 0x45, 0x4e, 0x54, 0x4c, 0x59, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x46, 0x4f, - 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x45, 0x45, 0x5f, 0x4f, 0x54, 0x48, - 0x45, 0x52, 0x10, 0x02, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x45, 0x4d, 0x50, 0x4f, 0x52, 0x41, 0x52, - 0x59, 0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, - 0x50, 0x45, 0x52, 0x4d, 0x41, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, - 0x43, 0x54, 0x10, 0x04, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, - 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x1a, - 0x0a, 0x18, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, - 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x42, 0x18, 0x0a, 0x16, 0x70, 0x61, - 0x74, 0x68, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, - 0x66, 0x69, 0x65, 0x72, 0x22, 0xa1, 0x01, 0x0a, 0x14, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, - 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x0b, 0xfa, - 0x42, 0x08, 0x2a, 0x06, 0x10, 0xd8, 0x04, 0x28, 0xc8, 0x01, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x34, 0x0a, 0x04, 0x62, 0x6f, 
0x64, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, - 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, - 0x75, 0x74, 0x65, 0x2e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x15, 0x0a, 0x13, 0x4e, 0x6f, 0x6e, 0x46, - 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, - 0x91, 0x01, 0x0a, 0x09, 0x44, 0x65, 0x63, 0x6f, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x25, 0x0a, - 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x70, 0x61, 0x67, 0x61, 0x74, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x70, 0x61, 0x67, 0x61, 0x74, 0x65, 0x3a, 0x23, - 0x9a, 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x44, 0x65, 0x63, 0x6f, 0x72, 0x61, - 0x74, 0x6f, 0x72, 0x22, 0xd2, 0x02, 0x0a, 0x07, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x12, - 0x49, 0x0a, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, - 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x63, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x49, 0x0a, 0x0f, 0x72, 0x61, - 0x6e, 0x64, 0x6f, 0x6d, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, - 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x53, 0x61, 0x6d, - 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x4b, 0x0a, 0x10, 0x6f, 0x76, 0x65, 0x72, 0x61, 0x6c, 0x6c, - 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, - 0x74, 0x52, 0x0f, 0x6f, 0x76, 0x65, 0x72, 0x61, 0x6c, 0x6c, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x69, - 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x61, 0x67, - 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x74, 0x79, 0x70, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, - 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x61, 0x67, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x54, 0x61, 0x67, 0x73, 0x3a, 0x21, 0x9a, 0xc5, 0x88, 0x1e, 0x1c, 
0x0a, 0x1a, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, - 0x2e, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x22, 0xb4, 0x01, 0x0a, 0x0e, 0x56, 0x69, 0x72, - 0x74, 0x75, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x07, 0x68, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x1b, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, - 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, - 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, - 0x75, 0x74, 0x65, 0x2e, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x52, 0x07, - 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x22, - 0xc9, 0x1c, 0x0a, 0x09, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x3b, 0x0a, - 0x05, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, - 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, - 0x02, 0x18, 0x0a, 0x52, 0x05, 0x73, 0x74, 0x61, 0x67, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x69, - 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x4b, 0x0a, 0x07, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, - 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3f, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, - 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, - 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, - 0x64, 0x65, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x1a, 0xb5, 0x18, 0x0a, 0x06, 0x41, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5e, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x12, 0x6d, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, - 
0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, - 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x65, 0x73, 0x74, 0x69, - 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, - 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x12, 0x61, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x73, 0x48, 0x00, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x5e, 0x0a, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, - 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, - 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x55, 0x0a, 0x0b, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, - 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x48, - 0x00, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x68, 0x0a, - 0x12, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, - 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x10, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x77, 0x0a, 0x10, 0x64, 0x79, 0x6e, 0x61, 0x6d, - 0x69, 0x63, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x37, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, - 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, - 0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x42, 0x11, 0x92, 0xc7, 0x86, 0xd8, - 0x04, 0x03, 0x33, 0x2e, 
0x30, 0xb8, 0xee, 0xf2, 0xd2, 0x05, 0x01, 0x18, 0x01, 0x48, 0x00, 0x52, - 0x0f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x12, 0x4e, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, - 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x61, - 0x44, 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x12, 0x4a, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, - 0x00, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x71, 0x0a, 0x15, - 0x6d, 0x61, 0x73, 0x6b, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x61, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x61, 0x73, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x13, 0x6d, 0x61, 0x73, 0x6b, - 0x65, 0x64, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, - 0x81, 0x01, 0x0a, 0x1b, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, - 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, - 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x51, - 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x18, 0x71, 0x75, 0x65, 0x72, 0x79, - 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x1a, 0x49, 0x0a, 0x0d, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x3a, 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, - 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x1a, 0x53, - 0x0a, 0x12, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x3a, 0x3d, 0x9a, 0xc5, 0x88, 0x1e, 0x38, 0x0a, 0x36, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, - 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 
0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x1a, 0xd1, 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, - 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, - 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x0e, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x69, - 0x66, 0x5f, 0x61, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, - 0x73, 0x6b, 0x69, 0x70, 0x49, 0x66, 0x41, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x3a, 0x39, 0x9a, 0xc5, - 0x88, 0x1e, 0x34, 0x0a, 0x32, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, - 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x1a, 0x49, 0x0a, 0x0d, 0x52, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x3a, 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, 0x0a, - 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, - 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, - 0x73, 0x73, 0x1a, 0xbe, 0x01, 0x0a, 0x13, 0x4d, 0x61, 0x73, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x6d, - 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x52, 0x0a, 0x12, 0x76, 0x34, - 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x5f, 0x6c, 0x65, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x20, 0x52, 0x0f, 0x76, - 0x34, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x4d, 0x61, 0x73, 0x6b, 0x4c, 0x65, 0x6e, 0x12, 0x53, - 0x0a, 0x12, 0x76, 0x36, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, 0x73, 0x6b, - 0x5f, 0x6c, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, - 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x2a, 0x03, 0x18, - 0x80, 0x01, 0x52, 0x0f, 0x76, 0x36, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x4d, 0x61, 0x73, 0x6b, - 0x4c, 0x65, 0x6e, 0x1a, 0x9e, 0x01, 0x0a, 0x0a, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x4b, - 0x65, 0x79, 0x12, 0x32, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, - 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x0d, - 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4b, 0x65, 0x79, 0x3a, 0x35, 0x9a, - 0xc5, 0x88, 0x1e, 0x30, 0x0a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, - 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, - 0x63, 0x4b, 0x65, 0x79, 0x1a, 0xb3, 0x02, 0x0a, 0x10, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4b, 0x65, 0x79, - 0x12, 0x32, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, - 0x02, 0x10, 0x01, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3d, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, - 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x12, 0x48, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, - 0x01, 0x02, 0x08, 0x01, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x3a, 0x3b, 0x9a, - 0xc5, 0x88, 0x1e, 0x36, 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, - 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0xb8, 0x01, 0x0a, 0x0f, 0x44, - 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x12, 0x2e, - 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, - 0x0d, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x50, - 0x0a, 0x0c, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, - 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, - 0x02, 0x10, 0x01, 0x52, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, - 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0xda, 0x02, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, - 
0x74, 0x61, 0x12, 0x2e, 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, - 0x02, 0x10, 0x01, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4b, - 0x65, 0x79, 0x12, 0x50, 0x0a, 0x0c, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6b, - 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, - 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x42, 0x08, 0xfa, - 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x4b, 0x65, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x59, 0x0a, 0x06, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x37, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, - 0x33, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x69, 0x66, 0x5f, - 0x61, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x73, 0x6b, - 0x69, 0x70, 0x49, 0x66, 0x41, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x22, 0x26, 0x0a, 0x06, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x59, 0x4e, 0x41, 0x4d, 0x49, 0x43, 0x10, - 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x4f, 0x55, 0x54, 0x45, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, - 0x10, 0x01, 0x1a, 0x97, 0x02, 0x0a, 0x18, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x65, 0x74, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, - 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x6b, 0x65, - 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x32, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3d, 0x0a, 0x0c, 0x65, 0x78, - 0x70, 0x65, 0x63, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x65, 0x78, - 0x70, 0x65, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x61, 0x0a, 0x10, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x65, 0x72, - 0x79, 0x50, 0x61, 0x72, 
0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x0f, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x3a, 0x2a, 0x9a, 0xc5, - 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, - 0x74, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x17, 0x0a, 0x10, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, - 0x01, 0x1a, 0xf2, 0x01, 0x0a, 0x08, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x66, - 0x0a, 0x10, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, - 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x72, - 0x69, 0x64, 0x65, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x0f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x63, 0x0a, 0x0f, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, - 0x63, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x50, 0x0a, 0x0c, 0x6d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x4b, 0x65, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0b, - 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x42, 0x19, 0x0a, 0x12, 0x6f, - 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, - 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x3a, 0x23, 0x9a, 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, - 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0xe6, 0x05, 0x0a, 0x0d, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x21, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, - 0x72, 0x08, 0x10, 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x2e, 0x0a, 0x0b, 0x65, 0x78, 0x61, 0x63, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, - 0x18, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x65, 0x78, 0x61, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x12, 0x5c, 0x0a, 0x10, 0x73, 0x61, 0x66, 0x65, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, - 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, - 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0e, - 0x73, 0x61, 0x66, 0x65, 0x52, 0x65, 0x67, 0x65, 
0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x3c, - 0x0a, 0x0b, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, - 0x52, 0x0a, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x25, 0x0a, 0x0d, - 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x12, 0x37, 0x0a, 0x0c, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x04, 0x72, 0x02, - 0x10, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x48, 0x00, 0x52, - 0x0b, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x37, 0x0a, 0x0c, - 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, - 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, - 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x3b, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, - 0x73, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x42, 0x12, 0xfa, - 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, - 0x01, 0x48, 0x00, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x12, 0x49, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, - 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, - 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x21, 0x0a, - 0x0c, 0x69, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x12, 0x40, 0x0a, 0x1d, 0x74, 0x72, 0x65, 0x61, 0x74, 0x5f, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, - 0x67, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x73, 0x5f, 0x65, 0x6d, 0x70, 0x74, - 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x74, 0x72, 0x65, 0x61, 0x74, 0x4d, 0x69, - 0x73, 0x73, 0x69, 0x6e, 0x67, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x73, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x48, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x18, 0x0a, 0x16, 0x68, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x70, 0x65, 0x63, - 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, - 0x04, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x52, 0x0b, 0x72, 0x65, 0x67, 0x65, 0x78, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x22, 0xa1, 0x02, 0x0a, 0x15, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, - 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 
0x65, 0x72, 0x12, 0x1e, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0a, 0xfa, 0x42, - 0x07, 0x72, 0x05, 0x10, 0x01, 0x28, 0x80, 0x08, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x53, - 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, - 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, - 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, - 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x12, 0x25, 0x0a, 0x0d, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x72, - 0x65, 0x73, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, - 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, - 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, - 0x65, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x21, 0x0a, 0x1f, 0x71, - 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x4a, 0x04, - 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x22, 0x86, 0x03, 0x0a, 0x16, 0x49, 0x6e, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x12, 0x52, 0x0a, 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, - 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x12, 0x40, 0x0a, 0x17, 0x72, 0x65, 0x64, 0x69, 0x72, - 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64, - 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0d, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, - 0x10, 0x05, 0x52, 0x15, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x4a, 0x0a, 0x0a, 0x70, 0x72, 0x65, - 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x64, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, - 0x72, 0x6f, 0x73, 0x73, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x5f, 0x72, 0x65, 0x64, 0x69, - 0x72, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x61, 0x6c, 0x6c, 0x6f, - 0x77, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x52, 0x65, 0x64, 0x69, - 
0x72, 0x65, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x18, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x70, 0x79, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x42, 0x12, 0xfa, 0x42, 0x0f, 0x92, 0x01, 0x0c, 0x18, 0x01, - 0x22, 0x08, 0x72, 0x06, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x15, 0x72, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x43, 0x6f, 0x70, - 0x79, 0x22, 0x79, 0x0a, 0x0c, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x2c, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x1f, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, - 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x42, 0x8b, 0x01, 0xba, - 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, - 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x14, 0x52, 0x6f, - 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, - 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, - 0x76, 0x33, 0x3b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var ( diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.go index fee3bad823..02027795c3 100644 --- a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.go +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.go @@ -48,14 +48,8 @@ const ( // (Envoy does not require HTTP/2 to take place over TLS or to use ALPN. // Prior knowledge is allowed). HttpConnectionManager_HTTP2 HttpConnectionManager_CodecType = 2 -<<<<<<< HEAD // The connection manager will assume that the client is speaking HTTP/3. // This needs to be consistent with listener and transport socket config. -======= - // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with - // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient - // to distinguish HTTP1 and HTTP2 traffic. 
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	HttpConnectionManager_HTTP3 HttpConnectionManager_CodecType = 3
 )
 
@@ -399,10 +393,6 @@ type HttpConnectionManager struct {
 	// Additional HTTP/2 settings that are passed directly to the HTTP/2 codec.
 	Http2ProtocolOptions *v3.Http2ProtocolOptions `protobuf:"bytes,9,opt,name=http2_protocol_options,json=http2ProtocolOptions,proto3" json:"http2_protocol_options,omitempty"`
 	// Additional HTTP/3 settings that are passed directly to the HTTP/3 codec.
-<<<<<<< HEAD
-=======
-	// [#not-implemented-hide:]
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	Http3ProtocolOptions *v3.Http3ProtocolOptions `protobuf:"bytes,44,opt,name=http3_protocol_options,json=http3ProtocolOptions,proto3" json:"http3_protocol_options,omitempty"`
 	// An optional override that the connection manager will write to the server
 	// header in responses. If not set, the default is “envoy“.
@@ -417,7 +407,6 @@ type HttpConnectionManager struct {
 	SchemeHeaderTransformation *v3.SchemeHeaderTransformation `protobuf:"bytes,48,opt,name=scheme_header_transformation,json=schemeHeaderTransformation,proto3" json:"scheme_header_transformation,omitempty"`
 	// The maximum request headers size for incoming connections.
 	// If unconfigured, the default max request headers allowed is 60 KiB.
-<<<<<<< HEAD
 	// The default value can be overridden by setting runtime key “envoy.reloadable_features.max_request_headers_size_kb“.
 	// Requests that exceed this limit will receive a 431 response.
 	//
@@ -425,9 +414,6 @@ type HttpConnectionManager struct {
 	//
 	// HTTP/2 (when using nghttp2) limits a single header to around 100kb.
 	// HTTP/3 limits a single header to around 1024kb.
-=======
-	// Requests that exceed this limit will receive a 431 response.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	MaxRequestHeadersKb *wrapperspb.UInt32Value `protobuf:"bytes,29,opt,name=max_request_headers_kb,json=maxRequestHeadersKb,proto3" json:"max_request_headers_kb,omitempty"`
 	// The stream idle timeout for connections managed by the connection manager.
 	// If not specified, this defaults to 5 minutes. The default value was selected
@@ -599,7 +585,6 @@ type HttpConnectionManager struct {
 	// purposes. If unspecified, only RFC1918 IP addresses will be considered internal.
 	// See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more
 	// information about internal/external addresses.
-<<<<<<< HEAD
 	//
 	// .. warning::
 	//
@@ -629,8 +614,6 @@ type HttpConnectionManager struct {
 	//
 	// cidr_ranges:
 	// address_prefix: ::1
 	// prefix_len: 128
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	InternalAddressConfig *HttpConnectionManager_InternalAddressConfig `protobuf:"bytes,25,opt,name=internal_address_config,json=internalAddressConfig,proto3" json:"internal_address_config,omitempty"`
 	// If set, Envoy will not append the remote address to the
 	// :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. This may be used in
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.go
index 8ef3ccbf93..d21286cf88 100644
--- a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.go
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.pb.go
@@ -37,21 +37,12 @@ const (
 // regardless of result. Only failed queries count toward eps. A config
 // parameter error_utilization_penalty controls the penalty to adjust endpoint
 // weights using eps and qps. The weight of a given endpoint is computed as:
-<<<<<<< HEAD
 // “qps / (utilization + eps/qps * error_utilization_penalty)“.
 //
 // See the :ref:`load balancing architecture
 // overview` for more information.
 //
 // [#next-free-field: 8]
-=======
-//
-// qps / (utilization + eps/qps * error_utilization_penalty)
-//
-// See the :ref:`load balancing architecture overview` for more information.
-//
-// [#next-free-field: 7]
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 type ClientSideWeightedRoundRobin struct {
 	state         protoimpl.MessageState
 	sizeCache     protoimpl.SizeCache
@@ -84,14 +75,11 @@ type ClientSideWeightedRoundRobin struct {
 	// calculated as eps/qps. Configuration is rejected if this value is negative.
 	// Default is 1.0.
 	ErrorUtilizationPenalty *wrapperspb.FloatValue `protobuf:"bytes,6,opt,name=error_utilization_penalty,json=errorUtilizationPenalty,proto3" json:"error_utilization_penalty,omitempty"`
-<<<<<<< HEAD
 	// By default, endpoint weight is computed based on the :ref:`application_utilization ` field reported by the endpoint.
 	// If that field is not set, then utilization will instead be computed by taking the max of the values of the metrics specified here.
 	// For map fields in the ORCA proto, the string will be of the form “.“. For example, the string “named_metrics.foo“ will mean to look for the key “foo“ in the ORCA :ref:`named_metrics ` field.
 	// If none of the specified metrics are present in the load report, then :ref:`cpu_utilization ` is used instead.
 	MetricNamesForComputingUtilization []string `protobuf:"bytes,7,rep,name=metric_names_for_computing_utilization,json=metricNamesForComputingUtilization,proto3" json:"metric_names_for_computing_utilization,omitempty"`
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *ClientSideWeightedRoundRobin) Reset() {
@@ -168,7 +156,6 @@ func (x *ClientSideWeightedRoundRobin) GetErrorUtilizationPenalty() *wrapperspb.
return nil } -<<<<<<< HEAD func (x *ClientSideWeightedRoundRobin) GetMetricNamesForComputingUtilization() []string { if x != nil { return x.MetricNamesForComputingUtilization @@ -176,8 +163,6 @@ func (x *ClientSideWeightedRoundRobin) GetMetricNamesForComputingUtilization() [ return nil } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var File_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto protoreflect.FileDescriptor var file_envoy_extensions_load_balancing_policies_client_side_weighted_round_robin_v3_client_side_weighted_round_robin_proto_rawDesc = []byte{ @@ -200,11 +185,7 @@ var file_envoy_extensions_load_balancing_policies_client_side_weighted_round_rob 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, -<<<<<<< HEAD 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xdb, 0x04, 0x0a, 0x1c, -======= - 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x04, 0x0a, 0x1c, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x69, 0x64, 0x65, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x12, 0x4f, 0x0a, 0x16, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6f, 0x6f, 0x62, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, @@ -237,7 +218,6 @@ var file_envoy_extensions_load_balancing_policies_client_side_weighted_round_rob 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x0a, 0x05, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x52, 0x17, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, -<<<<<<< HEAD 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x12, 0x52, 0x0a, 0x26, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, @@ -263,28 +243,6 @@ var file_envoy_extensions_load_balancing_policies_client_side_weighted_round_rob 0x6e, 0x74, 0x5f, 0x73, 0x69, 0x64, 0x65, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -======= - 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x42, 0xa2, 0x02, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, - 0x0a, 0x5a, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, - 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, - 0x73, 0x69, 0x64, 0x65, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x6f, - 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x21, 0x43, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x53, 0x69, 0x64, 0x65, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, - 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x96, 0x01, 0x67, 0x69, 
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, - 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, - 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6c, 0x6f, 0x61, 0x64, - 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x69, 0x65, 0x73, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x69, 0x64, 0x65, 0x5f, - 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, - 0x6f, 0x62, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, - 0x69, 0x64, 0x65, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, - 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var ( diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin_vtproto.pb.go index cb42a3cf51..f7e73831a5 100644 --- a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin_vtproto.pb.go +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin_vtproto.pb.go @@ -50,7 +50,6 @@ func (m *ClientSideWeightedRoundRobin) MarshalToSizedBufferVTStrict(dAtA []byte) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } -<<<<<<< HEAD if len(m.MetricNamesForComputingUtilization) > 0 { for iNdEx := len(m.MetricNamesForComputingUtilization) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.MetricNamesForComputingUtilization[iNdEx]) @@ -60,8 +59,6 @@ func (m *ClientSideWeightedRoundRobin) MarshalToSizedBufferVTStrict(dAtA []byte) dAtA[i] = 0x3a } } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if m.ErrorUtilizationPenalty != nil { size, err := (*wrapperspb.FloatValue)(m.ErrorUtilizationPenalty).MarshalToSizedBufferVTStrict(dAtA[:i]) if err != nil { @@ -155,15 +152,12 @@ func (m *ClientSideWeightedRoundRobin) SizeVT() (n int) { l = (*wrapperspb.FloatValue)(m.ErrorUtilizationPenalty).SizeVT() n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } -<<<<<<< HEAD if len(m.MetricNamesForComputingUtilization) > 0 { for _, s := range m.MetricNamesForComputingUtilization { l = len(s) n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) n += len(m.unknownFields) return n } diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.go index 3021c194e2..f9aa84130a 100644 --- a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.go +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.go @@ -705,11 +705,7 @@ type CertificateProviderPluginInstance struct { sizeCache protoimpl.SizeCache unknownFields 
protoimpl.UnknownFields -<<<<<<< HEAD // Provider instance name. -======= - // Provider instance name. If not present, defaults to "default". ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // Instance names should generally be defined not in terms of the underlying provider // implementation (e.g., "file_watcher") but rather in terms of the function of the @@ -1349,7 +1345,6 @@ var file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDesc = []byte 0xb7, 0x8b, 0xa4, 0x02, 0x01, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x54, 0x6c, 0x73, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, -<<<<<<< HEAD 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x7c, 0x0a, 0x21, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, @@ -1497,155 +1492,6 @@ var file_envoy_extensions_transport_sockets_tls_v3_common_proto_rawDesc = []byte 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x6c, 0x73, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -======= - 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x73, 0x0a, 0x21, 0x43, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, - 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, - 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, - 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, - 0xc6, 0x02, 0x0a, 0x15, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, - 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x6f, 0x0a, 0x08, 0x73, 0x61, 0x6e, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x48, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, - 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, - 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, - 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x53, 0x61, - 0x6e, 0x54, 0x79, 0x70, 0x65, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x82, 0x01, 0x04, 0x10, 0x01, 0x20, - 0x00, 0x52, 0x07, 0x73, 0x61, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x48, 0x0a, 0x07, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, - 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, - 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x72, 
0x12, 0x10, 0x0a, 0x03, 0x6f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6f, 0x69, 0x64, 0x22, 0x60, 0x0a, 0x07, 0x53, 0x61, 0x6e, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x41, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, - 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x45, - 0x4d, 0x41, 0x49, 0x4c, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x4e, 0x53, 0x10, 0x02, 0x12, - 0x07, 0x0a, 0x03, 0x55, 0x52, 0x49, 0x10, 0x03, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x50, 0x5f, 0x41, - 0x44, 0x44, 0x52, 0x45, 0x53, 0x53, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x4f, 0x54, 0x48, 0x45, - 0x52, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x10, 0x05, 0x22, 0xa9, 0x0d, 0x0a, 0x1c, 0x43, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x57, 0x0a, 0x0a, 0x74, 0x72, 0x75, - 0x73, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, - 0x16, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x10, 0x12, 0x0e, 0x63, 0x61, 0x5f, 0x63, 0x65, 0x72, 0x74, - 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, - 0x43, 0x61, 0x12, 0xad, 0x01, 0x0a, 0x20, 0x63, 0x61, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x69, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, - 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x50, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x16, 0xf2, 0x98, 0xfe, - 0x8f, 0x05, 0x10, 0x12, 0x0e, 0x63, 0x61, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x52, 0x1d, 0x63, 0x61, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, - 0x63, 0x65, 0x12, 0x83, 0x01, 0x0a, 0x11, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x72, 0x6f, - 0x6f, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x57, - 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, - 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x52, 0x6f, - 0x6f, 0x74, 0x43, 0x65, 0x72, 0x74, 0x73, 0x52, 0x0f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x52, - 0x6f, 0x6f, 0x74, 0x43, 0x65, 0x72, 0x74, 0x73, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x64, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x0b, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 
0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, - 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x10, 0x77, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x46, 0x0a, - 0x17, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x5f, 0x73, 0x70, 0x6b, 0x69, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x42, 0x0e, - 0xfa, 0x42, 0x0b, 0x92, 0x01, 0x08, 0x22, 0x06, 0x72, 0x04, 0x10, 0x2c, 0x28, 0x2c, 0x52, 0x15, - 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x53, 0x70, 0x6b, 0x69, 0x12, 0x46, 0x0a, 0x17, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, - 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x68, 0x61, 0x73, 0x68, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x92, 0x01, 0x08, 0x22, 0x06, - 0x72, 0x04, 0x10, 0x40, 0x28, 0x5f, 0x52, 0x15, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x48, 0x61, 0x73, 0x68, 0x12, 0x82, 0x01, - 0x0a, 0x1d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x73, 0x75, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, - 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, - 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, - 0x33, 0x2e, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, - 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x19, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x54, 0x79, - 0x70, 0x65, 0x64, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, - 0x65, 0x73, 0x12, 0x68, 0x0a, 0x17, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x75, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x09, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, - 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x14, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x75, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x6b, 0x0a, 0x24, - 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x63, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, - 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x21, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x53, - 0x69, 0x67, 0x6e, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x32, 0x0a, 0x03, 0x63, 0x72, 0x6c, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 
0x33, 0x2e, 0x44, 0x61, - 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x03, 0x63, 0x72, 0x6c, 0x12, 0x3a, 0x0a, - 0x19, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x63, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x17, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x64, 0x43, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0xa2, 0x01, 0x0a, 0x18, 0x74, 0x72, - 0x75, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x5e, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, - 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, - 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, - 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, - 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x16, 0x74, 0x72, 0x75, 0x73, 0x74, 0x43, 0x68, 0x61, - 0x69, 0x6e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x62, - 0x0a, 0x17, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x63, 0x75, 0x73, - 0x74, 0x6f, 0x6d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x38, 0x0a, 0x19, 0x6f, 0x6e, 0x6c, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, - 0x79, 0x5f, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x63, 0x72, 0x6c, 0x18, - 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x6f, 0x6e, 0x6c, 0x79, 0x56, 0x65, 0x72, 0x69, 0x66, - 0x79, 0x4c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x43, 0x72, 0x6c, 0x12, 0x4f, 0x0a, 0x10, - 0x6d, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x64, 0x65, 0x70, 0x74, 0x68, - 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x18, 0x64, 0x52, 0x0e, 0x6d, - 0x61, 0x78, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x44, 0x65, 0x70, 0x74, 0x68, 0x1a, 0x11, 0x0a, - 0x0f, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x52, 0x6f, 0x6f, 0x74, 0x43, 0x65, 0x72, 0x74, 0x73, - 0x22, 0x46, 0x0a, 0x16, 0x54, 0x72, 0x75, 0x73, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x56, 0x65, - 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x12, 0x56, 0x45, - 0x52, 0x49, 0x46, 0x59, 0x5f, 0x54, 0x52, 0x55, 0x53, 0x54, 0x5f, 0x43, 0x48, 0x41, 0x49, 0x4e, - 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x41, 0x43, 0x43, 0x45, 0x50, 0x54, 0x5f, 0x55, 0x4e, 0x54, - 0x52, 0x55, 0x53, 0x54, 0x45, 0x44, 0x10, 0x01, 0x3a, 0x35, 0x9a, 0xc5, 0x88, 0x1e, 0x30, 0x0a, - 
0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, - 0x74, 0x68, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x4a, - 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x52, 0x17, 0x76, 0x65, 0x72, - 0x69, 0x66, 0x79, 0x5f, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x74, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x42, 0xa8, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, - 0x37, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, - 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, - 0x73, 0x2e, 0x74, 0x6c, 0x73, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x56, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, - 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, - 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, - 0x74, 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x3b, 0x74, 0x6c, 0x73, 0x76, 0x33, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var ( diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.validate.go index 23a3be78da..6e32f5af51 100644 --- a/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.validate.go +++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3/common.pb.validate.go @@ -826,7 +826,6 @@ func (m *CertificateProviderPluginInstance) validate(all bool) error { var errors []error -<<<<<<< HEAD if utf8.RuneCountInString(m.GetInstanceName()) < 1 { err := CertificateProviderPluginInstanceValidationError{ field: "InstanceName", @@ -837,9 +836,6 @@ func (m *CertificateProviderPluginInstance) validate(all bool) error { } errors = append(errors, err) } -======= - // no validation rules for InstanceName ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // no validation rules for CertificateName diff --git a/vendor/github.com/go-jose/go-jose/v3/cryptosigner/cryptosigner.go b/vendor/github.com/go-jose/go-jose/v3/cryptosigner/cryptosigner.go deleted file mode 100644 index ddad5c96d5..0000000000 --- a/vendor/github.com/go-jose/go-jose/v3/cryptosigner/cryptosigner.go +++ /dev/null @@ -1,147 +0,0 @@ -/*- - * Copyright 2018 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package cryptosigner implements an OpaqueSigner that wraps a "crypto".Signer -// -// https://godoc.org/crypto#Signer -package cryptosigner - -import ( - "crypto" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "encoding/asn1" - "io" - "math/big" - - "github.com/go-jose/go-jose/v3" -) - -// Opaque creates an OpaqueSigner from a "crypto".Signer -func Opaque(s crypto.Signer) jose.OpaqueSigner { - pk := &jose.JSONWebKey{ - Key: s.Public(), - } - return &cryptoSigner{signer: s, rand: rand.Reader, pk: pk} -} - -type cryptoSigner struct { - pk *jose.JSONWebKey - signer crypto.Signer - rand io.Reader -} - -func (s *cryptoSigner) Public() *jose.JSONWebKey { - return s.pk -} - -func (s *cryptoSigner) Algs() []jose.SignatureAlgorithm { - switch key := s.signer.Public().(type) { - case ed25519.PublicKey: - return []jose.SignatureAlgorithm{jose.EdDSA} - case *ecdsa.PublicKey: - switch key.Curve { - case elliptic.P256(): - return []jose.SignatureAlgorithm{jose.ES256} - case elliptic.P384(): - return []jose.SignatureAlgorithm{jose.ES384} - case elliptic.P521(): - return []jose.SignatureAlgorithm{jose.ES512} - default: - return nil - } - case *rsa.PublicKey: - return []jose.SignatureAlgorithm{jose.RS256, jose.RS384, jose.RS512, jose.PS256, jose.PS384, jose.PS512} - default: - return nil - } -} - -func (s *cryptoSigner) SignPayload(payload []byte, alg jose.SignatureAlgorithm) ([]byte, error) { - var hash crypto.Hash - switch alg { - case jose.EdDSA: - case jose.RS256, jose.PS256, jose.ES256: - hash = crypto.SHA256 - case jose.RS384, jose.PS384, jose.ES384: - hash = crypto.SHA384 - case jose.RS512, jose.PS512, jose.ES512: - hash = crypto.SHA512 - default: - return nil, jose.ErrUnsupportedAlgorithm - } - - var hashed []byte - if hash != crypto.Hash(0) { - hasher := hash.New() - if _, err := hasher.Write(payload); err != nil { - return nil, err - } - hashed = hasher.Sum(nil) - } - - var ( - out []byte - err error - ) - switch alg { - case jose.EdDSA: - out, err = s.signer.Sign(s.rand, payload, crypto.Hash(0)) - case jose.ES256, jose.ES384, jose.ES512: - var byteLen int - switch alg { - case jose.ES256: - byteLen = 32 - case jose.ES384: - byteLen = 48 - case jose.ES512: - byteLen = 66 - } - var b []byte - b, err = s.signer.Sign(s.rand, hashed, hash) - if err != nil { - return nil, err - } - - sig := struct { - R, S *big.Int - }{} - if _, err = asn1.Unmarshal(b, &sig); err != nil { - return nil, err - } - - rBytes := sig.R.Bytes() - out = make([]byte, byteLen) - copy(out[byteLen-len(rBytes):], rBytes) - - sBytes := sig.S.Bytes() - sBytesPadded := make([]byte, byteLen) - copy(sBytesPadded[byteLen-len(sBytes):], sBytes) - - out = append(out, sBytesPadded...) 
- case jose.RS256, jose.RS384, jose.RS512: - out, err = s.signer.Sign(s.rand, hashed, hash) - case jose.PS256, jose.PS384, jose.PS512: - out, err = s.signer.Sign(s.rand, hashed, &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - Hash: hash, - }) - } - return out, err -} diff --git a/vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go b/vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go index 1cf8bb5703..365a1d0477 100644 --- a/vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go +++ b/vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go @@ -14,14 +14,10 @@ import ( ) var ( -<<<<<<< HEAD reg = regexp.MustCompile(`<([/!]?)([^>]+?)(/?)>`) reXMLComments = regexp.MustCompile(`(?s)()`) reSpaces = regexp.MustCompile(`(?s)>\s+<`) reNewlines = regexp.MustCompile(`\r*\n`) -======= - reg = regexp.MustCompile(`<([/!]?)([^>]+?)(/?)>`) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // NL is the newline string used in XML output. NL = "\n" ) @@ -40,33 +36,19 @@ func FormatXML(xmls, prefix, indent string, nestedTagsInComments ...bool) string if len(nestedTagsInComments) > 0 { nestedTagsInComment = nestedTagsInComments[0] } -<<<<<<< HEAD src := reSpaces.ReplaceAllString(xmls, "><") if nestedTagsInComment { src = reXMLComments.ReplaceAllStringFunc(src, func(m string) string { parts := reXMLComments.FindStringSubmatch(m) p2 := reNewlines.ReplaceAllString(parts[2], " ") -======= - reXmlComments := regexp.MustCompile(`(?s)()`) - src := regexp.MustCompile(`(?s)>\s+<`).ReplaceAllString(xmls, "><") - if nestedTagsInComment { - src = reXmlComments.ReplaceAllStringFunc(src, func(m string) string { - parts := reXmlComments.FindStringSubmatch(m) - p2 := regexp.MustCompile(`\r*\n`).ReplaceAllString(parts[2], " ") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return parts[1] + html.EscapeString(p2) + parts[3] }) } rf := replaceTag(prefix, indent) r := prefix + reg.ReplaceAllStringFunc(src, rf) if nestedTagsInComment { -<<<<<<< HEAD r = reXMLComments.ReplaceAllStringFunc(r, func(m string) string { parts := reXMLComments.FindStringSubmatch(m) -======= - r = reXmlComments.ReplaceAllStringFunc(r, func(m string) string { - parts := reXmlComments.FindStringSubmatch(m) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return parts[1] + html.UnescapeString(parts[2]) + parts[3] }) } diff --git a/vendor/github.com/golangci/gofmt/gofmt/golangci.go b/vendor/github.com/golangci/gofmt/gofmt/golangci.go index 054028f5e6..459e872199 100644 --- a/vendor/github.com/golangci/gofmt/gofmt/golangci.go +++ b/vendor/github.com/golangci/gofmt/gofmt/golangci.go @@ -14,14 +14,11 @@ import ( "github.com/golangci/gofmt/gofmt/internal/diff" ) -<<<<<<< HEAD type Options struct { NeedSimplify bool RewriteRules []RewriteRule } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var parserModeMu sync.RWMutex type RewriteRule struct { @@ -81,7 +78,6 @@ func RunRewrite(filename string, needSimplify bool, rewriteRules []RewriteRule) return diff.Diff(oldName, src, newName, res), nil } -<<<<<<< HEAD func Source(filename string, src []byte, opts Options) ([]byte, error) { fset := token.NewFileSet() @@ -108,8 +104,6 @@ func Source(filename string, src []byte, opts Options) ([]byte, error) { return format(fset, file, sourceAdj, indentAdj, src, printer.Config{Mode: printerMode, Tabwidth: tabWidth}) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func rewriteFileContent(fset *token.FileSet, file *ast.File, rewriteRules []RewriteRule) (*ast.File, error) { for _, rewriteRule := range rewriteRules { pattern, err := 
parseExpression(rewriteRule.Pattern, "pattern") diff --git a/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/main.go b/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/main.go index 7f5f9cca05..bf235bf17f 100644 --- a/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/main.go +++ b/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/main.go @@ -1,10 +1,7 @@ package main import ( -<<<<<<< HEAD "cmp" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "fmt" "os" "runtime/debug" @@ -67,23 +64,9 @@ func createBuildInfo() commands.BuildInfo { } } -<<<<<<< HEAD revision = cmp.Or(revision, "unknown") modified = cmp.Or(modified, "?") info.Date = cmp.Or(info.Date, "(unknown)") -======= - if revision == "" { - revision = "unknown" - } - - if modified == "" { - modified = "?" - } - - if info.Date == "" { - info.Date = "(unknown)" - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) info.Commit = fmt.Sprintf("(%s, modified: %s, mod sum: %q)", revision, modified, buildInfo.Main.Sum) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/config_verify.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/config_verify.go index e4efa6b7c1..5a26c75aed 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/config_verify.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/config_verify.go @@ -1,31 +1,20 @@ package commands import ( -<<<<<<< HEAD "context" "encoding/json" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "errors" "fmt" "net/http" "os" "path/filepath" -<<<<<<< HEAD "strconv" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "strings" "time" hcversion "github.com/hashicorp/go-version" "github.com/pelletier/go-toml/v2" -<<<<<<< HEAD "github.com/santhosh-tekuri/jsonschema/v6" -======= - "github.com/santhosh-tekuri/jsonschema/v5" - "github.com/santhosh-tekuri/jsonschema/v5/httploader" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/spf13/cobra" "github.com/spf13/pflag" "gopkg.in/yaml.v3" @@ -56,13 +45,7 @@ func (c *configCommand) executeVerify(cmd *cobra.Command, _ []string) error { return fmt.Errorf("[%s] validate: %w", usedConfigFile, err) } -<<<<<<< HEAD printValidationDetail(cmd, v.DetailedOutput()) -======= - detail := v.DetailedOutput() - - printValidationDetail(cmd, &detail) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return errors.New("the configuration contains invalid elements") } @@ -117,19 +100,12 @@ func createSchemaURL(flags *pflag.FlagSet, buildInfo BuildInfo) (string, error) } func validateConfiguration(schemaPath, targetFile string) error { -<<<<<<< HEAD compiler := jsonschema.NewCompiler() compiler.UseLoader(jsonschema.SchemeURLLoader{ "file": jsonschema.FileLoader{}, "https": newJSONSchemaHTTPLoader(), }) compiler.DefaultDraft(jsonschema.Draft7) -======= - httploader.Client = &http.Client{Timeout: 2 * time.Second} - - compiler := jsonschema.NewCompiler() - compiler.Draft = jsonschema.Draft7 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) schema, err := compiler.Compile(schemaPath) if err != nil { @@ -159,7 +135,6 @@ func validateConfiguration(schemaPath, targetFile string) error { return schema.Validate(m) } -<<<<<<< HEAD func printValidationDetail(cmd *cobra.Command, detail *jsonschema.OutputUnit) { if detail.Error != nil { data, _ := json.Marshal(detail.Error) @@ -167,12 +142,6 @@ func printValidationDetail(cmd *cobra.Command, detail *jsonschema.OutputUnit) { cmd.PrintErrf("jsonschema: %q does not 
validate with %q: %s\n", strings.ReplaceAll(strings.TrimPrefix(detail.InstanceLocation, "/"), "/", "."), detail.KeywordLocation, details) -======= -func printValidationDetail(cmd *cobra.Command, detail *jsonschema.Detailed) { - if detail.Error != "" { - cmd.PrintErrf("jsonschema: %q does not validate with %q: %s\n", - strings.ReplaceAll(strings.TrimPrefix(detail.InstanceLocation, "/"), "/", "."), detail.KeywordLocation, detail.Error) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } for _, d := range detail.Errors { @@ -213,7 +182,6 @@ func decodeTomlFile(filename string) (any, error) { return m, nil } -<<<<<<< HEAD type jsonschemaHTTPLoader struct { *http.Client @@ -244,5 +212,3 @@ func (l jsonschemaHTTPLoader) Load(url string) (any, error) { return jsonschema.UnmarshalJSON(resp.Body) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/flagsets.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/flagsets.go index 44bd56e81b..9f17018b2f 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/flagsets.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/flagsets.go @@ -28,19 +28,11 @@ func setupLintersFlagSet(v *viper.Viper, fs *pflag.FlagSet) { color.GreenString("Enable only fast linters from enabled linters set (first run won't be fast)")) internal.AddHackedStringSliceP(fs, "presets", "p", -<<<<<<< HEAD formatList("Enable presets of linters:", lintersdb.AllPresets(), "Run 'golangci-lint help linters' to see them.", "This option implies option --disable-all", ), ) -======= - color.GreenString(fmt.Sprintf("Enable presets (%s) of linters.\n"+ - "Run 'golangci-lint help linters' to see them.\n"+ - "This option implies option --disable-all", - strings.Join(lintersdb.AllPresets(), "|"), - ))) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) fs.StringSlice("enable-only", nil, color.GreenString("Override linters configuration section to only run the specific linter(s)")) // Flags only. @@ -57,22 +49,13 @@ func setupRunFlagSet(v *viper.Viper, fs *pflag.FlagSet) { internal.AddFlagAndBind(v, fs, fs.String, "go", "run.go", "", color.GreenString("Targeted Go version")) internal.AddHackedStringSlice(fs, "build-tags", color.GreenString("Build tags")) -<<<<<<< HEAD internal.AddFlagAndBind(v, fs, fs.Duration, "timeout", "run.timeout", defaultTimeout, color.GreenString("Timeout for total work. If <= 0, the timeout is disabled")) -======= - internal.AddFlagAndBind(v, fs, fs.Duration, "timeout", "run.timeout", defaultTimeout, color.GreenString("Timeout for total work")) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) internal.AddFlagAndBind(v, fs, fs.Bool, "tests", "run.tests", true, color.GreenString("Analyze tests (*_test.go)")) internal.AddDeprecatedHackedStringSlice(fs, "skip-files", color.GreenString("Regexps of files to skip")) internal.AddDeprecatedHackedStringSlice(fs, "skip-dirs", color.GreenString("Regexps of directories to skip")) -<<<<<<< HEAD -======= - internal.AddDeprecatedFlagAndBind(v, fs, fs.Bool, "skip-dirs-use-default", "run.skip-dirs-use-default", true, - getDefaultDirectoryExcludeHelp()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) const allowParallelDesc = "Allow multiple parallel golangci-lint instances running.\n" + "If false (default) - golangci-lint acquires file lock on start." 
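A recurring cleanup in the vendored golangci-lint hunks above and below is replacing multi-line empty-value fallbacks with Go 1.22's cmp.Or, e.g. revision = cmp.Or(revision, "unknown") in cmd/golangci-lint/main.go. As a minimal standalone sketch of the semantics (the values are illustrative; only the standard library is assumed):

    package main

    import (
        "cmp"
        "fmt"
    )

    func main() {
        // cmp.Or returns the first argument that is not the zero value,
        // collapsing `if revision == "" { revision = "unknown" }` into one call.
        revision := ""
        fmt.Println(cmp.Or(revision, "unknown")) // prints: unknown

        // It is variadic, so fallback chains stay flat: candidate values
        // first, hard default last.
        fmt.Println(cmp.Or("", "", "1.17")) // prints: 1.17
    }

The same one-liner shows up below for the GOVERSION fallback in pkg/config/config.go and the Gofumpt.LangVersion default in pkg/config/loader.go; behavior is unchanged because every replaced branch only fired on the zero value.
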
@@ -85,20 +68,11 @@ func setupRunFlagSet(v *viper.Viper, fs *pflag.FlagSet) { func setupOutputFlagSet(v *viper.Viper, fs *pflag.FlagSet) { internal.AddFlagAndBind(v, fs, fs.String, "out-format", "output.formats", config.OutFormatColoredLineNumber, -<<<<<<< HEAD formatList("Formats of output:", config.AllOutputFormats)) -======= - color.GreenString(fmt.Sprintf("Formats of output: %s", strings.Join(config.AllOutputFormats, "|")))) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) internal.AddFlagAndBind(v, fs, fs.Bool, "print-issued-lines", "output.print-issued-lines", true, color.GreenString("Print lines of code with issue")) internal.AddFlagAndBind(v, fs, fs.Bool, "print-linter-name", "output.print-linter-name", true, color.GreenString("Print linter name in issue line")) -<<<<<<< HEAD -======= - internal.AddFlagAndBind(v, fs, fs.Bool, "uniq-by-line", "output.uniq-by-line", true, - color.GreenString("Make issues output unique by line")) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) internal.AddFlagAndBind(v, fs, fs.Bool, "sort-results", "output.sort-results", false, color.GreenString("Sort linter results")) internal.AddFlagAndBind(v, fs, fs.StringSlice, "sort-order", "output.sort-order", nil, @@ -120,20 +94,13 @@ func setupIssuesFlagSet(v *viper.Viper, fs *pflag.FlagSet) { color.GreenString("Maximum issues count per one linter. Set to 0 to disable")) internal.AddFlagAndBind(v, fs, fs.Int, "max-same-issues", "issues.max-same-issues", 3, color.GreenString("Maximum count of issues with the same text. Set to 0 to disable")) -<<<<<<< HEAD internal.AddFlagAndBind(v, fs, fs.Bool, "uniq-by-line", "issues.uniq-by-line", true, color.GreenString("Make issues output unique by line")) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) internal.AddHackedStringSlice(fs, "exclude-files", color.GreenString("Regexps of files to exclude")) internal.AddHackedStringSlice(fs, "exclude-dirs", color.GreenString("Regexps of directories to exclude")) internal.AddFlagAndBind(v, fs, fs.Bool, "exclude-dirs-use-default", "issues.exclude-dirs-use-default", true, -<<<<<<< HEAD formatList("Use or not use default excluded directories:", processors.StdExcludeDirRegexps)) -======= - getDefaultDirectoryExcludeHelp()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) internal.AddFlagAndBind(v, fs, fs.String, "exclude-generated", "issues.exclude-generated", processors.AutogeneratedModeLax, color.GreenString("Mode of the generated files analysis")) @@ -155,7 +122,6 @@ func setupIssuesFlagSet(v *viper.Viper, fs *pflag.FlagSet) { color.GreenString("Fix found issues (if it's supported by the linter)")) } -<<<<<<< HEAD func formatList(head string, items []string, foot ...string) string { parts := []string{color.GreenString(head)} for _, p := range items { @@ -173,8 +139,6 @@ func formatList(head string, items []string, foot ...string) string { return strings.Join(parts, "\n") } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func getDefaultIssueExcludeHelp() string { parts := []string{color.GreenString("Use or not use default excludes:")} @@ -187,15 +151,3 @@ func getDefaultIssueExcludeHelp() string { return strings.Join(parts, "\n") } -<<<<<<< HEAD -======= - -func getDefaultDirectoryExcludeHelp() string { - parts := []string{color.GreenString("Use or not use default excluded directories:")} - for _, dir := range processors.StdExcludeDirRegexps { - parts = append(parts, fmt.Sprintf(" - %s", color.YellowString(dir))) - } - parts = append(parts, "") - return 
strings.Join(parts, "\n") -} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/help.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/help.go index 4a3fb56737..02a586f4c1 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/help.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/help.go @@ -1,19 +1,13 @@ package commands import ( -<<<<<<< HEAD "encoding/json" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "fmt" "slices" "sort" "strings" -<<<<<<< HEAD "unicode" "unicode/utf8" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/fatih/color" "github.com/spf13/cobra" @@ -24,7 +18,6 @@ import ( "github.com/golangci/golangci-lint/pkg/logutils" ) -<<<<<<< HEAD type linterHelp struct { Name string `json:"name"` Desc string `json:"description"` @@ -46,11 +39,6 @@ type helpCommand struct { opts helpOptions -======= -type helpCommand struct { - cmd *cobra.Command - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) dbManager *lintersdb.Manager log logutils.Log @@ -68,7 +56,6 @@ func newHelpCommand(logger logutils.Log) *helpCommand { }, } -<<<<<<< HEAD lintersCmd := &cobra.Command{ Use: "linters", Short: "Help about linters", @@ -84,18 +71,6 @@ func newHelpCommand(logger logutils.Log) *helpCommand { fs.SortFlags = false // sort them as they are defined here fs.BoolVar(&c.opts.JSON, "json", false, color.GreenString("Display as JSON")) -======= - helpCmd.AddCommand( - &cobra.Command{ - Use: "linters", - Short: "Help about linters", - Args: cobra.NoArgs, - ValidArgsFunction: cobra.NoFileCompletions, - Run: c.execute, - PreRunE: c.preRunE, - }, - ) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.cmd = helpCmd @@ -115,7 +90,6 @@ func (c *helpCommand) preRunE(_ *cobra.Command, _ []string) error { return nil } -<<<<<<< HEAD func (c *helpCommand) execute(_ *cobra.Command, _ []string) error { if c.opts.JSON { return c.printJSON() @@ -151,9 +125,6 @@ func (c *helpCommand) printJSON() error { } func (c *helpCommand) print() { -======= -func (c *helpCommand) execute(_ *cobra.Command, _ []string) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var enabledLCs, disabledLCs []*linter.Config for _, lc := range c.dbManager.GetAllSupportedLinterConfigs() { if lc.Internal { @@ -213,23 +184,13 @@ func printLinters(lcs []*linter.Config) { }) for _, lc := range lcs { -<<<<<<< HEAD desc := formatDescription(lc.Linter.Desc()) -======= - // If the linter description spans multiple lines, truncate everything following the first newline - linterDescription := lc.Linter.Desc() - firstNewline := strings.IndexRune(linterDescription, '\n') - if firstNewline > 0 { - linterDescription = linterDescription[:firstNewline] - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) deprecatedMark := "" if lc.IsDeprecated() { deprecatedMark = " [" + color.RedString("deprecated") + "]" } -<<<<<<< HEAD var capabilities []string if !lc.IsSlowLinter() { capabilities = append(capabilities, color.BlueString("fast")) @@ -266,9 +227,3 @@ func formatDescription(desc string) string { return string(rawDesc) } -======= - _, _ = fmt.Fprintf(logutils.StdOut, "%s%s: %s [fast: %t, auto-fix: %t]\n", - color.YellowString(lc.Name()), deprecatedMark, linterDescription, !lc.IsSlowLinter(), lc.CanAutoFix) - } -} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go 
b/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go index 72bbb4a7ba..d9aa7578cd 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go @@ -238,31 +238,21 @@ func (c *runCommand) execute(_ *cobra.Command, args []string) { needTrackResources := logutils.IsVerbose() || c.opts.PrintResourcesUsage trackResourcesEndCh := make(chan struct{}) -<<<<<<< HEAD // Note: this defer must be before ctx.cancel defer defer func() { // wait until resource tracking finished to print properly if needTrackResources { -======= - defer func() { // XXX: this defer must be before ctx.cancel defer - if needTrackResources { // wait until resource tracking finished to print properly ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) <-trackResourcesEndCh } }() -<<<<<<< HEAD ctx := context.Background() if c.cfg.Run.Timeout > 0 { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(ctx, c.cfg.Run.Timeout) defer cancel() } -======= - ctx, cancel := context.WithTimeout(context.Background(), c.cfg.Run.Timeout) - defer cancel() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if needTrackResources { go watchResources(ctx, trackResourcesEndCh, c.log, c.debugf) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/config.go b/vendor/github.com/golangci/golangci-lint/pkg/config/config.go index 571e8e304a..579eddf594 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/config/config.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/config.go @@ -1,7 +1,6 @@ package config import ( -<<<<<<< HEAD "cmp" "fmt" "os" @@ -12,13 +11,6 @@ import ( hcversion "github.com/hashicorp/go-version" "github.com/ldez/grignotin/gomod" "golang.org/x/mod/modfile" -======= - "os" - "strings" - - hcversion "github.com/hashicorp/go-version" - "github.com/ldez/gomoddirectives" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // Config encapsulates the config data specified in the golangci-lint YAML config file. @@ -93,16 +85,7 @@ func detectGoVersion() string { return goVersion } -<<<<<<< HEAD return cmp.Or(os.Getenv("GOVERSION"), "1.17") -======= - v := os.Getenv("GOVERSION") - if v != "" { - return v - } - - return "1.17" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // detectGoVersionFromGoMod tries to get Go version from go.mod. @@ -110,7 +93,6 @@ func detectGoVersion() string { // else it returns `go` version if present, // else it returns empty. 
func detectGoVersionFromGoMod() string { -<<<<<<< HEAD modPath, err := gomod.GetGoModPath() if err != nil { modPath = detectGoModFallback() @@ -121,10 +103,6 @@ func detectGoVersionFromGoMod() string { file, err := parseGoMod(modPath) if err != nil { -======= - file, _ := gomoddirectives.GetModuleFile() - if file == nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return "" } @@ -140,7 +118,6 @@ func detectGoVersionFromGoMod() string { return "" } -<<<<<<< HEAD func parseGoMod(goMod string) (*modfile.File, error) { raw, err := os.ReadFile(filepath.Clean(goMod)) @@ -179,5 +156,3 @@ func detectGoModFallback() string { return goMod.GoMod } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/issues.go b/vendor/github.com/golangci/golangci-lint/pkg/config/issues.go index 5cfba4ce6e..081b87624d 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/config/issues.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/issues.go @@ -117,14 +117,9 @@ type Issues struct { UseDefaultExcludeDirs bool `mapstructure:"exclude-dirs-use-default"` -<<<<<<< HEAD MaxIssuesPerLinter int `mapstructure:"max-issues-per-linter"` MaxSameIssues int `mapstructure:"max-same-issues"` UniqByLine bool `mapstructure:"uniq-by-line"` -======= - MaxIssuesPerLinter int `mapstructure:"max-issues-per-linter"` - MaxSameIssues int `mapstructure:"max-same-issues"` ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) DiffFromRevision string `mapstructure:"new-from-rev"` DiffPatchFilePath string `mapstructure:"new-from-patch"` @@ -133,11 +128,7 @@ type Issues struct { NeedFix bool `mapstructure:"fix"` -<<<<<<< HEAD ExcludeGeneratedStrict *bool `mapstructure:"exclude-generated-strict"` // Deprecated: use ExcludeGenerated instead. -======= - ExcludeGeneratedStrict bool `mapstructure:"exclude-generated-strict"` // Deprecated: use ExcludeGenerated instead. 
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (i *Issues) Validate() error { diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go b/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go index a91b293de8..8e6c184ca4 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go @@ -105,10 +105,7 @@ var defaultLintersSettings = LintersSettings{ Kitlog: true, Klog: true, Logr: true, -<<<<<<< HEAD Slog: true, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Zap: true, RequireStringKey: false, NoPrintfLike: false, @@ -172,10 +169,6 @@ var defaultLintersSettings = LintersSettings{ Unused: UnusedSettings{ FieldWritesAreUses: true, PostStatementsAreReads: false, -<<<<<<< HEAD -======= - ExportedIsUsed: true, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ExportedFieldsAreUsed: true, ParametersAreUsed: true, LocalVariablesAreUsed: true, @@ -185,7 +178,6 @@ var defaultLintersSettings = LintersSettings{ HTTPMethod: true, HTTPStatusCode: true, }, -<<<<<<< HEAD UseTesting: UseTestingSettings{ ContextBackground: true, ContextTodo: true, @@ -195,8 +187,6 @@ var defaultLintersSettings = LintersSettings{ OSTempDir: false, OSCreateTemp: true, }, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Varnamelen: VarnamelenSettings{ MaxDistance: 5, MinNameLength: 3, @@ -280,10 +270,7 @@ type LintersSettings struct { Promlinter PromlinterSettings ProtoGetter ProtoGetterSettings Reassign ReassignSettings -<<<<<<< HEAD Recvcheck RecvcheckSettings -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Revive ReviveSettings RowsErrCheck RowsErrCheckSettings SlogLint SlogLintSettings @@ -300,10 +287,7 @@ type LintersSettings struct { Unparam UnparamSettings Unused UnusedSettings UseStdlibVars UseStdlibVarsSettings -<<<<<<< HEAD UseTesting UseTestingSettings -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Varnamelen VarnamelenSettings Whitespace WhitespaceSettings Wrapcheck WrapcheckSettings @@ -345,15 +329,10 @@ type BiDiChkSettings struct { } type CopyLoopVarSettings struct { -<<<<<<< HEAD CheckAlias bool `mapstructure:"check-alias"` // Deprecated: use CheckAlias IgnoreAlias *bool `mapstructure:"ignore-alias"` -======= - IgnoreAlias bool `mapstructure:"ignore-alias"` // Deprecated: use CheckAlias - CheckAlias bool `mapstructure:"check-alias"` ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type Cyclop struct { @@ -500,19 +479,12 @@ type FunlenSettings struct { } type GciSettings struct { -<<<<<<< HEAD Sections []string `mapstructure:"sections"` NoInlineComments bool `mapstructure:"no-inline-comments"` NoPrefixComments bool `mapstructure:"no-prefix-comments"` SkipGenerated bool `mapstructure:"skip-generated"` CustomOrder bool `mapstructure:"custom-order"` NoLexOrder bool `mapstructure:"no-lex-order"` -======= - Sections []string `mapstructure:"sections"` - SkipGenerated bool `mapstructure:"skip-generated"` - CustomOrder bool `mapstructure:"custom-order"` - NoLexOrder bool `mapstructure:"no-lex-order"` ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Deprecated: use Sections instead. 
LocalPrefixes string `mapstructure:"local-prefixes"` @@ -535,10 +507,7 @@ type GinkgoLinterSettings struct { type GoChecksumTypeSettings struct { DefaultSignifiesExhaustive bool `mapstructure:"default-signifies-exhaustive"` -<<<<<<< HEAD IncludeSharedInterfaces bool `mapstructure:"include-shared-interfaces"` -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type GocognitSettings struct { @@ -581,11 +550,7 @@ type GodotSettings struct { Period bool `mapstructure:"period"` // Deprecated: use Scope instead -<<<<<<< HEAD CheckAll *bool `mapstructure:"check-all"` -======= - CheckAll bool `mapstructure:"check-all"` ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type GodoxSettings struct { @@ -625,14 +590,11 @@ type GoModDirectivesSettings struct { ReplaceLocal bool `mapstructure:"replace-local"` ExcludeForbidden bool `mapstructure:"exclude-forbidden"` RetractAllowNoExplanation bool `mapstructure:"retract-allow-no-explanation"` -<<<<<<< HEAD ToolchainForbidden bool `mapstructure:"toolchain-forbidden"` ToolchainPattern string `mapstructure:"toolchain-pattern"` ToolForbidden bool `mapstructure:"tool-forbidden"` GoDebugForbidden bool `mapstructure:"go-debug-forbidden"` GoVersionPattern string `mapstructure:"go-version-pattern"` -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type GoModGuardSettings struct { @@ -681,11 +643,7 @@ type GovetSettings struct { Settings map[string]map[string]any // Deprecated: the linter should be enabled inside Enable. -<<<<<<< HEAD CheckShadowing *bool `mapstructure:"check-shadowing"` -======= - CheckShadowing bool `mapstructure:"check-shadowing"` ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (cfg *GovetSettings) Validate() error { @@ -750,10 +708,7 @@ type LoggerCheckSettings struct { Kitlog bool `mapstructure:"kitlog"` Klog bool `mapstructure:"klog"` Logr bool `mapstructure:"logr"` -<<<<<<< HEAD Slog bool `mapstructure:"slog"` -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Zap bool `mapstructure:"zap"` RequireStringKey bool `mapstructure:"require-string-key"` NoPrintfLike bool `mapstructure:"no-printf-like"` @@ -865,14 +820,11 @@ type ReassignSettings struct { Patterns []string `mapstructure:"patterns"` } -<<<<<<< HEAD type RecvcheckSettings struct { DisableBuiltin bool `mapstructure:"disable-builtin"` Exclusions []string `mapstructure:"exclusions"` } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type ReviveSettings struct { Go string `mapstructure:"-"` MaxOpenFiles int `mapstructure:"max-open-files"` @@ -912,11 +864,7 @@ type SlogLintSettings struct { ArgsOnSepLines bool `mapstructure:"args-on-sep-lines"` // Deprecated: use Context instead. 
-<<<<<<< HEAD ContextOnly *bool `mapstructure:"context-only"` -======= - ContextOnly bool `mapstructure:"context-only"` ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type SpancheckSettings struct { @@ -947,7 +895,6 @@ type TagAlignSettings struct { } type TagliatelleSettings struct { -<<<<<<< HEAD Case TagliatelleCase } @@ -973,12 +920,6 @@ type TagliatelleExtendedRule struct { Case string ExtraInitialisms bool InitialismOverrides map[string]bool -======= - Case struct { - Rules map[string]string - UseFieldName bool `mapstructure:"use-field-name"` - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type TestifylintSettings struct { @@ -1043,7 +984,6 @@ type UseStdlibVarsSettings struct { TimeLayout bool `mapstructure:"time-layout"` CryptoHash bool `mapstructure:"crypto-hash"` DefaultRPCPath bool `mapstructure:"default-rpc-path"` -<<<<<<< HEAD SQLIsolationLevel bool `mapstructure:"sql-isolation-level"` TLSSignatureScheme bool `mapstructure:"tls-signature-scheme"` ConstantKind bool `mapstructure:"constant-kind"` @@ -1062,13 +1002,6 @@ type UseTestingSettings struct { OSSetenv bool `mapstructure:"os-setenv"` OSTempDir bool `mapstructure:"os-temp-dir"` OSCreateTemp bool `mapstructure:"os-create-temp"` -======= - OSDevNull bool `mapstructure:"os-dev-null"` // Deprecated - SQLIsolationLevel bool `mapstructure:"sql-isolation-level"` - TLSSignatureScheme bool `mapstructure:"tls-signature-scheme"` - ConstantKind bool `mapstructure:"constant-kind"` - SyslogPriority bool `mapstructure:"syslog-priority"` // Deprecated ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type UnconvertSettings struct { @@ -1084,20 +1017,13 @@ type UnparamSettings struct { type UnusedSettings struct { FieldWritesAreUses bool `mapstructure:"field-writes-are-uses"` PostStatementsAreReads bool `mapstructure:"post-statements-are-reads"` -<<<<<<< HEAD -======= - ExportedIsUsed bool `mapstructure:"exported-is-used"` // Deprecated ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ExportedFieldsAreUsed bool `mapstructure:"exported-fields-are-used"` ParametersAreUsed bool `mapstructure:"parameters-are-used"` LocalVariablesAreUsed bool `mapstructure:"local-variables-are-used"` GeneratedIsUsed bool `mapstructure:"generated-is-used"` -<<<<<<< HEAD // Deprecated ExportedIsUsed *bool `mapstructure:"exported-is-used"` -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type VarnamelenSettings struct { @@ -1119,10 +1045,7 @@ type WhitespaceSettings struct { } type WrapcheckSettings struct { -<<<<<<< HEAD ExtraIgnoreSigs []string `mapstructure:"extra-ignore-sigs"` -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // TODO(ldez): v2 the options must be renamed to use hyphen. 
IgnoreSigs []string `mapstructure:"ignoreSigs"` IgnoreSigRegexps []string `mapstructure:"ignoreSigRegexps"` diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/loader.go b/vendor/github.com/golangci/golangci-lint/pkg/config/loader.go index 4697a177f5..56f57d9d5d 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/config/loader.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/loader.go @@ -1,10 +1,7 @@ package config import ( -<<<<<<< HEAD "cmp" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "errors" "fmt" "os" @@ -296,13 +293,7 @@ func (l *Loader) handleGoVersion() { l.cfg.LintersSettings.ParallelTest.Go = l.cfg.Run.Go -<<<<<<< HEAD l.cfg.LintersSettings.Gofumpt.LangVersion = cmp.Or(l.cfg.LintersSettings.Gofumpt.LangVersion, l.cfg.Run.Go) -======= - if l.cfg.LintersSettings.Gofumpt.LangVersion == "" { - l.cfg.LintersSettings.Gofumpt.LangVersion = l.cfg.Run.Go - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) trimmedGoVersion := goutil.TrimGoVersion(l.cfg.Run.Go) @@ -330,7 +321,6 @@ func (l *Loader) handleDeprecation() error { l.cfg.Issues.ExcludeDirs = l.cfg.Run.SkipDirs } -<<<<<<< HEAD // Deprecated since v1.57.0 if l.cfg.Run.UseDefaultSkipDirs != nil { l.log.Warnf("The configuration option `run.skip-dirs-use-default` is deprecated, please use `issues.exclude-dirs-use-default`.") @@ -348,21 +338,6 @@ func (l *Loader) handleDeprecation() error { l.log.Warnf("The configuration option `output.uniq-by-line` is deprecated, please use `issues.uniq-by-line`") l.cfg.Issues.UniqByLine = *l.cfg.Output.UniqByLine } -======= - // The 2 options are true by default. - // Deprecated since v1.57.0 - if !l.cfg.Run.UseDefaultSkipDirs { - l.log.Warnf("The configuration option `run.skip-dirs-use-default` is deprecated, please use `issues.exclude-dirs-use-default`.") - } - l.cfg.Issues.UseDefaultExcludeDirs = l.cfg.Run.UseDefaultSkipDirs && l.cfg.Issues.UseDefaultExcludeDirs - - // The 2 options are false by default. - // Deprecated since v1.57.0 - if l.cfg.Run.ShowStats { - l.log.Warnf("The configuration option `run.show-stats` is deprecated, please use `output.show-stats`") - } - l.cfg.Output.ShowStats = l.cfg.Run.ShowStats || l.cfg.Output.ShowStats ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Deprecated since v1.57.0 if l.cfg.Output.Format != "" { @@ -385,17 +360,11 @@ func (l *Loader) handleDeprecation() error { } // Deprecated since v1.59.0 -<<<<<<< HEAD if l.cfg.Issues.ExcludeGeneratedStrict != nil { l.log.Warnf("The configuration option `issues.exclude-generated-strict` is deprecated, please use `issues.exclude-generated`") if !*l.cfg.Issues.ExcludeGeneratedStrict { l.cfg.Issues.ExcludeGenerated = "strict" // Don't use the constants to avoid cyclic dependencies. } -======= - if l.cfg.Issues.ExcludeGeneratedStrict { - l.log.Warnf("The configuration option `issues.exclude-generated-strict` is deprecated, please use `issues.exclude-generated`") - l.cfg.Issues.ExcludeGenerated = "strict" // Don't use the constants to avoid cyclic dependencies. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } l.handleLinterOptionDeprecations() @@ -403,27 +372,15 @@ func (l *Loader) handleDeprecation() error { return nil } -<<<<<<< HEAD func (l *Loader) handleLinterOptionDeprecations() { // Deprecated since v1.57.0, // but it was unofficially deprecated since v1.19 (2019) (https://github.com/golangci/golangci-lint/pull/697). 
if l.cfg.LintersSettings.Govet.CheckShadowing != nil { -======= -//nolint:gocyclo // the complexity cannot be reduced. -func (l *Loader) handleLinterOptionDeprecations() { - // Deprecated since v1.57.0, - // but it was unofficially deprecated since v1.19 (2019) (https://github.com/golangci/golangci-lint/pull/697). - if l.cfg.LintersSettings.Govet.CheckShadowing { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) l.log.Warnf("The configuration option `linters.govet.check-shadowing` is deprecated. " + "Please enable `shadow` instead, if you are not using `enable-all`.") } -<<<<<<< HEAD if l.cfg.LintersSettings.CopyLoopVar.IgnoreAlias != nil { -======= - if l.cfg.LintersSettings.CopyLoopVar.IgnoreAlias { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) l.log.Warnf("The configuration option `linters.copyloopvar.ignore-alias` is deprecated and ignored," + "please use `linters.copyloopvar.check-alias`.") } @@ -445,11 +402,7 @@ func (l *Loader) handleLinterOptionDeprecations() { } // Deprecated since v1.33.0. -<<<<<<< HEAD if l.cfg.LintersSettings.Godot.CheckAll != nil { -======= - if l.cfg.LintersSettings.Godot.CheckAll { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) l.log.Warnf("The configuration option `linters.godot.check-all` is deprecated, please use `linters.godot.scope: all`.") } @@ -474,16 +427,11 @@ func (l *Loader) handleLinterOptionDeprecations() { } // Deprecated since v1.60.0 -<<<<<<< HEAD if l.cfg.LintersSettings.Unused.ExportedIsUsed != nil { -======= - if !l.cfg.LintersSettings.Unused.ExportedIsUsed { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) l.log.Warnf("The configuration option `linters.unused.exported-is-used` is deprecated.") } // Deprecated since v1.58.0 -<<<<<<< HEAD if l.cfg.LintersSettings.SlogLint.ContextOnly != nil { l.log.Warnf("The configuration option `linters.sloglint.context-only` is deprecated, please use `linters.sloglint.context`.") l.cfg.LintersSettings.SlogLint.Context = cmp.Or(l.cfg.LintersSettings.SlogLint.Context, "all") @@ -491,26 +439,11 @@ func (l *Loader) handleLinterOptionDeprecations() { // Deprecated since v1.51.0 if l.cfg.LintersSettings.UseStdlibVars.OSDevNull != nil { -======= - if l.cfg.LintersSettings.SlogLint.ContextOnly { - l.log.Warnf("The configuration option `linters.sloglint.context-only` is deprecated, please use `linters.sloglint.context`.") - if l.cfg.LintersSettings.SlogLint.Context == "" { - l.cfg.LintersSettings.SlogLint.Context = "all" - } - } - - // Deprecated since v1.51.0 - if l.cfg.LintersSettings.UseStdlibVars.OSDevNull { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) l.log.Warnf("The configuration option `linters.usestdlibvars.os-dev-null` is deprecated.") } // Deprecated since v1.51.0 -<<<<<<< HEAD if l.cfg.LintersSettings.UseStdlibVars.SyslogPriority != nil { -======= - if l.cfg.LintersSettings.UseStdlibVars.SyslogPriority { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) l.log.Warnf("The configuration option `linters.usestdlibvars.syslog-priority` is deprecated.") } } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/output.go b/vendor/github.com/golangci/golangci-lint/pkg/config/output.go index 8cfc95c59a..aaa5183ec4 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/config/output.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/output.go @@ -43,10 +43,6 @@ type Output struct { Formats OutputFormats `mapstructure:"formats"` PrintIssuedLine bool `mapstructure:"print-issued-lines"` PrintLinterName bool 
`mapstructure:"print-linter-name"` -<<<<<<< HEAD -======= - UniqByLine bool `mapstructure:"uniq-by-line"` ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) SortResults bool `mapstructure:"sort-results"` SortOrder []string `mapstructure:"sort-order"` PathPrefix string `mapstructure:"path-prefix"` @@ -54,12 +50,9 @@ type Output struct { // Deprecated: use Formats instead. Format string `mapstructure:"format"` -<<<<<<< HEAD // Deprecated: use [Issues.UniqByLine] instead. UniqByLine *bool `mapstructure:"uniq-by-line"` -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (o *Output) Validate() error { diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/run.go b/vendor/github.com/golangci/golangci-lint/pkg/config/run.go index c0b648e0e3..784e8c2fad 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/config/run.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/run.go @@ -29,17 +29,10 @@ type Run struct { // Deprecated: use Issues.ExcludeDirs instead. SkipDirs []string `mapstructure:"skip-dirs"` // Deprecated: use Issues.UseDefaultExcludeDirs instead. -<<<<<<< HEAD UseDefaultSkipDirs *bool `mapstructure:"skip-dirs-use-default"` // Deprecated: use Output.ShowStats instead. ShowStats *bool `mapstructure:"show-stats"` -======= - UseDefaultSkipDirs bool `mapstructure:"skip-dirs-use-default"` - - // Deprecated: use Output.ShowStats instead. - ShowStats bool `mapstructure:"show-stats"` ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (r *Run) Validate() error { diff --git a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils.go b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils.go index 3a7415d8c4..91d12e5cd3 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils.go @@ -61,11 +61,7 @@ func EvalSymlinks(path string) (string, error) { } var er evalSymlinkRes -<<<<<<< HEAD er.path, er.err = evalSymlinks(path) -======= - er.path, er.err = filepath.EvalSymlinks(path) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) evalSymlinkCache.Store(path, er) return er.path, er.err diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/issue.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/issue.go index 7db8c589f9..854e7d15f0 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/issue.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/issue.go @@ -26,11 +26,7 @@ type EncodingIssue struct { Severity string Pos token.Position LineRange *result.Range -<<<<<<< HEAD SuggestedFixes []analysis.SuggestedFix -======= - Replacement *result.Replacement ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ExpectNoLint bool ExpectedNoLintLinter string } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner.go index 75e33dab38..3a8652486c 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner.go @@ -42,10 +42,7 @@ type Diagnostic struct { Analyzer *analysis.Analyzer Position token.Position Pkg *packages.Package -<<<<<<< HEAD File *token.File -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type runner struct { @@ -125,15 +122,9 @@ func (r *runner) makeAction(a *analysis.Analyzer, pkg *packages.Package, } act = actAlloc.alloc() -<<<<<<< HEAD act.Analyzer = a act.Package = pkg 
act.runner = r -======= - act.a = a - act.pkg = pkg - act.r = r ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) act.isInitialPkg = initialPkgs[pkg] act.needAnalyzeSource = initialPkgs[pkg] act.analysisDoneCh = make(chan struct{}) @@ -142,19 +133,11 @@ func (r *runner) makeAction(a *analysis.Analyzer, pkg *packages.Package, if len(a.FactTypes) > 0 { depsCount += len(pkg.Imports) } -<<<<<<< HEAD act.Deps = make([]*action, 0, depsCount) // Add a dependency on each required analyzers. for _, req := range a.Requires { act.Deps = append(act.Deps, r.makeAction(req, pkg, initialPkgs, actions, actAlloc)) -======= - act.deps = make([]*action, 0, depsCount) - - // Add a dependency on each required analyzers. - for _, req := range a.Requires { - act.deps = append(act.deps, r.makeAction(req, pkg, initialPkgs, actions, actAlloc)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } r.buildActionFactDeps(act, a, pkg, initialPkgs, actions, actAlloc) @@ -180,11 +163,7 @@ func (r *runner) buildActionFactDeps(act *action, a *analysis.Analyzer, pkg *pac sort.Strings(paths) // for determinism for _, path := range paths { dep := r.makeAction(a, pkg.Imports[path], initialPkgs, actions, actAlloc) -<<<<<<< HEAD act.Deps = append(act.Deps, dep) -======= - act.deps = append(act.deps, dep) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Need to register fact types for pkgcache proper gob encoding. @@ -225,11 +204,7 @@ func (r *runner) prepareAnalysis(pkgs []*packages.Package, for _, a := range analyzers { for _, pkg := range pkgs { root := r.makeAction(a, pkg, initialPkgs, actions, actAlloc) -<<<<<<< HEAD root.IsRoot = true -======= - root.isroot = true ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) roots = append(roots, root) } } @@ -246,11 +221,7 @@ func (r *runner) analyze(pkgs []*packages.Package, analyzers []*analysis.Analyze actionPerPkg := map[*packages.Package][]*action{} for _, act := range actions { -<<<<<<< HEAD actionPerPkg[act.Package] = append(actionPerPkg[act.Package], act) -======= - actionPerPkg[act.pkg] = append(actionPerPkg[act.pkg], act) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Fill Imports field. @@ -280,11 +251,7 @@ func (r *runner) analyze(pkgs []*packages.Package, analyzers []*analysis.Analyze } } for _, act := range actions { -<<<<<<< HEAD dfs(act.Package) -======= - dfs(act.pkg) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Limit memory and IO usage. 
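The config hunks above (pkg/config/issues.go, linters_settings.go, output.go, run.go, and the matching warnings in pkg/config/loader.go) consistently move deprecated options from value types to pointer types, for example ShowStats bool becoming ShowStats *bool. The pointer adds a third state: nil means the key was never set, so the loader can warn only on explicit use, including an explicit false that a plain bool could not distinguish from an absent key. A minimal sketch of that tri-state check (a standalone stand-in, not the vendored code itself):

    package main

    import "fmt"

    type Run struct {
        // Deprecated: nil means the key never appeared in the config file;
        // a non-nil pointer records an explicit user choice, true or false.
        ShowStats *bool
    }

    func handleDeprecation(cfg *Run) {
        // Warn only when the user actually wrote the key.
        if cfg.ShowStats != nil {
            fmt.Printf("`run.show-stats` is deprecated (set to %t), use `output.show-stats`\n", *cfg.ShowStats)
        }
    }

    func main() {
        handleDeprecation(&Run{}) // silent: key absent
        off := false
        handleDeprecation(&Run{ShowStats: &off}) // warns, even though the value is false
    }

This is also why the resolved sides above keep nil checks such as `if l.cfg.Issues.ExcludeGeneratedStrict != nil` where the discarded sides tested plain truthiness.
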
@@ -316,11 +283,7 @@ func extractDiagnostics(roots []*action) (retDiags []Diagnostic, retErrors []err for _, act := range actions { if !extracted[act] { extracted[act] = true -<<<<<<< HEAD visitAll(act.Deps) -======= - visitAll(act.deps) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) extract(act) } } @@ -337,7 +300,6 @@ func extractDiagnostics(roots []*action) (retDiags []Diagnostic, retErrors []err seen := make(map[key]bool) extract = func(act *action) { -<<<<<<< HEAD if act.Err != nil { if pe, ok := act.Err.(*errorutil.PanicError); ok { panic(pe) @@ -355,41 +317,17 @@ func extractDiagnostics(roots []*action) (retDiags []Diagnostic, retErrors []err file := act.Package.Fset.File(diag.Pos) k := key{Position: position, Analyzer: act.Analyzer, message: diag.Message} -======= - if act.err != nil { - if pe, ok := act.err.(*errorutil.PanicError); ok { - panic(pe) - } - retErrors = append(retErrors, fmt.Errorf("%s: %w", act.a.Name, act.err)) - return - } - - if act.isroot { - for _, diag := range act.diagnostics { - // We don't display a.Name/f.Category - // as most users don't care. - - posn := act.pkg.Fset.Position(diag.Pos) - k := key{posn, act.a, diag.Message} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if seen[k] { continue // duplicate } seen[k] = true retDiag := Diagnostic{ -<<<<<<< HEAD File: file, Diagnostic: diag, Analyzer: act.Analyzer, Position: position, Pkg: act.Package, -======= - Diagnostic: diag, - Analyzer: act.a, - Position: posn, - Pkg: act.pkg, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } retDiags = append(retDiags, retDiag) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action.go index ed488e2e40..2e1c414228 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action.go @@ -29,13 +29,8 @@ func (actAlloc *actionAllocator) alloc() *action { } func (act *action) waitUntilDependingAnalyzersWorked() { -<<<<<<< HEAD for _, dep := range act.Deps { if dep.Package == act.Package { -======= - for _, dep := range act.deps { - if dep.pkg == act.pkg { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) <-dep.analysisDoneCh } } @@ -44,7 +39,6 @@ func (act *action) waitUntilDependingAnalyzersWorked() { func (act *action) analyzeSafe() { defer func() { if p := recover(); p != nil { -<<<<<<< HEAD if !act.IsRoot { // This line allows to display "hidden" panic with analyzers like buildssa. // Some linters are dependent of sub-analyzers but when a sub-analyzer fails the linter is not aware of that, @@ -58,33 +52,13 @@ func (act *action) analyzeSafe() { }() act.runner.sw.TrackStage(act.Analyzer.Name, act.analyze) -======= - if !act.isroot { - // This line allows to display "hidden" panic with analyzers like buildssa. - // Some linters are dependent of sub-analyzers but when a sub-analyzer fails the linter is not aware of that, - // this results to another panic (ex: "interface conversion: interface {} is nil, not *buildssa.SSA"). 
- act.r.log.Errorf("%s: panic during analysis: %v, %s", act.a.Name, p, string(debug.Stack())) - } - - act.err = errorutil.NewPanicError(fmt.Sprintf("%s: package %q (isInitialPkg: %t, needAnalyzeSource: %t): %s", - act.a.Name, act.pkg.Name, act.isInitialPkg, act.needAnalyzeSource, p), debug.Stack()) - } - }() - - act.r.sw.TrackStage(act.a.Name, act.analyze) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (act *action) markDepsForAnalyzingSource() { // Horizontal deps (analyzer.Requires) must be loaded from source and analyzed before analyzing // this action. -<<<<<<< HEAD for _, dep := range act.Deps { if dep.Package == act.Package { -======= - for _, dep := range act.deps { - if dep.pkg == act.pkg { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Analyze source only for horizontal dependencies, e.g. from "buildssa". dep.needAnalyzeSource = true // can't be set in parallel } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action_cache.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action_cache.go index 7333cfc207..e06ea2979c 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action_cache.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action_cache.go @@ -26,11 +26,7 @@ func (act *action) loadCachedFacts() bool { return true // load cached facts only for non-initial packages } -<<<<<<< HEAD if len(act.Analyzer.FactTypes) == 0 { -======= - if len(act.a.FactTypes) == 0 { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return true // no need to load facts } @@ -42,11 +38,7 @@ func (act *action) loadCachedFacts() bool { } func (act *action) persistFactsToCache() error { -<<<<<<< HEAD analyzer := act.Analyzer -======= - analyzer := act.a ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if len(analyzer.FactTypes) == 0 { return nil } @@ -54,11 +46,7 @@ func (act *action) persistFactsToCache() error { // Merge new facts into the package and persist them. 
var facts []Fact for key, fact := range act.packageFacts { -<<<<<<< HEAD if key.pkg != act.Package.Types { -======= - if key.pkg != act.pkg.Types { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The fact is from inherited facts from another package continue } @@ -69,11 +57,7 @@ func (act *action) persistFactsToCache() error { } for key, fact := range act.objectFacts { obj := key.obj -<<<<<<< HEAD if obj.Pkg() != act.Package.Types { -======= - if obj.Pkg() != act.pkg.Types { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The fact is from inherited facts from another package continue } @@ -90,21 +74,14 @@ func (act *action) persistFactsToCache() error { }) } -<<<<<<< HEAD factsCacheDebugf("Caching %d facts for package %q and analyzer %s", len(facts), act.Package.Name, act.Analyzer.Name) return act.runner.pkgCache.Put(act.Package, cache.HashModeNeedAllDeps, factCacheKey(analyzer), facts) -======= - factsCacheDebugf("Caching %d facts for package %q and analyzer %s", len(facts), act.pkg.Name, act.a.Name) - - return act.r.pkgCache.Put(act.pkg, cache.HashModeNeedAllDeps, factCacheKey(analyzer), facts) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (act *action) loadPersistedFacts() bool { var facts []Fact -<<<<<<< HEAD err := act.runner.pkgCache.Get(act.Package, cache.HashModeNeedAllDeps, factCacheKey(act.Analyzer), &facts) if err != nil { if !errors.Is(err, cache.ErrMissing) && !errors.Is(err, io.EOF) { @@ -124,27 +101,6 @@ func (act *action) loadPersistedFacts() bool { continue } obj, err := objectpath.Object(act.Package.Types, objectpath.Path(f.Path)) -======= - err := act.r.pkgCache.Get(act.pkg, cache.HashModeNeedAllDeps, factCacheKey(act.a), &facts) - if err != nil { - if !errors.Is(err, cache.ErrMissing) && !errors.Is(err, io.EOF) { - act.r.log.Warnf("Failed to get persisted facts: %s", err) - } - - factsCacheDebugf("No cached facts for package %q and analyzer %s", act.pkg.Name, act.a.Name) - return false - } - - factsCacheDebugf("Loaded %d cached facts for package %q and analyzer %s", len(facts), act.pkg.Name, act.a.Name) - - for _, f := range facts { - if f.Path == "" { // this is a package fact - key := packageFactKey{act.pkg.Types, act.factType(f.Fact)} - act.packageFacts[key] = f.Fact - continue - } - obj, err := objectpath.Object(act.pkg.Types, objectpath.Path(f.Path)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { // Be lenient about these errors. For example, when // analyzing io/ioutil from source, we may get a fact diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_base.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_base.go deleted file mode 100644 index d868f8f5da..0000000000 --- a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_base.go +++ /dev/null @@ -1,370 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// -// Partial copy of https://github.com/golang/tools/blob/dba5486c2a1d03519930812112b23ed2c45c04fc/go/analysis/internal/checker/checker.go - -package goanalysis - -import ( - "bytes" - "encoding/gob" - "errors" - "fmt" - "go/types" - "reflect" - "time" - - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/packages" - - "github.com/golangci/golangci-lint/pkg/goanalysis/pkgerrors" -) - -// NOTE(ldez) altered: custom fields; remove 'once' and 'duration'. 
-// An action represents one unit of analysis work: the application of -// one analysis to one package. Actions form a DAG, both within a -// package (as different analyzers are applied, either in sequence or -// parallel), and across packages (as dependencies are analyzed). -type action struct { - a *analysis.Analyzer - pkg *packages.Package - pass *analysis.Pass - isroot bool - deps []*action - objectFacts map[objectFactKey]analysis.Fact - packageFacts map[packageFactKey]analysis.Fact - result any - diagnostics []analysis.Diagnostic - err error - - // NOTE(ldez) custom fields. - r *runner - analysisDoneCh chan struct{} - loadCachedFactsDone bool - loadCachedFactsOk bool - isInitialPkg bool - needAnalyzeSource bool -} - -// NOTE(ldez) no alteration. -type objectFactKey struct { - obj types.Object - typ reflect.Type -} - -// NOTE(ldez) no alteration. -type packageFactKey struct { - pkg *types.Package - typ reflect.Type -} - -// NOTE(ldez) no alteration. -func (act *action) String() string { - return fmt.Sprintf("%s@%s", act.a, act.pkg) -} - -// NOTE(ldez) altered version of `func (act *action) execOnce()`. -func (act *action) analyze() { - defer close(act.analysisDoneCh) // unblock actions depending on this action - - if !act.needAnalyzeSource { - return - } - - defer func(now time.Time) { - analyzeDebugf("go/analysis: %s: %s: analyzed package %q in %s", act.r.prefix, act.a.Name, act.pkg.Name, time.Since(now)) - }(time.Now()) - - // Report an error if any dependency failures. - var depErrors error - for _, dep := range act.deps { - if dep.err != nil { - depErrors = errors.Join(depErrors, errors.Unwrap(dep.err)) - } - } - - if depErrors != nil { - act.err = fmt.Errorf("failed prerequisites: %w", depErrors) - return - } - - // Plumb the output values of the dependencies - // into the inputs of this action. Also facts. - inputs := make(map[*analysis.Analyzer]any) - act.objectFacts = make(map[objectFactKey]analysis.Fact) - act.packageFacts = make(map[packageFactKey]analysis.Fact) - startedAt := time.Now() - - for _, dep := range act.deps { - if dep.pkg == act.pkg { - // Same package, different analysis (horizontal edge): - // in-memory outputs of prerequisite analyzers - // become inputs to this analysis pass. - inputs[dep.a] = dep.result - } else if dep.a == act.a { // (always true) - // Same analysis, different package (vertical edge): - // serialized facts produced by prerequisite analysis - // become available to this analysis pass. - inheritFacts(act, dep) - } - } - - factsDebugf("%s: Inherited facts in %s", act, time.Since(startedAt)) - - module := &analysis.Module{} // possibly empty (non nil) in go/analysis drivers. - if mod := act.pkg.Module; mod != nil { - module.Path = mod.Path - module.Version = mod.Version - module.GoVersion = mod.GoVersion - } - - // Run the analysis. 
- pass := &analysis.Pass{ - Analyzer: act.a, - Fset: act.pkg.Fset, - Files: act.pkg.Syntax, - OtherFiles: act.pkg.OtherFiles, - IgnoredFiles: act.pkg.IgnoredFiles, - Pkg: act.pkg.Types, - TypesInfo: act.pkg.TypesInfo, - TypesSizes: act.pkg.TypesSizes, - TypeErrors: act.pkg.TypeErrors, - Module: module, - - ResultOf: inputs, - Report: func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) }, - ImportObjectFact: act.importObjectFact, - ExportObjectFact: act.exportObjectFact, - ImportPackageFact: act.importPackageFact, - ExportPackageFact: act.exportPackageFact, - AllObjectFacts: act.allObjectFacts, - AllPackageFacts: act.allPackageFacts, - } - - act.pass = pass - act.r.passToPkgGuard.Lock() - act.r.passToPkg[pass] = act.pkg - act.r.passToPkgGuard.Unlock() - - if act.pkg.IllTyped { - // It looks like there should be !pass.Analyzer.RunDespiteErrors - // but govet's cgocall crashes on it. Govet itself contains !pass.Analyzer.RunDespiteErrors condition here, - // but it exits before it if packages.Load have failed. - act.err = fmt.Errorf("analysis skipped: %w", &pkgerrors.IllTypedError{Pkg: act.pkg}) - } else { - startedAt = time.Now() - - act.result, act.err = pass.Analyzer.Run(pass) - - analyzedIn := time.Since(startedAt) - if analyzedIn > time.Millisecond*10 { - debugf("%s: run analyzer in %s", act, analyzedIn) - } - } - - // disallow calls after Run - pass.ExportObjectFact = nil - pass.ExportPackageFact = nil - - err := act.persistFactsToCache() - if err != nil { - act.r.log.Warnf("Failed to persist facts to cache: %s", err) - } -} - -// NOTE(ldez) altered: logger; serialize. -// inheritFacts populates act.facts with -// those it obtains from its dependency, dep. -func inheritFacts(act, dep *action) { - const serialize = false - - for key, fact := range dep.objectFacts { - // Filter out facts related to objects - // that are irrelevant downstream - // (equivalently: not in the compiler export data). - if !exportedFrom(key.obj, dep.pkg.Types) { - factsInheritDebugf("%v: discarding %T fact from %s for %s: %s", act, fact, dep, key.obj, fact) - continue - } - - // Optionally serialize/deserialize fact - // to verify that it works across address spaces. - if serialize { - encodedFact, err := codeFact(fact) - if err != nil { - act.r.log.Panicf("internal error: encoding of %T fact failed in %v: %v", fact, act, err) - } - fact = encodedFact - } - - factsInheritDebugf("%v: inherited %T fact for %s: %s", act, fact, key.obj, fact) - - act.objectFacts[key] = fact - } - - for key, fact := range dep.packageFacts { - // TODO: filter out facts that belong to - // packages not mentioned in the export data - // to prevent side channels. - - // Optionally serialize/deserialize fact - // to verify that it works across address spaces - // and is deterministic. - if serialize { - encodedFact, err := codeFact(fact) - if err != nil { - act.r.log.Panicf("internal error: encoding of %T fact failed in %v", fact, act) - } - fact = encodedFact - } - - factsInheritDebugf("%v: inherited %T fact for %s: %s", act, fact, key.pkg.Path(), fact) - - act.packageFacts[key] = fact - } -} - -// NOTE(ldez) no alteration. -// codeFact encodes then decodes a fact, -// just to exercise that logic. -func codeFact(fact analysis.Fact) (analysis.Fact, error) { - // We encode facts one at a time. - // A real modular driver would emit all facts - // into one encoder to improve gob efficiency. 
- var buf bytes.Buffer - if err := gob.NewEncoder(&buf).Encode(fact); err != nil { - return nil, err - } - - // Encode it twice and assert that we get the same bits. - // This helps detect nondeterministic Gob encoding (e.g. of maps). - var buf2 bytes.Buffer - if err := gob.NewEncoder(&buf2).Encode(fact); err != nil { - return nil, err - } - if !bytes.Equal(buf.Bytes(), buf2.Bytes()) { - return nil, fmt.Errorf("encoding of %T fact is nondeterministic", fact) - } - - newFact := reflect.New(reflect.TypeOf(fact).Elem()).Interface().(analysis.Fact) - if err := gob.NewDecoder(&buf).Decode(newFact); err != nil { - return nil, err - } - return newFact, nil -} - -// NOTE(ldez) no alteration. -// exportedFrom reports whether obj may be visible to a package that imports pkg. -// This includes not just the exported members of pkg, but also unexported -// constants, types, fields, and methods, perhaps belonging to other packages, -// that find there way into the API. -// This is an over-approximation of the more accurate approach used by -// gc export data, which walks the type graph, but it's much simpler. -// -// TODO(adonovan): do more accurate filtering by walking the type graph. -func exportedFrom(obj types.Object, pkg *types.Package) bool { - switch obj := obj.(type) { - case *types.Func: - return obj.Exported() && obj.Pkg() == pkg || - obj.Type().(*types.Signature).Recv() != nil - case *types.Var: - if obj.IsField() { - return true - } - // we can't filter more aggressively than this because we need - // to consider function parameters exported, but have no way - // of telling apart function parameters from local variables. - return obj.Pkg() == pkg - case *types.TypeName, *types.Const: - return true - } - return false // Nil, Builtin, Label, or PkgName -} - -// NOTE(ldez) altered: logger; `act.factType` -// importObjectFact implements Pass.ImportObjectFact. -// Given a non-nil pointer ptr of type *T, where *T satisfies Fact, -// importObjectFact copies the fact value to *ptr. -func (act *action) importObjectFact(obj types.Object, ptr analysis.Fact) bool { - if obj == nil { - panic("nil object") - } - key := objectFactKey{obj, act.factType(ptr)} - if v, ok := act.objectFacts[key]; ok { - reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem()) - return true - } - return false -} - -// NOTE(ldez) altered: removes code related to `act.pass.ExportPackageFact`; logger; `act.factType`. -// exportObjectFact implements Pass.ExportObjectFact. -func (act *action) exportObjectFact(obj types.Object, fact analysis.Fact) { - if obj.Pkg() != act.pkg.Types { - act.r.log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging another package", - act.a, act.pkg, obj, fact) - } - - key := objectFactKey{obj, act.factType(fact)} - act.objectFacts[key] = fact // clobber any existing entry - if isFactsExportDebug { - objstr := types.ObjectString(obj, (*types.Package).Name) - - factsExportDebugf("%s: object %s has fact %s\n", - act.pkg.Fset.Position(obj.Pos()), objstr, fact) - } -} - -// NOTE(ldez) no alteration. -func (act *action) allObjectFacts() []analysis.ObjectFact { - facts := make([]analysis.ObjectFact, 0, len(act.objectFacts)) - for k := range act.objectFacts { - facts = append(facts, analysis.ObjectFact{Object: k.obj, Fact: act.objectFacts[k]}) - } - return facts -} - -// NOTE(ldez) altered: `act.factType` -// importPackageFact implements Pass.ImportPackageFact. 
-// Given a non-nil pointer ptr of type *T, where *T satisfies Fact, -// fact copies the fact value to *ptr. -func (act *action) importPackageFact(pkg *types.Package, ptr analysis.Fact) bool { - if pkg == nil { - panic("nil package") - } - key := packageFactKey{pkg, act.factType(ptr)} - if v, ok := act.packageFacts[key]; ok { - reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem()) - return true - } - return false -} - -// NOTE(ldez) altered: removes code related to `act.pass.ExportPackageFact`; logger; `act.factType`. -// exportPackageFact implements Pass.ExportPackageFact. -func (act *action) exportPackageFact(fact analysis.Fact) { - key := packageFactKey{act.pass.Pkg, act.factType(fact)} - act.packageFacts[key] = fact // clobber any existing entry - - factsDebugf("%s: package %s has fact %s\n", - act.pkg.Fset.Position(act.pass.Files[0].Pos()), act.pass.Pkg.Path(), fact) -} - -// NOTE(ldez) altered: add receiver to handle logs. -func (act *action) factType(fact analysis.Fact) reflect.Type { - t := reflect.TypeOf(fact) - if t.Kind() != reflect.Ptr { - act.r.log.Fatalf("invalid Fact type: got %T, want pointer", fact) - } - return t -} - -// NOTE(ldez) no alteration. -func (act *action) allPackageFacts() []analysis.PackageFact { - facts := make([]analysis.PackageFact, 0, len(act.packageFacts)) - for k := range act.packageFacts { - facts = append(facts, analysis.PackageFact{Package: k.pkg, Fact: act.packageFacts[k]}) - } - return facts -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_loadingpackage.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_loadingpackage.go index c84aaeb9bb..fca4b8c3ad 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_loadingpackage.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_loadingpackage.go @@ -67,11 +67,7 @@ func (lp *loadingPackage) analyze(loadMode LoadMode, loadSem chan struct{}) { // Unblock depending on actions and propagate error. 
for _, act := range lp.actions { close(act.analysisDoneCh) -<<<<<<< HEAD act.Err = werr -======= - act.err = werr ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return } @@ -129,7 +125,6 @@ func (lp *loadingPackage) loadFromSource(loadMode LoadMode) error { pkg.IllTyped = true pkg.TypesInfo = &types.Info{ -<<<<<<< HEAD Types: make(map[ast.Expr]types.TypeAndValue), Instances: make(map[*ast.Ident]types.Instance), Defs: make(map[*ast.Ident]types.Object), @@ -138,15 +133,6 @@ func (lp *loadingPackage) loadFromSource(loadMode LoadMode) error { Selections: make(map[*ast.SelectorExpr]*types.Selection), Scopes: make(map[ast.Node]*types.Scope), FileVersions: make(map[*ast.File]string), -======= - Types: make(map[ast.Expr]types.TypeAndValue), - Instances: make(map[*ast.Ident]types.Instance), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Implicits: make(map[ast.Node]types.Object), - Scopes: make(map[ast.Node]*types.Scope), - Selections: make(map[*ast.SelectorExpr]*types.Selection), ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } importer := func(path string) (*types.Package, error) { @@ -378,21 +364,12 @@ func (lp *loadingPackage) decUse(canClearTypes bool) { pass.ImportPackageFact = nil pass.ExportPackageFact = nil act.pass = nil -<<<<<<< HEAD act.Deps = nil if act.Result != nil { if isMemoryDebug { debugf("%s: decUse: nilling act result of size %d bytes", act, sizeOfValueTreeBytes(act.Result)) } act.Result = nil -======= - act.deps = nil - if act.result != nil { - if isMemoryDebug { - debugf("%s: decUse: nilling act result of size %d bytes", act, sizeOfValueTreeBytes(act.result)) - } - act.result = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -423,11 +400,7 @@ func (lp *loadingPackage) decUse(canClearTypes bool) { for _, act := range lp.actions { if !lp.isInitial { -<<<<<<< HEAD act.Package = nil -======= - act.pkg = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } act.packageFacts = nil act.objectFacts = nil diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners.go index 3b94b0da5a..3a9a35dec1 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners.go @@ -2,11 +2,8 @@ package goanalysis import ( "fmt" -<<<<<<< HEAD "go/token" "strings" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/packages" @@ -86,10 +83,7 @@ func runAnalyzers(cfg runAnalyzersConfig, lintCtx *linter.Context) ([]result.Iss func buildIssues(diags []Diagnostic, linterNameBuilder func(diag *Diagnostic) string) []result.Issue { var issues []result.Issue -<<<<<<< HEAD -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for i := range diags { diag := &diags[i] linterName := linterNameBuilder(diag) @@ -101,7 +95,6 @@ func buildIssues(diags []Diagnostic, linterNameBuilder func(diag *Diagnostic) st text = fmt.Sprintf("%s: %s", diag.Analyzer.Name, diag.Message) } -<<<<<<< HEAD var suggestedFixes []analysis.SuggestedFix for _, sf := range diag.SuggestedFixes { @@ -139,13 +132,6 @@ func buildIssues(diags []Diagnostic, linterNameBuilder func(diag *Diagnostic) st Pos: diag.Position, Pkg: diag.Pkg, SuggestedFixes: suggestedFixes, -======= - issues = append(issues, result.Issue{ - FromLinter: linterName, - Text: text, - Pos: diag.Position, - Pkg: diag.Pkg, ->>>>>>> 
70e0318b1 ([WIP] add archivista storage backend) }) if len(diag.Related) > 0 { diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners_cache.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners_cache.go index 8e86b70eec..4366155b02 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners_cache.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners_cache.go @@ -48,11 +48,7 @@ func saveIssuesToCache(allPkgs []*packages.Package, pkgsFromCache map[*packages. Severity: i.Severity, Pos: i.Pos, LineRange: i.LineRange, -<<<<<<< HEAD SuggestedFixes: i.SuggestedFixes, -======= - Replacement: i.Replacement, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ExpectNoLint: i.ExpectNoLint, ExpectedNoLintLinter: i.ExpectedNoLintLinter, }) @@ -127,11 +123,7 @@ func loadIssuesFromCache(pkgs []*packages.Package, lintCtx *linter.Context, Severity: issue.Severity, Pos: issue.Pos, LineRange: issue.LineRange, -<<<<<<< HEAD SuggestedFixes: issue.SuggestedFixes, -======= - Replacement: issue.Replacement, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Pkg: pkg, ExpectNoLint: issue.ExpectNoLint, ExpectedNoLintLinter: issue.ExpectedNoLintLinter, diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/asasalint/asasalint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/asasalint/asasalint.go index 7c0846405b..ccc58fee40 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/asasalint/asasalint.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/asasalint/asasalint.go @@ -9,21 +9,12 @@ import ( "github.com/golangci/golangci-lint/pkg/golinters/internal" ) -<<<<<<< HEAD func New(settings *config.AsasalintSettings) *goanalysis.Linter { cfg := asasalint.LinterSetting{} if settings != nil { cfg.Exclude = settings.Exclude cfg.NoBuiltinExclusions = !settings.UseBuiltinExclusions cfg.IgnoreTest = settings.IgnoreTest -======= -func New(setting *config.AsasalintSettings) *goanalysis.Linter { - cfg := asasalint.LinterSetting{} - if setting != nil { - cfg.Exclude = setting.Exclude - cfg.NoBuiltinExclusions = !setting.UseBuiltinExclusions - cfg.IgnoreTest = setting.IgnoreTest ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } a, err := asasalint.NewAnalyzer(cfg) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/bidichk/bidichk.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/bidichk/bidichk.go index eda1298957..c6315965c4 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/bidichk/bidichk.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/bidichk/bidichk.go @@ -10,7 +10,6 @@ import ( "github.com/golangci/golangci-lint/pkg/goanalysis" ) -<<<<<<< HEAD func New(settings *config.BiDiChkSettings) *goanalysis.Linter { a := bidichk.NewAnalyzer() @@ -47,44 +46,6 @@ func New(settings *config.BiDiChkSettings) *goanalysis.Linter { } cfg[a.Name] = map[string]any{ -======= -func New(cfg *config.BiDiChkSettings) *goanalysis.Linter { - a := bidichk.NewAnalyzer() - - cfgMap := map[string]map[string]any{} - if cfg != nil { - var opts []string - - if cfg.LeftToRightEmbedding { - opts = append(opts, "LEFT-TO-RIGHT-EMBEDDING") - } - if cfg.RightToLeftEmbedding { - opts = append(opts, "RIGHT-TO-LEFT-EMBEDDING") - } - if cfg.PopDirectionalFormatting { - opts = append(opts, "POP-DIRECTIONAL-FORMATTING") - } - if cfg.LeftToRightOverride { - opts = append(opts, "LEFT-TO-RIGHT-OVERRIDE") - } - if cfg.RightToLeftOverride { - opts = append(opts, 
"RIGHT-TO-LEFT-OVERRIDE") - } - if cfg.LeftToRightIsolate { - opts = append(opts, "LEFT-TO-RIGHT-ISOLATE") - } - if cfg.RightToLeftIsolate { - opts = append(opts, "RIGHT-TO-LEFT-ISOLATE") - } - if cfg.FirstStrongIsolate { - opts = append(opts, "FIRST-STRONG-ISOLATE") - } - if cfg.PopDirectionalIsolate { - opts = append(opts, "POP-DIRECTIONAL-ISOLATE") - } - - cfgMap[a.Name] = map[string]any{ ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "disallowed-runes": strings.Join(opts, ","), } } @@ -93,10 +54,6 @@ func New(cfg *config.BiDiChkSettings) *goanalysis.Linter { a.Name, "Checks for dangerous unicode character sequences", []*analysis.Analyzer{a}, -<<<<<<< HEAD cfg, -======= - cfgMap, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/bodyclose/bodyclose.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/bodyclose/bodyclose.go index 6f7f7d6aaa..c520e88db3 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/bodyclose/bodyclose.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/bodyclose/bodyclose.go @@ -12,11 +12,7 @@ func New() *goanalysis.Linter { return goanalysis.NewLinter( a.Name, -<<<<<<< HEAD a.Doc, -======= - "checks whether HTTP response body is closed successfully", ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) []*analysis.Analyzer{a}, nil, ).WithLoadMode(goanalysis.LoadModeTypesInfo) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop/cyclop.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop/cyclop.go index ca9e2b4be7..772b5601ca 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop/cyclop.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop/cyclop.go @@ -33,9 +33,5 @@ func New(settings *config.Cyclop) *goanalysis.Linter { a.Doc, []*analysis.Analyzer{a}, cfg, -<<<<<<< HEAD ).WithLoadMode(goanalysis.LoadModeSyntax) -======= - ).WithLoadMode(goanalysis.LoadModeTypesInfo) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled/dogsled.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled/dogsled.go index f46f68affe..afa8152fac 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled/dogsled.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled/dogsled.go @@ -1,7 +1,6 @@ package dogsled import ( -<<<<<<< HEAD "go/ast" "golang.org/x/tools/go/analysis" @@ -10,52 +9,18 @@ import ( "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" -======= - "fmt" - "go/ast" - "go/token" - "sync" - - "golang.org/x/tools/go/analysis" - - "github.com/golangci/golangci-lint/pkg/config" - "github.com/golangci/golangci-lint/pkg/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const linterName = "dogsled" func New(settings *config.DogsledSettings) *goanalysis.Linter { -<<<<<<< HEAD -======= - var mu sync.Mutex - var resIssues []goanalysis.Issue - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { -<<<<<<< HEAD return run(pass, settings.MaxBlankIdentifiers) }, Requires: []*analysis.Analyzer{inspect.Analyzer}, -======= - 
issues := runDogsled(pass, settings) - - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) - mu.Unlock() - - return nil, nil - }, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return goanalysis.NewLinter( @@ -63,7 +28,6 @@ func New(settings *config.DogsledSettings) *goanalysis.Linter { "Checks assignments with too many blank identifiers (e.g. x, _, _, _, := f())", []*analysis.Analyzer{analyzer}, nil, -<<<<<<< HEAD ).WithLoadMode(goanalysis.LoadModeSyntax) } @@ -111,70 +75,4 @@ func run(pass *analysis.Pass, maxBlanks int) (any, error) { }) return nil, nil -======= - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) -} - -func runDogsled(pass *analysis.Pass, settings *config.DogsledSettings) []goanalysis.Issue { - var reports []goanalysis.Issue - for _, f := range pass.Files { - v := &returnsVisitor{ - maxBlanks: settings.MaxBlankIdentifiers, - f: pass.Fset, - } - - ast.Walk(v, f) - - for i := range v.issues { - reports = append(reports, goanalysis.NewIssue(&v.issues[i], pass)) - } - } - - return reports -} - -type returnsVisitor struct { - f *token.FileSet - maxBlanks int - issues []result.Issue -} - -func (v *returnsVisitor) Visit(node ast.Node) ast.Visitor { - funcDecl, ok := node.(*ast.FuncDecl) - if !ok { - return v - } - if funcDecl.Body == nil { - return v - } - - for _, expr := range funcDecl.Body.List { - assgnStmt, ok := expr.(*ast.AssignStmt) - if !ok { - continue - } - - numBlank := 0 - for _, left := range assgnStmt.Lhs { - ident, ok := left.(*ast.Ident) - if !ok { - continue - } - if ident.Name == "_" { - numBlank++ - } - } - - if numBlank > v.maxBlanks { - v.issues = append(v.issues, result.Issue{ - FromLinter: linterName, - Text: fmt.Sprintf("declaration has %v blank identifiers", numBlank), - Pos: v.f.Position(assgnStmt.Pos()), - }) - } - } - return v ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl/dupl.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl/dupl.go index c7fee28e05..d2bb3d8d84 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl/dupl.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl/dupl.go @@ -54,13 +54,7 @@ func New(settings *config.DuplSettings) *goanalysis.Linter { } func runDupl(pass *analysis.Pass, settings *config.DuplSettings) ([]goanalysis.Issue, error) { -<<<<<<< HEAD issues, err := duplAPI.Run(internal.GetGoFileNames(pass), settings.Threshold) -======= - fileNames := internal.GetFileNames(pass) - - issues, err := duplAPI.Run(fileNames, settings.Threshold) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupword/dupword.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupword/dupword.go index e2820587a8..a2bcc34d40 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupword/dupword.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupword/dupword.go @@ -10,7 +10,6 @@ import ( "github.com/golangci/golangci-lint/pkg/goanalysis" ) -<<<<<<< HEAD func New(settings *config.DupWordSettings) *goanalysis.Linter { a := dupword.NewAnalyzer() @@ -19,16 +18,6 @@ func New(settings *config.DupWordSettings) *goanalysis.Linter { cfg[a.Name] = map[string]any{ "keyword": strings.Join(settings.Keywords, ","), "ignore": 
strings.Join(settings.Ignore, ","), -======= -func New(setting *config.DupWordSettings) *goanalysis.Linter { - a := dupword.NewAnalyzer() - - cfgMap := map[string]map[string]any{} - if setting != nil { - cfgMap[a.Name] = map[string]any{ - "keyword": strings.Join(setting.Keywords, ","), - "ignore": strings.Join(setting.Ignore, ","), ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -36,10 +25,6 @@ func New(setting *config.DupWordSettings) *goanalysis.Linter { a.Name, "checks for duplicate words in the source code", []*analysis.Analyzer{a}, -<<<<<<< HEAD cfg, -======= - cfgMap, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck/errcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck/errcheck.go index 6ff6cb9cea..67a1b2ca8d 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck/errcheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck/errcheck.go @@ -2,10 +2,7 @@ package errcheck import ( "bufio" -<<<<<<< HEAD "cmp" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "fmt" "os" "os/user" @@ -94,14 +91,7 @@ func runErrCheck(lintCtx *linter.Context, pass *analysis.Pass, checker *errcheck text := "Error return value is not checked" if err.FuncName != "" { -<<<<<<< HEAD code := cmp.Or(err.SelectorName, err.FuncName) -======= - code := err.SelectorName - if err.SelectorName == "" { - code = err.FuncName - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) text = fmt.Sprintf("Error return value of %s is not checked", internal.FormatCode(code, lintCtx.Cfg)) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errchkjson/errchkjson.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errchkjson/errchkjson.go index 37d04f2d9d..506113d6d5 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errchkjson/errchkjson.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errchkjson/errchkjson.go @@ -8,7 +8,6 @@ import ( "github.com/golangci/golangci-lint/pkg/goanalysis" ) -<<<<<<< HEAD func New(settings *config.ErrChkJSONSettings) *goanalysis.Linter { a := errchkjson.NewAnalyzer() @@ -20,19 +19,6 @@ func New(settings *config.ErrChkJSONSettings) *goanalysis.Linter { cfg[a.Name] = map[string]any{ "omit-safe": !settings.CheckErrorFreeEncoding, "report-no-exported": settings.ReportNoExported, -======= -func New(cfg *config.ErrChkJSONSettings) *goanalysis.Linter { - a := errchkjson.NewAnalyzer() - - cfgMap := map[string]map[string]any{} - cfgMap[a.Name] = map[string]any{ - "omit-safe": true, - } - if cfg != nil { - cfgMap[a.Name] = map[string]any{ - "omit-safe": !cfg.CheckErrorFreeEncoding, - "report-no-exported": cfg.ReportNoExported, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -40,10 +26,6 @@ func New(cfg *config.ErrChkJSONSettings) *goanalysis.Linter { a.Name, a.Doc, []*analysis.Analyzer{a}, -<<<<<<< HEAD cfg, -======= - cfgMap, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errorlint/errorlint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errorlint/errorlint.go index 40fa7d44ea..14851adc28 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errorlint/errorlint.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errorlint/errorlint.go @@ -8,28 
+8,16 @@ import ( "github.com/golangci/golangci-lint/pkg/goanalysis" ) -<<<<<<< HEAD func New(settings *config.ErrorLintSettings) *goanalysis.Linter { var opts []errorlint.Option if settings != nil { ae := toAllowPairs(settings.AllowedErrors) -======= -func New(cfg *config.ErrorLintSettings) *goanalysis.Linter { - var opts []errorlint.Option - - if cfg != nil { - ae := toAllowPairs(cfg.AllowedErrors) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if len(ae) > 0 { opts = append(opts, errorlint.WithAllowedErrors(ae)) } -<<<<<<< HEAD aew := toAllowPairs(settings.AllowedErrorsWildcard) -======= - aew := toAllowPairs(cfg.AllowedErrorsWildcard) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if len(aew) > 0 { opts = append(opts, errorlint.WithAllowedWildcard(aew)) } @@ -37,7 +25,6 @@ func New(cfg *config.ErrorLintSettings) *goanalysis.Linter { a := errorlint.NewAnalyzer(opts...) -<<<<<<< HEAD cfg := map[string]map[string]any{} if settings != nil { @@ -46,16 +33,6 @@ func New(cfg *config.ErrorLintSettings) *goanalysis.Linter { "errorf-multi": settings.ErrorfMulti, "asserts": settings.Asserts, "comparison": settings.Comparison, -======= - cfgMap := map[string]map[string]any{} - - if cfg != nil { - cfgMap[a.Name] = map[string]any{ - "errorf": cfg.Errorf, - "errorf-multi": cfg.ErrorfMulti, - "asserts": cfg.Asserts, - "comparison": cfg.Comparison, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -64,11 +41,7 @@ func New(cfg *config.ErrorLintSettings) *goanalysis.Linter { "errorlint is a linter for that can be used to find code "+ "that will cause problems with the error wrapping scheme introduced in Go 1.13.", []*analysis.Analyzer{a}, -<<<<<<< HEAD cfg, -======= - cfgMap, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/forbidigo/forbidigo.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/forbidigo/forbidigo.go index 7f7e68a783..3b410359d0 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/forbidigo/forbidigo.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/forbidigo/forbidigo.go @@ -2,57 +2,27 @@ package forbidigo import ( "fmt" -<<<<<<< HEAD -======= - "sync" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/ashanbrown/forbidigo/forbidigo" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" -<<<<<<< HEAD "github.com/golangci/golangci-lint/pkg/logutils" -======= - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/logutils" - "github.com/golangci/golangci-lint/pkg/result" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const linterName = "forbidigo" func New(settings *config.ForbidigoSettings) *goanalysis.Linter { -<<<<<<< HEAD -======= - var mu sync.Mutex - var resIssues []goanalysis.Issue - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { -<<<<<<< HEAD err := runForbidigo(pass, settings) -======= - issues, err := runForbidigo(pass, settings) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } -<<<<<<< HEAD -======= - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) 
- mu.Unlock() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil, nil }, } @@ -65,19 +35,10 @@ func New(settings *config.ForbidigoSettings) *goanalysis.Linter { "Forbids identifiers", []*analysis.Analyzer{analyzer}, nil, -<<<<<<< HEAD ).WithLoadMode(goanalysis.LoadModeTypesInfo) } func runForbidigo(pass *analysis.Pass, settings *config.ForbidigoSettings) error { -======= - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeTypesInfo) -} - -func runForbidigo(pass *analysis.Pass, settings *config.ForbidigoSettings) ([]goanalysis.Issue, error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) options := []forbidigo.Option{ forbidigo.OptionExcludeGodocExamples(settings.ExcludeGodocExamples), // disable "//permit" directives so only "//nolint" directives matters within golangci-lint @@ -90,35 +51,22 @@ func runForbidigo(pass *analysis.Pass, settings *config.ForbidigoSettings) ([]go for _, pattern := range settings.Forbid { buffer, err := pattern.MarshalString() if err != nil { -<<<<<<< HEAD return err } -======= - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) patterns = append(patterns, string(buffer)) } forbid, err := forbidigo.NewLinter(patterns, options...) if err != nil { -<<<<<<< HEAD return fmt.Errorf("failed to create linter %q: %w", linterName, err) } -======= - return nil, fmt.Errorf("failed to create linter %q: %w", linterName, err) - } - - var issues []goanalysis.Issue ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, file := range pass.Files { runConfig := forbidigo.RunConfig{ Fset: pass.Fset, DebugLog: logutils.Debug(logutils.DebugKeyForbidigo), } -<<<<<<< HEAD if settings.AnalyzeTypes { runConfig.TypesInfo = pass.TypesInfo @@ -138,24 +86,4 @@ func runForbidigo(pass *analysis.Pass, settings *config.ForbidigoSettings) ([]go } return nil -======= - if settings != nil && settings.AnalyzeTypes { - runConfig.TypesInfo = pass.TypesInfo - } - hints, err := forbid.RunWithConfig(runConfig, file) - if err != nil { - return nil, fmt.Errorf("forbidigo linter failed on file %q: %w", file.Name.String(), err) - } - - for _, hint := range hints { - issues = append(issues, goanalysis.NewIssue(&result.Issue{ - Pos: hint.Position(), - Text: hint.Details(), - FromLinter: linterName, - }, pass)) - } - } - - return issues, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen/funlen.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen/funlen.go index e0ed551587..bdadcece46 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen/funlen.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen/funlen.go @@ -1,19 +1,11 @@ package funlen import ( -<<<<<<< HEAD -======= - "go/token" - "strings" - "sync" - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/ultraware/funlen" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" -<<<<<<< HEAD ) type Config struct { @@ -38,68 +30,4 @@ func New(settings *config.FunlenSettings) *goanalysis.Linter { []*analysis.Analyzer{a}, nil, ).WithLoadMode(goanalysis.LoadModeSyntax) -======= - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" -) - -const linterName = "funlen" - -func New(settings *config.FunlenSettings) *goanalysis.Linter { - var mu sync.Mutex - 
var resIssues []goanalysis.Issue - - analyzer := &analysis.Analyzer{ - Name: linterName, - Doc: goanalysis.TheOnlyanalyzerDoc, - Run: func(pass *analysis.Pass) (any, error) { - issues := runFunlen(pass, settings) - - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) - mu.Unlock() - - return nil, nil - }, - } - - return goanalysis.NewLinter( - linterName, - "Tool for detection of long functions", - []*analysis.Analyzer{analyzer}, - nil, - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) -} - -func runFunlen(pass *analysis.Pass, settings *config.FunlenSettings) []goanalysis.Issue { - var lintIssues []funlen.Message - for _, file := range pass.Files { - fileIssues := funlen.Run(file, pass.Fset, settings.Lines, settings.Statements, settings.IgnoreComments) - lintIssues = append(lintIssues, fileIssues...) - } - - if len(lintIssues) == 0 { - return nil - } - - issues := make([]goanalysis.Issue, len(lintIssues)) - for k, i := range lintIssues { - issues[k] = goanalysis.NewIssue(&result.Issue{ - Pos: token.Position{ - Filename: i.Pos.Filename, - Line: i.Pos.Line, - }, - Text: strings.TrimRight(i.Message, "\n"), - FromLinter: linterName, - }, pass) - } - - return issues ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci/gci.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci/gci.go index e63bc6a0c8..841ee81b0d 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci/gci.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci/gci.go @@ -1,7 +1,6 @@ package gci import ( -<<<<<<< HEAD "bytes" "fmt" "io" @@ -11,22 +10,6 @@ import ( "github.com/daixiang0/gci/pkg/gci" "github.com/daixiang0/gci/pkg/log" "github.com/shazow/go-diff/difflib" -======= - "fmt" - "sort" - "strings" - "sync" - - gcicfg "github.com/daixiang0/gci/pkg/config" - "github.com/daixiang0/gci/pkg/gci" - "github.com/daixiang0/gci/pkg/io" - "github.com/daixiang0/gci/pkg/log" - "github.com/daixiang0/gci/pkg/section" - "github.com/golangci/modinfo" - "github.com/hexops/gotextdiff" - "github.com/hexops/gotextdiff/myers" - "github.com/hexops/gotextdiff/span" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" @@ -37,7 +20,6 @@ import ( const linterName = "gci" -<<<<<<< HEAD type differ interface { Diff(out io.Writer, a io.ReadSeeker, b io.ReadSeeker) error } @@ -62,60 +44,10 @@ func New(settings *config.GciSettings) *goanalysis.Linter { ).WithContextSetter(func(lintCtx *linter.Context) { a.Run = func(pass *analysis.Pass) (any, error) { err := run(lintCtx, pass, settings, diff) -======= -func New(settings *config.GciSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - - analyzer := &analysis.Analyzer{ - Name: linterName, - Doc: goanalysis.TheOnlyanalyzerDoc, - Run: goanalysis.DummyRun, - Requires: []*analysis.Analyzer{ - modinfo.Analyzer, - }, - } - - var cfg *gcicfg.Config - if settings != nil { - rawCfg := gcicfg.YamlConfig{ - Cfg: gcicfg.BoolConfig{ - SkipGenerated: settings.SkipGenerated, - CustomOrder: settings.CustomOrder, - NoLexOrder: settings.NoLexOrder, - }, - SectionStrings: settings.Sections, - } - - if settings.LocalPrefixes != "" { - prefix := []string{"standard", "default", fmt.Sprintf("prefix(%s)", settings.LocalPrefixes)} - rawCfg.SectionStrings = prefix - } - - 
var err error - cfg, err = YamlConfig{origin: rawCfg}.Parse() - if err != nil { - internal.LinterLogger.Fatalf("gci: configuration parsing: %v", err) - } - } - - var lock sync.Mutex - - return goanalysis.NewLinter( - linterName, - "Gci controls Go package import order and makes it always deterministic.", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (any, error) { - var err error - cfg.Sections, err = hackSectionList(pass, cfg) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } -<<<<<<< HEAD return nil, nil } }).WithLoadMode(goanalysis.LoadModeSyntax) @@ -181,177 +113,4 @@ func run(lintCtx *linter.Context, pass *analysis.Pass, settings *config.GciSetti } return nil -======= - issues, err := runGci(pass, lintCtx, cfg, &lock) - if err != nil { - return nil, err - } - - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) - mu.Unlock() - - return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) -} - -func runGci(pass *analysis.Pass, lintCtx *linter.Context, cfg *gcicfg.Config, lock *sync.Mutex) ([]goanalysis.Issue, error) { - fileNames := internal.GetFileNames(pass) - - var diffs []string - err := diffFormattedFilesToArray(fileNames, *cfg, &diffs, lock) - if err != nil { - return nil, err - } - - var issues []goanalysis.Issue - - for _, diff := range diffs { - if diff == "" { - continue - } - - is, err := internal.ExtractIssuesFromPatch(diff, lintCtx, linterName, getIssuedTextGci) - if err != nil { - return nil, fmt.Errorf("can't extract issues from gci diff output %s: %w", diff, err) - } - - for i := range is { - issues = append(issues, goanalysis.NewIssue(&is[i], pass)) - } - } - - return issues, nil -} - -func getIssuedTextGci(settings *config.LintersSettings) string { - text := "File is not `gci`-ed" - - hasOptions := settings.Gci.SkipGenerated || len(settings.Gci.Sections) > 0 - if !hasOptions { - return text - } - - text += " with" - - if settings.Gci.SkipGenerated { - text += " --skip-generated" - } - - if len(settings.Gci.Sections) > 0 { - for _, sect := range settings.Gci.Sections { - text += " -s " + sect - } - } - - if settings.Gci.CustomOrder { - text += " --custom-order" - } - - return text -} - -func hackSectionList(pass *analysis.Pass, cfg *gcicfg.Config) (section.SectionList, error) { - var sections section.SectionList - - for _, sect := range cfg.Sections { - // local module hack - if v, ok := sect.(*section.LocalModule); ok { - info, err := modinfo.FindModuleFromPass(pass) - if err != nil { - return nil, err - } - - if info.Path == "" { - continue - } - - v.Path = info.Path - } - - sections = append(sections, sect) - } - - return sections, nil -} - -// diffFormattedFilesToArray is a copy of gci.DiffFormattedFilesToArray without io.StdInGenerator. -// gci.DiffFormattedFilesToArray uses gci.processStdInAndGoFilesInPaths that uses io.StdInGenerator but stdin is not active on CI. 
-// https://github.com/daixiang0/gci/blob/6f5cb16718ba07f0342a58de9b830ec5a6d58790/pkg/gci/gci.go#L63-L75 -// https://github.com/daixiang0/gci/blob/6f5cb16718ba07f0342a58de9b830ec5a6d58790/pkg/gci/gci.go#L80 -func diffFormattedFilesToArray(paths []string, cfg gcicfg.Config, diffs *[]string, lock *sync.Mutex) error { - log.InitLogger() - defer func() { _ = log.L().Sync() }() - - return gci.ProcessFiles(io.GoFilesInPathsGenerator(paths, true), cfg, func(filePath string, unmodifiedFile, formattedFile []byte) error { - fileURI := span.URIFromPath(filePath) - edits := myers.ComputeEdits(fileURI, string(unmodifiedFile), string(formattedFile)) - unifiedEdits := gotextdiff.ToUnified(filePath, filePath, string(unmodifiedFile), edits) - lock.Lock() - *diffs = append(*diffs, fmt.Sprint(unifiedEdits)) - lock.Unlock() - return nil - }) -} - -// Code below this comment is borrowed and modified from gci. -// https://github.com/daixiang0/gci/blob/v0.13.5/pkg/config/config.go - -var defaultOrder = map[string]int{ - section.StandardType: 0, - section.DefaultType: 1, - section.CustomType: 2, - section.BlankType: 3, - section.DotType: 4, - section.AliasType: 5, - section.LocalModuleType: 6, -} - -type YamlConfig struct { - origin gcicfg.YamlConfig -} - -//nolint:gocritic // code borrowed from gci and modified to fix LocalModule section behavior. -func (g YamlConfig) Parse() (*gcicfg.Config, error) { - var err error - - sections, err := section.Parse(g.origin.SectionStrings) - if err != nil { - return nil, err - } - - if sections == nil { - sections = section.DefaultSections() - } - - // if default order sorted sections - if !g.origin.Cfg.CustomOrder { - sort.Slice(sections, func(i, j int) bool { - sectionI, sectionJ := sections[i].Type(), sections[j].Type() - - if g.origin.Cfg.NoLexOrder || strings.Compare(sectionI, sectionJ) != 0 { - return defaultOrder[sectionI] < defaultOrder[sectionJ] - } - - return strings.Compare(sections[i].String(), sections[j].String()) < 0 - }) - } - - sectionSeparators, err := section.Parse(g.origin.SectionSeparatorStrings) - if err != nil { - return nil, err - } - if sectionSeparators == nil { - sectionSeparators = section.DefaultSectionSeparators() - } - - return &gcicfg.Config{BoolConfig: g.origin.Cfg, Sections: sections, SectionSeparators: sectionSeparators}, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/ginkgolinter/ginkgolinter.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/ginkgolinter/ginkgolinter.go index be6121c46b..6826b77b6b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/ginkgolinter/ginkgolinter.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/ginkgolinter/ginkgolinter.go @@ -14,7 +14,6 @@ func New(settings *config.GinkgoLinterSettings) *goanalysis.Linter { if settings != nil { cfg = &types.Config{ -<<<<<<< HEAD SuppressLen: settings.SuppressLenAssertion, SuppressNil: settings.SuppressNilAssertion, SuppressErr: settings.SuppressErrAssertion, @@ -27,20 +26,6 @@ func New(settings *config.GinkgoLinterSettings) *goanalysis.Linter { ValidateAsyncIntervals: settings.ValidateAsyncIntervals, ForbidSpecPollution: settings.ForbidSpecPollution, ForceSucceedForFuncs: settings.ForceSucceedForFuncs, -======= - SuppressLen: types.Boolean(settings.SuppressLenAssertion), - SuppressNil: types.Boolean(settings.SuppressNilAssertion), - SuppressErr: types.Boolean(settings.SuppressErrAssertion), - SuppressCompare: 
types.Boolean(settings.SuppressCompareAssertion), - SuppressAsync: types.Boolean(settings.SuppressAsyncAssertion), - ForbidFocus: types.Boolean(settings.ForbidFocusContainer), - SuppressTypeCompare: types.Boolean(settings.SuppressTypeCompareWarning), - AllowHaveLen0: types.Boolean(settings.AllowHaveLenZero), - ForceExpectTo: types.Boolean(settings.ForceExpectTo), - ValidateAsyncIntervals: types.Boolean(settings.ValidateAsyncIntervals), - ForbidSpecPollution: types.Boolean(settings.ForbidSpecPollution), - ForceSucceedForFuncs: types.Boolean(settings.ForceSucceedForFuncs), ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits/gochecknoinits.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits/gochecknoinits.go index 9555bfdc29..510a06c91d 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits/gochecknoinits.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits/gochecknoinits.go @@ -1,7 +1,6 @@ package gochecknoinits import ( -<<<<<<< HEAD "go/ast" "golang.org/x/tools/go/analysis" @@ -10,56 +9,16 @@ import ( "github.com/golangci/golangci-lint/pkg/goanalysis" "github.com/golangci/golangci-lint/pkg/golinters/internal" -======= - "fmt" - "go/ast" - "go/token" - "sync" - - "golang.org/x/tools/go/analysis" - - "github.com/golangci/golangci-lint/pkg/goanalysis" - "github.com/golangci/golangci-lint/pkg/golinters/internal" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const linterName = "gochecknoinits" func New() *goanalysis.Linter { -<<<<<<< HEAD analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: run, Requires: []*analysis.Analyzer{inspect.Analyzer}, -======= - var mu sync.Mutex - var resIssues []goanalysis.Issue - - analyzer := &analysis.Analyzer{ - Name: linterName, - Doc: goanalysis.TheOnlyanalyzerDoc, - Run: func(pass *analysis.Pass) (any, error) { - var res []goanalysis.Issue - for _, file := range pass.Files { - fileIssues := checkFileForInits(file, pass.Fset) - for i := range fileIssues { - res = append(res, goanalysis.NewIssue(&fileIssues[i], pass)) - } - } - if len(res) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, res...) 
- mu.Unlock() - - return nil, nil - }, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return goanalysis.NewLinter( @@ -67,7 +26,6 @@ func New() *goanalysis.Linter { "Checks that no init functions are present in Go code", []*analysis.Analyzer{analyzer}, nil, -<<<<<<< HEAD ).WithLoadMode(goanalysis.LoadModeSyntax) } @@ -85,38 +43,13 @@ func run(pass *analysis.Pass) (any, error) { funcDecl, ok := decl.(*ast.FuncDecl) if !ok { return -======= - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) -} - -func checkFileForInits(f *ast.File, fset *token.FileSet) []result.Issue { - var res []result.Issue - for _, decl := range f.Decls { - funcDecl, ok := decl.(*ast.FuncDecl) - if !ok { - continue ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } fnName := funcDecl.Name.Name if fnName == "init" && funcDecl.Recv.NumFields() == 0 { -<<<<<<< HEAD pass.Reportf(funcDecl.Pos(), "don't use %s function", internal.FormatCode(fnName, nil)) } }) return nil, nil -======= - res = append(res, result.Issue{ - Pos: fset.Position(funcDecl.Pos()), - Text: fmt.Sprintf("don't use %s function", internal.FormatCode(fnName, nil)), - FromLinter: linterName, - }) - } - } - - return res ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecksumtype/gochecksumtype.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecksumtype/gochecksumtype.go index 2f88bb7e9d..cbc5873126 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecksumtype/gochecksumtype.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecksumtype/gochecksumtype.go @@ -61,7 +61,6 @@ func runGoCheckSumType(pass *analysis.Pass, settings *config.GoChecksumTypeSetti TypesInfo: pass.TypesInfo, } -<<<<<<< HEAD cfg := gochecksumtype.Config{ DefaultSignifiesExhaustive: settings.DefaultSignifiesExhaustive, IncludeSharedInterfaces: settings.IncludeSharedInterfaces, @@ -69,11 +68,6 @@ func runGoCheckSumType(pass *analysis.Pass, settings *config.GoChecksumTypeSetti var unknownError error errors := gochecksumtype.Run([]*packages.Package{pkg}, cfg) -======= - var unknownError error - errors := gochecksumtype.Run([]*packages.Package{pkg}, - gochecksumtype.Config{DefaultSignifiesExhaustive: settings.DefaultSignifiesExhaustive}) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, err := range errors { err, ok := err.(gochecksumtype.Error) if !ok { diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic/gocritic.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic/gocritic.go index ad996a3b7d..087ddc1df0 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic/gocritic.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic/gocritic.go @@ -5,10 +5,6 @@ import ( "fmt" "go/ast" "go/types" -<<<<<<< HEAD -======= - "path/filepath" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "reflect" "runtime" "slices" @@ -26,10 +22,6 @@ import ( "github.com/golangci/golangci-lint/pkg/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/logutils" -<<<<<<< HEAD -======= - "github.com/golangci/golangci-lint/pkg/result" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const linterName = "gocritic" @@ -40,12 +32,6 @@ var ( ) func New(settings *config.GoCriticSettings) *goanalysis.Linter { -<<<<<<< HEAD -======= - var mu 
sync.Mutex - var resIssues []goanalysis.Issue - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) wrapper := &goCriticWrapper{ sizes: types.SizesFor("gc", runtime.GOARCH), } @@ -54,26 +40,11 @@ func New(settings *config.GoCriticSettings) *goanalysis.Linter { Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { -<<<<<<< HEAD err := wrapper.run(pass) -======= - issues, err := wrapper.run(pass) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } -<<<<<<< HEAD -======= - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) - mu.Unlock() - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil, nil }, } @@ -91,12 +62,6 @@ Dynamic rules are written declaratively with AST patterns, filters, report messa wrapper.init(context.Log, settings) }). -<<<<<<< HEAD -======= - WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }). ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithLoadMode(goanalysis.LoadModeTypesInfo) } @@ -130,15 +95,9 @@ func (w *goCriticWrapper) init(logger logutils.Log, settings *config.GoCriticSet w.settingsWrapper = settingsWrapper } -<<<<<<< HEAD func (w *goCriticWrapper) run(pass *analysis.Pass) error { if w.settingsWrapper == nil { return errors.New("the settings wrapper is nil") -======= -func (w *goCriticWrapper) run(pass *analysis.Pass) ([]goanalysis.Issue, error) { - if w.settingsWrapper == nil { - return nil, errors.New("the settings wrapper is nil") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } linterCtx := gocriticlinter.NewContext(pass.Fset, w.sizes) @@ -147,29 +106,14 @@ func (w *goCriticWrapper) run(pass *analysis.Pass) ([]goanalysis.Issue, error) { enabledCheckers, err := w.buildEnabledCheckers(linterCtx) if err != nil { -<<<<<<< HEAD return err -======= - return nil, err ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } linterCtx.SetPackageInfo(pass.TypesInfo, pass.Pkg) -<<<<<<< HEAD runOnPackage(pass, enabledCheckers, pass.Files) return nil -======= - pkgIssues := runOnPackage(linterCtx, enabledCheckers, pass.Files) - - issues := make([]goanalysis.Issue, 0, len(pkgIssues)) - for i := range pkgIssues { - issues = append(issues, goanalysis.NewIssue(&pkgIssues[i], pass)) - } - - return issues, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (w *goCriticWrapper) buildEnabledCheckers(linterCtx *gocriticlinter.Context) ([]*gocriticlinter.Checker, error) { @@ -249,7 +193,6 @@ func (w *goCriticWrapper) normalizeCheckerParamsValue(p any) any { } } -<<<<<<< HEAD func runOnPackage(pass *analysis.Pass, checks []*gocriticlinter.Checker, files []*ast.File) { for _, f := range files { runOnFile(pass, f, checks) @@ -257,28 +200,10 @@ func runOnPackage(pass *analysis.Pass, checks []*gocriticlinter.Checker, files [ } func runOnFile(pass *analysis.Pass, f *ast.File, checks []*gocriticlinter.Checker) { -======= -func runOnPackage(linterCtx *gocriticlinter.Context, checks []*gocriticlinter.Checker, files []*ast.File) []result.Issue { - var res []result.Issue - for _, f := range files { - filename := filepath.Base(linterCtx.FileSet.Position(f.Pos()).Filename) - linterCtx.SetFileInfo(filename, f) - - issues := runOnFile(linterCtx, f, checks) - res = append(res, issues...) 
-	}
-	return res
-}
-
-func runOnFile(linterCtx *gocriticlinter.Context, f *ast.File, checks []*gocriticlinter.Checker) []result.Issue {
-	var res []result.Issue
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	for _, c := range checks {
 		// All checkers are expected to use *lint.Context
 		// as read-only structure, so no copying is required.
 		for _, warn := range c.Check(f) {
-<<<<<<< HEAD
 			diag := analysis.Diagnostic{
 				Pos:      warn.Pos,
 				Category: c.Info.Name,
@@ -298,30 +223,6 @@ func runOnFile(linterCtx *gocriticlinter.Context, f *ast.File, checks []*gocriti
 			pass.Report(diag)
 		}
 	}
-=======
-			pos := linterCtx.FileSet.Position(warn.Pos)
-			issue := result.Issue{
-				Pos:        pos,
-				Text:       fmt.Sprintf("%s: %s", c.Info.Name, warn.Text),
-				FromLinter: linterName,
-			}
-
-			if warn.HasQuickFix() {
-				issue.Replacement = &result.Replacement{
-					Inline: &result.InlineFix{
-						StartCol:  pos.Column - 1,
-						Length:    int(warn.Suggestion.To - warn.Suggestion.From),
-						NewString: string(warn.Suggestion.Replacement),
-					},
-				}
-			}
-
-			res = append(res, issue)
-		}
-	}
-
-	return res
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 type goCriticChecks[T any] map[string]T
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot/godot.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot/godot.go
index 25bb92b1f3..3194b3d3ac 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot/godot.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot/godot.go
@@ -1,33 +1,18 @@
 package godot
 
 import (
-<<<<<<< HEAD
 	"cmp"
-=======
-	"sync"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	"github.com/tetafro/godot"
 	"golang.org/x/tools/go/analysis"
 
 	"github.com/golangci/golangci-lint/pkg/config"
 	"github.com/golangci/golangci-lint/pkg/goanalysis"
-<<<<<<< HEAD
-=======
-	"github.com/golangci/golangci-lint/pkg/lint/linter"
-	"github.com/golangci/golangci-lint/pkg/result"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 )
 
 const linterName = "godot"
 
 func New(settings *config.GodotSettings) *goanalysis.Linter {
-<<<<<<< HEAD
-=======
-	var mu sync.Mutex
-	var resIssues []goanalysis.Issue
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	var dotSettings godot.Settings
 
 	if settings != nil {
@@ -39,47 +24,22 @@ func New(settings *config.GodotSettings) *goanalysis.Linter {
 	}
 
 	// Convert deprecated setting
-<<<<<<< HEAD
 	if settings.CheckAll != nil && *settings.CheckAll {
-=======
-	if settings.CheckAll {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			dotSettings.Scope = godot.AllScope
 		}
 	}
 
-<<<<<<< HEAD
 	dotSettings.Scope = cmp.Or(dotSettings.Scope, godot.DeclScope)
-=======
-	if dotSettings.Scope == "" {
-		dotSettings.Scope = godot.DeclScope
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	analyzer := &analysis.Analyzer{
 		Name: linterName,
 		Doc:  goanalysis.TheOnlyanalyzerDoc,
 		Run: func(pass *analysis.Pass) (any, error) {
-<<<<<<< HEAD
 			err := runGodot(pass, dotSettings)
-=======
-			issues, err := runGodot(pass, dotSettings)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			if err != nil {
 				return nil, err
 			}
 
-<<<<<<< HEAD
-=======
-			if len(issues) == 0 {
-				return nil, nil
-			}
-
-			mu.Lock()
-			resIssues = append(resIssues, issues...)
-			mu.Unlock()
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			return nil, nil
 		},
 	}
@@ -89,7 +49,6 @@ func New(settings *config.GodotSettings) *goanalysis.Linter {
 		"Check if comments end in a period",
 		[]*analysis.Analyzer{analyzer},
 		nil,
-<<<<<<< HEAD
 	).WithLoadMode(goanalysis.LoadModeSyntax)
 }
 
@@ -126,40 +85,4 @@ func runGodot(pass *analysis.Pass, settings godot.Settings) error {
 	}
 
 	return nil
-=======
-	).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
-		return resIssues
-	}).WithLoadMode(goanalysis.LoadModeSyntax)
-}
-
-func runGodot(pass *analysis.Pass, settings godot.Settings) ([]goanalysis.Issue, error) {
-	var lintIssues []godot.Issue
-	for _, file := range pass.Files {
-		iss, err := godot.Run(file, pass.Fset, settings)
-		if err != nil {
-			return nil, err
-		}
-		lintIssues = append(lintIssues, iss...)
-	}
-
-	if len(lintIssues) == 0 {
-		return nil, nil
-	}
-
-	issues := make([]goanalysis.Issue, len(lintIssues))
-	for k, i := range lintIssues {
-		issue := result.Issue{
-			Pos:        i.Pos,
-			Text:       i.Message,
-			FromLinter: linterName,
-			Replacement: &result.Replacement{
-				NewLines: []string{i.Replacement},
-			},
-		}
-
-		issues[k] = goanalysis.NewIssue(&issue, pass)
-	}
-
-	return issues, nil
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox/godox.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox/godox.go
index 10f5dc845c..181e0a73ab 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox/godox.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox/godox.go
@@ -3,49 +3,22 @@ package godox
 import (
 	"go/token"
 	"strings"
-<<<<<<< HEAD
-=======
-	"sync"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	"github.com/matoous/godox"
 	"golang.org/x/tools/go/analysis"
 
 	"github.com/golangci/golangci-lint/pkg/config"
 	"github.com/golangci/golangci-lint/pkg/goanalysis"
-<<<<<<< HEAD
-=======
-	"github.com/golangci/golangci-lint/pkg/lint/linter"
-	"github.com/golangci/golangci-lint/pkg/result"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 )
 
 const linterName = "godox"
 
 func New(settings *config.GodoxSettings) *goanalysis.Linter {
-<<<<<<< HEAD
-=======
-	var mu sync.Mutex
-	var resIssues []goanalysis.Issue
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	analyzer := &analysis.Analyzer{
 		Name: linterName,
 		Doc:  goanalysis.TheOnlyanalyzerDoc,
 		Run: func(pass *analysis.Pass) (any, error) {
-<<<<<<< HEAD
 			runGodox(pass, settings)
-=======
-			issues := runGodox(pass, settings)
-
-			if len(issues) == 0 {
-				return nil, nil
-			}
-
-			mu.Lock()
-			resIssues = append(resIssues, issues...)
-			mu.Unlock()
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 			return nil, nil
 		},
@@ -56,7 +29,6 @@
 		"Tool for detection of FIXME, TODO and other comment keywords",
 		[]*analysis.Analyzer{analyzer},
 		nil,
-<<<<<<< HEAD
 	).WithLoadMode(goanalysis.LoadModeSyntax)
 }
 
@@ -83,35 +55,4 @@ func runGodox(pass *analysis.Pass, settings *config.GodoxSettings) {
 		})
 	}
 }
-=======
-	).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
-		return resIssues
-	}).WithLoadMode(goanalysis.LoadModeSyntax)
-}
-
-func runGodox(pass *analysis.Pass, settings *config.GodoxSettings) []goanalysis.Issue {
-	var messages []godox.Message
-	for _, file := range pass.Files {
-		messages = append(messages, godox.Run(file, pass.Fset, settings.Keywords...)...)
-	}
-
-	if len(messages) == 0 {
-		return nil
-	}
-
-	issues := make([]goanalysis.Issue, len(messages))
-
-	for k, i := range messages {
-		issues[k] = goanalysis.NewIssue(&result.Issue{
-			Pos: token.Position{
-				Filename: i.Pos.Filename,
-				Line:     i.Pos.Line,
-			},
-			Text:       strings.TrimRight(i.Message, "\n"),
-			FromLinter: linterName,
-		}, pass)
-	}
-
-	return issues
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt/gofmt.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt/gofmt.go
index 1d993f5cc6..b6531d5314 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt/gofmt.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt/gofmt.go
@@ -2,10 +2,6 @@ package gofmt
 
 import (
 	"fmt"
-<<<<<<< HEAD
-=======
-	"sync"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	gofmtAPI "github.com/golangci/gofmt/gofmt"
 	"golang.org/x/tools/go/analysis"
@@ -19,12 +15,6 @@
 const linterName = "gofmt"
 
 func New(settings *config.GoFmtSettings) *goanalysis.Linter {
-<<<<<<< HEAD
-=======
-	var mu sync.Mutex
-	var resIssues []goanalysis.Issue
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	analyzer := &analysis.Analyzer{
 		Name: linterName,
 		Doc:  goanalysis.TheOnlyanalyzerDoc,
@@ -33,58 +23,27 @@
 	return goanalysis.NewLinter(
 		linterName,
-<<<<<<< HEAD
 		"Checks if the code is formatted according to 'gofmt' command.",
-=======
-		"Gofmt checks whether code was gofmt-ed. By default "+
-			"this tool runs with -s option to check for code simplification",
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		[]*analysis.Analyzer{analyzer},
 		nil,
 	).WithContextSetter(func(lintCtx *linter.Context) {
 		analyzer.Run = func(pass *analysis.Pass) (any, error) {
-<<<<<<< HEAD
 			err := runGofmt(lintCtx, pass, settings)
-=======
-			issues, err := runGofmt(lintCtx, pass, settings)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			if err != nil {
 				return nil, err
 			}
 
-<<<<<<< HEAD
 			return nil, nil
 		}
 	}).WithLoadMode(goanalysis.LoadModeSyntax)
 }
 
 func runGofmt(lintCtx *linter.Context, pass *analysis.Pass, settings *config.GoFmtSettings) error {
-=======
-			if len(issues) == 0 {
-				return nil, nil
-			}
-
-			mu.Lock()
-			resIssues = append(resIssues, issues...)
- mu.Unlock() - - return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) -} - -func runGofmt(lintCtx *linter.Context, pass *analysis.Pass, settings *config.GoFmtSettings) ([]goanalysis.Issue, error) { - fileNames := internal.GetFileNames(pass) - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var rewriteRules []gofmtAPI.RewriteRule for _, rule := range settings.RewriteRules { rewriteRules = append(rewriteRules, gofmtAPI.RewriteRule(rule)) } -<<<<<<< HEAD for _, file := range pass.Files { position, isGoFile := goanalysis.GetGoFilePosition(pass, file) if !isGoFile { @@ -94,20 +53,11 @@ func runGofmt(lintCtx *linter.Context, pass *analysis.Pass, settings *config.GoF diff, err := gofmtAPI.RunRewrite(position.Filename, settings.Simplify, rewriteRules) if err != nil { // TODO: skip return err -======= - var issues []goanalysis.Issue - - for _, f := range fileNames { - diff, err := gofmtAPI.RunRewrite(f, settings.Simplify, rewriteRules) - if err != nil { // TODO: skip - return nil, err ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if diff == nil { continue } -<<<<<<< HEAD err = internal.ExtractDiagnosticFromPatch(pass, file, string(diff), lintCtx) if err != nil { return fmt.Errorf("can't extract issues from gofmt diff output %q: %w", string(diff), err) @@ -115,29 +65,4 @@ func runGofmt(lintCtx *linter.Context, pass *analysis.Pass, settings *config.GoF } return nil -======= - is, err := internal.ExtractIssuesFromPatch(string(diff), lintCtx, linterName, getIssuedTextGoFmt) - if err != nil { - return nil, fmt.Errorf("can't extract issues from gofmt diff output %q: %w", string(diff), err) - } - - for i := range is { - issues = append(issues, goanalysis.NewIssue(&is[i], pass)) - } - } - - return issues, nil -} - -func getIssuedTextGoFmt(settings *config.LintersSettings) string { - text := "File is not `gofmt`-ed" - if settings.Gofmt.Simplify { - text += " with `-s`" - } - for _, rule := range settings.Gofmt.RewriteRules { - text += fmt.Sprintf(" `-r '%s -> %s'`", rule.Pattern, rule.Replacement) - } - - return text ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt/gofumpt.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt/gofumpt.go index a148d00bff..7a11a9074d 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt/gofumpt.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt/gofumpt.go @@ -6,10 +6,6 @@ import ( "io" "os" "strings" -<<<<<<< HEAD -======= - "sync" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/shazow/go-diff/difflib" "golang.org/x/tools/go/analysis" @@ -28,12 +24,6 @@ type differ interface { } func New(settings *config.GofumptSettings) *goanalysis.Linter { -<<<<<<< HEAD -======= - var mu sync.Mutex - var resIssues []goanalysis.Issue - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff := difflib.New() var options format.Options @@ -54,25 +44,16 @@ func New(settings *config.GofumptSettings) *goanalysis.Linter { return goanalysis.NewLinter( linterName, -<<<<<<< HEAD "Checks if code and import statements are formatted, with additional rules.", -======= - "Gofumpt checks whether code was gofumpt-ed.", ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) []*analysis.Analyzer{analyzer}, nil, ).WithContextSetter(func(lintCtx *linter.Context) { analyzer.Run = func(pass *analysis.Pass) (any, error) 
{ -<<<<<<< HEAD err := runGofumpt(lintCtx, pass, diff, options) -======= - issues, err := runGofumpt(lintCtx, pass, diff, options) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } -<<<<<<< HEAD return nil, nil } }).WithLoadMode(goanalysis.LoadModeSyntax) @@ -88,37 +69,10 @@ func runGofumpt(lintCtx *linter.Context, pass *analysis.Pass, diff differ, optio input, err := os.ReadFile(position.Filename) if err != nil { return fmt.Errorf("unable to open file %s: %w", position.Filename, err) -======= - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) - mu.Unlock() - - return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) -} - -func runGofumpt(lintCtx *linter.Context, pass *analysis.Pass, diff differ, options format.Options) ([]goanalysis.Issue, error) { - fileNames := internal.GetFileNames(pass) - - var issues []goanalysis.Issue - - for _, f := range fileNames { - input, err := os.ReadFile(f) - if err != nil { - return nil, fmt.Errorf("unable to open file %s: %w", f, err) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } output, err := format.Source(input, options) if err != nil { -<<<<<<< HEAD return fmt.Errorf("error while running gofumpt: %w", err) } @@ -135,36 +89,11 @@ func runGofumpt(lintCtx *linter.Context, pass *analysis.Pass, diff differ, optio err = internal.ExtractDiagnosticFromPatch(pass, file, diff, lintCtx) if err != nil { return fmt.Errorf("can't extract issues from gofumpt diff output %q: %w", diff, err) -======= - return nil, fmt.Errorf("error while running gofumpt: %w", err) - } - - if !bytes.Equal(input, output) { - out := bytes.NewBufferString(fmt.Sprintf("--- %[1]s\n+++ %[1]s\n", f)) - - err := diff.Diff(out, bytes.NewReader(input), bytes.NewReader(output)) - if err != nil { - return nil, fmt.Errorf("error while running gofumpt: %w", err) - } - - diff := out.String() - is, err := internal.ExtractIssuesFromPatch(diff, lintCtx, linterName, getIssuedTextGoFumpt) - if err != nil { - return nil, fmt.Errorf("can't extract issues from gofumpt diff output %q: %w", diff, err) - } - - for i := range is { - issues = append(issues, goanalysis.NewIssue(&is[i], pass)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } } -<<<<<<< HEAD return nil -======= - return issues, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func getLangVersion(settings *config.GofumptSettings) string { @@ -175,16 +104,3 @@ func getLangVersion(settings *config.GofumptSettings) string { return "go" + strings.TrimPrefix(settings.LangVersion, "go") } -<<<<<<< HEAD -======= - -func getIssuedTextGoFumpt(settings *config.LintersSettings) string { - text := "File is not `gofumpt`-ed" - - if settings.Gofumpt.ExtraRules { - text += " with `-extra`" - } - - return text -} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader/goheader.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader/goheader.go index 330207a9e2..5043381143 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader/goheader.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader/goheader.go @@ -2,33 +2,18 @@ package goheader import ( "go/token" -<<<<<<< HEAD "strings" -======= - "sync" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) goheader "github.com/denis-tingaikin/go-header" 
"golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" -<<<<<<< HEAD -======= - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const linterName = "goheader" func New(settings *config.GoHeaderSettings) *goanalysis.Linter { -<<<<<<< HEAD -======= - var mu sync.Mutex - var resIssues []goanalysis.Issue - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) conf := &goheader.Configuration{} if settings != nil { conf = &goheader.Configuration{ @@ -42,26 +27,11 @@ func New(settings *config.GoHeaderSettings) *goanalysis.Linter { Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { -<<<<<<< HEAD err := runGoHeader(pass, conf) -======= - issues, err := runGoHeader(pass, conf) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } -<<<<<<< HEAD -======= - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) - mu.Unlock() - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil, nil }, } @@ -71,7 +41,6 @@ func New(settings *config.GoHeaderSettings) *goanalysis.Linter { "Checks if file header matches to pattern", []*analysis.Analyzer{analyzer}, nil, -<<<<<<< HEAD ).WithLoadMode(goanalysis.LoadModeSyntax) } @@ -79,40 +48,20 @@ func runGoHeader(pass *analysis.Pass, conf *goheader.Configuration) error { if conf.TemplatePath == "" && conf.Template == "" { // User did not pass template, so then do not run go-header linter return nil -======= - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) -} - -func runGoHeader(pass *analysis.Pass, conf *goheader.Configuration) ([]goanalysis.Issue, error) { - if conf.TemplatePath == "" && conf.Template == "" { - // User did not pass template, so then do not run go-header linter - return nil, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } template, err := conf.GetTemplate() if err != nil { -<<<<<<< HEAD return err -======= - return nil, err ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } values, err := conf.GetValues() if err != nil { -<<<<<<< HEAD return err -======= - return nil, err ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } a := goheader.New(goheader.WithTemplate(template), goheader.WithValues(values)) -<<<<<<< HEAD for _, file := range pass.Files { position, isGoFile := goanalysis.GetGoFilePosition(pass, file) if !isGoFile { @@ -179,41 +128,4 @@ func runGoHeader(pass *analysis.Pass, conf *goheader.Configuration) ([]goanalysi } return nil -======= - var issues []goanalysis.Issue - for _, file := range pass.Files { - path := pass.Fset.Position(file.Pos()).Filename - - i := a.Analyze(&goheader.Target{File: file, Path: path}) - - if i == nil { - continue - } - - issue := result.Issue{ - Pos: token.Position{ - Line: i.Location().Line + 1, - Column: i.Location().Position, - Filename: path, - }, - Text: i.Message(), - FromLinter: linterName, - } - - if fix := i.Fix(); fix != nil { - issue.LineRange = &result.Range{ - From: issue.Line(), - To: issue.Line() + len(fix.Actual) - 1, - } - issue.Replacement = &result.Replacement{ - NeedOnlyDelete: len(fix.Expected) == 0, - NewLines: fix.Expected, - } - } - - issues = append(issues, goanalysis.NewIssue(&issue, pass)) - } - - return issues, nil ->>>>>>> 70e0318b1 ([WIP] add 
archivista storage backend) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports/goimports.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports/goimports.go index 433130430d..6ddc9a75b1 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports/goimports.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports/goimports.go @@ -2,10 +2,6 @@ package goimports import ( "fmt" -<<<<<<< HEAD -======= - "sync" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) goimportsAPI "github.com/golangci/gofmt/goimports" "golang.org/x/tools/go/analysis" @@ -20,12 +16,6 @@ import ( const linterName = "goimports" func New(settings *config.GoImportsSettings) *goanalysis.Linter { -<<<<<<< HEAD -======= - var mu sync.Mutex - var resIssues []goanalysis.Issue - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, @@ -34,28 +24,18 @@ func New(settings *config.GoImportsSettings) *goanalysis.Linter { return goanalysis.NewLinter( linterName, -<<<<<<< HEAD "Checks if the code and import statements are formatted according to the 'goimports' command.", -======= - "Check import statements are formatted according to the 'goimport' command. "+ - "Reformat imports in autofix mode.", ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) []*analysis.Analyzer{analyzer}, nil, ).WithContextSetter(func(lintCtx *linter.Context) { imports.LocalPrefix = settings.LocalPrefixes analyzer.Run = func(pass *analysis.Pass) (any, error) { -<<<<<<< HEAD err := runGoImports(lintCtx, pass) -======= - issues, err := runGoImports(lintCtx, pass) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } -<<<<<<< HEAD return nil, nil } }).WithLoadMode(goanalysis.LoadModeSyntax) @@ -71,38 +51,11 @@ func runGoImports(lintCtx *linter.Context, pass *analysis.Pass) error { diff, err := goimportsAPI.Run(position.Filename) if err != nil { // TODO: skip return err -======= - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) 
- mu.Unlock() - - return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) -} - -func runGoImports(lintCtx *linter.Context, pass *analysis.Pass) ([]goanalysis.Issue, error) { - fileNames := internal.GetFileNames(pass) - - var issues []goanalysis.Issue - - for _, f := range fileNames { - diff, err := goimportsAPI.Run(f) - if err != nil { // TODO: skip - return nil, err ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if diff == nil { continue } -<<<<<<< HEAD err = internal.ExtractDiagnosticFromPatch(pass, file, string(diff), lintCtx) if err != nil { return fmt.Errorf("can't extract issues from goimports diff output %q: %w", string(diff), err) @@ -110,27 +63,4 @@ func runGoImports(lintCtx *linter.Context, pass *analysis.Pass) ([]goanalysis.Is } return nil -======= - is, err := internal.ExtractIssuesFromPatch(string(diff), lintCtx, linterName, getIssuedTextGoImports) - if err != nil { - return nil, fmt.Errorf("can't extract issues from gofmt diff output %q: %w", string(diff), err) - } - - for i := range is { - issues = append(issues, goanalysis.NewIssue(&is[i], pass)) - } - } - - return issues, nil -} - -func getIssuedTextGoImports(settings *config.LintersSettings) string { - text := "File is not `goimports`-ed" - - if settings.Goimports.LocalPrefixes != "" { - text += " with -local " + settings.Goimports.LocalPrefixes - } - - return text ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomoddirectives/gomoddirectives.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomoddirectives/gomoddirectives.go index 9d09239750..f8f47ba2b4 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomoddirectives/gomoddirectives.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomoddirectives/gomoddirectives.go @@ -1,10 +1,7 @@ package gomoddirectives import ( -<<<<<<< HEAD "regexp" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "sync" "github.com/ldez/gomoddirectives" @@ -12,10 +9,7 @@ import ( "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" -<<<<<<< HEAD "github.com/golangci/golangci-lint/pkg/golinters/internal" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" ) @@ -32,7 +26,6 @@ func New(settings *config.GoModDirectivesSettings) *goanalysis.Linter { opts.ReplaceAllowList = settings.ReplaceAllowList opts.RetractAllowNoExplanation = settings.RetractAllowNoExplanation opts.ExcludeForbidden = settings.ExcludeForbidden -<<<<<<< HEAD opts.ToolchainForbidden = settings.ToolchainForbidden opts.ToolForbidden = settings.ToolForbidden opts.GoDebugForbidden = settings.GoDebugForbidden @@ -54,8 +47,6 @@ func New(settings *config.GoModDirectivesSettings) *goanalysis.Linter { opts.GoVersionPattern = exp } } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } analyzer := &analysis.Analyzer{ @@ -72,11 +63,7 @@ func New(settings *config.GoModDirectivesSettings) *goanalysis.Linter { ).WithContextSetter(func(lintCtx *linter.Context) { analyzer.Run = func(pass *analysis.Pass) (any, error) { once.Do(func() { -<<<<<<< HEAD results, err := gomoddirectives.AnalyzePass(pass, opts) -======= - results, err := gomoddirectives.Analyze(opts) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { 
lintCtx.Log.Warnf("running %s failed: %s: "+ "if you are not using go modules it is suggested to disable this linter", linterName, err) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard/gomodguard.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard/gomodguard.go index 1194a74b85..8bddebc162 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard/gomodguard.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard/gomodguard.go @@ -73,11 +73,7 @@ func New(settings *config.GoModGuardSettings) *goanalysis.Linter { } analyzer.Run = func(pass *analysis.Pass) (any, error) { -<<<<<<< HEAD gomodguardIssues := processor.ProcessFiles(internal.GetGoFileNames(pass)) -======= - gomodguardIssues := processor.ProcessFiles(internal.GetFileNames(pass)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) mu.Lock() defer mu.Unlock() diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec/gosec.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec/gosec.go index cc8fd26ed6..6b46beaccf 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec/gosec.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec/gosec.go @@ -184,7 +184,6 @@ func convertGosecGlobals(globalOptionFromConfig any, conf gosec.Config) { } for k, v := range globalOptionMap { -<<<<<<< HEAD option := gosec.GlobalOption(k) // Set nosec global option only if the value is true @@ -194,9 +193,6 @@ func convertGosecGlobals(globalOptionFromConfig any, conf gosec.Config) { } conf.SetGlobal(option, fmt.Sprintf("%v", v)) -======= - conf.SetGlobal(gosec.GlobalOption(k), fmt.Sprintf("%v", v)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosmopolitan/gosmopolitan.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosmopolitan/gosmopolitan.go index 77f9ae57be..bf9b19f129 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosmopolitan/gosmopolitan.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosmopolitan/gosmopolitan.go @@ -10,7 +10,6 @@ import ( "github.com/golangci/golangci-lint/pkg/goanalysis" ) -<<<<<<< HEAD func New(settings *config.GosmopolitanSettings) *goanalysis.Linter { a := gosmopolitan.NewAnalyzer() @@ -21,18 +20,6 @@ func New(settings *config.GosmopolitanSettings) *goanalysis.Linter { "escapehatches": strings.Join(settings.EscapeHatches, ","), "lookattests": !settings.IgnoreTests, "watchforscripts": strings.Join(settings.WatchForScripts, ","), -======= -func New(s *config.GosmopolitanSettings) *goanalysis.Linter { - a := gosmopolitan.NewAnalyzer() - - cfgMap := map[string]map[string]any{} - if s != nil { - cfgMap[a.Name] = map[string]any{ - "allowtimelocal": s.AllowTimeLocal, - "escapehatches": strings.Join(s.EscapeHatches, ","), - "lookattests": !s.IgnoreTests, - "watchforscripts": strings.Join(s.WatchForScripts, ","), ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -40,10 +27,6 @@ func New(s *config.GosmopolitanSettings) *goanalysis.Linter { a.Name, a.Doc, []*analysis.Analyzer{a}, -<<<<<<< HEAD cfg, -======= - cfgMap, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/govet/govet.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/govet/govet.go index e6155e5049..73bf63af73 100644 --- 
a/vendor/github.com/golangci/golangci-lint/pkg/golinters/govet/govet.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/govet/govet.go @@ -40,10 +40,7 @@ import ( "golang.org/x/tools/go/analysis/passes/slog" "golang.org/x/tools/go/analysis/passes/sortslice" "golang.org/x/tools/go/analysis/passes/stdmethods" -<<<<<<< HEAD "golang.org/x/tools/go/analysis/passes/stdversion" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "golang.org/x/tools/go/analysis/passes/stringintconv" "golang.org/x/tools/go/analysis/passes/structtag" "golang.org/x/tools/go/analysis/passes/testinggoroutine" @@ -54,10 +51,7 @@ import ( "golang.org/x/tools/go/analysis/passes/unsafeptr" "golang.org/x/tools/go/analysis/passes/unusedresult" "golang.org/x/tools/go/analysis/passes/unusedwrite" -<<<<<<< HEAD "golang.org/x/tools/go/analysis/passes/waitgroup" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" @@ -97,10 +91,7 @@ var ( slog.Analyzer, sortslice.Analyzer, stdmethods.Analyzer, -<<<<<<< HEAD stdversion.Analyzer, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) stringintconv.Analyzer, structtag.Analyzer, testinggoroutine.Analyzer, @@ -111,16 +102,10 @@ var ( unsafeptr.Analyzer, unusedresult.Analyzer, unusedwrite.Analyzer, -<<<<<<< HEAD waitgroup.Analyzer, } // https://github.com/golang/go/blob/go1.23.0/src/cmd/vet/main.go#L55-L87 -======= - } - - // https://github.com/golang/go/blob/b56645a87b28840a180d64077877cb46570b4176/src/cmd/vet/main.go#L49-L81 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) defaultAnalyzers = []*analysis.Analyzer{ appends.Analyzer, asmdecl.Analyzer, @@ -145,10 +130,7 @@ var ( sigchanyzer.Analyzer, slog.Analyzer, stdmethods.Analyzer, -<<<<<<< HEAD stdversion.Analyzer, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) stringintconv.Analyzer, structtag.Analyzer, testinggoroutine.Analyzer, @@ -208,11 +190,7 @@ func isAnalyzerEnabled(name string, cfg *config.GovetSettings, defaultAnalyzers } // Keeping for backward compatibility. 
-<<<<<<< HEAD if cfg.CheckShadowing != nil && *cfg.CheckShadowing && name == shadow.Analyzer.Name { -======= - if cfg.CheckShadowing && name == shadow.Analyzer.Name { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return true } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/grouper/grouper.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/grouper/grouper.go index 8e1e66ed47..e0a3f794a7 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/grouper/grouper.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/grouper/grouper.go @@ -11,15 +11,9 @@ import ( func New(settings *config.GrouperSettings) *goanalysis.Linter { a := grouper.New() -<<<<<<< HEAD cfg := map[string]map[string]any{} if settings != nil { cfg[a.Name] = map[string]any{ -======= - linterCfg := map[string]map[string]any{} - if settings != nil { - linterCfg[a.Name] = map[string]any{ ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "const-require-single-const": settings.ConstRequireSingleConst, "const-require-grouping": settings.ConstRequireGrouping, "import-require-single-import": settings.ImportRequireSingleImport, @@ -35,10 +29,6 @@ func New(settings *config.GrouperSettings) *goanalysis.Linter { a.Name, "Analyze expression groups.", []*analysis.Analyzer{a}, -<<<<<<< HEAD cfg, -======= - linterCfg, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/importas/importas.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/importas/importas.go index 833fb2d337..b7c6c35aea 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/importas/importas.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/importas/importas.go @@ -51,16 +51,11 @@ func New(settings *config.ImportAsSettings) *goanalysis.Linter { uniqPackages[a.Pkg] = a } -<<<<<<< HEAD // Skips the duplication check when: // - the alias is empty. // - the alias is a regular expression replacement pattern (ie. contains `$`). v, ok := uniqAliases[a.Alias] if ok && a.Alias != "" && !strings.Contains(a.Alias, "$") { -======= - // skip the duplication check when the alias is a regular expression replacement pattern (ie. contains `$`). 
- if v, ok := uniqAliases[a.Alias]; ok && !strings.Contains(a.Alias, "$") { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) lintCtx.Log.Errorf("invalid configuration, multiple packages with the same alias: alias=%s packages=[%s,%s]", a.Alias, a.Pkg, v.Pkg) } else { uniqAliases[a.Alias] = a diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/diff.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/diff.go index e72f7d4948..8e5e1f0e73 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/diff.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/diff.go @@ -3,7 +3,6 @@ package internal import ( "bytes" "fmt" -<<<<<<< HEAD "go/ast" "go/token" "slices" @@ -20,22 +19,6 @@ import ( type Change struct { From, To int NewLines []string -======= - "go/token" - "strings" - - diffpkg "github.com/sourcegraph/go-diff/diff" - - "github.com/golangci/golangci-lint/pkg/config" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/logutils" - "github.com/golangci/golangci-lint/pkg/result" -) - -type Change struct { - LineRange result.Range - Replacement result.Replacement ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type diffLineType string @@ -46,11 +29,6 @@ const ( diffLineDeleted diffLineType = "deleted" ) -<<<<<<< HEAD -======= -type fmtTextFormatter func(settings *config.LintersSettings) string - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type diffLine struct { originalNumber int // 1-based original line number typ diffLineType @@ -66,7 +44,6 @@ type hunkChangesParser struct { log logutils.Log -<<<<<<< HEAD changes []Change } @@ -109,60 +86,6 @@ func (p *hunkChangesParser) parse(h *diffpkg.Hunk) []Change { } func (p *hunkChangesParser) handleOriginalLine(lines []diffLine, line diffLine, i *int) { -======= - lines []diffLine - - ret []Change -} - -func (p *hunkChangesParser) parseDiffLines(h *diffpkg.Hunk) { - lines := bytes.Split(h.Body, []byte{'\n'}) - currentOriginalLineNumber := int(h.OrigStartLine) - var ret []diffLine - - for i, line := range lines { - dl := diffLine{ - originalNumber: currentOriginalLineNumber, - } - - lineStr := string(line) - - if strings.HasPrefix(lineStr, "-") { - dl.typ = diffLineDeleted - dl.data = strings.TrimPrefix(lineStr, "-") - currentOriginalLineNumber++ - } else if strings.HasPrefix(lineStr, "+") { - dl.typ = diffLineAdded - dl.data = strings.TrimPrefix(lineStr, "+") - } else { - if i == len(lines)-1 && lineStr == "" { - // handle last \n: don't add an empty original line - break - } - - dl.typ = diffLineOriginal - dl.data = strings.TrimPrefix(lineStr, " ") - currentOriginalLineNumber++ - } - - ret = append(ret, dl) - } - - // if > 0, then the original file had a 'No newline at end of file' mark - if h.OrigNoNewlineAt > 0 { - dl := diffLine{ - originalNumber: currentOriginalLineNumber + 1, - typ: diffLineAdded, - data: "", - } - ret = append(ret, dl) - } - - p.lines = ret -} - -func (p *hunkChangesParser) handleOriginalLine(line diffLine, i *int) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if len(p.replacementLinesToPrepend) == 0 { p.lastOriginalLine = &line *i++ @@ -176,7 +99,6 @@ func (p *hunkChangesParser) handleOriginalLine(line diffLine, i *int) { *i++ var followingAddedLines []string -<<<<<<< HEAD for ; *i < len(lines) && lines[*i].typ == diffLineAdded; *i++ { followingAddedLines = append(followingAddedLines, lines[*i].data) } @@ -188,28 +110,12 @@ func (p *hunkChangesParser) 
handleOriginalLine(line diffLine, i *int) { } p.changes = append(p.changes, change) -======= - for ; *i < len(p.lines) && p.lines[*i].typ == diffLineAdded; *i++ { - followingAddedLines = append(followingAddedLines, p.lines[*i].data) - } - - p.ret = append(p.ret, Change{ - LineRange: result.Range{ - From: line.originalNumber, - To: line.originalNumber, - }, - Replacement: result.Replacement{ - NewLines: append(p.replacementLinesToPrepend, append([]string{line.data}, followingAddedLines...)...), - }, - }) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.replacementLinesToPrepend = nil p.lastOriginalLine = &line } func (p *hunkChangesParser) handleDeletedLines(deletedLines []diffLine, addedLines []string) { change := Change{ -<<<<<<< HEAD From: deletedLines[0].originalNumber, To: deletedLines[len(deletedLines)-1].originalNumber, } @@ -226,34 +132,6 @@ func (p *hunkChangesParser) handleDeletedLines(deletedLines []diffLine, addedLin } p.changes = append(p.changes, change) -======= - LineRange: result.Range{ - From: deletedLines[0].originalNumber, - To: deletedLines[len(deletedLines)-1].originalNumber, - }, - } - - if len(addedLines) != 0 { - change.Replacement.NewLines = append([]string{}, p.replacementLinesToPrepend...) - change.Replacement.NewLines = append(change.Replacement.NewLines, addedLines...) - if len(p.replacementLinesToPrepend) != 0 { - p.replacementLinesToPrepend = nil - } - - p.ret = append(p.ret, change) - return - } - - // delete-only change with possible prepending - if len(p.replacementLinesToPrepend) != 0 { - change.Replacement.NewLines = p.replacementLinesToPrepend - p.replacementLinesToPrepend = nil - } else { - change.Replacement.NeedOnlyDelete = true - } - - p.ret = append(p.ret, change) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (p *hunkChangesParser) handleAddedOnlyLines(addedLines []string) { @@ -266,15 +144,11 @@ func (p *hunkChangesParser) handleAddedOnlyLines(addedLines []string) { // 2. ... 
p.replacementLinesToPrepend = addedLines -<<<<<<< HEAD -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return } // add-only change merged into the last original line with possible prepending -<<<<<<< HEAD change := Change{ From: p.lastOriginalLine.originalNumber, To: p.lastOriginalLine.originalNumber, @@ -356,68 +230,6 @@ func ExtractDiagnosticFromPatch( adjLine := pass.Fset.PositionFor(file.Pos(), false).Line - pass.Fset.PositionFor(file.Pos(), true).Line -======= - p.ret = append(p.ret, Change{ - LineRange: result.Range{ - From: p.lastOriginalLine.originalNumber, - To: p.lastOriginalLine.originalNumber, - }, - Replacement: result.Replacement{ - NewLines: append(p.replacementLinesToPrepend, append([]string{p.lastOriginalLine.data}, addedLines...)...), - }, - }) - p.replacementLinesToPrepend = nil -} - -func (p *hunkChangesParser) parse(h *diffpkg.Hunk) []Change { - p.parseDiffLines(h) - - for i := 0; i < len(p.lines); { - line := p.lines[i] - if line.typ == diffLineOriginal { - p.handleOriginalLine(line, &i) - continue - } - - var deletedLines []diffLine - for ; i < len(p.lines) && p.lines[i].typ == diffLineDeleted; i++ { - deletedLines = append(deletedLines, p.lines[i]) - } - - var addedLines []string - for ; i < len(p.lines) && p.lines[i].typ == diffLineAdded; i++ { - addedLines = append(addedLines, p.lines[i].data) - } - - if len(deletedLines) != 0 { - p.handleDeletedLines(deletedLines, addedLines) - continue - } - - // no deletions, only additions - p.handleAddedOnlyLines(addedLines) - } - - if len(p.replacementLinesToPrepend) != 0 { - p.log.Infof("The diff contains only additions: no original or deleted lines: %#v", p.lines) - return nil - } - - return p.ret -} - -func ExtractIssuesFromPatch(patch string, lintCtx *linter.Context, linterName string, formatter fmtTextFormatter) ([]result.Issue, error) { - diffs, err := diffpkg.ParseMultiFileDiff([]byte(patch)) - if err != nil { - return nil, fmt.Errorf("can't parse patch: %w", err) - } - - if len(diffs) == 0 { - return nil, fmt.Errorf("got no diffs from patch parser: %v", patch) - } - - var issues []result.Issue ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, d := range diffs { if len(d.Hunks) == 0 { lintCtx.Log.Warnf("Got no hunks in diff %+v", d) @@ -430,29 +242,11 @@ func ExtractIssuesFromPatch(patch string, lintCtx *linter.Context, linterName st changes := p.parse(hunk) for _, change := range changes { -<<<<<<< HEAD pass.Report(toDiagnostic(ft, change, adjLine)) -======= - i := result.Issue{ - FromLinter: linterName, - Pos: token.Position{ - Filename: d.NewName, - Line: change.LineRange.From, - }, - Text: formatter(lintCtx.Settings()), - Replacement: &change.Replacement, - } - if change.LineRange.From != change.LineRange.To { - i.LineRange = &change.LineRange - } - - issues = append(issues, i) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } } -<<<<<<< HEAD return nil } @@ -478,7 +272,4 @@ func toDiagnostic(ft *token.File, change Change, adjLine int) analysis.Diagnosti }}, }}, } -======= - return issues, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/util.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/util.go index 943cf57181..7525f2f2c5 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/util.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/util.go @@ -2,19 +2,12 @@ package internal import ( "fmt" -<<<<<<< HEAD -======= 
- "path/filepath" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "strings" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" -<<<<<<< HEAD "github.com/golangci/golangci-lint/pkg/goanalysis" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) func FormatCode(code string, _ *config.Config) string { @@ -25,7 +18,6 @@ func FormatCode(code string, _ *config.Config) string { return fmt.Sprintf("`%s`", code) } -<<<<<<< HEAD func GetGoFileNames(pass *analysis.Pass) []string { var filenames []string @@ -39,18 +31,4 @@ func GetGoFileNames(pass *analysis.Pass) []string { } return filenames -======= -func GetFileNames(pass *analysis.Pass) []string { - var fileNames []string - for _, f := range pass.Files { - fileName := pass.Fset.PositionFor(f.Pos(), true).Filename - ext := filepath.Ext(fileName) - if ext != "" && ext != ".go" { - // position has been adjusted to a non-go file, revert to original file - fileName = pass.Fset.PositionFor(f.Pos(), false).Filename - } - fileNames = append(fileNames, fileName) - } - return fileNames ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll/lll.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll/lll.go index 6fa0d520fc..bad3b0c4e2 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll/lll.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll/lll.go @@ -4,28 +4,15 @@ import ( "bufio" "errors" "fmt" -<<<<<<< HEAD "go/ast" "os" "strings" -======= - "go/token" - "os" - "strings" - "sync" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "unicode/utf8" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" -<<<<<<< HEAD -======= - "github.com/golangci/golangci-lint/pkg/golinters/internal" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const linterName = "lll" @@ -33,36 +20,15 @@ const linterName = "lll" const goCommentDirectivePrefix = "//go:" func New(settings *config.LllSettings) *goanalysis.Linter { -<<<<<<< HEAD -======= - var mu sync.Mutex - var resIssues []goanalysis.Issue - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { -<<<<<<< HEAD err := runLll(pass, settings) -======= - issues, err := runLll(pass, settings) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } -<<<<<<< HEAD -======= - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) 
- mu.Unlock() - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil, nil }, } @@ -72,7 +38,6 @@ func New(settings *config.LllSettings) *goanalysis.Linter { "Reports long lines", []*analysis.Analyzer{analyzer}, nil, -<<<<<<< HEAD ).WithLoadMode(goanalysis.LoadModeSyntax) } @@ -106,42 +71,6 @@ func getLLLIssuesForFile(pass *analysis.Pass, file *ast.File, maxLineLen int, ta ft := pass.Fset.File(file.Pos()) -======= - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) -} - -func runLll(pass *analysis.Pass, settings *config.LllSettings) ([]goanalysis.Issue, error) { - fileNames := internal.GetFileNames(pass) - - spaces := strings.Repeat(" ", settings.TabWidth) - - var issues []goanalysis.Issue - for _, f := range fileNames { - lintIssues, err := getLLLIssuesForFile(f, settings.LineLength, spaces) - if err != nil { - return nil, err - } - - for i := range lintIssues { - issues = append(issues, goanalysis.NewIssue(&lintIssues[i], pass)) - } - } - - return issues, nil -} - -func getLLLIssuesForFile(filename string, maxLineLen int, tabSpaces string) ([]result.Issue, error) { - var res []result.Issue - - f, err := os.Open(filename) - if err != nil { - return nil, fmt.Errorf("can't open file %s: %w", filename, err) - } - defer f.Close() - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) lineNumber := 0 multiImportEnabled := false @@ -171,26 +100,15 @@ func getLLLIssuesForFile(filename string, maxLineLen int, tabSpaces string) ([]r lineLen := utf8.RuneCountInString(line) if lineLen > maxLineLen { -<<<<<<< HEAD pass.Report(analysis.Diagnostic{ Pos: ft.LineStart(goanalysis.AdjustPos(lineNumber, nonAdjPosition.Line, position.Line)), Message: fmt.Sprintf("The line is %d characters long, which exceeds the maximum of %d characters.", lineLen, maxLineLen), -======= - res = append(res, result.Issue{ - Pos: token.Position{ - Filename: filename, - Line: lineNumber, - }, - Text: fmt.Sprintf("the line is %d characters long, which exceeds the maximum of %d characters.", lineLen, maxLineLen), - FromLinter: linterName, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }) } } if err := scanner.Err(); err != nil { -<<<<<<< HEAD // scanner.Scan() might fail if the line is longer than bufio.MaxScanTokenSize // In the case where the specified maxLineLen is smaller than bufio.MaxScanTokenSize // we can return this line as a long line instead of returning an error. @@ -212,32 +130,4 @@ func getLLLIssuesForFile(filename string, maxLineLen int, tabSpaces string) ([]r } return nil -======= - if errors.Is(err, bufio.ErrTooLong) && maxLineLen < bufio.MaxScanTokenSize { - // scanner.Scan() might fail if the line is longer than bufio.MaxScanTokenSize - // In the case where the specified maxLineLen is smaller than bufio.MaxScanTokenSize - // we can return this line as a long line instead of returning an error. - // The reason for this change is that this case might happen with autogenerated files - // The go-bindata tool for instance might generate a file with a very long line. - // In this case, as it's an auto generated file, the warning returned by lll will - // be ignored. - // But if we return a linter error here, and this error happens for an autogenerated - // file the error will be discarded (fine), but all the subsequent errors for lll will - // be discarded for other files, and we'll miss legit error. 
- res = append(res, result.Issue{ - Pos: token.Position{ - Filename: filename, - Line: lineNumber, - Column: 1, - }, - Text: fmt.Sprintf("line is more than %d characters", bufio.MaxScanTokenSize), - FromLinter: linterName, - }) - } else { - return nil, fmt.Errorf("can't scan file %s: %w", filename, err) - } - } - - return res, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/loggercheck/loggercheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/loggercheck/loggercheck.go index 96c4af7ee7..84c8d73635 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/loggercheck/loggercheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/loggercheck/loggercheck.go @@ -22,12 +22,9 @@ func New(settings *config.LoggerCheckSettings) *goanalysis.Linter { if !settings.Logr { disable = append(disable, "logr") } -<<<<<<< HEAD if !settings.Slog { disable = append(disable, "slog") } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !settings.Zap { disable = append(disable, "zap") } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/maintidx/maintidx.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/maintidx/maintidx.go index 4dba674fb9..799c51c874 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/maintidx/maintidx.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/maintidx/maintidx.go @@ -8,7 +8,6 @@ import ( "github.com/golangci/golangci-lint/pkg/goanalysis" ) -<<<<<<< HEAD func New(settings *config.MaintIdxSettings) *goanalysis.Linter { analyzer := maintidx.Analyzer @@ -19,18 +18,6 @@ func New(settings *config.MaintIdxSettings) *goanalysis.Linter { if settings != nil { cfg[analyzer.Name] = map[string]any{ "under": settings.Under, -======= -func New(cfg *config.MaintIdxSettings) *goanalysis.Linter { - analyzer := maintidx.Analyzer - - cfgMap := map[string]map[string]any{ - analyzer.Name: {"under": 20}, - } - - if cfg != nil { - cfgMap[analyzer.Name] = map[string]any{ - "under": cfg.Under, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -38,10 +25,6 @@ func New(cfg *config.MaintIdxSettings) *goanalysis.Linter { analyzer.Name, analyzer.Doc, []*analysis.Analyzer{analyzer}, -<<<<<<< HEAD cfg, -======= - cfgMap, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/makezero/makezero.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/makezero/makezero.go index 3cfd3fac89..b5ab4515e5 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/makezero/makezero.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/makezero/makezero.go @@ -2,56 +2,26 @@ package makezero import ( "fmt" -<<<<<<< HEAD -======= - "sync" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/ashanbrown/makezero/makezero" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" -<<<<<<< HEAD -======= - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const linterName = "makezero" func New(settings *config.MakezeroSettings) *goanalysis.Linter { -<<<<<<< HEAD -======= - var mu sync.Mutex - var resIssues []goanalysis.Issue - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) analyzer := 
&analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { -<<<<<<< HEAD err := runMakeZero(pass, settings) -======= - issues, err := runMakeZero(pass, settings) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } -<<<<<<< HEAD -======= - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) - mu.Unlock() - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil, nil }, } @@ -61,7 +31,6 @@ func New(settings *config.MakezeroSettings) *goanalysis.Linter { "Finds slice declarations with non-zero initial length", []*analysis.Analyzer{analyzer}, nil, -<<<<<<< HEAD ).WithLoadMode(goanalysis.LoadModeTypesInfo) } @@ -83,32 +52,4 @@ func runMakeZero(pass *analysis.Pass, settings *config.MakezeroSettings) error { } return nil -======= - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeTypesInfo) -} - -func runMakeZero(pass *analysis.Pass, settings *config.MakezeroSettings) ([]goanalysis.Issue, error) { - zero := makezero.NewLinter(settings.Always) - - var issues []goanalysis.Issue - - for _, file := range pass.Files { - hints, err := zero.Run(pass.Fset, pass.TypesInfo, file) - if err != nil { - return nil, fmt.Errorf("makezero linter failed on file %q: %w", file.Name.String(), err) - } - - for _, hint := range hints { - issues = append(issues, goanalysis.NewIssue(&result.Issue{ - Pos: hint.Position(), - Text: hint.Details(), - FromLinter: linterName, - }, pass)) - } - } - - return issues, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/mirror/mirror.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/mirror/mirror.go index ad57aa6a49..e15dfa3a5a 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/mirror/mirror.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/mirror/mirror.go @@ -1,16 +1,10 @@ package mirror import ( -<<<<<<< HEAD -======= - "sync" - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/butuzov/mirror" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/goanalysis" -<<<<<<< HEAD ) func New() *goanalysis.Linter { @@ -33,66 +27,4 @@ func New() *goanalysis.Linter { []*analysis.Analyzer{a}, linterCfg, ).WithLoadMode(goanalysis.LoadModeTypesInfo) -======= - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" -) - -func New() *goanalysis.Linter { - var ( - mu sync.Mutex - issues []goanalysis.Issue - ) - - a := mirror.NewAnalyzer() - a.Run = func(pass *analysis.Pass) (any, error) { - // mirror only lints test files if the `--with-tests` flag is passed, - // so we pass the `with-tests` flag as true to the analyzer before running it. - // This can be turned off by using the regular golangci-lint flags such as `--tests` or `--skip-files` - // or can be disabled per linter via exclude rules. 
- // (see https://github.com/golangci/golangci-lint/issues/2527#issuecomment-1023707262) - violations := mirror.Run(pass, true) - - if len(violations) == 0 { - return nil, nil - } - - for index := range violations { - i := violations[index].Issue(pass.Fset) - - issue := result.Issue{ - FromLinter: a.Name, - Text: i.Message, - Pos: i.Start, - } - - if i.InlineFix != "" { - issue.Replacement = &result.Replacement{ - Inline: &result.InlineFix{ - StartCol: i.Start.Column - 1, - Length: len(i.Original), - NewString: i.InlineFix, - }, - } - } - - mu.Lock() - issues = append(issues, goanalysis.NewIssue(&issue, pass)) - mu.Unlock() - } - - return nil, nil - } - - analyzer := goanalysis.NewLinter( - a.Name, - a.Doc, - []*analysis.Analyzer{a}, - nil, - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return issues - }).WithLoadMode(goanalysis.LoadModeTypesInfo) - - return analyzer ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell/misspell.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell/misspell.go index 119baea2f6..3ace5fddb9 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell/misspell.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell/misspell.go @@ -2,15 +2,9 @@ package misspell import ( "fmt" -<<<<<<< HEAD "go/ast" "go/token" "strings" -======= - "go/token" - "strings" - "sync" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "unicode" "github.com/golangci/misspell" @@ -18,24 +12,12 @@ import ( "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" -<<<<<<< HEAD - "github.com/golangci/golangci-lint/pkg/lint/linter" -======= - "github.com/golangci/golangci-lint/pkg/golinters/internal" "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const linterName = "misspell" func New(settings *config.MisspellSettings) *goanalysis.Linter { -<<<<<<< HEAD -======= - var mu sync.Mutex - var resIssues []goanalysis.Issue - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, @@ -55,16 +37,11 @@ func New(settings *config.MisspellSettings) *goanalysis.Linter { return nil, ruleErr } -<<<<<<< HEAD err := runMisspell(lintCtx, pass, replacer, settings.Mode) -======= - issues, err := runMisspell(lintCtx, pass, replacer, settings.Mode) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } -<<<<<<< HEAD return nil, nil } }).WithLoadMode(goanalysis.LoadModeSyntax) @@ -79,39 +56,6 @@ func runMisspell(lintCtx *linter.Context, pass *analysis.Pass, replacer *misspel } return nil -======= - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) 
- mu.Unlock() - - return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) -} - -func runMisspell(lintCtx *linter.Context, pass *analysis.Pass, replacer *misspell.Replacer, mode string) ([]goanalysis.Issue, error) { - fileNames := internal.GetFileNames(pass) - - var issues []goanalysis.Issue - for _, filename := range fileNames { - lintIssues, err := runMisspellOnFile(lintCtx, filename, replacer, mode) - if err != nil { - return nil, err - } - - for i := range lintIssues { - issues = append(issues, goanalysis.NewIssue(&lintIssues[i], pass)) - } - } - - return issues, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func createMisspellReplacer(settings *config.MisspellSettings) (*misspell.Replacer, error) { @@ -146,7 +90,6 @@ func createMisspellReplacer(settings *config.MisspellSettings) (*misspell.Replac return replacer, nil } -<<<<<<< HEAD func runMisspellOnFile(lintCtx *linter.Context, pass *analysis.Pass, file *ast.File, replacer *misspell.Replacer, mode string) error { position, isGoFile := goanalysis.GetGoFilePosition(pass, file) if !isGoFile { @@ -156,12 +99,6 @@ func runMisspellOnFile(lintCtx *linter.Context, pass *analysis.Pass, file *ast.F fileContent, err := lintCtx.FileCache.GetFileBytes(position.Filename) if err != nil { return fmt.Errorf("can't get file %s contents: %w", position.Filename, err) -======= -func runMisspellOnFile(lintCtx *linter.Context, filename string, replacer *misspell.Replacer, mode string) ([]result.Issue, error) { - fileContent, err := lintCtx.FileCache.GetFileBytes(filename) - if err != nil { - return nil, fmt.Errorf("can't get file %s contents: %w", filename, err) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // `r.ReplaceGo` doesn't find issues inside strings: it searches only inside comments. 
@@ -175,7 +112,6 @@ func runMisspellOnFile(lintCtx *linter.Context, filename string, replacer *missp replace = replacer.Replace } -<<<<<<< HEAD f := pass.Fset.File(file.Pos()) _, diffs := replace(string(fileContent)) @@ -201,38 +137,6 @@ func runMisspellOnFile(lintCtx *linter.Context, filename string, replacer *missp } return nil -======= - _, diffs := replace(string(fileContent)) - - var res []result.Issue - - for _, diff := range diffs { - text := fmt.Sprintf("`%s` is a misspelling of `%s`", diff.Original, diff.Corrected) - - pos := token.Position{ - Filename: filename, - Line: diff.Line, - Column: diff.Column + 1, - } - - replacement := &result.Replacement{ - Inline: &result.InlineFix{ - StartCol: diff.Column, - Length: len(diff.Original), - NewString: diff.Corrected, - }, - } - - res = append(res, result.Issue{ - Pos: pos, - Text: text, - FromLinter: linterName, - Replacement: replacement, - }) - } - - return res, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func appendExtraWords(replacer *misspell.Replacer, extraWords []config.MisspellExtraWords) error { diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/musttag/musttag.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/musttag/musttag.go index 9873d7e850..a4e9ceff28 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/musttag/musttag.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/musttag/musttag.go @@ -8,19 +8,11 @@ import ( "github.com/golangci/golangci-lint/pkg/goanalysis" ) -<<<<<<< HEAD func New(settings *config.MustTagSettings) *goanalysis.Linter { var funcs []musttag.Func if settings != nil { for _, fn := range settings.Functions { -======= -func New(setting *config.MustTagSettings) *goanalysis.Linter { - var funcs []musttag.Func - - if setting != nil { - for _, fn := range setting.Functions { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) funcs = append(funcs, musttag.Func{ Name: fn.Name, Tag: fn.Tag, diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nestif/nestif.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nestif/nestif.go index 14147fc2d3..b72538fd16 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nestif/nestif.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nestif/nestif.go @@ -1,51 +1,21 @@ package nestif import ( -<<<<<<< HEAD -======= - "sort" - "sync" - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/nakabonne/nestif" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" -<<<<<<< HEAD -======= - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const linterName = "nestif" func New(settings *config.NestifSettings) *goanalysis.Linter { -<<<<<<< HEAD analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { runNestIf(pass, settings) -======= - var mu sync.Mutex - var resIssues []goanalysis.Issue - - analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, - Doc: goanalysis.TheOnlyanalyzerDoc, - Run: func(pass *analysis.Pass) (any, error) { - issues := runNestIf(pass, settings) - - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) 
- mu.Unlock() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil, nil }, @@ -56,24 +26,14 @@ func New(settings *config.NestifSettings) *goanalysis.Linter { "Reports deeply nested if statements", []*analysis.Analyzer{analyzer}, nil, -<<<<<<< HEAD ).WithLoadMode(goanalysis.LoadModeSyntax) } func runNestIf(pass *analysis.Pass, settings *config.NestifSettings) { -======= - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) -} - -func runNestIf(pass *analysis.Pass, settings *config.NestifSettings) []goanalysis.Issue { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) checker := &nestif.Checker{ MinComplexity: settings.MinComplexity, } -<<<<<<< HEAD for _, file := range pass.Files { position, isGoFile := goanalysis.GetGoFilePosition(pass, file) if !isGoFile { @@ -96,29 +56,4 @@ func runNestIf(pass *analysis.Pass, settings *config.NestifSettings) []goanalysi }) } } -======= - var lintIssues []nestif.Issue - for _, f := range pass.Files { - lintIssues = append(lintIssues, checker.Check(f, pass.Fset)...) - } - - if len(lintIssues) == 0 { - return nil - } - - sort.SliceStable(lintIssues, func(i, j int) bool { - return lintIssues[i].Complexity > lintIssues[j].Complexity - }) - - issues := make([]goanalysis.Issue, 0, len(lintIssues)) - for _, i := range lintIssues { - issues = append(issues, goanalysis.NewIssue(&result.Issue{ - Pos: i.Pos, - Text: i.Message, - FromLinter: linterName, - }, pass)) - } - - return issues ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnil/nilnil.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnil/nilnil.go index ea1022e12a..ed25dec71f 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnil/nilnil.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnil/nilnil.go @@ -11,7 +11,6 @@ import ( func New(settings *config.NilNilSettings) *goanalysis.Linter { a := analyzer.New() -<<<<<<< HEAD cfg := make(map[string]map[string]any) if settings != nil { cfg[a.Name] = map[string]any{ @@ -19,15 +18,6 @@ func New(settings *config.NilNilSettings) *goanalysis.Linter { } if len(settings.CheckedTypes) != 0 { cfg[a.Name]["checked-types"] = settings.CheckedTypes -======= - cfgMap := make(map[string]map[string]any) - if settings != nil { - cfgMap[a.Name] = map[string]any{ - "detect-opposite": settings.DetectOpposite, - } - if len(settings.CheckedTypes) != 0 { - cfgMap[a.Name]["checked-types"] = settings.CheckedTypes ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -35,11 +25,7 @@ func New(settings *config.NilNilSettings) *goanalysis.Linter { a.Name, a.Doc, []*analysis.Analyzer{a}, -<<<<<<< HEAD cfg, -======= - cfgMap, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ). 
WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/nolintlint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/nolintlint.go index 55b99953e6..21cd20124f 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/nolintlint.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/nolintlint.go @@ -2,7 +2,6 @@ package internal import ( -<<<<<<< HEAD "go/token" "regexp" "strings" @@ -14,125 +13,6 @@ import ( ) const LinterName = "nolintlint" -======= - "fmt" - "go/ast" - "go/token" - "regexp" - "strings" - "unicode" - - "github.com/golangci/golangci-lint/pkg/result" -) - -type BaseIssue struct { - fullDirective string - directiveWithOptionalLeadingSpace string - position token.Position - replacement *result.Replacement -} - -//nolint:gocritic // TODO(ldez) must be change in the future. -func (b BaseIssue) Position() token.Position { - return b.position -} - -//nolint:gocritic // TODO(ldez) must be change in the future. -func (b BaseIssue) Replacement() *result.Replacement { - return b.replacement -} - -type ExtraLeadingSpace struct { - BaseIssue -} - -//nolint:gocritic // TODO(ldez) must be change in the future. -func (i ExtraLeadingSpace) Details() string { - return fmt.Sprintf("directive `%s` should not have more than one leading space", i.fullDirective) -} - -func (i ExtraLeadingSpace) String() string { return toString(i) } - -type NotMachine struct { - BaseIssue -} - -//nolint:gocritic // TODO(ldez) must be change in the future. -func (i NotMachine) Details() string { - expected := i.fullDirective[:2] + strings.TrimLeftFunc(i.fullDirective[2:], unicode.IsSpace) - return fmt.Sprintf("directive `%s` should be written without leading space as `%s`", - i.fullDirective, expected) -} - -func (i NotMachine) String() string { return toString(i) } - -type NotSpecific struct { - BaseIssue -} - -//nolint:gocritic // TODO(ldez) must be change in the future. -func (i NotSpecific) Details() string { - return fmt.Sprintf("directive `%s` should mention specific linter such as `%s:my-linter`", - i.fullDirective, i.directiveWithOptionalLeadingSpace) -} - -func (i NotSpecific) String() string { return toString(i) } - -type ParseError struct { - BaseIssue -} - -//nolint:gocritic // TODO(ldez) must be change in the future. -func (i ParseError) Details() string { - return fmt.Sprintf("directive `%s` should match `%s[:] [// ]`", - i.fullDirective, - i.directiveWithOptionalLeadingSpace) -} - -func (i ParseError) String() string { return toString(i) } - -type NoExplanation struct { - BaseIssue - fullDirectiveWithoutExplanation string -} - -//nolint:gocritic // TODO(ldez) must be change in the future. -func (i NoExplanation) Details() string { - return fmt.Sprintf("directive `%s` should provide explanation such as `%s // this is why`", - i.fullDirective, i.fullDirectiveWithoutExplanation) -} - -func (i NoExplanation) String() string { return toString(i) } - -type UnusedCandidate struct { - BaseIssue - ExpectedLinter string -} - -//nolint:gocritic // TODO(ldez) must be change in the future. 
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/nolintlint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/nolintlint.go
index 55b99953e6..21cd20124f 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/nolintlint.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/nolintlint.go
@@ -2,7 +2,6 @@ package internal
 import (
-<<<<<<< HEAD
 	"go/token"
 	"regexp"
 	"strings"
@@ -14,125 +13,6 @@ import (
 )
 
 const LinterName = "nolintlint"
-=======
-	"fmt"
-	"go/ast"
-	"go/token"
-	"regexp"
-	"strings"
-	"unicode"
-
-	"github.com/golangci/golangci-lint/pkg/result"
-)
-
-type BaseIssue struct {
-	fullDirective                     string
-	directiveWithOptionalLeadingSpace string
-	position                          token.Position
-	replacement                       *result.Replacement
-}
-
-//nolint:gocritic // TODO(ldez) must be change in the future.
-func (b BaseIssue) Position() token.Position {
-	return b.position
-}
-
-//nolint:gocritic // TODO(ldez) must be change in the future.
-func (b BaseIssue) Replacement() *result.Replacement {
-	return b.replacement
-}
-
-type ExtraLeadingSpace struct {
-	BaseIssue
-}
-
-//nolint:gocritic // TODO(ldez) must be change in the future.
-func (i ExtraLeadingSpace) Details() string {
-	return fmt.Sprintf("directive `%s` should not have more than one leading space", i.fullDirective)
-}
-
-func (i ExtraLeadingSpace) String() string { return toString(i) }
-
-type NotMachine struct {
-	BaseIssue
-}
-
-//nolint:gocritic // TODO(ldez) must be change in the future.
-func (i NotMachine) Details() string {
-	expected := i.fullDirective[:2] + strings.TrimLeftFunc(i.fullDirective[2:], unicode.IsSpace)
-	return fmt.Sprintf("directive `%s` should be written without leading space as `%s`",
-		i.fullDirective, expected)
-}
-
-func (i NotMachine) String() string { return toString(i) }
-
-type NotSpecific struct {
-	BaseIssue
-}
-
-//nolint:gocritic // TODO(ldez) must be change in the future.
-func (i NotSpecific) Details() string {
-	return fmt.Sprintf("directive `%s` should mention specific linter such as `%s:my-linter`",
-		i.fullDirective, i.directiveWithOptionalLeadingSpace)
-}
-
-func (i NotSpecific) String() string { return toString(i) }
-
-type ParseError struct {
-	BaseIssue
-}
-
-//nolint:gocritic // TODO(ldez) must be change in the future.
-func (i ParseError) Details() string {
-	return fmt.Sprintf("directive `%s` should match `%s[:<comma-separated-linters>] [// <explanation>]`",
-		i.fullDirective,
-		i.directiveWithOptionalLeadingSpace)
-}
-
-func (i ParseError) String() string { return toString(i) }
-
-type NoExplanation struct {
-	BaseIssue
-	fullDirectiveWithoutExplanation string
-}
-
-//nolint:gocritic // TODO(ldez) must be change in the future.
-func (i NoExplanation) Details() string {
-	return fmt.Sprintf("directive `%s` should provide explanation such as `%s // this is why`",
-		i.fullDirective, i.fullDirectiveWithoutExplanation)
-}
-
-func (i NoExplanation) String() string { return toString(i) }
-
-type UnusedCandidate struct {
-	BaseIssue
-	ExpectedLinter string
-}
-
-//nolint:gocritic // TODO(ldez) must be change in the future.
-func (i UnusedCandidate) Details() string {
-	details := fmt.Sprintf("directive `%s` is unused", i.fullDirective)
-	if i.ExpectedLinter != "" {
-		details += fmt.Sprintf(" for linter %q", i.ExpectedLinter)
-	}
-	return details
-}
-
-func (i UnusedCandidate) String() string { return toString(i) }
-
-func toString(issue Issue) string {
-	return fmt.Sprintf("%s at %s", issue.Details(), issue.Position())
-}
-
-type Issue interface {
-	Details() string
-	Position() token.Position
-	String() string
-	Replacement() *result.Replacement
-}
-
-type Needs uint
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 const (
 	NeedsMachineOnly Needs = 1 << iota
@@ -142,13 +22,10 @@ const (
 	NeedsAll = NeedsMachineOnly | NeedsSpecific | NeedsExplanation
 )
-<<<<<<< HEAD
 type Needs uint
 
 const commentMark = "//"
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 var commentPattern = regexp.MustCompile(`^//\s*(nolint)(:\s*[\w-]+\s*(?:,\s*[\w-]+\s*)*)?\b`)
 
 // matches a complete nolint directive
@@ -178,22 +55,10 @@ var (
 )
 
 //nolint:funlen,gocyclo // the function is going to be refactored in the future
-<<<<<<< HEAD
 func (l Linter) Run(pass *analysis.Pass) ([]goanalysis.Issue, error) {
 	var issues []goanalysis.Issue
 
 	for _, file := range pass.Files {
-=======
-func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) {
-	var issues []Issue
-
-	for _, node := range nodes {
-		file, ok := node.(*ast.File)
-		if !ok {
-			continue
-		}
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		for _, c := range file.Comments {
 			for _, comment := range c.List {
 				if !commentPattern.MatchString(comment.Text) {
@@ -208,16 +73,11 @@ func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) {
 					leadingSpace = leadingSpaceMatches[1]
 				}
-<<<<<<< HEAD
 				directiveWithOptionalLeadingSpace := commentMark
-=======
-				directiveWithOptionalLeadingSpace := "//"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 				if leadingSpace != "" {
 					directiveWithOptionalLeadingSpace += " "
 				}
-<<<<<<< HEAD
 				split := strings.Split(strings.SplitN(comment.Text, ":", 2)[0], commentMark)
 				directiveWithOptionalLeadingSpace += strings.TrimSpace(split[1])
@@ -252,44 +112,11 @@
 					}
 					issues = append(issues, goanalysis.NewIssue(issue, pass))
-=======
-				split := strings.Split(strings.SplitN(comment.Text, ":", 2)[0], "//")
-				directiveWithOptionalLeadingSpace += strings.TrimSpace(split[1])
-
-				pos := fset.Position(comment.Pos())
-				end := fset.Position(comment.End())
-
-				base := BaseIssue{
-					fullDirective:                     comment.Text,
-					directiveWithOptionalLeadingSpace: directiveWithOptionalLeadingSpace,
-					position:                          pos,
-				}
-
-				// check for, report and eliminate leading spaces, so we can check for other issues
-				if leadingSpace != "" {
-					removeWhitespace := &result.Replacement{
-						Inline: &result.InlineFix{
-							StartCol:  pos.Column + 1,
-							Length:    len(leadingSpace),
-							NewString: "",
-						},
-					}
-					if (l.needs & NeedsMachineOnly) != 0 {
-						issue := NotMachine{BaseIssue: base}
-						issue.BaseIssue.replacement = removeWhitespace
-						issues = append(issues, issue)
-					} else if len(leadingSpace) > 1 {
-						issue := ExtraLeadingSpace{BaseIssue: base}
-						issue.BaseIssue.replacement = removeWhitespace
-						issue.BaseIssue.replacement.Inline.NewString = " " // assume a single space was intended
-						issues = append(issues, issue)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 					}
 				}
 
 				fullMatches := fullDirectivePattern.FindStringSubmatch(comment.Text)
 				if len(fullMatches) == 0 {
-<<<<<<< HEAD
 					issue := &result.Issue{
 						FromLinter: LinterName,
 						Text:       formatParseError(comment.Text, directiveWithOptionalLeadingSpace),
@@ -298,9 +125,6 @@ func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) {
 					issues = append(issues, goanalysis.NewIssue(issue, pass))
-=======
-					issues = append(issues, ParseError{BaseIssue: base})
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 					continue
 				}
@@ -310,11 +134,7 @@ func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) {
 				if lintersText != "" && !strings.HasPrefix(lintersText, "all") {
 					lls := strings.Split(lintersText, ",")
 					linters = make([]string, 0, len(lls))
-<<<<<<< HEAD
 					rangeStart := (pos.Column - 1) + len(commentMark) + len(leadingSpace) + len("nolint:")
-=======
-					rangeStart := (pos.Column - 1) + len("//") + len(leadingSpace) + len("nolint:")
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 					for i, ll := range lls {
 						rangeEnd := rangeStart + len(ll)
 						if i < len(lls)-1 {
@@ -330,7 +150,6 @@ func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) {
 
 				if (l.needs & NeedsSpecific) != 0 {
 					if len(linters) == 0 {
-<<<<<<< HEAD
 						issue := &result.Issue{
 							FromLinter: LinterName,
 							Text:       formatNotSpecific(comment.Text, directiveWithOptionalLeadingSpace),
@@ -338,15 +157,11 @@
 						}
 						issues = append(issues, goanalysis.NewIssue(issue, pass))
-=======
-						issues = append(issues, NotSpecific{BaseIssue: base})
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 					}
 				}
 
 				// when detecting unused directives, we send all the directives through and filter them out in the nolint processor
 				if (l.needs & NeedsUnused) != 0 {
-<<<<<<< HEAD
 					removeNolintCompletely := []analysis.SuggestedFix{{
 						TextEdits: []analysis.TextEdit{{
 							Pos: token.Pos(pos.Offset),
@@ -383,46 +198,11 @@
 					}
 
 					issues = append(issues, goanalysis.NewIssue(issue, pass))
-=======
-					removeNolintCompletely := &result.Replacement{}
-
-					startCol := pos.Column - 1
-
-					if startCol == 0 {
-						// if the directive starts from a new line, remove the line
-						removeNolintCompletely.NeedOnlyDelete = true
-					} else {
-						removeNolintCompletely.Inline = &result.InlineFix{
-							StartCol:  startCol,
-							Length:    end.Column - pos.Column,
-							NewString: "",
-						}
-					}
-
-					if len(linters) == 0 {
-						issue := UnusedCandidate{BaseIssue: base}
-						issue.replacement = removeNolintCompletely
-						issues = append(issues, issue)
-					} else {
-						for _, linter := range linters {
-							issue := UnusedCandidate{BaseIssue: base, ExpectedLinter: linter}
-							// only offer replacement if there is a single linter
-							// because of issues around commas and the possibility of all
-							// linters being removed
-							if len(linters) == 1 {
-								issue.replacement = removeNolintCompletely
-							}
-							issues = append(issues, issue)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 						}
 					}
 				}
-<<<<<<< HEAD
 				if (l.needs&NeedsExplanation) != 0 && (explanation == "" || strings.TrimSpace(explanation) == commentMark) {
-=======
-				if (l.needs&NeedsExplanation) != 0 && (explanation == "" || strings.TrimSpace(explanation) == "//") {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 					needsExplanation := len(linters) == 0 // if no linters are mentioned, we must have explanation
 					// otherwise, check if we are excluding all the mentioned linters
 					for _, ll := range linters {
@@ -434,7 +214,6 @@ func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) {
 					if needsExplanation {
 						fullDirectiveWithoutExplanation := trailingBlankExplanation.ReplaceAllString(comment.Text, "")
-<<<<<<< HEAD
 						issue := &result.Issue{
 							FromLinter: LinterName,
@@ -443,12 +222,6 @@
 						}
 						issues = append(issues, goanalysis.NewIssue(issue, pass))
-=======
-						issues = append(issues, NoExplanation{
-							BaseIssue:                       base,
-							fullDirectiveWithoutExplanation: fullDirectiveWithoutExplanation,
-						})
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 					}
 				}
 			}
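The resolved internal nolintlint drops its bespoke Issue/Replacement type hierarchy in favor of result.Issue values carrying analysis.SuggestedFix edits, as the kept lines above show. A small self-contained sketch of that fix shape (the message and offsets are illustrative only):

package sketch

import (
	"go/token"

	"golang.org/x/tools/go/analysis"
)

// leadingSpaceFix builds the kind of edit the linter now attaches to an
// issue: rewrite the span between pos and end (for example the extra
// leading space in "//  nolint") to a single space.
func leadingSpaceFix(pos, end token.Pos) analysis.SuggestedFix {
	return analysis.SuggestedFix{
		Message: "remove extra leading space",
		TextEdits: []analysis.TextEdit{{
			Pos:     pos,
			End:     end,
			NewText: []byte(" "),
		}},
	}
}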
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/nolintlint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/nolintlint.go
index d92c25d491..e1c878628d 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/nolintlint.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/nolintlint.go
@@ -2,15 +2,10 @@ package nolintlint
 import (
 	"fmt"
-<<<<<<< HEAD
-=======
-	"go/ast"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"sync"
 
 	"golang.org/x/tools/go/analysis"
-<<<<<<< HEAD
 	"github.com/golangci/golangci-lint/pkg/golinters/internal"
 
 	"github.com/golangci/golangci-lint/pkg/config"
@@ -20,22 +15,11 @@ import (
 )
 
 const LinterName = nolintlint.LinterName
-=======
-	"github.com/golangci/golangci-lint/pkg/config"
-	"github.com/golangci/golangci-lint/pkg/goanalysis"
-	"github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal"
-	"github.com/golangci/golangci-lint/pkg/lint/linter"
-	"github.com/golangci/golangci-lint/pkg/result"
-)
-
-const LinterName = "nolintlint"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func New(settings *config.NoLintLintSettings) *goanalysis.Linter {
 	var mu sync.Mutex
 	var resIssues []goanalysis.Issue
-<<<<<<< HEAD
 	var needs nolintlint.Needs
 	if settings.RequireExplanation {
 		needs |= nolintlint.NeedsExplanation
@@ -59,15 +43,6 @@ func New(settings *config.NoLintLintSettings) *goanalysis.Linter {
 			issues, err := lnt.Run(pass)
 			if err != nil {
 				return nil, fmt.Errorf("linter failed to run: %w", err)
-=======
-	analyzer := &analysis.Analyzer{
-		Name: LinterName,
-		Doc:  goanalysis.TheOnlyanalyzerDoc,
-		Run: func(pass *analysis.Pass) (any, error) {
-			issues, err := runNoLintLint(pass, settings)
-			if err != nil {
-				return nil, err
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			}
 
 			if len(issues) == 0 {
@@ -83,11 +58,7 @@ func New(settings *config.NoLintLintSettings) *goanalysis.Linter {
 	}
 
 	return goanalysis.NewLinter(
-<<<<<<< HEAD
 		nolintlint.LinterName,
-=======
-		LinterName,
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		"Reports ill-formed or insufficient nolint directives",
 		[]*analysis.Analyzer{analyzer},
 		nil,
@@ -95,58 +66,3 @@ func New(settings *config.NoLintLintSettings) *goanalysis.Linter {
 		return resIssues
 	}).WithLoadMode(goanalysis.LoadModeSyntax)
 }
-<<<<<<< HEAD
-=======
-
-func runNoLintLint(pass *analysis.Pass, settings *config.NoLintLintSettings) ([]goanalysis.Issue, error) {
-	var needs internal.Needs
-	if settings.RequireExplanation {
-		needs |= internal.NeedsExplanation
-	}
-	if settings.RequireSpecific {
-		needs |= internal.NeedsSpecific
-	}
-	if !settings.AllowUnused {
-		needs |= internal.NeedsUnused
-	}
-
-	lnt, err := internal.NewLinter(needs, settings.AllowNoExplanation)
-	if err != nil {
-		return nil, err
-	}
-
-	nodes := make([]ast.Node, 0, len(pass.Files))
-	for _, n := range pass.Files {
-		nodes = append(nodes, n)
-	}
-
-	lintIssues, err := lnt.Run(pass.Fset, nodes...)
-	if err != nil {
-		return nil, fmt.Errorf("linter failed to run: %w", err)
-	}
-
-	var issues []goanalysis.Issue
-
-	for _, i := range lintIssues {
-		expectNoLint := false
-		var expectedNolintLinter string
-		if ii, ok := i.(internal.UnusedCandidate); ok {
-			expectedNolintLinter = ii.ExpectedLinter
-			expectNoLint = true
-		}
-
-		issue := &result.Issue{
-			FromLinter:           LinterName,
-			Text:                 i.Details(),
-			Pos:                  i.Position(),
-			ExpectNoLint:         expectNoLint,
-			ExpectedNoLintLinter: expectedNolintLinter,
-			Replacement:          i.Replacement(),
-		}
-
-		issues = append(issues, goanalysis.NewIssue(issue, pass))
-	}
-
-	return issues, nil
-}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc/prealloc.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc/prealloc.go
index 4f1d16cbd6..17e86c98ee 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc/prealloc.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc/prealloc.go
@@ -2,10 +2,6 @@ package prealloc
 import (
 	"fmt"
-<<<<<<< HEAD
-=======
-	"sync"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"github.com/alexkohler/prealloc/pkg"
 	"golang.org/x/tools/go/analysis"
@@ -13,39 +9,16 @@ import (
 	"github.com/golangci/golangci-lint/pkg/config"
 	"github.com/golangci/golangci-lint/pkg/goanalysis"
 	"github.com/golangci/golangci-lint/pkg/golinters/internal"
-<<<<<<< HEAD
-=======
-	"github.com/golangci/golangci-lint/pkg/lint/linter"
-	"github.com/golangci/golangci-lint/pkg/result"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 )
 
 const linterName = "prealloc"
 
 func New(settings *config.PreallocSettings) *goanalysis.Linter {
-<<<<<<< HEAD
-=======
-	var mu sync.Mutex
-	var resIssues []goanalysis.Issue
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	analyzer := &analysis.Analyzer{
 		Name: linterName,
 		Doc:  goanalysis.TheOnlyanalyzerDoc,
 		Run: func(pass *analysis.Pass) (any, error) {
-<<<<<<< HEAD
 			runPreAlloc(pass, settings)
-=======
-			issues := runPreAlloc(pass, settings)
-
-			if len(issues) == 0 {
-				return nil, nil
-			}
-
-			mu.Lock()
-			resIssues = append(resIssues, issues...)
-			mu.Unlock()
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			return nil, nil
 		},
@@ -56,7 +29,6 @@ func New(settings *config.PreallocSettings) *goanalysis.Linter {
 		"Finds slice declarations that could potentially be pre-allocated",
 		[]*analysis.Analyzer{analyzer},
 		nil,
-<<<<<<< HEAD
 	).WithLoadMode(goanalysis.LoadModeSyntax)
 }
 
@@ -69,25 +41,4 @@ func runPreAlloc(pass *analysis.Pass, settings *config.PreallocSettings) {
 			Message: fmt.Sprintf("Consider pre-allocating %s", internal.FormatCode(hint.DeclaredSliceName, nil)),
 		})
 	}
-=======
-	).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
-		return resIssues
-	}).WithLoadMode(goanalysis.LoadModeSyntax)
-}
-
-func runPreAlloc(pass *analysis.Pass, settings *config.PreallocSettings) []goanalysis.Issue {
-	var issues []goanalysis.Issue
-
-	hints := pkg.Check(pass.Files, settings.Simple, settings.RangeLoops, settings.ForLoops)
-
-	for _, hint := range hints {
-		issues = append(issues, goanalysis.NewIssue(&result.Issue{
-			Pos:        pass.Fset.Position(hint.Pos),
-			Text:       fmt.Sprintf("Consider pre-allocating %s", internal.FormatCode(hint.DeclaredSliceName, nil)),
-			FromLinter: linterName,
-		}, pass))
-	}
-
-	return issues
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
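prealloc now reports straight from the analyzer run instead of accumulating goanalysis.Issue values behind a sync.Mutex and replaying them through WithIssuesReporter. A sketch of the direct-reporting shape, with hintPos and name standing in for prealloc's hint fields:

package sketch

import (
	"fmt"
	"go/token"

	"golang.org/x/tools/go/analysis"
)

// report sends a diagnostic to the pass; the analysis framework, not the
// linter wrapper, owns collection and ordering of the results.
func report(pass *analysis.Pass, hintPos token.Pos, name string) {
	pass.Report(analysis.Diagnostic{
		Pos:     hintPos,
		Message: fmt.Sprintf("Consider pre-allocating %s", name),
	})
}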
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/protogetter/protogetter.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/protogetter/protogetter.go
index 72532e46bc..6c65f86bcf 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/protogetter/protogetter.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/protogetter/protogetter.go
@@ -1,30 +1,14 @@
 package protogetter
 
 import (
-<<<<<<< HEAD
-=======
-	"sync"
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"github.com/ghostiam/protogetter"
 	"golang.org/x/tools/go/analysis"
 
 	"github.com/golangci/golangci-lint/pkg/config"
 	"github.com/golangci/golangci-lint/pkg/goanalysis"
-<<<<<<< HEAD
-)
-
-func New(settings *config.ProtoGetterSettings) *goanalysis.Linter {
-=======
-	"github.com/golangci/golangci-lint/pkg/lint/linter"
-	"github.com/golangci/golangci-lint/pkg/result"
 )
 
 func New(settings *config.ProtoGetterSettings) *goanalysis.Linter {
-	var mu sync.Mutex
-	var resIssues []goanalysis.Issue
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	var cfg protogetter.Config
 	if settings != nil {
 		cfg = protogetter.Config{
@@ -34,61 +18,15 @@ func New(settings *config.ProtoGetterSettings) *goanalysis.Linter {
 			ReplaceFirstArgInAppend: settings.ReplaceFirstArgInAppend,
 		}
 	}
-<<<<<<< HEAD
 	cfg.Mode = protogetter.StandaloneMode
 
 	a := protogetter.NewAnalyzer(&cfg)
-=======
-	cfg.Mode = protogetter.GolangciLintMode
-
-	a := protogetter.NewAnalyzer(&cfg)
-	a.Run = func(pass *analysis.Pass) (any, error) {
-		pgIssues, err := protogetter.Run(pass, &cfg)
-		if err != nil {
-			return nil, err
-		}
-
-		issues := make([]goanalysis.Issue, len(pgIssues))
-		for i, issue := range pgIssues {
-			report := &result.Issue{
-				FromLinter: a.Name,
-				Pos:        issue.Pos,
-				Text:       issue.Message,
-				Replacement: &result.Replacement{
-					Inline: &result.InlineFix{
-						StartCol:  issue.InlineFix.StartCol,
-						Length:    issue.InlineFix.Length,
-						NewString: issue.InlineFix.NewString,
-					},
-				},
-			}
-
-			issues[i] = goanalysis.NewIssue(report, pass)
-		}
-
-		if len(issues) == 0 {
-			return nil, nil
-		}
-
-		mu.Lock()
-		resIssues = append(resIssues, issues...)
-		mu.Unlock()
-
-		return nil, nil
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return goanalysis.NewLinter(
 		a.Name,
 		a.Doc,
 		[]*analysis.Analyzer{a},
 		nil,
-<<<<<<< HEAD
 	).WithLoadMode(goanalysis.LoadModeTypesInfo)
-=======
-	).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
-		return resIssues
-	}).WithLoadMode(goanalysis.LoadModeTypesInfo)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
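protogetter's resolution follows the same theme: in StandaloneMode the upstream analyzer reports (and fixes) its own diagnostics, so the wrapper no longer overrides Run to translate InlineFix values into result.Replacement. A sketch of the remaining wiring, using only names that appear in the hunk:

package sketch

import (
	"github.com/ghostiam/protogetter"
	"golang.org/x/tools/go/analysis"
)

// newAnalyzer builds the analyzer in standalone mode; no custom Run
// override or issue re-mapping is required.
func newAnalyzer() *analysis.Analyzer {
	cfg := protogetter.Config{Mode: protogetter.StandaloneMode}
	return protogetter.NewAnalyzer(&cfg)
}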
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/recvcheck/recvcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/recvcheck/recvcheck.go
index 5ad82008a6..3af4885b40 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/recvcheck/recvcheck.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/recvcheck/recvcheck.go
@@ -4,7 +4,6 @@ import (
 	"github.com/raeperd/recvcheck"
 	"golang.org/x/tools/go/analysis"
-<<<<<<< HEAD
 	"github.com/golangci/golangci-lint/pkg/config"
 	"github.com/golangci/golangci-lint/pkg/goanalysis"
 )
@@ -18,13 +17,6 @@ func New(settings *config.RecvcheckSettings) *goanalysis.Linter {
 	}
 
 	a := recvcheck.NewAnalyzer(cfg)
-=======
-	"github.com/golangci/golangci-lint/pkg/goanalysis"
-)
-
-func New() *goanalysis.Linter {
-	a := recvcheck.Analyzer
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return goanalysis.NewLinter(
 		a.Name,
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive/revive.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive/revive.go
index 49ef8b63ca..ec621ccfba 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive/revive.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive/revive.go
@@ -2,10 +2,7 @@ package revive
 import (
 	"bytes"
-<<<<<<< HEAD
 	"cmp"
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"encoding/json"
 	"fmt"
 	"go/token"
@@ -118,11 +115,7 @@ func newWrapper(settings *config.ReviveSettings) (*wrapper, error) {
 }
 
 func (w *wrapper) run(lintCtx *linter.Context, pass *analysis.Pass) ([]goanalysis.Issue, error) {
-<<<<<<< HEAD
 	packages := [][]string{internal.GetGoFileNames(pass)}
-=======
-	packages := [][]string{internal.GetFileNames(pass)}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	failures, err := w.revive.Lint(packages, w.lintingRules, *w.conf)
 	if err != nil {
@@ -172,11 +165,7 @@ func toIssue(pass *analysis.Pass, object *jsonObject) goanalysis.Issue {
 		lineRangeTo = object.Position.Start.Line
 	}
-<<<<<<< HEAD
 	issue := &result.Issue{
-=======
-	return goanalysis.NewIssue(&result.Issue{
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		Severity: string(object.Severity),
 		Text:     fmt.Sprintf("%s: %s", object.RuleName, object.Failure.Failure),
 		Pos: token.Position{
@@ -190,7 +179,6 @@ func toIssue(pass *analysis.Pass, object *jsonObject) goanalysis.Issue {
 			To:   lineRangeTo,
 		},
 		FromLinter: linterName,
-<<<<<<< HEAD
 	}
 
 	if object.ReplacementLine != "" {
@@ -209,9 +197,6 @@ func toIssue(pass *analysis.Pass, object *jsonObject) goanalysis.Issue {
 	}
 
 	return goanalysis.NewIssue(issue, pass)
-=======
-	}, pass)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 // This function mimics the GetConfig function of revive.
@@ -412,17 +397,8 @@ const defaultConfidence = 0.8
 func normalizeConfig(cfg *lint.Config) {
 	// NOTE(ldez): this custom section for golangci-lint should be kept.
 	// ---
-<<<<<<< HEAD
 	cfg.Confidence = cmp.Or(cfg.Confidence, defaultConfidence)
 	cfg.Severity = cmp.Or(cfg.Severity, lint.SeverityWarning)
-=======
-	if cfg.Confidence == 0 {
-		cfg.Confidence = defaultConfidence
-	}
-	if cfg.Severity == "" {
-		cfg.Severity = lint.SeverityWarning
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	// ---
 
 	if len(cfg.Rules) == 0 {
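cmp.Or (standard library since Go 1.22) returns the first of its arguments that is not the zero value of the type, which is why a single call can replace both the `== 0` and the `== ""` default checks removed above:

package sketch

import "cmp"

// defaults shows cmp.Or collapsing zero-value checks: 0 and "" are the
// zero values of their types, so the fallback arguments win.
func defaults() (float64, string) {
	confidence := cmp.Or(0.0, 0.8)    // -> 0.8
	severity := cmp.Or("", "warning") // -> "warning"
	return confidence, severity
}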
-		mu.Unlock()
-
-		return nil, nil
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return goanalysis.NewLinter(
 		analyzer.Name,
 		analyzer.Doc,
 		[]*analysis.Analyzer{analyzer},
 		nil,
-<<<<<<< HEAD
 	).WithLoadMode(goanalysis.LoadModeSyntax)
-=======
-	).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
-		return resIssues
-	}).WithLoadMode(goanalysis.LoadModeSyntax)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagliatelle/tagliatelle.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagliatelle/tagliatelle.go
index 0e76e3858d..08215c3a53 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagliatelle/tagliatelle.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagliatelle/tagliatelle.go
@@ -10,19 +10,12 @@ import (
 func New(settings *config.TagliatelleSettings) *goanalysis.Linter {
 	cfg := tagliatelle.Config{
-<<<<<<< HEAD
 		Base: tagliatelle.Base{
 			Rules: map[string]string{
 				"json":   "camel",
 				"yaml":   "camel",
 				"header": "header",
 			},
-=======
-		Rules: map[string]string{
-			"json":   "camel",
-			"yaml":   "camel",
-			"header": "header",
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		},
 	}
@@ -30,7 +23,6 @@ func New(settings *config.TagliatelleSettings) *goanalysis.Linter {
 		for k, v := range settings.Case.Rules {
 			cfg.Rules[k] = v
 		}
-<<<<<<< HEAD
 
 		cfg.ExtendedRules = toExtendedRules(settings.Case.ExtendedRules)
 		cfg.UseFieldName = settings.Case.UseFieldName
@@ -48,9 +40,6 @@ func New(settings *config.TagliatelleSettings) *goanalysis.Linter {
 				Package: override.Package,
 			})
 		}
-=======
-		cfg.UseFieldName = settings.Case.UseFieldName
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 
 	a := tagliatelle.New(cfg)
@@ -60,7 +49,6 @@ func New(settings *config.TagliatelleSettings) *goanalysis.Linter {
 		a.Doc,
 		[]*analysis.Analyzer{a},
 		nil,
-<<<<<<< HEAD
 	).WithLoadMode(goanalysis.LoadModeTypesInfo)
 }
 
@@ -76,7 +64,4 @@ func toExtendedRules(src map[string]config.TagliatelleExtendedRule) map[string]t
 	}
 
 	return result
-=======
-	).WithLoadMode(goanalysis.LoadModeSyntax)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/testpackage/testpackage.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/testpackage/testpackage.go
index 0f98fcca34..f617da5536 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/testpackage/testpackage.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/testpackage/testpackage.go
@@ -10,7 +10,6 @@ import (
 	"github.com/golangci/golangci-lint/pkg/goanalysis"
 )
-<<<<<<< HEAD
 func New(settings *config.TestpackageSettings) *goanalysis.Linter {
 	a := testpackage.NewAnalyzer()
@@ -20,25 +19,10 @@ func New(settings *config.TestpackageSettings) *goanalysis.Linter {
 			a.Name: {
 				testpackage.SkipRegexpFlagName:    settings.SkipRegexp,
 				testpackage.AllowPackagesFlagName: strings.Join(settings.AllowPackages, ","),
-=======
-func New(cfg *config.TestpackageSettings) *goanalysis.Linter {
-	a := testpackage.NewAnalyzer()
-
-	var settings map[string]map[string]any
-	if cfg != nil {
-		settings = map[string]map[string]any{
-			a.Name: {
-				testpackage.SkipRegexpFlagName:    cfg.SkipRegexp,
-				testpackage.AllowPackagesFlagName: strings.Join(cfg.AllowPackages, ","),
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			},
 		}
 	}
-<<<<<<< HEAD
 	return goanalysis.NewLinter(a.Name, a.Doc, []*analysis.Analyzer{a}, cfg).
-=======
-	return goanalysis.NewLinter(a.Name, a.Doc, []*analysis.Analyzer{a}, settings).
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		WithLoadMode(goanalysis.LoadModeSyntax)
 }
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/thelper/thelper.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/thelper/thelper.go
index 57874d9300..102610a69a 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/thelper/thelper.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/thelper/thelper.go
@@ -12,11 +12,7 @@ import (
 	"github.com/golangci/golangci-lint/pkg/golinters/internal"
 )
-<<<<<<< HEAD
 func New(settings *config.ThelperSettings) *goanalysis.Linter {
-=======
-func New(cfg *config.ThelperSettings) *goanalysis.Linter {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	a := analyzer.NewAnalyzer()
 
 	opts := map[string]struct{}{
@@ -37,19 +33,11 @@ func New(cfg *config.ThelperSettings) *goanalysis.Linter {
 		"tb_first": {},
 	}
-<<<<<<< HEAD
 	if settings != nil {
 		applyTHelperOptions(settings.Test, "t_", opts)
 		applyTHelperOptions(settings.Fuzz, "f_", opts)
 		applyTHelperOptions(settings.Benchmark, "b_", opts)
 		applyTHelperOptions(settings.TB, "tb_", opts)
-=======
-	if cfg != nil {
-		applyTHelperOptions(cfg.Test, "t_", opts)
-		applyTHelperOptions(cfg.Fuzz, "f_", opts)
-		applyTHelperOptions(cfg.Benchmark, "b_", opts)
-		applyTHelperOptions(cfg.TB, "tb_", opts)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 
 	if len(opts) == 0 {
@@ -58,11 +46,7 @@ func New(cfg *config.ThelperSettings) *goanalysis.Linter {
 
 	args := maps.Keys(opts)
-<<<<<<< HEAD
 	cfg := map[string]map[string]any{
-=======
-	cfgMap := map[string]map[string]any{
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		a.Name: {
 			"checks": strings.Join(args, ","),
 		},
@@ -72,11 +56,7 @@ func New(cfg *config.ThelperSettings) *goanalysis.Linter {
 		a.Name,
 		a.Doc,
 		[]*analysis.Analyzer{a},
-<<<<<<< HEAD
 		cfg,
-=======
-		cfgMap,
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	).WithLoadMode(goanalysis.LoadModeTypesInfo)
 }
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam/unparam.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam/unparam.go
index c2fcae383b..04c9a223e5 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam/unparam.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam/unparam.go
@@ -1,11 +1,6 @@
 package unparam
 
 import (
-<<<<<<< HEAD
-=======
-	"sync"
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"golang.org/x/tools/go/analysis"
 	"golang.org/x/tools/go/analysis/passes/buildssa"
 	"golang.org/x/tools/go/packages"
@@ -14,46 +9,21 @@ import (
 	"github.com/golangci/golangci-lint/pkg/config"
 	"github.com/golangci/golangci-lint/pkg/goanalysis"
 	"github.com/golangci/golangci-lint/pkg/lint/linter"
-<<<<<<< HEAD
-=======
-	"github.com/golangci/golangci-lint/pkg/result"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 )
 
 const linterName = "unparam"
 
 func New(settings *config.UnparamSettings) *goanalysis.Linter {
-<<<<<<< HEAD
-=======
-	var mu sync.Mutex
-	var resIssues []goanalysis.Issue
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	analyzer := &analysis.Analyzer{
 		Name:     linterName,
 		Doc:      goanalysis.TheOnlyanalyzerDoc,
 		Requires: []*analysis.Analyzer{buildssa.Analyzer},
 		Run: func(pass *analysis.Pass) (any, error) {
-<<<<<<< HEAD
 			err := runUnparam(pass, settings)
-=======
-			issues, err := runUnparam(pass, settings)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			if err != nil {
 				return nil, err
 			}
-<<<<<<< HEAD
-=======
-			if len(issues) == 0 {
-				return nil, nil
-			}
-
-			mu.Lock()
-			resIssues = append(resIssues, issues...)
-			mu.Unlock()
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			return nil, nil
 		},
 	}
@@ -67,19 +37,10 @@ func New(settings *config.UnparamSettings) *goanalysis.Linter {
 		if settings.Algo != "cha" {
 			lintCtx.Log.Warnf("`linters-settings.unparam.algo` isn't supported by the newest `unparam`")
 		}
-<<<<<<< HEAD
 	}).WithLoadMode(goanalysis.LoadModeTypesInfo)
 }
 
 func runUnparam(pass *analysis.Pass, settings *config.UnparamSettings) error {
-=======
-	}).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
-		return resIssues
-	}).WithLoadMode(goanalysis.LoadModeTypesInfo)
-}
-
-func runUnparam(pass *analysis.Pass, settings *config.UnparamSettings) ([]goanalysis.Issue, error) {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	ssa := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA)
 	ssaPkg := ssa.Pkg
@@ -97,7 +58,6 @@ func runUnparam(pass *analysis.Pass, settings *config.UnparamSettings) ([]goanal
 	unparamIssues, err := c.Check()
 	if err != nil {
-<<<<<<< HEAD
 		return err
 	}
 
@@ -109,19 +69,4 @@ func runUnparam(pass *analysis.Pass, settings *config.UnparamSettings) ([]goanal
 	}
 
 	return nil
-=======
-		return nil, err
-	}
-
-	var issues []goanalysis.Issue
-	for _, i := range unparamIssues {
-		issues = append(issues, goanalysis.NewIssue(&result.Issue{
-			Pos:        pass.Fset.Position(i.Pos()),
-			Text:       i.Message(),
-			FromLinter: linterName,
-		}, pass))
-	}
-
-	return issues, nil
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/usestdlibvars/usestdlibvars.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/usestdlibvars/usestdlibvars.go
index 84a676b4bb..00f7d9742a 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/usestdlibvars/usestdlibvars.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/usestdlibvars/usestdlibvars.go
@@ -8,7 +8,6 @@ import (
 	"github.com/golangci/golangci-lint/pkg/goanalysis"
 )
-<<<<<<< HEAD
 func New(settings *config.UseStdlibVarsSettings) *goanalysis.Linter {
 	a := analyzer.New()
@@ -27,26 +26,6 @@ func New(settings *config.UseStdlibVarsSettings) *goanalysis.Linter {
 			analyzer.TimeMonthFlag:          settings.TimeMonth,
 			analyzer.TimeWeekdayFlag:        settings.TimeWeekday,
 			analyzer.TLSSignatureSchemeFlag: settings.TLSSignatureScheme,
-=======
-func New(cfg *config.UseStdlibVarsSettings) *goanalysis.Linter {
-	a := analyzer.New()
-
-	cfgMap := make(map[string]map[string]any)
-	if cfg != nil {
-		cfgMap[a.Name] = map[string]any{
-			analyzer.ConstantKindFlag:       cfg.ConstantKind,
-			analyzer.CryptoHashFlag:         cfg.CryptoHash,
-			analyzer.HTTPMethodFlag:         cfg.HTTPMethod,
-			analyzer.HTTPStatusCodeFlag:     cfg.HTTPStatusCode,
-			analyzer.OSDevNullFlag:          cfg.OSDevNull,
-			analyzer.RPCDefaultPathFlag:     cfg.DefaultRPCPath,
-			analyzer.SQLIsolationLevelFlag:  cfg.SQLIsolationLevel,
-			analyzer.SyslogPriorityFlag:     cfg.SyslogPriority,
-			analyzer.TimeLayoutFlag:         cfg.TimeLayout,
-			analyzer.TimeMonthFlag:          cfg.TimeMonth,
-			analyzer.TimeWeekdayFlag:        cfg.TimeWeekday,
-			analyzer.TLSSignatureSchemeFlag: cfg.TLSSignatureScheme,
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		}
 	}
@@ -54,10 +33,6 @@ func New(cfg *config.UseStdlibVarsSettings) *goanalysis.Linter {
 		a.Name,
 		a.Doc,
 		[]*analysis.Analyzer{a},
-<<<<<<< HEAD
 		cfg,
-=======
-		cfgMap,
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	).WithLoadMode(goanalysis.LoadModeSyntax)
 }
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace/whitespace.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace/whitespace.go
index 38a82c1f3d..d45969efce 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace/whitespace.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace/whitespace.go
@@ -1,40 +1,17 @@
 package whitespace
 
 import (
-<<<<<<< HEAD
-=======
-	"fmt"
-	"sync"
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"github.com/ultraware/whitespace"
 	"golang.org/x/tools/go/analysis"
 
 	"github.com/golangci/golangci-lint/pkg/config"
 	"github.com/golangci/golangci-lint/pkg/goanalysis"
-<<<<<<< HEAD
 )
 
 func New(settings *config.WhitespaceSettings) *goanalysis.Linter {
 	var wsSettings whitespace.Settings
 	if settings != nil {
 		wsSettings = whitespace.Settings{
-=======
-	"github.com/golangci/golangci-lint/pkg/lint/linter"
-	"github.com/golangci/golangci-lint/pkg/result"
-)
-
-const linterName = "whitespace"
-
-func New(settings *config.WhitespaceSettings) *goanalysis.Linter {
-	var mu sync.Mutex
-	var resIssues []goanalysis.Issue
-
-	var wsSettings whitespace.Settings
-	if settings != nil {
-		wsSettings = whitespace.Settings{
-			Mode:      whitespace.RunningModeGolangCI,
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			MultiIf:   settings.MultiIf,
 			MultiFunc: settings.MultiFunc,
 		}
@@ -47,72 +24,5 @@ func New(settings *config.WhitespaceSettings) *goanalysis.Linter {
 		a.Doc,
 		[]*analysis.Analyzer{a},
 		nil,
-<<<<<<< HEAD
 	).WithLoadMode(goanalysis.LoadModeSyntax)
-=======
-	).WithContextSetter(func(_ *linter.Context) {
-		a.Run = func(pass *analysis.Pass) (any, error) {
-			issues, err := runWhitespace(pass, wsSettings)
-			if err != nil {
-				return nil, err
-			}
-
-			if len(issues) == 0 {
-				return nil, nil
-			}
-
-			mu.Lock()
-			resIssues = append(resIssues, issues...)
-			mu.Unlock()
-
-			return nil, nil
-		}
-	}).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
-		return resIssues
-	}).WithLoadMode(goanalysis.LoadModeSyntax)
-}
-
-func runWhitespace(pass *analysis.Pass, wsSettings whitespace.Settings) ([]goanalysis.Issue, error) {
-	lintIssues := whitespace.Run(pass, &wsSettings)
-
-	issues := make([]goanalysis.Issue, len(lintIssues))
-	for i, issue := range lintIssues {
-		report := &result.Issue{
-			FromLinter: linterName,
-			Pos:        pass.Fset.PositionFor(issue.Diagnostic, false),
-			Text:       issue.Message,
-		}
-
-		switch issue.MessageType {
-		case whitespace.MessageTypeRemove:
-			if len(issue.LineNumbers) == 0 {
-				continue
-			}
-
-			report.LineRange = &result.Range{
-				From: issue.LineNumbers[0],
-				To:   issue.LineNumbers[len(issue.LineNumbers)-1],
-			}
-
-			report.Replacement = &result.Replacement{NeedOnlyDelete: true}
-
-		case whitespace.MessageTypeAdd:
-			report.Pos = pass.Fset.PositionFor(issue.FixStart, false)
-			report.Replacement = &result.Replacement{
-				Inline: &result.InlineFix{
-					StartCol:  0,
-					Length:    1,
-					NewString: "\n\t",
-				},
-			}
-
-		default:
-			return nil, fmt.Errorf("unknown message type: %v", issue.MessageType)
-		}
-
-		issues[i] = goanalysis.NewIssue(report, pass)
-	}
-
-	return issues, nil
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/wrapcheck/wrapcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/wrapcheck/wrapcheck.go
index 609d8e88c5..b2f5ec7420 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/wrapcheck/wrapcheck.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/wrapcheck/wrapcheck.go
@@ -11,11 +11,8 @@ import (
 func New(settings *config.WrapcheckSettings) *goanalysis.Linter {
 	cfg := wrapcheck.NewDefaultConfig()
 	if settings != nil {
-<<<<<<< HEAD
 		cfg.ExtraIgnoreSigs = settings.ExtraIgnoreSigs
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		if len(settings.IgnoreSigs) != 0 {
 			cfg.IgnoreSigs = settings.IgnoreSigs
 		}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go
index 3de17437e2..1e438a0f03 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go
@@ -164,7 +164,6 @@ func (lc *Config) WithNoopFallback(cfg *config.Config, cond func(cfg *config.Con
 }
 
 func IsGoLowerThanGo122() func(cfg *config.Config) error {
-<<<<<<< HEAD
 	return isGoLowerThanGo("1.22")
 }
 
@@ -179,14 +178,6 @@ func isGoLowerThanGo(v string) func(cfg *config.Config) error {
 		}
 
 		return fmt.Errorf("this linter is disabled because the Go version (%s) of your project is lower than Go %s", cfg.Run.Go, v)
-=======
-	return func(cfg *config.Config) error {
-		if cfg == nil || config.IsGoGreaterThanOrEqual(cfg.Run.Go, "1.22") {
-			return nil
-		}
-
-		return fmt.Errorf("this linter is disabled because the Go version (%s) of your project is lower than Go 1.22", cfg.Run.Go)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 }
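Assembled from the two sides of the config.go hunk, the resolved helper simply parameterizes the previously hard-coded 1.22 bound; this is the shape the kept lines imply, not new behavior:

func IsGoLowerThanGo122() func(cfg *config.Config) error {
	return isGoLowerThanGo("1.22")
}

func isGoLowerThanGo(v string) func(cfg *config.Config) error {
	return func(cfg *config.Config) error {
		// Nothing to do when no config is present or the project's Go
		// version already satisfies the bound.
		if cfg == nil || config.IsGoGreaterThanOrEqual(cfg.Run.Go, v) {
			return nil
		}

		return fmt.Errorf("this linter is disabled because the Go version (%s) of your project is lower than Go %s", cfg.Run.Go, v)
	}
}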
"github.com/golangci/golangci-lint/pkg/golinters/exhaustive" "github.com/golangci/golangci-lint/pkg/golinters/exhaustruct" "github.com/golangci/golangci-lint/pkg/golinters/exportloopref" -<<<<<<< HEAD "github.com/golangci/golangci-lint/pkg/golinters/exptostd" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/golangci/golangci-lint/pkg/golinters/fatcontext" "github.com/golangci/golangci-lint/pkg/golinters/forbidigo" "github.com/golangci/golangci-lint/pkg/golinters/forcetypeassert" @@ -76,10 +73,7 @@ import ( "github.com/golangci/golangci-lint/pkg/golinters/nakedret" "github.com/golangci/golangci-lint/pkg/golinters/nestif" "github.com/golangci/golangci-lint/pkg/golinters/nilerr" -<<<<<<< HEAD "github.com/golangci/golangci-lint/pkg/golinters/nilnesserr" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/golangci/golangci-lint/pkg/golinters/nilnil" "github.com/golangci/golangci-lint/pkg/golinters/nlreturn" "github.com/golangci/golangci-lint/pkg/golinters/noctx" @@ -113,10 +107,7 @@ import ( "github.com/golangci/golangci-lint/pkg/golinters/unparam" "github.com/golangci/golangci-lint/pkg/golinters/unused" "github.com/golangci/golangci-lint/pkg/golinters/usestdlibvars" -<<<<<<< HEAD "github.com/golangci/golangci-lint/pkg/golinters/usetesting" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/golangci/golangci-lint/pkg/golinters/varnamelen" "github.com/golangci/golangci-lint/pkg/golinters/wastedassign" "github.com/golangci/golangci-lint/pkg/golinters/whitespace" @@ -172,12 +163,8 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.58.0"). WithPresets(linter.PresetStyle). WithLoadForGoAnalysis(). -<<<<<<< HEAD WithAutoFix(). WithURL("https://github.com/lasiar/canonicalheader"), -======= - WithURL("https://github.com/lasiar/canonicalHeader"), ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) linter.NewConfig(containedctx.New()). WithSince("v1.44.0"). @@ -199,28 +186,16 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(cyclop.New(&cfg.LintersSettings.Cyclop)). WithSince("v1.37.0"). -<<<<<<< HEAD -======= - WithLoadForGoAnalysis(). ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithPresets(linter.PresetComplexity). WithURL("https://github.com/bkielbasa/cyclop"), linter.NewConfig(decorder.New(&cfg.LintersSettings.Decorder)). WithSince("v1.44.0"). -<<<<<<< HEAD WithPresets(linter.PresetStyle). -======= - WithPresets(linter.PresetFormatting, linter.PresetStyle). ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://gitlab.com/bosi/decorder"), linter.NewConfig(linter.NewNoopDeprecated("deadcode", cfg, linter.DeprecationError)). WithSince("v1.0.0"). -<<<<<<< HEAD -======= - WithLoadForGoAnalysis(). ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithPresets(linter.PresetUnused). WithURL("https://github.com/remyoudompheng/go-misc/tree/master/deadcode"). DeprecatedError("The owner seems to have abandoned the linter.", "v1.49.0", "unused"), @@ -243,10 +218,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(dupword.New(&cfg.LintersSettings.DupWord)). WithSince("v1.50.0"). WithPresets(linter.PresetComment). -<<<<<<< HEAD WithAutoFix(). -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/Abirdcfly/dupword"), linter.NewConfig(durationcheck.New()). 
@@ -278,19 +250,12 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.32.0"). WithPresets(linter.PresetBugs, linter.PresetError). WithLoadForGoAnalysis(). -<<<<<<< HEAD WithAutoFix(). -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/polyfloyd/go-errorlint"), linter.NewConfig(linter.NewNoopDeprecated("execinquery", cfg, linter.DeprecationError)). WithSince("v1.46.0"). WithPresets(linter.PresetSQL). -<<<<<<< HEAD -======= - WithLoadForGoAnalysis(). ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/1uf3/execinquery"). DeprecatedError("The repository of the linter has been archived by the owner.", "v1.58.0", ""), @@ -303,10 +268,6 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(linter.NewNoopDeprecated("exhaustivestruct", cfg, linter.DeprecationError)). WithSince("v1.32.0"). WithPresets(linter.PresetStyle, linter.PresetTest). -<<<<<<< HEAD -======= - WithLoadForGoAnalysis(). ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/mbilski/exhaustivestruct"). DeprecatedError("The repository of the linter has been deprecated by the owner.", "v1.46.0", "exhaustruct"), @@ -323,7 +284,6 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithURL("https://github.com/kyoh86/exportloopref"). DeprecatedWarning("Since Go1.22 (loopvar) this linter is no longer relevant.", "v1.60.2", "copyloopvar"), -<<<<<<< HEAD linter.NewConfig(exptostd.New()). WithSince("v1.63.0"). WithPresets(linter.PresetStyle). @@ -331,8 +291,6 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithAutoFix(). WithURL("https://github.com/ldez/exptostd"), -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) linter.NewConfig(forbidigo.New(&cfg.LintersSettings.Forbidigo)). WithSince("v1.34.0"). WithPresets(linter.PresetStyle). @@ -352,10 +310,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.58.0"). WithPresets(linter.PresetPerformance). WithLoadForGoAnalysis(). -<<<<<<< HEAD WithAutoFix(). -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/Crocmagnon/fatcontext"), linter.NewConfig(funlen.New(&cfg.LintersSettings.Funlen)). @@ -373,10 +328,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.51.0"). WithLoadForGoAnalysis(). WithPresets(linter.PresetStyle). -<<<<<<< HEAD WithAutoFix(). -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/nunnatsa/ginkgolinter"), linter.NewConfig(gocheckcompilerdirectives.New()). @@ -438,10 +390,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithPresets(linter.PresetStyle, linter.PresetError). WithLoadForGoAnalysis(). WithAlternativeNames("goerr113"). -<<<<<<< HEAD WithAutoFix(). -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/Djarvur/go-err113"), linter.NewConfig(gofmt.New(&cfg.LintersSettings.Gofmt)). @@ -470,10 +419,6 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(linter.NewNoopDeprecated("golint", cfg, linter.DeprecationError)). WithSince("v1.0.0"). -<<<<<<< HEAD -======= - WithLoadForGoAnalysis(). ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithPresets(linter.PresetStyle). WithURL("https://github.com/golang/lint"). 
DeprecatedError("The repository of the linter has been archived by the owner.", "v1.41.0", "revive"), @@ -517,10 +462,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithLoadForGoAnalysis(). WithPresets(linter.PresetStyle). WithAlternativeNames(megacheckName). -<<<<<<< HEAD WithAutoFix(). -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/dominikh/go-tools/tree/master/simple"), linter.NewConfig(gosmopolitan.New(&cfg.LintersSettings.Gosmopolitan)). @@ -534,10 +476,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.0.0"). WithLoadForGoAnalysis(). WithPresets(linter.PresetBugs, linter.PresetMetaLinter). -<<<<<<< HEAD WithAutoFix(). -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithAlternativeNames("vet", "vetshadow"). WithURL("https://pkg.go.dev/cmd/vet"), @@ -556,20 +495,14 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.62.0"). WithLoadForGoAnalysis(). WithPresets(linter.PresetStyle). -<<<<<<< HEAD WithAutoFix(). -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/uudashr/iface"), linter.NewConfig(importas.New(&cfg.LintersSettings.ImportAs)). WithSince("v1.38.0"). WithPresets(linter.PresetStyle). WithLoadForGoAnalysis(). -<<<<<<< HEAD WithAutoFix(). -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/julz/importas"), linter.NewConfig(inamedparam.New(&cfg.LintersSettings.Inamedparam)). @@ -590,10 +523,6 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(linter.NewNoopDeprecated("interfacer", cfg, linter.DeprecationError)). WithSince("v1.0.0"). -<<<<<<< HEAD -======= - WithLoadForGoAnalysis(). ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithPresets(linter.PresetStyle). WithURL("https://github.com/mvdan/interfacer"). DeprecatedError("The repository of the linter has been archived by the owner.", "v1.38.0", ""), @@ -602,10 +531,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.57.0"). WithLoadForGoAnalysis(). WithPresets(linter.PresetStyle). -<<<<<<< HEAD WithAutoFix(). -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/ckaznocha/intrange"). WithNoopFallback(cfg, linter.IsGoLowerThanGo122()), @@ -639,10 +565,6 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(linter.NewNoopDeprecated("maligned", cfg, linter.DeprecationError)). WithSince("v1.0.0"). -<<<<<<< HEAD -======= - WithLoadForGoAnalysis(). ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithPresets(linter.PresetPerformance). WithURL("https://github.com/mdempsky/maligned"). DeprecatedError("The repository of the linter has been archived by the owner.", "v1.38.0", "govet 'fieldalignment'"), @@ -669,10 +591,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(nakedret.New(&cfg.LintersSettings.Nakedret)). WithSince("v1.19.0"). WithPresets(linter.PresetStyle). -<<<<<<< HEAD WithAutoFix(). -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/alexkohler/nakedret"), linter.NewConfig(nestif.New(&cfg.LintersSettings.Nestif)). @@ -686,15 +605,12 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithPresets(linter.PresetBugs). 
WithURL("https://github.com/gostaticanalysis/nilerr"), -<<<<<<< HEAD linter.NewConfig(nilnesserr.New()). WithSince("v1.63.0"). WithLoadForGoAnalysis(). WithPresets(linter.PresetBugs). WithURL("https://github.com/alingse/nilnesserr"), -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) linter.NewConfig(nilnil.New(&cfg.LintersSettings.NilNil)). WithSince("v1.43.0"). WithPresets(linter.PresetStyle). @@ -704,10 +620,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(nlreturn.New(&cfg.LintersSettings.Nlreturn)). WithSince("v1.30.0"). WithPresets(linter.PresetStyle). -<<<<<<< HEAD WithAutoFix(). -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/ssgreg/nlreturn"), linter.NewConfig(noctx.New()). @@ -743,10 +656,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.55.0"). WithLoadForGoAnalysis(). WithPresets(linter.PresetPerformance). -<<<<<<< HEAD WithAutoFix(). -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/catenacyber/perfsprint"), linter.NewConfig(prealloc.New(&cfg.LintersSettings.Prealloc)). @@ -777,11 +687,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithLoadForGoAnalysis(). WithURL("https://github.com/curioswitch/go-reassign"), -<<<<<<< HEAD linter.NewConfig(recvcheck.New(&cfg.LintersSettings.Recvcheck)). -======= - linter.NewConfig(recvcheck.New()). ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithSince("v1.62.0"). WithPresets(linter.PresetBugs). WithLoadForGoAnalysis(). @@ -791,10 +697,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.37.0"). WithPresets(linter.PresetStyle, linter.PresetMetaLinter). ConsiderSlow(). -<<<<<<< HEAD WithAutoFix(). -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/mgechev/revive"), linter.NewConfig(rowserrcheck.New(&cfg.LintersSettings.RowsErrCheck)). @@ -806,11 +709,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(sloglint.New(&cfg.LintersSettings.SlogLint)). WithSince("v1.55.0"). WithLoadForGoAnalysis(). -<<<<<<< HEAD WithPresets(linter.PresetStyle). -======= - WithPresets(linter.PresetStyle, linter.PresetFormatting). ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/go-simpler/sloglint"), linter.NewConfig(linter.NewNoopDeprecated("scopelint", cfg, linter.DeprecationError)). @@ -837,18 +736,11 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithLoadForGoAnalysis(). WithPresets(linter.PresetBugs, linter.PresetMetaLinter). WithAlternativeNames(megacheckName). -<<<<<<< HEAD WithAutoFix(). -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://staticcheck.dev/"), linter.NewConfig(linter.NewNoopDeprecated("structcheck", cfg, linter.DeprecationError)). WithSince("v1.0.0"). -<<<<<<< HEAD -======= - WithLoadForGoAnalysis(). ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithPresets(linter.PresetUnused). WithURL("https://github.com/opennota/check"). DeprecatedError("The owner seems to have abandoned the linter.", "v1.49.0", "unused"), @@ -857,29 +749,19 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.20.0"). WithLoadForGoAnalysis(). WithPresets(linter.PresetStyle). -<<<<<<< HEAD WithAutoFix(). 
-======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/dominikh/go-tools/tree/master/stylecheck"), linter.NewConfig(tagalign.New(&cfg.LintersSettings.TagAlign)). WithSince("v1.53.0"). -<<<<<<< HEAD WithPresets(linter.PresetStyle). -======= - WithPresets(linter.PresetStyle, linter.PresetFormatting). ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithAutoFix(). WithURL("https://github.com/4meepo/tagalign"), linter.NewConfig(tagliatelle.New(&cfg.LintersSettings.Tagliatelle)). WithSince("v1.40.0"). WithPresets(linter.PresetStyle). -<<<<<<< HEAD WithLoadForGoAnalysis(). -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/ldez/tagliatelle"), linter.NewConfig(tenv.New(&cfg.LintersSettings.Tenv)). @@ -897,10 +779,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.55.0"). WithPresets(linter.PresetTest, linter.PresetBugs). WithLoadForGoAnalysis(). -<<<<<<< HEAD WithAutoFix(). -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/Antonboom/testifylint"), linter.NewConfig(testpackage.New(&cfg.LintersSettings.Testpackage)). @@ -950,7 +829,6 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(usestdlibvars.New(&cfg.LintersSettings.UseStdlibVars)). WithSince("v1.48.0"). WithPresets(linter.PresetStyle). -<<<<<<< HEAD WithAutoFix(). WithURL("https://github.com/sashamelentyev/usestdlibvars"), @@ -963,13 +841,6 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(linter.NewNoopDeprecated("varcheck", cfg, linter.DeprecationError)). WithSince("v1.0.0"). -======= - WithURL("https://github.com/sashamelentyev/usestdlibvars"), - - linter.NewConfig(linter.NewNoopDeprecated("varcheck", cfg, linter.DeprecationError)). - WithSince("v1.0.0"). - WithLoadForGoAnalysis(). ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithPresets(linter.PresetUnused). WithURL("https://github.com/opennota/check"). DeprecatedError("The owner seems to have abandoned the linter.", "v1.49.0", "unused"), @@ -1001,10 +872,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(wsl.New(&cfg.LintersSettings.WSL)). WithSince("v1.20.0"). WithPresets(linter.PresetStyle). -<<<<<<< HEAD WithAutoFix(). -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WithURL("https://github.com/bombsimon/wsl"), linter.NewConfig(zerologlint.New()). 
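The recurring edit throughout builder_linter.go is the addition of WithAutoFix() to entries whose linters now emit analysis.SuggestedFix, plus the removal of WithLoadForGoAnalysis() from noop deprecated entries that never run analysis. A hypothetical entry showing the builder chain ("somelinter" and its URL are placeholders, not part of the patch):

linter.NewConfig(somelinter.New()).
	WithSince("v1.64.0").
	WithPresets(linter.PresetStyle).
	WithLoadForGoAnalysis().
	WithAutoFix(). // issues carry suggested fixes the fixer can apply
	WithURL("https://example.com/somelinter"),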
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go
index da0021949a..157fde715f 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go
@@ -10,10 +10,7 @@ import (
 	"github.com/golangci/golangci-lint/internal/errorutil"
 	"github.com/golangci/golangci-lint/pkg/config"
 	"github.com/golangci/golangci-lint/pkg/fsutils"
-<<<<<<< HEAD
 	"github.com/golangci/golangci-lint/pkg/goformatters"
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"github.com/golangci/golangci-lint/pkg/goutil"
 	"github.com/golangci/golangci-lint/pkg/lint/linter"
 	"github.com/golangci/golangci-lint/pkg/lint/lintersdb"
@@ -64,14 +61,11 @@ func NewRunner(log logutils.Log, cfg *config.Config, args []string, goenv *gouti
 		return nil, fmt.Errorf("failed to get enabled linters: %w", err)
 	}
-<<<<<<< HEAD
 	metaFormatter, err := goformatters.NewMetaFormatter(log, cfg, enabledLinters)
 	if err != nil {
 		return nil, fmt.Errorf("failed to create meta-formatter: %w", err)
 	}
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return &Runner{
 		Processors: []processors.Processor{
 			processors.NewCgo(goenv),
@@ -106,11 +100,7 @@ func NewRunner(log logutils.Log, cfg *config.Config, args []string, goenv *gouti
 			processors.NewSeverity(log.Child(logutils.DebugKeySeverityRules), files, &cfg.Severity),
 
 			// The fixer still needs to see paths for the issues that are relative to the current directory.
-<<<<<<< HEAD
 			processors.NewFixer(cfg, log, fileCache, metaFormatter),
-=======
-			processors.NewFixer(cfg, log, fileCache),
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			// Now we can modify the issues for output.
 			processors.NewPathPrefixer(cfg.Output.PathPrefix),
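NewRunner now builds a single goformatters.MetaFormatter from the enabled linters and injects it into the fixer, so formatter-backed fixes (the gofmt, gofumpt, and goimports packages imported in the fixer hunk below) run through one component. Condensed from the hunk above, with error handling as shown there:

metaFormatter, err := goformatters.NewMetaFormatter(log, cfg, enabledLinters)
if err != nil {
	return nil, fmt.Errorf("failed to create meta-formatter: %w", err)
}
// The fixer receives the meta-formatter alongside its existing inputs.
fixer := processors.NewFixer(cfg, log, fileCache, metaFormatter)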
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go
index 50c87d234e..7c5932368f 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go
@@ -12,11 +12,7 @@ const defaultCodeClimateSeverity = "critical"
 // CodeClimateIssue is a subset of the Code Climate spec.
 // https://github.com/codeclimate/platform/blob/master/spec/analyzers/SPEC.md#data-types
 // It is just enough to support GitLab CI Code Quality.
-<<<<<<< HEAD
 // https://docs.gitlab.com/ee/ci/testing/code_quality.html#code-quality-report-format
-=======
-// https://docs.gitlab.com/ee/ci/testing/code_quality.html#implement-a-custom-tool
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 type CodeClimateIssue struct {
 	Description string `json:"description"`
 	CheckName   string `json:"check_name"`
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/teamcity.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/teamcity.go
index 289c8c3859..83c4959114 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/printers/teamcity.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/teamcity.go
@@ -4,10 +4,6 @@ import (
 	"fmt"
 	"io"
 	"strings"
-<<<<<<< HEAD
-=======
-	"unicode/utf8"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 	"github.com/golangci/golangci-lint/pkg/result"
 )
@@ -115,19 +111,9 @@ func (i InspectionInstance) Print(w io.Writer, replacer *strings.Replacer) (int,
 }

 func cutVal(s string, limit int) string {
-<<<<<<< HEAD
 	runes := []rune(s)
 	if len(runes) > limit {
 		return string(runes[:limit])
 	}

 	return s
-=======
-	var size, count int
-	for i := 0; i < limit && count < len(s); i++ {
-		_, size = utf8.DecodeRuneInString(s[count:])
-		count += size
-	}
-
-	return s[:count]
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go b/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go
index 713fe36e5b..e338963fa3 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go
@@ -5,10 +5,7 @@ import (
 	"fmt"
 	"go/token"

-<<<<<<< HEAD
 	"golang.org/x/tools/go/analysis"
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"golang.org/x/tools/go/packages"
 )

@@ -16,21 +13,6 @@ type Range struct {
 	From, To int
 }

-<<<<<<< HEAD
-=======
-type Replacement struct {
-	NeedOnlyDelete bool     // need to delete all lines of the issue without replacement with new lines
-	NewLines       []string // if NeedDelete is false it's the replacement lines
-	Inline         *InlineFix
-}
-
-type InlineFix struct {
-	StartCol  int // zero-based
-	Length    int // length of chunk to be replaced
-	NewString string
-}
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 type Issue struct {
 	FromLinter string
 	Text       string
@@ -40,7 +22,6 @@ type Issue struct {
 	// Source lines of a code with the issue to show
 	SourceLines []string

-<<<<<<< HEAD
 	// Pkg is needed for proper caching of linting results
 	Pkg *packages.Package `json:"-"`

@@ -54,21 +35,6 @@ type Issue struct {
 	// If we know how to fix the issue we can provide replacement lines
 	SuggestedFixes []analysis.SuggestedFix `json:",omitempty"`

-=======
-	// If we know how to fix the issue we can provide replacement lines
-	Replacement *Replacement
-
-	// Pkg is needed for proper caching of linting results
-	Pkg *packages.Package `json:"-"`
-
-	LineRange *Range `json:",omitempty"`
-
-	Pos token.Position
-
-	// HunkPos is used only when golangci-lint is run over a diff
-	HunkPos int `json:",omitempty"`
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	// If we are expecting a nolint (because this is from nolintlint), record the expected linter
 	ExpectNoLint         bool
 	ExpectedNoLintLinter string
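Note: both sides of the `cutVal` conflict truncate to a number of runes rather than bytes, so multi-byte UTF-8 characters are never split; the kept HEAD version simply does it with a rune slice instead of stepping through the string with `utf8.DecodeRuneInString`. A self-contained comparison, with both bodies copied from the hunk above:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

// cutValRunes is the kept (HEAD) implementation: truncate to `limit` runes.
func cutValRunes(s string, limit int) string {
	runes := []rune(s)
	if len(runes) > limit {
		return string(runes[:limit])
	}
	return s
}

// cutValBytes is the removed implementation, shown for reference: it walks
// rune by rune to find the byte offset of the limit-th rune.
func cutValBytes(s string, limit int) string {
	var size, count int
	for i := 0; i < limit && count < len(s); i++ {
		_, size = utf8.DecodeRuneInString(s[count:])
		count += size
	}
	return s[:count]
}

func main() {
	s := "héllo, wörld"
	// Both yield "héllo": five runes, even though "é" is two bytes.
	fmt.Println(cutValRunes(s, 5), cutValBytes(s, 5))
}
```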
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go
index bdf81fcf6c..b82c7f2071 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go
@@ -1,4 +1,3 @@
-<<<<<<< HEAD
 // Copyright 2018 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
@@ -23,21 +22,6 @@ import (
 	"github.com/golangci/golangci-lint/pkg/goformatters/gofmt"
 	"github.com/golangci/golangci-lint/pkg/goformatters/gofumpt"
 	"github.com/golangci/golangci-lint/pkg/goformatters/goimports"
-=======
-package processors
-
-import (
-	"bytes"
-	"fmt"
-	"os"
-	"path/filepath"
-	"sort"
-	"strings"
-
-	"github.com/golangci/golangci-lint/internal/go/robustio"
-	"github.com/golangci/golangci-lint/pkg/config"
-	"github.com/golangci/golangci-lint/pkg/fsutils"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"github.com/golangci/golangci-lint/pkg/logutils"
 	"github.com/golangci/golangci-lint/pkg/result"
 	"github.com/golangci/golangci-lint/pkg/timeutils"
@@ -45,35 +29,23 @@ import (

 var _ Processor = (*Fixer)(nil)

-<<<<<<< HEAD
 const filePerm = 0644
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 type Fixer struct {
 	cfg       *config.Config
 	log       logutils.Log
 	fileCache *fsutils.FileCache
 	sw        *timeutils.Stopwatch
-<<<<<<< HEAD
 	formatter *goformatters.MetaFormatter
 }

 func NewFixer(cfg *config.Config, log logutils.Log, fileCache *fsutils.FileCache, formatter *goformatters.MetaFormatter) *Fixer {
-=======
-}
-
-func NewFixer(cfg *config.Config, log logutils.Log, fileCache *fsutils.FileCache) *Fixer {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return &Fixer{
 		cfg:       cfg,
 		log:       log,
 		fileCache: fileCache,
 		sw:        timeutils.NewStopwatch("fixer", log),
-<<<<<<< HEAD
 		formatter: formatter,
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 }

@@ -86,7 +58,6 @@ func (p Fixer) Process(issues []result.Issue) ([]result.Issue, error) {
 		return issues, nil
 	}

-<<<<<<< HEAD
 	p.log.Infof("Applying suggested fixes")

 	notFixableIssues, err := timeutils.TrackStage(p.sw, "all", func() ([]result.Issue, error) {
@@ -94,35 +65,10 @@ func (p Fixer) Process(issues []result.Issue) ([]result.Issue, error) {
 	})
 	if err != nil {
 		p.log.Warnf("Failed to fix issues: %v", err)
-=======
-	outIssues := make([]result.Issue, 0, len(issues))
-	issuesToFixPerFile := map[string][]result.Issue{}
-	for i := range issues {
-		issue := &issues[i]
-		if issue.Replacement == nil {
-			outIssues = append(outIssues, *issue)
-			continue
-		}
-
-		issuesToFixPerFile[issue.FilePath()] = append(issuesToFixPerFile[issue.FilePath()], *issue)
-	}
-
-	for file, issuesToFix := range issuesToFixPerFile {
-		err := p.sw.TrackStageErr("all", func() error {
-			return p.fixIssuesInFile(file, issuesToFix)
-		})
-		if err != nil {
-			p.log.Errorf("Failed to fix issues in file %s: %s", file, err)
-
-			// show issues only if can't fix them
-			outIssues = append(outIssues, issuesToFix...)
-		}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}

 	p.printStat()

-<<<<<<< HEAD
 	return notFixableIssues, nil
 }

@@ -277,14 +223,10 @@ func (p Fixer) process(issues []result.Issue) ([]result.Issue, error) {
 	}

 	return notFixableIssues, editError
-=======
-	return outIssues, nil
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 func (Fixer) Finish() {}

-<<<<<<< HEAD
 func (p Fixer) printStat() {
 	p.sw.PrintStages()
 }

@@ -359,189 +301,3 @@ func diff3Conflict(path, xlabel, ylabel string, xedits, yedits []diff.Edit) erro
 		return fmt.Errorf("conflicting edits from %s and %s on %s\nfirst edits:\n%s\nsecond edits:\n%s",
 			xlabel, ylabel, path, xdiff, ydiff)
 }
-=======
-func (p Fixer) fixIssuesInFile(filePath string, issues []result.Issue) error {
-	// TODO: don't read the whole file into memory: read line by line;
-	// can't just use bufio.scanner: it has a line length limit
-	origFileData, err := p.fileCache.GetFileBytes(filePath)
-	if err != nil {
-		return fmt.Errorf("failed to get file bytes for %s: %w", filePath, err)
-	}
-
-	origFileLines := bytes.Split(origFileData, []byte("\n"))
-
-	tmpFileName := filepath.Join(filepath.Dir(filePath), fmt.Sprintf(".%s.golangci_fix", filepath.Base(filePath)))
-
-	tmpOutFile, err := os.Create(tmpFileName)
-	if err != nil {
-		return fmt.Errorf("failed to make file %s: %w", tmpFileName, err)
-	}
-
-	// merge multiple issues per line into one issue
-	issuesPerLine := map[int][]result.Issue{}
-	for i := range issues {
-		issue := &issues[i]
-		issuesPerLine[issue.Line()] = append(issuesPerLine[issue.Line()], *issue)
-	}
-
-	issues = issues[:0] // reuse the same memory
-	for line, lineIssues := range issuesPerLine {
-		if mergedIssue := p.mergeLineIssues(line, lineIssues, origFileLines); mergedIssue != nil {
-			issues = append(issues, *mergedIssue)
-		}
-	}
-
-	issues = p.findNotIntersectingIssues(issues)
-
-	if err = p.writeFixedFile(origFileLines, issues, tmpOutFile); err != nil {
-		tmpOutFile.Close()
-		_ = robustio.RemoveAll(tmpOutFile.Name())
-		return err
-	}
-
-	tmpOutFile.Close()
-
-	if err = robustio.Rename(tmpOutFile.Name(), filePath); err != nil {
-		_ = robustio.RemoveAll(tmpOutFile.Name())
-		return fmt.Errorf("failed to rename %s -> %s: %w", tmpOutFile.Name(), filePath, err)
-	}
-
-	return nil
-}
-
-func (p Fixer) mergeLineIssues(lineNum int, lineIssues []result.Issue, origFileLines [][]byte) *result.Issue {
-	origLine := origFileLines[lineNum-1] // lineNum is 1-based
-
-	if len(lineIssues) == 1 && lineIssues[0].Replacement.Inline == nil {
-		return &lineIssues[0]
-	}
-
-	// check issues first
-	for ind := range lineIssues {
-		li := &lineIssues[ind]
-
-		if li.LineRange != nil {
-			p.log.Infof("Line %d has multiple issues but at least one of them is ranged: %#v", lineNum, lineIssues)
-			return &lineIssues[0]
-		}
-
-		inline := li.Replacement.Inline
-
-		if inline == nil || len(li.Replacement.NewLines) != 0 || li.Replacement.NeedOnlyDelete {
-			p.log.Infof("Line %d has multiple issues but at least one of them isn't inline: %#v", lineNum, lineIssues)
-			return li
-		}
-
-		if inline.StartCol < 0 || inline.Length <= 0 || inline.StartCol+inline.Length > len(origLine) {
-			p.log.Warnf("Line %d (%q) has invalid inline fix: %#v, %#v", lineNum, origLine, li, inline)
-			return nil
-		}
-	}
-
-	return p.applyInlineFixes(lineIssues, origLine, lineNum)
-}
-
-func (p Fixer) applyInlineFixes(lineIssues []result.Issue, origLine []byte, lineNum int) *result.Issue {
-	sort.Slice(lineIssues, func(i, j int) bool {
-		return lineIssues[i].Replacement.Inline.StartCol < lineIssues[j].Replacement.Inline.StartCol
-	})
-
-	var newLineBuf bytes.Buffer
-	newLineBuf.Grow(len(origLine))
-
-	//nolint:misspell // misspelling is intentional
-	// example: origLine="it's becouse of them", StartCol=5, Length=7, NewString="because"
-
-	curOrigLinePos := 0
-	for i := range lineIssues {
-		fix := lineIssues[i].Replacement.Inline
-		if fix.StartCol < curOrigLinePos {
-			p.log.Warnf("Line %d has multiple intersecting issues: %#v", lineNum, lineIssues)
-			return nil
-		}
-
-		if curOrigLinePos != fix.StartCol {
-			newLineBuf.Write(origLine[curOrigLinePos:fix.StartCol])
-		}
-		newLineBuf.WriteString(fix.NewString)
-		curOrigLinePos = fix.StartCol + fix.Length
-	}
-	if curOrigLinePos != len(origLine) {
-		newLineBuf.Write(origLine[curOrigLinePos:])
-	}
-
-	mergedIssue := lineIssues[0] // use text from the first issue (it's not really used)
-	mergedIssue.Replacement = &result.Replacement{
-		NewLines: []string{newLineBuf.String()},
-	}
-	return &mergedIssue
-}
-
-func (p Fixer) findNotIntersectingIssues(issues []result.Issue) []result.Issue {
-	sort.SliceStable(issues, func(i, j int) bool {
-		a, b := issues[i], issues[j]
-		return a.Line() < b.Line()
-	})
-
-	var ret []result.Issue
-	var currentEnd int
-	for i := range issues {
-		issue := &issues[i]
-		rng := issue.GetLineRange()
-		if rng.From <= currentEnd {
-			p.log.Infof("Skip issue %#v: intersects with end %d", issue, currentEnd)
-			continue // skip intersecting issue
-		}
-		p.log.Infof("Fix issue %#v with range %v", issue, issue.GetLineRange())
-		ret = append(ret, *issue)
-		currentEnd = rng.To
-	}
-
-	return ret
-}
-
-func (p Fixer) writeFixedFile(origFileLines [][]byte, issues []result.Issue, tmpOutFile *os.File) error {
-	// issues aren't intersecting
-
-	nextIssueIndex := 0
-	for i := 0; i < len(origFileLines); i++ {
-		var outLine string
-		var nextIssue *result.Issue
-		if nextIssueIndex != len(issues) {
-			nextIssue = &issues[nextIssueIndex]
-		}
-
-		origFileLineNumber := i + 1
-		if nextIssue == nil || origFileLineNumber != nextIssue.GetLineRange().From {
-			outLine = string(origFileLines[i])
-		} else {
-			nextIssueIndex++
-			rng := nextIssue.GetLineRange()
-			if rng.From > rng.To {
-				// Maybe better decision is to skip such issues, re-evaluate if regressed.
-				p.log.Warnf("[fixer]: issue line range is probably invalid, fix can be incorrect (from=%d, to=%d, linter=%s)",
-					rng.From, rng.To, nextIssue.FromLinter,
-				)
-			}
-			i += rng.To - rng.From
-			if nextIssue.Replacement.NeedOnlyDelete {
-				continue
-			}
-			outLine = strings.Join(nextIssue.Replacement.NewLines, "\n")
-		}
-
-		if i < len(origFileLines)-1 {
-			outLine += "\n"
-		}
-		if _, err := tmpOutFile.WriteString(outLine); err != nil {
-			return fmt.Errorf("failed to write output line: %w", err)
-		}
-	}
-
-	return nil
-}
-
-func (p Fixer) printStat() {
-	p.sw.PrintStages()
-}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_from_linter.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_from_linter.go
index 0fab2b09f6..690cdf3f8a 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_from_linter.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_from_linter.go
@@ -34,11 +34,7 @@ func (p *MaxFromLinter) Process(issues []result.Issue) ([]result.Issue, error) {
 	}

 	return filterIssuesUnsafe(issues, func(issue *result.Issue) bool {
-<<<<<<< HEAD
 		if issue.SuggestedFixes != nil && p.cfg.Issues.NeedFix {
-=======
-		if issue.Replacement != nil && p.cfg.Issues.NeedFix {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			// we need to fix all issues at once => we need to return all of them
 			return true
 		}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_same_issues.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_same_issues.go
index d961ccb0f5..0d1c286282 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_same_issues.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_same_issues.go
@@ -36,11 +36,7 @@ func (p *MaxSameIssues) Process(issues []result.Issue) ([]result.Issue, error) {
 	}

 	return filterIssuesUnsafe(issues, func(issue *result.Issue) bool {
-<<<<<<< HEAD
 		if issue.SuggestedFixes != nil && p.cfg.Issues.NeedFix {
-=======
-		if issue.Replacement != nil && p.cfg.Issues.NeedFix {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			// we need to fix all issues at once => we need to return all of them
 			return true
 		}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity.go
index 68432daf72..fcd65326f9 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity.go
@@ -1,10 +1,7 @@
 package processors

 import (
-<<<<<<< HEAD
 	"cmp"
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"regexp"

 	"github.com/golangci/golangci-lint/pkg/config"
@@ -71,14 +68,7 @@ func (p *Severity) transform(issue *result.Issue) *result.Issue {
 		return issue
 	}

-<<<<<<< HEAD
 	issue.Severity = cmp.Or(rule.severity, p.defaultSeverity)
-=======
-	issue.Severity = rule.severity
-	if issue.Severity == "" {
-		issue.Severity = p.defaultSeverity
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 	return issue
 }
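Note: the severity hunk swaps a four-line empty-string fallback for `cmp.Or`, the standard-library helper (Go 1.22+) that returns its first argument whose value is not the zero value. A quick runnable illustration of why the two forms are equivalent for string severities:

```go
package main

import (
	"cmp"
	"fmt"
)

func main() {
	const defaultSeverity = "warning"

	// Old form:
	//   sev := rule.severity
	//   if sev == "" { sev = defaultSeverity }
	// New form: cmp.Or picks the first non-zero (here: non-empty) value.
	fmt.Println(cmp.Or("", defaultSeverity))      // warning: empty rule severity falls back
	fmt.Println(cmp.Or("error", defaultSeverity)) // error: an explicit rule severity wins
}
```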
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/uniq_by_line.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/uniq_by_line.go
index 7cf9daa576..ec134f25f6 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/uniq_by_line.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/uniq_by_line.go
@@ -26,11 +26,7 @@ func (*UniqByLine) Name() string {
 }

 func (p *UniqByLine) Process(issues []result.Issue) ([]result.Issue, error) {
-<<<<<<< HEAD
 	if !p.cfg.Issues.UniqByLine {
-=======
-	if !p.cfg.Output.UniqByLine {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		return issues, nil
 	}

@@ -40,11 +36,7 @@ func (p *UniqByLine) Process(issues []result.Issue) ([]result.Issue, error) {
 func (*UniqByLine) Finish() {}

 func (p *UniqByLine) shouldPassIssue(issue *result.Issue) bool {
-<<<<<<< HEAD
 	if issue.SuggestedFixes != nil && p.cfg.Issues.NeedFix {
-=======
-	if issue.Replacement != nil && p.cfg.Issues.NeedFix {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		// if issue will be auto-fixed we shouldn't collapse issues:
 		// e.g. one line can contain 2 misspellings, they will be in 2 issues and misspell should fix both of them.
 		return true
diff --git a/vendor/github.com/golangci/modinfo/.gitignore b/vendor/github.com/golangci/modinfo/.gitignore
deleted file mode 100644
index 9f11b755a1..0000000000
--- a/vendor/github.com/golangci/modinfo/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-.idea/
diff --git a/vendor/github.com/golangci/modinfo/.golangci.yml b/vendor/github.com/golangci/modinfo/.golangci.yml
deleted file mode 100644
index 9698182f2a..0000000000
--- a/vendor/github.com/golangci/modinfo/.golangci.yml
+++ /dev/null
@@ -1,157 +0,0 @@
[157 deleted lines: the modinfo module's own golangci-lint configuration; body elided]
diff --git a/vendor/github.com/golangci/modinfo/LICENSE b/vendor/github.com/golangci/modinfo/LICENSE
deleted file mode 100644
index f288702d2f..0000000000
--- a/vendor/github.com/golangci/modinfo/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
[674 deleted lines: full GNU GPL v3 license text; body elided]
diff --git a/vendor/github.com/golangci/modinfo/Makefile b/vendor/github.com/golangci/modinfo/Makefile
deleted file mode 100644
index df91018f11..0000000000
--- a/vendor/github.com/golangci/modinfo/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-.PHONY: clean check test
-
-default: clean check test
-
-clean:
-	rm -rf dist/ cover.out
-
-test: clean
-	go test -v -cover ./...
-
-check:
-	golangci-lint run
diff --git a/vendor/github.com/golangci/modinfo/module.go b/vendor/github.com/golangci/modinfo/module.go
deleted file mode 100644
index ff0b21b9b8..0000000000
--- a/vendor/github.com/golangci/modinfo/module.go
+++ /dev/null
@@ -1,157 +0,0 @@
[157 deleted lines: the modinfo analyzer source (ModInfo type, Analyzer, GetModuleInfo, FindModuleFromPass, ReadModuleFileFromPass, ReadModuleFile); body elided]
diff --git a/vendor/github.com/golangci/modinfo/readme.md b/vendor/github.com/golangci/modinfo/readme.md
deleted file mode 100644
index 2175de8eb4..0000000000
--- a/vendor/github.com/golangci/modinfo/readme.md
+++ /dev/null
@@ -1,73 +0,0 @@
[73 deleted lines: module README with two analyzer usage examples; body elided]
diff --git a/vendor/github.com/google/cel-go/cel/BUILD.bazel b/vendor/github.com/google/cel-go/cel/BUILD.bazel
index 67ba29766f..81549fb4c5 100644
--- a/vendor/github.com/google/cel-go/cel/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/cel/BUILD.bazel
@@ -39,10 +39,7 @@ go_library(
 	"//common/types/traits:go_default_library",
 	"//interpreter:go_default_library",
 	"//parser:go_default_library",
-<<<<<<< HEAD
 	"@dev_cel_expr//:expr",
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
 	"@org_golang_google_protobuf//proto:go_default_library",
 	"@org_golang_google_protobuf//reflect/protodesc:go_default_library",
@@ -85,10 +82,6 @@ go_test(
 	"//test:go_default_library",
 	"//test/proto2pb:go_default_library",
 	"//test/proto3pb:go_default_library",
-<<<<<<< HEAD
-=======
-	"@io_bazel_rules_go//proto/wkt:descriptor_go_proto",
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
 	"@org_golang_google_protobuf//proto:go_default_library",
 	"@org_golang_google_protobuf//encoding/prototext:go_default_library",
diff --git a/vendor/github.com/google/cel-go/cel/decls.go b/vendor/github.com/google/cel-go/cel/decls.go
index 6b3dacb174..4188060210 100644
--- a/vendor/github.com/google/cel-go/cel/decls.go
+++ b/vendor/github.com/google/cel-go/cel/decls.go
@@ -23,10 +23,7 @@ import (
 	"github.com/google/cel-go/common/types"
 	"github.com/google/cel-go/common/types/ref"

-<<<<<<< HEAD
 	celpb "cel.dev/expr"
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
 )

@@ -316,7 +313,6 @@ func ExprTypeToType(t *exprpb.Type) (*Type, error) {

 // ExprDeclToDeclaration converts a protobuf CEL declaration to a CEL-native
declaration, either a Variable or Function. func ExprDeclToDeclaration(d *exprpb.Decl) (EnvOption, error) { -<<<<<<< HEAD return AlphaProtoAsDeclaration(d) } @@ -333,30 +329,18 @@ func AlphaProtoAsDeclaration(d *exprpb.Decl) (EnvOption, error) { func ProtoAsDeclaration(d *celpb.Decl) (EnvOption, error) { switch d.GetDeclKind().(type) { case *celpb.Decl_Function: -======= - switch d.GetDeclKind().(type) { - case *exprpb.Decl_Function: ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) overloads := d.GetFunction().GetOverloads() opts := make([]FunctionOpt, len(overloads)) for i, o := range overloads { args := make([]*Type, len(o.GetParams())) for j, p := range o.GetParams() { -<<<<<<< HEAD a, err := types.ProtoAsType(p) -======= - a, err := types.ExprTypeToType(p) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } args[j] = a } -<<<<<<< HEAD res, err := types.ProtoAsType(o.GetResultType()) -======= - res, err := types.ExprTypeToType(o.GetResultType()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -367,24 +351,15 @@ func ProtoAsDeclaration(d *celpb.Decl) (EnvOption, error) { } } return Function(d.GetName(), opts...), nil -<<<<<<< HEAD case *celpb.Decl_Ident: t, err := types.ProtoAsType(d.GetIdent().GetType()) -======= - case *exprpb.Decl_Ident: - t, err := types.ExprTypeToType(d.GetIdent().GetType()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } if d.GetIdent().GetValue() == nil { return Variable(d.GetName(), t), nil } -<<<<<<< HEAD val, err := ast.ProtoConstantAsVal(d.GetIdent().GetValue()) -======= - val, err := ast.ConstantToVal(d.GetIdent().GetValue()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } diff --git a/vendor/github.com/google/cel-go/cel/env.go b/vendor/github.com/google/cel-go/cel/env.go index 57b0e8204c..3bfe428992 100644 --- a/vendor/github.com/google/cel-go/cel/env.go +++ b/vendor/github.com/google/cel-go/cel/env.go @@ -44,12 +44,9 @@ type Ast struct { // NativeRep converts the AST to a Go-native representation. func (ast *Ast) NativeRep() *celast.AST { -<<<<<<< HEAD if ast == nil { return nil } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ast.impl } @@ -61,24 +58,13 @@ func (ast *Ast) Expr() *exprpb.Expr { if ast == nil { return nil } -<<<<<<< HEAD pbExpr, _ := celast.ExprToProto(ast.NativeRep().Expr()) -======= - pbExpr, _ := celast.ExprToProto(ast.impl.Expr()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return pbExpr } // IsChecked returns whether the Ast value has been successfully type-checked. func (ast *Ast) IsChecked() bool { -<<<<<<< HEAD return ast.NativeRep().IsChecked() -======= - if ast == nil { - return false - } - return ast.impl.IsChecked() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // SourceInfo returns character offset and newline position information about expression elements. 
@@ -86,11 +72,7 @@ func (ast *Ast) SourceInfo() *exprpb.SourceInfo { if ast == nil { return nil } -<<<<<<< HEAD pbInfo, _ := celast.SourceInfoToProto(ast.NativeRep().SourceInfo()) -======= - pbInfo, _ := celast.SourceInfoToProto(ast.impl.SourceInfo()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return pbInfo } @@ -113,11 +95,7 @@ func (ast *Ast) OutputType() *Type { if ast == nil { return types.ErrorType } -<<<<<<< HEAD return ast.NativeRep().GetType(ast.NativeRep().Expr().ID()) -======= - return ast.impl.GetType(ast.impl.Expr().ID()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Source returns a view of the input used to create the Ast. This source may be complete or @@ -239,7 +217,6 @@ func (e *Env) Check(ast *Ast) (*Ast, *Issues) { chk, err := e.initChecker() if err != nil { errs := common.NewErrors(ast.Source()) -<<<<<<< HEAD errs.ReportErrorString(common.NoLocation, err.Error()) return nil, NewIssuesWithSourceInfo(errs, ast.NativeRep().SourceInfo()) } @@ -247,15 +224,6 @@ func (e *Env) Check(ast *Ast) (*Ast, *Issues) { checked, errs := checker.Check(ast.NativeRep(), ast.Source(), chk) if len(errs.GetErrors()) > 0 { return nil, NewIssuesWithSourceInfo(errs, ast.NativeRep().SourceInfo()) -======= - errs.ReportError(common.NoLocation, err.Error()) - return nil, NewIssuesWithSourceInfo(errs, ast.impl.SourceInfo()) - } - - checked, errs := checker.Check(ast.impl, ast.Source(), chk) - if len(errs.GetErrors()) > 0 { - return nil, NewIssuesWithSourceInfo(errs, ast.impl.SourceInfo()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Manually create the Ast to ensure that the Ast source information (which may be more // detailed than the information provided by Check), is returned to the caller. @@ -276,11 +244,7 @@ func (e *Env) Check(ast *Ast) (*Ast, *Issues) { } } // Apply additional validators on the type-checked result. -<<<<<<< HEAD iss := NewIssuesWithSourceInfo(errs, ast.NativeRep().SourceInfo()) -======= - iss := NewIssuesWithSourceInfo(errs, ast.impl.SourceInfo()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, v := range e.validators { v.Validate(e, vConfig, checked, iss) } @@ -345,26 +309,13 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) { copy(chkOptsCopy, e.chkOpts) // Copy the declarations if needed. -<<<<<<< HEAD -======= - varsCopy := []*decls.VariableDecl{} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if chk != nil { // If the type-checker has already been instantiated, then the e.declarations have been // validated within the chk instance. chkOptsCopy = append(chkOptsCopy, checker.ValidatedDeclarations(chk)) -<<<<<<< HEAD } varsCopy := make([]*decls.VariableDecl, len(e.variables)) copy(varsCopy, e.variables) -======= - } else { - // If the type-checker has not been instantiated, ensure the unvalidated declarations are - // provided to the extended Env instance. 
- varsCopy = make([]*decls.VariableDecl, len(e.variables)) - copy(varsCopy, e.variables) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Copy macros and program options macsCopy := make([]parser.Macro, len(e.macros)) @@ -461,7 +412,6 @@ func (e *Env) Libraries() []string { return libraries } -<<<<<<< HEAD // HasFunction returns whether a specific function has been configured in the environment func (e *Env) HasFunction(functionName string) bool { _, ok := e.functions[functionName] @@ -473,8 +423,6 @@ func (e *Env) Functions() map[string]*decls.FunctionDecl { return e.functions } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // HasValidator returns whether a specific ASTValidator has been configured in the environment. func (e *Env) HasValidator(name string) bool { for _, v := range e.validators { @@ -511,15 +459,12 @@ func (e *Env) ParseSource(src Source) (*Ast, *Issues) { // Program generates an evaluable instance of the Ast within the environment (Env). func (e *Env) Program(ast *Ast, opts ...ProgramOption) (Program, error) { -<<<<<<< HEAD return e.PlanProgram(ast.NativeRep(), opts...) } // PlanProgram generates an evaluable instance of the AST in the go-native representation within // the environment (Env). func (e *Env) PlanProgram(a *celast.AST, opts ...ProgramOption) (Program, error) { -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) optSet := e.progOpts if len(opts) != 0 { mergedOpts := []ProgramOption{} @@ -527,11 +472,7 @@ func (e *Env) PlanProgram(a *celast.AST, opts ...ProgramOption) (Program, error) mergedOpts = append(mergedOpts, opts...) optSet = mergedOpts } -<<<<<<< HEAD return newProgram(e, a, optSet) -======= - return newProgram(e, ast, optSet) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // CELTypeAdapter returns the `types.Adapter` configured for the environment. @@ -615,12 +556,8 @@ func (e *Env) PartialVars(vars any) (interpreter.PartialActivation, error) { // TODO: Consider adding an option to generate a Program.Residual to avoid round-tripping to an // Ast format and then Program again. func (e *Env) ResidualAst(a *Ast, details *EvalDetails) (*Ast, error) { -<<<<<<< HEAD ast := a.NativeRep() pruned := interpreter.PruneAst(ast.Expr(), ast.SourceInfo().MacroCalls(), details.State()) -======= - pruned := interpreter.PruneAst(a.impl.Expr(), a.impl.SourceInfo().MacroCalls(), details.State()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) newAST := &Ast{source: a.Source(), impl: pruned} expr, err := AstToString(newAST) if err != nil { @@ -646,11 +583,7 @@ func (e *Env) EstimateCost(ast *Ast, estimator checker.CostEstimator, opts ...ch extendedOpts := make([]checker.CostOption, 0, len(e.costOptions)) extendedOpts = append(extendedOpts, opts...) extendedOpts = append(extendedOpts, e.costOptions...) -<<<<<<< HEAD return checker.Cost(ast.NativeRep(), estimator, extendedOpts...) -======= - return checker.Cost(ast.impl, estimator, extendedOpts...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // configure applies a series of EnvOptions to the current environment. 
@@ -682,12 +615,9 @@ func (e *Env) configure(opts []EnvOption) (*Env, error) { if e.HasFeature(featureVariadicLogicalASTs) { prsrOpts = append(prsrOpts, parser.EnableVariadicOperatorASTs(true)) } -<<<<<<< HEAD if e.HasFeature(featureIdentEscapeSyntax) { prsrOpts = append(prsrOpts, parser.EnableIdentEscapeSyntax(true)) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) e.prsr, err = parser.NewParser(prsrOpts...) if err != nil { return nil, err @@ -840,17 +770,10 @@ func (i *Issues) Append(other *Issues) *Issues { if i == nil { return other } -<<<<<<< HEAD if other == nil || i == other { return i } return NewIssuesWithSourceInfo(i.errs.Append(other.errs.GetErrors()), i.info) -======= - if other == nil { - return i - } - return NewIssues(i.errs.Append(other.errs.GetErrors())) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // String converts the issues to a suitable display string. @@ -884,11 +807,7 @@ type interopCELTypeProvider struct { // FindStructType returns a types.Type instance for the given fully-qualified typeName if one exists. // -<<<<<<< HEAD // This method proxies to the underlying ref.TypeProvider's FindType method and converts protobuf type -======= -// This method proxies to the underyling ref.TypeProvider's FindType method and converts protobuf type ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // into a native type representation. If the conversion fails, the type is listed as not found. func (p *interopCELTypeProvider) FindStructType(typeName string) (*types.Type, bool) { if et, found := p.FindType(typeName); found { @@ -911,11 +830,7 @@ func (p *interopCELTypeProvider) FindStructFieldNames(typeName string) ([]string // FindStructFieldType returns a types.FieldType instance for the given fully-qualified typeName and field // name, if one exists. // -<<<<<<< HEAD // This method proxies to the underlying ref.TypeProvider's FindFieldType method and converts protobuf type -======= -// This method proxies to the underyling ref.TypeProvider's FindFieldType method and converts protobuf type ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // into a native type representation. If the conversion fails, the type is listed as not found. func (p *interopCELTypeProvider) FindStructFieldType(structType, fieldName string) (*types.FieldType, bool) { if ft, found := p.FindFieldType(structType, fieldName); found { diff --git a/vendor/github.com/google/cel-go/cel/inlining.go b/vendor/github.com/google/cel-go/cel/inlining.go index 8d9fde251c..a4530e19e7 100644 --- a/vendor/github.com/google/cel-go/cel/inlining.go +++ b/vendor/github.com/google/cel-go/cel/inlining.go @@ -60,11 +60,7 @@ func NewInlineVariable(name string, definition *Ast) *InlineVariable { // If the variable occurs more than once, the provided alias will be used to replace the expressions // where the variable name occurs. func NewInlineVariableWithAlias(name, alias string, definition *Ast) *InlineVariable { -<<<<<<< HEAD return &InlineVariable{name: name, alias: alias, def: definition.NativeRep()} -======= - return &InlineVariable{name: name, alias: alias, def: definition.impl} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // NewInliningOptimizer creates and optimizer which replaces variables with expression definitions. 
diff --git a/vendor/github.com/google/cel-go/cel/io.go b/vendor/github.com/google/cel-go/cel/io.go index 079ad0c1a1..a327c9672d 100644 --- a/vendor/github.com/google/cel-go/cel/io.go +++ b/vendor/github.com/google/cel-go/cel/io.go @@ -28,10 +28,7 @@ import ( "github.com/google/cel-go/common/types/traits" "github.com/google/cel-go/parser" -<<<<<<< HEAD celpb "cel.dev/expr" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" anypb "google.golang.org/protobuf/types/known/anypb" ) @@ -65,11 +62,7 @@ func AstToCheckedExpr(a *Ast) (*exprpb.CheckedExpr, error) { if !a.IsChecked() { return nil, fmt.Errorf("cannot convert unchecked ast") } -<<<<<<< HEAD return ast.ToProto(a.NativeRep()) -======= - return ast.ToProto(a.impl) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // ParsedExprToAst converts a parsed expression proto message to an Ast. @@ -106,7 +99,6 @@ func AstToParsedExpr(a *Ast) (*exprpb.ParsedExpr, error) { // Note, the conversion may not be an exact replica of the original expression, but will produce // a string that is semantically equivalent and whose textual representation is stable. func AstToString(a *Ast) (string, error) { -<<<<<<< HEAD return parser.Unparse(a.NativeRep().Expr(), a.NativeRep().SourceInfo()) } @@ -150,39 +142,11 @@ func ValueAsProto(res ref.Val) (*celpb.Value, error) { elts := make([]*celpb.Value, 0, int64(sz)) for i := types.Int(0); i < sz; i++ { v, err := ValueAsProto(l.Get(i)) -======= - return parser.Unparse(a.impl.Expr(), a.impl.SourceInfo()) -} - -// RefValueToValue converts between ref.Val and api.expr.Value. -// The result Value is the serialized proto form. The ref.Val must not be error or unknown. -func RefValueToValue(res ref.Val) (*exprpb.Value, error) { - switch res.Type() { - case types.BoolType: - return &exprpb.Value{ - Kind: &exprpb.Value_BoolValue{BoolValue: res.Value().(bool)}}, nil - case types.BytesType: - return &exprpb.Value{ - Kind: &exprpb.Value_BytesValue{BytesValue: res.Value().([]byte)}}, nil - case types.DoubleType: - return &exprpb.Value{ - Kind: &exprpb.Value_DoubleValue{DoubleValue: res.Value().(float64)}}, nil - case types.IntType: - return &exprpb.Value{ - Kind: &exprpb.Value_Int64Value{Int64Value: res.Value().(int64)}}, nil - case types.ListType: - l := res.(traits.Lister) - sz := l.Size().(types.Int) - elts := make([]*exprpb.Value, 0, int64(sz)) - for i := types.Int(0); i < sz; i++ { - v, err := RefValueToValue(l.Get(i)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } elts = append(elts, v) } -<<<<<<< HEAD return &celpb.Value{ Kind: &celpb.Value_ListValue{ ListValue: &celpb.ListValue{Values: elts}}}, nil @@ -218,55 +182,13 @@ func RefValueToValue(res ref.Val) (*exprpb.Value, error) { case types.UintType: return &celpb.Value{ Kind: &celpb.Value_Uint64Value{Uint64Value: res.Value().(uint64)}}, nil -======= - return &exprpb.Value{ - Kind: &exprpb.Value_ListValue{ - ListValue: &exprpb.ListValue{Values: elts}}}, nil - case types.MapType: - mapper := res.(traits.Mapper) - sz := mapper.Size().(types.Int) - entries := make([]*exprpb.MapValue_Entry, 0, int64(sz)) - for it := mapper.Iterator(); it.HasNext().(types.Bool); { - k := it.Next() - v := mapper.Get(k) - kv, err := RefValueToValue(k) - if err != nil { - return nil, err - } - vv, err := RefValueToValue(v) - if err != nil { - return nil, err - } - entries = append(entries, &exprpb.MapValue_Entry{Key: kv, Value: vv}) - } - return &exprpb.Value{ - 
Kind: &exprpb.Value_MapValue{ - MapValue: &exprpb.MapValue{Entries: entries}}}, nil - case types.NullType: - return &exprpb.Value{ - Kind: &exprpb.Value_NullValue{}}, nil - case types.StringType: - return &exprpb.Value{ - Kind: &exprpb.Value_StringValue{StringValue: res.Value().(string)}}, nil - case types.TypeType: - typeName := res.(ref.Type).TypeName() - return &exprpb.Value{Kind: &exprpb.Value_TypeValue{TypeValue: typeName}}, nil - case types.UintType: - return &exprpb.Value{ - Kind: &exprpb.Value_Uint64Value{Uint64Value: res.Value().(uint64)}}, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: any, err := res.ConvertToNative(anyPbType) if err != nil { return nil, err } -<<<<<<< HEAD return &celpb.Value{ Kind: &celpb.Value_ObjectValue{ObjectValue: any.(*anypb.Any)}}, nil -======= - return &exprpb.Value{ - Kind: &exprpb.Value_ObjectValue{ObjectValue: any.(*anypb.Any)}}, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -287,7 +209,6 @@ var ( anyPbType = reflect.TypeOf(&anypb.Any{}) ) -<<<<<<< HEAD // ValueToRefValue converts between google.api.expr.v1alpha1.Value and ref.Val. func ValueToRefValue(adapter types.Adapter, v *exprpb.Value) (ref.Val, error) { return AlphaProtoAsValue(adapter, v) @@ -320,33 +241,12 @@ func ProtoAsValue(adapter types.Adapter, v *celpb.Value) (ref.Val, error) { case *celpb.Value_BytesValue: return types.Bytes(v.GetBytesValue()), nil case *celpb.Value_ObjectValue: -======= -// ValueToRefValue converts between exprpb.Value and ref.Val. -func ValueToRefValue(adapter types.Adapter, v *exprpb.Value) (ref.Val, error) { - switch v.Kind.(type) { - case *exprpb.Value_NullValue: - return types.NullValue, nil - case *exprpb.Value_BoolValue: - return types.Bool(v.GetBoolValue()), nil - case *exprpb.Value_Int64Value: - return types.Int(v.GetInt64Value()), nil - case *exprpb.Value_Uint64Value: - return types.Uint(v.GetUint64Value()), nil - case *exprpb.Value_DoubleValue: - return types.Double(v.GetDoubleValue()), nil - case *exprpb.Value_StringValue: - return types.String(v.GetStringValue()), nil - case *exprpb.Value_BytesValue: - return types.Bytes(v.GetBytesValue()), nil - case *exprpb.Value_ObjectValue: ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) any := v.GetObjectValue() msg, err := anypb.UnmarshalNew(any, proto.UnmarshalOptions{DiscardUnknown: true}) if err != nil { return nil, err } return adapter.NativeToValue(msg), nil -<<<<<<< HEAD case *celpb.Value_MapValue: m := v.GetMapValue() entries := make(map[ref.Val]ref.Val) @@ -356,47 +256,24 @@ func ValueToRefValue(adapter types.Adapter, v *exprpb.Value) (ref.Val, error) { return nil, err } pb, err := ProtoAsValue(adapter, entry.Value) -======= - case *exprpb.Value_MapValue: - m := v.GetMapValue() - entries := make(map[ref.Val]ref.Val) - for _, entry := range m.Entries { - key, err := ValueToRefValue(adapter, entry.Key) - if err != nil { - return nil, err - } - pb, err := ValueToRefValue(adapter, entry.Value) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } entries[key] = pb } return adapter.NativeToValue(entries), nil -<<<<<<< HEAD case *celpb.Value_ListValue: l := v.GetListValue() elts := make([]ref.Val, len(l.Values)) for i, e := range l.Values { rv, err := ProtoAsValue(adapter, e) -======= - case *exprpb.Value_ListValue: - l := v.GetListValue() - elts := make([]ref.Val, len(l.Values)) - for i, e := range l.Values { - rv, err := ValueToRefValue(adapter, e) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != 
nil { return nil, err } elts[i] = rv } return adapter.NativeToValue(elts), nil -<<<<<<< HEAD case *celpb.Value_TypeValue: -======= - case *exprpb.Value_TypeValue: ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) typeName := v.GetTypeValue() tv, ok := typeNameToTypeValue[typeName] if ok { @@ -406,7 +283,6 @@ func ValueToRefValue(adapter types.Adapter, v *exprpb.Value) (ref.Val, error) { } return nil, errors.New("unknown value") } -<<<<<<< HEAD func convertProto(src, dst proto.Message) error { pb, err := proto.Marshal(src) @@ -416,5 +292,3 @@ func convertProto(src, dst proto.Message) error { err = proto.Unmarshal(pb, dst) return err } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/google/cel-go/cel/library.go b/vendor/github.com/google/cel-go/cel/library.go index c0348d9024..c0aef50190 100644 --- a/vendor/github.com/google/cel-go/cel/library.go +++ b/vendor/github.com/google/cel-go/cel/library.go @@ -15,10 +15,7 @@ package cel import ( -<<<<<<< HEAD "fmt" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "math" "strconv" "strings" @@ -39,17 +36,11 @@ const ( optMapMacro = "optMap" optFlatMapMacro = "optFlatMap" hasValueFunc = "hasValue" -<<<<<<< HEAD unwrapOptFunc = "unwrapOpt" optionalNoneFunc = "optional.none" optionalOfFunc = "optional.of" optionalOfNonZeroValueFunc = "optional.ofNonZeroValue" optionalUnwrapFunc = "optional.unwrap" -======= - optionalNoneFunc = "optional.none" - optionalOfFunc = "optional.of" - optionalOfNonZeroValueFunc = "optional.ofNonZeroValue" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) valueFunc = "value" unusedIterVar = "#unused" ) @@ -272,7 +263,6 @@ func (stdLibrary) ProgramOptions() []ProgramOption { // be expressed with `optMap`. // // msg.?elements.optFlatMap(e, e[?0]) // return the first element if present. -<<<<<<< HEAD // # First // @@ -304,8 +294,6 @@ func (stdLibrary) ProgramOptions() []ProgramOption { // optional.unwrap([optional.of(42), optional.none()]) == [42] // [optional.of(42), optional.none()].unwrapOpt() == [42] -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func OptionalTypes(opts ...OptionalTypesOption) EnvOption { lib := &optionalLib{version: math.MaxUint32} for _, opt := range opts { @@ -349,10 +337,7 @@ func (lib *optionalLib) CompileOptions() []EnvOption { optionalTypeV := OptionalType(paramTypeV) listTypeV := ListType(paramTypeV) mapTypeKV := MapType(paramTypeK, paramTypeV) -<<<<<<< HEAD listOptionalTypeV := ListType(optionalTypeV) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) opts := []EnvOption{ // Enable the optional syntax in the parser. 
@@ -425,7 +410,6 @@ func (lib *optionalLib) CompileOptions() []EnvOption { if lib.version >= 1 { opts = append(opts, Macros(ReceiverMacro(optFlatMapMacro, 2, optFlatMap))) } -<<<<<<< HEAD if lib.version >= 2 { opts = append(opts, Function("last", @@ -466,8 +450,6 @@ func (lib *optionalLib) CompileOptions() []EnvOption { UnaryBinding(optUnwrap)))) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return opts } @@ -496,11 +478,7 @@ func optMap(meh MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, * meh.NewList(), unusedIterVar, varName, -<<<<<<< HEAD meh.NewMemberCall(valueFunc, meh.Copy(target)), -======= - meh.NewMemberCall(valueFunc, target), ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) meh.NewLiteral(types.False), meh.NewIdent(varName), mapExpr, @@ -527,11 +505,7 @@ func optFlatMap(meh MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Exp meh.NewList(), unusedIterVar, varName, -<<<<<<< HEAD meh.NewMemberCall(valueFunc, meh.Copy(target)), -======= - meh.NewMemberCall(valueFunc, target), ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) meh.NewLiteral(types.False), meh.NewIdent(varName), mapExpr, @@ -540,7 +514,6 @@ func optFlatMap(meh MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Exp ), nil } -<<<<<<< HEAD func optUnwrap(value ref.Val) ref.Val { list := value.(traits.Lister) var unwrappedList []ref.Val @@ -558,8 +531,6 @@ func optUnwrap(value ref.Val) ref.Val { return types.DefaultTypeAdapter.NativeToValue(unwrappedList) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func enableOptionalSyntax() EnvOption { return func(e *Env) (*Env, error) { e.prsrOpts = append(e.prsrOpts, parser.EnableOptionalSyntax(true)) @@ -567,15 +538,12 @@ func enableOptionalSyntax() EnvOption { } } -<<<<<<< HEAD // EnableErrorOnBadPresenceTest enables error generation when a presence test or optional field // selection is performed on a primitive type. func EnableErrorOnBadPresenceTest(value bool) EnvOption { return features(featureEnableErrorOnBadPresenceTest, value) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func decorateOptionalOr(i interpreter.Interpretable) (interpreter.Interpretable, error) { call, ok := i.(interpreter.InterpretableCall) if !ok { @@ -801,11 +769,7 @@ var ( func timestampGetFullYear(ts, tz ref.Val) ref.Val { t, err := inTimeZone(ts, tz) if err != nil { -<<<<<<< HEAD return types.NewErrFromString(err.Error()) -======= - return types.NewErr(err.Error()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return types.Int(t.Year()) } @@ -813,11 +777,7 @@ func timestampGetFullYear(ts, tz ref.Val) ref.Val { func timestampGetMonth(ts, tz ref.Val) ref.Val { t, err := inTimeZone(ts, tz) if err != nil { -<<<<<<< HEAD return types.NewErrFromString(err.Error()) -======= - return types.NewErr(err.Error()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // CEL spec indicates that the month should be 0-based, but the Time value // for Month() is 1-based. 
@@ -827,11 +787,7 @@ func timestampGetMonth(ts, tz ref.Val) ref.Val { func timestampGetDayOfYear(ts, tz ref.Val) ref.Val { t, err := inTimeZone(ts, tz) if err != nil { -<<<<<<< HEAD return types.NewErrFromString(err.Error()) -======= - return types.NewErr(err.Error()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return types.Int(t.YearDay() - 1) } @@ -839,11 +795,7 @@ func timestampGetDayOfYear(ts, tz ref.Val) ref.Val { func timestampGetDayOfMonthZeroBased(ts, tz ref.Val) ref.Val { t, err := inTimeZone(ts, tz) if err != nil { -<<<<<<< HEAD return types.NewErrFromString(err.Error()) -======= - return types.NewErr(err.Error()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return types.Int(t.Day() - 1) } @@ -851,11 +803,7 @@ func timestampGetDayOfMonthZeroBased(ts, tz ref.Val) ref.Val { func timestampGetDayOfMonthOneBased(ts, tz ref.Val) ref.Val { t, err := inTimeZone(ts, tz) if err != nil { -<<<<<<< HEAD return types.NewErrFromString(err.Error()) -======= - return types.NewErr(err.Error()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return types.Int(t.Day()) } @@ -863,11 +811,7 @@ func timestampGetDayOfMonthOneBased(ts, tz ref.Val) ref.Val { func timestampGetDayOfWeek(ts, tz ref.Val) ref.Val { t, err := inTimeZone(ts, tz) if err != nil { -<<<<<<< HEAD return types.NewErrFromString(err.Error()) -======= - return types.NewErr(err.Error()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return types.Int(t.Weekday()) } @@ -875,11 +819,7 @@ func timestampGetDayOfWeek(ts, tz ref.Val) ref.Val { func timestampGetHours(ts, tz ref.Val) ref.Val { t, err := inTimeZone(ts, tz) if err != nil { -<<<<<<< HEAD return types.NewErrFromString(err.Error()) -======= - return types.NewErr(err.Error()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return types.Int(t.Hour()) } @@ -887,11 +827,7 @@ func timestampGetHours(ts, tz ref.Val) ref.Val { func timestampGetMinutes(ts, tz ref.Val) ref.Val { t, err := inTimeZone(ts, tz) if err != nil { -<<<<<<< HEAD return types.NewErrFromString(err.Error()) -======= - return types.NewErr(err.Error()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return types.Int(t.Minute()) } @@ -899,11 +835,7 @@ func timestampGetMinutes(ts, tz ref.Val) ref.Val { func timestampGetSeconds(ts, tz ref.Val) ref.Val { t, err := inTimeZone(ts, tz) if err != nil { -<<<<<<< HEAD return types.NewErrFromString(err.Error()) -======= - return types.NewErr(err.Error()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return types.Int(t.Second()) } @@ -911,11 +843,7 @@ func timestampGetSeconds(ts, tz ref.Val) ref.Val { func timestampGetMilliseconds(ts, tz ref.Val) ref.Val { t, err := inTimeZone(ts, tz) if err != nil { -<<<<<<< HEAD return types.NewErrFromString(err.Error()) -======= - return types.NewErr(err.Error()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return types.Int(t.Nanosecond() / 1000000) } diff --git a/vendor/github.com/google/cel-go/cel/optimizer.go b/vendor/github.com/google/cel-go/cel/optimizer.go index 3cf5022328..9a2a97a647 100644 --- a/vendor/github.com/google/cel-go/cel/optimizer.go +++ b/vendor/github.com/google/cel-go/cel/optimizer.go @@ -15,11 +15,8 @@ package cel import ( -<<<<<<< HEAD "sort" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/google/cel-go/common" "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/types" @@ -51,13 +48,8 @@ func NewStaticOptimizer(optimizers ...ASTOptimizer) *StaticOptimizer { // If issues are 
encountered, the Issues.Err() return value will be non-nil. func (opt *StaticOptimizer) Optimize(env *Env, a *Ast) (*Ast, *Issues) { // Make a copy of the AST to be optimized. -<<<<<<< HEAD optimized := ast.Copy(a.NativeRep()) ids := newIDGenerator(ast.MaxID(a.NativeRep())) -======= - optimized := ast.Copy(a.impl) - ids := newIDGenerator(ast.MaxID(a.impl)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Create the optimizer context, could be pooled in the future. issues := NewIssues(common.NewErrors(a.Source())) @@ -94,11 +86,7 @@ func (opt *StaticOptimizer) Optimize(env *Env, a *Ast) (*Ast, *Issues) { if iss.Err() != nil { return nil, iss } -<<<<<<< HEAD optimized = checked.NativeRep() -======= - optimized = checked.impl ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Return the optimized result. @@ -112,15 +100,10 @@ func (opt *StaticOptimizer) Optimize(env *Env, a *Ast) (*Ast, *Issues) { // that the ids within the expression correspond to the ids within macros. func normalizeIDs(idGen ast.IDGenerator, optimized ast.Expr, info *ast.SourceInfo) { optimized.RenumberIDs(idGen) -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if len(info.MacroCalls()) == 0 { return } -<<<<<<< HEAD // Sort the macro ids to make sure that the renumbering of macro-specific variables // is stable across normalization calls. sortedMacroIDs := []int64{} @@ -132,11 +115,6 @@ func normalizeIDs(idGen ast.IDGenerator, optimized ast.Expr, info *ast.SourceInf // First, update the macro call ids themselves. callIDMap := map[int64]int64{} for _, id := range sortedMacroIDs { -======= - // First, update the macro call ids themselves. - callIDMap := map[int64]int64{} - for id := range info.MacroCalls() { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) callIDMap[id] = idGen(id) } // Then update the macro call definitions which refer to these ids, but @@ -147,12 +125,8 @@ func normalizeIDs(idGen ast.IDGenerator, optimized ast.Expr, info *ast.SourceInf call ast.Expr } macroUpdates := []macroUpdate{} -<<<<<<< HEAD for _, oldID := range sortedMacroIDs { newID := callIDMap[oldID] -======= - for oldID, newID := range callIDMap { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) call, found := info.GetMacroCall(oldID) if !found { continue @@ -170,10 +144,7 @@ func cleanupMacroRefs(expr ast.Expr, info *ast.SourceInfo) { if len(info.MacroCalls()) == 0 { return } -<<<<<<< HEAD -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Sanitize the macro call references once the optimized expression has been computed // and the ids normalized between the expression and the macros. exprRefMap := make(map[int64]struct{}) @@ -240,7 +211,6 @@ type OptimizerContext struct { *Issues } -<<<<<<< HEAD // ExtendEnv auguments the context's environment with the additional options. func (opt *OptimizerContext) ExtendEnv(opts ...EnvOption) error { e, err := opt.Env.Extend(opts...) @@ -251,8 +221,6 @@ func (opt *OptimizerContext) ExtendEnv(opts ...EnvOption) error { return nil } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // ASTOptimizer applies an optimization over an AST and returns the optimized result. type ASTOptimizer interface { // Optimize optimizes a type-checked AST within an Environment and accumulates any issues. 
@@ -306,14 +274,11 @@ func (opt *optimizerExprFactory) SetMacroCall(id int64, expr ast.Expr) { opt.sourceInfo.SetMacroCall(id, expr) } -<<<<<<< HEAD // MacroCalls returns the map of macro calls currently in the context. func (opt *optimizerExprFactory) MacroCalls() map[int64]ast.Expr { return opt.sourceInfo.MacroCalls() } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // NewBindMacro creates an AST expression representing the expanded bind() macro, and a macro expression // representing the unexpanded call signature to be inserted into the source info macro call metadata. func (opt *optimizerExprFactory) NewBindMacro(macroID int64, varName string, varInit, remaining ast.Expr) (astExpr, macroExpr ast.Expr) { diff --git a/vendor/github.com/google/cel-go/cel/options.go b/vendor/github.com/google/cel-go/cel/options.go index f947a0d05c..85f777e959 100644 --- a/vendor/github.com/google/cel-go/cel/options.go +++ b/vendor/github.com/google/cel-go/cel/options.go @@ -61,7 +61,6 @@ const ( // compressing the logic graph to a single call when multiple like-operator // expressions occur: e.g. a && b && c && d -> call(_&&_, [a, b, c, d]) featureVariadicLogicalASTs -<<<<<<< HEAD // Enable error generation when a presence test or optional field selection is // performed on a primitive type. @@ -69,8 +68,6 @@ const ( // Enable escape syntax for field identifiers (`). featureIdentEscapeSyntax -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // EnvOption is a functional interface for configuring the environment. @@ -253,7 +250,6 @@ func Abbrevs(qualifiedNames ...string) EnvOption { } } -<<<<<<< HEAD // customTypeRegistry is an internal-only interface containing the minimum methods required to support // custom types. It is a subset of methods from ref.TypeRegistry. type customTypeRegistry interface { @@ -261,8 +257,6 @@ type customTypeRegistry interface { RegisterType(...ref.Type) error } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Types adds one or more type declarations to the environment, allowing for construction of // type-literals whose definitions are included in the common expression built-in set. // @@ -275,16 +269,7 @@ type customTypeRegistry interface { // Note: This option must be specified after the CustomTypeProvider option when used together. func Types(addTypes ...any) EnvOption { return func(e *Env) (*Env, error) { -<<<<<<< HEAD reg, isReg := e.provider.(customTypeRegistry) -======= - var reg ref.TypeRegistry - var isReg bool - reg, isReg = e.provider.(*types.Registry) - if !isReg { - reg, isReg = e.provider.(ref.TypeRegistry) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !isReg { return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider) } @@ -321,11 +306,7 @@ func Types(addTypes ...any) EnvOption { // extension or by re-using the same EnvOption with another NewEnv() call. 
func TypeDescs(descs ...any) EnvOption { return func(e *Env) (*Env, error) { -<<<<<<< HEAD reg, isReg := e.provider.(customTypeRegistry) -======= - reg, isReg := e.provider.(ref.TypeRegistry) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !isReg { return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider) } @@ -373,11 +354,7 @@ func TypeDescs(descs ...any) EnvOption { } } -<<<<<<< HEAD func registerFileSet(reg customTypeRegistry, fileSet *descpb.FileDescriptorSet) error { -======= -func registerFileSet(reg ref.TypeRegistry, fileSet *descpb.FileDescriptorSet) error { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) files, err := protodesc.NewFiles(fileSet) if err != nil { return fmt.Errorf("protodesc.NewFiles(%v) failed: %v", fileSet, err) @@ -385,11 +362,7 @@ func registerFileSet(reg ref.TypeRegistry, fileSet *descpb.FileDescriptorSet) er return registerFiles(reg, files) } -<<<<<<< HEAD func registerFiles(reg customTypeRegistry, files *protoregistry.Files) error { -======= -func registerFiles(reg ref.TypeRegistry, files *protoregistry.Files) error { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var err error files.RangeFiles(func(fd protoreflect.FileDescriptor) bool { err = reg.RegisterDescriptor(fd) @@ -648,15 +621,12 @@ func EnableMacroCallTracking() EnvOption { return features(featureEnableMacroCallTracking, true) } -<<<<<<< HEAD // EnableIdentifierEscapeSyntax enables identifier escaping (`) syntax for // fields. func EnableIdentifierEscapeSyntax() EnvOption { return features(featureIdentEscapeSyntax, true) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // CrossTypeNumericComparisons makes it possible to compare across numeric types, e.g. double < int func CrossTypeNumericComparisons(enabled bool) EnvOption { return features(featureCrossTypeNumericComparisons, enabled) @@ -694,7 +664,6 @@ func ParserExpressionSizeLimit(limit int) EnvOption { } } -<<<<<<< HEAD // EnableHiddenAccumulatorName sets the parser to use the identifier '@result' for accumulators // which is not normally accessible from CEL source. func EnableHiddenAccumulatorName(enabled bool) EnvOption { @@ -704,8 +673,6 @@ func EnableHiddenAccumulatorName(enabled bool) EnvOption { } } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func maybeInteropProvider(provider any) (types.Provider, error) { switch p := provider.(type) { case types.Provider: diff --git a/vendor/github.com/google/cel-go/cel/program.go b/vendor/github.com/google/cel-go/cel/program.go index ef9321acd0..49bd537838 100644 --- a/vendor/github.com/google/cel-go/cel/program.go +++ b/vendor/github.com/google/cel-go/cel/program.go @@ -19,10 +19,7 @@ import ( "fmt" "sync" -<<<<<<< HEAD "github.com/google/cel-go/common/ast" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/interpreter" @@ -103,12 +100,9 @@ type EvalDetails struct { // State of the evaluation, non-nil if the OptTrackState or OptExhaustiveEval is specified // within EvalOptions. func (ed *EvalDetails) State() interpreter.EvalState { -<<<<<<< HEAD if ed == nil { return interpreter.NewEvalState() } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ed.state } @@ -161,11 +155,7 @@ func (p *prog) clone() *prog { // ProgramOption values. // // If the program cannot be configured the prog will be nil, with a non-nil error response. 
-<<<<<<< HEAD func newProgram(e *Env, a *ast.AST, opts []ProgramOption) (Program, error) { -======= -func newProgram(e *Env, a *Ast, opts []ProgramOption) (Program, error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Build the dispatcher, interpreter, and default program value. disp := interpreter.NewDispatcher() @@ -201,7 +191,6 @@ func newProgram(e *Env, a *Ast, opts []ProgramOption) (Program, error) { // Set the attribute factory after the options have been set. var attrFactory interpreter.AttributeFactory -<<<<<<< HEAD attrFactorOpts := []interpreter.AttrFactoryOption{ interpreter.EnableErrorOnBadPresenceTest(p.HasFeature(featureEnableErrorOnBadPresenceTest)), } @@ -209,12 +198,6 @@ func newProgram(e *Env, a *Ast, opts []ProgramOption) (Program, error) { attrFactory = interpreter.NewPartialAttributeFactory(e.Container, e.adapter, e.provider, attrFactorOpts...) } else { attrFactory = interpreter.NewAttributeFactory(e.Container, e.adapter, e.provider, attrFactorOpts...) -======= - if p.evalOpts&OptPartialEval == OptPartialEval { - attrFactory = interpreter.NewPartialAttributeFactory(e.Container, e.adapter, e.provider) - } else { - attrFactory = interpreter.NewAttributeFactory(e.Container, e.adapter, e.provider) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } interp := interpreter.NewInterpreter(disp, e.Container, e.provider, e.adapter, attrFactory) p.interpreter = interp @@ -276,15 +259,9 @@ func newProgram(e *Env, a *Ast, opts []ProgramOption) (Program, error) { return p.initInterpretable(a, decorators) } -<<<<<<< HEAD func (p *prog) initInterpretable(a *ast.AST, decs []interpreter.InterpretableDecorator) (*prog, error) { // When the AST has been exprAST it contains metadata that can be used to speed up program execution. interpretable, err := p.interpreter.NewInterpretable(a, decs...) -======= -func (p *prog) initInterpretable(a *Ast, decs []interpreter.InterpretableDecorator) (*prog, error) { - // When the AST has been exprAST it contains metadata that can be used to speed up program execution. - interpretable, err := p.interpreter.NewInterpretable(a.impl, decs...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } diff --git a/vendor/github.com/google/cel-go/checker/BUILD.bazel b/vendor/github.com/google/cel-go/checker/BUILD.bazel index 370a8a5d99..678b412a95 100644 --- a/vendor/github.com/google/cel-go/checker/BUILD.bazel +++ b/vendor/github.com/google/cel-go/checker/BUILD.bazel @@ -16,10 +16,6 @@ go_library( "options.go", "printer.go", "scopes.go", -<<<<<<< HEAD -======= - "standard.go", ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "types.go", ], importpath = "github.com/google/cel-go/checker", diff --git a/vendor/github.com/google/cel-go/checker/checker.go b/vendor/github.com/google/cel-go/checker/checker.go index a95bcf9950..6824af7a54 100644 --- a/vendor/github.com/google/cel-go/checker/checker.go +++ b/vendor/github.com/google/cel-go/checker/checker.go @@ -496,7 +496,6 @@ func (c *checker) checkComprehension(e ast.Expr) { comp := e.AsComprehension() c.check(comp.IterRange()) c.check(comp.AccuInit()) -<<<<<<< HEAD rangeType := substitute(c.mappings, c.getType(comp.IterRange()), false) // Create a scope for the comprehension since it has a local accumulation variable. @@ -523,18 +522,6 @@ func (c *checker) checkComprehension(e ast.Expr) { // var2Type represents the map entry value for two-variable comprehensions. 
var2Type = rangeType.Parameters()[1] } -======= - accuType := c.getType(comp.AccuInit()) - rangeType := substitute(c.mappings, c.getType(comp.IterRange()), false) - var varType *types.Type - - switch rangeType.Kind() { - case types.ListKind: - varType = rangeType.Parameters()[0] - case types.MapKind: - // Ranges over the keys. - varType = rangeType.Parameters()[0] ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) case types.DynKind, types.ErrorKind, types.TypeParamKind: // Set the range type to DYN to prevent assignment to a potentially incorrect type // at a later point in type-checking. The isAssignable call will update the type @@ -542,7 +529,6 @@ func (c *checker) checkComprehension(e ast.Expr) { c.isAssignable(types.DynType, rangeType) // Set the range iteration variable to type DYN as well. varType = types.DynType -<<<<<<< HEAD if comp.HasIterVar2() { var2Type = types.DynType } @@ -560,20 +546,6 @@ func (c *checker) checkComprehension(e ast.Expr) { if comp.HasIterVar2() { c.env.AddIdents(decls.NewVariable(comp.IterVar2(), var2Type)) } -======= - default: - c.errors.notAComprehensionRange(comp.IterRange().ID(), c.location(comp.IterRange()), rangeType) - varType = types.ErrorType - } - - // Create a scope for the comprehension since it has a local accumulation variable. - // This scope will contain the accumulation variable used to compute the result. - c.env = c.env.enterScope() - c.env.AddIdents(decls.NewVariable(comp.AccuVar(), accuType)) - // Create a block scope for the loop. - c.env = c.env.enterScope() - c.env.AddIdents(decls.NewVariable(comp.IterVar(), varType)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Check the variable references in the condition and step. c.check(comp.LoopCondition()) c.assertType(comp.LoopCondition(), types.BoolType) diff --git a/vendor/github.com/google/cel-go/checker/cost.go b/vendor/github.com/google/cel-go/checker/cost.go index 9378cd653b..b9cd8a2ed3 100644 --- a/vendor/github.com/google/cel-go/checker/cost.go +++ b/vendor/github.com/google/cel-go/checker/cost.go @@ -28,7 +28,6 @@ import ( // CostEstimator estimates the sizes of variable length input data and the costs of functions. type CostEstimator interface { -<<<<<<< HEAD // EstimateSize returns a SizeEstimate for the given AstNode, or nil if the estimator has no // estimate to provide. // @@ -43,17 +42,6 @@ type CostEstimator interface { // EstimateCallCost returns the estimated cost of an invocation, or nil if the estimator has no // estimate to provide. -======= - // EstimateSize returns a SizeEstimate for the given AstNode, or nil if - // the estimator has no estimate to provide. The size is equivalent to the result of the CEL `size()` function: - // length of strings and bytes, number of map entries or number of list items. - // EstimateSize is only called for AstNodes where - // CEL does not know the size; EstimateSize is not called for values defined inline in CEL where the size - // is already obvious to CEL. - EstimateSize(element AstNode) *SizeEstimate - // EstimateCallCost returns the estimated cost of an invocation, or nil if - // the estimator has no estimate to provide. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) EstimateCallCost(function, overloadID string, target *AstNode, args []AstNode) *CallEstimate } @@ -61,10 +49,7 @@ type CostEstimator interface { // The ResultSize should only be provided if the call results in a map, list, string or bytes. 
type CallEstimate struct { CostEstimate -<<<<<<< HEAD -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ResultSize *SizeEstimate } @@ -74,7 +59,6 @@ type AstNode interface { // represent type directly reachable from the provided type declarations. // The first path element is a variable. All subsequent path elements are one of: field name, '@items', '@keys', '@values'. Path() []string -<<<<<<< HEAD // Type returns the deduced type of the AstNode. Type() *types.Type @@ -82,12 +66,6 @@ type AstNode interface { // Expr returns the expression of the AstNode. Expr() ast.Expr -======= - // Type returns the deduced type of the AstNode. - Type() *types.Type - // Expr returns the expression of the AstNode. - Expr() ast.Expr ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // ComputedSize returns a size estimate of the AstNode derived from information available in the CEL expression. // For constants and inline list and map declarations, the exact size is returned. For concatenated list, strings // and bytes, the size is derived from the size estimates of the operands. nil is returned if there is no @@ -115,40 +93,7 @@ func (e astNode) Expr() ast.Expr { } func (e astNode) ComputedSize() *SizeEstimate { -<<<<<<< HEAD return e.derivedSize -======= - if e.derivedSize != nil { - return e.derivedSize - } - var v uint64 - switch e.expr.Kind() { - case ast.LiteralKind: - switch ck := e.expr.AsLiteral().(type) { - case types.String: - // converting to runes here is an O(n) operation, but - // this is consistent with how size is computed at runtime, - // and how the language definition defines string size - v = uint64(len([]rune(ck))) - case types.Bytes: - v = uint64(len(ck)) - case types.Bool, types.Double, types.Duration, - types.Int, types.Timestamp, types.Uint, - types.Null: - v = uint64(1) - default: - return nil - } - case ast.ListKind: - v = uint64(e.expr.AsList().Size()) - case ast.MapKind: - v = uint64(e.expr.AsMap().Size()) - default: - return nil - } - - return &SizeEstimate{Min: v, Max: v} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // SizeEstimate represents an estimated size of a variable length string, bytes, map or list. @@ -156,7 +101,6 @@ type SizeEstimate struct { Min, Max uint64 } -<<<<<<< HEAD // UnknownSizeEstimate returns a size between 0 and max uint func UnknownSizeEstimate() SizeEstimate { return unknownSizeEstimate @@ -167,8 +111,6 @@ func FixedSizeEstimate(size uint64) SizeEstimate { return SizeEstimate{Min: size, Max: size} } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Add adds to another SizeEstimate and returns the sum. // If add would result in an uint64 overflow, the result is math.MaxUint64. func (se SizeEstimate) Add(sizeEstimate SizeEstimate) SizeEstimate { @@ -223,7 +165,6 @@ type CostEstimate struct { Min, Max uint64 } -<<<<<<< HEAD // UnknownCostEstimate returns a cost with an unknown impact. func UnknownCostEstimate() CostEstimate { return unknownCostEstimate @@ -234,19 +175,12 @@ func FixedCostEstimate(cost uint64) CostEstimate { return CostEstimate{Min: cost, Max: cost} } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Add adds the costs and returns the sum. // If add would result in an uint64 overflow for the min or max, the value is set to math.MaxUint64. 
func (ce CostEstimate) Add(cost CostEstimate) CostEstimate { return CostEstimate{ -<<<<<<< HEAD Min: addUint64NoOverflow(ce.Min, cost.Min), Max: addUint64NoOverflow(ce.Max, cost.Max), -======= - addUint64NoOverflow(ce.Min, cost.Min), - addUint64NoOverflow(ce.Max, cost.Max), ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -254,13 +188,8 @@ func (ce CostEstimate) Add(cost CostEstimate) CostEstimate { // If multiply would result in an uint64 overflow, the result is math.MaxUint64. func (ce CostEstimate) Multiply(cost CostEstimate) CostEstimate { return CostEstimate{ -<<<<<<< HEAD Min: multiplyUint64NoOverflow(ce.Min, cost.Min), Max: multiplyUint64NoOverflow(ce.Max, cost.Max), -======= - multiplyUint64NoOverflow(ce.Min, cost.Min), - multiplyUint64NoOverflow(ce.Max, cost.Max), ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -268,13 +197,8 @@ func (ce CostEstimate) Multiply(cost CostEstimate) CostEstimate { // nearest integer of the result, rounded up. func (ce CostEstimate) MultiplyByCostFactor(costPerUnit float64) CostEstimate { return CostEstimate{ -<<<<<<< HEAD Min: multiplyByCostFactor(ce.Min, costPerUnit), Max: multiplyByCostFactor(ce.Max, costPerUnit), -======= - multiplyByCostFactor(ce.Min, costPerUnit), - multiplyByCostFactor(ce.Max, costPerUnit), ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -321,52 +245,6 @@ func multiplyByCostFactor(x uint64, y float64) uint64 { return uint64(ceil) } -<<<<<<< HEAD -======= -var ( - selectAndIdentCost = CostEstimate{Min: common.SelectAndIdentCost, Max: common.SelectAndIdentCost} - constCost = CostEstimate{Min: common.ConstCost, Max: common.ConstCost} - - createListBaseCost = CostEstimate{Min: common.ListCreateBaseCost, Max: common.ListCreateBaseCost} - createMapBaseCost = CostEstimate{Min: common.MapCreateBaseCost, Max: common.MapCreateBaseCost} - createMessageBaseCost = CostEstimate{Min: common.StructCreateBaseCost, Max: common.StructCreateBaseCost} -) - -type coster struct { - // exprPath maps from Expr Id to field path. - exprPath map[int64][]string - // iterRanges tracks the iterRange of each iterVar. - iterRanges iterRangeScopes - // computedSizes tracks the computed sizes of call results. - computedSizes map[int64]SizeEstimate - checkedAST *ast.AST - estimator CostEstimator - overloadEstimators map[string]FunctionEstimator - // presenceTestCost will either be a zero or one based on whether has() macros count against cost computations. - presenceTestCost CostEstimate -} - -// Use a stack of iterVar -> iterRange Expr Ids to handle shadowed variable names. -type iterRangeScopes map[string][]int64 - -func (vs iterRangeScopes) push(varName string, expr ast.Expr) { - vs[varName] = append(vs[varName], expr.ID()) -} - -func (vs iterRangeScopes) pop(varName string) { - varStack := vs[varName] - vs[varName] = varStack[:len(varStack)-1] -} - -func (vs iterRangeScopes) peek(varName string) (int64, bool) { - varStack := vs[varName] - if len(varStack) > 0 { - return varStack[len(varStack)-1], true - } - return 0, false -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // CostOption configures flags which affect cost computations. 
type CostOption func(*coster) error @@ -379,11 +257,7 @@ func PresenceTestHasCost(hasCost bool) CostOption { c.presenceTestCost = selectAndIdentCost return nil } -<<<<<<< HEAD c.presenceTestCost = FixedCostEstimate(0) -======= - c.presenceTestCost = CostEstimate{Min: 0, Max: 0} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } } @@ -408,18 +282,11 @@ func Cost(checked *ast.AST, estimator CostEstimator, opts ...CostOption) (CostEs checkedAST: checked, estimator: estimator, overloadEstimators: map[string]FunctionEstimator{}, -<<<<<<< HEAD exprPaths: map[int64][]string{}, localVars: make(scopes), computedSizes: map[int64]SizeEstimate{}, computedEntrySizes: map[int64]entrySizeEstimate{}, presenceTestCost: FixedCostEstimate(1), -======= - exprPath: map[int64][]string{}, - iterRanges: map[string][]int64{}, - computedSizes: map[int64]SizeEstimate{}, - presenceTestCost: CostEstimate{Min: 1, Max: 1}, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } for _, opt := range opts { err := opt(c) @@ -430,7 +297,6 @@ func Cost(checked *ast.AST, estimator CostEstimator, opts ...CostOption) (CostEs return c.cost(checked.Expr()), nil } -<<<<<<< HEAD type coster struct { // exprPaths maps from Expr Id to field path. exprPaths map[int64][]string @@ -590,8 +456,6 @@ func (c *coster) popLocalVar(varName string) { c.localVars.pop(varName) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (c *coster) cost(e ast.Expr) CostEstimate { if e == nil { return CostEstimate{} @@ -613,15 +477,11 @@ func (c *coster) cost(e ast.Expr) CostEstimate { case ast.StructKind: cost = c.costCreateStruct(e) case ast.ComprehensionKind: -<<<<<<< HEAD if c.isBind(e) { cost = c.costBind(e) } else { cost = c.costComprehension(e) } -======= - cost = c.costComprehension(e) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: return CostEstimate{} } @@ -631,25 +491,11 @@ func (c *coster) cost(e ast.Expr) CostEstimate { func (c *coster) costIdent(e ast.Expr) CostEstimate { identName := e.AsIdent() // build and track the field path -<<<<<<< HEAD if v, ok := c.peekLocalVar(identName); ok { c.addPath(e, v.path) } else { c.addPath(e, []string{identName}) } -======= - if iterRange, ok := c.iterRanges.peek(identName); ok { - switch c.checkedAST.GetType(iterRange).Kind() { - case types.ListKind: - c.addPath(e, append(c.exprPath[iterRange], "@items")) - case types.MapKind: - c.addPath(e, append(c.exprPath[iterRange], "@keys")) - } - } else { - c.addPath(e, []string{identName}) - } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return selectAndIdentCost } @@ -674,15 +520,10 @@ func (c *coster) costSelect(e ast.Expr) CostEstimate { // build and track the field path c.addPath(e, append(c.getPath(sel.Operand()), sel.FieldName())) -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return sum } func (c *coster) costCall(e ast.Expr) CostEstimate { -<<<<<<< HEAD // Dyn is just a way to disable type-checking, so return the cost of 1 with the cost of the argument if dynEstimate := c.maybeUnwrapDynCall(e); dynEstimate != nil { return *dynEstimate @@ -691,11 +532,6 @@ func (c *coster) costCall(e ast.Expr) CostEstimate { // Continue estimating the cost of all other calls. 
call := e.AsCall() args := call.Args() -======= - call := e.AsCall() - args := call.Args() - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var sum CostEstimate argTypes := make([]AstNode, len(args)) @@ -718,11 +554,7 @@ func (c *coster) costCall(e ast.Expr) CostEstimate { fnCost := CostEstimate{Min: uint64(math.MaxUint64), Max: 0} var resultSize *SizeEstimate for _, overload := range overloadIDs { -<<<<<<< HEAD overloadCost := c.functionCost(e, call.FunctionName(), overload, &targetType, argTypes, argCosts) -======= - overloadCost := c.functionCost(call.FunctionName(), overload, &targetType, argTypes, argCosts) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) fnCost = fnCost.Union(overloadCost.CostEstimate) if overloadCost.ResultSize != nil { if resultSize == nil { @@ -736,16 +568,12 @@ func (c *coster) costCall(e ast.Expr) CostEstimate { switch overload { case overloads.IndexList: if len(args) > 0 { -<<<<<<< HEAD // note: assigning resultSize here could be redundant with the path-based lookup later resultSize = c.computeEntrySize(args[0]).valSize() -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.addPath(e, append(c.getPath(args[0]), "@items")) } case overloads.IndexMap: if len(args) > 0 { -<<<<<<< HEAD resultSize = c.computeEntrySize(args[0]).valSize() c.addPath(e, append(c.getPath(args[0]), "@values")) } @@ -783,44 +611,22 @@ func (c *coster) costCreateList(e ast.Expr) CostEstimate { itemSize = itemSize.Union(is) } c.setEntrySize(e, &entrySizeEstimate{containerKind: types.ListKind, key: FixedSizeEstimate(1), val: itemSize}) -======= - c.addPath(e, append(c.getPath(args[0]), "@values")) - } - } - } - if resultSize != nil { - c.computedSizes[e.ID()] = *resultSize - } - return sum.Add(fnCost) -} - -func (c *coster) costCreateList(e ast.Expr) CostEstimate { - create := e.AsList() - var sum CostEstimate - for _, e := range create.Elements() { - sum = sum.Add(c.cost(e)) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return sum.Add(createListBaseCost) } func (c *coster) costCreateMap(e ast.Expr) CostEstimate { mapVal := e.AsMap() var sum CostEstimate -<<<<<<< HEAD keySize := SizeEstimate{Min: math.MaxUint64, Max: 0} valSize := SizeEstimate{Min: math.MaxUint64, Max: 0} if mapVal.Size() == 0 { valSize.Min = 0 keySize.Min = 0 } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, ent := range mapVal.Entries() { entry := ent.AsMapEntry() sum = sum.Add(c.cost(entry.Key())) sum = sum.Add(c.cost(entry.Value())) -<<<<<<< HEAD // Compute the key size range ks := c.sizeOrUnknown(entry.Key()) keySize = keySize.Union(ks) @@ -829,9 +635,6 @@ func (c *coster) costCreateMap(e ast.Expr) CostEstimate { valSize = valSize.Union(vs) } c.setEntrySize(e, &entrySizeEstimate{containerKind: types.MapKind, key: keySize, val: valSize}) -======= - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return sum.Add(createMapBaseCost) } @@ -850,7 +653,6 @@ func (c *coster) costComprehension(e ast.Expr) CostEstimate { var sum CostEstimate sum = sum.Add(c.cost(comp.IterRange())) sum = sum.Add(c.cost(comp.AccuInit())) -<<<<<<< HEAD c.pushLocalVar(comp.AccuVar(), comp.AccuInit()) // Track the iterRange of each IterVar and AccuVar for field path construction @@ -921,45 +723,6 @@ func (c *coster) costBind(e ast.Expr) CostEstimate { } func (c *coster) functionCost(e ast.Expr, function, overloadID string, target *AstNode, args []AstNode, argCosts []CostEstimate) CallEstimate { -======= - - // Track the iterRange of each IterVar for field path 
construction - c.iterRanges.push(comp.IterVar(), comp.IterRange()) - loopCost := c.cost(comp.LoopCondition()) - stepCost := c.cost(comp.LoopStep()) - c.iterRanges.pop(comp.IterVar()) - sum = sum.Add(c.cost(comp.Result())) - rangeCnt := c.sizeEstimate(c.newAstNode(comp.IterRange())) - - c.computedSizes[e.ID()] = rangeCnt - - rangeCost := rangeCnt.MultiplyByCost(stepCost.Add(loopCost)) - sum = sum.Add(rangeCost) - - return sum -} - -func (c *coster) sizeEstimate(t AstNode) SizeEstimate { - if l := t.ComputedSize(); l != nil { - return *l - } - if l := c.estimator.EstimateSize(t); l != nil { - return *l - } - // return an estimate of 1 for return types of set - // lengths, since strings/bytes/more complex objects could be of - // variable length - if isScalar(t.Type()) { - // TODO: since the logic for size estimation is split between - // ComputedSize and isScalar, changing one will likely require changing - // the other, so they should be merged in the future if possible - return SizeEstimate{Min: 1, Max: 1} - } - return SizeEstimate{Min: 0, Max: math.MaxUint64} -} - -func (c *coster) functionCost(function, overloadID string, target *AstNode, args []AstNode, argCosts []CostEstimate) CallEstimate { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) argCostSum := func() CostEstimate { var sum CostEstimate for _, a := range argCosts { @@ -984,7 +747,6 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args case overloads.ExtFormatString: if target != nil { // ResultSize not calculated because we can't bound the max size. -<<<<<<< HEAD return CallEstimate{ CostEstimate: c.sizeOrUnknown(*target).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())} } @@ -1015,41 +777,12 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args case overloads.StartsWithString, overloads.EndsWithString: if len(args) == 1 { return CallEstimate{CostEstimate: c.sizeOrUnknown(args[0]).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())} -======= - return CallEstimate{CostEstimate: c.sizeEstimate(*target).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())} - } - case overloads.StringToBytes: - if len(args) == 1 { - sz := c.sizeEstimate(args[0]) - // ResultSize max is when each char converts to 4 bytes. - return CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &SizeEstimate{Min: sz.Min, Max: sz.Max * 4}} - } - case overloads.BytesToString: - if len(args) == 1 { - sz := c.sizeEstimate(args[0]) - // ResultSize min is when 4 bytes convert to 1 char. - return CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &SizeEstimate{Min: sz.Min / 4, Max: sz.Max}} - } - case overloads.ExtQuoteString: - if len(args) == 1 { - sz := c.sizeEstimate(args[0]) - // ResultSize max is when each char is escaped. 2 quote chars always added. 
- return CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &SizeEstimate{Min: sz.Min + 2, Max: sz.Max*2 + 2}} - } - case overloads.StartsWithString, overloads.EndsWithString: - if len(args) == 1 { - return CallEstimate{CostEstimate: c.sizeEstimate(args[0]).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } case overloads.InList: // If a list is composed entirely of constant values this is O(1), but we don't account for that here. // We just assume all list containment checks are O(n). if len(args) == 2 { -<<<<<<< HEAD return CallEstimate{CostEstimate: c.sizeOrUnknown(args[1]).MultiplyByCostFactor(1).Add(argCostSum())} -======= - return CallEstimate{CostEstimate: c.sizeEstimate(args[1]).MultiplyByCostFactor(1).Add(argCostSum())} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // O(nm) functions case overloads.MatchesString: @@ -1057,32 +790,19 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args if target != nil && len(args) == 1 { // Add one to string length for purposes of cost calculation to prevent product of string and regex to be 0 // in case where string is empty but regex is still expensive. -<<<<<<< HEAD strCost := c.sizeOrUnknown(*target).Add(SizeEstimate{Min: 1, Max: 1}).MultiplyByCostFactor(common.StringTraversalCostFactor) -======= - strCost := c.sizeEstimate(*target).Add(SizeEstimate{Min: 1, Max: 1}).MultiplyByCostFactor(common.StringTraversalCostFactor) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // We don't know how many expressions are in the regex, just the string length (a huge // improvement here would be to somehow get a count the number of expressions in the regex or // how many states are in the regex state machine and use that to measure regex cost). // For now, we're making a guess that each expression in a regex is typically at least 4 chars // in length. 
-<<<<<<< HEAD regexCost := c.sizeOrUnknown(args[0]).MultiplyByCostFactor(common.RegexStringLengthCostFactor) -======= - regexCost := c.sizeEstimate(args[0]).MultiplyByCostFactor(common.RegexStringLengthCostFactor) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return CallEstimate{CostEstimate: strCost.Multiply(regexCost).Add(argCostSum())} } case overloads.ContainsString: if target != nil && len(args) == 1 { -<<<<<<< HEAD strCost := c.sizeOrUnknown(*target).MultiplyByCostFactor(common.StringTraversalCostFactor) substrCost := c.sizeOrUnknown(args[0]).MultiplyByCostFactor(common.StringTraversalCostFactor) -======= - strCost := c.sizeEstimate(*target).MultiplyByCostFactor(common.StringTraversalCostFactor) - substrCost := c.sizeEstimate(args[0]).MultiplyByCostFactor(common.StringTraversalCostFactor) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return CallEstimate{CostEstimate: strCost.Multiply(substrCost).Add(argCostSum())} } case overloads.LogicalOr, overloads.LogicalAnd: @@ -1092,13 +812,9 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args argCost := CostEstimate{Min: lhs.Min, Max: lhs.Add(rhs).Max} return CallEstimate{CostEstimate: argCost} case overloads.Conditional: -<<<<<<< HEAD size := c.sizeOrUnknown(args[1]).Union(c.sizeOrUnknown(args[2])) resultEntrySize := c.computeEntrySize(args[1].Expr()).union(c.computeEntrySize(args[2].Expr())) c.setEntrySize(e, resultEntrySize) -======= - size := c.sizeEstimate(args[1]).Union(c.sizeEstimate(args[2])) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) conditionalCost := argCosts[0] ifTrueCost := argCosts[1] ifFalseCost := argCosts[2] @@ -1106,7 +822,6 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args return CallEstimate{CostEstimate: argCost, ResultSize: &size} case overloads.AddString, overloads.AddBytes, overloads.AddList: if len(args) == 2 { -<<<<<<< HEAD lhsSize := c.sizeOrUnknown(args[0]) rhsSize := c.sizeOrUnknown(args[1]) resultSize := lhsSize.Add(rhsSize) @@ -1120,15 +835,6 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args case overloads.AddList: // list concatenation is O(1), but we handle it here to track size return CallEstimate{CostEstimate: FixedCostEstimate(1).Add(argCostSum()), ResultSize: &resultSize} -======= - lhsSize := c.sizeEstimate(args[0]) - rhsSize := c.sizeEstimate(args[1]) - resultSize := lhsSize.Add(rhsSize) - switch overloadID { - case overloads.AddList: - // list concatenation is O(1), but we handle it here to track size - return CallEstimate{CostEstimate: CostEstimate{Min: 1, Max: 1}.Add(argCostSum()), ResultSize: &resultSize} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: return CallEstimate{CostEstimate: resultSize.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &resultSize} } @@ -1136,13 +842,8 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args case overloads.LessString, overloads.GreaterString, overloads.LessEqualsString, overloads.GreaterEqualsString, overloads.LessBytes, overloads.GreaterBytes, overloads.LessEqualsBytes, overloads.GreaterEqualsBytes, overloads.Equals, overloads.NotEquals: -<<<<<<< HEAD lhsCost := c.sizeOrUnknown(args[0]) rhsCost := c.sizeOrUnknown(args[1]) -======= - lhsCost := c.sizeEstimate(args[0]) - rhsCost := c.sizeEstimate(args[1]) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) min := uint64(0) smallestMax := lhsCost.Max if rhsCost.Max < smallestMax { @@ 
-1152,24 +853,16 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args min = 1 } // equality of 2 scalar values results in a cost of 1 -<<<<<<< HEAD return CallEstimate{ CostEstimate: CostEstimate{Min: min, Max: smallestMax}.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), } -======= - return CallEstimate{CostEstimate: CostEstimate{Min: min, Max: smallestMax}.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // O(1) functions // See CostTracker.costCall for more details about O(1) cost calculations // Benchmarks suggest that most of the other operations take +/- 50% of a base cost unit // which on an Intel xeon 2.20GHz CPU is 50ns. -<<<<<<< HEAD return CallEstimate{CostEstimate: FixedCostEstimate(1).Add(argCostSum())} -======= - return CallEstimate{CostEstimate: CostEstimate{Min: 1, Max: 1}.Add(argCostSum())} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (c *coster) getType(e ast.Expr) *types.Type { @@ -1177,7 +870,6 @@ func (c *coster) getType(e ast.Expr) *types.Type { } func (c *coster) getPath(e ast.Expr) []string { -<<<<<<< HEAD if e.Kind() == ast.IdentKind { if v, found := c.peekLocalVar(e.AsIdent()); found { return v.path[:] @@ -1192,37 +884,18 @@ func (c *coster) addPath(e ast.Expr, path []string) { func isAccumulatorVar(name string) bool { return name == parser.AccumulatorName || name == parser.HiddenAccumulatorName -======= - return c.exprPath[e.ID()] -} - -func (c *coster) addPath(e ast.Expr, path []string) { - c.exprPath[e.ID()] = path ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (c *coster) newAstNode(e ast.Expr) *astNode { path := c.getPath(e) -<<<<<<< HEAD if len(path) > 0 && isAccumulatorVar(path[0]) { // only provide paths to root vars; omit accumulator vars path = nil } -======= - if len(path) > 0 && path[0] == parser.AccumulatorName { - // only provide paths to root vars; omit accumulator vars - path = nil - } - var derivedSize *SizeEstimate - if size, ok := c.computedSizes[e.ID()]; ok { - derivedSize = &size - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return &astNode{ path: path, t: c.getType(e), expr: e, -<<<<<<< HEAD derivedSize: c.computeSize(e)} } @@ -1336,9 +1009,6 @@ func computeTypeSize(t *types.Type) *SizeEstimate { return &cost } return nil -======= - derivedSize: derivedSize} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // isScalar returns true if the given type is known to be of a constant size at @@ -1348,20 +1018,16 @@ func isScalar(t *types.Type) bool { switch t.Kind() { case types.BoolKind, types.DoubleKind, types.DurationKind, types.IntKind, types.TimestampKind, types.UintKind: return true -<<<<<<< HEAD case types.OpaqueKind: if t.TypeName() == "optional_type" { return isScalar(t.Parameters()[0]) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return false } var ( doubleTwoTo64 = math.Ldexp(1.0, 64) -<<<<<<< HEAD unknownSizeEstimate = SizeEstimate{Min: 0, Max: math.MaxUint64} unknownCostEstimate = unknownSizeEstimate.MultiplyByCostFactor(1) @@ -1372,6 +1038,4 @@ var ( createListBaseCost = FixedCostEstimate(common.ListCreateBaseCost) createMapBaseCost = FixedCostEstimate(common.MapCreateBaseCost) createMessageBaseCost = FixedCostEstimate(common.StructCreateBaseCost) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) diff --git a/vendor/github.com/google/cel-go/checker/standard.go 
b/vendor/github.com/google/cel-go/checker/standard.go deleted file mode 100644 index 11b35b80ee..0000000000 --- a/vendor/github.com/google/cel-go/checker/standard.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package checker - -import ( - "github.com/google/cel-go/common/stdlib" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) - -// StandardFunctions returns the Decls for all functions in the evaluator. -// -// Deprecated: prefer stdlib.FunctionExprDecls() -func StandardFunctions() []*exprpb.Decl { - return stdlib.FunctionExprDecls() -} - -// StandardTypes returns the set of type identifiers for standard library types. -// -// Deprecated: prefer stdlib.TypeExprDecls() -func StandardTypes() []*exprpb.Decl { - return stdlib.TypeExprDecls() -} diff --git a/vendor/github.com/google/cel-go/common/BUILD.bazel b/vendor/github.com/google/cel-go/common/BUILD.bazel index 61ae36273f..eef7f281be 100644 --- a/vendor/github.com/google/cel-go/common/BUILD.bazel +++ b/vendor/github.com/google/cel-go/common/BUILD.bazel @@ -18,10 +18,6 @@ go_library( deps = [ "//common/runes:go_default_library", "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", -<<<<<<< HEAD -======= - "@org_golang_x_text//width:go_default_library", ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ], ) diff --git a/vendor/github.com/google/cel-go/common/ast/BUILD.bazel b/vendor/github.com/google/cel-go/common/ast/BUILD.bazel index eb0340777a..9824f57a9f 100644 --- a/vendor/github.com/google/cel-go/common/ast/BUILD.bazel +++ b/vendor/github.com/google/cel-go/common/ast/BUILD.bazel @@ -15,7 +15,6 @@ go_library( "navigable.go", ], importpath = "github.com/google/cel-go/common/ast", -<<<<<<< HEAD deps = [ "//common:go_default_library", "//common/types:go_default_library", @@ -23,13 +22,6 @@ go_library( "@dev_cel_expr//:expr", "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", -======= - deps = [ - "//common:go_default_library", - "//common/types:go_default_library", - "//common/types/ref:go_default_library", - "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "@org_golang_google_protobuf//types/known/structpb:go_default_library", ], ) @@ -45,20 +37,13 @@ go_test( embed = [ ":go_default_library", ], -<<<<<<< HEAD deps = [ -======= - deps = [ ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "//checker:go_default_library", "//checker/decls:go_default_library", "//common:go_default_library", "//common/containers:go_default_library", "//common/decls:go_default_library", -<<<<<<< HEAD "//common/operators:go_default_library", -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "//common/overloads:go_default_library", "//common/stdlib:go_default_library", "//common/types:go_default_library", diff --git 
a/vendor/github.com/google/cel-go/common/ast/ast.go b/vendor/github.com/google/cel-go/common/ast/ast.go index b3e7cb5995..b807669d49 100644 --- a/vendor/github.com/google/cel-go/common/ast/ast.go +++ b/vendor/github.com/google/cel-go/common/ast/ast.go @@ -310,7 +310,6 @@ func (s *SourceInfo) SetOffsetRange(id int64, o OffsetRange) { s.offsetRanges[id] = o } -<<<<<<< HEAD // ClearOffsetRange removes the OffsetRange for the given expression id. func (s *SourceInfo) ClearOffsetRange(id int64) { if s != nil { @@ -318,27 +317,11 @@ func (s *SourceInfo) ClearOffsetRange(id int64) { } } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // GetStartLocation calculates the human-readable 1-based line and 0-based column of the first character // of the expression node at the id. func (s *SourceInfo) GetStartLocation(id int64) common.Location { if o, found := s.GetOffsetRange(id); found { -<<<<<<< HEAD return s.GetLocationByOffset(o.Start) -======= - line := 1 - col := int(o.Start) - for _, lineOffset := range s.LineOffsets() { - if lineOffset < o.Start { - line++ - col = int(o.Start - lineOffset) - } else { - break - } - } - return common.NewLocation(line, col) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return common.NoLocation } @@ -350,26 +333,11 @@ func (s *SourceInfo) GetStartLocation(id int64) common.Location { // be identical to the start location for the expression. func (s *SourceInfo) GetStopLocation(id int64) common.Location { if o, found := s.GetOffsetRange(id); found { -<<<<<<< HEAD return s.GetLocationByOffset(o.Stop) -======= - line := 1 - col := int(o.Stop) - for _, lineOffset := range s.LineOffsets() { - if lineOffset < o.Stop { - line++ - col = int(o.Stop - lineOffset) - } else { - break - } - } - return common.NewLocation(line, col) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return common.NoLocation } -<<<<<<< HEAD // GetLocationByOffset returns the line and column information for a given character offset. func (s *SourceInfo) GetLocationByOffset(offset int32) common.Location { line := 1 @@ -384,8 +352,6 @@ func (s *SourceInfo) GetLocationByOffset(offset int32) common.Location { return common.NewLocation(line, col) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // ComputeOffset calculates the 0-based character offset from a 1-based line and 0-based column. func (s *SourceInfo) ComputeOffset(line, col int32) int32 { if s != nil { diff --git a/vendor/github.com/google/cel-go/common/ast/conversion.go b/vendor/github.com/google/cel-go/common/ast/conversion.go index c846b3f72e..435d8f6547 100644 --- a/vendor/github.com/google/cel-go/common/ast/conversion.go +++ b/vendor/github.com/google/cel-go/common/ast/conversion.go @@ -17,7 +17,6 @@ package ast import ( "fmt" -<<<<<<< HEAD "google.golang.org/protobuf/proto" "github.com/google/cel-go/common/types" @@ -26,14 +25,6 @@ import ( celpb "cel.dev/expr" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" structpb "google.golang.org/protobuf/types/known/structpb" -======= - "github.com/google/cel-go/common/types" - "github.com/google/cel-go/common/types/ref" - - structpb "google.golang.org/protobuf/types/known/structpb" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // ToProto converts an AST to a CheckedExpr protobouf. 
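[The import reshuffle above exists because conversion.go now speaks to two generations of the CEL protos: exprpb from genproto and celpb from cel.dev/expr. The bridge is the convertProto helper added at the bottom of this file's diff, which re-encodes one message and decodes the bytes as the other; this works only because the two definitions are wire-compatible (same field numbers and types). Restated as a self-contained fragment:

package sketch

import "google.golang.org/protobuf/proto"

// convertProto round-trips src through the wire format into dst. Both
// messages must be wire-compatible; a field unknown to dst is silently kept
// as an unknown field rather than reported as an error.
func convertProto(src, dst proto.Message) error {
	pb, err := proto.Marshal(src)
	if err != nil {
		return err
	}
	return proto.Unmarshal(pb, dst)
}]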
@@ -184,16 +175,10 @@ func exprComprehension(factory ExprFactory, id int64, comp *exprpb.Expr_Comprehe if err != nil { return nil, err } -<<<<<<< HEAD return factory.NewComprehensionTwoVar(id, iterRange, comp.GetIterVar(), comp.GetIterVar2(), -======= - return factory.NewComprehension(id, - iterRange, - comp.GetIterVar(), ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) comp.GetAccuVar(), accuInit, loopCond, @@ -381,10 +366,7 @@ func protoComprehension(id int64, comp ComprehensionExpr) (*exprpb.Expr, error) ExprKind: &exprpb.Expr_ComprehensionExpr{ ComprehensionExpr: &exprpb.Expr_Comprehension{ IterVar: comp.IterVar(), -<<<<<<< HEAD IterVar2: comp.IterVar2(), -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) IterRange: iterRange, AccuVar: comp.AccuVar(), AccuInit: accuInit, @@ -631,7 +613,6 @@ func ValToConstant(v ref.Val) (*exprpb.Constant, error) { // ConstantToVal converts a protobuf Constant to a CEL-native ref.Val. func ConstantToVal(c *exprpb.Constant) (ref.Val, error) { -<<<<<<< HEAD return AlphaProtoConstantAsVal(c) } @@ -663,30 +644,10 @@ func ProtoConstantAsVal(c *celpb.Constant) (ref.Val, error) { case *celpb.Constant_StringValue: return types.String(c.GetStringValue()), nil case *celpb.Constant_Uint64Value: -======= - if c == nil { - return nil, nil - } - switch c.GetConstantKind().(type) { - case *exprpb.Constant_BoolValue: - return types.Bool(c.GetBoolValue()), nil - case *exprpb.Constant_BytesValue: - return types.Bytes(c.GetBytesValue()), nil - case *exprpb.Constant_DoubleValue: - return types.Double(c.GetDoubleValue()), nil - case *exprpb.Constant_Int64Value: - return types.Int(c.GetInt64Value()), nil - case *exprpb.Constant_NullValue: - return types.NullValue, nil - case *exprpb.Constant_StringValue: - return types.String(c.GetStringValue()), nil - case *exprpb.Constant_Uint64Value: ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return types.Uint(c.GetUint64Value()), nil } return nil, fmt.Errorf("unsupported constant kind: %v", c.GetConstantKind()) } -<<<<<<< HEAD func convertProto(src, dst proto.Message) error { pb, err := proto.Marshal(src) @@ -696,5 +657,3 @@ func convertProto(src, dst proto.Message) error { err = proto.Unmarshal(pb, dst) return err } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/google/cel-go/common/ast/expr.go b/vendor/github.com/google/cel-go/common/ast/expr.go index a7386ab564..9f55cb3b9f 100644 --- a/vendor/github.com/google/cel-go/common/ast/expr.go +++ b/vendor/github.com/google/cel-go/common/ast/expr.go @@ -158,11 +158,7 @@ type EntryExpr interface { // IDGenerator produces unique ids suitable for tagging expression nodes type IDGenerator func(originalID int64) int64 -<<<<<<< HEAD // CallExpr defines an interface for inspecting a function call and its arguments. -======= -// CallExpr defines an interface for inspecting a function call and its arugments. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type CallExpr interface { // FunctionName returns the name of the function. FunctionName() string @@ -273,7 +269,6 @@ type ComprehensionExpr interface { IterRange() Expr // IterVar returns the iteration variable name. -<<<<<<< HEAD // // For one-variable comprehensions, the iter var refers to the element value // when iterating over a list, or the map key when iterating over a map. @@ -290,10 +285,6 @@ type ComprehensionExpr interface { // HasIterVar2 returns true if the second iteration variable is non-empty. 
HasIterVar2() bool -======= - IterVar() string - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // AccuVar returns the accumulation variable name. AccuVar() string @@ -420,10 +411,7 @@ func (e *expr) SetKindCase(other Expr) { e.exprKindCase = &baseComprehensionExpr{ iterRange: c.IterRange(), iterVar: c.IterVar(), -<<<<<<< HEAD iterVar2: c.IterVar2(), -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) accuVar: c.AccuVar(), accuInit: c.AccuInit(), loopCond: c.LoopCondition(), @@ -532,10 +520,7 @@ var _ ComprehensionExpr = &baseComprehensionExpr{} type baseComprehensionExpr struct { iterRange Expr iterVar string -<<<<<<< HEAD iterVar2 string -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) accuVar string accuInit Expr loopCond Expr @@ -558,7 +543,6 @@ func (e *baseComprehensionExpr) IterVar() string { return e.iterVar } -<<<<<<< HEAD func (e *baseComprehensionExpr) IterVar2() string { return e.iterVar2 } @@ -567,8 +551,6 @@ func (e *baseComprehensionExpr) HasIterVar2() bool { return e.iterVar2 != "" } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (e *baseComprehensionExpr) AccuVar() string { return e.accuVar } diff --git a/vendor/github.com/google/cel-go/common/ast/factory.go b/vendor/github.com/google/cel-go/common/ast/factory.go index e891684782..d4dcde4d94 100644 --- a/vendor/github.com/google/cel-go/common/ast/factory.go +++ b/vendor/github.com/google/cel-go/common/ast/factory.go @@ -27,18 +27,12 @@ type ExprFactory interface { // NewCall creates an Expr value representing a global function call. NewCall(id int64, function string, args ...Expr) Expr -<<<<<<< HEAD // NewComprehension creates an Expr value representing a one-variable comprehension over a value range. NewComprehension(id int64, iterRange Expr, iterVar, accuVar string, accuInit, loopCondition, loopStep, result Expr) Expr // NewComprehensionTwoVar creates an Expr value representing a two-variable comprehension over a value range. NewComprehensionTwoVar(id int64, iterRange Expr, iterVar, iterVar2, accuVar string, accuInit, loopCondition, loopStep, result Expr) Expr -======= - // NewComprehension creates an Expr value representing a comprehension over a value range. - NewComprehension(id int64, iterRange Expr, iterVar, accuVar string, accuInit, loopCondition, loopStep, result Expr) Expr - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // NewMemberCall creates an Expr value representing a member function call. NewMemberCall(id int64, function string, receiver Expr, args ...Expr) Expr @@ -46,28 +40,18 @@ type ExprFactory interface { NewIdent(id int64, name string) Expr // NewAccuIdent creates an Expr value representing an accumulator identifier within a -<<<<<<< HEAD // comprehension. NewAccuIdent(id int64) Expr // AccuIdentName reports the name of the accumulator variable to be used within a comprehension. AccuIdentName() string -======= - //comprehension. - NewAccuIdent(id int64) Expr - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // NewLiteral creates an Expr value representing a literal value, such as a string or integer. NewLiteral(id int64, value ref.Val) Expr // NewList creates an Expr value representing a list literal expression with optional indices. // -<<<<<<< HEAD // Optional indices will typically be empty unless the CEL optional types are enabled. -======= - // Optional indicies will typically be empty unless the CEL optional types are enabled. 
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) NewList(id int64, elems []Expr, optIndices []int32) Expr // NewMap creates an Expr value representing a map literal expression @@ -97,7 +81,6 @@ type ExprFactory interface { isExprFactory() } -<<<<<<< HEAD type baseExprFactory struct { accumulatorName string } @@ -115,13 +98,6 @@ func NewExprFactoryWithAccumulator(id string) ExprFactory { return &baseExprFactory{ id, } -======= -type baseExprFactory struct{} - -// NewExprFactory creates an ExprFactory instance. -func NewExprFactory() ExprFactory { - return &baseExprFactory{} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (fac *baseExprFactory) NewCall(id int64, function string, args ...Expr) Expr { @@ -153,23 +129,17 @@ func (fac *baseExprFactory) NewMemberCall(id int64, function string, target Expr } func (fac *baseExprFactory) NewComprehension(id int64, iterRange Expr, iterVar, accuVar string, accuInit, loopCond, loopStep, result Expr) Expr { -<<<<<<< HEAD // Set the iter_var2 to empty string to indicate the second variable is omitted return fac.NewComprehensionTwoVar(id, iterRange, iterVar, "", accuVar, accuInit, loopCond, loopStep, result) } func (fac *baseExprFactory) NewComprehensionTwoVar(id int64, iterRange Expr, iterVar, iterVar2, accuVar string, accuInit, loopCond, loopStep, result Expr) Expr { -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return fac.newExpr( id, &baseComprehensionExpr{ iterRange: iterRange, iterVar: iterVar, -<<<<<<< HEAD iterVar2: iterVar2, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) accuVar: accuVar, accuInit: accuInit, loopCond: loopCond, @@ -183,15 +153,11 @@ func (fac *baseExprFactory) NewIdent(id int64, name string) Expr { } func (fac *baseExprFactory) NewAccuIdent(id int64) Expr { -<<<<<<< HEAD return fac.NewIdent(id, fac.AccuIdentName()) } func (fac *baseExprFactory) AccuIdentName() string { return fac.accumulatorName -======= - return fac.NewIdent(id, "__result__") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (fac *baseExprFactory) NewLiteral(id int64, value ref.Val) Expr { @@ -285,16 +251,10 @@ func (fac *baseExprFactory) CopyExpr(e Expr) Expr { return fac.NewMemberCall(e.ID(), c.FunctionName(), fac.CopyExpr(c.Target()), argsCopy...) 
case ComprehensionKind: compre := e.AsComprehension() -<<<<<<< HEAD return fac.NewComprehensionTwoVar(e.ID(), fac.CopyExpr(compre.IterRange()), compre.IterVar(), compre.IterVar2(), -======= - return fac.NewComprehension(e.ID(), - fac.CopyExpr(compre.IterRange()), - compre.IterVar(), ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) compre.AccuVar(), fac.CopyExpr(compre.AccuInit()), fac.CopyExpr(compre.LoopCondition()), diff --git a/vendor/github.com/google/cel-go/common/ast/navigable.go b/vendor/github.com/google/cel-go/common/ast/navigable.go index 278e4dd3fd..d7a90fb7c3 100644 --- a/vendor/github.com/google/cel-go/common/ast/navigable.go +++ b/vendor/github.com/google/cel-go/common/ast/navigable.go @@ -390,7 +390,6 @@ func (comp navigableComprehensionImpl) IterVar() string { return comp.Expr.AsComprehension().IterVar() } -<<<<<<< HEAD func (comp navigableComprehensionImpl) IterVar2() string { return comp.Expr.AsComprehension().IterVar2() } @@ -399,8 +398,6 @@ func (comp navigableComprehensionImpl) HasIterVar2() bool { return comp.Expr.AsComprehension().HasIterVar2() } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (comp navigableComprehensionImpl) AccuVar() string { return comp.Expr.AsComprehension().AccuVar() } diff --git a/vendor/github.com/google/cel-go/common/containers/container.go b/vendor/github.com/google/cel-go/common/containers/container.go index d7fbb4e6e3..3097a3f785 100644 --- a/vendor/github.com/google/cel-go/common/containers/container.go +++ b/vendor/github.com/google/cel-go/common/containers/container.go @@ -19,10 +19,7 @@ package containers import ( "fmt" "strings" -<<<<<<< HEAD "unicode" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/google/cel-go/common/ast" ) @@ -216,7 +213,6 @@ type ContainerOption func(*Container) (*Container, error) func Abbrevs(qualifiedNames ...string) ContainerOption { return func(c *Container) (*Container, error) { for _, qn := range qualifiedNames { -<<<<<<< HEAD qn = strings.TrimSpace(qn) for _, r := range qn { if !isIdentifierChar(r) { @@ -224,8 +220,6 @@ func Abbrevs(qualifiedNames ...string) ContainerOption { "invalid qualified name: %s, wanted name of the form 'qualified.name'", qn) } } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ind := strings.LastIndex(qn, ".") if ind <= 0 || ind >= len(qn)-1 { return nil, fmt.Errorf( @@ -292,13 +286,10 @@ func aliasAs(kind, qualifiedName, alias string) ContainerOption { } } -<<<<<<< HEAD func isIdentifierChar(r rune) bool { return r <= unicode.MaxASCII && (r == '.' || r == '_' || unicode.IsLetter(r) || unicode.IsNumber(r)) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Name sets the fully-qualified name of the Container. 
func Name(name string) ContainerOption { return func(c *Container) (*Container, error) { diff --git a/vendor/github.com/google/cel-go/common/debug/debug.go b/vendor/github.com/google/cel-go/common/debug/debug.go index b447cb015e..75f5f0d636 100644 --- a/vendor/github.com/google/cel-go/common/debug/debug.go +++ b/vendor/github.com/google/cel-go/common/debug/debug.go @@ -215,14 +215,11 @@ func (w *debugWriter) appendComprehension(comprehension ast.ComprehensionExpr) { w.append(comprehension.IterVar()) w.append(",") w.appendLine() -<<<<<<< HEAD if comprehension.HasIterVar2() { w.append(comprehension.IterVar2()) w.append(",") w.appendLine() } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) w.append("// Target") w.appendLine() w.Buffer(comprehension.IterRange()) @@ -260,11 +257,7 @@ func formatLiteral(c ref.Val) string { case types.Bool: return fmt.Sprintf("%t", v) case types.Bytes: -<<<<<<< HEAD return fmt.Sprintf("b%s", strconv.Quote(string(v))) -======= - return fmt.Sprintf("b\"%s\"", string(v)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) case types.Double: return fmt.Sprintf("%v", float64(v)) case types.Int: diff --git a/vendor/github.com/google/cel-go/common/decls/decls.go b/vendor/github.com/google/cel-go/common/decls/decls.go index d486bcaf91..bfeb52c515 100644 --- a/vendor/github.com/google/cel-go/common/decls/decls.go +++ b/vendor/github.com/google/cel-go/common/decls/decls.go @@ -162,13 +162,9 @@ func (f *FunctionDecl) AddOverload(overload *OverloadDecl) error { if oID == overload.ID() { if o.SignatureEquals(overload) && o.IsNonStrict() == overload.IsNonStrict() { // Allow redefinition of an overload implementation so long as the signatures match. -<<<<<<< HEAD if overload.hasBinding() { f.overloads[oID] = overload } -======= - f.overloads[oID] = overload ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } return fmt.Errorf("overload redefinition in function. %s: %s has multiple definitions", f.Name(), oID) @@ -255,7 +251,6 @@ func (f *FunctionDecl) Bindings() ([]*functions.Overload, error) { // are preserved in order to assist with the function resolution step. switch len(args) { case 1: -<<<<<<< HEAD if o.unaryOp != nil && o.matchesRuntimeSignature(f.disableTypeGuards, args...) { return o.unaryOp(args[0]) } @@ -265,17 +260,6 @@ func (f *FunctionDecl) Bindings() ([]*functions.Overload, error) { } } if o.functionOp != nil && o.matchesRuntimeSignature(f.disableTypeGuards, args...) { -======= - if o.unaryOp != nil && o.matchesRuntimeSignature( /* disableTypeGuards=*/ false, args...) { - return o.unaryOp(args[0]) - } - case 2: - if o.binaryOp != nil && o.matchesRuntimeSignature( /* disableTypeGuards=*/ false, args...) { - return o.binaryOp(args[0], args[1]) - } - } - if o.functionOp != nil && o.matchesRuntimeSignature( /* disableTypeGuards=*/ false, args...) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return o.functionOp(args...) } // eventually this will fall through to the noSuchOverload below. 
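[The Bindings hunk above does two things: it threads the function's own disableTypeGuards flag into matchesRuntimeSignature (the removed side hard-coded false), and it preserves the arity-first dispatch order, where the specialized unary/binary implementations win and the variadic functionOp is the fallback. A simplified sketch of that dispatch shape, with types reduced to any; these are not the actual cel-go declarations:

package sketch

type overloadImpl struct {
	unaryOp    func(arg any) any
	binaryOp   func(lhs, rhs any) any
	functionOp func(args ...any) any
}

// dispatch prefers the arity-specialized implementation and falls back to
// the variadic one; (nil, false) corresponds to the "no such overload"
// error path mentioned in the trailing comment above.
func (o *overloadImpl) dispatch(args ...any) (any, bool) {
	switch len(args) {
	case 1:
		if o.unaryOp != nil {
			return o.unaryOp(args[0]), true
		}
	case 2:
		if o.binaryOp != nil {
			return o.binaryOp(args[0], args[1]), true
		}
	}
	if o.functionOp != nil {
		return o.functionOp(args...), true
	}
	return nil, false
}]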
@@ -793,7 +777,6 @@ func (v *VariableDecl) DeclarationIsEquivalent(other *VariableDecl) bool { return v.Name() == other.Name() && v.Type().IsEquivalentType(other.Type()) } -<<<<<<< HEAD // TypeVariable creates a new type identifier for use within a types.Provider func TypeVariable(t *types.Type) *VariableDecl { return NewVariable(t.TypeName(), types.NewTypeTypeWithParam(t)) @@ -806,10 +789,6 @@ func VariableDeclToExprDecl(v *VariableDecl) (*exprpb.Decl, error) { // variableDeclToExprDecl converts a go-native variable declaration into a protobuf-type variable declaration. func variableDeclToExprDecl(v *VariableDecl) (*exprpb.Decl, error) { -======= -// VariableDeclToExprDecl converts a go-native variable declaration into a protobuf-type variable declaration. -func VariableDeclToExprDecl(v *VariableDecl) (*exprpb.Decl, error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) varType, err := types.TypeToExprType(v.Type()) if err != nil { return nil, err @@ -817,7 +796,6 @@ func VariableDeclToExprDecl(v *VariableDecl) (*exprpb.Decl, error) { return chkdecls.NewVar(v.Name(), varType), nil } -<<<<<<< HEAD // FunctionDeclToExprDecl converts a go-native function declaration into a protobuf-typed function declaration. func FunctionDeclToExprDecl(f *FunctionDecl) (*exprpb.Decl, error) { return functionDeclToExprDecl(f) @@ -825,15 +803,6 @@ func FunctionDeclToExprDecl(f *FunctionDecl) (*exprpb.Decl, error) { // functionDeclToExprDecl converts a go-native function declaration into a protobuf-typed function declaration. func functionDeclToExprDecl(f *FunctionDecl) (*exprpb.Decl, error) { -======= -// TypeVariable creates a new type identifier for use within a types.Provider -func TypeVariable(t *types.Type) *VariableDecl { - return NewVariable(t.TypeName(), types.NewTypeTypeWithParam(t)) -} - -// FunctionDeclToExprDecl converts a go-native function declaration into a protobuf-typed function declaration. -func FunctionDeclToExprDecl(f *FunctionDecl) (*exprpb.Decl, error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) overloads := make([]*exprpb.Decl_FunctionDecl_Overload, len(f.overloads)) for i, oID := range f.overloadOrdinals { o := f.overloads[oID] diff --git a/vendor/github.com/google/cel-go/common/error.go b/vendor/github.com/google/cel-go/common/error.go index f0be04eb2b..0cf21345e6 100644 --- a/vendor/github.com/google/cel-go/common/error.go +++ b/vendor/github.com/google/cel-go/common/error.go @@ -18,11 +18,6 @@ import ( "fmt" "strings" "unicode/utf8" -<<<<<<< HEAD -======= - - "golang.org/x/text/width" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // NewError creates an error associated with an expression id with the given message at the given location. @@ -38,28 +33,15 @@ type Error struct { } const ( -<<<<<<< HEAD dot = "." ind = "^" wideDot = "\uff0e" wideInd = "\uff3e" -======= - dot = "." - ind = "^" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // maxSnippetLength is the largest number of characters which can be rendered in an error message snippet. maxSnippetLength = 16384 ) -<<<<<<< HEAD -======= -var ( - wideDot = width.Widen.String(dot) - wideInd = width.Widen.String(ind) -) - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // ToDisplayString decorates the error message with the source location. 
func (e *Error) ToDisplayString(source Source) string { var result = fmt.Sprintf("ERROR: %s:%d:%d: %s", diff --git a/vendor/github.com/google/cel-go/common/errors.go b/vendor/github.com/google/cel-go/common/errors.go index f4338630e7..c8865df8cd 100644 --- a/vendor/github.com/google/cel-go/common/errors.go +++ b/vendor/github.com/google/cel-go/common/errors.go @@ -30,7 +30,6 @@ type Errors struct { // NewErrors creates a new instance of the Errors type. func NewErrors(source Source) *Errors { -<<<<<<< HEAD src := source if src == nil { src = NewTextSource("") @@ -38,11 +37,6 @@ func NewErrors(source Source) *Errors { return &Errors{ errors: []*Error{}, source: src, -======= - return &Errors{ - errors: []*Error{}, - source: source, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) maxErrorsToReport: 100, } } @@ -52,14 +46,11 @@ func (e *Errors) ReportError(l Location, format string, args ...any) { e.ReportErrorAtID(0, l, format, args...) } -<<<<<<< HEAD // ReportErrorString records an error at a source location. func (e *Errors) ReportErrorString(l Location, message string) { e.ReportErrorAtID(0, l, "%s", message) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // ReportErrorAtID records an error at a source location and expression id. func (e *Errors) ReportErrorAtID(id int64, l Location, format string, args ...any) { e.numErrors++ diff --git a/vendor/github.com/google/cel-go/common/runes/buffer.go b/vendor/github.com/google/cel-go/common/runes/buffer.go index d13679c044..021198224d 100644 --- a/vendor/github.com/google/cel-go/common/runes/buffer.go +++ b/vendor/github.com/google/cel-go/common/runes/buffer.go @@ -127,7 +127,6 @@ var nilBuffer = &emptyBuffer{} // elements of the byte or uint16 array, and continue. The underlying storage is an rune array // containing any Unicode character. 
func NewBuffer(data string) Buffer { -<<<<<<< HEAD buf, _ := newBuffer(data, false) return buf } @@ -160,31 +159,16 @@ func newBuffer(data string, lines bool) (Buffer, []int32) { buf16 []uint16 buf32 []rune offs []int32 -======= - if len(data) == 0 { - return nilBuffer - } - var ( - idx = 0 - buf8 = make([]byte, 0, len(data)) - buf16 []uint16 - buf32 []rune ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) for idx < len(data) { r, s := utf8.DecodeRuneInString(data[idx:]) idx += s -<<<<<<< HEAD if lines && r == '\n' { offs = append(offs, off+1) } if r < utf8.RuneSelf { buf8 = append(buf8, byte(r)) off++ -======= - if r < utf8.RuneSelf { - buf8 = append(buf8, byte(r)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) continue } if r <= 0xffff { @@ -194,10 +178,7 @@ func newBuffer(data string, lines bool) (Buffer, []int32) { } buf8 = nil buf16 = append(buf16, uint16(r)) -<<<<<<< HEAD off++ -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) goto copy16 } buf32 = make([]rune, len(buf8), len(data)) @@ -206,7 +187,6 @@ func newBuffer(data string, lines bool) (Buffer, []int32) { } buf8 = nil buf32 = append(buf32, r) -<<<<<<< HEAD off++ goto copy32 } @@ -216,28 +196,16 @@ func newBuffer(data string, lines bool) (Buffer, []int32) { return &asciiBuffer{ arr: buf8, }, offs -======= - goto copy32 - } - return &asciiBuffer{ - arr: buf8, - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) copy16: for idx < len(data) { r, s := utf8.DecodeRuneInString(data[idx:]) idx += s -<<<<<<< HEAD if lines && r == '\n' { offs = append(offs, off+1) } if r <= 0xffff { buf16 = append(buf16, uint16(r)) off++ -======= - if r <= 0xffff { - buf16 = append(buf16, uint16(r)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) continue } buf32 = make([]rune, len(buf16), len(data)) @@ -246,7 +214,6 @@ copy16: } buf16 = nil buf32 = append(buf32, r) -<<<<<<< HEAD off++ goto copy32 } @@ -256,18 +223,10 @@ copy16: return &basicBuffer{ arr: buf16, }, offs -======= - goto copy32 - } - return &basicBuffer{ - arr: buf16, - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) copy32: for idx < len(data) { r, s := utf8.DecodeRuneInString(data[idx:]) idx += s -<<<<<<< HEAD if lines && r == '\n' { offs = append(offs, off+1) } @@ -280,11 +239,4 @@ copy32: return &supplementalBuffer{ arr: buf32, }, offs -======= - buf32 = append(buf32, r) - } - return &supplementalBuffer{ - arr: buf32, - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/google/cel-go/common/source.go b/vendor/github.com/google/cel-go/common/source.go index 3d387a457c..ec79cb5454 100644 --- a/vendor/github.com/google/cel-go/common/source.go +++ b/vendor/github.com/google/cel-go/common/source.go @@ -15,12 +15,6 @@ package common import ( -<<<<<<< HEAD -======= - "strings" - "unicode/utf8" - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/google/cel-go/common/runes" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" @@ -83,25 +77,11 @@ func NewTextSource(text string) Source { // NewStringSource creates a new Source from the given contents and description. func NewStringSource(contents string, description string) Source { // Compute line offsets up front as they are referred to frequently. 
-<<<<<<< HEAD buf, offs := runes.NewBufferAndLineOffsets(contents) return &sourceImpl{ Buffer: buf, description: description, lineOffsets: offs, -======= - lines := strings.Split(contents, "\n") - offsets := make([]int32, len(lines)) - var offset int32 - for i, line := range lines { - offset = offset + int32(utf8.RuneCountInString(line)) + 1 - offsets[int32(i)] = offset - } - return &sourceImpl{ - Buffer: runes.NewBuffer(contents), - description: description, - lineOffsets: offsets, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -183,14 +163,8 @@ func (s *sourceImpl) findLine(characterOffset int32) (int32, int32) { for _, lineOffset := range s.lineOffsets { if lineOffset > characterOffset { break -<<<<<<< HEAD } line++ -======= - } else { - line++ - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if line == 1 { return line, 0 diff --git a/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel b/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel index 1dea5de7ce..b55f452156 100644 --- a/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel +++ b/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel @@ -12,10 +12,6 @@ go_library( ], importpath = "github.com/google/cel-go/common/stdlib", deps = [ -<<<<<<< HEAD -======= - "//checker/decls:go_default_library", ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "//common/decls:go_default_library", "//common/functions:go_default_library", "//common/operators:go_default_library", @@ -23,9 +19,5 @@ go_library( "//common/types:go_default_library", "//common/types/ref:go_default_library", "//common/types/traits:go_default_library", -<<<<<<< HEAD -======= - "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ], ) \ No newline at end of file diff --git a/vendor/github.com/google/cel-go/common/stdlib/standard.go b/vendor/github.com/google/cel-go/common/stdlib/standard.go index fc3e2e1f3c..1550c17863 100644 --- a/vendor/github.com/google/cel-go/common/stdlib/standard.go +++ b/vendor/github.com/google/cel-go/common/stdlib/standard.go @@ -23,22 +23,11 @@ import ( "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" -<<<<<<< HEAD -======= - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) var ( stdFunctions []*decls.FunctionDecl -<<<<<<< HEAD - stdTypes []*decls.VariableDecl -======= - stdFnDecls []*exprpb.Decl stdTypes []*decls.VariableDecl - stdTypeDecls []*exprpb.Decl ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) func init() { @@ -62,18 +51,6 @@ func init() { decls.TypeVariable(types.UintType), } -<<<<<<< HEAD -======= - stdTypeDecls = make([]*exprpb.Decl, 0, len(stdTypes)) - for _, stdType := range stdTypes { - typeVar, err := decls.VariableDeclToExprDecl(stdType) - if err != nil { - panic(err) - } - stdTypeDecls = append(stdTypeDecls, typeVar) - } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) stdFunctions = []*decls.FunctionDecl{ // Logical operators. Special-cased within the interpreter. // Note, the singleton binding prevents extensions from overriding the operator behavior. 
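[With the stdFnDecls/stdTypeDecls mirrors removed above, this package's only outputs are the native declarations; the deprecated FunctionExprDecls and TypeExprDecls accessors are deleted in the hunks that follow. A hypothetical caller migrating off the legacy proto decls would enumerate the native ones directly; listStdlib is an illustrative name, not part of the package:

package sketch

import (
	"fmt"

	"github.com/google/cel-go/common/stdlib"
)

// listStdlib enumerates the standard library using only the surviving
// accessors, stdlib.Functions and stdlib.Types.
func listStdlib() {
	for _, fn := range stdlib.Functions() {
		fmt.Println("function:", fn.Name())
	}
	for _, v := range stdlib.Types() {
		fmt.Println("type:", v.Name())
	}
}]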
@@ -586,21 +563,6 @@ func init() { decls.MemberOverload(overloads.DurationToMilliseconds, argTypes(types.DurationType), types.IntType)), } -<<<<<<< HEAD -======= - - stdFnDecls = make([]*exprpb.Decl, 0, len(stdFunctions)) - for _, fn := range stdFunctions { - if fn.IsDeclarationDisabled() { - continue - } - ed, err := decls.FunctionDeclToExprDecl(fn) - if err != nil { - panic(err) - } - stdFnDecls = append(stdFnDecls, ed) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Functions returns the set of standard library function declarations and definitions for CEL. @@ -608,33 +570,11 @@ func Functions() []*decls.FunctionDecl { return stdFunctions } -<<<<<<< HEAD -======= -// FunctionExprDecls returns the legacy style protobuf-typed declarations for all functions and overloads -// in the CEL standard environment. -// -// Deprecated: use Functions -func FunctionExprDecls() []*exprpb.Decl { - return stdFnDecls -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Types returns the set of standard library types for CEL. func Types() []*decls.VariableDecl { return stdTypes } -<<<<<<< HEAD -======= -// TypeExprDecls returns the legacy style protobuf-typed declarations for all types in the CEL -// standard environment. -// -// Deprecated: use Types -func TypeExprDecls() []*exprpb.Decl { - return stdTypeDecls -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func notStrictlyFalse(value ref.Val) ref.Val { if types.IsBool(value) { return value diff --git a/vendor/github.com/google/cel-go/common/types/BUILD.bazel b/vendor/github.com/google/cel-go/common/types/BUILD.bazel index 822ce2f367..8f010fae44 100644 --- a/vendor/github.com/google/cel-go/common/types/BUILD.bazel +++ b/vendor/github.com/google/cel-go/common/types/BUILD.bazel @@ -40,18 +40,12 @@ go_library( "//common/types/ref:go_default_library", "//common/types/traits:go_default_library", "@com_github_stoewer_go_strcase//:go_default_library", -<<<<<<< HEAD "@dev_cel_expr//:expr", -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//encoding/protojson:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//reflect/protoreflect:go_default_library", -<<<<<<< HEAD "@org_golang_google_protobuf//types/dynamicpb:go_default_library", -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "@org_golang_google_protobuf//types/known/anypb:go_default_library", "@org_golang_google_protobuf//types/known/durationpb:go_default_library", "@org_golang_google_protobuf//types/known/structpb:go_default_library", diff --git a/vendor/github.com/google/cel-go/common/types/bytes.go b/vendor/github.com/google/cel-go/common/types/bytes.go index 2950c3557e..7e813e291b 100644 --- a/vendor/github.com/google/cel-go/common/types/bytes.go +++ b/vendor/github.com/google/cel-go/common/types/bytes.go @@ -58,7 +58,6 @@ func (b Bytes) Compare(other ref.Val) ref.Val { // ConvertToNative implements the ref.Val interface method. 
func (b Bytes) ConvertToNative(typeDesc reflect.Type) (any, error) { switch typeDesc.Kind() { -<<<<<<< HEAD case reflect.Array: if len(b) != typeDesc.Len() { return nil, fmt.Errorf("[%d]byte not assignable to [%d]byte array", len(b), typeDesc.Len()) @@ -70,9 +69,6 @@ func (b Bytes) ConvertToNative(typeDesc reflect.Type) (any, error) { } return refArr.Interface(), nil case reflect.Slice: -======= - case reflect.Array, reflect.Slice: ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return reflect.ValueOf(b).Convert(typeDesc).Interface(), nil case reflect.Ptr: switch typeDesc { diff --git a/vendor/github.com/google/cel-go/common/types/err.go b/vendor/github.com/google/cel-go/common/types/err.go index 5738db7af1..17ab1a95e5 100644 --- a/vendor/github.com/google/cel-go/common/types/err.go +++ b/vendor/github.com/google/cel-go/common/types/err.go @@ -62,15 +62,12 @@ func NewErr(format string, args ...any) ref.Val { return &Err{error: fmt.Errorf(format, args...)} } -<<<<<<< HEAD // NewErrFromString creates a new Err with the provided message. // TODO: Audit the use of this function and standardize the error messages and codes. func NewErrFromString(message string) ref.Val { return &Err{error: errors.New(message)} } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // NewErrWithNodeID creates a new Err described by the format string and args. // TODO: Audit the use of this function and standardize the error messages and codes. func NewErrWithNodeID(id int64, format string, args ...any) ref.Val { diff --git a/vendor/github.com/google/cel-go/common/types/list.go b/vendor/github.com/google/cel-go/common/types/list.go index c665e8e100..7e68a5daf3 100644 --- a/vendor/github.com/google/cel-go/common/types/list.go +++ b/vendor/github.com/google/cel-go/common/types/list.go @@ -243,11 +243,7 @@ func (l *baseList) Equal(other ref.Val) ref.Val { func (l *baseList) Get(index ref.Val) ref.Val { ind, err := IndexOrError(index) if err != nil { -<<<<<<< HEAD return ValOrErr(index, "%v", err) -======= - return ValOrErr(index, err.Error()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if ind < 0 || ind >= l.size { return NewErr("index '%d' out of range in list size '%d'", ind, l.Size()) @@ -260,7 +256,6 @@ func (l *baseList) IsZeroValue() bool { return l.size == 0 } -<<<<<<< HEAD // Fold calls the FoldEntry method for each (index, value) pair in the list. func (l *baseList) Fold(f traits.Folder) { for i := 0; i < l.size; i++ { @@ -270,8 +265,6 @@ func (l *baseList) Fold(f traits.Folder) { } } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Iterator implements the traits.Iterable interface method. func (l *baseList) Iterator() traits.Iterator { return newListIterator(l) @@ -434,11 +427,7 @@ func (l *concatList) Equal(other ref.Val) ref.Val { func (l *concatList) Get(index ref.Val) ref.Val { ind, err := IndexOrError(index) if err != nil { -<<<<<<< HEAD return ValOrErr(index, "%v", err) -======= - return ValOrErr(index, err.Error()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } i := Int(ind) if i < l.prevList.Size().(Int) { @@ -453,7 +442,6 @@ func (l *concatList) IsZeroValue() bool { return l.Size().(Int) == 0 } -<<<<<<< HEAD // Fold calls the FoldEntry method for each (index, value) pair in the list. 
func (l *concatList) Fold(f traits.Folder) { for i := Int(0); i < l.Size().(Int); i++ { @@ -463,8 +451,6 @@ func (l *concatList) Fold(f traits.Folder) { } } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Iterator implements the traits.Iterable interface method. func (l *concatList) Iterator() traits.Iterator { return newListIterator(l) @@ -559,7 +545,6 @@ func IndexOrError(index ref.Val) (int, error) { return -1, fmt.Errorf("unsupported index type '%s' in list", index.Type()) } } -<<<<<<< HEAD // ToFoldableList will create a Foldable version of a list suitable for key-value pair iteration. // @@ -587,5 +572,3 @@ func (l interopFoldableList) Fold(f traits.Folder) { } } } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/google/cel-go/common/types/map.go b/vendor/github.com/google/cel-go/common/types/map.go index 44378d6086..cb6cce78b0 100644 --- a/vendor/github.com/google/cel-go/common/types/map.go +++ b/vendor/github.com/google/cel-go/common/types/map.go @@ -94,7 +94,6 @@ func NewProtoMap(adapter Adapter, value *pb.Map) traits.Mapper { } } -<<<<<<< HEAD // NewMutableMap constructs a mutable map from an adapter and a set of map values. func NewMutableMap(adapter Adapter, mutableValues map[ref.Val]ref.Val) traits.MutableMapper { mutableCopy := make(map[ref.Val]ref.Val, len(mutableValues)) @@ -113,8 +112,6 @@ func NewMutableMap(adapter Adapter, mutableValues map[ref.Val]ref.Val) traits.Mu return m } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // mapAccessor is a private interface for finding values within a map and iterating over the keys. // This interface implements portions of the API surface area required by the traits.Mapper // interface. @@ -126,12 +123,9 @@ type mapAccessor interface { // Iterator returns an Iterator over the map key set. Iterator() traits.Iterator -<<<<<<< HEAD // Fold calls the FoldEntry method for each (key, value) pair in the map. Fold(traits.Folder) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // baseMap is a reflection based map implementation designed to handle a variety of map-like types. @@ -334,7 +328,6 @@ func (m *baseMap) Value() any { return m.value } -<<<<<<< HEAD // mutableMap holds onto a set of mutable values which are used for intermediate computations. type mutableMap struct { *baseMap @@ -357,8 +350,6 @@ func (m *mutableMap) ToImmutableMap() traits.Mapper { return NewRefValMap(m.Adapter, m.mutableValues) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func newJSONStructAccessor(adapter Adapter, st map[string]*structpb.Value) mapAccessor { return &jsonStructAccessor{ Adapter: adapter, @@ -402,7 +393,6 @@ func (a *jsonStructAccessor) Iterator() traits.Iterator { } } -<<<<<<< HEAD // Fold calls the FoldEntry method for each (key, value) pair in the map. func (a *jsonStructAccessor) Fold(f traits.Folder) { for k, v := range a.st { @@ -412,8 +402,6 @@ func (a *jsonStructAccessor) Fold(f traits.Folder) { } } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func newReflectMapAccessor(adapter Adapter, value reflect.Value) mapAccessor { keyType := value.Type().Key() return &reflectMapAccessor{ @@ -488,7 +476,6 @@ func (m *reflectMapAccessor) Iterator() traits.Iterator { } } -<<<<<<< HEAD // Fold calls the FoldEntry method for each (key, value) pair in the map. 
func (m *reflectMapAccessor) Fold(f traits.Folder) { mapRange := m.refValue.MapRange() @@ -499,8 +486,6 @@ func (m *reflectMapAccessor) Fold(f traits.Folder) { } } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func newRefValMapAccessor(mapVal map[ref.Val]ref.Val) mapAccessor { return &refValMapAccessor{mapVal: mapVal} } @@ -554,7 +539,6 @@ func (a *refValMapAccessor) Iterator() traits.Iterator { } } -<<<<<<< HEAD // Fold calls the FoldEntry method for each (key, value) pair in the map. func (a *refValMapAccessor) Fold(f traits.Folder) { for k, v := range a.mapVal { @@ -564,8 +548,6 @@ func (a *refValMapAccessor) Fold(f traits.Folder) { } } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func newStringMapAccessor(strMap map[string]string) mapAccessor { return &stringMapAccessor{mapVal: strMap} } @@ -604,7 +586,6 @@ func (a *stringMapAccessor) Iterator() traits.Iterator { } } -<<<<<<< HEAD // Fold calls the FoldEntry method for each (key, value) pair in the map. func (a *stringMapAccessor) Fold(f traits.Folder) { for k, v := range a.mapVal { @@ -614,8 +595,6 @@ func (a *stringMapAccessor) Fold(f traits.Folder) { } } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func newStringIfaceMapAccessor(adapter Adapter, mapVal map[string]any) mapAccessor { return &stringIfaceMapAccessor{ Adapter: adapter, @@ -658,7 +637,6 @@ func (a *stringIfaceMapAccessor) Iterator() traits.Iterator { } } -<<<<<<< HEAD // Fold calls the FoldEntry method for each (key, value) pair in the map. func (a *stringIfaceMapAccessor) Fold(f traits.Folder) { for k, v := range a.mapVal { @@ -668,8 +646,6 @@ func (a *stringIfaceMapAccessor) Fold(f traits.Folder) { } } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // protoMap is a specialized, separate implementation of the traits.Mapper interfaces tailored to // accessing protoreflect.Map values. type protoMap struct { @@ -882,7 +858,6 @@ func (m *protoMap) Iterator() traits.Iterator { } } -<<<<<<< HEAD // Fold calls the FoldEntry method for each (key, value) pair in the map. func (m *protoMap) Fold(f traits.Folder) { m.value.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { @@ -890,8 +865,6 @@ func (m *protoMap) Fold(f traits.Folder) { }) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Size returns the number of entries in the protoreflect.Map. func (m *protoMap) Size() ref.Val { return Int(m.value.Len()) @@ -975,7 +948,6 @@ func (it *stringKeyIterator) Next() ref.Val { } return nil } -<<<<<<< HEAD // ToFoldableMap will create a Foldable version of a map suitable for key-value pair iteration. // @@ -1028,5 +1000,3 @@ func InsertMapKeyValue(m traits.Mapper, k, v ref.Val) ref.Val { } return NewErr("insert failed: key %v already exists", k) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/google/cel-go/common/types/null.go b/vendor/github.com/google/cel-go/common/types/null.go index 0a330ca4ec..36514ff200 100644 --- a/vendor/github.com/google/cel-go/common/types/null.go +++ b/vendor/github.com/google/cel-go/common/types/null.go @@ -35,11 +35,8 @@ var ( // golang reflect type for Null values. nullReflectType = reflect.TypeOf(NullValue) -<<<<<<< HEAD protoIfaceType = reflect.TypeOf((*proto.Message)(nil)).Elem() -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // ConvertToNative implements ref.Val.ConvertToNative. 
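The map accessors in this hunk each gain a Fold method behind the traits.Folder interface this patch introduces (see the traits/iterator.go hunk below). A rough sketch of a consumer, where entryCollector and its fields are hypothetical names, not part of cel-go:

package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/traits"
)

// entryCollector gathers (key, value) pairs until it reaches a limit;
// returning false from FoldEntry tells Fold to stop early.
type entryCollector struct {
	entries map[any]any
	limit   int
}

func (c *entryCollector) FoldEntry(key, val any) bool {
	c.entries[key] = val
	return len(c.entries) < c.limit
}

func main() {
	m := types.NewStringStringMap(types.DefaultTypeAdapter,
		map[string]string{"a": "1", "b": "2"})
	c := &entryCollector{entries: map[any]any{}, limit: 10}
	if foldable, ok := m.(traits.Foldable); ok {
		foldable.Fold(c)
	}
	fmt.Println(len(c.entries)) // 2
}

The early-exit contract is the point of the design: unlike Iterator, a Folder can stop a scan partway through without the caller materializing the full key set.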
@@ -66,7 +63,6 @@ func (n Null) ConvertToNative(typeDesc reflect.Type) (any, error) { return structpb.NewNullValue(), nil case boolWrapperType, byteWrapperType, doubleWrapperType, floatWrapperType, int32WrapperType, int64WrapperType, stringWrapperType, uint32WrapperType, -<<<<<<< HEAD uint64WrapperType, durationValueType, timestampValueType, protoIfaceType: return nil, nil case jsonListValueType, jsonStructType: @@ -75,10 +71,6 @@ func (n Null) ConvertToNative(typeDesc reflect.Type) (any, error) { if typeDesc.Implements(protoIfaceType) { return nil, nil } -======= - uint64WrapperType: - return nil, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } case reflect.Interface: nv := n.Value() diff --git a/vendor/github.com/google/cel-go/common/types/object.go b/vendor/github.com/google/cel-go/common/types/object.go index b664c27b69..5377bff8de 100644 --- a/vendor/github.com/google/cel-go/common/types/object.go +++ b/vendor/github.com/google/cel-go/common/types/object.go @@ -151,11 +151,7 @@ func (o *protoObj) Get(index ref.Val) ref.Val { } fv, err := fd.GetFrom(o.value) if err != nil { -<<<<<<< HEAD return NewErrFromString(err.Error()) -======= - return NewErr(err.Error()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return o.NativeToValue(fv) } diff --git a/vendor/github.com/google/cel-go/common/types/pb/type.go b/vendor/github.com/google/cel-go/common/types/pb/type.go index d6458a858a..bdd474c95a 100644 --- a/vendor/github.com/google/cel-go/common/types/pb/type.go +++ b/vendor/github.com/google/cel-go/common/types/pb/type.go @@ -427,7 +427,6 @@ func unwrap(desc description, msg proto.Message) (any, bool, error) { return structpb.NullValue_NULL_VALUE, true, nil } case *wrapperspb.BoolValue: -<<<<<<< HEAD if v == nil { return nil, true, nil } @@ -471,24 +470,6 @@ func unwrap(desc description, msg proto.Message) (any, bool, error) { if v == nil { return nil, true, nil } -======= - return v.GetValue(), true, nil - case *wrapperspb.BytesValue: - return v.GetValue(), true, nil - case *wrapperspb.DoubleValue: - return v.GetValue(), true, nil - case *wrapperspb.FloatValue: - return float64(v.GetValue()), true, nil - case *wrapperspb.Int32Value: - return int64(v.GetValue()), true, nil - case *wrapperspb.Int64Value: - return v.GetValue(), true, nil - case *wrapperspb.StringValue: - return v.GetValue(), true, nil - case *wrapperspb.UInt32Value: - return uint64(v.GetValue()), true, nil - case *wrapperspb.UInt64Value: ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return v.GetValue(), true, nil } return msg, false, nil diff --git a/vendor/github.com/google/cel-go/common/types/provider.go b/vendor/github.com/google/cel-go/common/types/provider.go index 4a89b97f3b..936a4e28b2 100644 --- a/vendor/github.com/google/cel-go/common/types/provider.go +++ b/vendor/github.com/google/cel-go/common/types/provider.go @@ -585,7 +585,6 @@ func nativeToValue(a Adapter, value any) (ref.Val, bool) { refKind := refValue.Kind() switch refKind { case reflect.Array, reflect.Slice: -<<<<<<< HEAD if refValue.Type().Elem() == reflect.TypeOf(byte(0)) { if refValue.CanAddr() { return Bytes(refValue.Bytes()), true @@ -594,8 +593,6 @@ func nativeToValue(a Adapter, value any) (ref.Val, bool) { tmp.Elem().Set(refValue) return Bytes(tmp.Elem().Bytes()), true } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return NewDynamicList(a, v), true case reflect.Map: return NewDynamicMap(a, v), true diff --git a/vendor/github.com/google/cel-go/common/types/traits/iterator.go 
b/vendor/github.com/google/cel-go/common/types/traits/iterator.go index dc325ee34a..91c10f08fc 100644 --- a/vendor/github.com/google/cel-go/common/types/traits/iterator.go +++ b/vendor/github.com/google/cel-go/common/types/traits/iterator.go @@ -34,7 +34,6 @@ type Iterator interface { // Next returns the next element. Next() ref.Val } -<<<<<<< HEAD // Foldable aggregate types support iteration over (key, value) or (index, value) pairs. type Foldable interface { @@ -48,5 +47,3 @@ type Folder interface { // If the output is true, continue folding. Otherwise, terminate the fold. FoldEntry(key, val any) bool } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/google/cel-go/common/types/traits/lister.go b/vendor/github.com/google/cel-go/common/types/traits/lister.go index 6256a4ed52..e54781a602 100644 --- a/vendor/github.com/google/cel-go/common/types/traits/lister.go +++ b/vendor/github.com/google/cel-go/common/types/traits/lister.go @@ -27,12 +27,9 @@ type Lister interface { } // MutableLister interface which emits an immutable result after an intermediate computation. -<<<<<<< HEAD // // Note, this interface is intended only to be used within Comprehensions where the mutable // value is not directly observable within the user-authored CEL expression. -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type MutableLister interface { Lister ToImmutableList() Lister diff --git a/vendor/github.com/google/cel-go/common/types/traits/mapper.go b/vendor/github.com/google/cel-go/common/types/traits/mapper.go index f844e75c5f..d13333f3f6 100644 --- a/vendor/github.com/google/cel-go/common/types/traits/mapper.go +++ b/vendor/github.com/google/cel-go/common/types/traits/mapper.go @@ -31,7 +31,6 @@ type Mapper interface { // (Unknown|Err, false). Find(key ref.Val) (ref.Val, bool) } -<<<<<<< HEAD // MutableMapper interface which emits an immutable result after an intermediate computation. // @@ -47,5 +46,3 @@ type MutableMapper interface { // ToImmutableMap converts a mutable map into an immutable map. ToImmutableMap() Mapper } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/google/cel-go/common/types/traits/traits.go b/vendor/github.com/google/cel-go/common/types/traits/traits.go index 6e3f8bbb00..51a09df564 100644 --- a/vendor/github.com/google/cel-go/common/types/traits/traits.go +++ b/vendor/github.com/google/cel-go/common/types/traits/traits.go @@ -59,7 +59,6 @@ const ( // SizerType types support the size() method. SizerType -<<<<<<< HEAD // SubtractorType types support '-' operations. SubtractorType @@ -77,8 +76,4 @@ const ( // // The MapperType is syntactic sugar and not intended to be a perfect reflection of all Map operators. MapperType = ContainerType | IndexerType | IterableType | SizerType -======= - // SubtractorType type support '-' operations. 
- SubtractorType ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) diff --git a/vendor/github.com/google/cel-go/common/types/types.go b/vendor/github.com/google/cel-go/common/types/types.go index 85ddf981aa..f419beabd0 100644 --- a/vendor/github.com/google/cel-go/common/types/types.go +++ b/vendor/github.com/google/cel-go/common/types/types.go @@ -19,19 +19,13 @@ import ( "reflect" "strings" -<<<<<<< HEAD "google.golang.org/protobuf/proto" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) chkdecls "github.com/google/cel-go/checker/decls" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" -<<<<<<< HEAD celpb "cel.dev/expr" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) @@ -675,7 +669,6 @@ func TypeToExprType(t *Type) (*exprpb.Type, error) { // ExprTypeToType converts a protobuf CEL type representation to a CEL-native type representation. func ExprTypeToType(t *exprpb.Type) (*Type, error) { -<<<<<<< HEAD return AlphaProtoAsType(t) } @@ -697,52 +690,28 @@ func ProtoAsType(t *celpb.Type) (*Type, error) { paramTypes := make([]*Type, len(t.GetAbstractType().GetParameterTypes())) for i, p := range t.GetAbstractType().GetParameterTypes() { pt, err := ProtoAsType(p) -======= - switch t.GetTypeKind().(type) { - case *exprpb.Type_Dyn: - return DynType, nil - case *exprpb.Type_AbstractType_: - paramTypes := make([]*Type, len(t.GetAbstractType().GetParameterTypes())) - for i, p := range t.GetAbstractType().GetParameterTypes() { - pt, err := ExprTypeToType(p) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } paramTypes[i] = pt } return NewOpaqueType(t.GetAbstractType().GetName(), paramTypes...), nil -<<<<<<< HEAD case *celpb.Type_ListType_: et, err := ProtoAsType(t.GetListType().GetElemType()) -======= - case *exprpb.Type_ListType_: - et, err := ExprTypeToType(t.GetListType().GetElemType()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } return NewListType(et), nil -<<<<<<< HEAD case *celpb.Type_MapType_: kt, err := ProtoAsType(t.GetMapType().GetKeyType()) if err != nil { return nil, err } vt, err := ProtoAsType(t.GetMapType().GetValueType()) -======= - case *exprpb.Type_MapType_: - kt, err := ExprTypeToType(t.GetMapType().GetKeyType()) - if err != nil { - return nil, err - } - vt, err := ExprTypeToType(t.GetMapType().GetValueType()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } return NewMapType(kt, vt), nil -<<<<<<< HEAD case *celpb.Type_MessageType: return NewObjectType(t.GetMessageType()), nil case *celpb.Type_Null: @@ -760,49 +729,21 @@ func ProtoAsType(t *celpb.Type) (*Type, error) { case celpb.Type_STRING: return StringType, nil case celpb.Type_UINT64: -======= - case *exprpb.Type_MessageType: - return NewObjectType(t.GetMessageType()), nil - case *exprpb.Type_Null: - return NullType, nil - case *exprpb.Type_Primitive: - switch t.GetPrimitive() { - case exprpb.Type_BOOL: - return BoolType, nil - case exprpb.Type_BYTES: - return BytesType, nil - case exprpb.Type_DOUBLE: - return DoubleType, nil - case exprpb.Type_INT64: - return IntType, nil - case exprpb.Type_STRING: - return StringType, nil - case exprpb.Type_UINT64: ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return UintType, nil default: return nil, fmt.Errorf("unsupported primitive type: %v", t) } -<<<<<<< HEAD case *celpb.Type_TypeParam: return 
NewTypeParamType(t.GetTypeParam()), nil case *celpb.Type_Type: if t.GetType().GetTypeKind() != nil { p, err := ProtoAsType(t.GetType()) -======= - case *exprpb.Type_TypeParam: - return NewTypeParamType(t.GetTypeParam()), nil - case *exprpb.Type_Type: - if t.GetType().GetTypeKind() != nil { - p, err := ExprTypeToType(t.GetType()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } return NewTypeTypeWithParam(p), nil } return TypeType, nil -<<<<<<< HEAD case *celpb.Type_WellKnown: switch t.GetWellKnown() { case celpb.Type_ANY: @@ -810,42 +751,23 @@ func ProtoAsType(t *celpb.Type) (*Type, error) { case celpb.Type_DURATION: return DurationType, nil case celpb.Type_TIMESTAMP: -======= - case *exprpb.Type_WellKnown: - switch t.GetWellKnown() { - case exprpb.Type_ANY: - return AnyType, nil - case exprpb.Type_DURATION: - return DurationType, nil - case exprpb.Type_TIMESTAMP: ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return TimestampType, nil default: return nil, fmt.Errorf("unsupported well-known type: %v", t) } -<<<<<<< HEAD case *celpb.Type_Wrapper: t, err := ProtoAsType(&celpb.Type{TypeKind: &celpb.Type_Primitive{Primitive: t.GetWrapper()}}) -======= - case *exprpb.Type_Wrapper: - t, err := ExprTypeToType(&exprpb.Type{TypeKind: &exprpb.Type_Primitive{Primitive: t.GetWrapper()}}) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } return NewNullableType(t), nil -<<<<<<< HEAD case *celpb.Type_Error: -======= - case *exprpb.Type_Error: ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ErrorType, nil default: return nil, fmt.Errorf("unsupported type: %v", t) } } -<<<<<<< HEAD // TypeToProto converts from a CEL-native type representation to canonical CEL celpb.Type protobuf type. func TypeToProto(t *Type) (*celpb.Type, error) { exprType, err := TypeToExprType(t) @@ -859,8 +781,6 @@ func TypeToProto(t *Type) (*celpb.Type, error) { return &pbtype, nil } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func maybeWrapper(t *Type, pbType *exprpb.Type) *exprpb.Type { if t.IsAssignableType(NullType) { return chkdecls.NewWrapperType(pbType) @@ -886,7 +806,6 @@ func maybeForeignType(t ref.Type) *Type { return NewObjectType(t.TypeName(), traitMask) } -<<<<<<< HEAD func convertProto(src, dst proto.Message) error { pb, err := proto.Marshal(src) if err != nil { @@ -904,8 +823,6 @@ func primitiveType(primitive celpb.Type_PrimitiveType) *celpb.Type { } } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var ( checkedWellKnowns = map[string]*Type{ // Wrapper types. 
@@ -950,7 +867,6 @@ var ( } structTypeTraitMask = traits.FieldTesterType | traits.IndexerType -<<<<<<< HEAD boolType = primitiveType(celpb.Type_BOOL) bytesType = primitiveType(celpb.Type_BYTES) @@ -958,6 +874,4 @@ var ( intType = primitiveType(celpb.Type_INT64) stringType = primitiveType(celpb.Type_STRING) uintType = primitiveType(celpb.Type_UINT64) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) diff --git a/vendor/github.com/google/cel-go/interpreter/activation.go b/vendor/github.com/google/cel-go/interpreter/activation.go index 45671969c8..c20d19de1b 100644 --- a/vendor/github.com/google/cel-go/interpreter/activation.go +++ b/vendor/github.com/google/cel-go/interpreter/activation.go @@ -17,10 +17,6 @@ package interpreter import ( "errors" "fmt" -<<<<<<< HEAD -======= - "sync" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/google/cel-go/common/types/ref" ) @@ -160,14 +156,11 @@ type PartialActivation interface { UnknownAttributePatterns() []*AttributePattern } -<<<<<<< HEAD // partialActivationConverter indicates whether an Activation implementation supports conversion to a PartialActivation type partialActivationConverter interface { asPartialActivation() (PartialActivation, bool) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // partActivation is the default implementations of the PartialActivation interface. type partActivation struct { Activation @@ -179,7 +172,6 @@ func (a *partActivation) UnknownAttributePatterns() []*AttributePattern { return a.unknowns } -<<<<<<< HEAD // asPartialActivation returns the partActivation as a PartialActivation interface. func (a *partActivation) asPartialActivation() (PartialActivation, bool) { return a, true @@ -196,36 +188,3 @@ func asPartialActivation(vars Activation) (PartialActivation, bool) { } return nil, false } -======= -// varActivation represents a single mutable variable binding. -// -// This activation type should only be used within folds as the fold loop controls the object -// life-cycle. -type varActivation struct { - parent Activation - name string - val ref.Val -} - -// Parent implements the Activation interface method. -func (v *varActivation) Parent() Activation { - return v.parent -} - -// ResolveName implements the Activation interface method. -func (v *varActivation) ResolveName(name string) (any, bool) { - if name == v.name { - return v.val, true - } - return v.parent.ResolveName(name) -} - -var ( - // pool of var activations to reduce allocations during folds. - varActivationPool = &sync.Pool{ - New: func() any { - return &varActivation{} - }, - } -) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go b/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go index 3362580803..7e5c2db0fc 100644 --- a/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go +++ b/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go @@ -178,15 +178,8 @@ func numericValueEquals(value any, celValue ref.Val) bool { // NewPartialAttributeFactory returns an AttributeFactory implementation capable of performing // AttributePattern matches with PartialActivation inputs. -<<<<<<< HEAD func NewPartialAttributeFactory(container *containers.Container, adapter types.Adapter, provider types.Provider, opts ...AttrFactoryOption) AttributeFactory { fac := NewAttributeFactory(container, adapter, provider, opts...) 
-======= -func NewPartialAttributeFactory(container *containers.Container, - adapter types.Adapter, - provider types.Provider) AttributeFactory { - fac := NewAttributeFactory(container, adapter, provider) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return &partialAttributeFactory{ AttributeFactory: fac, container: container, @@ -365,11 +358,7 @@ func (m *attributeMatcher) AddQualifier(qual Qualifier) (Attribute, error) { func (m *attributeMatcher) Resolve(vars Activation) (any, error) { id := m.NamespacedAttribute.ID() // Bug in how partial activation is resolved, should search parents as well. -<<<<<<< HEAD partial, isPartial := asPartialActivation(vars) -======= - partial, isPartial := toPartialActivation(vars) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if isPartial { unk, err := m.fac.matchesUnknownPatterns( partial, @@ -395,17 +384,3 @@ func (m *attributeMatcher) Qualify(vars Activation, obj any) (any, error) { func (m *attributeMatcher) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) { return attrQualifyIfPresent(m.fac, vars, obj, m, presenceOnly) } -<<<<<<< HEAD -======= - -func toPartialActivation(vars Activation) (PartialActivation, bool) { - pv, ok := vars.(PartialActivation) - if ok { - return pv, true - } - if vars.Parent() != nil { - return toPartialActivation(vars.Parent()) - } - return nil, false -} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/google/cel-go/interpreter/attributes.go b/vendor/github.com/google/cel-go/interpreter/attributes.go index bb5df51822..b1b3aacc83 100644 --- a/vendor/github.com/google/cel-go/interpreter/attributes.go +++ b/vendor/github.com/google/cel-go/interpreter/attributes.go @@ -126,7 +126,6 @@ type NamespacedAttribute interface { Qualifiers() []Qualifier } -<<<<<<< HEAD // AttrFactoryOption specifies a functional option for configuring an attribute factory. type AttrFactoryOption func(*attrFactory) *attrFactory @@ -144,35 +143,22 @@ func EnableErrorOnBadPresenceTest(value bool) AttrFactoryOption { // types: bool, int, string, and uint. func NewAttributeFactory(cont *containers.Container, a types.Adapter, p types.Provider, opts ...AttrFactoryOption) AttributeFactory { fac := &attrFactory{ -======= -// NewAttributeFactory returns a default AttributeFactory which is produces Attribute values -// capable of resolving types by simple names and qualify the values using the supported qualifier -// types: bool, int, string, and uint. -func NewAttributeFactory(cont *containers.Container, a types.Adapter, p types.Provider) AttributeFactory { - return &attrFactory{ ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) container: cont, adapter: a, provider: p, } -<<<<<<< HEAD for _, o := range opts { fac = o(fac) } return fac -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type attrFactory struct { container *containers.Container adapter types.Adapter provider types.Provider -<<<<<<< HEAD errorOnBadPresenceTest bool -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // AbsoluteAttribute refers to a variable value and an optional qualifier path. @@ -181,7 +167,6 @@ type attrFactory struct { // resolution rules. 
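NewAttributeFactory and NewPartialAttributeFactory now accept variadic AttrFactoryOption values. A short sketch of threading the new EnableErrorOnBadPresenceTest option through, assuming the vendored cel-go exports used below:

package main

import (
	"github.com/google/cel-go/common/containers"
	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/interpreter"
)

func main() {
	reg, err := types.NewRegistry()
	if err != nil {
		panic(err)
	}
	// Opt in to surfacing an error when a presence test hits a value that
	// supports no field/key access, instead of silently reporting "absent".
	fac := interpreter.NewAttributeFactory(
		containers.DefaultContainer,
		types.DefaultTypeAdapter,
		reg,
		interpreter.EnableErrorOnBadPresenceTest(true),
	)
	_ = fac
}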
func (r *attrFactory) AbsoluteAttribute(id int64, names ...string) NamespacedAttribute { return &absoluteAttribute{ -<<<<<<< HEAD id: id, namespaceNames: names, qualifiers: []Qualifier{}, @@ -189,14 +174,6 @@ func (r *attrFactory) AbsoluteAttribute(id int64, names ...string) NamespacedAtt provider: r.provider, fac: r, errorOnBadPresenceTest: r.errorOnBadPresenceTest, -======= - id: id, - namespaceNames: names, - qualifiers: []Qualifier{}, - adapter: r.adapter, - provider: r.provider, - fac: r, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -230,20 +207,12 @@ func (r *attrFactory) MaybeAttribute(id int64, name string) Attribute { // RelativeAttribute refers to an expression and an optional qualifier path. func (r *attrFactory) RelativeAttribute(id int64, operand Interpretable) Attribute { return &relativeAttribute{ -<<<<<<< HEAD id: id, operand: operand, qualifiers: []Qualifier{}, adapter: r.adapter, fac: r, errorOnBadPresenceTest: r.errorOnBadPresenceTest, -======= - id: id, - operand: operand, - qualifiers: []Qualifier{}, - adapter: r.adapter, - fac: r, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -265,11 +234,7 @@ func (r *attrFactory) NewQualifier(objType *types.Type, qualID int64, val any, o }, nil } } -<<<<<<< HEAD return newQualifier(r.adapter, qualID, val, opt, r.errorOnBadPresenceTest) -======= - return newQualifier(r.adapter, qualID, val, opt) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type absoluteAttribute struct { @@ -281,11 +246,8 @@ type absoluteAttribute struct { adapter types.Adapter provider types.Provider fac AttributeFactory -<<<<<<< HEAD errorOnBadPresenceTest bool -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // ID implements the Attribute interface method. @@ -574,11 +536,8 @@ type relativeAttribute struct { qualifiers []Qualifier adapter types.Adapter fac AttributeFactory -<<<<<<< HEAD errorOnBadPresenceTest bool -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // ID is an implementation of the Attribute interface method. 
@@ -642,11 +601,7 @@ func (a *relativeAttribute) String() string { return fmt.Sprintf("id: %v, operand: %v", a.id, a.operand) } -<<<<<<< HEAD func newQualifier(adapter types.Adapter, id int64, v any, opt, errorOnBadPresenceTest bool) (Qualifier, error) { -======= -func newQualifier(adapter types.Adapter, id int64, v any, opt bool) (Qualifier, error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var qual Qualifier switch val := v.(type) { case Attribute: @@ -661,7 +616,6 @@ func newQualifier(adapter types.Adapter, id int64, v any, opt bool) (Qualifier, }, nil case string: qual = &stringQualifier{ -<<<<<<< HEAD id: id, value: val, celValue: types.String(val), @@ -794,73 +748,6 @@ func newQualifier(adapter types.Adapter, id int64, v any, opt bool) (Qualifier, adapter: adapter, optional: opt, errorOnBadPresenceTest: errorOnBadPresenceTest, -======= - id: id, - value: val, - celValue: types.String(val), - adapter: adapter, - optional: opt, - } - case int: - qual = &intQualifier{ - id: id, value: int64(val), celValue: types.Int(val), adapter: adapter, optional: opt, - } - case int32: - qual = &intQualifier{ - id: id, value: int64(val), celValue: types.Int(val), adapter: adapter, optional: opt, - } - case int64: - qual = &intQualifier{ - id: id, value: val, celValue: types.Int(val), adapter: adapter, optional: opt, - } - case uint: - qual = &uintQualifier{ - id: id, value: uint64(val), celValue: types.Uint(val), adapter: adapter, optional: opt, - } - case uint32: - qual = &uintQualifier{ - id: id, value: uint64(val), celValue: types.Uint(val), adapter: adapter, optional: opt, - } - case uint64: - qual = &uintQualifier{ - id: id, value: val, celValue: types.Uint(val), adapter: adapter, optional: opt, - } - case bool: - qual = &boolQualifier{ - id: id, value: val, celValue: types.Bool(val), adapter: adapter, optional: opt, - } - case float32: - qual = &doubleQualifier{ - id: id, - value: float64(val), - celValue: types.Double(val), - adapter: adapter, - optional: opt, - } - case float64: - qual = &doubleQualifier{ - id: id, value: val, celValue: types.Double(val), adapter: adapter, optional: opt, - } - case types.String: - qual = &stringQualifier{ - id: id, value: string(val), celValue: val, adapter: adapter, optional: opt, - } - case types.Int: - qual = &intQualifier{ - id: id, value: int64(val), celValue: val, adapter: adapter, optional: opt, - } - case types.Uint: - qual = &uintQualifier{ - id: id, value: uint64(val), celValue: val, adapter: adapter, optional: opt, - } - case types.Bool: - qual = &boolQualifier{ - id: id, value: bool(val), celValue: val, adapter: adapter, optional: opt, - } - case types.Double: - qual = &doubleQualifier{ - id: id, value: float64(val), celValue: val, adapter: adapter, optional: opt, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } case *types.Unknown: qual = &unknownQualifier{id: id, value: val} @@ -891,20 +778,12 @@ func (q *attrQualifier) IsOptional() bool { } type stringQualifier struct { -<<<<<<< HEAD id int64 value string celValue ref.Val adapter types.Adapter optional bool errorOnBadPresenceTest bool -======= - id int64 - value string - celValue ref.Val - adapter types.Adapter - optional bool ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // ID is an implementation of the Qualifier interface method. 
@@ -987,11 +866,7 @@ func (q *stringQualifier) qualifyInternal(vars Activation, obj any, presenceTest return obj, true, nil } default: -<<<<<<< HEAD return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest) -======= - return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if presenceTest { return nil, false, nil @@ -1005,20 +880,12 @@ func (q *stringQualifier) Value() ref.Val { } type intQualifier struct { -<<<<<<< HEAD id int64 value int64 celValue ref.Val adapter types.Adapter optional bool errorOnBadPresenceTest bool -======= - id int64 - value int64 - celValue ref.Val - adapter types.Adapter - optional bool ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // ID is an implementation of the Qualifier interface method. @@ -1124,11 +991,7 @@ func (q *intQualifier) qualifyInternal(vars Activation, obj any, presenceTest, p return o[i], true, nil } default: -<<<<<<< HEAD return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest) -======= - return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if presenceTest { return nil, false, nil @@ -1145,20 +1008,12 @@ func (q *intQualifier) Value() ref.Val { } type uintQualifier struct { -<<<<<<< HEAD id int64 value uint64 celValue ref.Val adapter types.Adapter optional bool errorOnBadPresenceTest bool -======= - id int64 - value uint64 - celValue ref.Val - adapter types.Adapter - optional bool ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // ID is an implementation of the Qualifier interface method. @@ -1205,11 +1060,7 @@ func (q *uintQualifier) qualifyInternal(vars Activation, obj any, presenceTest, return obj, true, nil } default: -<<<<<<< HEAD return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest) -======= - return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if presenceTest { return nil, false, nil @@ -1223,20 +1074,12 @@ func (q *uintQualifier) Value() ref.Val { } type boolQualifier struct { -<<<<<<< HEAD id int64 value bool celValue ref.Val adapter types.Adapter optional bool errorOnBadPresenceTest bool -======= - id int64 - value bool - celValue ref.Val - adapter types.Adapter - optional bool ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // ID is an implementation of the Qualifier interface method. @@ -1269,11 +1112,7 @@ func (q *boolQualifier) qualifyInternal(vars Activation, obj any, presenceTest, return obj, true, nil } default: -<<<<<<< HEAD return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest) -======= - return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if presenceTest { return nil, false, nil @@ -1348,20 +1187,12 @@ func (q *fieldQualifier) Value() ref.Val { // type may not be known ahead of time and may not conform to the standard types supported as valid // protobuf map key types. 
type doubleQualifier struct { -<<<<<<< HEAD id int64 value float64 celValue ref.Val adapter types.Adapter optional bool errorOnBadPresenceTest bool -======= - id int64 - value float64 - celValue ref.Val - adapter types.Adapter - optional bool ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // ID is an implementation of the Qualifier interface method. @@ -1385,11 +1216,7 @@ func (q *doubleQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnl } func (q *doubleQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) { -<<<<<<< HEAD return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest) -======= - return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Value implements the ConstantQualifier interface @@ -1495,11 +1322,7 @@ func attrQualifyIfPresent(fac AttributeFactory, vars Activation, obj any, qualAt // refQualify attempts to convert the value to a CEL value and then uses reflection methods to try and // apply the qualifier with the option to presence test field accesses before retrieving field values. -<<<<<<< HEAD func refQualify(adapter types.Adapter, obj any, idx ref.Val, presenceTest, presenceOnly, errorOnBadPresenceTest bool) (ref.Val, bool, error) { -======= -func refQualify(adapter types.Adapter, obj any, idx ref.Val, presenceTest, presenceOnly bool) (ref.Val, bool, error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) celVal := adapter.NativeToValue(obj) switch v := celVal.(type) { case *types.Unknown: @@ -1556,11 +1379,7 @@ func refQualify(adapter types.Adapter, obj any, idx ref.Val, presenceTest, prese } return val, true, nil default: -<<<<<<< HEAD if presenceTest && !errorOnBadPresenceTest { -======= - if presenceTest { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil, false, nil } return nil, false, missingKey(idx) diff --git a/vendor/github.com/google/cel-go/interpreter/interpretable.go b/vendor/github.com/google/cel-go/interpreter/interpretable.go index 7cb5528716..591b7688b7 100644 --- a/vendor/github.com/google/cel-go/interpreter/interpretable.go +++ b/vendor/github.com/google/cel-go/interpreter/interpretable.go @@ -16,10 +16,7 @@ package interpreter import ( "fmt" -<<<<<<< HEAD "sync" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/google/cel-go/common/functions" "github.com/google/cel-go/common/operators" @@ -100,11 +97,7 @@ type InterpretableCall interface { Args() []Interpretable } -<<<<<<< HEAD // InterpretableConstructor interface for inspecting Interpretable instructions that initialize a list, map -======= -// InterpretableConstructor interface for inspecting Interpretable instructions that initialize a list, map ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // or struct. type InterpretableConstructor interface { Interpretable @@ -728,15 +721,11 @@ func (o *evalObj) Eval(ctx Activation) ref.Val { return types.LabelErrNode(o.id, o.provider.NewValue(o.typeName, fieldVals)) } -<<<<<<< HEAD // InitVals implements the InterpretableConstructor interface method. -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (o *evalObj) InitVals() []Interpretable { return o.vals } -<<<<<<< HEAD // Type implements the InterpretableConstructor interface method. 
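The default case of refQualify above now consults the errorOnBadPresenceTest flag that this patch threads through every qualifier. A standalone sketch of that decision, with missingKey stood in by a plain error:

package qualify

import "fmt"

// qualifyFallback mirrors the patched default case: a presence test against
// a value that supports no map, list, or struct access quietly reports
// "absent" under the legacy behavior, and surfaces an error once
// errorOnBadPresenceTest is enabled.
func qualifyFallback(presenceTest, errorOnBadPresenceTest bool, idx any) (found any, present bool, err error) {
	if presenceTest && !errorOnBadPresenceTest {
		return nil, false, nil // legacy: treat the key as not present
	}
	return nil, false, fmt.Errorf("no such key: %v", idx) // stand-in for missingKey(idx)
}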
func (o *evalObj) Type() ref.Type { return types.NewObjectType(o.typeName) @@ -757,22 +746,6 @@ type evalFold struct { // note an exhaustive fold will ensure that all branches are evaluated // when using mutable values, these branches will mutate the final result // rather than make a throw-away computation. -======= -func (o *evalObj) Type() ref.Type { - return types.NewObjectTypeValue(o.typeName) -} - -type evalFold struct { - id int64 - accuVar string - iterVar string - iterRange Interpretable - accu Interpretable - cond Interpretable - step Interpretable - result Interpretable - adapter types.Adapter ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) exhaustive bool interruptable bool } @@ -784,7 +757,6 @@ func (fold *evalFold) ID() int64 { // Eval implements the Interpretable interface method. func (fold *evalFold) Eval(ctx Activation) ref.Val { -<<<<<<< HEAD // Initialize the folder interface f := newFolder(fold, ctx) defer releaseFolder(f) @@ -812,66 +784,6 @@ func (fold *evalFold) Eval(ctx Activation) ref.Val { } iterable := foldRange.(traits.Iterable) return f.foldIterable(iterable) -======= - foldRange := fold.iterRange.Eval(ctx) - if !foldRange.Type().HasTrait(traits.IterableType) { - return types.ValOrErr(foldRange, "got '%T', expected iterable type", foldRange) - } - // Configure the fold activation with the accumulator initial value. - accuCtx := varActivationPool.Get().(*varActivation) - accuCtx.parent = ctx - accuCtx.name = fold.accuVar - accuCtx.val = fold.accu.Eval(ctx) - // If the accumulator starts as an empty list, then the comprehension will build a list - // so create a mutable list to optimize the cost of the inner loop. - l, ok := accuCtx.val.(traits.Lister) - buildingList := false - if !fold.exhaustive && ok && l.Size() == types.IntZero { - buildingList = true - accuCtx.val = types.NewMutableList(fold.adapter) - } - iterCtx := varActivationPool.Get().(*varActivation) - iterCtx.parent = accuCtx - iterCtx.name = fold.iterVar - - interrupted := false - it := foldRange.(traits.Iterable).Iterator() - for it.HasNext() == types.True { - // Modify the iter var in the fold activation. - iterCtx.val = it.Next() - - // Evaluate the condition, terminate the loop if false. - cond := fold.cond.Eval(iterCtx) - condBool, ok := cond.(types.Bool) - if !fold.exhaustive && ok && condBool != types.True { - break - } - // Evaluate the evaluation step into accu var. - accuCtx.val = fold.step.Eval(iterCtx) - if fold.interruptable { - if stop, found := ctx.ResolveName("#interrupted"); found && stop == true { - interrupted = true - break - } - } - } - varActivationPool.Put(iterCtx) - if interrupted { - varActivationPool.Put(accuCtx) - return types.NewErr("operation interrupted") - } - - // Compute the result. - res := fold.result.Eval(accuCtx) - varActivationPool.Put(accuCtx) - // Convert a mutable list to an immutable one, if the comprehension has generated a list as a result. 
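Both the removed inline loop (with its varActivationPool) and the replacement newFolder/releaseFolder pair lean on sync.Pool to recycle per-iteration scratch state. A minimal standalone sketch of that pattern, with illustrative names:

package fold

import "sync"

// scratch stands in for the pooled per-iteration state (a varActivation or
// folder in the real code).
type scratch struct {
	name string
	val  any
}

var scratchPool = sync.Pool{
	New: func() any { return new(scratch) },
}

// withScratch borrows a scratch value, hands it to f, then zeroes and
// returns it so pooled instances never pin stale references.
func withScratch(name string, val any, f func(*scratch)) {
	s := scratchPool.Get().(*scratch)
	s.name, s.val = name, val
	f(s)
	*s = scratch{}
	scratchPool.Put(s)
}

Pooling matters here because comprehensions allocate this state once per loop iteration on a hot evaluation path.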
- if !types.IsUnknownOrError(res) && buildingList { - if _, ok := res.(traits.MutableLister); ok { - res = res.(traits.MutableLister).ToImmutableList() - } - } - return res ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Optional Interpretable implementations that specialize, subsume, or extend the core evaluation @@ -1327,7 +1239,6 @@ func invalidOptionalEntryInit(field any, value ref.Val) ref.Val { func invalidOptionalElementInit(value ref.Val) ref.Val { return types.NewErr("cannot initialize optional list element from non-optional value %v", value) } -<<<<<<< HEAD // newFolder creates or initializes a pooled folder instance. func newFolder(eval *evalFold, ctx Activation) *folder { @@ -1522,5 +1433,3 @@ var ( }, } ) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/google/cel-go/interpreter/planner.go b/vendor/github.com/google/cel-go/interpreter/planner.go index 6323cb1678..f0fd4eaf94 100644 --- a/vendor/github.com/google/cel-go/interpreter/planner.go +++ b/vendor/github.com/google/cel-go/interpreter/planner.go @@ -506,11 +506,7 @@ func (p *planner) planCreateList(expr ast.Expr) (Interpretable, error) { id: expr.ID(), elems: elems, optionals: optionals, -<<<<<<< HEAD hasOptionals: len(optionalIndices) != 0, -======= - hasOptionals: len(optionals) != 0, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) adapter: p.adapter, }, nil } @@ -522,10 +518,7 @@ func (p *planner) planCreateMap(expr ast.Expr) (Interpretable, error) { optionals := make([]bool, len(entries)) keys := make([]Interpretable, len(entries)) vals := make([]Interpretable, len(entries)) -<<<<<<< HEAD hasOptionals := false -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for i, e := range entries { entry := e.AsMapEntry() keyVal, err := p.Plan(entry.Key()) @@ -540,21 +533,14 @@ func (p *planner) planCreateMap(expr ast.Expr) (Interpretable, error) { } vals[i] = valVal optionals[i] = entry.IsOptional() -<<<<<<< HEAD hasOptionals = hasOptionals || entry.IsOptional() -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return &evalMap{ id: expr.ID(), keys: keys, vals: vals, optionals: optionals, -<<<<<<< HEAD hasOptionals: hasOptionals, -======= - hasOptionals: len(optionals) != 0, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) adapter: p.adapter, }, nil } @@ -570,10 +556,7 @@ func (p *planner) planCreateStruct(expr ast.Expr) (Interpretable, error) { optionals := make([]bool, len(objFields)) fields := make([]string, len(objFields)) vals := make([]Interpretable, len(objFields)) -<<<<<<< HEAD hasOptionals := false -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for i, f := range objFields { field := f.AsStructField() fields[i] = field.Name() @@ -583,10 +566,7 @@ func (p *planner) planCreateStruct(expr ast.Expr) (Interpretable, error) { } vals[i] = val optionals[i] = field.IsOptional() -<<<<<<< HEAD hasOptionals = hasOptionals || field.IsOptional() -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return &evalObj{ id: expr.ID(), @@ -594,11 +574,7 @@ func (p *planner) planCreateStruct(expr ast.Expr) (Interpretable, error) { fields: fields, vals: vals, optionals: optionals, -<<<<<<< HEAD hasOptionals: hasOptionals, -======= - hasOptionals: len(optionals) != 0, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) provider: p.provider, }, nil } @@ -631,10 +607,7 @@ func (p *planner) planComprehension(expr ast.Expr) (Interpretable, error) { accuVar: fold.AccuVar(), accu: accu, 
iterVar: fold.IterVar(), -<<<<<<< HEAD iterVar2: fold.IterVar2(), -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) iterRange: iterRange, cond: cond, step: step, diff --git a/vendor/github.com/google/cel-go/interpreter/prune.go b/vendor/github.com/google/cel-go/interpreter/prune.go index ae8345043e..1662c1c1b3 100644 --- a/vendor/github.com/google/cel-go/interpreter/prune.go +++ b/vendor/github.com/google/cel-go/interpreter/prune.go @@ -88,11 +88,7 @@ func PruneAst(expr ast.Expr, macroCalls map[int64]ast.Expr, state EvalState) *as func (p *astPruner) maybeCreateLiteral(id int64, val ref.Val) (ast.Expr, bool) { switch v := val.(type) { -<<<<<<< HEAD case types.Bool, types.Bytes, types.Double, types.Int, types.Null, types.String, types.Uint, *types.Optional: -======= - case types.Bool, types.Bytes, types.Double, types.Int, types.Null, types.String, types.Uint: ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.state.SetValue(id, val) return p.NewLiteral(id, val), true case types.Duration: @@ -285,7 +281,6 @@ func (p *astPruner) prune(node ast.Expr) (ast.Expr, bool) { } if macro, found := p.macroCalls[node.ID()]; found { // Ensure that intermediate values for the comprehension are cleared during pruning -<<<<<<< HEAD pruneMacroCall := node.Kind() != ast.UnspecifiedExprKind if node.Kind() == ast.ComprehensionKind { // Only prune cel.bind() calls since the variables of the comprehension are all @@ -309,15 +304,6 @@ func (p *astPruner) prune(node ast.Expr) (ast.Expr, bool) { p.macroCalls[node.ID()] = macro } } -======= - if node.Kind() == ast.ComprehensionKind { - compre := node.AsComprehension() - visit(macro, clearIterVarVisitor(compre.IterVar(), p.state)) - } - // prune the expression in terms of the macro call instead of the expanded form. - if newMacro, pruned := p.prune(macro); pruned { - p.macroCalls[node.ID()] = newMacro ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -451,7 +437,6 @@ func (p *astPruner) prune(node ast.Expr) (ast.Expr, bool) { // the last iteration of the comprehension and not each step in the evaluation which // means that the any residuals computed in between might be inaccurate. 
if newRange, pruned := p.maybePrune(compre.IterRange()); pruned { -<<<<<<< HEAD if compre.HasIterVar2() { return p.NewComprehensionTwoVar( node.ID(), @@ -465,8 +450,6 @@ func (p *astPruner) prune(node ast.Expr) (ast.Expr, bool) { compre.Result(), ), true } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return p.NewComprehension( node.ID(), newRange, @@ -514,19 +497,6 @@ func getMaxID(expr ast.Expr) int64 { return maxID } -<<<<<<< HEAD -======= -func clearIterVarVisitor(varName string, state EvalState) astVisitor { - return astVisitor{ - visitExpr: func(e ast.Expr) { - if e.Kind() == ast.IdentKind && e.AsIdent() == varName { - state.SetValue(e.ID(), nil) - } - }, - } -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func maxIDVisitor(maxID *int64) astVisitor { return astVisitor{ visitExpr: func(e ast.Expr) { @@ -590,7 +560,6 @@ func visit(expr ast.Expr, visitor astVisitor) { } } } -<<<<<<< HEAD func isCelBindMacro(macro ast.Expr) bool { if macro.Kind() != ast.CallKind { @@ -603,5 +572,3 @@ func isCelBindMacro(macro ast.Expr) bool { target.Kind() == ast.IdentKind && target.AsIdent() == "cel" } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/google/cel-go/interpreter/runtimecost.go b/vendor/github.com/google/cel-go/interpreter/runtimecost.go index 6f65f4af6d..8f47c53d28 100644 --- a/vendor/github.com/google/cel-go/interpreter/runtimecost.go +++ b/vendor/github.com/google/cel-go/interpreter/runtimecost.go @@ -198,35 +198,20 @@ func (c *CostTracker) costCall(call InterpretableCall, args []ref.Val, result re switch call.OverloadID() { // O(n) functions case overloads.StartsWithString, overloads.EndsWithString, overloads.StringToBytes, overloads.BytesToString, overloads.ExtQuoteString, overloads.ExtFormatString: -<<<<<<< HEAD cost += uint64(math.Ceil(float64(actualSize(args[0])) * common.StringTraversalCostFactor)) case overloads.InList: // If a list is composed entirely of constant values this is O(1), but we don't account for that here. // We just assume all list containment checks are O(n). cost += actualSize(args[1]) -======= - cost += uint64(math.Ceil(float64(c.actualSize(args[0])) * common.StringTraversalCostFactor)) - case overloads.InList: - // If a list is composed entirely of constant values this is O(1), but we don't account for that here. - // We just assume all list containment checks are O(n). - cost += c.actualSize(args[1]) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // O(min(m, n)) functions case overloads.LessString, overloads.GreaterString, overloads.LessEqualsString, overloads.GreaterEqualsString, overloads.LessBytes, overloads.GreaterBytes, overloads.LessEqualsBytes, overloads.GreaterEqualsBytes, overloads.Equals, overloads.NotEquals: // When we check the equality of 2 scalar values (e.g. 2 integers, 2 floating-point numbers, 2 booleans etc.), -<<<<<<< HEAD // the CostTracker.ActualSize() function by definition returns 1 for each operand, resulting in an overall cost // of 1. lhsSize := actualSize(args[0]) rhsSize := actualSize(args[1]) -======= - // the CostTracker.actualSize() function by definition returns 1 for each operand, resulting in an overall cost - // of 1. 
- lhsSize := c.actualSize(args[0]) - rhsSize := c.actualSize(args[1]) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) minSize := lhsSize if rhsSize < minSize { minSize = rhsSize @@ -235,39 +220,23 @@ func (c *CostTracker) costCall(call InterpretableCall, args []ref.Val, result re // O(m+n) functions case overloads.AddString, overloads.AddBytes: // In the worst case scenario, we would need to reallocate a new backing store and copy both operands over. -<<<<<<< HEAD cost += uint64(math.Ceil(float64(actualSize(args[0])+actualSize(args[1])) * common.StringTraversalCostFactor)) -======= - cost += uint64(math.Ceil(float64(c.actualSize(args[0])+c.actualSize(args[1])) * common.StringTraversalCostFactor)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // O(nm) functions case overloads.MatchesString: // https://swtch.com/~rsc/regexp/regexp1.html applies to RE2 implementation supported by CEL // Add one to string length for purposes of cost calculation to prevent product of string and regex to be 0 // in case where string is empty but regex is still expensive. -<<<<<<< HEAD strCost := uint64(math.Ceil((1.0 + float64(actualSize(args[0]))) * common.StringTraversalCostFactor)) -======= - strCost := uint64(math.Ceil((1.0 + float64(c.actualSize(args[0]))) * common.StringTraversalCostFactor)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // We don't know how many expressions are in the regex, just the string length (a huge // improvement here would be to somehow get a count the number of expressions in the regex or // how many states are in the regex state machine and use that to measure regex cost). // For now, we're making a guess that each expression in a regex is typically at least 4 chars // in length. -<<<<<<< HEAD regexCost := uint64(math.Ceil(float64(actualSize(args[1])) * common.RegexStringLengthCostFactor)) cost += strCost * regexCost case overloads.ContainsString: strCost := uint64(math.Ceil(float64(actualSize(args[0])) * common.StringTraversalCostFactor)) substrCost := uint64(math.Ceil(float64(actualSize(args[1])) * common.StringTraversalCostFactor)) -======= - regexCost := uint64(math.Ceil(float64(c.actualSize(args[1])) * common.RegexStringLengthCostFactor)) - cost += strCost * regexCost - case overloads.ContainsString: - strCost := uint64(math.Ceil(float64(c.actualSize(args[0])) * common.StringTraversalCostFactor)) - substrCost := uint64(math.Ceil(float64(c.actualSize(args[1])) * common.StringTraversalCostFactor)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) cost += strCost * substrCost default: @@ -284,7 +253,6 @@ func (c *CostTracker) costCall(call InterpretableCall, args []ref.Val, result re return cost } -<<<<<<< HEAD // actualSize returns the size of the value for all traits.Sizer values, a fixed size for all proto-based // objects, and a size of 1 for all other value types. 
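actualSize is now a package-level helper (rather than a CostTracker method) that also unwraps non-empty optionals before sizing. A simplified sketch of the sizing rule and of how the O(n) string costs above consume it; per the doc comment, the real helper additionally fixes a size for proto-backed objects:

package cost

import (
	"math"

	"github.com/google/cel-go/common"
	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/ref"
	"github.com/google/cel-go/common/types/traits"
)

// sizeOf mirrors actualSize: Sizer values report their own size, non-empty
// optionals defer to the contained value, and everything else counts as 1.
func sizeOf(v ref.Val) uint64 {
	if sz, ok := v.(traits.Sizer); ok {
		return uint64(sz.Size().(types.Int))
	}
	if opt, ok := v.(*types.Optional); ok && opt.HasValue() {
		return sizeOf(opt.GetValue())
	}
	return 1
}

// stringCost applies the O(n) rule used for startsWith, endsWith, and the
// other string traversals in costCall.
func stringCost(s ref.Val) uint64 {
	return uint64(math.Ceil(float64(sizeOf(s)) * common.StringTraversalCostFactor))
}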
func actualSize(value ref.Val) uint64 { @@ -294,13 +262,6 @@ func actualSize(value ref.Val) uint64 { if opt, ok := value.(*types.Optional); ok && opt.HasValue() { return actualSize(opt.GetValue()) } -======= -// actualSize returns the size of value -func (c *CostTracker) actualSize(value ref.Val) uint64 { - if sz, ok := value.(traits.Sizer); ok { - return uint64(sz.Size().(types.Int)) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return 1 } diff --git a/vendor/github.com/google/cel-go/parser/errors.go b/vendor/github.com/google/cel-go/parser/errors.go index 78c39701ca..c3cec01a8d 100644 --- a/vendor/github.com/google/cel-go/parser/errors.go +++ b/vendor/github.com/google/cel-go/parser/errors.go @@ -15,11 +15,6 @@ package parser import ( -<<<<<<< HEAD -======= - "fmt" - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/google/cel-go/common" ) @@ -34,19 +29,11 @@ func (e *parseErrors) errorCount() int { } func (e *parseErrors) internalError(message string) { -<<<<<<< HEAD e.errs.ReportErrorAtID(0, common.NoLocation, "%s", message) } func (e *parseErrors) syntaxError(l common.Location, message string) { e.errs.ReportErrorAtID(0, l, "Syntax error: %s", message) -======= - e.errs.ReportErrorAtID(0, common.NoLocation, message) -} - -func (e *parseErrors) syntaxError(l common.Location, message string) { - e.errs.ReportErrorAtID(0, l, fmt.Sprintf("Syntax error: %s", message)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (e *parseErrors) reportErrorAtID(id int64, l common.Location, message string, args ...any) { diff --git a/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel b/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel index 21dc947902..3efed87b70 100644 --- a/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel +++ b/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel @@ -1,11 +1,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") package( -<<<<<<< HEAD default_visibility = ["//:__subpackages__"], -======= - default_visibility = ["//parser:__subpackages__"], ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) licenses = ["notice"], # Apache 2.0 ) diff --git a/vendor/github.com/google/cel-go/parser/gen/CEL.g4 b/vendor/github.com/google/cel-go/parser/gen/CEL.g4 index 66f41b4096..ee53a844bd 100644 --- a/vendor/github.com/google/cel-go/parser/gen/CEL.g4 +++ b/vendor/github.com/google/cel-go/parser/gen/CEL.g4 @@ -52,22 +52,14 @@ unary member : primary # PrimaryExpr -<<<<<<< HEAD | member op='.' (opt='?')? id=escapeIdent # Select -======= - | member op='.' (opt='?')? id=IDENTIFIER # Select ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) | member op='.' id=IDENTIFIER open='(' args=exprList? ')' # MemberCall | member op='[' (opt='?')? index=expr ']' # Index ; primary -<<<<<<< HEAD : leadingDot='.'? id=IDENTIFIER # Ident | leadingDot='.'? id=IDENTIFIER (op='(' args=exprList? ')') # GlobalCall -======= - : leadingDot='.'? id=IDENTIFIER (op='(' args=exprList? ')')? # IdentOrGlobalCall ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) | '(' e=expr ')' # Nested | op='[' elems=listInit? ','? ']' # CreateList | op='{' entries=mapInitializerList? ','? '}' # CreateStruct @@ -89,25 +81,18 @@ fieldInitializerList ; optField -<<<<<<< HEAD : (opt='?')? escapeIdent -======= - : (opt='?')? 
IDENTIFIER ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ; mapInitializerList : keys+=optExpr cols+=':' values+=expr (',' keys+=optExpr cols+=':' values+=expr)* ; -<<<<<<< HEAD escapeIdent : id=IDENTIFIER # SimpleIdentifier | id=ESC_IDENTIFIER # EscapedIdentifier ; -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) optExpr : (opt='?')? e=expr ; @@ -219,7 +204,4 @@ STRING BYTES : ('b' | 'B') STRING; IDENTIFIER : (LETTER | '_') ( LETTER | DIGIT | '_')*; -<<<<<<< HEAD -ESC_IDENTIFIER : '`' (LETTER | DIGIT | '_' | '.' | '-' | '/' | ' ')+ '`'; -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) +ESC_IDENTIFIER : '`' (LETTER | DIGIT | '_' | '.' | '-' | '/' | ' ')+ '`'; \ No newline at end of file diff --git a/vendor/github.com/google/cel-go/parser/gen/CEL.interp b/vendor/github.com/google/cel-go/parser/gen/CEL.interp index d978c626bd..e085bab574 100644 --- a/vendor/github.com/google/cel-go/parser/gen/CEL.interp +++ b/vendor/github.com/google/cel-go/parser/gen/CEL.interp @@ -36,10 +36,7 @@ null null null null -<<<<<<< HEAD null -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) token symbolic names: null @@ -79,10 +76,7 @@ NUM_UINT STRING BYTES IDENTIFIER -<<<<<<< HEAD ESC_IDENTIFIER -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) rule names: start @@ -99,17 +93,10 @@ listInit fieldInitializerList optField mapInitializerList -<<<<<<< HEAD escapeIdent -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) optExpr literal atn: -<<<<<<< HEAD -[4, 1, 37, 259, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 44, 8, 1, 1, 2, 1, 2, 1, 2, 5, 2, 49, 8, 2, 10, 2, 12, 2, 52, 9, 2, 1, 3, 1, 3, 1, 3, 5, 3, 57, 8, 3, 10, 3, 12, 3, 60, 9, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 5, 4, 68, 8, 4, 10, 4, 12, 4, 71, 9, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 82, 8, 5, 10, 5, 12, 5, 85, 9, 5, 1, 6, 1, 6, 4, 6, 89, 8, 6, 11, 6, 12, 6, 90, 1, 6, 1, 6, 4, 6, 95, 8, 6, 11, 6, 12, 6, 96, 1, 6, 3, 6, 100, 8, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 108, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 116, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 122, 8, 7, 1, 7, 1, 7, 1, 7, 5, 7, 127, 8, 7, 10, 7, 12, 7, 130, 9, 7, 1, 8, 3, 8, 133, 8, 8, 1, 8, 1, 8, 3, 8, 137, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 142, 8, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8, 151, 8, 8, 1, 8, 3, 8, 154, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 159, 8, 8, 1, 8, 3, 8, 162, 8, 8, 1, 8, 1, 8, 3, 8, 166, 8, 8, 1, 8, 1, 8, 1, 8, 5, 8, 171, 8, 8, 10, 8, 12, 8, 174, 9, 8, 1, 8, 1, 8, 3, 8, 178, 8, 8, 1, 8, 3, 8, 181, 8, 8, 1, 8, 1, 8, 3, 8, 185, 8, 8, 1, 9, 1, 9, 1, 9, 5, 9, 190, 8, 9, 10, 9, 12, 9, 193, 9, 9, 1, 10, 1, 10, 1, 10, 5, 10, 198, 8, 10, 10, 10, 12, 10, 201, 9, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 5, 11, 211, 8, 11, 10, 11, 12, 11, 214, 9, 11, 1, 12, 3, 12, 217, 8, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 5, 13, 229, 8, 13, 10, 13, 12, 13, 232, 9, 13, 1, 14, 1, 14, 3, 14, 236, 8, 14, 1, 15, 3, 15, 239, 8, 15, 1, 15, 1, 15, 1, 16, 3, 16, 244, 8, 16, 1, 16, 1, 16, 1, 16, 3, 16, 249, 8, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 3, 16, 257, 8, 16, 1, 16, 0, 3, 8, 10, 14, 17, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 0, 3, 1, 0, 1, 7, 1, 0, 23, 25, 
2, 0, 18, 18, 22, 22, 290, 0, 34, 1, 0, 0, 0, 2, 37, 1, 0, 0, 0, 4, 45, 1, 0, 0, 0, 6, 53, 1, 0, 0, 0, 8, 61, 1, 0, 0, 0, 10, 72, 1, 0, 0, 0, 12, 99, 1, 0, 0, 0, 14, 101, 1, 0, 0, 0, 16, 184, 1, 0, 0, 0, 18, 186, 1, 0, 0, 0, 20, 194, 1, 0, 0, 0, 22, 202, 1, 0, 0, 0, 24, 216, 1, 0, 0, 0, 26, 220, 1, 0, 0, 0, 28, 235, 1, 0, 0, 0, 30, 238, 1, 0, 0, 0, 32, 256, 1, 0, 0, 0, 34, 35, 3, 2, 1, 0, 35, 36, 5, 0, 0, 1, 36, 1, 1, 0, 0, 0, 37, 43, 3, 4, 2, 0, 38, 39, 5, 20, 0, 0, 39, 40, 3, 4, 2, 0, 40, 41, 5, 21, 0, 0, 41, 42, 3, 2, 1, 0, 42, 44, 1, 0, 0, 0, 43, 38, 1, 0, 0, 0, 43, 44, 1, 0, 0, 0, 44, 3, 1, 0, 0, 0, 45, 50, 3, 6, 3, 0, 46, 47, 5, 9, 0, 0, 47, 49, 3, 6, 3, 0, 48, 46, 1, 0, 0, 0, 49, 52, 1, 0, 0, 0, 50, 48, 1, 0, 0, 0, 50, 51, 1, 0, 0, 0, 51, 5, 1, 0, 0, 0, 52, 50, 1, 0, 0, 0, 53, 58, 3, 8, 4, 0, 54, 55, 5, 8, 0, 0, 55, 57, 3, 8, 4, 0, 56, 54, 1, 0, 0, 0, 57, 60, 1, 0, 0, 0, 58, 56, 1, 0, 0, 0, 58, 59, 1, 0, 0, 0, 59, 7, 1, 0, 0, 0, 60, 58, 1, 0, 0, 0, 61, 62, 6, 4, -1, 0, 62, 63, 3, 10, 5, 0, 63, 69, 1, 0, 0, 0, 64, 65, 10, 1, 0, 0, 65, 66, 7, 0, 0, 0, 66, 68, 3, 8, 4, 2, 67, 64, 1, 0, 0, 0, 68, 71, 1, 0, 0, 0, 69, 67, 1, 0, 0, 0, 69, 70, 1, 0, 0, 0, 70, 9, 1, 0, 0, 0, 71, 69, 1, 0, 0, 0, 72, 73, 6, 5, -1, 0, 73, 74, 3, 12, 6, 0, 74, 83, 1, 0, 0, 0, 75, 76, 10, 2, 0, 0, 76, 77, 7, 1, 0, 0, 77, 82, 3, 10, 5, 3, 78, 79, 10, 1, 0, 0, 79, 80, 7, 2, 0, 0, 80, 82, 3, 10, 5, 2, 81, 75, 1, 0, 0, 0, 81, 78, 1, 0, 0, 0, 82, 85, 1, 0, 0, 0, 83, 81, 1, 0, 0, 0, 83, 84, 1, 0, 0, 0, 84, 11, 1, 0, 0, 0, 85, 83, 1, 0, 0, 0, 86, 100, 3, 14, 7, 0, 87, 89, 5, 19, 0, 0, 88, 87, 1, 0, 0, 0, 89, 90, 1, 0, 0, 0, 90, 88, 1, 0, 0, 0, 90, 91, 1, 0, 0, 0, 91, 92, 1, 0, 0, 0, 92, 100, 3, 14, 7, 0, 93, 95, 5, 18, 0, 0, 94, 93, 1, 0, 0, 0, 95, 96, 1, 0, 0, 0, 96, 94, 1, 0, 0, 0, 96, 97, 1, 0, 0, 0, 97, 98, 1, 0, 0, 0, 98, 100, 3, 14, 7, 0, 99, 86, 1, 0, 0, 0, 99, 88, 1, 0, 0, 0, 99, 94, 1, 0, 0, 0, 100, 13, 1, 0, 0, 0, 101, 102, 6, 7, -1, 0, 102, 103, 3, 16, 8, 0, 103, 128, 1, 0, 0, 0, 104, 105, 10, 3, 0, 0, 105, 107, 5, 16, 0, 0, 106, 108, 5, 20, 0, 0, 107, 106, 1, 0, 0, 0, 107, 108, 1, 0, 0, 0, 108, 109, 1, 0, 0, 0, 109, 127, 3, 28, 14, 0, 110, 111, 10, 2, 0, 0, 111, 112, 5, 16, 0, 0, 112, 113, 5, 36, 0, 0, 113, 115, 5, 14, 0, 0, 114, 116, 3, 18, 9, 0, 115, 114, 1, 0, 0, 0, 115, 116, 1, 0, 0, 0, 116, 117, 1, 0, 0, 0, 117, 127, 5, 15, 0, 0, 118, 119, 10, 1, 0, 0, 119, 121, 5, 10, 0, 0, 120, 122, 5, 20, 0, 0, 121, 120, 1, 0, 0, 0, 121, 122, 1, 0, 0, 0, 122, 123, 1, 0, 0, 0, 123, 124, 3, 2, 1, 0, 124, 125, 5, 11, 0, 0, 125, 127, 1, 0, 0, 0, 126, 104, 1, 0, 0, 0, 126, 110, 1, 0, 0, 0, 126, 118, 1, 0, 0, 0, 127, 130, 1, 0, 0, 0, 128, 126, 1, 0, 0, 0, 128, 129, 1, 0, 0, 0, 129, 15, 1, 0, 0, 0, 130, 128, 1, 0, 0, 0, 131, 133, 5, 16, 0, 0, 132, 131, 1, 0, 0, 0, 132, 133, 1, 0, 0, 0, 133, 134, 1, 0, 0, 0, 134, 185, 5, 36, 0, 0, 135, 137, 5, 16, 0, 0, 136, 135, 1, 0, 0, 0, 136, 137, 1, 0, 0, 0, 137, 138, 1, 0, 0, 0, 138, 139, 5, 36, 0, 0, 139, 141, 5, 14, 0, 0, 140, 142, 3, 18, 9, 0, 141, 140, 1, 0, 0, 0, 141, 142, 1, 0, 0, 0, 142, 143, 1, 0, 0, 0, 143, 185, 5, 15, 0, 0, 144, 145, 5, 14, 0, 0, 145, 146, 3, 2, 1, 0, 146, 147, 5, 15, 0, 0, 147, 185, 1, 0, 0, 0, 148, 150, 5, 10, 0, 0, 149, 151, 3, 20, 10, 0, 150, 149, 1, 0, 0, 0, 150, 151, 1, 0, 0, 0, 151, 153, 1, 0, 0, 0, 152, 154, 5, 17, 0, 0, 153, 152, 1, 0, 0, 0, 153, 154, 1, 0, 0, 0, 154, 155, 1, 0, 0, 0, 155, 185, 5, 11, 0, 0, 156, 158, 5, 12, 0, 0, 157, 159, 3, 26, 13, 0, 158, 157, 1, 0, 0, 0, 158, 159, 1, 0, 0, 0, 159, 161, 1, 0, 0, 0, 160, 162, 5, 17, 0, 0, 161, 160, 
1, 0, 0, 0, 161, 162, 1, 0, 0, 0, 162, 163, 1, 0, 0, 0, 163, 185, 5, 13, 0, 0, 164, 166, 5, 16, 0, 0, 165, 164, 1, 0, 0, 0, 165, 166, 1, 0, 0, 0, 166, 167, 1, 0, 0, 0, 167, 172, 5, 36, 0, 0, 168, 169, 5, 16, 0, 0, 169, 171, 5, 36, 0, 0, 170, 168, 1, 0, 0, 0, 171, 174, 1, 0, 0, 0, 172, 170, 1, 0, 0, 0, 172, 173, 1, 0, 0, 0, 173, 175, 1, 0, 0, 0, 174, 172, 1, 0, 0, 0, 175, 177, 5, 12, 0, 0, 176, 178, 3, 22, 11, 0, 177, 176, 1, 0, 0, 0, 177, 178, 1, 0, 0, 0, 178, 180, 1, 0, 0, 0, 179, 181, 5, 17, 0, 0, 180, 179, 1, 0, 0, 0, 180, 181, 1, 0, 0, 0, 181, 182, 1, 0, 0, 0, 182, 185, 5, 13, 0, 0, 183, 185, 3, 32, 16, 0, 184, 132, 1, 0, 0, 0, 184, 136, 1, 0, 0, 0, 184, 144, 1, 0, 0, 0, 184, 148, 1, 0, 0, 0, 184, 156, 1, 0, 0, 0, 184, 165, 1, 0, 0, 0, 184, 183, 1, 0, 0, 0, 185, 17, 1, 0, 0, 0, 186, 191, 3, 2, 1, 0, 187, 188, 5, 17, 0, 0, 188, 190, 3, 2, 1, 0, 189, 187, 1, 0, 0, 0, 190, 193, 1, 0, 0, 0, 191, 189, 1, 0, 0, 0, 191, 192, 1, 0, 0, 0, 192, 19, 1, 0, 0, 0, 193, 191, 1, 0, 0, 0, 194, 199, 3, 30, 15, 0, 195, 196, 5, 17, 0, 0, 196, 198, 3, 30, 15, 0, 197, 195, 1, 0, 0, 0, 198, 201, 1, 0, 0, 0, 199, 197, 1, 0, 0, 0, 199, 200, 1, 0, 0, 0, 200, 21, 1, 0, 0, 0, 201, 199, 1, 0, 0, 0, 202, 203, 3, 24, 12, 0, 203, 204, 5, 21, 0, 0, 204, 212, 3, 2, 1, 0, 205, 206, 5, 17, 0, 0, 206, 207, 3, 24, 12, 0, 207, 208, 5, 21, 0, 0, 208, 209, 3, 2, 1, 0, 209, 211, 1, 0, 0, 0, 210, 205, 1, 0, 0, 0, 211, 214, 1, 0, 0, 0, 212, 210, 1, 0, 0, 0, 212, 213, 1, 0, 0, 0, 213, 23, 1, 0, 0, 0, 214, 212, 1, 0, 0, 0, 215, 217, 5, 20, 0, 0, 216, 215, 1, 0, 0, 0, 216, 217, 1, 0, 0, 0, 217, 218, 1, 0, 0, 0, 218, 219, 3, 28, 14, 0, 219, 25, 1, 0, 0, 0, 220, 221, 3, 30, 15, 0, 221, 222, 5, 21, 0, 0, 222, 230, 3, 2, 1, 0, 223, 224, 5, 17, 0, 0, 224, 225, 3, 30, 15, 0, 225, 226, 5, 21, 0, 0, 226, 227, 3, 2, 1, 0, 227, 229, 1, 0, 0, 0, 228, 223, 1, 0, 0, 0, 229, 232, 1, 0, 0, 0, 230, 228, 1, 0, 0, 0, 230, 231, 1, 0, 0, 0, 231, 27, 1, 0, 0, 0, 232, 230, 1, 0, 0, 0, 233, 236, 5, 36, 0, 0, 234, 236, 5, 37, 0, 0, 235, 233, 1, 0, 0, 0, 235, 234, 1, 0, 0, 0, 236, 29, 1, 0, 0, 0, 237, 239, 5, 20, 0, 0, 238, 237, 1, 0, 0, 0, 238, 239, 1, 0, 0, 0, 239, 240, 1, 0, 0, 0, 240, 241, 3, 2, 1, 0, 241, 31, 1, 0, 0, 0, 242, 244, 5, 18, 0, 0, 243, 242, 1, 0, 0, 0, 243, 244, 1, 0, 0, 0, 244, 245, 1, 0, 0, 0, 245, 257, 5, 32, 0, 0, 246, 257, 5, 33, 0, 0, 247, 249, 5, 18, 0, 0, 248, 247, 1, 0, 0, 0, 248, 249, 1, 0, 0, 0, 249, 250, 1, 0, 0, 0, 250, 257, 5, 31, 0, 0, 251, 257, 5, 34, 0, 0, 252, 257, 5, 35, 0, 0, 253, 257, 5, 26, 0, 0, 254, 257, 5, 27, 0, 0, 255, 257, 5, 28, 0, 0, 256, 243, 1, 0, 0, 0, 256, 246, 1, 0, 0, 0, 256, 248, 1, 0, 0, 0, 256, 251, 1, 0, 0, 0, 256, 252, 1, 0, 0, 0, 256, 253, 1, 0, 0, 0, 256, 254, 1, 0, 0, 0, 256, 255, 1, 0, 0, 0, 257, 33, 1, 0, 0, 0, 36, 43, 50, 58, 69, 81, 83, 90, 96, 99, 107, 115, 121, 126, 128, 132, 136, 141, 150, 153, 158, 161, 165, 172, 177, 180, 184, 191, 199, 212, 216, 230, 235, 238, 243, 248, 256]
-=======
-[4, 1, 36, 251, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 42, 8, 1, 1, 2, 1, 2, 1, 2, 5, 2, 47, 8, 2, 10, 2, 12, 2, 50, 9, 2, 1, 3, 1, 3, 1, 3, 5, 3, 55, 8, 3, 10, 3, 12, 3, 58, 9, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 5, 4, 66, 8, 4, 10, 4, 12, 4, 69, 9, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 80, 8, 5, 10, 5, 12, 5, 83, 9, 5, 1, 6, 1, 6, 4, 6, 87, 8, 6, 11, 6,
12, 6, 88, 1, 6, 1, 6, 4, 6, 93, 8, 6, 11, 6, 12, 6, 94, 1, 6, 3, 6, 98, 8, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 106, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 114, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 120, 8, 7, 1, 7, 1, 7, 1, 7, 5, 7, 125, 8, 7, 10, 7, 12, 7, 128, 9, 7, 1, 8, 3, 8, 131, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 136, 8, 8, 1, 8, 3, 8, 139, 8, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8, 147, 8, 8, 1, 8, 3, 8, 150, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 155, 8, 8, 1, 8, 3, 8, 158, 8, 8, 1, 8, 1, 8, 3, 8, 162, 8, 8, 1, 8, 1, 8, 1, 8, 5, 8, 167, 8, 8, 10, 8, 12, 8, 170, 9, 8, 1, 8, 1, 8, 3, 8, 174, 8, 8, 1, 8, 3, 8, 177, 8, 8, 1, 8, 1, 8, 3, 8, 181, 8, 8, 1, 9, 1, 9, 1, 9, 5, 9, 186, 8, 9, 10, 9, 12, 9, 189, 9, 9, 1, 10, 1, 10, 1, 10, 5, 10, 194, 8, 10, 10, 10, 12, 10, 197, 9, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 5, 11, 207, 8, 11, 10, 11, 12, 11, 210, 9, 11, 1, 12, 3, 12, 213, 8, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 5, 13, 225, 8, 13, 10, 13, 12, 13, 228, 9, 13, 1, 14, 3, 14, 231, 8, 14, 1, 14, 1, 14, 1, 15, 3, 15, 236, 8, 15, 1, 15, 1, 15, 1, 15, 3, 15, 241, 8, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 3, 15, 249, 8, 15, 1, 15, 0, 3, 8, 10, 14, 16, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 0, 3, 1, 0, 1, 7, 1, 0, 23, 25, 2, 0, 18, 18, 22, 22, 281, 0, 32, 1, 0, 0, 0, 2, 35, 1, 0, 0, 0, 4, 43, 1, 0, 0, 0, 6, 51, 1, 0, 0, 0, 8, 59, 1, 0, 0, 0, 10, 70, 1, 0, 0, 0, 12, 97, 1, 0, 0, 0, 14, 99, 1, 0, 0, 0, 16, 180, 1, 0, 0, 0, 18, 182, 1, 0, 0, 0, 20, 190, 1, 0, 0, 0, 22, 198, 1, 0, 0, 0, 24, 212, 1, 0, 0, 0, 26, 216, 1, 0, 0, 0, 28, 230, 1, 0, 0, 0, 30, 248, 1, 0, 0, 0, 32, 33, 3, 2, 1, 0, 33, 34, 5, 0, 0, 1, 34, 1, 1, 0, 0, 0, 35, 41, 3, 4, 2, 0, 36, 37, 5, 20, 0, 0, 37, 38, 3, 4, 2, 0, 38, 39, 5, 21, 0, 0, 39, 40, 3, 2, 1, 0, 40, 42, 1, 0, 0, 0, 41, 36, 1, 0, 0, 0, 41, 42, 1, 0, 0, 0, 42, 3, 1, 0, 0, 0, 43, 48, 3, 6, 3, 0, 44, 45, 5, 9, 0, 0, 45, 47, 3, 6, 3, 0, 46, 44, 1, 0, 0, 0, 47, 50, 1, 0, 0, 0, 48, 46, 1, 0, 0, 0, 48, 49, 1, 0, 0, 0, 49, 5, 1, 0, 0, 0, 50, 48, 1, 0, 0, 0, 51, 56, 3, 8, 4, 0, 52, 53, 5, 8, 0, 0, 53, 55, 3, 8, 4, 0, 54, 52, 1, 0, 0, 0, 55, 58, 1, 0, 0, 0, 56, 54, 1, 0, 0, 0, 56, 57, 1, 0, 0, 0, 57, 7, 1, 0, 0, 0, 58, 56, 1, 0, 0, 0, 59, 60, 6, 4, -1, 0, 60, 61, 3, 10, 5, 0, 61, 67, 1, 0, 0, 0, 62, 63, 10, 1, 0, 0, 63, 64, 7, 0, 0, 0, 64, 66, 3, 8, 4, 2, 65, 62, 1, 0, 0, 0, 66, 69, 1, 0, 0, 0, 67, 65, 1, 0, 0, 0, 67, 68, 1, 0, 0, 0, 68, 9, 1, 0, 0, 0, 69, 67, 1, 0, 0, 0, 70, 71, 6, 5, -1, 0, 71, 72, 3, 12, 6, 0, 72, 81, 1, 0, 0, 0, 73, 74, 10, 2, 0, 0, 74, 75, 7, 1, 0, 0, 75, 80, 3, 10, 5, 3, 76, 77, 10, 1, 0, 0, 77, 78, 7, 2, 0, 0, 78, 80, 3, 10, 5, 2, 79, 73, 1, 0, 0, 0, 79, 76, 1, 0, 0, 0, 80, 83, 1, 0, 0, 0, 81, 79, 1, 0, 0, 0, 81, 82, 1, 0, 0, 0, 82, 11, 1, 0, 0, 0, 83, 81, 1, 0, 0, 0, 84, 98, 3, 14, 7, 0, 85, 87, 5, 19, 0, 0, 86, 85, 1, 0, 0, 0, 87, 88, 1, 0, 0, 0, 88, 86, 1, 0, 0, 0, 88, 89, 1, 0, 0, 0, 89, 90, 1, 0, 0, 0, 90, 98, 3, 14, 7, 0, 91, 93, 5, 18, 0, 0, 92, 91, 1, 0, 0, 0, 93, 94, 1, 0, 0, 0, 94, 92, 1, 0, 0, 0, 94, 95, 1, 0, 0, 0, 95, 96, 1, 0, 0, 0, 96, 98, 3, 14, 7, 0, 97, 84, 1, 0, 0, 0, 97, 86, 1, 0, 0, 0, 97, 92, 1, 0, 0, 0, 98, 13, 1, 0, 0, 0, 99, 100, 6, 7, -1, 0, 100, 101, 3, 16, 8, 0, 101, 126, 1, 0, 0, 0, 102, 103, 10, 3, 0, 0, 103, 105, 5, 16, 0, 0, 104, 106, 5, 20, 0, 0, 105, 104, 1, 0, 0, 0, 105, 106, 1, 0, 0, 0, 106, 107, 1, 0, 0, 0, 107, 125, 5, 36, 0, 0, 108, 109, 10, 2, 0, 0, 109, 110, 5, 16, 0, 0, 110, 111, 5, 36, 0, 0, 111, 113, 5, 14, 0, 0, 112, 
114, 3, 18, 9, 0, 113, 112, 1, 0, 0, 0, 113, 114, 1, 0, 0, 0, 114, 115, 1, 0, 0, 0, 115, 125, 5, 15, 0, 0, 116, 117, 10, 1, 0, 0, 117, 119, 5, 10, 0, 0, 118, 120, 5, 20, 0, 0, 119, 118, 1, 0, 0, 0, 119, 120, 1, 0, 0, 0, 120, 121, 1, 0, 0, 0, 121, 122, 3, 2, 1, 0, 122, 123, 5, 11, 0, 0, 123, 125, 1, 0, 0, 0, 124, 102, 1, 0, 0, 0, 124, 108, 1, 0, 0, 0, 124, 116, 1, 0, 0, 0, 125, 128, 1, 0, 0, 0, 126, 124, 1, 0, 0, 0, 126, 127, 1, 0, 0, 0, 127, 15, 1, 0, 0, 0, 128, 126, 1, 0, 0, 0, 129, 131, 5, 16, 0, 0, 130, 129, 1, 0, 0, 0, 130, 131, 1, 0, 0, 0, 131, 132, 1, 0, 0, 0, 132, 138, 5, 36, 0, 0, 133, 135, 5, 14, 0, 0, 134, 136, 3, 18, 9, 0, 135, 134, 1, 0, 0, 0, 135, 136, 1, 0, 0, 0, 136, 137, 1, 0, 0, 0, 137, 139, 5, 15, 0, 0, 138, 133, 1, 0, 0, 0, 138, 139, 1, 0, 0, 0, 139, 181, 1, 0, 0, 0, 140, 141, 5, 14, 0, 0, 141, 142, 3, 2, 1, 0, 142, 143, 5, 15, 0, 0, 143, 181, 1, 0, 0, 0, 144, 146, 5, 10, 0, 0, 145, 147, 3, 20, 10, 0, 146, 145, 1, 0, 0, 0, 146, 147, 1, 0, 0, 0, 147, 149, 1, 0, 0, 0, 148, 150, 5, 17, 0, 0, 149, 148, 1, 0, 0, 0, 149, 150, 1, 0, 0, 0, 150, 151, 1, 0, 0, 0, 151, 181, 5, 11, 0, 0, 152, 154, 5, 12, 0, 0, 153, 155, 3, 26, 13, 0, 154, 153, 1, 0, 0, 0, 154, 155, 1, 0, 0, 0, 155, 157, 1, 0, 0, 0, 156, 158, 5, 17, 0, 0, 157, 156, 1, 0, 0, 0, 157, 158, 1, 0, 0, 0, 158, 159, 1, 0, 0, 0, 159, 181, 5, 13, 0, 0, 160, 162, 5, 16, 0, 0, 161, 160, 1, 0, 0, 0, 161, 162, 1, 0, 0, 0, 162, 163, 1, 0, 0, 0, 163, 168, 5, 36, 0, 0, 164, 165, 5, 16, 0, 0, 165, 167, 5, 36, 0, 0, 166, 164, 1, 0, 0, 0, 167, 170, 1, 0, 0, 0, 168, 166, 1, 0, 0, 0, 168, 169, 1, 0, 0, 0, 169, 171, 1, 0, 0, 0, 170, 168, 1, 0, 0, 0, 171, 173, 5, 12, 0, 0, 172, 174, 3, 22, 11, 0, 173, 172, 1, 0, 0, 0, 173, 174, 1, 0, 0, 0, 174, 176, 1, 0, 0, 0, 175, 177, 5, 17, 0, 0, 176, 175, 1, 0, 0, 0, 176, 177, 1, 0, 0, 0, 177, 178, 1, 0, 0, 0, 178, 181, 5, 13, 0, 0, 179, 181, 3, 30, 15, 0, 180, 130, 1, 0, 0, 0, 180, 140, 1, 0, 0, 0, 180, 144, 1, 0, 0, 0, 180, 152, 1, 0, 0, 0, 180, 161, 1, 0, 0, 0, 180, 179, 1, 0, 0, 0, 181, 17, 1, 0, 0, 0, 182, 187, 3, 2, 1, 0, 183, 184, 5, 17, 0, 0, 184, 186, 3, 2, 1, 0, 185, 183, 1, 0, 0, 0, 186, 189, 1, 0, 0, 0, 187, 185, 1, 0, 0, 0, 187, 188, 1, 0, 0, 0, 188, 19, 1, 0, 0, 0, 189, 187, 1, 0, 0, 0, 190, 195, 3, 28, 14, 0, 191, 192, 5, 17, 0, 0, 192, 194, 3, 28, 14, 0, 193, 191, 1, 0, 0, 0, 194, 197, 1, 0, 0, 0, 195, 193, 1, 0, 0, 0, 195, 196, 1, 0, 0, 0, 196, 21, 1, 0, 0, 0, 197, 195, 1, 0, 0, 0, 198, 199, 3, 24, 12, 0, 199, 200, 5, 21, 0, 0, 200, 208, 3, 2, 1, 0, 201, 202, 5, 17, 0, 0, 202, 203, 3, 24, 12, 0, 203, 204, 5, 21, 0, 0, 204, 205, 3, 2, 1, 0, 205, 207, 1, 0, 0, 0, 206, 201, 1, 0, 0, 0, 207, 210, 1, 0, 0, 0, 208, 206, 1, 0, 0, 0, 208, 209, 1, 0, 0, 0, 209, 23, 1, 0, 0, 0, 210, 208, 1, 0, 0, 0, 211, 213, 5, 20, 0, 0, 212, 211, 1, 0, 0, 0, 212, 213, 1, 0, 0, 0, 213, 214, 1, 0, 0, 0, 214, 215, 5, 36, 0, 0, 215, 25, 1, 0, 0, 0, 216, 217, 3, 28, 14, 0, 217, 218, 5, 21, 0, 0, 218, 226, 3, 2, 1, 0, 219, 220, 5, 17, 0, 0, 220, 221, 3, 28, 14, 0, 221, 222, 5, 21, 0, 0, 222, 223, 3, 2, 1, 0, 223, 225, 1, 0, 0, 0, 224, 219, 1, 0, 0, 0, 225, 228, 1, 0, 0, 0, 226, 224, 1, 0, 0, 0, 226, 227, 1, 0, 0, 0, 227, 27, 1, 0, 0, 0, 228, 226, 1, 0, 0, 0, 229, 231, 5, 20, 0, 0, 230, 229, 1, 0, 0, 0, 230, 231, 1, 0, 0, 0, 231, 232, 1, 0, 0, 0, 232, 233, 3, 2, 1, 0, 233, 29, 1, 0, 0, 0, 234, 236, 5, 18, 0, 0, 235, 234, 1, 0, 0, 0, 235, 236, 1, 0, 0, 0, 236, 237, 1, 0, 0, 0, 237, 249, 5, 32, 0, 0, 238, 249, 5, 33, 0, 0, 239, 241, 5, 18, 0, 0, 240, 239, 1, 0, 0, 0, 240, 241, 1, 0, 0, 0, 241, 242, 1, 0, 0, 0, 242, 
249, 5, 31, 0, 0, 243, 249, 5, 34, 0, 0, 244, 249, 5, 35, 0, 0, 245, 249, 5, 26, 0, 0, 246, 249, 5, 27, 0, 0, 247, 249, 5, 28, 0, 0, 248, 235, 1, 0, 0, 0, 248, 238, 1, 0, 0, 0, 248, 240, 1, 0, 0, 0, 248, 243, 1, 0, 0, 0, 248, 244, 1, 0, 0, 0, 248, 245, 1, 0, 0, 0, 248, 246, 1, 0, 0, 0, 248, 247, 1, 0, 0, 0, 249, 31, 1, 0, 0, 0, 35, 41, 48, 56, 67, 79, 81, 88, 94, 97, 105, 113, 119, 124, 126, 130, 135, 138, 146, 149, 154, 157, 161, 168, 173, 176, 180, 187, 195, 208, 212, 226, 230, 235, 240, 248]
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
+[4, 1, 37, 259, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 44, 8, 1, 1, 2, 1, 2, 1, 2, 5, 2, 49, 8, 2, 10, 2, 12, 2, 52, 9, 2, 1, 3, 1, 3, 1, 3, 5, 3, 57, 8, 3, 10, 3, 12, 3, 60, 9, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 5, 4, 68, 8, 4, 10, 4, 12, 4, 71, 9, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 82, 8, 5, 10, 5, 12, 5, 85, 9, 5, 1, 6, 1, 6, 4, 6, 89, 8, 6, 11, 6, 12, 6, 90, 1, 6, 1, 6, 4, 6, 95, 8, 6, 11, 6, 12, 6, 96, 1, 6, 3, 6, 100, 8, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 108, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 116, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 122, 8, 7, 1, 7, 1, 7, 1, 7, 5, 7, 127, 8, 7, 10, 7, 12, 7, 130, 9, 7, 1, 8, 3, 8, 133, 8, 8, 1, 8, 1, 8, 3, 8, 137, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 142, 8, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8, 151, 8, 8, 1, 8, 3, 8, 154, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 159, 8, 8, 1, 8, 3, 8, 162, 8, 8, 1, 8, 1, 8, 3, 8, 166, 8, 8, 1, 8, 1, 8, 1, 8, 5, 8, 171, 8, 8, 10, 8, 12, 8, 174, 9, 8, 1, 8, 1, 8, 3, 8, 178, 8, 8, 1, 8, 3, 8, 181, 8, 8, 1, 8, 1, 8, 3, 8, 185, 8, 8, 1, 9, 1, 9, 1, 9, 5, 9, 190, 8, 9, 10, 9, 12, 9, 193, 9, 9, 1, 10, 1, 10, 1, 10, 5, 10, 198, 8, 10, 10, 10, 12, 10, 201, 9, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 5, 11, 211, 8, 11, 10, 11, 12, 11, 214, 9, 11, 1, 12, 3, 12, 217, 8, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 5, 13, 229, 8, 13, 10, 13, 12, 13, 232, 9, 13, 1, 14, 1, 14, 3, 14, 236, 8, 14, 1, 15, 3, 15, 239, 8, 15, 1, 15, 1, 15, 1, 16, 3, 16, 244, 8, 16, 1, 16, 1, 16, 1, 16, 3, 16, 249, 8, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 3, 16, 257, 8, 16, 1, 16, 0, 3, 8, 10, 14, 17, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 0, 3, 1, 0, 1, 7, 1, 0, 23, 25, 2, 0, 18, 18, 22, 22, 290, 0, 34, 1, 0, 0, 0, 2, 37, 1, 0, 0, 0, 4, 45, 1, 0, 0, 0, 6, 53, 1, 0, 0, 0, 8, 61, 1, 0, 0, 0, 10, 72, 1, 0, 0, 0, 12, 99, 1, 0, 0, 0, 14, 101, 1, 0, 0, 0, 16, 184, 1, 0, 0, 0, 18, 186, 1, 0, 0, 0, 20, 194, 1, 0, 0, 0, 22, 202, 1, 0, 0, 0, 24, 216, 1, 0, 0, 0, 26, 220, 1, 0, 0, 0, 28, 235, 1, 0, 0, 0, 30, 238, 1, 0, 0, 0, 32, 256, 1, 0, 0, 0, 34, 35, 3, 2, 1, 0, 35, 36, 5, 0, 0, 1, 36, 1, 1, 0, 0, 0, 37, 43, 3, 4, 2, 0, 38, 39, 5, 20, 0, 0, 39, 40, 3, 4, 2, 0, 40, 41, 5, 21, 0, 0, 41, 42, 3, 2, 1, 0, 42, 44, 1, 0, 0, 0, 43, 38, 1, 0, 0, 0, 43, 44, 1, 0, 0, 0, 44, 3, 1, 0, 0, 0, 45, 50, 3, 6, 3, 0, 46, 47, 5, 9, 0, 0, 47, 49, 3, 6, 3, 0, 48, 46, 1, 0, 0, 0, 49, 52, 1, 0, 0, 0, 50, 48, 1, 0, 0, 0, 50, 51, 1, 0, 0, 0, 51, 5, 1, 0, 0, 0, 52, 50, 1, 0, 0, 0, 53, 58, 3, 8, 4, 0, 54, 55, 5, 8, 0, 0, 55, 57, 3, 8, 4, 0, 56, 54, 1, 0, 0, 0, 57, 60, 1, 0, 0, 0, 58, 56, 1, 0, 0, 0, 58, 59, 1, 0, 0, 0, 59, 7, 1, 0, 0, 0, 60, 58, 1, 0, 0, 0, 61, 62, 6, 4, -1,
0, 62, 63, 3, 10, 5, 0, 63, 69, 1, 0, 0, 0, 64, 65, 10, 1, 0, 0, 65, 66, 7, 0, 0, 0, 66, 68, 3, 8, 4, 2, 67, 64, 1, 0, 0, 0, 68, 71, 1, 0, 0, 0, 69, 67, 1, 0, 0, 0, 69, 70, 1, 0, 0, 0, 70, 9, 1, 0, 0, 0, 71, 69, 1, 0, 0, 0, 72, 73, 6, 5, -1, 0, 73, 74, 3, 12, 6, 0, 74, 83, 1, 0, 0, 0, 75, 76, 10, 2, 0, 0, 76, 77, 7, 1, 0, 0, 77, 82, 3, 10, 5, 3, 78, 79, 10, 1, 0, 0, 79, 80, 7, 2, 0, 0, 80, 82, 3, 10, 5, 2, 81, 75, 1, 0, 0, 0, 81, 78, 1, 0, 0, 0, 82, 85, 1, 0, 0, 0, 83, 81, 1, 0, 0, 0, 83, 84, 1, 0, 0, 0, 84, 11, 1, 0, 0, 0, 85, 83, 1, 0, 0, 0, 86, 100, 3, 14, 7, 0, 87, 89, 5, 19, 0, 0, 88, 87, 1, 0, 0, 0, 89, 90, 1, 0, 0, 0, 90, 88, 1, 0, 0, 0, 90, 91, 1, 0, 0, 0, 91, 92, 1, 0, 0, 0, 92, 100, 3, 14, 7, 0, 93, 95, 5, 18, 0, 0, 94, 93, 1, 0, 0, 0, 95, 96, 1, 0, 0, 0, 96, 94, 1, 0, 0, 0, 96, 97, 1, 0, 0, 0, 97, 98, 1, 0, 0, 0, 98, 100, 3, 14, 7, 0, 99, 86, 1, 0, 0, 0, 99, 88, 1, 0, 0, 0, 99, 94, 1, 0, 0, 0, 100, 13, 1, 0, 0, 0, 101, 102, 6, 7, -1, 0, 102, 103, 3, 16, 8, 0, 103, 128, 1, 0, 0, 0, 104, 105, 10, 3, 0, 0, 105, 107, 5, 16, 0, 0, 106, 108, 5, 20, 0, 0, 107, 106, 1, 0, 0, 0, 107, 108, 1, 0, 0, 0, 108, 109, 1, 0, 0, 0, 109, 127, 3, 28, 14, 0, 110, 111, 10, 2, 0, 0, 111, 112, 5, 16, 0, 0, 112, 113, 5, 36, 0, 0, 113, 115, 5, 14, 0, 0, 114, 116, 3, 18, 9, 0, 115, 114, 1, 0, 0, 0, 115, 116, 1, 0, 0, 0, 116, 117, 1, 0, 0, 0, 117, 127, 5, 15, 0, 0, 118, 119, 10, 1, 0, 0, 119, 121, 5, 10, 0, 0, 120, 122, 5, 20, 0, 0, 121, 120, 1, 0, 0, 0, 121, 122, 1, 0, 0, 0, 122, 123, 1, 0, 0, 0, 123, 124, 3, 2, 1, 0, 124, 125, 5, 11, 0, 0, 125, 127, 1, 0, 0, 0, 126, 104, 1, 0, 0, 0, 126, 110, 1, 0, 0, 0, 126, 118, 1, 0, 0, 0, 127, 130, 1, 0, 0, 0, 128, 126, 1, 0, 0, 0, 128, 129, 1, 0, 0, 0, 129, 15, 1, 0, 0, 0, 130, 128, 1, 0, 0, 0, 131, 133, 5, 16, 0, 0, 132, 131, 1, 0, 0, 0, 132, 133, 1, 0, 0, 0, 133, 134, 1, 0, 0, 0, 134, 185, 5, 36, 0, 0, 135, 137, 5, 16, 0, 0, 136, 135, 1, 0, 0, 0, 136, 137, 1, 0, 0, 0, 137, 138, 1, 0, 0, 0, 138, 139, 5, 36, 0, 0, 139, 141, 5, 14, 0, 0, 140, 142, 3, 18, 9, 0, 141, 140, 1, 0, 0, 0, 141, 142, 1, 0, 0, 0, 142, 143, 1, 0, 0, 0, 143, 185, 5, 15, 0, 0, 144, 145, 5, 14, 0, 0, 145, 146, 3, 2, 1, 0, 146, 147, 5, 15, 0, 0, 147, 185, 1, 0, 0, 0, 148, 150, 5, 10, 0, 0, 149, 151, 3, 20, 10, 0, 150, 149, 1, 0, 0, 0, 150, 151, 1, 0, 0, 0, 151, 153, 1, 0, 0, 0, 152, 154, 5, 17, 0, 0, 153, 152, 1, 0, 0, 0, 153, 154, 1, 0, 0, 0, 154, 155, 1, 0, 0, 0, 155, 185, 5, 11, 0, 0, 156, 158, 5, 12, 0, 0, 157, 159, 3, 26, 13, 0, 158, 157, 1, 0, 0, 0, 158, 159, 1, 0, 0, 0, 159, 161, 1, 0, 0, 0, 160, 162, 5, 17, 0, 0, 161, 160, 1, 0, 0, 0, 161, 162, 1, 0, 0, 0, 162, 163, 1, 0, 0, 0, 163, 185, 5, 13, 0, 0, 164, 166, 5, 16, 0, 0, 165, 164, 1, 0, 0, 0, 165, 166, 1, 0, 0, 0, 166, 167, 1, 0, 0, 0, 167, 172, 5, 36, 0, 0, 168, 169, 5, 16, 0, 0, 169, 171, 5, 36, 0, 0, 170, 168, 1, 0, 0, 0, 171, 174, 1, 0, 0, 0, 172, 170, 1, 0, 0, 0, 172, 173, 1, 0, 0, 0, 173, 175, 1, 0, 0, 0, 174, 172, 1, 0, 0, 0, 175, 177, 5, 12, 0, 0, 176, 178, 3, 22, 11, 0, 177, 176, 1, 0, 0, 0, 177, 178, 1, 0, 0, 0, 178, 180, 1, 0, 0, 0, 179, 181, 5, 17, 0, 0, 180, 179, 1, 0, 0, 0, 180, 181, 1, 0, 0, 0, 181, 182, 1, 0, 0, 0, 182, 185, 5, 13, 0, 0, 183, 185, 3, 32, 16, 0, 184, 132, 1, 0, 0, 0, 184, 136, 1, 0, 0, 0, 184, 144, 1, 0, 0, 0, 184, 148, 1, 0, 0, 0, 184, 156, 1, 0, 0, 0, 184, 165, 1, 0, 0, 0, 184, 183, 1, 0, 0, 0, 185, 17, 1, 0, 0, 0, 186, 191, 3, 2, 1, 0, 187, 188, 5, 17, 0, 0, 188, 190, 3, 2, 1, 0, 189, 187, 1, 0, 0, 0, 190, 193, 1, 0, 0, 0, 191, 189, 1, 0, 0, 0, 191, 192, 1, 0, 0, 0, 192, 19, 1, 0, 0, 0, 193, 191, 1, 0, 0, 
0, 194, 199, 3, 30, 15, 0, 195, 196, 5, 17, 0, 0, 196, 198, 3, 30, 15, 0, 197, 195, 1, 0, 0, 0, 198, 201, 1, 0, 0, 0, 199, 197, 1, 0, 0, 0, 199, 200, 1, 0, 0, 0, 200, 21, 1, 0, 0, 0, 201, 199, 1, 0, 0, 0, 202, 203, 3, 24, 12, 0, 203, 204, 5, 21, 0, 0, 204, 212, 3, 2, 1, 0, 205, 206, 5, 17, 0, 0, 206, 207, 3, 24, 12, 0, 207, 208, 5, 21, 0, 0, 208, 209, 3, 2, 1, 0, 209, 211, 1, 0, 0, 0, 210, 205, 1, 0, 0, 0, 211, 214, 1, 0, 0, 0, 212, 210, 1, 0, 0, 0, 212, 213, 1, 0, 0, 0, 213, 23, 1, 0, 0, 0, 214, 212, 1, 0, 0, 0, 215, 217, 5, 20, 0, 0, 216, 215, 1, 0, 0, 0, 216, 217, 1, 0, 0, 0, 217, 218, 1, 0, 0, 0, 218, 219, 3, 28, 14, 0, 219, 25, 1, 0, 0, 0, 220, 221, 3, 30, 15, 0, 221, 222, 5, 21, 0, 0, 222, 230, 3, 2, 1, 0, 223, 224, 5, 17, 0, 0, 224, 225, 3, 30, 15, 0, 225, 226, 5, 21, 0, 0, 226, 227, 3, 2, 1, 0, 227, 229, 1, 0, 0, 0, 228, 223, 1, 0, 0, 0, 229, 232, 1, 0, 0, 0, 230, 228, 1, 0, 0, 0, 230, 231, 1, 0, 0, 0, 231, 27, 1, 0, 0, 0, 232, 230, 1, 0, 0, 0, 233, 236, 5, 36, 0, 0, 234, 236, 5, 37, 0, 0, 235, 233, 1, 0, 0, 0, 235, 234, 1, 0, 0, 0, 236, 29, 1, 0, 0, 0, 237, 239, 5, 20, 0, 0, 238, 237, 1, 0, 0, 0, 238, 239, 1, 0, 0, 0, 239, 240, 1, 0, 0, 0, 240, 241, 3, 2, 1, 0, 241, 31, 1, 0, 0, 0, 242, 244, 5, 18, 0, 0, 243, 242, 1, 0, 0, 0, 243, 244, 1, 0, 0, 0, 244, 245, 1, 0, 0, 0, 245, 257, 5, 32, 0, 0, 246, 257, 5, 33, 0, 0, 247, 249, 5, 18, 0, 0, 248, 247, 1, 0, 0, 0, 248, 249, 1, 0, 0, 0, 249, 250, 1, 0, 0, 0, 250, 257, 5, 31, 0, 0, 251, 257, 5, 34, 0, 0, 252, 257, 5, 35, 0, 0, 253, 257, 5, 26, 0, 0, 254, 257, 5, 27, 0, 0, 255, 257, 5, 28, 0, 0, 256, 243, 1, 0, 0, 0, 256, 246, 1, 0, 0, 0, 256, 248, 1, 0, 0, 0, 256, 251, 1, 0, 0, 0, 256, 252, 1, 0, 0, 0, 256, 253, 1, 0, 0, 0, 256, 254, 1, 0, 0, 0, 256, 255, 1, 0, 0, 0, 257, 33, 1, 0, 0, 0, 36, 43, 50, 58, 69, 81, 83, 90, 96, 99, 107, 115, 121, 126, 128, 132, 136, 141, 150, 153, 158, 161, 165, 172, 177, 180, 184, 191, 199, 212, 216, 230, 235, 238, 243, 248, 256]
\ No newline at end of file
diff --git a/vendor/github.com/google/cel-go/parser/gen/CEL.tokens b/vendor/github.com/google/cel-go/parser/gen/CEL.tokens
index b4fbf00e51..aa1f5eee6f 100644
--- a/vendor/github.com/google/cel-go/parser/gen/CEL.tokens
+++ b/vendor/github.com/google/cel-go/parser/gen/CEL.tokens
@@ -34,10 +34,7 @@ NUM_UINT=33
 STRING=34
 BYTES=35
 IDENTIFIER=36
-<<<<<<< HEAD
 ESC_IDENTIFIER=37
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 '=='=1
 '!='=2
 'in'=3
diff --git a/vendor/github.com/google/cel-go/parser/gen/CELLexer.interp b/vendor/github.com/google/cel-go/parser/gen/CELLexer.interp
index a7a6f50efd..162d52188c 100644
--- a/vendor/github.com/google/cel-go/parser/gen/CELLexer.interp
+++ b/vendor/github.com/google/cel-go/parser/gen/CELLexer.interp
@@ -36,10 +36,7 @@ null
 null
 null
 null
-<<<<<<< HEAD
 null
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 token symbolic names:
 null
@@ -79,10 +76,7 @@ NUM_UINT
 STRING
 BYTES
 IDENTIFIER
-<<<<<<< HEAD
 ESC_IDENTIFIER
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 rule names:
 EQUALS
@@ -132,10 +126,7 @@ NUM_UINT
 STRING
 BYTES
 IDENTIFIER
-<<<<<<< HEAD
 ESC_IDENTIFIER
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 channel names:
 DEFAULT_TOKEN_CHANNEL
@@ -145,8 +136,4 @@ mode names:
 DEFAULT_MODE
 
 atn:
-<<<<<<< HEAD
-[4, 0, 37, 435, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17,
7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, 2, 47, 7, 47, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 8, 1, 9, 1, 9, 1, 10, 1, 10, 1, 11, 1, 11, 1, 12, 1, 12, 1, 13, 1, 13, 1, 14, 1, 14, 1, 15, 1, 15, 1, 16, 1, 16, 1, 17, 1, 17, 1, 18, 1, 18, 1, 19, 1, 19, 1, 20, 1, 20, 1, 21, 1, 21, 1, 22, 1, 22, 1, 23, 1, 23, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 29, 1, 29, 1, 30, 1, 30, 1, 31, 1, 31, 3, 31, 179, 8, 31, 1, 31, 4, 31, 182, 8, 31, 11, 31, 12, 31, 183, 1, 32, 1, 32, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1, 34, 3, 34, 194, 8, 34, 1, 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 36, 1, 36, 1, 36, 1, 37, 1, 37, 1, 37, 1, 37, 1, 37, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 3, 38, 227, 8, 38, 1, 39, 4, 39, 230, 8, 39, 11, 39, 12, 39, 231, 1, 39, 1, 39, 1, 40, 1, 40, 1, 40, 1, 40, 5, 40, 240, 8, 40, 10, 40, 12, 40, 243, 9, 40, 1, 40, 1, 40, 1, 41, 4, 41, 248, 8, 41, 11, 41, 12, 41, 249, 1, 41, 1, 41, 4, 41, 254, 8, 41, 11, 41, 12, 41, 255, 1, 41, 3, 41, 259, 8, 41, 1, 41, 4, 41, 262, 8, 41, 11, 41, 12, 41, 263, 1, 41, 1, 41, 1, 41, 1, 41, 4, 41, 270, 8, 41, 11, 41, 12, 41, 271, 1, 41, 3, 41, 275, 8, 41, 3, 41, 277, 8, 41, 1, 42, 4, 42, 280, 8, 42, 11, 42, 12, 42, 281, 1, 42, 1, 42, 1, 42, 1, 42, 4, 42, 288, 8, 42, 11, 42, 12, 42, 289, 3, 42, 292, 8, 42, 1, 43, 4, 43, 295, 8, 43, 11, 43, 12, 43, 296, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 4, 43, 305, 8, 43, 11, 43, 12, 43, 306, 1, 43, 1, 43, 3, 43, 311, 8, 43, 1, 44, 1, 44, 1, 44, 5, 44, 316, 8, 44, 10, 44, 12, 44, 319, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 325, 8, 44, 10, 44, 12, 44, 328, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 337, 8, 44, 10, 44, 12, 44, 340, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 351, 8, 44, 10, 44, 12, 44, 354, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 362, 8, 44, 10, 44, 12, 44, 365, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 372, 8, 44, 10, 44, 12, 44, 375, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 385, 8, 44, 10, 44, 12, 44, 388, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 400, 8, 44, 10, 44, 12, 44, 403, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 3, 44, 409, 8, 44, 1, 45, 1, 45, 1, 45, 1, 46, 1, 46, 3, 46, 416, 8, 46, 1, 46, 1, 46, 1, 46, 5, 46, 421, 8, 46, 10, 46, 12, 46, 424, 9, 46, 1, 47, 1, 47, 1, 47, 1, 47, 4, 47, 430, 8, 47, 11, 47, 12, 47, 431, 1, 47, 1, 47, 4, 338, 352, 386, 401, 0, 48, 1, 1, 3, 2, 5, 3, 7, 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27, 14, 29, 15, 31, 16, 33, 17, 35, 18, 37, 19, 39, 20, 41, 21, 43, 22, 45, 23, 47, 24, 49, 25, 51, 26, 53, 27, 55, 28, 57, 0, 59, 0, 61, 0, 63, 0, 65, 0, 67, 0, 69, 0, 71, 0, 73, 0, 75, 0, 77, 0, 79, 29, 81, 30, 83, 31, 85, 32, 87, 33, 89, 34, 91, 35, 93, 36, 95, 37, 1, 0, 17, 2, 0, 65, 90, 97, 122, 2, 0, 69, 69, 101, 101, 2, 0, 43, 43, 45, 45, 3, 0, 48, 57, 65, 
70, 97, 102, 2, 0, 82, 82, 114, 114, 10, 0, 34, 34, 39, 39, 63, 63, 92, 92, 96, 98, 102, 102, 110, 110, 114, 114, 116, 116, 118, 118, 2, 0, 88, 88, 120, 120, 3, 0, 9, 10, 12, 13, 32, 32, 1, 0, 10, 10, 2, 0, 85, 85, 117, 117, 4, 0, 10, 10, 13, 13, 34, 34, 92, 92, 4, 0, 10, 10, 13, 13, 39, 39, 92, 92, 1, 0, 92, 92, 3, 0, 10, 10, 13, 13, 34, 34, 3, 0, 10, 10, 13, 13, 39, 39, 2, 0, 66, 66, 98, 98, 3, 0, 32, 32, 45, 47, 95, 95, 471, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55, 1, 0, 0, 0, 0, 79, 1, 0, 0, 0, 0, 81, 1, 0, 0, 0, 0, 83, 1, 0, 0, 0, 0, 85, 1, 0, 0, 0, 0, 87, 1, 0, 0, 0, 0, 89, 1, 0, 0, 0, 0, 91, 1, 0, 0, 0, 0, 93, 1, 0, 0, 0, 0, 95, 1, 0, 0, 0, 1, 97, 1, 0, 0, 0, 3, 100, 1, 0, 0, 0, 5, 103, 1, 0, 0, 0, 7, 106, 1, 0, 0, 0, 9, 108, 1, 0, 0, 0, 11, 111, 1, 0, 0, 0, 13, 114, 1, 0, 0, 0, 15, 116, 1, 0, 0, 0, 17, 119, 1, 0, 0, 0, 19, 122, 1, 0, 0, 0, 21, 124, 1, 0, 0, 0, 23, 126, 1, 0, 0, 0, 25, 128, 1, 0, 0, 0, 27, 130, 1, 0, 0, 0, 29, 132, 1, 0, 0, 0, 31, 134, 1, 0, 0, 0, 33, 136, 1, 0, 0, 0, 35, 138, 1, 0, 0, 0, 37, 140, 1, 0, 0, 0, 39, 142, 1, 0, 0, 0, 41, 144, 1, 0, 0, 0, 43, 146, 1, 0, 0, 0, 45, 148, 1, 0, 0, 0, 47, 150, 1, 0, 0, 0, 49, 152, 1, 0, 0, 0, 51, 154, 1, 0, 0, 0, 53, 159, 1, 0, 0, 0, 55, 165, 1, 0, 0, 0, 57, 170, 1, 0, 0, 0, 59, 172, 1, 0, 0, 0, 61, 174, 1, 0, 0, 0, 63, 176, 1, 0, 0, 0, 65, 185, 1, 0, 0, 0, 67, 187, 1, 0, 0, 0, 69, 193, 1, 0, 0, 0, 71, 195, 1, 0, 0, 0, 73, 198, 1, 0, 0, 0, 75, 203, 1, 0, 0, 0, 77, 226, 1, 0, 0, 0, 79, 229, 1, 0, 0, 0, 81, 235, 1, 0, 0, 0, 83, 276, 1, 0, 0, 0, 85, 291, 1, 0, 0, 0, 87, 310, 1, 0, 0, 0, 89, 408, 1, 0, 0, 0, 91, 410, 1, 0, 0, 0, 93, 415, 1, 0, 0, 0, 95, 425, 1, 0, 0, 0, 97, 98, 5, 61, 0, 0, 98, 99, 5, 61, 0, 0, 99, 2, 1, 0, 0, 0, 100, 101, 5, 33, 0, 0, 101, 102, 5, 61, 0, 0, 102, 4, 1, 0, 0, 0, 103, 104, 5, 105, 0, 0, 104, 105, 5, 110, 0, 0, 105, 6, 1, 0, 0, 0, 106, 107, 5, 60, 0, 0, 107, 8, 1, 0, 0, 0, 108, 109, 5, 60, 0, 0, 109, 110, 5, 61, 0, 0, 110, 10, 1, 0, 0, 0, 111, 112, 5, 62, 0, 0, 112, 113, 5, 61, 0, 0, 113, 12, 1, 0, 0, 0, 114, 115, 5, 62, 0, 0, 115, 14, 1, 0, 0, 0, 116, 117, 5, 38, 0, 0, 117, 118, 5, 38, 0, 0, 118, 16, 1, 0, 0, 0, 119, 120, 5, 124, 0, 0, 120, 121, 5, 124, 0, 0, 121, 18, 1, 0, 0, 0, 122, 123, 5, 91, 0, 0, 123, 20, 1, 0, 0, 0, 124, 125, 5, 93, 0, 0, 125, 22, 1, 0, 0, 0, 126, 127, 5, 123, 0, 0, 127, 24, 1, 0, 0, 0, 128, 129, 5, 125, 0, 0, 129, 26, 1, 0, 0, 0, 130, 131, 5, 40, 0, 0, 131, 28, 1, 0, 0, 0, 132, 133, 5, 41, 0, 0, 133, 30, 1, 0, 0, 0, 134, 135, 5, 46, 0, 0, 135, 32, 1, 0, 0, 0, 136, 137, 5, 44, 0, 0, 137, 34, 1, 0, 0, 0, 138, 139, 5, 45, 0, 0, 139, 36, 1, 0, 0, 0, 140, 141, 5, 33, 0, 0, 141, 38, 1, 0, 0, 0, 142, 143, 5, 63, 0, 0, 143, 40, 1, 0, 0, 0, 144, 145, 5, 58, 0, 0, 145, 42, 1, 0, 0, 0, 146, 147, 5, 43, 0, 0, 147, 44, 1, 0, 0, 0, 148, 149, 5, 42, 0, 0, 149, 46, 1, 0, 0, 0, 150, 151, 5, 47, 0, 0, 151, 48, 1, 0, 0, 0, 152, 153, 5, 37, 0, 0, 153, 50, 1, 0, 0, 0, 154, 155, 5, 116, 0, 0, 155, 156, 5, 114, 0, 0, 156, 157, 5, 117, 0, 0, 157, 158, 5, 101, 0, 0, 158, 52, 1, 0, 0, 0, 159, 160, 5, 102, 0, 0, 160, 161, 5, 97, 0, 0, 
161, 162, 5, 108, 0, 0, 162, 163, 5, 115, 0, 0, 163, 164, 5, 101, 0, 0, 164, 54, 1, 0, 0, 0, 165, 166, 5, 110, 0, 0, 166, 167, 5, 117, 0, 0, 167, 168, 5, 108, 0, 0, 168, 169, 5, 108, 0, 0, 169, 56, 1, 0, 0, 0, 170, 171, 5, 92, 0, 0, 171, 58, 1, 0, 0, 0, 172, 173, 7, 0, 0, 0, 173, 60, 1, 0, 0, 0, 174, 175, 2, 48, 57, 0, 175, 62, 1, 0, 0, 0, 176, 178, 7, 1, 0, 0, 177, 179, 7, 2, 0, 0, 178, 177, 1, 0, 0, 0, 178, 179, 1, 0, 0, 0, 179, 181, 1, 0, 0, 0, 180, 182, 3, 61, 30, 0, 181, 180, 1, 0, 0, 0, 182, 183, 1, 0, 0, 0, 183, 181, 1, 0, 0, 0, 183, 184, 1, 0, 0, 0, 184, 64, 1, 0, 0, 0, 185, 186, 7, 3, 0, 0, 186, 66, 1, 0, 0, 0, 187, 188, 7, 4, 0, 0, 188, 68, 1, 0, 0, 0, 189, 194, 3, 71, 35, 0, 190, 194, 3, 75, 37, 0, 191, 194, 3, 77, 38, 0, 192, 194, 3, 73, 36, 0, 193, 189, 1, 0, 0, 0, 193, 190, 1, 0, 0, 0, 193, 191, 1, 0, 0, 0, 193, 192, 1, 0, 0, 0, 194, 70, 1, 0, 0, 0, 195, 196, 3, 57, 28, 0, 196, 197, 7, 5, 0, 0, 197, 72, 1, 0, 0, 0, 198, 199, 3, 57, 28, 0, 199, 200, 2, 48, 51, 0, 200, 201, 2, 48, 55, 0, 201, 202, 2, 48, 55, 0, 202, 74, 1, 0, 0, 0, 203, 204, 3, 57, 28, 0, 204, 205, 7, 6, 0, 0, 205, 206, 3, 65, 32, 0, 206, 207, 3, 65, 32, 0, 207, 76, 1, 0, 0, 0, 208, 209, 3, 57, 28, 0, 209, 210, 5, 117, 0, 0, 210, 211, 3, 65, 32, 0, 211, 212, 3, 65, 32, 0, 212, 213, 3, 65, 32, 0, 213, 214, 3, 65, 32, 0, 214, 227, 1, 0, 0, 0, 215, 216, 3, 57, 28, 0, 216, 217, 5, 85, 0, 0, 217, 218, 3, 65, 32, 0, 218, 219, 3, 65, 32, 0, 219, 220, 3, 65, 32, 0, 220, 221, 3, 65, 32, 0, 221, 222, 3, 65, 32, 0, 222, 223, 3, 65, 32, 0, 223, 224, 3, 65, 32, 0, 224, 225, 3, 65, 32, 0, 225, 227, 1, 0, 0, 0, 226, 208, 1, 0, 0, 0, 226, 215, 1, 0, 0, 0, 227, 78, 1, 0, 0, 0, 228, 230, 7, 7, 0, 0, 229, 228, 1, 0, 0, 0, 230, 231, 1, 0, 0, 0, 231, 229, 1, 0, 0, 0, 231, 232, 1, 0, 0, 0, 232, 233, 1, 0, 0, 0, 233, 234, 6, 39, 0, 0, 234, 80, 1, 0, 0, 0, 235, 236, 5, 47, 0, 0, 236, 237, 5, 47, 0, 0, 237, 241, 1, 0, 0, 0, 238, 240, 8, 8, 0, 0, 239, 238, 1, 0, 0, 0, 240, 243, 1, 0, 0, 0, 241, 239, 1, 0, 0, 0, 241, 242, 1, 0, 0, 0, 242, 244, 1, 0, 0, 0, 243, 241, 1, 0, 0, 0, 244, 245, 6, 40, 0, 0, 245, 82, 1, 0, 0, 0, 246, 248, 3, 61, 30, 0, 247, 246, 1, 0, 0, 0, 248, 249, 1, 0, 0, 0, 249, 247, 1, 0, 0, 0, 249, 250, 1, 0, 0, 0, 250, 251, 1, 0, 0, 0, 251, 253, 5, 46, 0, 0, 252, 254, 3, 61, 30, 0, 253, 252, 1, 0, 0, 0, 254, 255, 1, 0, 0, 0, 255, 253, 1, 0, 0, 0, 255, 256, 1, 0, 0, 0, 256, 258, 1, 0, 0, 0, 257, 259, 3, 63, 31, 0, 258, 257, 1, 0, 0, 0, 258, 259, 1, 0, 0, 0, 259, 277, 1, 0, 0, 0, 260, 262, 3, 61, 30, 0, 261, 260, 1, 0, 0, 0, 262, 263, 1, 0, 0, 0, 263, 261, 1, 0, 0, 0, 263, 264, 1, 0, 0, 0, 264, 265, 1, 0, 0, 0, 265, 266, 3, 63, 31, 0, 266, 277, 1, 0, 0, 0, 267, 269, 5, 46, 0, 0, 268, 270, 3, 61, 30, 0, 269, 268, 1, 0, 0, 0, 270, 271, 1, 0, 0, 0, 271, 269, 1, 0, 0, 0, 271, 272, 1, 0, 0, 0, 272, 274, 1, 0, 0, 0, 273, 275, 3, 63, 31, 0, 274, 273, 1, 0, 0, 0, 274, 275, 1, 0, 0, 0, 275, 277, 1, 0, 0, 0, 276, 247, 1, 0, 0, 0, 276, 261, 1, 0, 0, 0, 276, 267, 1, 0, 0, 0, 277, 84, 1, 0, 0, 0, 278, 280, 3, 61, 30, 0, 279, 278, 1, 0, 0, 0, 280, 281, 1, 0, 0, 0, 281, 279, 1, 0, 0, 0, 281, 282, 1, 0, 0, 0, 282, 292, 1, 0, 0, 0, 283, 284, 5, 48, 0, 0, 284, 285, 5, 120, 0, 0, 285, 287, 1, 0, 0, 0, 286, 288, 3, 65, 32, 0, 287, 286, 1, 0, 0, 0, 288, 289, 1, 0, 0, 0, 289, 287, 1, 0, 0, 0, 289, 290, 1, 0, 0, 0, 290, 292, 1, 0, 0, 0, 291, 279, 1, 0, 0, 0, 291, 283, 1, 0, 0, 0, 292, 86, 1, 0, 0, 0, 293, 295, 3, 61, 30, 0, 294, 293, 1, 0, 0, 0, 295, 296, 1, 0, 0, 0, 296, 294, 1, 0, 0, 0, 296, 297, 1, 0, 0, 0, 297, 298, 1, 0, 0, 0, 298, 299, 7, 
9, 0, 0, 299, 311, 1, 0, 0, 0, 300, 301, 5, 48, 0, 0, 301, 302, 5, 120, 0, 0, 302, 304, 1, 0, 0, 0, 303, 305, 3, 65, 32, 0, 304, 303, 1, 0, 0, 0, 305, 306, 1, 0, 0, 0, 306, 304, 1, 0, 0, 0, 306, 307, 1, 0, 0, 0, 307, 308, 1, 0, 0, 0, 308, 309, 7, 9, 0, 0, 309, 311, 1, 0, 0, 0, 310, 294, 1, 0, 0, 0, 310, 300, 1, 0, 0, 0, 311, 88, 1, 0, 0, 0, 312, 317, 5, 34, 0, 0, 313, 316, 3, 69, 34, 0, 314, 316, 8, 10, 0, 0, 315, 313, 1, 0, 0, 0, 315, 314, 1, 0, 0, 0, 316, 319, 1, 0, 0, 0, 317, 315, 1, 0, 0, 0, 317, 318, 1, 0, 0, 0, 318, 320, 1, 0, 0, 0, 319, 317, 1, 0, 0, 0, 320, 409, 5, 34, 0, 0, 321, 326, 5, 39, 0, 0, 322, 325, 3, 69, 34, 0, 323, 325, 8, 11, 0, 0, 324, 322, 1, 0, 0, 0, 324, 323, 1, 0, 0, 0, 325, 328, 1, 0, 0, 0, 326, 324, 1, 0, 0, 0, 326, 327, 1, 0, 0, 0, 327, 329, 1, 0, 0, 0, 328, 326, 1, 0, 0, 0, 329, 409, 5, 39, 0, 0, 330, 331, 5, 34, 0, 0, 331, 332, 5, 34, 0, 0, 332, 333, 5, 34, 0, 0, 333, 338, 1, 0, 0, 0, 334, 337, 3, 69, 34, 0, 335, 337, 8, 12, 0, 0, 336, 334, 1, 0, 0, 0, 336, 335, 1, 0, 0, 0, 337, 340, 1, 0, 0, 0, 338, 339, 1, 0, 0, 0, 338, 336, 1, 0, 0, 0, 339, 341, 1, 0, 0, 0, 340, 338, 1, 0, 0, 0, 341, 342, 5, 34, 0, 0, 342, 343, 5, 34, 0, 0, 343, 409, 5, 34, 0, 0, 344, 345, 5, 39, 0, 0, 345, 346, 5, 39, 0, 0, 346, 347, 5, 39, 0, 0, 347, 352, 1, 0, 0, 0, 348, 351, 3, 69, 34, 0, 349, 351, 8, 12, 0, 0, 350, 348, 1, 0, 0, 0, 350, 349, 1, 0, 0, 0, 351, 354, 1, 0, 0, 0, 352, 353, 1, 0, 0, 0, 352, 350, 1, 0, 0, 0, 353, 355, 1, 0, 0, 0, 354, 352, 1, 0, 0, 0, 355, 356, 5, 39, 0, 0, 356, 357, 5, 39, 0, 0, 357, 409, 5, 39, 0, 0, 358, 359, 3, 67, 33, 0, 359, 363, 5, 34, 0, 0, 360, 362, 8, 13, 0, 0, 361, 360, 1, 0, 0, 0, 362, 365, 1, 0, 0, 0, 363, 361, 1, 0, 0, 0, 363, 364, 1, 0, 0, 0, 364, 366, 1, 0, 0, 0, 365, 363, 1, 0, 0, 0, 366, 367, 5, 34, 0, 0, 367, 409, 1, 0, 0, 0, 368, 369, 3, 67, 33, 0, 369, 373, 5, 39, 0, 0, 370, 372, 8, 14, 0, 0, 371, 370, 1, 0, 0, 0, 372, 375, 1, 0, 0, 0, 373, 371, 1, 0, 0, 0, 373, 374, 1, 0, 0, 0, 374, 376, 1, 0, 0, 0, 375, 373, 1, 0, 0, 0, 376, 377, 5, 39, 0, 0, 377, 409, 1, 0, 0, 0, 378, 379, 3, 67, 33, 0, 379, 380, 5, 34, 0, 0, 380, 381, 5, 34, 0, 0, 381, 382, 5, 34, 0, 0, 382, 386, 1, 0, 0, 0, 383, 385, 9, 0, 0, 0, 384, 383, 1, 0, 0, 0, 385, 388, 1, 0, 0, 0, 386, 387, 1, 0, 0, 0, 386, 384, 1, 0, 0, 0, 387, 389, 1, 0, 0, 0, 388, 386, 1, 0, 0, 0, 389, 390, 5, 34, 0, 0, 390, 391, 5, 34, 0, 0, 391, 392, 5, 34, 0, 0, 392, 409, 1, 0, 0, 0, 393, 394, 3, 67, 33, 0, 394, 395, 5, 39, 0, 0, 395, 396, 5, 39, 0, 0, 396, 397, 5, 39, 0, 0, 397, 401, 1, 0, 0, 0, 398, 400, 9, 0, 0, 0, 399, 398, 1, 0, 0, 0, 400, 403, 1, 0, 0, 0, 401, 402, 1, 0, 0, 0, 401, 399, 1, 0, 0, 0, 402, 404, 1, 0, 0, 0, 403, 401, 1, 0, 0, 0, 404, 405, 5, 39, 0, 0, 405, 406, 5, 39, 0, 0, 406, 407, 5, 39, 0, 0, 407, 409, 1, 0, 0, 0, 408, 312, 1, 0, 0, 0, 408, 321, 1, 0, 0, 0, 408, 330, 1, 0, 0, 0, 408, 344, 1, 0, 0, 0, 408, 358, 1, 0, 0, 0, 408, 368, 1, 0, 0, 0, 408, 378, 1, 0, 0, 0, 408, 393, 1, 0, 0, 0, 409, 90, 1, 0, 0, 0, 410, 411, 7, 15, 0, 0, 411, 412, 3, 89, 44, 0, 412, 92, 1, 0, 0, 0, 413, 416, 3, 59, 29, 0, 414, 416, 5, 95, 0, 0, 415, 413, 1, 0, 0, 0, 415, 414, 1, 0, 0, 0, 416, 422, 1, 0, 0, 0, 417, 421, 3, 59, 29, 0, 418, 421, 3, 61, 30, 0, 419, 421, 5, 95, 0, 0, 420, 417, 1, 0, 0, 0, 420, 418, 1, 0, 0, 0, 420, 419, 1, 0, 0, 0, 421, 424, 1, 0, 0, 0, 422, 420, 1, 0, 0, 0, 422, 423, 1, 0, 0, 0, 423, 94, 1, 0, 0, 0, 424, 422, 1, 0, 0, 0, 425, 429, 5, 96, 0, 0, 426, 430, 3, 59, 29, 0, 427, 430, 3, 61, 30, 0, 428, 430, 7, 16, 0, 0, 429, 426, 1, 0, 0, 0, 429, 427, 1, 0, 0, 0, 429, 428, 1, 0, 0, 
0, 430, 431, 1, 0, 0, 0, 431, 429, 1, 0, 0, 0, 431, 432, 1, 0, 0, 0, 432, 433, 1, 0, 0, 0, 433, 434, 5, 96, 0, 0, 434, 96, 1, 0, 0, 0, 38, 0, 178, 183, 193, 226, 231, 241, 249, 255, 258, 263, 271, 274, 276, 281, 289, 291, 296, 306, 310, 315, 317, 324, 326, 336, 338, 350, 352, 363, 373, 386, 401, 408, 415, 420, 422, 429, 431, 1, 0, 1, 0]
-=======
-[4, 0, 36, 423, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 8, 1, 9, 1, 9, 1, 10, 1, 10, 1, 11, 1, 11, 1, 12, 1, 12, 1, 13, 1, 13, 1, 14, 1, 14, 1, 15, 1, 15, 1, 16, 1, 16, 1, 17, 1, 17, 1, 18, 1, 18, 1, 19, 1, 19, 1, 20, 1, 20, 1, 21, 1, 21, 1, 22, 1, 22, 1, 23, 1, 23, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 29, 1, 29, 1, 30, 1, 30, 1, 31, 1, 31, 3, 31, 177, 8, 31, 1, 31, 4, 31, 180, 8, 31, 11, 31, 12, 31, 181, 1, 32, 1, 32, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1, 34, 3, 34, 192, 8, 34, 1, 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 36, 1, 36, 1, 36, 1, 37, 1, 37, 1, 37, 1, 37, 1, 37, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 3, 38, 225, 8, 38, 1, 39, 4, 39, 228, 8, 39, 11, 39, 12, 39, 229, 1, 39, 1, 39, 1, 40, 1, 40, 1, 40, 1, 40, 5, 40, 238, 8, 40, 10, 40, 12, 40, 241, 9, 40, 1, 40, 1, 40, 1, 41, 4, 41, 246, 8, 41, 11, 41, 12, 41, 247, 1, 41, 1, 41, 4, 41, 252, 8, 41, 11, 41, 12, 41, 253, 1, 41, 3, 41, 257, 8, 41, 1, 41, 4, 41, 260, 8, 41, 11, 41, 12, 41, 261, 1, 41, 1, 41, 1, 41, 1, 41, 4, 41, 268, 8, 41, 11, 41, 12, 41, 269, 1, 41, 3, 41, 273, 8, 41, 3, 41, 275, 8, 41, 1, 42, 4, 42, 278, 8, 42, 11, 42, 12, 42, 279, 1, 42, 1, 42, 1, 42, 1, 42, 4, 42, 286, 8, 42, 11, 42, 12, 42, 287, 3, 42, 290, 8, 42, 1, 43, 4, 43, 293, 8, 43, 11, 43, 12, 43, 294, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 4, 43, 303, 8, 43, 11, 43, 12, 43, 304, 1, 43, 1, 43, 3, 43, 309, 8, 43, 1, 44, 1, 44, 1, 44, 5, 44, 314, 8, 44, 10, 44, 12, 44, 317, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 323, 8, 44, 10, 44, 12, 44, 326, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 335, 8, 44, 10, 44, 12, 44, 338, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 349, 8, 44, 10, 44, 12, 44, 352, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 360, 8, 44, 10, 44, 12, 44, 363, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 370, 8, 44, 10, 44, 12, 44, 373, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 383, 8, 44, 10, 44, 12, 44, 386, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 398, 8, 44, 10, 44, 12, 44, 401, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 3, 44, 407, 8, 44, 1, 45, 1, 45, 1, 45, 1, 46, 1, 46, 3, 46, 414, 8, 46, 1, 46, 1, 46, 1, 46, 5, 46, 419, 8, 46, 10, 46, 12,
46, 422, 9, 46, 4, 336, 350, 384, 399, 0, 47, 1, 1, 3, 2, 5, 3, 7, 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27, 14, 29, 15, 31, 16, 33, 17, 35, 18, 37, 19, 39, 20, 41, 21, 43, 22, 45, 23, 47, 24, 49, 25, 51, 26, 53, 27, 55, 28, 57, 0, 59, 0, 61, 0, 63, 0, 65, 0, 67, 0, 69, 0, 71, 0, 73, 0, 75, 0, 77, 0, 79, 29, 81, 30, 83, 31, 85, 32, 87, 33, 89, 34, 91, 35, 93, 36, 1, 0, 16, 2, 0, 65, 90, 97, 122, 2, 0, 69, 69, 101, 101, 2, 0, 43, 43, 45, 45, 3, 0, 48, 57, 65, 70, 97, 102, 2, 0, 82, 82, 114, 114, 10, 0, 34, 34, 39, 39, 63, 63, 92, 92, 96, 98, 102, 102, 110, 110, 114, 114, 116, 116, 118, 118, 2, 0, 88, 88, 120, 120, 3, 0, 9, 10, 12, 13, 32, 32, 1, 0, 10, 10, 2, 0, 85, 85, 117, 117, 4, 0, 10, 10, 13, 13, 34, 34, 92, 92, 4, 0, 10, 10, 13, 13, 39, 39, 92, 92, 1, 0, 92, 92, 3, 0, 10, 10, 13, 13, 34, 34, 3, 0, 10, 10, 13, 13, 39, 39, 2, 0, 66, 66, 98, 98, 456, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55, 1, 0, 0, 0, 0, 79, 1, 0, 0, 0, 0, 81, 1, 0, 0, 0, 0, 83, 1, 0, 0, 0, 0, 85, 1, 0, 0, 0, 0, 87, 1, 0, 0, 0, 0, 89, 1, 0, 0, 0, 0, 91, 1, 0, 0, 0, 0, 93, 1, 0, 0, 0, 1, 95, 1, 0, 0, 0, 3, 98, 1, 0, 0, 0, 5, 101, 1, 0, 0, 0, 7, 104, 1, 0, 0, 0, 9, 106, 1, 0, 0, 0, 11, 109, 1, 0, 0, 0, 13, 112, 1, 0, 0, 0, 15, 114, 1, 0, 0, 0, 17, 117, 1, 0, 0, 0, 19, 120, 1, 0, 0, 0, 21, 122, 1, 0, 0, 0, 23, 124, 1, 0, 0, 0, 25, 126, 1, 0, 0, 0, 27, 128, 1, 0, 0, 0, 29, 130, 1, 0, 0, 0, 31, 132, 1, 0, 0, 0, 33, 134, 1, 0, 0, 0, 35, 136, 1, 0, 0, 0, 37, 138, 1, 0, 0, 0, 39, 140, 1, 0, 0, 0, 41, 142, 1, 0, 0, 0, 43, 144, 1, 0, 0, 0, 45, 146, 1, 0, 0, 0, 47, 148, 1, 0, 0, 0, 49, 150, 1, 0, 0, 0, 51, 152, 1, 0, 0, 0, 53, 157, 1, 0, 0, 0, 55, 163, 1, 0, 0, 0, 57, 168, 1, 0, 0, 0, 59, 170, 1, 0, 0, 0, 61, 172, 1, 0, 0, 0, 63, 174, 1, 0, 0, 0, 65, 183, 1, 0, 0, 0, 67, 185, 1, 0, 0, 0, 69, 191, 1, 0, 0, 0, 71, 193, 1, 0, 0, 0, 73, 196, 1, 0, 0, 0, 75, 201, 1, 0, 0, 0, 77, 224, 1, 0, 0, 0, 79, 227, 1, 0, 0, 0, 81, 233, 1, 0, 0, 0, 83, 274, 1, 0, 0, 0, 85, 289, 1, 0, 0, 0, 87, 308, 1, 0, 0, 0, 89, 406, 1, 0, 0, 0, 91, 408, 1, 0, 0, 0, 93, 413, 1, 0, 0, 0, 95, 96, 5, 61, 0, 0, 96, 97, 5, 61, 0, 0, 97, 2, 1, 0, 0, 0, 98, 99, 5, 33, 0, 0, 99, 100, 5, 61, 0, 0, 100, 4, 1, 0, 0, 0, 101, 102, 5, 105, 0, 0, 102, 103, 5, 110, 0, 0, 103, 6, 1, 0, 0, 0, 104, 105, 5, 60, 0, 0, 105, 8, 1, 0, 0, 0, 106, 107, 5, 60, 0, 0, 107, 108, 5, 61, 0, 0, 108, 10, 1, 0, 0, 0, 109, 110, 5, 62, 0, 0, 110, 111, 5, 61, 0, 0, 111, 12, 1, 0, 0, 0, 112, 113, 5, 62, 0, 0, 113, 14, 1, 0, 0, 0, 114, 115, 5, 38, 0, 0, 115, 116, 5, 38, 0, 0, 116, 16, 1, 0, 0, 0, 117, 118, 5, 124, 0, 0, 118, 119, 5, 124, 0, 0, 119, 18, 1, 0, 0, 0, 120, 121, 5, 91, 0, 0, 121, 20, 1, 0, 0, 0, 122, 123, 5, 93, 0, 0, 123, 22, 1, 0, 0, 0, 124, 125, 5, 123, 0, 0, 125, 24, 1, 0, 0, 0, 126, 127, 5, 125, 0, 0, 127, 26, 1, 0, 0, 0, 128, 129, 5, 40, 0, 0, 129, 28, 1, 0, 0, 0, 130, 131, 5, 41, 0, 0, 131, 30, 1, 0, 0, 0, 132, 133, 5, 46, 0, 0, 133, 32, 1, 0, 0, 0, 134, 135, 5, 44, 0, 0, 135, 34, 1, 0, 0, 0, 136, 137, 5, 45, 0, 0, 137, 36, 1, 0, 0, 0, 138, 139, 5, 33, 0, 0, 139, 38, 1, 0, 0, 0, 140, 
141, 5, 63, 0, 0, 141, 40, 1, 0, 0, 0, 142, 143, 5, 58, 0, 0, 143, 42, 1, 0, 0, 0, 144, 145, 5, 43, 0, 0, 145, 44, 1, 0, 0, 0, 146, 147, 5, 42, 0, 0, 147, 46, 1, 0, 0, 0, 148, 149, 5, 47, 0, 0, 149, 48, 1, 0, 0, 0, 150, 151, 5, 37, 0, 0, 151, 50, 1, 0, 0, 0, 152, 153, 5, 116, 0, 0, 153, 154, 5, 114, 0, 0, 154, 155, 5, 117, 0, 0, 155, 156, 5, 101, 0, 0, 156, 52, 1, 0, 0, 0, 157, 158, 5, 102, 0, 0, 158, 159, 5, 97, 0, 0, 159, 160, 5, 108, 0, 0, 160, 161, 5, 115, 0, 0, 161, 162, 5, 101, 0, 0, 162, 54, 1, 0, 0, 0, 163, 164, 5, 110, 0, 0, 164, 165, 5, 117, 0, 0, 165, 166, 5, 108, 0, 0, 166, 167, 5, 108, 0, 0, 167, 56, 1, 0, 0, 0, 168, 169, 5, 92, 0, 0, 169, 58, 1, 0, 0, 0, 170, 171, 7, 0, 0, 0, 171, 60, 1, 0, 0, 0, 172, 173, 2, 48, 57, 0, 173, 62, 1, 0, 0, 0, 174, 176, 7, 1, 0, 0, 175, 177, 7, 2, 0, 0, 176, 175, 1, 0, 0, 0, 176, 177, 1, 0, 0, 0, 177, 179, 1, 0, 0, 0, 178, 180, 3, 61, 30, 0, 179, 178, 1, 0, 0, 0, 180, 181, 1, 0, 0, 0, 181, 179, 1, 0, 0, 0, 181, 182, 1, 0, 0, 0, 182, 64, 1, 0, 0, 0, 183, 184, 7, 3, 0, 0, 184, 66, 1, 0, 0, 0, 185, 186, 7, 4, 0, 0, 186, 68, 1, 0, 0, 0, 187, 192, 3, 71, 35, 0, 188, 192, 3, 75, 37, 0, 189, 192, 3, 77, 38, 0, 190, 192, 3, 73, 36, 0, 191, 187, 1, 0, 0, 0, 191, 188, 1, 0, 0, 0, 191, 189, 1, 0, 0, 0, 191, 190, 1, 0, 0, 0, 192, 70, 1, 0, 0, 0, 193, 194, 3, 57, 28, 0, 194, 195, 7, 5, 0, 0, 195, 72, 1, 0, 0, 0, 196, 197, 3, 57, 28, 0, 197, 198, 2, 48, 51, 0, 198, 199, 2, 48, 55, 0, 199, 200, 2, 48, 55, 0, 200, 74, 1, 0, 0, 0, 201, 202, 3, 57, 28, 0, 202, 203, 7, 6, 0, 0, 203, 204, 3, 65, 32, 0, 204, 205, 3, 65, 32, 0, 205, 76, 1, 0, 0, 0, 206, 207, 3, 57, 28, 0, 207, 208, 5, 117, 0, 0, 208, 209, 3, 65, 32, 0, 209, 210, 3, 65, 32, 0, 210, 211, 3, 65, 32, 0, 211, 212, 3, 65, 32, 0, 212, 225, 1, 0, 0, 0, 213, 214, 3, 57, 28, 0, 214, 215, 5, 85, 0, 0, 215, 216, 3, 65, 32, 0, 216, 217, 3, 65, 32, 0, 217, 218, 3, 65, 32, 0, 218, 219, 3, 65, 32, 0, 219, 220, 3, 65, 32, 0, 220, 221, 3, 65, 32, 0, 221, 222, 3, 65, 32, 0, 222, 223, 3, 65, 32, 0, 223, 225, 1, 0, 0, 0, 224, 206, 1, 0, 0, 0, 224, 213, 1, 0, 0, 0, 225, 78, 1, 0, 0, 0, 226, 228, 7, 7, 0, 0, 227, 226, 1, 0, 0, 0, 228, 229, 1, 0, 0, 0, 229, 227, 1, 0, 0, 0, 229, 230, 1, 0, 0, 0, 230, 231, 1, 0, 0, 0, 231, 232, 6, 39, 0, 0, 232, 80, 1, 0, 0, 0, 233, 234, 5, 47, 0, 0, 234, 235, 5, 47, 0, 0, 235, 239, 1, 0, 0, 0, 236, 238, 8, 8, 0, 0, 237, 236, 1, 0, 0, 0, 238, 241, 1, 0, 0, 0, 239, 237, 1, 0, 0, 0, 239, 240, 1, 0, 0, 0, 240, 242, 1, 0, 0, 0, 241, 239, 1, 0, 0, 0, 242, 243, 6, 40, 0, 0, 243, 82, 1, 0, 0, 0, 244, 246, 3, 61, 30, 0, 245, 244, 1, 0, 0, 0, 246, 247, 1, 0, 0, 0, 247, 245, 1, 0, 0, 0, 247, 248, 1, 0, 0, 0, 248, 249, 1, 0, 0, 0, 249, 251, 5, 46, 0, 0, 250, 252, 3, 61, 30, 0, 251, 250, 1, 0, 0, 0, 252, 253, 1, 0, 0, 0, 253, 251, 1, 0, 0, 0, 253, 254, 1, 0, 0, 0, 254, 256, 1, 0, 0, 0, 255, 257, 3, 63, 31, 0, 256, 255, 1, 0, 0, 0, 256, 257, 1, 0, 0, 0, 257, 275, 1, 0, 0, 0, 258, 260, 3, 61, 30, 0, 259, 258, 1, 0, 0, 0, 260, 261, 1, 0, 0, 0, 261, 259, 1, 0, 0, 0, 261, 262, 1, 0, 0, 0, 262, 263, 1, 0, 0, 0, 263, 264, 3, 63, 31, 0, 264, 275, 1, 0, 0, 0, 265, 267, 5, 46, 0, 0, 266, 268, 3, 61, 30, 0, 267, 266, 1, 0, 0, 0, 268, 269, 1, 0, 0, 0, 269, 267, 1, 0, 0, 0, 269, 270, 1, 0, 0, 0, 270, 272, 1, 0, 0, 0, 271, 273, 3, 63, 31, 0, 272, 271, 1, 0, 0, 0, 272, 273, 1, 0, 0, 0, 273, 275, 1, 0, 0, 0, 274, 245, 1, 0, 0, 0, 274, 259, 1, 0, 0, 0, 274, 265, 1, 0, 0, 0, 275, 84, 1, 0, 0, 0, 276, 278, 3, 61, 30, 0, 277, 276, 1, 0, 0, 0, 278, 279, 1, 0, 0, 0, 279, 277, 1, 0, 0, 0, 279, 280, 1, 0, 0, 0, 280, 290, 1, 0, 
0, 0, 281, 282, 5, 48, 0, 0, 282, 283, 5, 120, 0, 0, 283, 285, 1, 0, 0, 0, 284, 286, 3, 65, 32, 0, 285, 284, 1, 0, 0, 0, 286, 287, 1, 0, 0, 0, 287, 285, 1, 0, 0, 0, 287, 288, 1, 0, 0, 0, 288, 290, 1, 0, 0, 0, 289, 277, 1, 0, 0, 0, 289, 281, 1, 0, 0, 0, 290, 86, 1, 0, 0, 0, 291, 293, 3, 61, 30, 0, 292, 291, 1, 0, 0, 0, 293, 294, 1, 0, 0, 0, 294, 292, 1, 0, 0, 0, 294, 295, 1, 0, 0, 0, 295, 296, 1, 0, 0, 0, 296, 297, 7, 9, 0, 0, 297, 309, 1, 0, 0, 0, 298, 299, 5, 48, 0, 0, 299, 300, 5, 120, 0, 0, 300, 302, 1, 0, 0, 0, 301, 303, 3, 65, 32, 0, 302, 301, 1, 0, 0, 0, 303, 304, 1, 0, 0, 0, 304, 302, 1, 0, 0, 0, 304, 305, 1, 0, 0, 0, 305, 306, 1, 0, 0, 0, 306, 307, 7, 9, 0, 0, 307, 309, 1, 0, 0, 0, 308, 292, 1, 0, 0, 0, 308, 298, 1, 0, 0, 0, 309, 88, 1, 0, 0, 0, 310, 315, 5, 34, 0, 0, 311, 314, 3, 69, 34, 0, 312, 314, 8, 10, 0, 0, 313, 311, 1, 0, 0, 0, 313, 312, 1, 0, 0, 0, 314, 317, 1, 0, 0, 0, 315, 313, 1, 0, 0, 0, 315, 316, 1, 0, 0, 0, 316, 318, 1, 0, 0, 0, 317, 315, 1, 0, 0, 0, 318, 407, 5, 34, 0, 0, 319, 324, 5, 39, 0, 0, 320, 323, 3, 69, 34, 0, 321, 323, 8, 11, 0, 0, 322, 320, 1, 0, 0, 0, 322, 321, 1, 0, 0, 0, 323, 326, 1, 0, 0, 0, 324, 322, 1, 0, 0, 0, 324, 325, 1, 0, 0, 0, 325, 327, 1, 0, 0, 0, 326, 324, 1, 0, 0, 0, 327, 407, 5, 39, 0, 0, 328, 329, 5, 34, 0, 0, 329, 330, 5, 34, 0, 0, 330, 331, 5, 34, 0, 0, 331, 336, 1, 0, 0, 0, 332, 335, 3, 69, 34, 0, 333, 335, 8, 12, 0, 0, 334, 332, 1, 0, 0, 0, 334, 333, 1, 0, 0, 0, 335, 338, 1, 0, 0, 0, 336, 337, 1, 0, 0, 0, 336, 334, 1, 0, 0, 0, 337, 339, 1, 0, 0, 0, 338, 336, 1, 0, 0, 0, 339, 340, 5, 34, 0, 0, 340, 341, 5, 34, 0, 0, 341, 407, 5, 34, 0, 0, 342, 343, 5, 39, 0, 0, 343, 344, 5, 39, 0, 0, 344, 345, 5, 39, 0, 0, 345, 350, 1, 0, 0, 0, 346, 349, 3, 69, 34, 0, 347, 349, 8, 12, 0, 0, 348, 346, 1, 0, 0, 0, 348, 347, 1, 0, 0, 0, 349, 352, 1, 0, 0, 0, 350, 351, 1, 0, 0, 0, 350, 348, 1, 0, 0, 0, 351, 353, 1, 0, 0, 0, 352, 350, 1, 0, 0, 0, 353, 354, 5, 39, 0, 0, 354, 355, 5, 39, 0, 0, 355, 407, 5, 39, 0, 0, 356, 357, 3, 67, 33, 0, 357, 361, 5, 34, 0, 0, 358, 360, 8, 13, 0, 0, 359, 358, 1, 0, 0, 0, 360, 363, 1, 0, 0, 0, 361, 359, 1, 0, 0, 0, 361, 362, 1, 0, 0, 0, 362, 364, 1, 0, 0, 0, 363, 361, 1, 0, 0, 0, 364, 365, 5, 34, 0, 0, 365, 407, 1, 0, 0, 0, 366, 367, 3, 67, 33, 0, 367, 371, 5, 39, 0, 0, 368, 370, 8, 14, 0, 0, 369, 368, 1, 0, 0, 0, 370, 373, 1, 0, 0, 0, 371, 369, 1, 0, 0, 0, 371, 372, 1, 0, 0, 0, 372, 374, 1, 0, 0, 0, 373, 371, 1, 0, 0, 0, 374, 375, 5, 39, 0, 0, 375, 407, 1, 0, 0, 0, 376, 377, 3, 67, 33, 0, 377, 378, 5, 34, 0, 0, 378, 379, 5, 34, 0, 0, 379, 380, 5, 34, 0, 0, 380, 384, 1, 0, 0, 0, 381, 383, 9, 0, 0, 0, 382, 381, 1, 0, 0, 0, 383, 386, 1, 0, 0, 0, 384, 385, 1, 0, 0, 0, 384, 382, 1, 0, 0, 0, 385, 387, 1, 0, 0, 0, 386, 384, 1, 0, 0, 0, 387, 388, 5, 34, 0, 0, 388, 389, 5, 34, 0, 0, 389, 390, 5, 34, 0, 0, 390, 407, 1, 0, 0, 0, 391, 392, 3, 67, 33, 0, 392, 393, 5, 39, 0, 0, 393, 394, 5, 39, 0, 0, 394, 395, 5, 39, 0, 0, 395, 399, 1, 0, 0, 0, 396, 398, 9, 0, 0, 0, 397, 396, 1, 0, 0, 0, 398, 401, 1, 0, 0, 0, 399, 400, 1, 0, 0, 0, 399, 397, 1, 0, 0, 0, 400, 402, 1, 0, 0, 0, 401, 399, 1, 0, 0, 0, 402, 403, 5, 39, 0, 0, 403, 404, 5, 39, 0, 0, 404, 405, 5, 39, 0, 0, 405, 407, 1, 0, 0, 0, 406, 310, 1, 0, 0, 0, 406, 319, 1, 0, 0, 0, 406, 328, 1, 0, 0, 0, 406, 342, 1, 0, 0, 0, 406, 356, 1, 0, 0, 0, 406, 366, 1, 0, 0, 0, 406, 376, 1, 0, 0, 0, 406, 391, 1, 0, 0, 0, 407, 90, 1, 0, 0, 0, 408, 409, 7, 15, 0, 0, 409, 410, 3, 89, 44, 0, 410, 92, 1, 0, 0, 0, 411, 414, 3, 59, 29, 0, 412, 414, 5, 95, 0, 0, 413, 411, 1, 0, 0, 0, 413, 412, 1, 0, 0, 0, 414, 
420, 1, 0, 0, 0, 415, 419, 3, 59, 29, 0, 416, 419, 3, 61, 30, 0, 417, 419, 5, 95, 0, 0, 418, 415, 1, 0, 0, 0, 418, 416, 1, 0, 0, 0, 418, 417, 1, 0, 0, 0, 419, 422, 1, 0, 0, 0, 420, 418, 1, 0, 0, 0, 420, 421, 1, 0, 0, 0, 421, 94, 1, 0, 0, 0, 422, 420, 1, 0, 0, 0, 36, 0, 176, 181, 191, 224, 229, 239, 247, 253, 256, 261, 269, 272, 274, 279, 287, 289, 294, 304, 308, 313, 315, 322, 324, 334, 336, 348, 350, 361, 371, 384, 399, 406, 413, 418, 420, 1, 0, 1, 0]
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
+[4, 0, 37, 435, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, 2, 47, 7, 47, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 8, 1, 9, 1, 9, 1, 10, 1, 10, 1, 11, 1, 11, 1, 12, 1, 12, 1, 13, 1, 13, 1, 14, 1, 14, 1, 15, 1, 15, 1, 16, 1, 16, 1, 17, 1, 17, 1, 18, 1, 18, 1, 19, 1, 19, 1, 20, 1, 20, 1, 21, 1, 21, 1, 22, 1, 22, 1, 23, 1, 23, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 29, 1, 29, 1, 30, 1, 30, 1, 31, 1, 31, 3, 31, 179, 8, 31, 1, 31, 4, 31, 182, 8, 31, 11, 31, 12, 31, 183, 1, 32, 1, 32, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1, 34, 3, 34, 194, 8, 34, 1, 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 36, 1, 36, 1, 36, 1, 37, 1, 37, 1, 37, 1, 37, 1, 37, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 3, 38, 227, 8, 38, 1, 39, 4, 39, 230, 8, 39, 11, 39, 12, 39, 231, 1, 39, 1, 39, 1, 40, 1, 40, 1, 40, 1, 40, 5, 40, 240, 8, 40, 10, 40, 12, 40, 243, 9, 40, 1, 40, 1, 40, 1, 41, 4, 41, 248, 8, 41, 11, 41, 12, 41, 249, 1, 41, 1, 41, 4, 41, 254, 8, 41, 11, 41, 12, 41, 255, 1, 41, 3, 41, 259, 8, 41, 1, 41, 4, 41, 262, 8, 41, 11, 41, 12, 41, 263, 1, 41, 1, 41, 1, 41, 1, 41, 4, 41, 270, 8, 41, 11, 41, 12, 41, 271, 1, 41, 3, 41, 275, 8, 41, 3, 41, 277, 8, 41, 1, 42, 4, 42, 280, 8, 42, 11, 42, 12, 42, 281, 1, 42, 1, 42, 1, 42, 1, 42, 4, 42, 288, 8, 42, 11, 42, 12, 42, 289, 3, 42, 292, 8, 42, 1, 43, 4, 43, 295, 8, 43, 11, 43, 12, 43, 296, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 4, 43, 305, 8, 43, 11, 43, 12, 43, 306, 1, 43, 1, 43, 3, 43, 311, 8, 43, 1, 44, 1, 44, 1, 44, 5, 44, 316, 8, 44, 10, 44, 12, 44, 319, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 325, 8, 44, 10, 44, 12, 44, 328, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 337, 8, 44, 10, 44, 12, 44, 340, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 351, 8, 44, 10, 44, 12, 44, 354, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 362, 8, 44, 10, 44, 12, 44, 365, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 372, 8, 44, 10, 44, 12, 44, 375, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 385, 8, 44, 10, 44, 12, 44, 388, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 400, 8, 44,
10, 44, 12, 44, 403, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 3, 44, 409, 8, 44, 1, 45, 1, 45, 1, 45, 1, 46, 1, 46, 3, 46, 416, 8, 46, 1, 46, 1, 46, 1, 46, 5, 46, 421, 8, 46, 10, 46, 12, 46, 424, 9, 46, 1, 47, 1, 47, 1, 47, 1, 47, 4, 47, 430, 8, 47, 11, 47, 12, 47, 431, 1, 47, 1, 47, 4, 338, 352, 386, 401, 0, 48, 1, 1, 3, 2, 5, 3, 7, 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27, 14, 29, 15, 31, 16, 33, 17, 35, 18, 37, 19, 39, 20, 41, 21, 43, 22, 45, 23, 47, 24, 49, 25, 51, 26, 53, 27, 55, 28, 57, 0, 59, 0, 61, 0, 63, 0, 65, 0, 67, 0, 69, 0, 71, 0, 73, 0, 75, 0, 77, 0, 79, 29, 81, 30, 83, 31, 85, 32, 87, 33, 89, 34, 91, 35, 93, 36, 95, 37, 1, 0, 17, 2, 0, 65, 90, 97, 122, 2, 0, 69, 69, 101, 101, 2, 0, 43, 43, 45, 45, 3, 0, 48, 57, 65, 70, 97, 102, 2, 0, 82, 82, 114, 114, 10, 0, 34, 34, 39, 39, 63, 63, 92, 92, 96, 98, 102, 102, 110, 110, 114, 114, 116, 116, 118, 118, 2, 0, 88, 88, 120, 120, 3, 0, 9, 10, 12, 13, 32, 32, 1, 0, 10, 10, 2, 0, 85, 85, 117, 117, 4, 0, 10, 10, 13, 13, 34, 34, 92, 92, 4, 0, 10, 10, 13, 13, 39, 39, 92, 92, 1, 0, 92, 92, 3, 0, 10, 10, 13, 13, 34, 34, 3, 0, 10, 10, 13, 13, 39, 39, 2, 0, 66, 66, 98, 98, 3, 0, 32, 32, 45, 47, 95, 95, 471, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55, 1, 0, 0, 0, 0, 79, 1, 0, 0, 0, 0, 81, 1, 0, 0, 0, 0, 83, 1, 0, 0, 0, 0, 85, 1, 0, 0, 0, 0, 87, 1, 0, 0, 0, 0, 89, 1, 0, 0, 0, 0, 91, 1, 0, 0, 0, 0, 93, 1, 0, 0, 0, 0, 95, 1, 0, 0, 0, 1, 97, 1, 0, 0, 0, 3, 100, 1, 0, 0, 0, 5, 103, 1, 0, 0, 0, 7, 106, 1, 0, 0, 0, 9, 108, 1, 0, 0, 0, 11, 111, 1, 0, 0, 0, 13, 114, 1, 0, 0, 0, 15, 116, 1, 0, 0, 0, 17, 119, 1, 0, 0, 0, 19, 122, 1, 0, 0, 0, 21, 124, 1, 0, 0, 0, 23, 126, 1, 0, 0, 0, 25, 128, 1, 0, 0, 0, 27, 130, 1, 0, 0, 0, 29, 132, 1, 0, 0, 0, 31, 134, 1, 0, 0, 0, 33, 136, 1, 0, 0, 0, 35, 138, 1, 0, 0, 0, 37, 140, 1, 0, 0, 0, 39, 142, 1, 0, 0, 0, 41, 144, 1, 0, 0, 0, 43, 146, 1, 0, 0, 0, 45, 148, 1, 0, 0, 0, 47, 150, 1, 0, 0, 0, 49, 152, 1, 0, 0, 0, 51, 154, 1, 0, 0, 0, 53, 159, 1, 0, 0, 0, 55, 165, 1, 0, 0, 0, 57, 170, 1, 0, 0, 0, 59, 172, 1, 0, 0, 0, 61, 174, 1, 0, 0, 0, 63, 176, 1, 0, 0, 0, 65, 185, 1, 0, 0, 0, 67, 187, 1, 0, 0, 0, 69, 193, 1, 0, 0, 0, 71, 195, 1, 0, 0, 0, 73, 198, 1, 0, 0, 0, 75, 203, 1, 0, 0, 0, 77, 226, 1, 0, 0, 0, 79, 229, 1, 0, 0, 0, 81, 235, 1, 0, 0, 0, 83, 276, 1, 0, 0, 0, 85, 291, 1, 0, 0, 0, 87, 310, 1, 0, 0, 0, 89, 408, 1, 0, 0, 0, 91, 410, 1, 0, 0, 0, 93, 415, 1, 0, 0, 0, 95, 425, 1, 0, 0, 0, 97, 98, 5, 61, 0, 0, 98, 99, 5, 61, 0, 0, 99, 2, 1, 0, 0, 0, 100, 101, 5, 33, 0, 0, 101, 102, 5, 61, 0, 0, 102, 4, 1, 0, 0, 0, 103, 104, 5, 105, 0, 0, 104, 105, 5, 110, 0, 0, 105, 6, 1, 0, 0, 0, 106, 107, 5, 60, 0, 0, 107, 8, 1, 0, 0, 0, 108, 109, 5, 60, 0, 0, 109, 110, 5, 61, 0, 0, 110, 10, 1, 0, 0, 0, 111, 112, 5, 62, 0, 0, 112, 113, 5, 61, 0, 0, 113, 12, 1, 0, 0, 0, 114, 115, 5, 62, 0, 0, 115, 14, 1, 0, 0, 0, 116, 117, 5, 38, 0, 0, 117, 118, 5, 38, 0, 0, 118, 16, 1, 0, 0, 0, 119, 120, 5, 124, 0, 0, 120, 121, 5, 124, 0, 0, 121, 18, 1, 0, 0, 0, 122, 123, 5, 91, 0, 0, 123, 20, 1, 0, 0, 0, 124, 125, 5, 93, 0, 0, 125, 22, 1, 0, 0, 0, 126, 127, 5, 123, 
0, 0, 127, 24, 1, 0, 0, 0, 128, 129, 5, 125, 0, 0, 129, 26, 1, 0, 0, 0, 130, 131, 5, 40, 0, 0, 131, 28, 1, 0, 0, 0, 132, 133, 5, 41, 0, 0, 133, 30, 1, 0, 0, 0, 134, 135, 5, 46, 0, 0, 135, 32, 1, 0, 0, 0, 136, 137, 5, 44, 0, 0, 137, 34, 1, 0, 0, 0, 138, 139, 5, 45, 0, 0, 139, 36, 1, 0, 0, 0, 140, 141, 5, 33, 0, 0, 141, 38, 1, 0, 0, 0, 142, 143, 5, 63, 0, 0, 143, 40, 1, 0, 0, 0, 144, 145, 5, 58, 0, 0, 145, 42, 1, 0, 0, 0, 146, 147, 5, 43, 0, 0, 147, 44, 1, 0, 0, 0, 148, 149, 5, 42, 0, 0, 149, 46, 1, 0, 0, 0, 150, 151, 5, 47, 0, 0, 151, 48, 1, 0, 0, 0, 152, 153, 5, 37, 0, 0, 153, 50, 1, 0, 0, 0, 154, 155, 5, 116, 0, 0, 155, 156, 5, 114, 0, 0, 156, 157, 5, 117, 0, 0, 157, 158, 5, 101, 0, 0, 158, 52, 1, 0, 0, 0, 159, 160, 5, 102, 0, 0, 160, 161, 5, 97, 0, 0, 161, 162, 5, 108, 0, 0, 162, 163, 5, 115, 0, 0, 163, 164, 5, 101, 0, 0, 164, 54, 1, 0, 0, 0, 165, 166, 5, 110, 0, 0, 166, 167, 5, 117, 0, 0, 167, 168, 5, 108, 0, 0, 168, 169, 5, 108, 0, 0, 169, 56, 1, 0, 0, 0, 170, 171, 5, 92, 0, 0, 171, 58, 1, 0, 0, 0, 172, 173, 7, 0, 0, 0, 173, 60, 1, 0, 0, 0, 174, 175, 2, 48, 57, 0, 175, 62, 1, 0, 0, 0, 176, 178, 7, 1, 0, 0, 177, 179, 7, 2, 0, 0, 178, 177, 1, 0, 0, 0, 178, 179, 1, 0, 0, 0, 179, 181, 1, 0, 0, 0, 180, 182, 3, 61, 30, 0, 181, 180, 1, 0, 0, 0, 182, 183, 1, 0, 0, 0, 183, 181, 1, 0, 0, 0, 183, 184, 1, 0, 0, 0, 184, 64, 1, 0, 0, 0, 185, 186, 7, 3, 0, 0, 186, 66, 1, 0, 0, 0, 187, 188, 7, 4, 0, 0, 188, 68, 1, 0, 0, 0, 189, 194, 3, 71, 35, 0, 190, 194, 3, 75, 37, 0, 191, 194, 3, 77, 38, 0, 192, 194, 3, 73, 36, 0, 193, 189, 1, 0, 0, 0, 193, 190, 1, 0, 0, 0, 193, 191, 1, 0, 0, 0, 193, 192, 1, 0, 0, 0, 194, 70, 1, 0, 0, 0, 195, 196, 3, 57, 28, 0, 196, 197, 7, 5, 0, 0, 197, 72, 1, 0, 0, 0, 198, 199, 3, 57, 28, 0, 199, 200, 2, 48, 51, 0, 200, 201, 2, 48, 55, 0, 201, 202, 2, 48, 55, 0, 202, 74, 1, 0, 0, 0, 203, 204, 3, 57, 28, 0, 204, 205, 7, 6, 0, 0, 205, 206, 3, 65, 32, 0, 206, 207, 3, 65, 32, 0, 207, 76, 1, 0, 0, 0, 208, 209, 3, 57, 28, 0, 209, 210, 5, 117, 0, 0, 210, 211, 3, 65, 32, 0, 211, 212, 3, 65, 32, 0, 212, 213, 3, 65, 32, 0, 213, 214, 3, 65, 32, 0, 214, 227, 1, 0, 0, 0, 215, 216, 3, 57, 28, 0, 216, 217, 5, 85, 0, 0, 217, 218, 3, 65, 32, 0, 218, 219, 3, 65, 32, 0, 219, 220, 3, 65, 32, 0, 220, 221, 3, 65, 32, 0, 221, 222, 3, 65, 32, 0, 222, 223, 3, 65, 32, 0, 223, 224, 3, 65, 32, 0, 224, 225, 3, 65, 32, 0, 225, 227, 1, 0, 0, 0, 226, 208, 1, 0, 0, 0, 226, 215, 1, 0, 0, 0, 227, 78, 1, 0, 0, 0, 228, 230, 7, 7, 0, 0, 229, 228, 1, 0, 0, 0, 230, 231, 1, 0, 0, 0, 231, 229, 1, 0, 0, 0, 231, 232, 1, 0, 0, 0, 232, 233, 1, 0, 0, 0, 233, 234, 6, 39, 0, 0, 234, 80, 1, 0, 0, 0, 235, 236, 5, 47, 0, 0, 236, 237, 5, 47, 0, 0, 237, 241, 1, 0, 0, 0, 238, 240, 8, 8, 0, 0, 239, 238, 1, 0, 0, 0, 240, 243, 1, 0, 0, 0, 241, 239, 1, 0, 0, 0, 241, 242, 1, 0, 0, 0, 242, 244, 1, 0, 0, 0, 243, 241, 1, 0, 0, 0, 244, 245, 6, 40, 0, 0, 245, 82, 1, 0, 0, 0, 246, 248, 3, 61, 30, 0, 247, 246, 1, 0, 0, 0, 248, 249, 1, 0, 0, 0, 249, 247, 1, 0, 0, 0, 249, 250, 1, 0, 0, 0, 250, 251, 1, 0, 0, 0, 251, 253, 5, 46, 0, 0, 252, 254, 3, 61, 30, 0, 253, 252, 1, 0, 0, 0, 254, 255, 1, 0, 0, 0, 255, 253, 1, 0, 0, 0, 255, 256, 1, 0, 0, 0, 256, 258, 1, 0, 0, 0, 257, 259, 3, 63, 31, 0, 258, 257, 1, 0, 0, 0, 258, 259, 1, 0, 0, 0, 259, 277, 1, 0, 0, 0, 260, 262, 3, 61, 30, 0, 261, 260, 1, 0, 0, 0, 262, 263, 1, 0, 0, 0, 263, 261, 1, 0, 0, 0, 263, 264, 1, 0, 0, 0, 264, 265, 1, 0, 0, 0, 265, 266, 3, 63, 31, 0, 266, 277, 1, 0, 0, 0, 267, 269, 5, 46, 0, 0, 268, 270, 3, 61, 30, 0, 269, 268, 1, 0, 0, 0, 270, 271, 1, 0, 0, 0, 271, 269, 1, 0, 0, 0, 271, 
272, 1, 0, 0, 0, 272, 274, 1, 0, 0, 0, 273, 275, 3, 63, 31, 0, 274, 273, 1, 0, 0, 0, 274, 275, 1, 0, 0, 0, 275, 277, 1, 0, 0, 0, 276, 247, 1, 0, 0, 0, 276, 261, 1, 0, 0, 0, 276, 267, 1, 0, 0, 0, 277, 84, 1, 0, 0, 0, 278, 280, 3, 61, 30, 0, 279, 278, 1, 0, 0, 0, 280, 281, 1, 0, 0, 0, 281, 279, 1, 0, 0, 0, 281, 282, 1, 0, 0, 0, 282, 292, 1, 0, 0, 0, 283, 284, 5, 48, 0, 0, 284, 285, 5, 120, 0, 0, 285, 287, 1, 0, 0, 0, 286, 288, 3, 65, 32, 0, 287, 286, 1, 0, 0, 0, 288, 289, 1, 0, 0, 0, 289, 287, 1, 0, 0, 0, 289, 290, 1, 0, 0, 0, 290, 292, 1, 0, 0, 0, 291, 279, 1, 0, 0, 0, 291, 283, 1, 0, 0, 0, 292, 86, 1, 0, 0, 0, 293, 295, 3, 61, 30, 0, 294, 293, 1, 0, 0, 0, 295, 296, 1, 0, 0, 0, 296, 294, 1, 0, 0, 0, 296, 297, 1, 0, 0, 0, 297, 298, 1, 0, 0, 0, 298, 299, 7, 9, 0, 0, 299, 311, 1, 0, 0, 0, 300, 301, 5, 48, 0, 0, 301, 302, 5, 120, 0, 0, 302, 304, 1, 0, 0, 0, 303, 305, 3, 65, 32, 0, 304, 303, 1, 0, 0, 0, 305, 306, 1, 0, 0, 0, 306, 304, 1, 0, 0, 0, 306, 307, 1, 0, 0, 0, 307, 308, 1, 0, 0, 0, 308, 309, 7, 9, 0, 0, 309, 311, 1, 0, 0, 0, 310, 294, 1, 0, 0, 0, 310, 300, 1, 0, 0, 0, 311, 88, 1, 0, 0, 0, 312, 317, 5, 34, 0, 0, 313, 316, 3, 69, 34, 0, 314, 316, 8, 10, 0, 0, 315, 313, 1, 0, 0, 0, 315, 314, 1, 0, 0, 0, 316, 319, 1, 0, 0, 0, 317, 315, 1, 0, 0, 0, 317, 318, 1, 0, 0, 0, 318, 320, 1, 0, 0, 0, 319, 317, 1, 0, 0, 0, 320, 409, 5, 34, 0, 0, 321, 326, 5, 39, 0, 0, 322, 325, 3, 69, 34, 0, 323, 325, 8, 11, 0, 0, 324, 322, 1, 0, 0, 0, 324, 323, 1, 0, 0, 0, 325, 328, 1, 0, 0, 0, 326, 324, 1, 0, 0, 0, 326, 327, 1, 0, 0, 0, 327, 329, 1, 0, 0, 0, 328, 326, 1, 0, 0, 0, 329, 409, 5, 39, 0, 0, 330, 331, 5, 34, 0, 0, 331, 332, 5, 34, 0, 0, 332, 333, 5, 34, 0, 0, 333, 338, 1, 0, 0, 0, 334, 337, 3, 69, 34, 0, 335, 337, 8, 12, 0, 0, 336, 334, 1, 0, 0, 0, 336, 335, 1, 0, 0, 0, 337, 340, 1, 0, 0, 0, 338, 339, 1, 0, 0, 0, 338, 336, 1, 0, 0, 0, 339, 341, 1, 0, 0, 0, 340, 338, 1, 0, 0, 0, 341, 342, 5, 34, 0, 0, 342, 343, 5, 34, 0, 0, 343, 409, 5, 34, 0, 0, 344, 345, 5, 39, 0, 0, 345, 346, 5, 39, 0, 0, 346, 347, 5, 39, 0, 0, 347, 352, 1, 0, 0, 0, 348, 351, 3, 69, 34, 0, 349, 351, 8, 12, 0, 0, 350, 348, 1, 0, 0, 0, 350, 349, 1, 0, 0, 0, 351, 354, 1, 0, 0, 0, 352, 353, 1, 0, 0, 0, 352, 350, 1, 0, 0, 0, 353, 355, 1, 0, 0, 0, 354, 352, 1, 0, 0, 0, 355, 356, 5, 39, 0, 0, 356, 357, 5, 39, 0, 0, 357, 409, 5, 39, 0, 0, 358, 359, 3, 67, 33, 0, 359, 363, 5, 34, 0, 0, 360, 362, 8, 13, 0, 0, 361, 360, 1, 0, 0, 0, 362, 365, 1, 0, 0, 0, 363, 361, 1, 0, 0, 0, 363, 364, 1, 0, 0, 0, 364, 366, 1, 0, 0, 0, 365, 363, 1, 0, 0, 0, 366, 367, 5, 34, 0, 0, 367, 409, 1, 0, 0, 0, 368, 369, 3, 67, 33, 0, 369, 373, 5, 39, 0, 0, 370, 372, 8, 14, 0, 0, 371, 370, 1, 0, 0, 0, 372, 375, 1, 0, 0, 0, 373, 371, 1, 0, 0, 0, 373, 374, 1, 0, 0, 0, 374, 376, 1, 0, 0, 0, 375, 373, 1, 0, 0, 0, 376, 377, 5, 39, 0, 0, 377, 409, 1, 0, 0, 0, 378, 379, 3, 67, 33, 0, 379, 380, 5, 34, 0, 0, 380, 381, 5, 34, 0, 0, 381, 382, 5, 34, 0, 0, 382, 386, 1, 0, 0, 0, 383, 385, 9, 0, 0, 0, 384, 383, 1, 0, 0, 0, 385, 388, 1, 0, 0, 0, 386, 387, 1, 0, 0, 0, 386, 384, 1, 0, 0, 0, 387, 389, 1, 0, 0, 0, 388, 386, 1, 0, 0, 0, 389, 390, 5, 34, 0, 0, 390, 391, 5, 34, 0, 0, 391, 392, 5, 34, 0, 0, 392, 409, 1, 0, 0, 0, 393, 394, 3, 67, 33, 0, 394, 395, 5, 39, 0, 0, 395, 396, 5, 39, 0, 0, 396, 397, 5, 39, 0, 0, 397, 401, 1, 0, 0, 0, 398, 400, 9, 0, 0, 0, 399, 398, 1, 0, 0, 0, 400, 403, 1, 0, 0, 0, 401, 402, 1, 0, 0, 0, 401, 399, 1, 0, 0, 0, 402, 404, 1, 0, 0, 0, 403, 401, 1, 0, 0, 0, 404, 405, 5, 39, 0, 0, 405, 406, 5, 39, 0, 0, 406, 407, 5, 39, 0, 0, 407, 409, 1, 0, 0, 0, 408, 312, 1, 0, 
0, 0, 408, 321, 1, 0, 0, 0, 408, 330, 1, 0, 0, 0, 408, 344, 1, 0, 0, 0, 408, 358, 1, 0, 0, 0, 408, 368, 1, 0, 0, 0, 408, 378, 1, 0, 0, 0, 408, 393, 1, 0, 0, 0, 409, 90, 1, 0, 0, 0, 410, 411, 7, 15, 0, 0, 411, 412, 3, 89, 44, 0, 412, 92, 1, 0, 0, 0, 413, 416, 3, 59, 29, 0, 414, 416, 5, 95, 0, 0, 415, 413, 1, 0, 0, 0, 415, 414, 1, 0, 0, 0, 416, 422, 1, 0, 0, 0, 417, 421, 3, 59, 29, 0, 418, 421, 3, 61, 30, 0, 419, 421, 5, 95, 0, 0, 420, 417, 1, 0, 0, 0, 420, 418, 1, 0, 0, 0, 420, 419, 1, 0, 0, 0, 421, 424, 1, 0, 0, 0, 422, 420, 1, 0, 0, 0, 422, 423, 1, 0, 0, 0, 423, 94, 1, 0, 0, 0, 424, 422, 1, 0, 0, 0, 425, 429, 5, 96, 0, 0, 426, 430, 3, 59, 29, 0, 427, 430, 3, 61, 30, 0, 428, 430, 7, 16, 0, 0, 429, 426, 1, 0, 0, 0, 429, 427, 1, 0, 0, 0, 429, 428, 1, 0, 0, 0, 430, 431, 1, 0, 0, 0, 431, 429, 1, 0, 0, 0, 431, 432, 1, 0, 0, 0, 432, 433, 1, 0, 0, 0, 433, 434, 5, 96, 0, 0, 434, 96, 1, 0, 0, 0, 38, 0, 178, 183, 193, 226, 231, 241, 249, 255, 258, 263, 271, 274, 276, 281, 289, 291, 296, 306, 310, 315, 317, 324, 326, 336, 338, 350, 352, 363, 373, 386, 401, 408, 415, 420, 422, 429, 431, 1, 0, 1, 0] \ No newline at end of file diff --git a/vendor/github.com/google/cel-go/parser/gen/CELLexer.tokens b/vendor/github.com/google/cel-go/parser/gen/CELLexer.tokens index b4fbf00e51..aa1f5eee6f 100644 --- a/vendor/github.com/google/cel-go/parser/gen/CELLexer.tokens +++ b/vendor/github.com/google/cel-go/parser/gen/CELLexer.tokens @@ -34,10 +34,7 @@ NUM_UINT=33 STRING=34 BYTES=35 IDENTIFIER=36 -<<<<<<< HEAD ESC_IDENTIFIER=37 -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) '=='=1 '!='=2 'in'=3 diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go b/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go index acdcd41577..514f2082fe 100644 --- a/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go +++ b/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go @@ -1,8 +1,4 @@ -<<<<<<< HEAD // Code generated from /usr/local/google/home/jdtatum/github/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT. -======= -// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) package gen // CEL import "github.com/antlr4-go/antlr/v4" @@ -102,7 +98,6 @@ func (s *BaseCELListener) EnterIndex(ctx *IndexContext) {} // ExitIndex is called when production Index is exited. func (s *BaseCELListener) ExitIndex(ctx *IndexContext) {} -<<<<<<< HEAD // EnterIdent is called when production Ident is entered. func (s *BaseCELListener) EnterIdent(ctx *IdentContext) {} @@ -114,13 +109,6 @@ func (s *BaseCELListener) EnterGlobalCall(ctx *GlobalCallContext) {} // ExitGlobalCall is called when production GlobalCall is exited. func (s *BaseCELListener) ExitGlobalCall(ctx *GlobalCallContext) {} -======= -// EnterIdentOrGlobalCall is called when production IdentOrGlobalCall is entered. -func (s *BaseCELListener) EnterIdentOrGlobalCall(ctx *IdentOrGlobalCallContext) {} - -// ExitIdentOrGlobalCall is called when production IdentOrGlobalCall is exited. -func (s *BaseCELListener) ExitIdentOrGlobalCall(ctx *IdentOrGlobalCallContext) {} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // EnterNested is called when production Nested is entered. 
func (s *BaseCELListener) EnterNested(ctx *NestedContext) {}
@@ -182,7 +170,6 @@ func (s *BaseCELListener) EnterMapInitializerList(ctx *MapInitializerListContext
// ExitMapInitializerList is called when production mapInitializerList is exited.
func (s *BaseCELListener) ExitMapInitializerList(ctx *MapInitializerListContext) {}

-<<<<<<< HEAD
// EnterSimpleIdentifier is called when production SimpleIdentifier is entered.
func (s *BaseCELListener) EnterSimpleIdentifier(ctx *SimpleIdentifierContext) {}

@@ -195,8 +182,6 @@ func (s *BaseCELListener) EnterEscapedIdentifier(ctx *EscapedIdentifierContext)
// ExitEscapedIdentifier is called when production EscapedIdentifier is exited.
func (s *BaseCELListener) ExitEscapedIdentifier(ctx *EscapedIdentifierContext) {}

-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

// EnterOptExpr is called when production optExpr is entered.
func (s *BaseCELListener) EnterOptExpr(ctx *OptExprContext) {}
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go b/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go
index 2b156faf16..8a12cb65e3 100644
--- a/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go
@@ -1,16 +1,8 @@
-<<<<<<< HEAD
// Code generated from /usr/local/google/home/jdtatum/github/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT.
-=======
-// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

package gen // CEL
import "github.com/antlr4-go/antlr/v4"

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
type BaseCELVisitor struct {
	*antlr.BaseParseTreeVisitor
}
@@ -67,15 +59,11 @@ func (v *BaseCELVisitor) VisitIndex(ctx *IndexContext) interface{} {
	return v.VisitChildren(ctx)
}

-<<<<<<< HEAD
func (v *BaseCELVisitor) VisitIdent(ctx *IdentContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BaseCELVisitor) VisitGlobalCall(ctx *GlobalCallContext) interface{} {
-=======
-func (v *BaseCELVisitor) VisitIdentOrGlobalCall(ctx *IdentOrGlobalCallContext) interface{} {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
	return v.VisitChildren(ctx)
}

@@ -119,7 +107,6 @@ func (v *BaseCELVisitor) VisitMapInitializerList(ctx *MapInitializerListContext)
	return v.VisitChildren(ctx)
}

-<<<<<<< HEAD
func (v *BaseCELVisitor) VisitSimpleIdentifier(ctx *SimpleIdentifierContext) interface{} {
	return v.VisitChildren(ctx)
}

@@ -128,8 +115,6 @@ func (v *BaseCELVisitor) VisitEscapedIdentifier(ctx *EscapedIdentifierContext) i
	return v.VisitChildren(ctx)
}

-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
func (v *BaseCELVisitor) VisitOptExpr(ctx *OptExprContext) interface{} {
	return v.VisitChildren(ctx)
}
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go b/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go
index dec4b1e458..896562f5fb 100644
--- a/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go
@@ -1,4 +1,3 @@
-<<<<<<< HEAD
// Code generated from /usr/local/google/home/jdtatum/github/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT.

package gen
@@ -10,39 +9,19 @@ import (
	"unicode"
)

-=======
-// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT.
- -package gen -import ( - "fmt" - "sync" - "unicode" - "github.com/antlr4-go/antlr/v4" -) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Suppress unused import error var _ = fmt.Printf var _ = sync.Once{} var _ = unicode.IsLetter -<<<<<<< HEAD type CELLexer struct { *antlr.BaseLexer channelNames []string modeNames []string -======= - -type CELLexer struct { - *antlr.BaseLexer - channelNames []string - modeNames []string ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // TODO: EOF string } var CELLexerLexerStaticData struct { -<<<<<<< HEAD once sync.Once serializedATN []int32 ChannelNames []string @@ -301,260 +280,6 @@ func cellexerLexerInit() { for index, state := range atn.DecisionToState { decisionToDFA[index] = antlr.NewDFA(state, index) } -======= - once sync.Once - serializedATN []int32 - ChannelNames []string - ModeNames []string - LiteralNames []string - SymbolicNames []string - RuleNames []string - PredictionContextCache *antlr.PredictionContextCache - atn *antlr.ATN - decisionToDFA []*antlr.DFA -} - -func cellexerLexerInit() { - staticData := &CELLexerLexerStaticData - staticData.ChannelNames = []string{ - "DEFAULT_TOKEN_CHANNEL", "HIDDEN", - } - staticData.ModeNames = []string{ - "DEFAULT_MODE", - } - staticData.LiteralNames = []string{ - "", "'=='", "'!='", "'in'", "'<'", "'<='", "'>='", "'>'", "'&&'", "'||'", - "'['", "']'", "'{'", "'}'", "'('", "')'", "'.'", "','", "'-'", "'!'", - "'?'", "':'", "'+'", "'*'", "'/'", "'%'", "'true'", "'false'", "'null'", - } - staticData.SymbolicNames = []string{ - "", "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS", - "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE", - "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK", - "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE", - "NUL", "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT", - "STRING", "BYTES", "IDENTIFIER", - } - staticData.RuleNames = []string{ - "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS", - "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE", - "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK", - "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE", - "NUL", "BACKSLASH", "LETTER", "DIGIT", "EXPONENT", "HEXDIGIT", "RAW", - "ESC_SEQ", "ESC_CHAR_SEQ", "ESC_OCT_SEQ", "ESC_BYTE_SEQ", "ESC_UNI_SEQ", - "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT", "STRING", - "BYTES", "IDENTIFIER", - } - staticData.PredictionContextCache = antlr.NewPredictionContextCache() - staticData.serializedATN = []int32{ - 4, 0, 36, 423, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, - 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, - 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, - 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, - 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, - 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, - 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, - 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, - 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, - 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4, - 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, - 1, 8, 1, 9, 1, 9, 1, 10, 1, 10, 1, 11, 1, 11, 1, 12, 
1, 12, 1, 13, 1, 13, - 1, 14, 1, 14, 1, 15, 1, 15, 1, 16, 1, 16, 1, 17, 1, 17, 1, 18, 1, 18, 1, - 19, 1, 19, 1, 20, 1, 20, 1, 21, 1, 21, 1, 22, 1, 22, 1, 23, 1, 23, 1, 24, - 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, - 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 29, 1, 29, - 1, 30, 1, 30, 1, 31, 1, 31, 3, 31, 177, 8, 31, 1, 31, 4, 31, 180, 8, 31, - 11, 31, 12, 31, 181, 1, 32, 1, 32, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1, - 34, 3, 34, 192, 8, 34, 1, 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 36, 1, 36, - 1, 36, 1, 37, 1, 37, 1, 37, 1, 37, 1, 37, 1, 38, 1, 38, 1, 38, 1, 38, 1, - 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, - 1, 38, 1, 38, 1, 38, 3, 38, 225, 8, 38, 1, 39, 4, 39, 228, 8, 39, 11, 39, - 12, 39, 229, 1, 39, 1, 39, 1, 40, 1, 40, 1, 40, 1, 40, 5, 40, 238, 8, 40, - 10, 40, 12, 40, 241, 9, 40, 1, 40, 1, 40, 1, 41, 4, 41, 246, 8, 41, 11, - 41, 12, 41, 247, 1, 41, 1, 41, 4, 41, 252, 8, 41, 11, 41, 12, 41, 253, - 1, 41, 3, 41, 257, 8, 41, 1, 41, 4, 41, 260, 8, 41, 11, 41, 12, 41, 261, - 1, 41, 1, 41, 1, 41, 1, 41, 4, 41, 268, 8, 41, 11, 41, 12, 41, 269, 1, - 41, 3, 41, 273, 8, 41, 3, 41, 275, 8, 41, 1, 42, 4, 42, 278, 8, 42, 11, - 42, 12, 42, 279, 1, 42, 1, 42, 1, 42, 1, 42, 4, 42, 286, 8, 42, 11, 42, - 12, 42, 287, 3, 42, 290, 8, 42, 1, 43, 4, 43, 293, 8, 43, 11, 43, 12, 43, - 294, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 4, 43, 303, 8, 43, 11, 43, - 12, 43, 304, 1, 43, 1, 43, 3, 43, 309, 8, 43, 1, 44, 1, 44, 1, 44, 5, 44, - 314, 8, 44, 10, 44, 12, 44, 317, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, - 44, 323, 8, 44, 10, 44, 12, 44, 326, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, - 1, 44, 1, 44, 1, 44, 5, 44, 335, 8, 44, 10, 44, 12, 44, 338, 9, 44, 1, - 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 349, - 8, 44, 10, 44, 12, 44, 352, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, - 44, 5, 44, 360, 8, 44, 10, 44, 12, 44, 363, 9, 44, 1, 44, 1, 44, 1, 44, - 1, 44, 1, 44, 5, 44, 370, 8, 44, 10, 44, 12, 44, 373, 9, 44, 1, 44, 1, - 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 383, 8, 44, 10, 44, - 12, 44, 386, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, - 44, 1, 44, 1, 44, 5, 44, 398, 8, 44, 10, 44, 12, 44, 401, 9, 44, 1, 44, - 1, 44, 1, 44, 1, 44, 3, 44, 407, 8, 44, 1, 45, 1, 45, 1, 45, 1, 46, 1, - 46, 3, 46, 414, 8, 46, 1, 46, 1, 46, 1, 46, 5, 46, 419, 8, 46, 10, 46, - 12, 46, 422, 9, 46, 4, 336, 350, 384, 399, 0, 47, 1, 1, 3, 2, 5, 3, 7, - 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27, - 14, 29, 15, 31, 16, 33, 17, 35, 18, 37, 19, 39, 20, 41, 21, 43, 22, 45, - 23, 47, 24, 49, 25, 51, 26, 53, 27, 55, 28, 57, 0, 59, 0, 61, 0, 63, 0, - 65, 0, 67, 0, 69, 0, 71, 0, 73, 0, 75, 0, 77, 0, 79, 29, 81, 30, 83, 31, - 85, 32, 87, 33, 89, 34, 91, 35, 93, 36, 1, 0, 16, 2, 0, 65, 90, 97, 122, - 2, 0, 69, 69, 101, 101, 2, 0, 43, 43, 45, 45, 3, 0, 48, 57, 65, 70, 97, - 102, 2, 0, 82, 82, 114, 114, 10, 0, 34, 34, 39, 39, 63, 63, 92, 92, 96, - 98, 102, 102, 110, 110, 114, 114, 116, 116, 118, 118, 2, 0, 88, 88, 120, - 120, 3, 0, 9, 10, 12, 13, 32, 32, 1, 0, 10, 10, 2, 0, 85, 85, 117, 117, - 4, 0, 10, 10, 13, 13, 34, 34, 92, 92, 4, 0, 10, 10, 13, 13, 39, 39, 92, - 92, 1, 0, 92, 92, 3, 0, 10, 10, 13, 13, 34, 34, 3, 0, 10, 10, 13, 13, 39, - 39, 2, 0, 66, 66, 98, 98, 456, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, - 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, - 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, - 21, 1, 0, 0, 0, 0, 23, 1, 0, 
0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, - 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, - 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, - 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, - 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55, 1, 0, 0, 0, 0, 79, 1, 0, 0, 0, 0, 81, - 1, 0, 0, 0, 0, 83, 1, 0, 0, 0, 0, 85, 1, 0, 0, 0, 0, 87, 1, 0, 0, 0, 0, - 89, 1, 0, 0, 0, 0, 91, 1, 0, 0, 0, 0, 93, 1, 0, 0, 0, 1, 95, 1, 0, 0, 0, - 3, 98, 1, 0, 0, 0, 5, 101, 1, 0, 0, 0, 7, 104, 1, 0, 0, 0, 9, 106, 1, 0, - 0, 0, 11, 109, 1, 0, 0, 0, 13, 112, 1, 0, 0, 0, 15, 114, 1, 0, 0, 0, 17, - 117, 1, 0, 0, 0, 19, 120, 1, 0, 0, 0, 21, 122, 1, 0, 0, 0, 23, 124, 1, - 0, 0, 0, 25, 126, 1, 0, 0, 0, 27, 128, 1, 0, 0, 0, 29, 130, 1, 0, 0, 0, - 31, 132, 1, 0, 0, 0, 33, 134, 1, 0, 0, 0, 35, 136, 1, 0, 0, 0, 37, 138, - 1, 0, 0, 0, 39, 140, 1, 0, 0, 0, 41, 142, 1, 0, 0, 0, 43, 144, 1, 0, 0, - 0, 45, 146, 1, 0, 0, 0, 47, 148, 1, 0, 0, 0, 49, 150, 1, 0, 0, 0, 51, 152, - 1, 0, 0, 0, 53, 157, 1, 0, 0, 0, 55, 163, 1, 0, 0, 0, 57, 168, 1, 0, 0, - 0, 59, 170, 1, 0, 0, 0, 61, 172, 1, 0, 0, 0, 63, 174, 1, 0, 0, 0, 65, 183, - 1, 0, 0, 0, 67, 185, 1, 0, 0, 0, 69, 191, 1, 0, 0, 0, 71, 193, 1, 0, 0, - 0, 73, 196, 1, 0, 0, 0, 75, 201, 1, 0, 0, 0, 77, 224, 1, 0, 0, 0, 79, 227, - 1, 0, 0, 0, 81, 233, 1, 0, 0, 0, 83, 274, 1, 0, 0, 0, 85, 289, 1, 0, 0, - 0, 87, 308, 1, 0, 0, 0, 89, 406, 1, 0, 0, 0, 91, 408, 1, 0, 0, 0, 93, 413, - 1, 0, 0, 0, 95, 96, 5, 61, 0, 0, 96, 97, 5, 61, 0, 0, 97, 2, 1, 0, 0, 0, - 98, 99, 5, 33, 0, 0, 99, 100, 5, 61, 0, 0, 100, 4, 1, 0, 0, 0, 101, 102, - 5, 105, 0, 0, 102, 103, 5, 110, 0, 0, 103, 6, 1, 0, 0, 0, 104, 105, 5, - 60, 0, 0, 105, 8, 1, 0, 0, 0, 106, 107, 5, 60, 0, 0, 107, 108, 5, 61, 0, - 0, 108, 10, 1, 0, 0, 0, 109, 110, 5, 62, 0, 0, 110, 111, 5, 61, 0, 0, 111, - 12, 1, 0, 0, 0, 112, 113, 5, 62, 0, 0, 113, 14, 1, 0, 0, 0, 114, 115, 5, - 38, 0, 0, 115, 116, 5, 38, 0, 0, 116, 16, 1, 0, 0, 0, 117, 118, 5, 124, - 0, 0, 118, 119, 5, 124, 0, 0, 119, 18, 1, 0, 0, 0, 120, 121, 5, 91, 0, - 0, 121, 20, 1, 0, 0, 0, 122, 123, 5, 93, 0, 0, 123, 22, 1, 0, 0, 0, 124, - 125, 5, 123, 0, 0, 125, 24, 1, 0, 0, 0, 126, 127, 5, 125, 0, 0, 127, 26, - 1, 0, 0, 0, 128, 129, 5, 40, 0, 0, 129, 28, 1, 0, 0, 0, 130, 131, 5, 41, - 0, 0, 131, 30, 1, 0, 0, 0, 132, 133, 5, 46, 0, 0, 133, 32, 1, 0, 0, 0, - 134, 135, 5, 44, 0, 0, 135, 34, 1, 0, 0, 0, 136, 137, 5, 45, 0, 0, 137, - 36, 1, 0, 0, 0, 138, 139, 5, 33, 0, 0, 139, 38, 1, 0, 0, 0, 140, 141, 5, - 63, 0, 0, 141, 40, 1, 0, 0, 0, 142, 143, 5, 58, 0, 0, 143, 42, 1, 0, 0, - 0, 144, 145, 5, 43, 0, 0, 145, 44, 1, 0, 0, 0, 146, 147, 5, 42, 0, 0, 147, - 46, 1, 0, 0, 0, 148, 149, 5, 47, 0, 0, 149, 48, 1, 0, 0, 0, 150, 151, 5, - 37, 0, 0, 151, 50, 1, 0, 0, 0, 152, 153, 5, 116, 0, 0, 153, 154, 5, 114, - 0, 0, 154, 155, 5, 117, 0, 0, 155, 156, 5, 101, 0, 0, 156, 52, 1, 0, 0, - 0, 157, 158, 5, 102, 0, 0, 158, 159, 5, 97, 0, 0, 159, 160, 5, 108, 0, - 0, 160, 161, 5, 115, 0, 0, 161, 162, 5, 101, 0, 0, 162, 54, 1, 0, 0, 0, - 163, 164, 5, 110, 0, 0, 164, 165, 5, 117, 0, 0, 165, 166, 5, 108, 0, 0, - 166, 167, 5, 108, 0, 0, 167, 56, 1, 0, 0, 0, 168, 169, 5, 92, 0, 0, 169, - 58, 1, 0, 0, 0, 170, 171, 7, 0, 0, 0, 171, 60, 1, 0, 0, 0, 172, 173, 2, - 48, 57, 0, 173, 62, 1, 0, 0, 0, 174, 176, 7, 1, 0, 0, 175, 177, 7, 2, 0, - 0, 176, 175, 1, 0, 0, 0, 176, 177, 1, 0, 0, 0, 177, 179, 1, 0, 0, 0, 178, - 180, 3, 61, 30, 0, 179, 178, 1, 0, 0, 0, 180, 181, 1, 0, 0, 0, 181, 179, - 1, 0, 0, 0, 181, 182, 1, 0, 0, 0, 182, 64, 1, 0, 0, 0, 183, 184, 7, 
3, - 0, 0, 184, 66, 1, 0, 0, 0, 185, 186, 7, 4, 0, 0, 186, 68, 1, 0, 0, 0, 187, - 192, 3, 71, 35, 0, 188, 192, 3, 75, 37, 0, 189, 192, 3, 77, 38, 0, 190, - 192, 3, 73, 36, 0, 191, 187, 1, 0, 0, 0, 191, 188, 1, 0, 0, 0, 191, 189, - 1, 0, 0, 0, 191, 190, 1, 0, 0, 0, 192, 70, 1, 0, 0, 0, 193, 194, 3, 57, - 28, 0, 194, 195, 7, 5, 0, 0, 195, 72, 1, 0, 0, 0, 196, 197, 3, 57, 28, - 0, 197, 198, 2, 48, 51, 0, 198, 199, 2, 48, 55, 0, 199, 200, 2, 48, 55, - 0, 200, 74, 1, 0, 0, 0, 201, 202, 3, 57, 28, 0, 202, 203, 7, 6, 0, 0, 203, - 204, 3, 65, 32, 0, 204, 205, 3, 65, 32, 0, 205, 76, 1, 0, 0, 0, 206, 207, - 3, 57, 28, 0, 207, 208, 5, 117, 0, 0, 208, 209, 3, 65, 32, 0, 209, 210, - 3, 65, 32, 0, 210, 211, 3, 65, 32, 0, 211, 212, 3, 65, 32, 0, 212, 225, - 1, 0, 0, 0, 213, 214, 3, 57, 28, 0, 214, 215, 5, 85, 0, 0, 215, 216, 3, - 65, 32, 0, 216, 217, 3, 65, 32, 0, 217, 218, 3, 65, 32, 0, 218, 219, 3, - 65, 32, 0, 219, 220, 3, 65, 32, 0, 220, 221, 3, 65, 32, 0, 221, 222, 3, - 65, 32, 0, 222, 223, 3, 65, 32, 0, 223, 225, 1, 0, 0, 0, 224, 206, 1, 0, - 0, 0, 224, 213, 1, 0, 0, 0, 225, 78, 1, 0, 0, 0, 226, 228, 7, 7, 0, 0, - 227, 226, 1, 0, 0, 0, 228, 229, 1, 0, 0, 0, 229, 227, 1, 0, 0, 0, 229, - 230, 1, 0, 0, 0, 230, 231, 1, 0, 0, 0, 231, 232, 6, 39, 0, 0, 232, 80, - 1, 0, 0, 0, 233, 234, 5, 47, 0, 0, 234, 235, 5, 47, 0, 0, 235, 239, 1, - 0, 0, 0, 236, 238, 8, 8, 0, 0, 237, 236, 1, 0, 0, 0, 238, 241, 1, 0, 0, - 0, 239, 237, 1, 0, 0, 0, 239, 240, 1, 0, 0, 0, 240, 242, 1, 0, 0, 0, 241, - 239, 1, 0, 0, 0, 242, 243, 6, 40, 0, 0, 243, 82, 1, 0, 0, 0, 244, 246, - 3, 61, 30, 0, 245, 244, 1, 0, 0, 0, 246, 247, 1, 0, 0, 0, 247, 245, 1, - 0, 0, 0, 247, 248, 1, 0, 0, 0, 248, 249, 1, 0, 0, 0, 249, 251, 5, 46, 0, - 0, 250, 252, 3, 61, 30, 0, 251, 250, 1, 0, 0, 0, 252, 253, 1, 0, 0, 0, - 253, 251, 1, 0, 0, 0, 253, 254, 1, 0, 0, 0, 254, 256, 1, 0, 0, 0, 255, - 257, 3, 63, 31, 0, 256, 255, 1, 0, 0, 0, 256, 257, 1, 0, 0, 0, 257, 275, - 1, 0, 0, 0, 258, 260, 3, 61, 30, 0, 259, 258, 1, 0, 0, 0, 260, 261, 1, - 0, 0, 0, 261, 259, 1, 0, 0, 0, 261, 262, 1, 0, 0, 0, 262, 263, 1, 0, 0, - 0, 263, 264, 3, 63, 31, 0, 264, 275, 1, 0, 0, 0, 265, 267, 5, 46, 0, 0, - 266, 268, 3, 61, 30, 0, 267, 266, 1, 0, 0, 0, 268, 269, 1, 0, 0, 0, 269, - 267, 1, 0, 0, 0, 269, 270, 1, 0, 0, 0, 270, 272, 1, 0, 0, 0, 271, 273, - 3, 63, 31, 0, 272, 271, 1, 0, 0, 0, 272, 273, 1, 0, 0, 0, 273, 275, 1, - 0, 0, 0, 274, 245, 1, 0, 0, 0, 274, 259, 1, 0, 0, 0, 274, 265, 1, 0, 0, - 0, 275, 84, 1, 0, 0, 0, 276, 278, 3, 61, 30, 0, 277, 276, 1, 0, 0, 0, 278, - 279, 1, 0, 0, 0, 279, 277, 1, 0, 0, 0, 279, 280, 1, 0, 0, 0, 280, 290, - 1, 0, 0, 0, 281, 282, 5, 48, 0, 0, 282, 283, 5, 120, 0, 0, 283, 285, 1, - 0, 0, 0, 284, 286, 3, 65, 32, 0, 285, 284, 1, 0, 0, 0, 286, 287, 1, 0, - 0, 0, 287, 285, 1, 0, 0, 0, 287, 288, 1, 0, 0, 0, 288, 290, 1, 0, 0, 0, - 289, 277, 1, 0, 0, 0, 289, 281, 1, 0, 0, 0, 290, 86, 1, 0, 0, 0, 291, 293, - 3, 61, 30, 0, 292, 291, 1, 0, 0, 0, 293, 294, 1, 0, 0, 0, 294, 292, 1, - 0, 0, 0, 294, 295, 1, 0, 0, 0, 295, 296, 1, 0, 0, 0, 296, 297, 7, 9, 0, - 0, 297, 309, 1, 0, 0, 0, 298, 299, 5, 48, 0, 0, 299, 300, 5, 120, 0, 0, - 300, 302, 1, 0, 0, 0, 301, 303, 3, 65, 32, 0, 302, 301, 1, 0, 0, 0, 303, - 304, 1, 0, 0, 0, 304, 302, 1, 0, 0, 0, 304, 305, 1, 0, 0, 0, 305, 306, - 1, 0, 0, 0, 306, 307, 7, 9, 0, 0, 307, 309, 1, 0, 0, 0, 308, 292, 1, 0, - 0, 0, 308, 298, 1, 0, 0, 0, 309, 88, 1, 0, 0, 0, 310, 315, 5, 34, 0, 0, - 311, 314, 3, 69, 34, 0, 312, 314, 8, 10, 0, 0, 313, 311, 1, 0, 0, 0, 313, - 312, 1, 0, 0, 0, 314, 317, 1, 0, 0, 0, 315, 313, 1, 0, 0, 0, 
315, 316, - 1, 0, 0, 0, 316, 318, 1, 0, 0, 0, 317, 315, 1, 0, 0, 0, 318, 407, 5, 34, - 0, 0, 319, 324, 5, 39, 0, 0, 320, 323, 3, 69, 34, 0, 321, 323, 8, 11, 0, - 0, 322, 320, 1, 0, 0, 0, 322, 321, 1, 0, 0, 0, 323, 326, 1, 0, 0, 0, 324, - 322, 1, 0, 0, 0, 324, 325, 1, 0, 0, 0, 325, 327, 1, 0, 0, 0, 326, 324, - 1, 0, 0, 0, 327, 407, 5, 39, 0, 0, 328, 329, 5, 34, 0, 0, 329, 330, 5, - 34, 0, 0, 330, 331, 5, 34, 0, 0, 331, 336, 1, 0, 0, 0, 332, 335, 3, 69, - 34, 0, 333, 335, 8, 12, 0, 0, 334, 332, 1, 0, 0, 0, 334, 333, 1, 0, 0, - 0, 335, 338, 1, 0, 0, 0, 336, 337, 1, 0, 0, 0, 336, 334, 1, 0, 0, 0, 337, - 339, 1, 0, 0, 0, 338, 336, 1, 0, 0, 0, 339, 340, 5, 34, 0, 0, 340, 341, - 5, 34, 0, 0, 341, 407, 5, 34, 0, 0, 342, 343, 5, 39, 0, 0, 343, 344, 5, - 39, 0, 0, 344, 345, 5, 39, 0, 0, 345, 350, 1, 0, 0, 0, 346, 349, 3, 69, - 34, 0, 347, 349, 8, 12, 0, 0, 348, 346, 1, 0, 0, 0, 348, 347, 1, 0, 0, - 0, 349, 352, 1, 0, 0, 0, 350, 351, 1, 0, 0, 0, 350, 348, 1, 0, 0, 0, 351, - 353, 1, 0, 0, 0, 352, 350, 1, 0, 0, 0, 353, 354, 5, 39, 0, 0, 354, 355, - 5, 39, 0, 0, 355, 407, 5, 39, 0, 0, 356, 357, 3, 67, 33, 0, 357, 361, 5, - 34, 0, 0, 358, 360, 8, 13, 0, 0, 359, 358, 1, 0, 0, 0, 360, 363, 1, 0, - 0, 0, 361, 359, 1, 0, 0, 0, 361, 362, 1, 0, 0, 0, 362, 364, 1, 0, 0, 0, - 363, 361, 1, 0, 0, 0, 364, 365, 5, 34, 0, 0, 365, 407, 1, 0, 0, 0, 366, - 367, 3, 67, 33, 0, 367, 371, 5, 39, 0, 0, 368, 370, 8, 14, 0, 0, 369, 368, - 1, 0, 0, 0, 370, 373, 1, 0, 0, 0, 371, 369, 1, 0, 0, 0, 371, 372, 1, 0, - 0, 0, 372, 374, 1, 0, 0, 0, 373, 371, 1, 0, 0, 0, 374, 375, 5, 39, 0, 0, - 375, 407, 1, 0, 0, 0, 376, 377, 3, 67, 33, 0, 377, 378, 5, 34, 0, 0, 378, - 379, 5, 34, 0, 0, 379, 380, 5, 34, 0, 0, 380, 384, 1, 0, 0, 0, 381, 383, - 9, 0, 0, 0, 382, 381, 1, 0, 0, 0, 383, 386, 1, 0, 0, 0, 384, 385, 1, 0, - 0, 0, 384, 382, 1, 0, 0, 0, 385, 387, 1, 0, 0, 0, 386, 384, 1, 0, 0, 0, - 387, 388, 5, 34, 0, 0, 388, 389, 5, 34, 0, 0, 389, 390, 5, 34, 0, 0, 390, - 407, 1, 0, 0, 0, 391, 392, 3, 67, 33, 0, 392, 393, 5, 39, 0, 0, 393, 394, - 5, 39, 0, 0, 394, 395, 5, 39, 0, 0, 395, 399, 1, 0, 0, 0, 396, 398, 9, - 0, 0, 0, 397, 396, 1, 0, 0, 0, 398, 401, 1, 0, 0, 0, 399, 400, 1, 0, 0, - 0, 399, 397, 1, 0, 0, 0, 400, 402, 1, 0, 0, 0, 401, 399, 1, 0, 0, 0, 402, - 403, 5, 39, 0, 0, 403, 404, 5, 39, 0, 0, 404, 405, 5, 39, 0, 0, 405, 407, - 1, 0, 0, 0, 406, 310, 1, 0, 0, 0, 406, 319, 1, 0, 0, 0, 406, 328, 1, 0, - 0, 0, 406, 342, 1, 0, 0, 0, 406, 356, 1, 0, 0, 0, 406, 366, 1, 0, 0, 0, - 406, 376, 1, 0, 0, 0, 406, 391, 1, 0, 0, 0, 407, 90, 1, 0, 0, 0, 408, 409, - 7, 15, 0, 0, 409, 410, 3, 89, 44, 0, 410, 92, 1, 0, 0, 0, 411, 414, 3, - 59, 29, 0, 412, 414, 5, 95, 0, 0, 413, 411, 1, 0, 0, 0, 413, 412, 1, 0, - 0, 0, 414, 420, 1, 0, 0, 0, 415, 419, 3, 59, 29, 0, 416, 419, 3, 61, 30, - 0, 417, 419, 5, 95, 0, 0, 418, 415, 1, 0, 0, 0, 418, 416, 1, 0, 0, 0, 418, - 417, 1, 0, 0, 0, 419, 422, 1, 0, 0, 0, 420, 418, 1, 0, 0, 0, 420, 421, - 1, 0, 0, 0, 421, 94, 1, 0, 0, 0, 422, 420, 1, 0, 0, 0, 36, 0, 176, 181, - 191, 224, 229, 239, 247, 253, 256, 261, 269, 272, 274, 279, 287, 289, 294, - 304, 308, 313, 315, 322, 324, 334, 336, 348, 350, 361, 371, 384, 399, 406, - 413, 418, 420, 1, 0, 1, 0, -} - deserializer := antlr.NewATNDeserializer(nil) - staticData.atn = deserializer.Deserialize(staticData.serializedATN) - atn := staticData.atn - staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState)) - decisionToDFA := staticData.decisionToDFA - for index, state := range atn.DecisionToState { - decisionToDFA[index] = antlr.NewDFA(state, index) - } ->>>>>>> 
70e0318b1 ([WIP] add archivista storage backend)
}

// CELLexerInit initializes any static state used to implement CELLexer. By default the
@@ -562,28 +287,16 @@ func cellexerLexerInit() {
// NewCELLexer(). You can call this function if you wish to initialize the static state ahead
// of time.
func CELLexerInit() {
-<<<<<<< HEAD
	staticData := &CELLexerLexerStaticData
	staticData.once.Do(cellexerLexerInit)
-=======
-	staticData := &CELLexerLexerStaticData
-	staticData.once.Do(cellexerLexerInit)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
}

// NewCELLexer produces a new lexer instance for the optional input antlr.CharStream.
func NewCELLexer(input antlr.CharStream) *CELLexer {
-<<<<<<< HEAD
	CELLexerInit()
	l := new(CELLexer)
	l.BaseLexer = antlr.NewBaseLexer(input)
	staticData := &CELLexerLexerStaticData
-=======
-	CELLexerInit()
-	l := new(CELLexer)
-	l.BaseLexer = antlr.NewBaseLexer(input)
-	staticData := &CELLexerLexerStaticData
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
	l.Interpreter = antlr.NewLexerATNSimulator(l, staticData.atn, staticData.decisionToDFA, staticData.PredictionContextCache)
	l.channelNames = staticData.ChannelNames
	l.modeNames = staticData.ModeNames
@@ -598,7 +311,6 @@ func NewCELLexer(input antlr.CharStream) *CELLexer {

// CELLexer tokens.
const (
-<<<<<<< HEAD
	CELLexerEQUALS = 1
	CELLexerNOT_EQUALS = 2
	CELLexerIN = 3
@@ -637,43 +349,3 @@ const (
	CELLexerIDENTIFIER = 36
	CELLexerESC_IDENTIFIER = 37
)
-=======
-	CELLexerEQUALS = 1
-	CELLexerNOT_EQUALS = 2
-	CELLexerIN = 3
-	CELLexerLESS = 4
-	CELLexerLESS_EQUALS = 5
-	CELLexerGREATER_EQUALS = 6
-	CELLexerGREATER = 7
-	CELLexerLOGICAL_AND = 8
-	CELLexerLOGICAL_OR = 9
-	CELLexerLBRACKET = 10
-	CELLexerRPRACKET = 11
-	CELLexerLBRACE = 12
-	CELLexerRBRACE = 13
-	CELLexerLPAREN = 14
-	CELLexerRPAREN = 15
-	CELLexerDOT = 16
-	CELLexerCOMMA = 17
-	CELLexerMINUS = 18
-	CELLexerEXCLAM = 19
-	CELLexerQUESTIONMARK = 20
-	CELLexerCOLON = 21
-	CELLexerPLUS = 22
-	CELLexerSTAR = 23
-	CELLexerSLASH = 24
-	CELLexerPERCENT = 25
-	CELLexerCEL_TRUE = 26
-	CELLexerCEL_FALSE = 27
-	CELLexerNUL = 28
-	CELLexerWHITESPACE = 29
-	CELLexerCOMMENT = 30
-	CELLexerNUM_FLOAT = 31
-	CELLexerNUM_INT = 32
-	CELLexerNUM_UINT = 33
-	CELLexerSTRING = 34
-	CELLexerBYTES = 35
-	CELLexerIDENTIFIER = 36
-)
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_listener.go b/vendor/github.com/google/cel-go/parser/gen/cel_listener.go
index 3b81db4edf..da477c4b7f 100644
--- a/vendor/github.com/google/cel-go/parser/gen/cel_listener.go
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_listener.go
@@ -1,16 +1,8 @@
-<<<<<<< HEAD
// Code generated from /usr/local/google/home/jdtatum/github/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT.
-=======
-// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

package gen // CEL
import "github.com/antlr4-go/antlr/v4"

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
// CELListener is a complete listener for a parse tree produced by CELParser.
type CELListener interface {
	antlr.ParseTreeListener
@@ -54,16 +46,11 @@ type CELListener interface {
	// EnterIndex is called when entering the Index production.
	EnterIndex(c *IndexContext)

-<<<<<<< HEAD
	// EnterIdent is called when entering the Ident production.
	EnterIdent(c *IdentContext)

	// EnterGlobalCall is called when entering the GlobalCall production.
	EnterGlobalCall(c *GlobalCallContext)
-=======
-	// EnterIdentOrGlobalCall is called when entering the IdentOrGlobalCall production.
-	EnterIdentOrGlobalCall(c *IdentOrGlobalCallContext)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

	// EnterNested is called when entering the Nested production.
	EnterNested(c *NestedContext)
@@ -95,15 +82,12 @@ type CELListener interface {
	// EnterMapInitializerList is called when entering the mapInitializerList production.
	EnterMapInitializerList(c *MapInitializerListContext)

-<<<<<<< HEAD
	// EnterSimpleIdentifier is called when entering the SimpleIdentifier production.
	EnterSimpleIdentifier(c *SimpleIdentifierContext)

	// EnterEscapedIdentifier is called when entering the EscapedIdentifier production.
	EnterEscapedIdentifier(c *EscapedIdentifierContext)
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

	// EnterOptExpr is called when entering the optExpr production.
	EnterOptExpr(c *OptExprContext)
@@ -170,16 +154,11 @@ type CELListener interface {
	// ExitIndex is called when exiting the Index production.
	ExitIndex(c *IndexContext)

-<<<<<<< HEAD
	// ExitIdent is called when exiting the Ident production.
	ExitIdent(c *IdentContext)

	// ExitGlobalCall is called when exiting the GlobalCall production.
	ExitGlobalCall(c *GlobalCallContext)
-=======
-	// ExitIdentOrGlobalCall is called when exiting the IdentOrGlobalCall production.
-	ExitIdentOrGlobalCall(c *IdentOrGlobalCallContext)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

	// ExitNested is called when exiting the Nested production.
	ExitNested(c *NestedContext)
@@ -211,15 +190,12 @@ type CELListener interface {
	// ExitMapInitializerList is called when exiting the mapInitializerList production.
	ExitMapInitializerList(c *MapInitializerListContext)

-<<<<<<< HEAD
	// ExitSimpleIdentifier is called when exiting the SimpleIdentifier production.
	ExitSimpleIdentifier(c *SimpleIdentifierContext)

	// ExitEscapedIdentifier is called when exiting the EscapedIdentifier production.
	ExitEscapedIdentifier(c *EscapedIdentifierContext)
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

	// ExitOptExpr is called when exiting the optExpr production.
	ExitOptExpr(c *OptExprContext)
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_parser.go b/vendor/github.com/google/cel-go/parser/gen/cel_parser.go
index 310f1a6c53..38693df58d 100644
--- a/vendor/github.com/google/cel-go/parser/gen/cel_parser.go
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_parser.go
@@ -1,18 +1,10 @@
-<<<<<<< HEAD
// Code generated from /usr/local/google/home/jdtatum/github/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT.
-=======
-// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) package gen // CEL import ( "fmt" "strconv" -<<<<<<< HEAD "sync" -======= - "sync" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/antlr4-go/antlr/v4" ) @@ -22,16 +14,11 @@ var _ = fmt.Printf var _ = strconv.Itoa var _ = sync.Once{} -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type CELParser struct { *antlr.BaseParser } var CELParserStaticData struct { -<<<<<<< HEAD once sync.Once serializedATN []int32 LiteralNames []string @@ -191,163 +178,6 @@ func celParserInit() { for index, state := range atn.DecisionToState { decisionToDFA[index] = antlr.NewDFA(state, index) } -======= - once sync.Once - serializedATN []int32 - LiteralNames []string - SymbolicNames []string - RuleNames []string - PredictionContextCache *antlr.PredictionContextCache - atn *antlr.ATN - decisionToDFA []*antlr.DFA -} - -func celParserInit() { - staticData := &CELParserStaticData - staticData.LiteralNames = []string{ - "", "'=='", "'!='", "'in'", "'<'", "'<='", "'>='", "'>'", "'&&'", "'||'", - "'['", "']'", "'{'", "'}'", "'('", "')'", "'.'", "','", "'-'", "'!'", - "'?'", "':'", "'+'", "'*'", "'/'", "'%'", "'true'", "'false'", "'null'", - } - staticData.SymbolicNames = []string{ - "", "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS", - "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE", - "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK", - "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE", - "NUL", "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT", - "STRING", "BYTES", "IDENTIFIER", - } - staticData.RuleNames = []string{ - "start", "expr", "conditionalOr", "conditionalAnd", "relation", "calc", - "unary", "member", "primary", "exprList", "listInit", "fieldInitializerList", - "optField", "mapInitializerList", "optExpr", "literal", - } - staticData.PredictionContextCache = antlr.NewPredictionContextCache() - staticData.serializedATN = []int32{ - 4, 1, 36, 251, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, - 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, - 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, - 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 42, 8, 1, 1, - 2, 1, 2, 1, 2, 5, 2, 47, 8, 2, 10, 2, 12, 2, 50, 9, 2, 1, 3, 1, 3, 1, 3, - 5, 3, 55, 8, 3, 10, 3, 12, 3, 58, 9, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, - 4, 5, 4, 66, 8, 4, 10, 4, 12, 4, 69, 9, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, - 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 80, 8, 5, 10, 5, 12, 5, 83, 9, 5, 1, 6, 1, - 6, 4, 6, 87, 8, 6, 11, 6, 12, 6, 88, 1, 6, 1, 6, 4, 6, 93, 8, 6, 11, 6, - 12, 6, 94, 1, 6, 3, 6, 98, 8, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, - 7, 106, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 114, 8, 7, 1, 7, - 1, 7, 1, 7, 1, 7, 3, 7, 120, 8, 7, 1, 7, 1, 7, 1, 7, 5, 7, 125, 8, 7, 10, - 7, 12, 7, 128, 9, 7, 1, 8, 3, 8, 131, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 136, - 8, 8, 1, 8, 3, 8, 139, 8, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8, - 147, 8, 8, 1, 8, 3, 8, 150, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 155, 8, 8, 1, - 8, 3, 8, 158, 8, 8, 1, 8, 1, 8, 3, 8, 162, 8, 8, 1, 8, 1, 8, 1, 8, 5, 8, - 167, 8, 8, 10, 8, 12, 8, 170, 9, 8, 1, 8, 1, 8, 3, 8, 174, 8, 8, 1, 8, - 3, 8, 177, 8, 8, 1, 8, 1, 8, 3, 8, 181, 8, 8, 1, 9, 1, 9, 1, 9, 5, 9, 186, - 8, 9, 10, 9, 12, 9, 189, 9, 9, 1, 10, 1, 10, 1, 10, 5, 10, 194, 8, 10, - 10, 10, 12, 10, 197, 9, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 
- 11, 1, 11, 5, 11, 207, 8, 11, 10, 11, 12, 11, 210, 9, 11, 1, 12, 3, 12, - 213, 8, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, - 13, 1, 13, 5, 13, 225, 8, 13, 10, 13, 12, 13, 228, 9, 13, 1, 14, 3, 14, - 231, 8, 14, 1, 14, 1, 14, 1, 15, 3, 15, 236, 8, 15, 1, 15, 1, 15, 1, 15, - 3, 15, 241, 8, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 3, 15, 249, - 8, 15, 1, 15, 0, 3, 8, 10, 14, 16, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, - 22, 24, 26, 28, 30, 0, 3, 1, 0, 1, 7, 1, 0, 23, 25, 2, 0, 18, 18, 22, 22, - 281, 0, 32, 1, 0, 0, 0, 2, 35, 1, 0, 0, 0, 4, 43, 1, 0, 0, 0, 6, 51, 1, - 0, 0, 0, 8, 59, 1, 0, 0, 0, 10, 70, 1, 0, 0, 0, 12, 97, 1, 0, 0, 0, 14, - 99, 1, 0, 0, 0, 16, 180, 1, 0, 0, 0, 18, 182, 1, 0, 0, 0, 20, 190, 1, 0, - 0, 0, 22, 198, 1, 0, 0, 0, 24, 212, 1, 0, 0, 0, 26, 216, 1, 0, 0, 0, 28, - 230, 1, 0, 0, 0, 30, 248, 1, 0, 0, 0, 32, 33, 3, 2, 1, 0, 33, 34, 5, 0, - 0, 1, 34, 1, 1, 0, 0, 0, 35, 41, 3, 4, 2, 0, 36, 37, 5, 20, 0, 0, 37, 38, - 3, 4, 2, 0, 38, 39, 5, 21, 0, 0, 39, 40, 3, 2, 1, 0, 40, 42, 1, 0, 0, 0, - 41, 36, 1, 0, 0, 0, 41, 42, 1, 0, 0, 0, 42, 3, 1, 0, 0, 0, 43, 48, 3, 6, - 3, 0, 44, 45, 5, 9, 0, 0, 45, 47, 3, 6, 3, 0, 46, 44, 1, 0, 0, 0, 47, 50, - 1, 0, 0, 0, 48, 46, 1, 0, 0, 0, 48, 49, 1, 0, 0, 0, 49, 5, 1, 0, 0, 0, - 50, 48, 1, 0, 0, 0, 51, 56, 3, 8, 4, 0, 52, 53, 5, 8, 0, 0, 53, 55, 3, - 8, 4, 0, 54, 52, 1, 0, 0, 0, 55, 58, 1, 0, 0, 0, 56, 54, 1, 0, 0, 0, 56, - 57, 1, 0, 0, 0, 57, 7, 1, 0, 0, 0, 58, 56, 1, 0, 0, 0, 59, 60, 6, 4, -1, - 0, 60, 61, 3, 10, 5, 0, 61, 67, 1, 0, 0, 0, 62, 63, 10, 1, 0, 0, 63, 64, - 7, 0, 0, 0, 64, 66, 3, 8, 4, 2, 65, 62, 1, 0, 0, 0, 66, 69, 1, 0, 0, 0, - 67, 65, 1, 0, 0, 0, 67, 68, 1, 0, 0, 0, 68, 9, 1, 0, 0, 0, 69, 67, 1, 0, - 0, 0, 70, 71, 6, 5, -1, 0, 71, 72, 3, 12, 6, 0, 72, 81, 1, 0, 0, 0, 73, - 74, 10, 2, 0, 0, 74, 75, 7, 1, 0, 0, 75, 80, 3, 10, 5, 3, 76, 77, 10, 1, - 0, 0, 77, 78, 7, 2, 0, 0, 78, 80, 3, 10, 5, 2, 79, 73, 1, 0, 0, 0, 79, - 76, 1, 0, 0, 0, 80, 83, 1, 0, 0, 0, 81, 79, 1, 0, 0, 0, 81, 82, 1, 0, 0, - 0, 82, 11, 1, 0, 0, 0, 83, 81, 1, 0, 0, 0, 84, 98, 3, 14, 7, 0, 85, 87, - 5, 19, 0, 0, 86, 85, 1, 0, 0, 0, 87, 88, 1, 0, 0, 0, 88, 86, 1, 0, 0, 0, - 88, 89, 1, 0, 0, 0, 89, 90, 1, 0, 0, 0, 90, 98, 3, 14, 7, 0, 91, 93, 5, - 18, 0, 0, 92, 91, 1, 0, 0, 0, 93, 94, 1, 0, 0, 0, 94, 92, 1, 0, 0, 0, 94, - 95, 1, 0, 0, 0, 95, 96, 1, 0, 0, 0, 96, 98, 3, 14, 7, 0, 97, 84, 1, 0, - 0, 0, 97, 86, 1, 0, 0, 0, 97, 92, 1, 0, 0, 0, 98, 13, 1, 0, 0, 0, 99, 100, - 6, 7, -1, 0, 100, 101, 3, 16, 8, 0, 101, 126, 1, 0, 0, 0, 102, 103, 10, - 3, 0, 0, 103, 105, 5, 16, 0, 0, 104, 106, 5, 20, 0, 0, 105, 104, 1, 0, - 0, 0, 105, 106, 1, 0, 0, 0, 106, 107, 1, 0, 0, 0, 107, 125, 5, 36, 0, 0, - 108, 109, 10, 2, 0, 0, 109, 110, 5, 16, 0, 0, 110, 111, 5, 36, 0, 0, 111, - 113, 5, 14, 0, 0, 112, 114, 3, 18, 9, 0, 113, 112, 1, 0, 0, 0, 113, 114, - 1, 0, 0, 0, 114, 115, 1, 0, 0, 0, 115, 125, 5, 15, 0, 0, 116, 117, 10, - 1, 0, 0, 117, 119, 5, 10, 0, 0, 118, 120, 5, 20, 0, 0, 119, 118, 1, 0, - 0, 0, 119, 120, 1, 0, 0, 0, 120, 121, 1, 0, 0, 0, 121, 122, 3, 2, 1, 0, - 122, 123, 5, 11, 0, 0, 123, 125, 1, 0, 0, 0, 124, 102, 1, 0, 0, 0, 124, - 108, 1, 0, 0, 0, 124, 116, 1, 0, 0, 0, 125, 128, 1, 0, 0, 0, 126, 124, - 1, 0, 0, 0, 126, 127, 1, 0, 0, 0, 127, 15, 1, 0, 0, 0, 128, 126, 1, 0, - 0, 0, 129, 131, 5, 16, 0, 0, 130, 129, 1, 0, 0, 0, 130, 131, 1, 0, 0, 0, - 131, 132, 1, 0, 0, 0, 132, 138, 5, 36, 0, 0, 133, 135, 5, 14, 0, 0, 134, - 136, 3, 18, 9, 0, 135, 134, 1, 0, 0, 0, 135, 136, 1, 0, 0, 0, 136, 137, - 1, 0, 0, 0, 137, 139, 5, 15, 0, 0, 138, 133, 1, 0, 
0, 0, 138, 139, 1, 0, - 0, 0, 139, 181, 1, 0, 0, 0, 140, 141, 5, 14, 0, 0, 141, 142, 3, 2, 1, 0, - 142, 143, 5, 15, 0, 0, 143, 181, 1, 0, 0, 0, 144, 146, 5, 10, 0, 0, 145, - 147, 3, 20, 10, 0, 146, 145, 1, 0, 0, 0, 146, 147, 1, 0, 0, 0, 147, 149, - 1, 0, 0, 0, 148, 150, 5, 17, 0, 0, 149, 148, 1, 0, 0, 0, 149, 150, 1, 0, - 0, 0, 150, 151, 1, 0, 0, 0, 151, 181, 5, 11, 0, 0, 152, 154, 5, 12, 0, - 0, 153, 155, 3, 26, 13, 0, 154, 153, 1, 0, 0, 0, 154, 155, 1, 0, 0, 0, - 155, 157, 1, 0, 0, 0, 156, 158, 5, 17, 0, 0, 157, 156, 1, 0, 0, 0, 157, - 158, 1, 0, 0, 0, 158, 159, 1, 0, 0, 0, 159, 181, 5, 13, 0, 0, 160, 162, - 5, 16, 0, 0, 161, 160, 1, 0, 0, 0, 161, 162, 1, 0, 0, 0, 162, 163, 1, 0, - 0, 0, 163, 168, 5, 36, 0, 0, 164, 165, 5, 16, 0, 0, 165, 167, 5, 36, 0, - 0, 166, 164, 1, 0, 0, 0, 167, 170, 1, 0, 0, 0, 168, 166, 1, 0, 0, 0, 168, - 169, 1, 0, 0, 0, 169, 171, 1, 0, 0, 0, 170, 168, 1, 0, 0, 0, 171, 173, - 5, 12, 0, 0, 172, 174, 3, 22, 11, 0, 173, 172, 1, 0, 0, 0, 173, 174, 1, - 0, 0, 0, 174, 176, 1, 0, 0, 0, 175, 177, 5, 17, 0, 0, 176, 175, 1, 0, 0, - 0, 176, 177, 1, 0, 0, 0, 177, 178, 1, 0, 0, 0, 178, 181, 5, 13, 0, 0, 179, - 181, 3, 30, 15, 0, 180, 130, 1, 0, 0, 0, 180, 140, 1, 0, 0, 0, 180, 144, - 1, 0, 0, 0, 180, 152, 1, 0, 0, 0, 180, 161, 1, 0, 0, 0, 180, 179, 1, 0, - 0, 0, 181, 17, 1, 0, 0, 0, 182, 187, 3, 2, 1, 0, 183, 184, 5, 17, 0, 0, - 184, 186, 3, 2, 1, 0, 185, 183, 1, 0, 0, 0, 186, 189, 1, 0, 0, 0, 187, - 185, 1, 0, 0, 0, 187, 188, 1, 0, 0, 0, 188, 19, 1, 0, 0, 0, 189, 187, 1, - 0, 0, 0, 190, 195, 3, 28, 14, 0, 191, 192, 5, 17, 0, 0, 192, 194, 3, 28, - 14, 0, 193, 191, 1, 0, 0, 0, 194, 197, 1, 0, 0, 0, 195, 193, 1, 0, 0, 0, - 195, 196, 1, 0, 0, 0, 196, 21, 1, 0, 0, 0, 197, 195, 1, 0, 0, 0, 198, 199, - 3, 24, 12, 0, 199, 200, 5, 21, 0, 0, 200, 208, 3, 2, 1, 0, 201, 202, 5, - 17, 0, 0, 202, 203, 3, 24, 12, 0, 203, 204, 5, 21, 0, 0, 204, 205, 3, 2, - 1, 0, 205, 207, 1, 0, 0, 0, 206, 201, 1, 0, 0, 0, 207, 210, 1, 0, 0, 0, - 208, 206, 1, 0, 0, 0, 208, 209, 1, 0, 0, 0, 209, 23, 1, 0, 0, 0, 210, 208, - 1, 0, 0, 0, 211, 213, 5, 20, 0, 0, 212, 211, 1, 0, 0, 0, 212, 213, 1, 0, - 0, 0, 213, 214, 1, 0, 0, 0, 214, 215, 5, 36, 0, 0, 215, 25, 1, 0, 0, 0, - 216, 217, 3, 28, 14, 0, 217, 218, 5, 21, 0, 0, 218, 226, 3, 2, 1, 0, 219, - 220, 5, 17, 0, 0, 220, 221, 3, 28, 14, 0, 221, 222, 5, 21, 0, 0, 222, 223, - 3, 2, 1, 0, 223, 225, 1, 0, 0, 0, 224, 219, 1, 0, 0, 0, 225, 228, 1, 0, - 0, 0, 226, 224, 1, 0, 0, 0, 226, 227, 1, 0, 0, 0, 227, 27, 1, 0, 0, 0, - 228, 226, 1, 0, 0, 0, 229, 231, 5, 20, 0, 0, 230, 229, 1, 0, 0, 0, 230, - 231, 1, 0, 0, 0, 231, 232, 1, 0, 0, 0, 232, 233, 3, 2, 1, 0, 233, 29, 1, - 0, 0, 0, 234, 236, 5, 18, 0, 0, 235, 234, 1, 0, 0, 0, 235, 236, 1, 0, 0, - 0, 236, 237, 1, 0, 0, 0, 237, 249, 5, 32, 0, 0, 238, 249, 5, 33, 0, 0, - 239, 241, 5, 18, 0, 0, 240, 239, 1, 0, 0, 0, 240, 241, 1, 0, 0, 0, 241, - 242, 1, 0, 0, 0, 242, 249, 5, 31, 0, 0, 243, 249, 5, 34, 0, 0, 244, 249, - 5, 35, 0, 0, 245, 249, 5, 26, 0, 0, 246, 249, 5, 27, 0, 0, 247, 249, 5, - 28, 0, 0, 248, 235, 1, 0, 0, 0, 248, 238, 1, 0, 0, 0, 248, 240, 1, 0, 0, - 0, 248, 243, 1, 0, 0, 0, 248, 244, 1, 0, 0, 0, 248, 245, 1, 0, 0, 0, 248, - 246, 1, 0, 0, 0, 248, 247, 1, 0, 0, 0, 249, 31, 1, 0, 0, 0, 35, 41, 48, - 56, 67, 79, 81, 88, 94, 97, 105, 113, 119, 124, 126, 130, 135, 138, 146, - 149, 154, 157, 161, 168, 173, 176, 180, 187, 195, 208, 212, 226, 230, 235, - 240, 248, -} - deserializer := antlr.NewATNDeserializer(nil) - staticData.atn = deserializer.Deserialize(staticData.serializedATN) - atn := staticData.atn - 
staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState)) - decisionToDFA := staticData.decisionToDFA - for index, state := range atn.DecisionToState { - decisionToDFA[index] = antlr.NewDFA(state, index) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // CELParserInit initializes any static state used to implement CELParser. By default the @@ -355,13 +185,8 @@ func celParserInit() { // NewCELParser(). You can call this function if you wish to initialize the static state ahead // of time. func CELParserInit() { -<<<<<<< HEAD staticData := &CELParserStaticData staticData.once.Do(celParserInit) -======= - staticData := &CELParserStaticData - staticData.once.Do(celParserInit) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // NewCELParser produces a new parser instance for the optional input antlr.TokenStream. @@ -369,11 +194,7 @@ func NewCELParser(input antlr.TokenStream) *CELParser { CELParserInit() this := new(CELParser) this.BaseParser = antlr.NewBaseParser(input) -<<<<<<< HEAD staticData := &CELParserStaticData -======= - staticData := &CELParserStaticData ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) this.Interpreter = antlr.NewParserATNSimulator(this, staticData.atn, staticData.decisionToDFA, staticData.PredictionContextCache) this.RuleNames = staticData.RuleNames this.LiteralNames = staticData.LiteralNames @@ -383,7 +204,6 @@ func NewCELParser(input antlr.TokenStream) *CELParser { return this } -<<<<<<< HEAD // CELParser tokens. const ( CELParserEOF = antlr.TokenEOF @@ -424,53 +244,10 @@ const ( CELParserBYTES = 35 CELParserIDENTIFIER = 36 CELParserESC_IDENTIFIER = 37 -======= - -// CELParser tokens. -const ( - CELParserEOF = antlr.TokenEOF - CELParserEQUALS = 1 - CELParserNOT_EQUALS = 2 - CELParserIN = 3 - CELParserLESS = 4 - CELParserLESS_EQUALS = 5 - CELParserGREATER_EQUALS = 6 - CELParserGREATER = 7 - CELParserLOGICAL_AND = 8 - CELParserLOGICAL_OR = 9 - CELParserLBRACKET = 10 - CELParserRPRACKET = 11 - CELParserLBRACE = 12 - CELParserRBRACE = 13 - CELParserLPAREN = 14 - CELParserRPAREN = 15 - CELParserDOT = 16 - CELParserCOMMA = 17 - CELParserMINUS = 18 - CELParserEXCLAM = 19 - CELParserQUESTIONMARK = 20 - CELParserCOLON = 21 - CELParserPLUS = 22 - CELParserSTAR = 23 - CELParserSLASH = 24 - CELParserPERCENT = 25 - CELParserCEL_TRUE = 26 - CELParserCEL_FALSE = 27 - CELParserNUL = 28 - CELParserWHITESPACE = 29 - CELParserCOMMENT = 30 - CELParserNUM_FLOAT = 31 - CELParserNUM_INT = 32 - CELParserNUM_UINT = 33 - CELParserSTRING = 34 - CELParserBYTES = 35 - CELParserIDENTIFIER = 36 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // CELParser rules. const ( -<<<<<<< HEAD CELParserRULE_start = 0 CELParserRULE_expr = 1 CELParserRULE_conditionalOr = 2 @@ -488,24 +265,6 @@ const ( CELParserRULE_escapeIdent = 14 CELParserRULE_optExpr = 15 CELParserRULE_literal = 16 -======= - CELParserRULE_start = 0 - CELParserRULE_expr = 1 - CELParserRULE_conditionalOr = 2 - CELParserRULE_conditionalAnd = 3 - CELParserRULE_relation = 4 - CELParserRULE_calc = 5 - CELParserRULE_unary = 6 - CELParserRULE_member = 7 - CELParserRULE_primary = 8 - CELParserRULE_exprList = 9 - CELParserRULE_listInit = 10 - CELParserRULE_fieldInitializerList = 11 - CELParserRULE_optField = 12 - CELParserRULE_mapInitializerList = 13 - CELParserRULE_optExpr = 14 - CELParserRULE_literal = 15 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // IStartContext is an interface to support dynamic dispatch. 
@@ -518,17 +277,9 @@ type IStartContext interface { // GetE returns the e rule contexts. GetE() IExprContext -<<<<<<< HEAD - // SetE sets the e rule contexts. - SetE(IExprContext) - -======= - // SetE sets the e rule contexts. SetE(IExprContext) - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Getter signatures EOF() antlr.TerminalNode Expr() IExprContext @@ -540,11 +291,7 @@ type IStartContext interface { type StartContext struct { antlr.BaseParserRuleContext parser antlr.Parser -<<<<<<< HEAD e IExprContext -======= - e IExprContext ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func NewEmptyStartContext() *StartContext { @@ -554,11 +301,7 @@ func NewEmptyStartContext() *StartContext { return p } -<<<<<<< HEAD func InitEmptyStartContext(p *StartContext) { -======= -func InitEmptyStartContext(p *StartContext) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_start } @@ -580,31 +323,17 @@ func (s *StartContext) GetParser() antlr.Parser { return s.parser } func (s *StartContext) GetE() IExprContext { return s.e } -<<<<<<< HEAD -func (s *StartContext) SetE(v IExprContext) { s.e = v } - -======= - func (s *StartContext) SetE(v IExprContext) { s.e = v } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *StartContext) EOF() antlr.TerminalNode { return s.GetToken(CELParserEOF, 0) } func (s *StartContext) Expr() IExprContext { -<<<<<<< HEAD var t antlr.RuleContext for _, ctx := range s.GetChildren() { if _, ok := ctx.(IExprContext); ok { t = ctx.(antlr.RuleContext) -======= - var t antlr.RuleContext; - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IExprContext); ok { - t = ctx.(antlr.RuleContext); ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) break } } @@ -624,10 +353,6 @@ func (s *StartContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) return antlr.TreesStringTree(s, ruleNames, recog) } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *StartContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterStart(s) @@ -650,18 +375,11 @@ func (s *StartContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } -<<<<<<< HEAD -======= - - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (p *CELParser) Start_() (localctx IStartContext) { localctx = NewStartContext(p, p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 0, CELParserRULE_start) p.EnterOuterAlt(localctx, 1) { -<<<<<<< HEAD p.SetState(34) var _x = p.Expr() @@ -677,26 +395,6 @@ func (p *CELParser) Start_() (localctx IStartContext) { } } -======= - p.SetState(32) - - var _x = p.Expr() - - - localctx.(*StartContext).e = _x - } - { - p.SetState(33) - p.Match(CELParserEOF) - if p.HasError() { - // Recognition error - abort rule - goto errorExit - } - } - - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) errorExit: if p.HasError() { v := p.GetError() @@ -710,10 +408,6 @@ errorExit: goto errorExit // Trick to prevent compiler error if the label is not used } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // IExprContext is an interface to support dynamic dispatch. type IExprContext interface { antlr.ParserRuleContext @@ -722,19 +416,10 @@ type IExprContext interface { GetParser() antlr.Parser // GetOp returns the op token. -<<<<<<< HEAD GetOp() antlr.Token // SetOp sets the op token. 
SetOp(antlr.Token) -======= - GetOp() antlr.Token - - - // SetOp sets the op token. - SetOp(antlr.Token) - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // GetE returns the e rule contexts. GetE() IConditionalOrContext @@ -745,10 +430,6 @@ type IExprContext interface { // GetE2 returns the e2 rule contexts. GetE2() IExprContext -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // SetE sets the e rule contexts. SetE(IConditionalOrContext) @@ -758,10 +439,6 @@ type IExprContext interface { // SetE2 sets the e2 rule contexts. SetE2(IExprContext) -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Getter signatures AllConditionalOr() []IConditionalOrContext ConditionalOr(i int) IConditionalOrContext @@ -776,17 +453,10 @@ type IExprContext interface { type ExprContext struct { antlr.BaseParserRuleContext parser antlr.Parser -<<<<<<< HEAD e IConditionalOrContext op antlr.Token e1 IConditionalOrContext e2 IExprContext -======= - e IConditionalOrContext - op antlr.Token - e1 IConditionalOrContext - e2 IExprContext ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func NewEmptyExprContext() *ExprContext { @@ -796,11 +466,7 @@ func NewEmptyExprContext() *ExprContext { return p } -<<<<<<< HEAD func InitEmptyExprContext(p *ExprContext) { -======= -func InitEmptyExprContext(p *ExprContext) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_expr } @@ -822,35 +488,20 @@ func (s *ExprContext) GetParser() antlr.Parser { return s.parser } func (s *ExprContext) GetOp() antlr.Token { return s.op } -<<<<<<< HEAD -func (s *ExprContext) SetOp(v antlr.Token) { s.op = v } - -======= - func (s *ExprContext) SetOp(v antlr.Token) { s.op = v } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *ExprContext) GetE() IConditionalOrContext { return s.e } func (s *ExprContext) GetE1() IConditionalOrContext { return s.e1 } func (s *ExprContext) GetE2() IExprContext { return s.e2 } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *ExprContext) SetE(v IConditionalOrContext) { s.e = v } func (s *ExprContext) SetE1(v IConditionalOrContext) { s.e1 = v } func (s *ExprContext) SetE2(v IExprContext) { s.e2 = v } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *ExprContext) AllConditionalOr() []IConditionalOrContext { children := s.GetChildren() len := 0 @@ -873,20 +524,12 @@ func (s *ExprContext) AllConditionalOr() []IConditionalOrContext { } func (s *ExprContext) ConditionalOr(i int) IConditionalOrContext { -<<<<<<< HEAD var t antlr.RuleContext -======= - var t antlr.RuleContext; ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(IConditionalOrContext); ok { if j == i { -<<<<<<< HEAD t = ctx.(antlr.RuleContext) -======= - t = ctx.(antlr.RuleContext); ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) break } j++ @@ -909,17 +552,10 @@ func (s *ExprContext) QUESTIONMARK() antlr.TerminalNode { } func (s *ExprContext) Expr() IExprContext { -<<<<<<< HEAD var t antlr.RuleContext for _, ctx := range s.GetChildren() { if _, ok := ctx.(IExprContext); ok { t = ctx.(antlr.RuleContext) -======= - var t antlr.RuleContext; - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IExprContext); ok { - t = ctx.(antlr.RuleContext); ->>>>>>> 70e0318b1 ([WIP] add archivista storage 
backend) break } } @@ -939,10 +575,6 @@ func (s *ExprContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) s return antlr.TreesStringTree(s, ruleNames, recog) } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *ExprContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterExpr(s) @@ -965,12 +597,6 @@ func (s *ExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } -<<<<<<< HEAD -======= - - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (p *CELParser) Expr() (localctx IExprContext) { localctx = NewExprContext(p, p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 2, CELParserRULE_expr) @@ -978,7 +604,6 @@ func (p *CELParser) Expr() (localctx IExprContext) { p.EnterOuterAlt(localctx, 1) { -<<<<<<< HEAD p.SetState(37) var _x = p.ConditionalOr() @@ -986,66 +611,27 @@ func (p *CELParser) Expr() (localctx IExprContext) { localctx.(*ExprContext).e = _x } p.SetState(43) -======= - p.SetState(35) - - var _x = p.ConditionalOr() - - - localctx.(*ExprContext).e = _x - } - p.SetState(41) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.GetErrorHandler().Sync(p) if p.HasError() { goto errorExit } _la = p.GetTokenStream().LA(1) -<<<<<<< HEAD if _la == CELParserQUESTIONMARK { { p.SetState(38) -======= - - if _la == CELParserQUESTIONMARK { - { - p.SetState(36) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _m = p.Match(CELParserQUESTIONMARK) localctx.(*ExprContext).op = _m if p.HasError() { -<<<<<<< HEAD // Recognition error - abort rule goto errorExit -======= - // Recognition error - abort rule - goto errorExit - } - } - { - p.SetState(37) - - var _x = p.ConditionalOr() - - - localctx.(*ExprContext).e1 = _x - } - { - p.SetState(38) - p.Match(CELParserCOLON) - if p.HasError() { - // Recognition error - abort rule - goto errorExit ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } { p.SetState(39) -<<<<<<< HEAD var _x = p.ConditionalOr() localctx.(*ExprContext).e1 = _x @@ -1063,21 +649,11 @@ func (p *CELParser) Expr() (localctx IExprContext) { var _x = p.Expr() -======= - var _x = p.Expr() - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) localctx.(*ExprContext).e2 = _x } } -<<<<<<< HEAD -======= - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) errorExit: if p.HasError() { v := p.GetError() @@ -1091,10 +667,6 @@ errorExit: goto errorExit // Trick to prevent compiler error if the label is not used } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // IConditionalOrContext is an interface to support dynamic dispatch. type IConditionalOrContext interface { antlr.ParserRuleContext @@ -1103,66 +675,34 @@ type IConditionalOrContext interface { GetParser() antlr.Parser // GetS9 returns the s9 token. -<<<<<<< HEAD GetS9() antlr.Token // SetS9 sets the s9 token. SetS9(antlr.Token) -======= - GetS9() antlr.Token - - - // SetS9 sets the s9 token. - SetS9(antlr.Token) - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // GetOps returns the ops token list. GetOps() []antlr.Token -<<<<<<< HEAD // SetOps sets the ops token list. SetOps([]antlr.Token) -======= - - // SetOps sets the ops token list. - SetOps([]antlr.Token) - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // GetE returns the e rule contexts. GetE() IConditionalAndContext // Get_conditionalAnd returns the _conditionalAnd rule contexts. 
 	Get_conditionalAnd() IConditionalAndContext

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	// SetE sets the e rule contexts.
 	SetE(IConditionalAndContext)

 	// Set_conditionalAnd sets the _conditionalAnd rule contexts.
 	Set_conditionalAnd(IConditionalAndContext)

-<<<<<<< HEAD
 	// GetE1 returns the e1 rule context list.
 	GetE1() []IConditionalAndContext

 	// SetE1 sets the e1 rule context list.
 	SetE1([]IConditionalAndContext)
-=======
-
-	// GetE1 returns the e1 rule context list.
-	GetE1() []IConditionalAndContext
-
-
-	// SetE1 sets the e1 rule context list.
-	SetE1([]IConditionalAndContext)
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 	// Getter signatures
 	AllConditionalAnd() []IConditionalAndContext

@@ -1176,21 +716,12 @@ type IConditionalOrContext interface {
 type ConditionalOrContext struct {
 	antlr.BaseParserRuleContext
-<<<<<<< HEAD
 	parser          antlr.Parser
 	e               IConditionalAndContext
 	s9              antlr.Token
 	ops             []antlr.Token
 	_conditionalAnd IConditionalAndContext
 	e1              []IConditionalAndContext
-=======
-	parser antlr.Parser
-	e IConditionalAndContext
-	s9 antlr.Token
-	ops []antlr.Token
-	_conditionalAnd IConditionalAndContext
-	e1 []IConditionalAndContext
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 func NewEmptyConditionalOrContext() *ConditionalOrContext {
@@ -1200,11 +731,7 @@ func NewEmptyConditionalOrContext() *ConditionalOrContext {
 	return p
 }

-<<<<<<< HEAD
 func InitEmptyConditionalOrContext(p *ConditionalOrContext) {
-=======
-func InitEmptyConditionalOrContext(p *ConditionalOrContext) {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
 	p.RuleIndex = CELParserRULE_conditionalOr
 }

@@ -1226,51 +753,24 @@ func (s *ConditionalOrContext) GetParser() antlr.Parser { return s.parser }

 func (s *ConditionalOrContext) GetS9() antlr.Token { return s.s9 }

-<<<<<<< HEAD
-func (s *ConditionalOrContext) SetS9(v antlr.Token) { s.s9 = v }
-
-func (s *ConditionalOrContext) GetOps() []antlr.Token { return s.ops }
-
-func (s *ConditionalOrContext) SetOps(v []antlr.Token) { s.ops = v }
-
-=======
-
 func (s *ConditionalOrContext) SetS9(v antlr.Token) { s.s9 = v }
-
 func (s *ConditionalOrContext) GetOps() []antlr.Token { return s.ops }
-
 func (s *ConditionalOrContext) SetOps(v []antlr.Token) { s.ops = v }
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *ConditionalOrContext) GetE() IConditionalAndContext { return s.e }

 func (s *ConditionalOrContext) Get_conditionalAnd() IConditionalAndContext { return s._conditionalAnd }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *ConditionalOrContext) SetE(v IConditionalAndContext) { s.e = v }

 func (s *ConditionalOrContext) Set_conditionalAnd(v IConditionalAndContext) { s._conditionalAnd = v }

-<<<<<<< HEAD
-func (s *ConditionalOrContext) GetE1() []IConditionalAndContext { return s.e1 }
-
-func (s *ConditionalOrContext) SetE1(v []IConditionalAndContext) { s.e1 = v }
-
-=======
-
 func (s *ConditionalOrContext) GetE1() []IConditionalAndContext { return s.e1 }
-
 func (s *ConditionalOrContext) SetE1(v []IConditionalAndContext) { s.e1 = v }
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *ConditionalOrContext) AllConditionalAnd() []IConditionalAndContext {
 	children := s.GetChildren()
 	len := 0
@@ -1293,20 +793,12 @@ func (s *ConditionalOrContext) AllConditionalAnd() []IConditionalAndContext {
 }

 func (s *ConditionalOrContext) ConditionalAnd(i int) IConditionalAndContext {
-<<<<<<< HEAD
 	var t antlr.RuleContext
-=======
-	var t antlr.RuleContext;
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	j := 0
 	for _, ctx := range s.GetChildren() {
 		if _, ok := ctx.(IConditionalAndContext); ok {
 			if j == i {
-<<<<<<< HEAD
 				t = ctx.(antlr.RuleContext)
-=======
-				t = ctx.(antlr.RuleContext);
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 				break
 			}
 			j++
@@ -1336,10 +828,6 @@ func (s *ConditionalOrContext) ToStringTree(ruleNames []string, recog antlr.Reco
 	return antlr.TreesStringTree(s, ruleNames, recog)
 }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *ConditionalOrContext) EnterRule(listener antlr.ParseTreeListener) {
 	if listenerT, ok := listener.(CELListener); ok {
 		listenerT.EnterConditionalOr(s)
@@ -1362,12 +850,6 @@ func (s *ConditionalOrContext) Accept(visitor antlr.ParseTreeVisitor) interface{
 	}
 }

-<<<<<<< HEAD
-=======
-
-
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (p *CELParser) ConditionalOr() (localctx IConditionalOrContext) {
 	localctx = NewConditionalOrContext(p, p.GetParserRuleContext(), p.GetState())
 	p.EnterRule(localctx, 4, CELParserRULE_conditionalOr)
@@ -1375,7 +857,6 @@ func (p *CELParser) ConditionalOr() (localctx IConditionalOrContext) {
 	p.EnterOuterAlt(localctx, 1)
 	{
-<<<<<<< HEAD
 		p.SetState(45)

 		var _x = p.ConditionalAnd()

 		localctx.(*ConditionalOrContext).e = _x
 	}
 	p.SetState(50)
-=======
-		p.SetState(43)
-
-		var _x = p.ConditionalAnd()
-
-
-		localctx.(*ConditionalOrContext).e = _x
-	}
-	p.SetState(48)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	p.GetErrorHandler().Sync(p)
 	if p.HasError() {
 		goto errorExit
 	}
 	_la = p.GetTokenStream().LA(1)

-<<<<<<< HEAD
 	for _la == CELParserLOGICAL_OR {
 		{
 			p.SetState(46)
-=======
-
-	for _la == CELParserLOGICAL_OR {
-		{
-			p.SetState(44)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 			var _m = p.Match(CELParserLOGICAL_OR)

 			localctx.(*ConditionalOrContext).s9 = _m
 			if p.HasError() {
-<<<<<<< HEAD
 				// Recognition error - abort rule
 				goto errorExit
-=======
-				// Recognition error - abort rule
-				goto errorExit
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			}
 		}
 		localctx.(*ConditionalOrContext).ops = append(localctx.(*ConditionalOrContext).ops, localctx.(*ConditionalOrContext).s9)
 		{
-<<<<<<< HEAD
 			p.SetState(47)

 			var _x = p.ConditionalAnd()

-=======
-			p.SetState(45)
-
-			var _x = p.ConditionalAnd()
-
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			localctx.(*ConditionalOrContext)._conditionalAnd = _x
 		}
 		localctx.(*ConditionalOrContext).e1 = append(localctx.(*ConditionalOrContext).e1, localctx.(*ConditionalOrContext)._conditionalAnd)

-<<<<<<< HEAD
 		p.SetState(52)
 		p.GetErrorHandler().Sync(p)
 		if p.HasError() {
@@ -1450,19 +900,6 @@ func (p *CELParser) ConditionalOr() (localctx IConditionalOrContext) {
 		_la = p.GetTokenStream().LA(1)
 	}

-=======
-
-		p.SetState(50)
-		p.GetErrorHandler().Sync(p)
-		if p.HasError() {
-			goto errorExit
-		}
-		_la = p.GetTokenStream().LA(1)
-	}
-
-
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 errorExit:
 	if p.HasError() {
 		v := p.GetError()
@@ -1476,10 +913,6 @@ errorExit:
 	goto errorExit // Trick to prevent compiler error if the label is not used
 }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // IConditionalAndContext is an interface to support dynamic dispatch.
 type IConditionalAndContext interface {
 	antlr.ParserRuleContext

@@ -1488,66 +921,34 @@ type IConditionalAndContext interface {
 	GetParser() antlr.Parser

 	// GetS8 returns the s8 token.
-<<<<<<< HEAD
 	GetS8() antlr.Token

 	// SetS8 sets the s8 token.
 	SetS8(antlr.Token)
-=======
-	GetS8() antlr.Token
-
-
-	// SetS8 sets the s8 token.
-	SetS8(antlr.Token)
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 	// GetOps returns the ops token list.
 	GetOps() []antlr.Token

-<<<<<<< HEAD
-	// SetOps sets the ops token list.
-	SetOps([]antlr.Token)
-
-=======
-
 	// SetOps sets the ops token list.
 	SetOps([]antlr.Token)
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 	// GetE returns the e rule contexts.
 	GetE() IRelationContext

 	// Get_relation returns the _relation rule contexts.
 	Get_relation() IRelationContext

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	// SetE sets the e rule contexts.
 	SetE(IRelationContext)

 	// Set_relation sets the _relation rule contexts.
 	Set_relation(IRelationContext)

-<<<<<<< HEAD
 	// GetE1 returns the e1 rule context list.
 	GetE1() []IRelationContext

 	// SetE1 sets the e1 rule context list.
 	SetE1([]IRelationContext)
-=======
-
-	// GetE1 returns the e1 rule context list.
-	GetE1() []IRelationContext
-
-
-	// SetE1 sets the e1 rule context list.
-	SetE1([]IRelationContext)
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 	// Getter signatures
 	AllRelation() []IRelationContext

@@ -1561,21 +962,12 @@ type IConditionalAndContext interface {
 type ConditionalAndContext struct {
 	antlr.BaseParserRuleContext
-<<<<<<< HEAD
 	parser    antlr.Parser
 	e         IRelationContext
 	s8        antlr.Token
 	ops       []antlr.Token
 	_relation IRelationContext
 	e1        []IRelationContext
-=======
-	parser antlr.Parser
-	e IRelationContext
-	s8 antlr.Token
-	ops []antlr.Token
-	_relation IRelationContext
-	e1 []IRelationContext
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 func NewEmptyConditionalAndContext() *ConditionalAndContext {
@@ -1585,11 +977,7 @@ func NewEmptyConditionalAndContext() *ConditionalAndContext {
 	return p
 }

-<<<<<<< HEAD
 func InitEmptyConditionalAndContext(p *ConditionalAndContext) {
-=======
-func InitEmptyConditionalAndContext(p *ConditionalAndContext) {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
 	p.RuleIndex = CELParserRULE_conditionalAnd
 }

@@ -1611,51 +999,24 @@ func (s *ConditionalAndContext) GetParser() antlr.Parser { return s.parser }

 func (s *ConditionalAndContext) GetS8() antlr.Token { return s.s8 }

-<<<<<<< HEAD
 func (s *ConditionalAndContext) SetS8(v antlr.Token) { s.s8 = v }

 func (s *ConditionalAndContext) GetOps() []antlr.Token { return s.ops }

 func (s *ConditionalAndContext) SetOps(v []antlr.Token) { s.ops = v }
-=======
-
-func (s *ConditionalAndContext) SetS8(v antlr.Token) { s.s8 = v }
-
-
-func (s *ConditionalAndContext) GetOps() []antlr.Token { return s.ops }
-
-
-func (s *ConditionalAndContext) SetOps(v []antlr.Token) { s.ops = v }
-
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 func (s *ConditionalAndContext) GetE() IRelationContext { return s.e }

 func (s *ConditionalAndContext) Get_relation() IRelationContext { return s._relation }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *ConditionalAndContext) SetE(v IRelationContext) { s.e = v }

 func (s *ConditionalAndContext) Set_relation(v IRelationContext) { s._relation = v }

-<<<<<<< HEAD
-func (s *ConditionalAndContext) GetE1() []IRelationContext { return s.e1 }
-
-func (s *ConditionalAndContext) SetE1(v []IRelationContext) { s.e1 = v }
-
-=======
-
 func (s *ConditionalAndContext) GetE1() []IRelationContext { return s.e1 }
-
 func (s *ConditionalAndContext) SetE1(v []IRelationContext) { s.e1 = v }
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *ConditionalAndContext) AllRelation() []IRelationContext {
 	children := s.GetChildren()
 	len := 0
@@ -1678,20 +1039,12 @@ func (s *ConditionalAndContext) AllRelation() []IRelationContext {
 }

 func (s *ConditionalAndContext) Relation(i int) IRelationContext {
-<<<<<<< HEAD
 	var t antlr.RuleContext
-=======
-	var t antlr.RuleContext;
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	j := 0
 	for _, ctx := range s.GetChildren() {
 		if _, ok := ctx.(IRelationContext); ok {
 			if j == i {
-<<<<<<< HEAD
 				t = ctx.(antlr.RuleContext)
-=======
-				t = ctx.(antlr.RuleContext);
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 				break
 			}
 			j++
@@ -1721,10 +1074,6 @@ func (s *ConditionalAndContext) ToStringTree(ruleNames []string, recog antlr.Rec
 	return antlr.TreesStringTree(s, ruleNames, recog)
 }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *ConditionalAndContext) EnterRule(listener antlr.ParseTreeListener) {
 	if listenerT, ok := listener.(CELListener); ok {
 		listenerT.EnterConditionalAnd(s)
@@ -1747,12 +1096,6 @@ func (s *ConditionalAndContext) Accept(visitor antlr.ParseTreeVisitor) interface
 	}
 }

-<<<<<<< HEAD
-=======
-
-
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (p *CELParser) ConditionalAnd() (localctx IConditionalAndContext) {
 	localctx = NewConditionalAndContext(p, p.GetParserRuleContext(), p.GetState())
 	p.EnterRule(localctx, 6, CELParserRULE_conditionalAnd)
@@ -1760,58 +1103,34 @@ func (p *CELParser) ConditionalAnd() (localctx IConditionalAndContext) {
 	p.EnterOuterAlt(localctx, 1)
 	{
-<<<<<<< HEAD
 		p.SetState(53)
-=======
-		p.SetState(51)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 		var _x = p.relation(0)

 		localctx.(*ConditionalAndContext).e = _x
 	}
-<<<<<<< HEAD
 	p.SetState(58)
-=======
-	p.SetState(56)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	p.GetErrorHandler().Sync(p)
 	if p.HasError() {
 		goto errorExit
 	}
 	_la = p.GetTokenStream().LA(1)

-<<<<<<< HEAD
 	for _la == CELParserLOGICAL_AND {
 		{
 			p.SetState(54)
-=======
-
-	for _la == CELParserLOGICAL_AND {
-		{
-			p.SetState(52)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 			var _m = p.Match(CELParserLOGICAL_AND)

 			localctx.(*ConditionalAndContext).s8 = _m
 			if p.HasError() {
-<<<<<<< HEAD
 				// Recognition error - abort rule
 				goto errorExit
-=======
-				// Recognition error - abort rule
-				goto errorExit
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			}
 		}
 		localctx.(*ConditionalAndContext).ops = append(localctx.(*ConditionalAndContext).ops, localctx.(*ConditionalAndContext).s8)
 		{
-<<<<<<< HEAD
 			p.SetState(55)
-=======
-			p.SetState(53)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 			var _x = p.relation(0)

@@ -1819,7 +1138,6 @@ func (p *CELParser) ConditionalAnd() (localctx IConditionalAndContext) {
 		}
 		localctx.(*ConditionalAndContext).e1 = append(localctx.(*ConditionalAndContext).e1, localctx.(*ConditionalAndContext)._relation)

-<<<<<<< HEAD
 		p.SetState(60)
 		p.GetErrorHandler().Sync(p)
 		if p.HasError() {
@@ -1828,19 +1146,6 @@ func (p *CELParser) ConditionalAnd() (localctx IConditionalAndContext) {
 		_la = p.GetTokenStream().LA(1)
 	}

-=======
-
-		p.SetState(58)
-		p.GetErrorHandler().Sync(p)
-		if p.HasError() {
-			goto errorExit
-		}
-		_la = p.GetTokenStream().LA(1)
-	}
-
-
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 errorExit:
 	if p.HasError() {
 		v := p.GetError()
@@ -1854,10 +1159,6 @@ errorExit:
 	goto errorExit // Trick to prevent compiler error if the label is not used
 }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // IRelationContext is an interface to support dynamic dispatch.
 type IRelationContext interface {
 	antlr.ParserRuleContext

@@ -1866,19 +1167,10 @@ type IRelationContext interface {
 	GetParser() antlr.Parser

 	// GetOp returns the op token.
-<<<<<<< HEAD
 	GetOp() antlr.Token

 	// SetOp sets the op token.
 	SetOp(antlr.Token)
-=======
-	GetOp() antlr.Token
-
-
-	// SetOp sets the op token.
-	SetOp(antlr.Token)
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 	// Getter signatures
 	Calc() ICalcContext

@@ -1899,11 +1191,7 @@ type IRelationContext interface {
 type RelationContext struct {
 	antlr.BaseParserRuleContext
 	parser antlr.Parser
-<<<<<<< HEAD
 	op     antlr.Token
-=======
-	op antlr.Token
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 func NewEmptyRelationContext() *RelationContext {
@@ -1913,11 +1201,7 @@ func NewEmptyRelationContext() *RelationContext {
 	return p
 }

-<<<<<<< HEAD
 func InitEmptyRelationContext(p *RelationContext) {
-=======
-func InitEmptyRelationContext(p *RelationContext) {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
 	p.RuleIndex = CELParserRULE_relation
 }

@@ -1939,7 +1223,6 @@ func (s *RelationContext) GetParser() antlr.Parser { return s.parser }

 func (s *RelationContext) GetOp() antlr.Token { return s.op }

-<<<<<<< HEAD
 func (s *RelationContext) SetOp(v antlr.Token) { s.op = v }

 func (s *RelationContext) Calc() ICalcContext {
@@ -1947,17 +1230,6 @@ func (s *RelationContext) Calc() ICalcContext {
 	for _, ctx := range s.GetChildren() {
 		if _, ok := ctx.(ICalcContext); ok {
 			t = ctx.(antlr.RuleContext)
-=======
-
-func (s *RelationContext) SetOp(v antlr.Token) { s.op = v }
-
-
-func (s *RelationContext) Calc() ICalcContext {
-	var t antlr.RuleContext;
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(ICalcContext); ok {
-			t = ctx.(antlr.RuleContext);
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			break
 		}
 	}

@@ -1991,20 +1263,12 @@ func (s *RelationContext) AllRelation() []IRelationContext {
 }

 func (s *RelationContext) Relation(i int) IRelationContext {
-<<<<<<< HEAD
 	var t antlr.RuleContext
-=======
-	var t antlr.RuleContext;
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	j := 0
 	for _, ctx := range s.GetChildren() {
 		if _, ok := ctx.(IRelationContext); ok {
 			if j == i {
-<<<<<<< HEAD
 				t = ctx.(antlr.RuleContext)
-=======
-				t = ctx.(antlr.RuleContext);
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 				break
 			}
 			j++
@@ -2054,10 +1318,6 @@ func (s *RelationContext) ToStringTree(ruleNames []string, recog antlr.Recognize
 	return antlr.TreesStringTree(s, ruleNames, recog)
 }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *RelationContext) EnterRule(listener antlr.ParseTreeListener) {
 	if listenerT, ok := listener.(CELListener); ok {
 		listenerT.EnterRelation(s)
@@ -2080,13 +1340,6 @@ func (s *RelationContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
 	}
 }

-<<<<<<< HEAD
-=======
-
-
-
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (p *CELParser) Relation() (localctx IRelationContext) {
 	return p.relation(0)
 }

@@ -2106,20 +1359,12 @@ func (p *CELParser) relation(_p int) (localctx IRelationContext) {
 	p.EnterOuterAlt(localctx, 1)
 	{
-<<<<<<< HEAD
 		p.SetState(62)
-=======
-		p.SetState(60)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		p.calc(0)
 	}

 	p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1))
-<<<<<<< HEAD
 	p.SetState(69)
-=======
-	p.SetState(67)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	p.GetErrorHandler().Sync(p)
 	if p.HasError() {
 		goto errorExit
@@ -2136,22 +1381,14 @@ func (p *CELParser) relation(_p int) (localctx IRelationContext) {
 			_prevctx = localctx
 			localctx = NewRelationContext(p, _parentctx, _parentState)
 			p.PushNewRecursionContext(localctx, _startState, CELParserRULE_relation)
-<<<<<<< HEAD
 			p.SetState(64)
-=======
-			p.SetState(62)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 			if !(p.Precpred(p.GetParserRuleContext(), 1)) {
 				p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", ""))
 				goto errorExit
 			}
 			{
-<<<<<<< HEAD
 				p.SetState(65)
-=======
-				p.SetState(63)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 				var _lt = p.GetTokenStream().LT(1)

@@ -2159,11 +1396,7 @@ func (p *CELParser) relation(_p int) (localctx IRelationContext) {

 				_la = p.GetTokenStream().LA(1)

-<<<<<<< HEAD
 				if !((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&254) != 0) {
-=======
-				if !(((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 254) != 0)) {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 					var _ri = p.GetErrorHandler().RecoverInline(p)

 					localctx.(*RelationContext).op = _ri
@@ -2173,7 +1406,6 @@ func (p *CELParser) relation(_p int) (localctx IRelationContext) {
 				}
 			}
 			{
-<<<<<<< HEAD
 				p.SetState(66)
 				p.relation(2)
 			}

@@ -2184,32 +1416,13 @@ func (p *CELParser) relation(_p int) (localctx IRelationContext) {
 		if p.HasError() {
 			goto errorExit
 		}
-=======
-				p.SetState(64)
-				p.relation(2)
-			}
-
-
-		}
-		p.SetState(69)
-		p.GetErrorHandler().Sync(p)
-		if p.HasError() {
-			goto errorExit
-		}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		_alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 3, p.GetParserRuleContext())
 		if p.HasError() {
 			goto errorExit
 		}
 	}

-<<<<<<< HEAD
 errorExit:
-=======
-
-
-	errorExit:
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if p.HasError() {
 		v := p.GetError()
 		localctx.SetException(v)
@@ -2222,10 +1435,6 @@ errorExit:
 	goto errorExit // Trick to prevent compiler error if the label is not used
 }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // ICalcContext is an interface to support dynamic dispatch.
 type ICalcContext interface {
 	antlr.ParserRuleContext

@@ -2234,19 +1443,10 @@ type ICalcContext interface {
 	GetParser() antlr.Parser

 	// GetOp returns the op token.
-<<<<<<< HEAD
 	GetOp() antlr.Token

 	// SetOp sets the op token.
 	SetOp(antlr.Token)
-=======
-	GetOp() antlr.Token
-
-
-	// SetOp sets the op token.
-	SetOp(antlr.Token)
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 	// Getter signatures
 	Unary() IUnaryContext

@@ -2265,11 +1465,7 @@ type ICalcContext interface {
 type CalcContext struct {
 	antlr.BaseParserRuleContext
 	parser antlr.Parser
-<<<<<<< HEAD
 	op     antlr.Token
-=======
-	op antlr.Token
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 func NewEmptyCalcContext() *CalcContext {
@@ -2279,11 +1475,7 @@ func NewEmptyCalcContext() *CalcContext {
 	return p
 }

-<<<<<<< HEAD
 func InitEmptyCalcContext(p *CalcContext) {
-=======
-func InitEmptyCalcContext(p *CalcContext) {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
 	p.RuleIndex = CELParserRULE_calc
 }

@@ -2305,7 +1497,6 @@ func (s *CalcContext) GetParser() antlr.Parser { return s.parser }

 func (s *CalcContext) GetOp() antlr.Token { return s.op }

-<<<<<<< HEAD
 func (s *CalcContext) SetOp(v antlr.Token) { s.op = v }

 func (s *CalcContext) Unary() IUnaryContext {
@@ -2313,17 +1504,6 @@ func (s *CalcContext) Unary() IUnaryContext {
 	for _, ctx := range s.GetChildren() {
 		if _, ok := ctx.(IUnaryContext); ok {
 			t = ctx.(antlr.RuleContext)
-=======
-
-func (s *CalcContext) SetOp(v antlr.Token) { s.op = v }
-
-
-func (s *CalcContext) Unary() IUnaryContext {
-	var t antlr.RuleContext;
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IUnaryContext); ok {
-			t = ctx.(antlr.RuleContext);
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			break
 		}
 	}

@@ -2357,20 +1537,12 @@ func (s *CalcContext) AllCalc() []ICalcContext {
 }

 func (s *CalcContext) Calc(i int) ICalcContext {
-<<<<<<< HEAD
 	var t antlr.RuleContext
-=======
-	var t antlr.RuleContext;
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	j := 0
 	for _, ctx := range s.GetChildren() {
 		if _, ok := ctx.(ICalcContext); ok {
 			if j == i {
-<<<<<<< HEAD
 				t = ctx.(antlr.RuleContext)
-=======
-				t = ctx.(antlr.RuleContext);
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 				break
 			}
 			j++
@@ -2412,10 +1584,6 @@ func (s *CalcContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) s
 	return antlr.TreesStringTree(s, ruleNames, recog)
 }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *CalcContext) EnterRule(listener antlr.ParseTreeListener) {
 	if listenerT, ok := listener.(CELListener); ok {
 		listenerT.EnterCalc(s)
@@ -2438,13 +1606,6 @@ func (s *CalcContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
 	}
 }

-<<<<<<< HEAD
-=======
-
-
-
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (p *CELParser) Calc() (localctx ICalcContext) {
 	return p.calc(0)
 }

@@ -2464,20 +1625,12 @@ func (p *CELParser) calc(_p int) (localctx ICalcContext) {
 	p.EnterOuterAlt(localctx, 1)
 	{
-<<<<<<< HEAD
 		p.SetState(73)
-=======
-		p.SetState(71)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		p.Unary()
 	}

 	p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1))
-<<<<<<< HEAD
 	p.SetState(83)
-=======
-	p.SetState(81)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	p.GetErrorHandler().Sync(p)
 	if p.HasError() {
 		goto errorExit
@@ -2492,11 +1645,7 @@ func (p *CELParser) calc(_p int) (localctx ICalcContext) {
 				p.TriggerExitRuleEvent()
 			}
 			_prevctx = localctx
-<<<<<<< HEAD
 			p.SetState(81)
-=======
-			p.SetState(79)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			p.GetErrorHandler().Sync(p)
 			if p.HasError() {
 				goto errorExit
@@ -2506,22 +1655,14 @@ func (p *CELParser) calc(_p int) (localctx ICalcContext) {
 			case 1:
 				localctx = NewCalcContext(p, _parentctx, _parentState)
 				p.PushNewRecursionContext(localctx, _startState, CELParserRULE_calc)
-<<<<<<< HEAD
 				p.SetState(75)
-=======
-				p.SetState(73)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 				if !(p.Precpred(p.GetParserRuleContext(), 2)) {
 					p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 2)", ""))
 					goto errorExit
 				}
 				{
-<<<<<<< HEAD
 					p.SetState(76)
-=======
-					p.SetState(74)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 					var _lt = p.GetTokenStream().LT(1)

@@ -2529,11 +1670,7 @@ func (p *CELParser) calc(_p int) (localctx ICalcContext) {

 					_la = p.GetTokenStream().LA(1)

-<<<<<<< HEAD
 					if !((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&58720256) != 0) {
-=======
-					if !(((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 58720256) != 0)) {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 						var _ri = p.GetErrorHandler().RecoverInline(p)

 						localctx.(*CalcContext).op = _ri
@@ -2543,7 +1680,6 @@ func (p *CELParser) calc(_p int) (localctx ICalcContext) {
 					}
 				}
 				{
-<<<<<<< HEAD
 					p.SetState(77)
 					p.calc(3)
 				}

@@ -2552,28 +1688,13 @@ func (p *CELParser) calc(_p int) (localctx ICalcContext) {
 				localctx = NewCalcContext(p, _parentctx, _parentState)
 				p.PushNewRecursionContext(localctx, _startState, CELParserRULE_calc)
 				p.SetState(78)
-=======
-					p.SetState(75)
-					p.calc(3)
-				}
-
-
-			case 2:
-				localctx = NewCalcContext(p, _parentctx, _parentState)
-				p.PushNewRecursionContext(localctx, _startState, CELParserRULE_calc)
-				p.SetState(76)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 				if !(p.Precpred(p.GetParserRuleContext(), 1)) {
 					p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", ""))
 					goto errorExit
 				}
 				{
-<<<<<<< HEAD
 					p.SetState(79)
-=======
-					p.SetState(77)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 					var _lt = p.GetTokenStream().LT(1)

@@ -2591,11 +1712,7 @@ func (p *CELParser) calc(_p int) (localctx ICalcContext) {
 					}
 				}
 				{
-<<<<<<< HEAD
 					p.SetState(80)
-=======
-					p.SetState(78)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 					p.calc(2)
 				}

@@ -2604,32 +1721,18 @@ func (p *CELParser) calc(_p int) (localctx ICalcContext) {
 			}

 		}
-<<<<<<< HEAD
 		p.SetState(85)
 		p.GetErrorHandler().Sync(p)
 		if p.HasError() {
 			goto errorExit
 		}
-=======
-		p.SetState(83)
-		p.GetErrorHandler().Sync(p)
-		if p.HasError() {
-			goto errorExit
-		}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		_alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 5, p.GetParserRuleContext())
 		if p.HasError() {
 			goto errorExit
 		}
 	}

-<<<<<<< HEAD
 errorExit:
-=======
-
-
-	errorExit:
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if p.HasError() {
 		v := p.GetError()
 		localctx.SetException(v)
@@ -2642,10 +1745,6 @@ errorExit:
 	goto errorExit // Trick to prevent compiler error if the label is not used
 }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // IUnaryContext is an interface to support dynamic dispatch.
 type IUnaryContext interface {
 	antlr.ParserRuleContext

@@ -2668,11 +1767,7 @@ func NewEmptyUnaryContext() *UnaryContext {
 	return p
 }

-<<<<<<< HEAD
 func InitEmptyUnaryContext(p *UnaryContext) {
-=======
-func InitEmptyUnaryContext(p *UnaryContext) {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
 	p.RuleIndex = CELParserRULE_unary
 }

@@ -2704,12 +1799,6 @@ func (s *UnaryContext) ToStringTree(ruleNames []string, recog antlr.Recognizer)
 	return antlr.TreesStringTree(s, ruleNames, recog)
 }

-<<<<<<< HEAD
-=======
-
-
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 type LogicalNotContext struct {
 	UnaryContext
 	s19 antlr.Token
 	ops []antlr.Token
@@ -2726,25 +1815,12 @@ func NewLogicalNotContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *Log
 	return p
 }

-<<<<<<< HEAD
 func (s *LogicalNotContext) GetS19() antlr.Token { return s.s19 }

 func (s *LogicalNotContext) SetS19(v antlr.Token) { s.s19 = v }

 func (s *LogicalNotContext) GetOps() []antlr.Token { return s.ops }
-=======
-
-func (s *LogicalNotContext) GetS19() antlr.Token { return s.s19 }
-
-
-func (s *LogicalNotContext) SetS19(v antlr.Token) { s.s19 = v }
-
-
-func (s *LogicalNotContext) GetOps() []antlr.Token { return s.ops }
-
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *LogicalNotContext) SetOps(v []antlr.Token) { s.ops = v }

 func (s *LogicalNotContext) GetRuleContext() antlr.RuleContext {
@@ -2752,17 +1828,10 @@ func (s *LogicalNotContext) GetRuleContext() antlr.RuleContext {
 }

 func (s *LogicalNotContext) Member() IMemberContext {
-<<<<<<< HEAD
 	var t antlr.RuleContext
 	for _, ctx := range s.GetChildren() {
 		if _, ok := ctx.(IMemberContext); ok {
 			t = ctx.(antlr.RuleContext)
-=======
-	var t antlr.RuleContext;
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IMemberContext); ok {
-			t = ctx.(antlr.RuleContext);
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			break
 		}
 	}

@@ -2782,10 +1851,6 @@ func (s *LogicalNotContext) EXCLAM(i int) antlr.TerminalNode {
 	return s.GetToken(CELParserEXCLAM, i)
 }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *LogicalNotContext) EnterRule(listener antlr.ParseTreeListener) {
 	if listenerT, ok := listener.(CELListener); ok {
 		listenerT.EnterLogicalNot(s)
@@ -2808,10 +1873,6 @@ func (s *LogicalNotContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
 	}
 }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 type MemberExprContext struct {
 	UnaryContext
 }

@@ -2831,17 +1892,10 @@ func (s *MemberExprContext) GetRuleContext() antlr.RuleContext {
 }

 func (s *MemberExprContext) Member() IMemberContext {
-<<<<<<< HEAD
 	var t antlr.RuleContext
 	for _, ctx := range s.GetChildren() {
 		if _, ok := ctx.(IMemberContext); ok {
 			t = ctx.(antlr.RuleContext)
-=======
-	var t antlr.RuleContext;
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IMemberContext); ok {
-			t = ctx.(antlr.RuleContext);
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			break
 		}
 	}

@@ -2853,10 +1907,6 @@ func (s *MemberExprContext) Member() IMemberContext {
 	return t.(IMemberContext)
 }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *MemberExprContext) EnterRule(listener antlr.ParseTreeListener) {
 	if listenerT, ok := listener.(CELListener); ok {
 		listenerT.EnterMemberExpr(s)
@@ -2879,10 +1929,6 @@ func (s *MemberExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
 	}
 }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 type NegateContext struct {
 	UnaryContext
 	s18 antlr.Token
 	ops []antlr.Token
@@ -2899,25 +1945,12 @@ func NewNegateContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *NegateC
 	return p
 }

-<<<<<<< HEAD
-func (s *NegateContext) GetS18() antlr.Token { return s.s18 }
-
-func (s *NegateContext) SetS18(v antlr.Token) { s.s18 = v }
-
-func (s *NegateContext) GetOps() []antlr.Token { return s.ops }
-
-=======
-
 func (s *NegateContext) GetS18() antlr.Token { return s.s18 }
-
 func (s *NegateContext) SetS18(v antlr.Token) { s.s18 = v }
-
 func (s *NegateContext) GetOps() []antlr.Token { return s.ops }
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *NegateContext) SetOps(v []antlr.Token) { s.ops = v }

 func (s *NegateContext) GetRuleContext() antlr.RuleContext {
@@ -2925,17 +1958,10 @@ func (s *NegateContext) GetRuleContext() antlr.RuleContext {
 }

 func (s *NegateContext) Member() IMemberContext {
-<<<<<<< HEAD
 	var t antlr.RuleContext
 	for _, ctx := range s.GetChildren() {
 		if _, ok := ctx.(IMemberContext); ok {
 			t = ctx.(antlr.RuleContext)
-=======
-	var t antlr.RuleContext;
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IMemberContext); ok {
-			t = ctx.(antlr.RuleContext);
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			break
 		}
 	}

@@ -2955,10 +1981,6 @@ func (s *NegateContext) MINUS(i int) antlr.TerminalNode {
 	return s.GetToken(CELParserMINUS, i)
 }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *NegateContext) EnterRule(listener antlr.ParseTreeListener) {
 	if listenerT, ok := listener.(CELListener); ok {
 		listenerT.EnterNegate(s)
@@ -2981,11 +2003,6 @@ func (s *NegateContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
 	}
 }

-<<<<<<< HEAD
-=======
-
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (p *CELParser) Unary() (localctx IUnaryContext) {
 	localctx = NewUnaryContext(p, p.GetParserRuleContext(), p.GetState())
 	p.EnterRule(localctx, 12, CELParserRULE_unary)
@@ -2993,11 +2010,7 @@ func (p *CELParser) Unary() (localctx IUnaryContext) {

 	var _alt int

-<<<<<<< HEAD
 	p.SetState(99)
-=======
-	p.SetState(97)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	p.GetErrorHandler().Sync(p)
 	if p.HasError() {
 		goto errorExit
@@ -3008,7 +2021,6 @@ func (p *CELParser) Unary() (localctx IUnaryContext) {
 		localctx = NewMemberExprContext(p, localctx)
 		p.EnterOuterAlt(localctx, 1)
 		{
-<<<<<<< HEAD
 			p.SetState(86)
 			p.member(0)
 		}

@@ -3017,50 +2029,26 @@ func (p *CELParser) Unary() (localctx IUnaryContext) {
 		localctx = NewLogicalNotContext(p, localctx)
 		p.EnterOuterAlt(localctx, 2)
 		p.SetState(88)
-=======
-			p.SetState(84)
-			p.member(0)
-		}
-
-
-	case 2:
-		localctx = NewLogicalNotContext(p, localctx)
-		p.EnterOuterAlt(localctx, 2)
-		p.SetState(86)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		p.GetErrorHandler().Sync(p)
 		if p.HasError() {
 			goto errorExit
 		}
 		_la = p.GetTokenStream().LA(1)

-<<<<<<< HEAD
 		for ok := true; ok; ok = _la == CELParserEXCLAM {
 			{
 				p.SetState(87)
-=======
-
-		for ok := true; ok; ok = _la == CELParserEXCLAM {
-			{
-				p.SetState(85)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 				var _m = p.Match(CELParserEXCLAM)

 				localctx.(*LogicalNotContext).s19 = _m
 				if p.HasError() {
-<<<<<<< HEAD
 					// Recognition error - abort rule
 					goto errorExit
-=======
-					// Recognition error - abort rule
-					goto errorExit
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 				}
 			}
 			localctx.(*LogicalNotContext).ops = append(localctx.(*LogicalNotContext).ops, localctx.(*LogicalNotContext).s19)

-<<<<<<< HEAD
 			p.SetState(90)
 			p.GetErrorHandler().Sync(p)
 			if p.HasError() {
@@ -3077,26 +2065,6 @@ func (p *CELParser) Unary() (localctx IUnaryContext) {
 		localctx = NewNegateContext(p, localctx)
 		p.EnterOuterAlt(localctx, 3)
 		p.SetState(94)
-=======
-
-			p.SetState(88)
-			p.GetErrorHandler().Sync(p)
-			if p.HasError() {
-				goto errorExit
-			}
-			_la = p.GetTokenStream().LA(1)
-		}
-		{
-			p.SetState(90)
-			p.member(0)
-		}
-
-
-	case 3:
-		localctx = NewNegateContext(p, localctx)
-		p.EnterOuterAlt(localctx, 3)
-		p.SetState(92)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		p.GetErrorHandler().Sync(p)
 		if p.HasError() {
 			goto errorExit
@@ -3105,7 +2073,6 @@ func (p *CELParser) Unary() (localctx IUnaryContext) {
 		for ok := true; ok; ok = _alt != 2 && _alt != antlr.ATNInvalidAltNumber {
 			switch _alt {
 			case 1:
-<<<<<<< HEAD
 				{
 					p.SetState(93)

@@ -3118,34 +2085,13 @@ func (p *CELParser) Unary() (localctx IUnaryContext) {
 					}
 				}
 				localctx.(*NegateContext).ops = append(localctx.(*NegateContext).ops, localctx.(*NegateContext).s18)
-=======
-				{
-					p.SetState(91)
-
-					var _m = p.Match(CELParserMINUS)
-
-					localctx.(*NegateContext).s18 = _m
-					if p.HasError() {
-						// Recognition error - abort rule
-						goto errorExit
-					}
-				}
-				localctx.(*NegateContext).ops = append(localctx.(*NegateContext).ops, localctx.(*NegateContext).s18)
-
-
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 			default:
 				p.SetError(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil))
 				goto errorExit
 			}

-<<<<<<< HEAD
 			p.SetState(96)
-=======
-			p.SetState(94)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			p.GetErrorHandler().Sync(p)
 			_alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 7, p.GetParserRuleContext())
 			if p.HasError() {
@@ -3153,11 +2099,7 @@ func (p *CELParser) Unary() (localctx IUnaryContext) {
 			}
 		}
 		{
-<<<<<<< HEAD
 			p.SetState(98)
-=======
-			p.SetState(96)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			p.member(0)
 		}

@@ -3165,10 +2107,6 @@ func (p *CELParser) Unary() (localctx IUnaryContext) {
 		goto errorExit
 	}

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
errorExit:
 	if p.HasError() {
 		v := p.GetError()
@@ -3182,10 +2120,6 @@ errorExit:
 	goto errorExit // Trick to prevent compiler error if the label is not used
 }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // IMemberContext is an interface to support dynamic dispatch.
 type IMemberContext interface {
 	antlr.ParserRuleContext

@@ -3208,11 +2142,7 @@ func NewEmptyMemberContext() *MemberContext {
 	return p
 }

-<<<<<<< HEAD
 func InitEmptyMemberContext(p *MemberContext) {
-=======
-func InitEmptyMemberContext(p *MemberContext) {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
 	p.RuleIndex = CELParserRULE_member
 }

@@ -3244,25 +2174,12 @@ func (s *MemberContext) ToStringTree(ruleNames []string, recog antlr.Recognizer)
 	return antlr.TreesStringTree(s, ruleNames, recog)
 }

-<<<<<<< HEAD
 type MemberCallContext struct {
 	MemberContext
 	op   antlr.Token
 	id   antlr.Token
 	open antlr.Token
 	args IExprListContext
-=======
-
-
-
-type MemberCallContext struct {
-	MemberContext
-	op antlr.Token
-	id antlr.Token
-	open antlr.Token
-	args IExprListContext
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 func NewMemberCallContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *MemberCallContext {
@@ -3275,35 +2192,20 @@ func NewMemberCallContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *Mem
 	return p
 }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *MemberCallContext) GetOp() antlr.Token { return s.op }

 func (s *MemberCallContext) GetId() antlr.Token { return s.id }

 func (s *MemberCallContext) GetOpen() antlr.Token { return s.open }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *MemberCallContext) SetOp(v antlr.Token) { s.op = v }

 func (s *MemberCallContext) SetId(v antlr.Token) { s.id = v }

 func (s *MemberCallContext) SetOpen(v antlr.Token) { s.open = v }

-<<<<<<< HEAD
-func (s *MemberCallContext) GetArgs() IExprListContext { return s.args }
-
-=======
-
 func (s *MemberCallContext) GetArgs() IExprListContext { return s.args }
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *MemberCallContext) SetArgs(v IExprListContext) { s.args = v }

 func (s *MemberCallContext) GetRuleContext() antlr.RuleContext {
@@ -3311,17 +2213,10 @@ func (s *MemberCallContext) GetRuleContext() antlr.RuleContext {
 }

 func (s *MemberCallContext) Member() IMemberContext {
-<<<<<<< HEAD
 	var t antlr.RuleContext
 	for _, ctx := range s.GetChildren() {
 		if _, ok := ctx.(IMemberContext); ok {
 			t = ctx.(antlr.RuleContext)
-=======
-	var t antlr.RuleContext;
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IMemberContext); ok {
-			t = ctx.(antlr.RuleContext);
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			break
 		}
 	}

@@ -3350,17 +2245,10 @@ func (s *MemberCallContext) LPAREN() antlr.TerminalNode {
 }

 func (s *MemberCallContext) ExprList() IExprListContext {
-<<<<<<< HEAD
 	var t antlr.RuleContext
 	for _, ctx := range s.GetChildren() {
 		if _, ok := ctx.(IExprListContext); ok {
 			t = ctx.(antlr.RuleContext)
-=======
-	var t antlr.RuleContext;
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IExprListContext); ok {
-			t = ctx.(antlr.RuleContext);
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			break
 		}
 	}

@@ -3372,10 +2260,6 @@ func (s *MemberCallContext) ExprList() IExprListContext {
 	return t.(IExprListContext)
 }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *MemberCallContext) EnterRule(listener antlr.ParseTreeListener) {
 	if listenerT, ok := listener.(CELListener); ok {
 		listenerT.EnterMemberCall(s)
@@ -3398,20 +2282,11 @@ func (s *MemberCallContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
 	}
 }

-<<<<<<< HEAD
 type SelectContext struct {
 	MemberContext
 	op  antlr.Token
 	opt antlr.Token
 	id  IEscapeIdentContext
-=======
-
-type SelectContext struct {
-	MemberContext
-	op antlr.Token
-	opt antlr.Token
-	id antlr.Token
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 func NewSelectContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *SelectContext {
@@ -3424,48 +2299,27 @@ func NewSelectContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *SelectC
 	return p
 }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *SelectContext) GetOp() antlr.Token { return s.op }

 func (s *SelectContext) GetOpt() antlr.Token { return s.opt }

-<<<<<<< HEAD
-=======
-func (s *SelectContext) GetId() antlr.Token { return s.id }
-
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *SelectContext) SetOp(v antlr.Token) { s.op = v }

 func (s *SelectContext) SetOpt(v antlr.Token) { s.opt = v }

-<<<<<<< HEAD
 func (s *SelectContext) GetId() IEscapeIdentContext { return s.id }

 func (s *SelectContext) SetId(v IEscapeIdentContext) { s.id = v }
-=======
-func (s *SelectContext) SetId(v antlr.Token) { s.id = v }
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 func (s *SelectContext) GetRuleContext() antlr.RuleContext {
 	return s
 }

 func (s *SelectContext) Member() IMemberContext {
-<<<<<<< HEAD
 	var t antlr.RuleContext
 	for _, ctx := range s.GetChildren() {
 		if _, ok := ctx.(IMemberContext); ok {
 			t = ctx.(antlr.RuleContext)
-=======
-	var t antlr.RuleContext;
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IMemberContext); ok {
-			t = ctx.(antlr.RuleContext);
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			break
 		}
 	}

@@ -3481,7 +2335,6 @@ func (s *SelectContext) DOT() antlr.TerminalNode {
 	return s.GetToken(CELParserDOT, 0)
 }

-<<<<<<< HEAD
 func (s *SelectContext) EscapeIdent() IEscapeIdentContext {
 	var t antlr.RuleContext
 	for _, ctx := range s.GetChildren() {
@@ -3496,20 +2349,12 @@ func (s *SelectContext) EscapeIdent() IEscapeIdentContext {
 	}

 	return t.(IEscapeIdentContext)
-=======
-func (s *SelectContext) IDENTIFIER() antlr.TerminalNode {
-	return s.GetToken(CELParserIDENTIFIER, 0)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 func (s *SelectContext) QUESTIONMARK() antlr.TerminalNode {
 	return s.GetToken(CELParserQUESTIONMARK, 0)
 }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *SelectContext) EnterRule(listener antlr.ParseTreeListener) {
 	if listenerT, ok := listener.(CELListener); ok {
 		listenerT.EnterSelect(s)
@@ -3532,10 +2377,6 @@ func (s *SelectContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
 	}
 }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 type PrimaryExprContext struct {
 	MemberContext
 }

@@ -3555,17 +2396,10 @@ func (s *PrimaryExprContext) GetRuleContext() antlr.RuleContext {
 }

 func (s *PrimaryExprContext) Primary() IPrimaryContext {
-<<<<<<< HEAD
 	var t antlr.RuleContext
 	for _, ctx := range s.GetChildren() {
 		if _, ok := ctx.(IPrimaryContext); ok {
 			t = ctx.(antlr.RuleContext)
-=======
-	var t antlr.RuleContext;
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IPrimaryContext); ok {
-			t = ctx.(antlr.RuleContext);
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			break
 		}
 	}

@@ -3577,10 +2411,6 @@ func (s *PrimaryExprContext) Primary() IPrimaryContext {
 	return t.(IPrimaryContext)
 }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *PrimaryExprContext) EnterRule(listener antlr.ParseTreeListener) {
 	if listenerT, ok := listener.(CELListener); ok {
 		listenerT.EnterPrimaryExpr(s)
@@ -3603,20 +2433,11 @@ func (s *PrimaryExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{}
 	}
 }

-<<<<<<< HEAD
 type IndexContext struct {
 	MemberContext
 	op    antlr.Token
 	opt   antlr.Token
 	index IExprContext
-=======
-
-type IndexContext struct {
-	MemberContext
-	op antlr.Token
-	opt antlr.Token
-	index IExprContext
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 func NewIndexContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *IndexContext {
@@ -3629,31 +2450,16 @@ func NewIndexContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *IndexCon
 	return p
 }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *IndexContext) GetOp() antlr.Token { return s.op }

 func (s *IndexContext) GetOpt() antlr.Token { return s.opt }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *IndexContext) SetOp(v antlr.Token) { s.op = v }

 func (s *IndexContext) SetOpt(v antlr.Token) { s.opt = v }

-<<<<<<< HEAD
 func (s *IndexContext) GetIndex() IExprContext { return s.index }
-=======
-
-func (s *IndexContext) GetIndex() IExprContext { return s.index }
-
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *IndexContext) SetIndex(v IExprContext) { s.index = v }

 func (s *IndexContext) GetRuleContext() antlr.RuleContext {
@@ -3661,17 +2467,10 @@ func (s *IndexContext) GetRuleContext() antlr.RuleContext {
 }

 func (s *IndexContext) Member() IMemberContext {
-<<<<<<< HEAD
 	var t antlr.RuleContext
 	for _, ctx := range s.GetChildren() {
 		if _, ok := ctx.(IMemberContext); ok {
 			t = ctx.(antlr.RuleContext)
-=======
-	var t antlr.RuleContext;
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IMemberContext); ok {
-			t = ctx.(antlr.RuleContext);
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			break
 		}
 	}

@@ -3692,17 +2491,10 @@ func (s *IndexContext) LBRACKET() antlr.TerminalNode {
 }

 func (s *IndexContext) Expr() IExprContext {
-<<<<<<< HEAD
 	var t antlr.RuleContext
 	for _, ctx := range s.GetChildren() {
 		if _, ok := ctx.(IExprContext); ok {
 			t = ctx.(antlr.RuleContext)
-=======
-	var t antlr.RuleContext;
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IExprContext); ok {
-			t = ctx.(antlr.RuleContext);
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			break
 		}
 	}

@@ -3718,10 +2510,6 @@ func (s *IndexContext) QUESTIONMARK() antlr.TerminalNode {
 	return s.GetToken(CELParserQUESTIONMARK, 0)
 }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *IndexContext) EnterRule(listener antlr.ParseTreeListener) {
 	if listenerT, ok := listener.(CELListener); ok {
 		listenerT.EnterIndex(s)
@@ -3744,11 +2532,6 @@ func (s *IndexContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
 	}
 }

-<<<<<<< HEAD
-=======
-
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (p *CELParser) Member() (localctx IMemberContext) {
 	return p.member(0)
 }

@@ -3772,20 +2555,12 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) {
 	_prevctx = localctx

 	{
-<<<<<<< HEAD
 		p.SetState(102)
-=======
-		p.SetState(100)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		p.Primary()
 	}

 	p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1))
-<<<<<<< HEAD
 	p.SetState(128)
-=======
-	p.SetState(126)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	p.GetErrorHandler().Sync(p)
 	if p.HasError() {
 		goto errorExit
@@ -3800,11 +2575,7 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) {
 				p.TriggerExitRuleEvent()
 			}
 			_prevctx = localctx
-<<<<<<< HEAD
 			p.SetState(126)
-=======
-			p.SetState(124)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			p.GetErrorHandler().Sync(p)
 			if p.HasError() {
 				goto errorExit
@@ -3814,74 +2585,45 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) {
 			case 1:
 				localctx = NewSelectContext(p, NewMemberContext(p, _parentctx, _parentState))
 				p.PushNewRecursionContext(localctx, _startState, CELParserRULE_member)
-<<<<<<< HEAD
 				p.SetState(104)
-=======
-				p.SetState(102)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 				if !(p.Precpred(p.GetParserRuleContext(), 3)) {
 					p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 3)", ""))
 					goto errorExit
 				}
 				{
-<<<<<<< HEAD
 					p.SetState(105)
-=======
-					p.SetState(103)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 					var _m = p.Match(CELParserDOT)

 					localctx.(*SelectContext).op = _m
 					if p.HasError() {
-<<<<<<< HEAD
 						// Recognition error - abort rule
 						goto errorExit
 					}
 				}
 				p.SetState(107)
-=======
-						// Recognition error - abort rule
-						goto errorExit
-					}
-				}
-				p.SetState(105)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 				p.GetErrorHandler().Sync(p)
 				if p.HasError() {
 					goto errorExit
 				}
 				_la = p.GetTokenStream().LA(1)

-<<<<<<< HEAD
 				if _la == CELParserQUESTIONMARK {
 					{
 						p.SetState(106)
-=======
-
-				if _la == CELParserQUESTIONMARK {
-					{
-						p.SetState(104)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 						var _m = p.Match(CELParserQUESTIONMARK)

 						localctx.(*SelectContext).opt = _m
 						if p.HasError() {
-<<<<<<< HEAD
 							// Recognition error - abort rule
 							goto errorExit
-=======
-							// Recognition error - abort rule
-							goto errorExit
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 						}
 					}
 				}
 				{
-<<<<<<< HEAD
 					p.SetState(109)

 					var _x = p.EscapeIdent()

 					localctx.(*SelectContext).id = _x
 				}

@@ -3893,122 +2635,62 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) {
 				localctx = NewMemberCallContext(p, NewMemberContext(p, _parentctx, _parentState))
 				p.PushNewRecursionContext(localctx, _startState, CELParserRULE_member)
 				p.SetState(110)
-=======
-					p.SetState(107)
-
-					var _m = p.Match(CELParserIDENTIFIER)
-
-					localctx.(*SelectContext).id = _m
-					if p.HasError() {
-						// Recognition error - abort rule
-						goto errorExit
-					}
-				}
-
-
-			case 2:
-				localctx = NewMemberCallContext(p, NewMemberContext(p, _parentctx, _parentState))
-				p.PushNewRecursionContext(localctx, _startState, CELParserRULE_member)
-				p.SetState(108)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 				if !(p.Precpred(p.GetParserRuleContext(), 2)) {
 					p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 2)", ""))
 					goto errorExit
 				}
 				{
-<<<<<<< HEAD
 					p.SetState(111)
-=======
-					p.SetState(109)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 					var _m = p.Match(CELParserDOT)

 					localctx.(*MemberCallContext).op = _m
 					if p.HasError() {
-<<<<<<< HEAD
 						// Recognition error - abort rule
 						goto errorExit
 					}
 				}
 				{
 					p.SetState(112)
-=======
-						// Recognition error - abort rule
-						goto errorExit
-					}
-				}
-				{
-					p.SetState(110)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 					var _m = p.Match(CELParserIDENTIFIER)

 					localctx.(*MemberCallContext).id = _m
 					if p.HasError() {
-<<<<<<< HEAD
 						// Recognition error - abort rule
 						goto errorExit
 					}
 				}
 				{
 					p.SetState(113)
-=======
-						// Recognition error - abort rule
-						goto errorExit
-					}
-				}
-				{
-					p.SetState(111)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 					var _m = p.Match(CELParserLPAREN)

 					localctx.(*MemberCallContext).open = _m
 					if p.HasError() {
-<<<<<<< HEAD
 						// Recognition error - abort rule
 						goto errorExit
 					}
 				}
 				p.SetState(115)
-=======
-						// Recognition error - abort rule
-						goto errorExit
-					}
-				}
-				p.SetState(113)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 				p.GetErrorHandler().Sync(p)
 				if p.HasError() {
 					goto errorExit
 				}
 				_la = p.GetTokenStream().LA(1)

-<<<<<<< HEAD
 				if (int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&135762105344) != 0 {
 					{
 						p.SetState(114)

 						var _x = p.ExprList()
-=======
-
-				if ((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 135762105344) != 0) {
-					{
-						p.SetState(112)
-
-						var _x = p.ExprList()
-
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 						localctx.(*MemberCallContext).args = _x
 					}
 				}
 				{
-<<<<<<< HEAD
 					p.SetState(117)
 					p.Match(CELParserRPAREN)
 					if p.HasError() {
@@ -4021,84 +2703,44 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) {
 				localctx = NewIndexContext(p, NewMemberContext(p, _parentctx, _parentState))
 				p.PushNewRecursionContext(localctx, _startState, CELParserRULE_member)
 				p.SetState(118)
-=======
-					p.SetState(115)
-					p.Match(CELParserRPAREN)
-					if p.HasError() {
-						// Recognition error - abort rule
-						goto errorExit
-					}
-				}
-
-
-			case 3:
-				localctx = NewIndexContext(p, NewMemberContext(p, _parentctx, _parentState))
-				p.PushNewRecursionContext(localctx, _startState, CELParserRULE_member)
-				p.SetState(116)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 				if !(p.Precpred(p.GetParserRuleContext(), 1)) {
 					p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", ""))
 					goto errorExit
 				}
 				{
-<<<<<<< HEAD
 					p.SetState(119)
-=======
-					p.SetState(117)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 					var _m = p.Match(CELParserLBRACKET)

 					localctx.(*IndexContext).op = _m
 					if p.HasError() {
-<<<<<<< HEAD
 						// Recognition error - abort rule
 						goto errorExit
 					}
 				}
 				p.SetState(121)
-=======
-						// Recognition error - abort rule
-						goto errorExit
-					}
-				}
-				p.SetState(119)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 				p.GetErrorHandler().Sync(p)
 				if p.HasError() {
 					goto errorExit
 				}
 				_la = p.GetTokenStream().LA(1)

-<<<<<<< HEAD
 				if _la == CELParserQUESTIONMARK {
 					{
 						p.SetState(120)
-=======
-
-				if _la == CELParserQUESTIONMARK {
-					{
-						p.SetState(118)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 						var _m = p.Match(CELParserQUESTIONMARK)

 						localctx.(*IndexContext).opt = _m
 						if p.HasError() {
-<<<<<<< HEAD
 							// Recognition error - abort rule
 							goto errorExit
-=======
-							// Recognition error - abort rule
-							goto errorExit
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 						}
 					}
 				}
 				{
-<<<<<<< HEAD
 					p.SetState(123)

 					var _x = p.Expr()

@@ -4111,21 +2753,6 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) {
 					if p.HasError() {
 						// Recognition error - abort rule
 						goto errorExit
-=======
-					p.SetState(121)
-
-					var _x = p.Expr()
-
-
-					localctx.(*IndexContext).index = _x
-				}
-				{
-					p.SetState(122)
-					p.Match(CELParserRPRACKET)
-					if p.HasError() {
-						// Recognition error - abort rule
-						goto errorExit
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 					}
 				}

@@ -4134,32 +2761,18 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) {
 			}

 		}
-<<<<<<< HEAD
 		p.SetState(130)
 		p.GetErrorHandler().Sync(p)
 		if p.HasError() {
 			goto errorExit
 		}
-=======
-		p.SetState(128)
-		p.GetErrorHandler().Sync(p)
-		if p.HasError() {
-			goto errorExit
-		}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		_alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 13, p.GetParserRuleContext())
 		if p.HasError() {
 			goto errorExit
 		}
 	}

-<<<<<<< HEAD
 errorExit:
-=======
-
-
-	errorExit:
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if p.HasError() {
 		v := p.GetError()
 		localctx.SetException(v)
@@ -4172,10 +2785,6 @@ errorExit:
 	goto errorExit // Trick to prevent compiler error if the label is not used
 }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // IPrimaryContext is an interface to support dynamic dispatch.
 type IPrimaryContext interface {
 	antlr.ParserRuleContext

@@ -4198,11 +2807,7 @@ func NewEmptyPrimaryContext() *PrimaryContext {
 	return p
 }

-<<<<<<< HEAD
 func InitEmptyPrimaryContext(p *PrimaryContext) {
-=======
-func InitEmptyPrimaryContext(p *PrimaryContext) {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
 	p.RuleIndex = CELParserRULE_primary
 }

@@ -4234,20 +2839,10 @@ func (s *PrimaryContext) ToStringTree(ruleNames []string, recog antlr.Recognizer
 	return antlr.TreesStringTree(s, ruleNames, recog)
 }

-<<<<<<< HEAD
 type CreateListContext struct {
 	PrimaryContext
 	op    antlr.Token
 	elems IListInitContext
-=======
-
-
-
-type CreateListContext struct {
-	PrimaryContext
-	op antlr.Token
-	elems IListInitContext
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 func NewCreateListContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *CreateListContext {
@@ -4260,25 +2855,12 @@ func NewCreateListContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *Cre
 	return p
 }

-<<<<<<< HEAD
-func (s *CreateListContext) GetOp() antlr.Token { return s.op }
-
-func (s *CreateListContext) SetOp(v antlr.Token) { s.op = v }
-
-func (s *CreateListContext) GetElems() IListInitContext { return s.elems }
-
-=======
-
 func (s *CreateListContext) GetOp() antlr.Token { return s.op }
-
 func (s *CreateListContext) SetOp(v antlr.Token) { s.op = v }
-
 func (s *CreateListContext) GetElems() IListInitContext { return s.elems }
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *CreateListContext) SetElems(v IListInitContext) { s.elems = v }

 func (s *CreateListContext) GetRuleContext() antlr.RuleContext {
@@ -4298,17 +2880,10 @@ func (s *CreateListContext) COMMA() antlr.TerminalNode {
 }

 func (s *CreateListContext) ListInit() IListInitContext {
-<<<<<<< HEAD
 	var t antlr.RuleContext
 	for _, ctx := range s.GetChildren() {
 		if _, ok := ctx.(IListInitContext); ok {
 			t = ctx.(antlr.RuleContext)
-=======
-	var t antlr.RuleContext;
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IListInitContext); ok {
-			t = ctx.(antlr.RuleContext);
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			break
 		}
 	}

@@ -4320,10 +2895,6 @@ func (s *CreateListContext) ListInit() IListInitContext {
 	return t.(IListInitContext)
 }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *CreateListContext) EnterRule(listener antlr.ParseTreeListener) {
 	if listenerT, ok := listener.(CELListener); ok {
 		listenerT.EnterCreateList(s)
@@ -4346,7 +2917,6 @@ func (s *CreateListContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
 	}
 }

-<<<<<<< HEAD
 type IdentContext struct {
 	PrimaryContext
 	leadingDot antlr.Token
@@ -4409,13 +2979,6 @@ type CreateStructContext struct {
 	PrimaryContext
 	op      antlr.Token
 	entries IMapInitializerListContext
-=======
-
-type CreateStructContext struct {
-	PrimaryContext
-	op antlr.Token
-	entries IMapInitializerListContext
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 func NewCreateStructContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *CreateStructContext {
@@ -4428,25 +2991,12 @@ func NewCreateStructContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *C
 	return p
 }

-<<<<<<< HEAD
-func (s *CreateStructContext) GetOp() antlr.Token { return s.op }
-
-func (s *CreateStructContext) SetOp(v antlr.Token) { s.op = v }
-
-func (s *CreateStructContext) GetEntries() IMapInitializerListContext { return s.entries }
-
-=======
-
 func (s *CreateStructContext) GetOp() antlr.Token { return s.op }
-
 func (s *CreateStructContext) SetOp(v antlr.Token) { s.op = v }
-
 func (s *CreateStructContext) GetEntries() IMapInitializerListContext { return s.entries }
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *CreateStructContext) SetEntries(v IMapInitializerListContext) { s.entries = v }

 func (s *CreateStructContext) GetRuleContext() antlr.RuleContext {
@@ -4466,17 +3016,10 @@ func (s *CreateStructContext) COMMA() antlr.TerminalNode {
 }

 func (s *CreateStructContext) MapInitializerList() IMapInitializerListContext {
-<<<<<<< HEAD
 	var t antlr.RuleContext
 	for _, ctx := range s.GetChildren() {
 		if _, ok := ctx.(IMapInitializerListContext); ok {
 			t = ctx.(antlr.RuleContext)
-=======
-	var t antlr.RuleContext;
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IMapInitializerListContext); ok {
-			t = ctx.(antlr.RuleContext);
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			break
 		}
 	}

@@ -4488,10 +3031,6 @@ func (s *CreateStructContext) MapInitializerList() IMapInitializerListContext {
 	return t.(IMapInitializerListContext)
 }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *CreateStructContext) EnterRule(listener antlr.ParseTreeListener) {
 	if listenerT, ok := listener.(CELListener); ok {
 		listenerT.EnterCreateStruct(s)
@@ -4514,10 +3053,6 @@ func (s *CreateStructContext) Accept(visitor antlr.ParseTreeVisitor) interface{}
 	}
 }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 type ConstantLiteralContext struct {
 	PrimaryContext
 }

@@ -4537,17 +3072,10 @@ func (s *ConstantLiteralContext) GetRuleContext() antlr.RuleContext {
 }

 func (s *ConstantLiteralContext) Literal() ILiteralContext {
-<<<<<<< HEAD
 	var t antlr.RuleContext
 	for _, ctx := range s.GetChildren() {
 		if _, ok := ctx.(ILiteralContext); ok {
 			t = ctx.(antlr.RuleContext)
-=======
-	var t antlr.RuleContext;
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(ILiteralContext); ok {
-			t = ctx.(antlr.RuleContext);
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			break
 		}
 	}

@@ -4559,10 +3087,6 @@ func (s *ConstantLiteralContext) Literal() ILiteralContext {
 	return t.(ILiteralContext)
 }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *ConstantLiteralContext) EnterRule(listener antlr.ParseTreeListener) {
 	if listenerT, ok := listener.(CELListener); ok {
 		listenerT.EnterConstantLiteral(s)
@@ -4585,17 +3109,10 @@ func (s *ConstantLiteralContext) Accept(visitor antlr.ParseTreeVisitor) interfac
 	}
 }

-<<<<<<< HEAD
 type NestedContext struct {
 	PrimaryContext
 	e IExprContext
-=======
-
-type NestedContext struct {
-	PrimaryContext
-	e IExprContext
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
-}
+}

 func NewNestedContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *NestedContext {
 	var p = new(NestedContext)
@@ -4607,15 +3124,8 @@ func NewNestedContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *NestedC
 	return p
 }

-<<<<<<< HEAD
-func (s *NestedContext) GetE() IExprContext { return s.e }
-
-=======
-
 func (s *NestedContext) GetE() IExprContext { return s.e }
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *NestedContext) SetE(v IExprContext) { s.e = v }

 func (s *NestedContext) GetRuleContext() antlr.RuleContext {
@@ -4631,17 +3141,10 @@ func (s *NestedContext) RPAREN() antlr.TerminalNode {
 }

 func (s *NestedContext) Expr() IExprContext {
-<<<<<<< HEAD
 	var t antlr.RuleContext
 	for _, ctx := range s.GetChildren() {
 		if _, ok := ctx.(IExprContext); ok {
 			t = ctx.(antlr.RuleContext)
-=======
-	var t antlr.RuleContext;
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IExprContext); ok {
-			t = ctx.(antlr.RuleContext);
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			break
 		}
 	}

@@ -4653,10 +3156,6 @@ func (s *NestedContext) Expr() IExprContext {
 	return t.(IExprContext)
 }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *NestedContext) EnterRule(listener antlr.ParseTreeListener) {
 	if listenerT, ok := listener.(CELListener); ok {
 		listenerT.EnterNested(s)
@@ -4679,7 +3178,6 @@ func (s *NestedContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
 	}
 }

-<<<<<<< HEAD
 type CreateMessageContext struct {
 	PrimaryContext
 	leadingDot  antlr.Token
@@ -4689,18 +3187,6 @@ type CreateMessageContext struct {
 	ops         []antlr.Token
 	op          antlr.Token
 	entries     IFieldInitializerListContext
-=======
-
-type CreateMessageContext struct {
-	PrimaryContext
-	leadingDot antlr.Token
-	_IDENTIFIER antlr.Token
-	ids []antlr.Token
-	s16 antlr.Token
-	ops []antlr.Token
-	op antlr.Token
-	entries IFieldInitializerListContext
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }

 func NewCreateMessageContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *CreateMessageContext {
@@ -4713,10 +3199,6 @@ func NewCreateMessageContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *
 	return p
 }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *CreateMessageContext) GetLeadingDot() antlr.Token { return s.leadingDot }

 func (s *CreateMessageContext) Get_IDENTIFIER() antlr.Token { return s._IDENTIFIER }

@@ -4725,10 +3207,6 @@ func (s *CreateMessageContext) GetS16() antlr.Token { return s.s16 }

 func (s *CreateMessageContext) GetOp() antlr.Token { return s.op }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *CreateMessageContext) SetLeadingDot(v antlr.Token) { s.leadingDot = v }

 func (s *CreateMessageContext) Set_IDENTIFIER(v antlr.Token) { s._IDENTIFIER = v }

@@ -4737,31 +3215,16 @@ func (s *CreateMessageContext) SetS16(v antlr.Token) { s.s16 = v }

 func (s *CreateMessageContext) SetOp(v antlr.Token) { s.op = v }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *CreateMessageContext) GetIds() []antlr.Token { return s.ids }

 func (s *CreateMessageContext) GetOps() []antlr.Token { return s.ops }

-<<<<<<< HEAD
-=======
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *CreateMessageContext) SetIds(v []antlr.Token) { s.ids = v }

 func (s *CreateMessageContext) SetOps(v []antlr.Token) { s.ops = v }

-<<<<<<< HEAD
 func (s *CreateMessageContext) GetEntries() IFieldInitializerListContext { return s.entries }
-=======
-
-func (s *CreateMessageContext) GetEntries() IFieldInitializerListContext { return s.entries }
-
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func (s *CreateMessageContext) SetEntries(v IFieldInitializerListContext) { s.entries = v }

 func (s *CreateMessageContext) GetRuleContext() antlr.RuleContext {
@@ -4797,17 +3260,10 @@ func (s *CreateMessageContext) DOT(i int)
antlr.TerminalNode { } func (s *CreateMessageContext) FieldInitializerList() IFieldInitializerListContext { -<<<<<<< HEAD var t antlr.RuleContext for _, ctx := range s.GetChildren() { if _, ok := ctx.(IFieldInitializerListContext); ok { t = ctx.(antlr.RuleContext) -======= - var t antlr.RuleContext; - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IFieldInitializerListContext); ok { - t = ctx.(antlr.RuleContext); ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) break } } @@ -4819,10 +3275,6 @@ func (s *CreateMessageContext) FieldInitializerList() IFieldInitializerListConte return t.(IFieldInitializerListContext) } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *CreateMessageContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterCreateMessage(s) @@ -4845,7 +3297,6 @@ func (s *CreateMessageContext) Accept(visitor antlr.ParseTreeVisitor) interface{ } } -<<<<<<< HEAD type GlobalCallContext struct { PrimaryContext leadingDot antlr.Token @@ -4856,19 +3307,6 @@ type GlobalCallContext struct { func NewGlobalCallContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *GlobalCallContext { var p = new(GlobalCallContext) -======= - -type IdentOrGlobalCallContext struct { - PrimaryContext - leadingDot antlr.Token - id antlr.Token - op antlr.Token - args IExprListContext -} - -func NewIdentOrGlobalCallContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *IdentOrGlobalCallContext { - var p = new(IdentOrGlobalCallContext) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) InitEmptyPrimaryContext(&p.PrimaryContext) p.parser = parser @@ -4877,7 +3315,6 @@ func NewIdentOrGlobalCallContext(parser antlr.Parser, ctx antlr.ParserRuleContex return p } -<<<<<<< HEAD func (s *GlobalCallContext) GetLeadingDot() antlr.Token { return s.leadingDot } func (s *GlobalCallContext) GetId() antlr.Token { return s.id } @@ -4919,53 +3356,6 @@ func (s *GlobalCallContext) ExprList() IExprListContext { for _, ctx := range s.GetChildren() { if _, ok := ctx.(IExprListContext); ok { t = ctx.(antlr.RuleContext) -======= - -func (s *IdentOrGlobalCallContext) GetLeadingDot() antlr.Token { return s.leadingDot } - -func (s *IdentOrGlobalCallContext) GetId() antlr.Token { return s.id } - -func (s *IdentOrGlobalCallContext) GetOp() antlr.Token { return s.op } - - -func (s *IdentOrGlobalCallContext) SetLeadingDot(v antlr.Token) { s.leadingDot = v } - -func (s *IdentOrGlobalCallContext) SetId(v antlr.Token) { s.id = v } - -func (s *IdentOrGlobalCallContext) SetOp(v antlr.Token) { s.op = v } - - -func (s *IdentOrGlobalCallContext) GetArgs() IExprListContext { return s.args } - - -func (s *IdentOrGlobalCallContext) SetArgs(v IExprListContext) { s.args = v } - -func (s *IdentOrGlobalCallContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *IdentOrGlobalCallContext) IDENTIFIER() antlr.TerminalNode { - return s.GetToken(CELParserIDENTIFIER, 0) -} - -func (s *IdentOrGlobalCallContext) RPAREN() antlr.TerminalNode { - return s.GetToken(CELParserRPAREN, 0) -} - -func (s *IdentOrGlobalCallContext) DOT() antlr.TerminalNode { - return s.GetToken(CELParserDOT, 0) -} - -func (s *IdentOrGlobalCallContext) LPAREN() antlr.TerminalNode { - return s.GetToken(CELParserLPAREN, 0) -} - -func (s *IdentOrGlobalCallContext) ExprList() IExprListContext { - var t antlr.RuleContext; - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IExprListContext); ok { - t = ctx.(antlr.RuleContext); ->>>>>>> 70e0318b1 
([WIP] add archivista storage backend) break } } @@ -4977,7 +3367,6 @@ func (s *IdentOrGlobalCallContext) ExprList() IExprListContext { return t.(IExprListContext) } -<<<<<<< HEAD func (s *GlobalCallContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterGlobalCall(s) @@ -4994,46 +3383,18 @@ func (s *GlobalCallContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { switch t := visitor.(type) { case CELVisitor: return t.VisitGlobalCall(s) -======= - -func (s *IdentOrGlobalCallContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(CELListener); ok { - listenerT.EnterIdentOrGlobalCall(s) - } -} - -func (s *IdentOrGlobalCallContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(CELListener); ok { - listenerT.ExitIdentOrGlobalCall(s) - } -} - -func (s *IdentOrGlobalCallContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { - switch t := visitor.(type) { - case CELVisitor: - return t.VisitIdentOrGlobalCall(s) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: return t.VisitChildren(s) } } -<<<<<<< HEAD -======= - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (p *CELParser) Primary() (localctx IPrimaryContext) { localctx = NewPrimaryContext(p, p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 16, CELParserRULE_primary) var _la int -<<<<<<< HEAD p.SetState(184) -======= - p.SetState(180) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.GetErrorHandler().Sync(p) if p.HasError() { goto errorExit @@ -5041,22 +3402,15 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 25, p.GetParserRuleContext()) { case 1: -<<<<<<< HEAD localctx = NewIdentContext(p, localctx) p.EnterOuterAlt(localctx, 1) p.SetState(132) -======= - localctx = NewIdentOrGlobalCallContext(p, localctx) - p.EnterOuterAlt(localctx, 1) - p.SetState(130) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.GetErrorHandler().Sync(p) if p.HasError() { goto errorExit } _la = p.GetTokenStream().LA(1) -<<<<<<< HEAD if _la == CELParserDOT { { p.SetState(131) @@ -5067,25 +3421,11 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { if p.HasError() { // Recognition error - abort rule goto errorExit -======= - - if _la == CELParserDOT { - { - p.SetState(129) - - var _m = p.Match(CELParserDOT) - - localctx.(*IdentOrGlobalCallContext).leadingDot = _m - if p.HasError() { - // Recognition error - abort rule - goto errorExit ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } } { -<<<<<<< HEAD p.SetState(134) var _m = p.Match(CELParserIDENTIFIER) @@ -5202,158 +3542,39 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { p.EnterOuterAlt(localctx, 4) { p.SetState(148) -======= - p.SetState(132) - - var _m = p.Match(CELParserIDENTIFIER) - - localctx.(*IdentOrGlobalCallContext).id = _m - if p.HasError() { - // Recognition error - abort rule - goto errorExit - } - } - p.SetState(138) - p.GetErrorHandler().Sync(p) - - - if p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 16, p.GetParserRuleContext()) == 1 { - { - p.SetState(133) - - var _m = p.Match(CELParserLPAREN) - - localctx.(*IdentOrGlobalCallContext).op = _m - if p.HasError() { - // Recognition error - abort rule - goto errorExit - } - } - p.SetState(135) - p.GetErrorHandler().Sync(p) - if p.HasError() { - goto errorExit - } - _la = p.GetTokenStream().LA(1) - - - if 
((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 135762105344) != 0) { - { - p.SetState(134) - - var _x = p.ExprList() - - - localctx.(*IdentOrGlobalCallContext).args = _x - } - - } - { - p.SetState(137) - p.Match(CELParserRPAREN) - if p.HasError() { - // Recognition error - abort rule - goto errorExit - } - } - - } else if p.HasError() { // JIM - goto errorExit - } - - - case 2: - localctx = NewNestedContext(p, localctx) - p.EnterOuterAlt(localctx, 2) - { - p.SetState(140) - p.Match(CELParserLPAREN) - if p.HasError() { - // Recognition error - abort rule - goto errorExit - } - } - { - p.SetState(141) - - var _x = p.Expr() - - - localctx.(*NestedContext).e = _x - } - { - p.SetState(142) - p.Match(CELParserRPAREN) - if p.HasError() { - // Recognition error - abort rule - goto errorExit - } - } - - - case 3: - localctx = NewCreateListContext(p, localctx) - p.EnterOuterAlt(localctx, 3) - { - p.SetState(144) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _m = p.Match(CELParserLBRACKET) localctx.(*CreateListContext).op = _m if p.HasError() { -<<<<<<< HEAD // Recognition error - abort rule goto errorExit } } p.SetState(150) -======= - // Recognition error - abort rule - goto errorExit - } - } - p.SetState(146) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.GetErrorHandler().Sync(p) if p.HasError() { goto errorExit } _la = p.GetTokenStream().LA(1) -<<<<<<< HEAD if (int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&135763153920) != 0 { { p.SetState(149) var _x = p.ListInit() -======= - - if ((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 135763153920) != 0) { - { - p.SetState(145) - - var _x = p.ListInit() - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) localctx.(*CreateListContext).elems = _x } } -<<<<<<< HEAD p.SetState(153) -======= - p.SetState(149) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.GetErrorHandler().Sync(p) if p.HasError() { goto errorExit } _la = p.GetTokenStream().LA(1) -<<<<<<< HEAD if _la == CELParserCOMMA { { p.SetState(152) @@ -5361,22 +3582,11 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { if p.HasError() { // Recognition error - abort rule goto errorExit -======= - - if _la == CELParserCOMMA { - { - p.SetState(148) - p.Match(CELParserCOMMA) - if p.HasError() { - // Recognition error - abort rule - goto errorExit ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } } { -<<<<<<< HEAD p.SetState(155) p.Match(CELParserRPRACKET) if p.HasError() { @@ -5390,102 +3600,32 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { p.EnterOuterAlt(localctx, 5) { p.SetState(156) -======= - p.SetState(151) - p.Match(CELParserRPRACKET) - if p.HasError() { - // Recognition error - abort rule - goto errorExit - } - } - - - case 4: - localctx = NewCreateStructContext(p, localctx) - p.EnterOuterAlt(localctx, 4) - { - p.SetState(152) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _m = p.Match(CELParserLBRACE) localctx.(*CreateStructContext).op = _m if p.HasError() { -<<<<<<< HEAD // Recognition error - abort rule goto errorExit } } p.SetState(158) -======= - // Recognition error - abort rule - goto errorExit - } - } - p.SetState(154) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.GetErrorHandler().Sync(p) if p.HasError() { goto errorExit } _la = p.GetTokenStream().LA(1) -<<<<<<< HEAD if (int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&135763153920) != 0 { { p.SetState(157) var _x = p.MapInitializerList() -======= - - if ((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 135763153920) 
!= 0) { - { - p.SetState(153) - - var _x = p.MapInitializerList() - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) localctx.(*CreateStructContext).entries = _x } } -<<<<<<< HEAD -======= - p.SetState(157) - p.GetErrorHandler().Sync(p) - if p.HasError() { - goto errorExit - } - _la = p.GetTokenStream().LA(1) - - - if _la == CELParserCOMMA { - { - p.SetState(156) - p.Match(CELParserCOMMA) - if p.HasError() { - // Recognition error - abort rule - goto errorExit - } - } - - } - { - p.SetState(159) - p.Match(CELParserRBRACE) - if p.HasError() { - // Recognition error - abort rule - goto errorExit - } - } - - - case 5: - localctx = NewCreateMessageContext(p, localctx) - p.EnterOuterAlt(localctx, 5) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.SetState(161) p.GetErrorHandler().Sync(p) if p.HasError() { @@ -5493,7 +3633,6 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { } _la = p.GetTokenStream().LA(1) -<<<<<<< HEAD if _la == CELParserCOMMA { { p.SetState(160) @@ -5501,26 +3640,12 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { if p.HasError() { // Recognition error - abort rule goto errorExit -======= - - if _la == CELParserDOT { - { - p.SetState(160) - - var _m = p.Match(CELParserDOT) - - localctx.(*CreateMessageContext).leadingDot = _m - if p.HasError() { - // Recognition error - abort rule - goto errorExit ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } } { p.SetState(163) -<<<<<<< HEAD p.Match(CELParserRBRACE) if p.HasError() { // Recognition error - abort rule @@ -5554,82 +3679,49 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { } { p.SetState(167) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _m = p.Match(CELParserIDENTIFIER) localctx.(*CreateMessageContext)._IDENTIFIER = _m if p.HasError() { -<<<<<<< HEAD // Recognition error - abort rule goto errorExit } } localctx.(*CreateMessageContext).ids = append(localctx.(*CreateMessageContext).ids, localctx.(*CreateMessageContext)._IDENTIFIER) p.SetState(172) -======= - // Recognition error - abort rule - goto errorExit - } - } - localctx.(*CreateMessageContext).ids = append(localctx.(*CreateMessageContext).ids, localctx.(*CreateMessageContext)._IDENTIFIER) - p.SetState(168) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.GetErrorHandler().Sync(p) if p.HasError() { goto errorExit } _la = p.GetTokenStream().LA(1) -<<<<<<< HEAD for _la == CELParserDOT { { p.SetState(168) -======= - - for _la == CELParserDOT { - { - p.SetState(164) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _m = p.Match(CELParserDOT) localctx.(*CreateMessageContext).s16 = _m if p.HasError() { -<<<<<<< HEAD // Recognition error - abort rule goto errorExit -======= - // Recognition error - abort rule - goto errorExit ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } localctx.(*CreateMessageContext).ops = append(localctx.(*CreateMessageContext).ops, localctx.(*CreateMessageContext).s16) { -<<<<<<< HEAD p.SetState(169) -======= - p.SetState(165) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _m = p.Match(CELParserIDENTIFIER) localctx.(*CreateMessageContext)._IDENTIFIER = _m if p.HasError() { -<<<<<<< HEAD // Recognition error - abort rule goto errorExit -======= - // Recognition error - abort rule - goto errorExit ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } localctx.(*CreateMessageContext).ids = append(localctx.(*CreateMessageContext).ids, localctx.(*CreateMessageContext)._IDENTIFIER) -<<<<<<< HEAD 
p.SetState(174) p.GetErrorHandler().Sync(p) if p.HasError() { @@ -5639,75 +3731,39 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { } { p.SetState(175) -======= - - p.SetState(170) - p.GetErrorHandler().Sync(p) - if p.HasError() { - goto errorExit - } - _la = p.GetTokenStream().LA(1) - } - { - p.SetState(171) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _m = p.Match(CELParserLBRACE) localctx.(*CreateMessageContext).op = _m if p.HasError() { -<<<<<<< HEAD // Recognition error - abort rule goto errorExit } } p.SetState(177) -======= - // Recognition error - abort rule - goto errorExit - } - } - p.SetState(173) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.GetErrorHandler().Sync(p) if p.HasError() { goto errorExit } _la = p.GetTokenStream().LA(1) -<<<<<<< HEAD if (int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&206159478784) != 0 { { p.SetState(176) var _x = p.FieldInitializerList() -======= - - if _la == CELParserQUESTIONMARK || _la == CELParserIDENTIFIER { - { - p.SetState(172) - - var _x = p.FieldInitializerList() - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) localctx.(*CreateMessageContext).entries = _x } } -<<<<<<< HEAD p.SetState(180) -======= - p.SetState(176) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.GetErrorHandler().Sync(p) if p.HasError() { goto errorExit } _la = p.GetTokenStream().LA(1) -<<<<<<< HEAD if _la == CELParserCOMMA { { p.SetState(179) @@ -5715,22 +3771,11 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { if p.HasError() { // Recognition error - abort rule goto errorExit -======= - - if _la == CELParserCOMMA { - { - p.SetState(175) - p.Match(CELParserCOMMA) - if p.HasError() { - // Recognition error - abort rule - goto errorExit ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } } { -<<<<<<< HEAD p.SetState(182) p.Match(CELParserRBRACE) if p.HasError() { @@ -5744,22 +3789,6 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { p.EnterOuterAlt(localctx, 7) { p.SetState(183) -======= - p.SetState(178) - p.Match(CELParserRBRACE) - if p.HasError() { - // Recognition error - abort rule - goto errorExit - } - } - - - case 6: - localctx = NewConstantLiteralContext(p, localctx) - p.EnterOuterAlt(localctx, 6) - { - p.SetState(179) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.Literal() } @@ -5767,10 +3796,6 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { goto errorExit } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) errorExit: if p.HasError() { v := p.GetError() @@ -5784,10 +3809,6 @@ errorExit: goto errorExit // Trick to prevent compiler error if the label is not used } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // IExprListContext is an interface to support dynamic dispatch. type IExprListContext interface { antlr.ParserRuleContext @@ -5798,7 +3819,6 @@ type IExprListContext interface { // Get_expr returns the _expr rule contexts. Get_expr() IExprContext -<<<<<<< HEAD // Set_expr sets the _expr rule contexts. Set_expr(IExprContext) @@ -5807,20 +3827,6 @@ type IExprListContext interface { // SetE sets the e rule context list. SetE([]IExprContext) -======= - - // Set_expr sets the _expr rule contexts. - Set_expr(IExprContext) - - - // GetE returns the e rule context list. - GetE() []IExprContext - - - // SetE sets the e rule context list. 
- SetE([]IExprContext) - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Getter signatures AllExpr() []IExprContext @@ -5835,13 +3841,8 @@ type IExprListContext interface { type ExprListContext struct { antlr.BaseParserRuleContext parser antlr.Parser -<<<<<<< HEAD _expr IExprContext e []IExprContext -======= - _expr IExprContext - e []IExprContext ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func NewEmptyExprListContext() *ExprListContext { @@ -5851,11 +3852,7 @@ func NewEmptyExprListContext() *ExprListContext { return p } -<<<<<<< HEAD func InitEmptyExprListContext(p *ExprListContext) { -======= -func InitEmptyExprListContext(p *ExprListContext) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_exprList } @@ -5877,25 +3874,12 @@ func (s *ExprListContext) GetParser() antlr.Parser { return s.parser } func (s *ExprListContext) Get_expr() IExprContext { return s._expr } -<<<<<<< HEAD -func (s *ExprListContext) Set_expr(v IExprContext) { s._expr = v } - -func (s *ExprListContext) GetE() []IExprContext { return s.e } - -func (s *ExprListContext) SetE(v []IExprContext) { s.e = v } - -======= - func (s *ExprListContext) Set_expr(v IExprContext) { s._expr = v } - func (s *ExprListContext) GetE() []IExprContext { return s.e } - func (s *ExprListContext) SetE(v []IExprContext) { s.e = v } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *ExprListContext) AllExpr() []IExprContext { children := s.GetChildren() len := 0 @@ -5918,20 +3902,12 @@ func (s *ExprListContext) AllExpr() []IExprContext { } func (s *ExprListContext) Expr(i int) IExprContext { -<<<<<<< HEAD var t antlr.RuleContext -======= - var t antlr.RuleContext; ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(IExprContext); ok { if j == i { -<<<<<<< HEAD t = ctx.(antlr.RuleContext) -======= - t = ctx.(antlr.RuleContext); ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) break } j++ @@ -5961,10 +3937,6 @@ func (s *ExprListContext) ToStringTree(ruleNames []string, recog antlr.Recognize return antlr.TreesStringTree(s, ruleNames, recog) } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *ExprListContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterExprList(s) @@ -5987,12 +3959,6 @@ func (s *ExprListContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } -<<<<<<< HEAD -======= - - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (p *CELParser) ExprList() (localctx IExprListContext) { localctx = NewExprListContext(p, p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 18, CELParserRULE_exprList) @@ -6000,7 +3966,6 @@ func (p *CELParser) ExprList() (localctx IExprListContext) { p.EnterOuterAlt(localctx, 1) { -<<<<<<< HEAD p.SetState(186) var _x = p.Expr() @@ -6009,24 +3974,12 @@ func (p *CELParser) ExprList() (localctx IExprListContext) { } localctx.(*ExprListContext).e = append(localctx.(*ExprListContext).e, localctx.(*ExprListContext)._expr) p.SetState(191) -======= - p.SetState(182) - - var _x = p.Expr() - - - localctx.(*ExprListContext)._expr = _x - } - localctx.(*ExprListContext).e = append(localctx.(*ExprListContext).e, localctx.(*ExprListContext)._expr) - p.SetState(187) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.GetErrorHandler().Sync(p) if p.HasError() { 
goto errorExit } _la = p.GetTokenStream().LA(1) -<<<<<<< HEAD for _la == CELParserCOMMA { { p.SetState(187) @@ -6041,29 +3994,10 @@ func (p *CELParser) ExprList() (localctx IExprListContext) { var _x = p.Expr() -======= - - for _la == CELParserCOMMA { - { - p.SetState(183) - p.Match(CELParserCOMMA) - if p.HasError() { - // Recognition error - abort rule - goto errorExit - } - } - { - p.SetState(184) - - var _x = p.Expr() - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) localctx.(*ExprListContext)._expr = _x } localctx.(*ExprListContext).e = append(localctx.(*ExprListContext).e, localctx.(*ExprListContext)._expr) -<<<<<<< HEAD p.SetState(193) p.GetErrorHandler().Sync(p) if p.HasError() { @@ -6072,19 +4006,6 @@ func (p *CELParser) ExprList() (localctx IExprListContext) { _la = p.GetTokenStream().LA(1) } -======= - - p.SetState(189) - p.GetErrorHandler().Sync(p) - if p.HasError() { - goto errorExit - } - _la = p.GetTokenStream().LA(1) - } - - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) errorExit: if p.HasError() { v := p.GetError() @@ -6098,10 +4019,6 @@ errorExit: goto errorExit // Trick to prevent compiler error if the label is not used } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // IListInitContext is an interface to support dynamic dispatch. type IListInitContext interface { antlr.ParserRuleContext @@ -6112,7 +4029,6 @@ type IListInitContext interface { // Get_optExpr returns the _optExpr rule contexts. Get_optExpr() IOptExprContext -<<<<<<< HEAD // Set_optExpr sets the _optExpr rule contexts. Set_optExpr(IOptExprContext) @@ -6121,20 +4037,6 @@ type IListInitContext interface { // SetElems sets the elems rule context list. SetElems([]IOptExprContext) -======= - - // Set_optExpr sets the _optExpr rule contexts. - Set_optExpr(IOptExprContext) - - - // GetElems returns the elems rule context list. - GetElems() []IOptExprContext - - - // SetElems sets the elems rule context list. 
- SetElems([]IOptExprContext) - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Getter signatures AllOptExpr() []IOptExprContext @@ -6148,15 +4050,9 @@ type IListInitContext interface { type ListInitContext struct { antlr.BaseParserRuleContext -<<<<<<< HEAD parser antlr.Parser _optExpr IOptExprContext elems []IOptExprContext -======= - parser antlr.Parser - _optExpr IOptExprContext - elems []IOptExprContext ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func NewEmptyListInitContext() *ListInitContext { @@ -6166,11 +4062,7 @@ func NewEmptyListInitContext() *ListInitContext { return p } -<<<<<<< HEAD func InitEmptyListInitContext(p *ListInitContext) { -======= -func InitEmptyListInitContext(p *ListInitContext) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_listInit } @@ -6192,25 +4084,12 @@ func (s *ListInitContext) GetParser() antlr.Parser { return s.parser } func (s *ListInitContext) Get_optExpr() IOptExprContext { return s._optExpr } -<<<<<<< HEAD -func (s *ListInitContext) Set_optExpr(v IOptExprContext) { s._optExpr = v } - -func (s *ListInitContext) GetElems() []IOptExprContext { return s.elems } - -func (s *ListInitContext) SetElems(v []IOptExprContext) { s.elems = v } - -======= - func (s *ListInitContext) Set_optExpr(v IOptExprContext) { s._optExpr = v } - func (s *ListInitContext) GetElems() []IOptExprContext { return s.elems } - func (s *ListInitContext) SetElems(v []IOptExprContext) { s.elems = v } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *ListInitContext) AllOptExpr() []IOptExprContext { children := s.GetChildren() len := 0 @@ -6233,20 +4112,12 @@ func (s *ListInitContext) AllOptExpr() []IOptExprContext { } func (s *ListInitContext) OptExpr(i int) IOptExprContext { -<<<<<<< HEAD var t antlr.RuleContext -======= - var t antlr.RuleContext; ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(IOptExprContext); ok { if j == i { -<<<<<<< HEAD t = ctx.(antlr.RuleContext) -======= - t = ctx.(antlr.RuleContext); ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) break } j++ @@ -6276,10 +4147,6 @@ func (s *ListInitContext) ToStringTree(ruleNames []string, recog antlr.Recognize return antlr.TreesStringTree(s, ruleNames, recog) } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *ListInitContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterListInit(s) @@ -6302,12 +4169,6 @@ func (s *ListInitContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } -<<<<<<< HEAD -======= - - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (p *CELParser) ListInit() (localctx IListInitContext) { localctx = NewListInitContext(p, p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 20, CELParserRULE_listInit) @@ -6315,7 +4176,6 @@ func (p *CELParser) ListInit() (localctx IListInitContext) { p.EnterOuterAlt(localctx, 1) { -<<<<<<< HEAD p.SetState(194) var _x = p.OptExpr() @@ -6324,17 +4184,6 @@ func (p *CELParser) ListInit() (localctx IListInitContext) { } localctx.(*ListInitContext).elems = append(localctx.(*ListInitContext).elems, localctx.(*ListInitContext)._optExpr) p.SetState(199) -======= - p.SetState(190) - - var _x = p.OptExpr() - - - localctx.(*ListInitContext)._optExpr = _x - } - localctx.(*ListInitContext).elems = 
append(localctx.(*ListInitContext).elems, localctx.(*ListInitContext)._optExpr) - p.SetState(195) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.GetErrorHandler().Sync(p) if p.HasError() { goto errorExit @@ -6346,7 +4195,6 @@ func (p *CELParser) ListInit() (localctx IListInitContext) { for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { if _alt == 1 { { -<<<<<<< HEAD p.SetState(195) p.Match(CELParserCOMMA) if p.HasError() { @@ -6359,52 +4207,22 @@ func (p *CELParser) ListInit() (localctx IListInitContext) { var _x = p.OptExpr() -======= - p.SetState(191) - p.Match(CELParserCOMMA) - if p.HasError() { - // Recognition error - abort rule - goto errorExit - } - } - { - p.SetState(192) - - var _x = p.OptExpr() - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) localctx.(*ListInitContext)._optExpr = _x } localctx.(*ListInitContext).elems = append(localctx.(*ListInitContext).elems, localctx.(*ListInitContext)._optExpr) -<<<<<<< HEAD } p.SetState(201) p.GetErrorHandler().Sync(p) if p.HasError() { goto errorExit } -======= - - } - p.SetState(197) - p.GetErrorHandler().Sync(p) - if p.HasError() { - goto errorExit - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 27, p.GetParserRuleContext()) if p.HasError() { goto errorExit } } -<<<<<<< HEAD -======= - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) errorExit: if p.HasError() { v := p.GetError() @@ -6418,10 +4236,6 @@ errorExit: goto errorExit // Trick to prevent compiler error if the label is not used } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // IFieldInitializerListContext is an interface to support dynamic dispatch. type IFieldInitializerListContext interface { antlr.ParserRuleContext @@ -6430,75 +4244,40 @@ type IFieldInitializerListContext interface { GetParser() antlr.Parser // GetS21 returns the s21 token. -<<<<<<< HEAD GetS21() antlr.Token // SetS21 sets the s21 token. SetS21(antlr.Token) -======= - GetS21() antlr.Token - - - // SetS21 sets the s21 token. - SetS21(antlr.Token) - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // GetCols returns the cols token list. GetCols() []antlr.Token -<<<<<<< HEAD - // SetCols sets the cols token list. - SetCols([]antlr.Token) - -======= - // SetCols sets the cols token list. SetCols([]antlr.Token) - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Get_optField returns the _optField rule contexts. Get_optField() IOptFieldContext // Get_expr returns the _expr rule contexts. Get_expr() IExprContext -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Set_optField sets the _optField rule contexts. Set_optField(IOptFieldContext) // Set_expr sets the _expr rule contexts. Set_expr(IExprContext) -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // GetFields returns the fields rule context list. GetFields() []IOptFieldContext // GetValues returns the values rule context list. GetValues() []IExprContext -<<<<<<< HEAD // SetFields sets the fields rule context list. SetFields([]IOptFieldContext) // SetValues sets the values rule context list. SetValues([]IExprContext) -======= - - // SetFields sets the fields rule context list. - SetFields([]IOptFieldContext) - - // SetValues sets the values rule context list. 
- SetValues([]IExprContext) - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Getter signatures AllOptField() []IOptFieldContext @@ -6516,7 +4295,6 @@ type IFieldInitializerListContext interface { type FieldInitializerListContext struct { antlr.BaseParserRuleContext -<<<<<<< HEAD parser antlr.Parser _optField IOptFieldContext fields []IOptFieldContext @@ -6524,15 +4302,6 @@ type FieldInitializerListContext struct { cols []antlr.Token _expr IExprContext values []IExprContext -======= - parser antlr.Parser - _optField IOptFieldContext - fields []IOptFieldContext - s21 antlr.Token - cols []antlr.Token - _expr IExprContext - values []IExprContext ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func NewEmptyFieldInitializerListContext() *FieldInitializerListContext { @@ -6542,11 +4311,7 @@ func NewEmptyFieldInitializerListContext() *FieldInitializerListContext { return p } -<<<<<<< HEAD func InitEmptyFieldInitializerListContext(p *FieldInitializerListContext) { -======= -func InitEmptyFieldInitializerListContext(p *FieldInitializerListContext) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_fieldInitializerList } @@ -6568,57 +4333,28 @@ func (s *FieldInitializerListContext) GetParser() antlr.Parser { return s.parser func (s *FieldInitializerListContext) GetS21() antlr.Token { return s.s21 } -<<<<<<< HEAD -func (s *FieldInitializerListContext) SetS21(v antlr.Token) { s.s21 = v } - -func (s *FieldInitializerListContext) GetCols() []antlr.Token { return s.cols } - -func (s *FieldInitializerListContext) SetCols(v []antlr.Token) { s.cols = v } - -======= - func (s *FieldInitializerListContext) SetS21(v antlr.Token) { s.s21 = v } - func (s *FieldInitializerListContext) GetCols() []antlr.Token { return s.cols } - func (s *FieldInitializerListContext) SetCols(v []antlr.Token) { s.cols = v } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *FieldInitializerListContext) Get_optField() IOptFieldContext { return s._optField } func (s *FieldInitializerListContext) Get_expr() IExprContext { return s._expr } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *FieldInitializerListContext) Set_optField(v IOptFieldContext) { s._optField = v } func (s *FieldInitializerListContext) Set_expr(v IExprContext) { s._expr = v } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *FieldInitializerListContext) GetFields() []IOptFieldContext { return s.fields } func (s *FieldInitializerListContext) GetValues() []IExprContext { return s.values } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *FieldInitializerListContext) SetFields(v []IOptFieldContext) { s.fields = v } func (s *FieldInitializerListContext) SetValues(v []IExprContext) { s.values = v } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *FieldInitializerListContext) AllOptField() []IOptFieldContext { children := s.GetChildren() len := 0 @@ -6641,20 +4377,12 @@ func (s *FieldInitializerListContext) AllOptField() []IOptFieldContext { } func (s *FieldInitializerListContext) OptField(i int) IOptFieldContext { -<<<<<<< HEAD var t antlr.RuleContext -======= - var t antlr.RuleContext; ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(IOptFieldContext); ok { if j == i { -<<<<<<< HEAD t = 
ctx.(antlr.RuleContext) -======= - t = ctx.(antlr.RuleContext); ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) break } j++ @@ -6698,20 +4426,12 @@ func (s *FieldInitializerListContext) AllExpr() []IExprContext { } func (s *FieldInitializerListContext) Expr(i int) IExprContext { -<<<<<<< HEAD var t antlr.RuleContext -======= - var t antlr.RuleContext; ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(IExprContext); ok { if j == i { -<<<<<<< HEAD t = ctx.(antlr.RuleContext) -======= - t = ctx.(antlr.RuleContext); ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) break } j++ @@ -6741,10 +4461,6 @@ func (s *FieldInitializerListContext) ToStringTree(ruleNames []string, recog ant return antlr.TreesStringTree(s, ruleNames, recog) } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *FieldInitializerListContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterFieldInitializerList(s) @@ -6767,12 +4483,6 @@ func (s *FieldInitializerListContext) Accept(visitor antlr.ParseTreeVisitor) int } } -<<<<<<< HEAD -======= - - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContext) { localctx = NewFieldInitializerListContext(p, p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 22, CELParserRULE_fieldInitializerList) @@ -6780,44 +4490,26 @@ func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContex p.EnterOuterAlt(localctx, 1) { -<<<<<<< HEAD p.SetState(202) var _x = p.OptField() -======= - p.SetState(198) - - var _x = p.OptField() - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) localctx.(*FieldInitializerListContext)._optField = _x } localctx.(*FieldInitializerListContext).fields = append(localctx.(*FieldInitializerListContext).fields, localctx.(*FieldInitializerListContext)._optField) { -<<<<<<< HEAD p.SetState(203) -======= - p.SetState(199) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _m = p.Match(CELParserCOLON) localctx.(*FieldInitializerListContext).s21 = _m if p.HasError() { -<<<<<<< HEAD // Recognition error - abort rule goto errorExit -======= - // Recognition error - abort rule - goto errorExit ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } localctx.(*FieldInitializerListContext).cols = append(localctx.(*FieldInitializerListContext).cols, localctx.(*FieldInitializerListContext).s21) { -<<<<<<< HEAD p.SetState(204) var _x = p.Expr() @@ -6826,17 +4518,6 @@ func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContex } localctx.(*FieldInitializerListContext).values = append(localctx.(*FieldInitializerListContext).values, localctx.(*FieldInitializerListContext)._expr) p.SetState(212) -======= - p.SetState(200) - - var _x = p.Expr() - - - localctx.(*FieldInitializerListContext)._expr = _x - } - localctx.(*FieldInitializerListContext).values = append(localctx.(*FieldInitializerListContext).values, localctx.(*FieldInitializerListContext)._expr) - p.SetState(208) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.GetErrorHandler().Sync(p) if p.HasError() { goto errorExit @@ -6848,7 +4529,6 @@ func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContex for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { if _alt == 1 { { -<<<<<<< HEAD p.SetState(205) p.Match(CELParserCOMMA) if p.HasError() { @@ -6861,89 
+4541,42 @@ func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContex var _x = p.OptField() -======= - p.SetState(201) - p.Match(CELParserCOMMA) - if p.HasError() { - // Recognition error - abort rule - goto errorExit - } - } - { - p.SetState(202) - - var _x = p.OptField() - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) localctx.(*FieldInitializerListContext)._optField = _x } localctx.(*FieldInitializerListContext).fields = append(localctx.(*FieldInitializerListContext).fields, localctx.(*FieldInitializerListContext)._optField) { -<<<<<<< HEAD p.SetState(207) -======= - p.SetState(203) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _m = p.Match(CELParserCOLON) localctx.(*FieldInitializerListContext).s21 = _m if p.HasError() { -<<<<<<< HEAD // Recognition error - abort rule goto errorExit -======= - // Recognition error - abort rule - goto errorExit ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } localctx.(*FieldInitializerListContext).cols = append(localctx.(*FieldInitializerListContext).cols, localctx.(*FieldInitializerListContext).s21) { -<<<<<<< HEAD p.SetState(208) var _x = p.Expr() -======= - p.SetState(204) - - var _x = p.Expr() - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) localctx.(*FieldInitializerListContext)._expr = _x } localctx.(*FieldInitializerListContext).values = append(localctx.(*FieldInitializerListContext).values, localctx.(*FieldInitializerListContext)._expr) -<<<<<<< HEAD } p.SetState(214) p.GetErrorHandler().Sync(p) if p.HasError() { goto errorExit } -======= - - } - p.SetState(210) - p.GetErrorHandler().Sync(p) - if p.HasError() { - goto errorExit - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 28, p.GetParserRuleContext()) if p.HasError() { goto errorExit } } -<<<<<<< HEAD -======= - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) errorExit: if p.HasError() { v := p.GetError() @@ -6957,10 +4590,6 @@ errorExit: goto errorExit // Trick to prevent compiler error if the label is not used } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // IOptFieldContext is an interface to support dynamic dispatch. type IOptFieldContext interface { antlr.ParserRuleContext @@ -6969,7 +4598,6 @@ type IOptFieldContext interface { GetParser() antlr.Parser // GetOpt returns the opt token. -<<<<<<< HEAD GetOpt() antlr.Token // SetOpt sets the opt token. @@ -6977,17 +4605,6 @@ type IOptFieldContext interface { // Getter signatures EscapeIdent() IEscapeIdentContext -======= - GetOpt() antlr.Token - - - // SetOpt sets the opt token. - SetOpt(antlr.Token) - - - // Getter signatures - IDENTIFIER() antlr.TerminalNode ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) QUESTIONMARK() antlr.TerminalNode // IsOptFieldContext differentiates from other interfaces. 
@@ -6997,11 +4614,7 @@ type IOptFieldContext interface { type OptFieldContext struct { antlr.BaseParserRuleContext parser antlr.Parser -<<<<<<< HEAD opt antlr.Token -======= - opt antlr.Token ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func NewEmptyOptFieldContext() *OptFieldContext { @@ -7011,11 +4624,7 @@ func NewEmptyOptFieldContext() *OptFieldContext { return p } -<<<<<<< HEAD func InitEmptyOptFieldContext(p *OptFieldContext) { -======= -func InitEmptyOptFieldContext(p *OptFieldContext) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_optField } @@ -7037,7 +4646,6 @@ func (s *OptFieldContext) GetParser() antlr.Parser { return s.parser } func (s *OptFieldContext) GetOpt() antlr.Token { return s.opt } -<<<<<<< HEAD func (s *OptFieldContext) SetOpt(v antlr.Token) { s.opt = v } func (s *OptFieldContext) EscapeIdent() IEscapeIdentContext { @@ -7054,14 +4662,6 @@ func (s *OptFieldContext) EscapeIdent() IEscapeIdentContext { } return t.(IEscapeIdentContext) -======= - -func (s *OptFieldContext) SetOpt(v antlr.Token) { s.opt = v } - - -func (s *OptFieldContext) IDENTIFIER() antlr.TerminalNode { - return s.GetToken(CELParserIDENTIFIER, 0) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (s *OptFieldContext) QUESTIONMARK() antlr.TerminalNode { @@ -7076,10 +4676,6 @@ func (s *OptFieldContext) ToStringTree(ruleNames []string, recog antlr.Recognize return antlr.TreesStringTree(s, ruleNames, recog) } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *OptFieldContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterOptField(s) @@ -7102,73 +4698,38 @@ func (s *OptFieldContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } -<<<<<<< HEAD -======= - - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (p *CELParser) OptField() (localctx IOptFieldContext) { localctx = NewOptFieldContext(p, p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 24, CELParserRULE_optField) var _la int p.EnterOuterAlt(localctx, 1) -<<<<<<< HEAD p.SetState(216) -======= - p.SetState(212) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.GetErrorHandler().Sync(p) if p.HasError() { goto errorExit } _la = p.GetTokenStream().LA(1) -<<<<<<< HEAD if _la == CELParserQUESTIONMARK { { p.SetState(215) -======= - - if _la == CELParserQUESTIONMARK { - { - p.SetState(211) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _m = p.Match(CELParserQUESTIONMARK) localctx.(*OptFieldContext).opt = _m if p.HasError() { -<<<<<<< HEAD // Recognition error - abort rule goto errorExit -======= - // Recognition error - abort rule - goto errorExit ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } } { -<<<<<<< HEAD p.SetState(218) p.EscapeIdent() } -======= - p.SetState(214) - p.Match(CELParserIDENTIFIER) - if p.HasError() { - // Recognition error - abort rule - goto errorExit - } - } - - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) errorExit: if p.HasError() { v := p.GetError() @@ -7182,10 +4743,6 @@ errorExit: goto errorExit // Trick to prevent compiler error if the label is not used } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // IMapInitializerListContext is an interface to support dynamic dispatch. 
type IMapInitializerListContext interface { antlr.ParserRuleContext @@ -7194,75 +4751,40 @@ type IMapInitializerListContext interface { GetParser() antlr.Parser // GetS21 returns the s21 token. -<<<<<<< HEAD GetS21() antlr.Token // SetS21 sets the s21 token. SetS21(antlr.Token) -======= - GetS21() antlr.Token - - - // SetS21 sets the s21 token. - SetS21(antlr.Token) - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) - - // GetCols returns the cols token list. - GetCols() []antlr.Token - -<<<<<<< HEAD - // SetCols sets the cols token list. - SetCols([]antlr.Token) -======= + // GetCols returns the cols token list. + GetCols() []antlr.Token // SetCols sets the cols token list. SetCols([]antlr.Token) - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Get_optExpr returns the _optExpr rule contexts. Get_optExpr() IOptExprContext // Get_expr returns the _expr rule contexts. Get_expr() IExprContext -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Set_optExpr sets the _optExpr rule contexts. Set_optExpr(IOptExprContext) // Set_expr sets the _expr rule contexts. Set_expr(IExprContext) -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // GetKeys returns the keys rule context list. GetKeys() []IOptExprContext // GetValues returns the values rule context list. GetValues() []IExprContext -<<<<<<< HEAD // SetKeys sets the keys rule context list. SetKeys([]IOptExprContext) // SetValues sets the values rule context list. SetValues([]IExprContext) -======= - - // SetKeys sets the keys rule context list. - SetKeys([]IOptExprContext) - - // SetValues sets the values rule context list. - SetValues([]IExprContext) - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Getter signatures AllOptExpr() []IOptExprContext @@ -7280,7 +4802,6 @@ type IMapInitializerListContext interface { type MapInitializerListContext struct { antlr.BaseParserRuleContext -<<<<<<< HEAD parser antlr.Parser _optExpr IOptExprContext keys []IOptExprContext @@ -7288,15 +4809,6 @@ type MapInitializerListContext struct { cols []antlr.Token _expr IExprContext values []IExprContext -======= - parser antlr.Parser - _optExpr IOptExprContext - keys []IOptExprContext - s21 antlr.Token - cols []antlr.Token - _expr IExprContext - values []IExprContext ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func NewEmptyMapInitializerListContext() *MapInitializerListContext { @@ -7306,11 +4818,7 @@ func NewEmptyMapInitializerListContext() *MapInitializerListContext { return p } -<<<<<<< HEAD func InitEmptyMapInitializerListContext(p *MapInitializerListContext) { -======= -func InitEmptyMapInitializerListContext(p *MapInitializerListContext) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_mapInitializerList } @@ -7332,57 +4840,28 @@ func (s *MapInitializerListContext) GetParser() antlr.Parser { return s.parser } func (s *MapInitializerListContext) GetS21() antlr.Token { return s.s21 } -<<<<<<< HEAD -func (s *MapInitializerListContext) SetS21(v antlr.Token) { s.s21 = v } - -func (s *MapInitializerListContext) GetCols() []antlr.Token { return s.cols } - -func (s *MapInitializerListContext) SetCols(v []antlr.Token) { s.cols = v } - -======= - func (s *MapInitializerListContext) SetS21(v antlr.Token) { s.s21 = v } - func (s *MapInitializerListContext) GetCols() []antlr.Token { return s.cols } - func (s *MapInitializerListContext) SetCols(v 
[]antlr.Token) { s.cols = v } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *MapInitializerListContext) Get_optExpr() IOptExprContext { return s._optExpr } func (s *MapInitializerListContext) Get_expr() IExprContext { return s._expr } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *MapInitializerListContext) Set_optExpr(v IOptExprContext) { s._optExpr = v } func (s *MapInitializerListContext) Set_expr(v IExprContext) { s._expr = v } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *MapInitializerListContext) GetKeys() []IOptExprContext { return s.keys } func (s *MapInitializerListContext) GetValues() []IExprContext { return s.values } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *MapInitializerListContext) SetKeys(v []IOptExprContext) { s.keys = v } func (s *MapInitializerListContext) SetValues(v []IExprContext) { s.values = v } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *MapInitializerListContext) AllOptExpr() []IOptExprContext { children := s.GetChildren() len := 0 @@ -7405,20 +4884,12 @@ func (s *MapInitializerListContext) AllOptExpr() []IOptExprContext { } func (s *MapInitializerListContext) OptExpr(i int) IOptExprContext { -<<<<<<< HEAD var t antlr.RuleContext -======= - var t antlr.RuleContext; ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(IOptExprContext); ok { if j == i { -<<<<<<< HEAD t = ctx.(antlr.RuleContext) -======= - t = ctx.(antlr.RuleContext); ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) break } j++ @@ -7462,20 +4933,12 @@ func (s *MapInitializerListContext) AllExpr() []IExprContext { } func (s *MapInitializerListContext) Expr(i int) IExprContext { -<<<<<<< HEAD var t antlr.RuleContext -======= - var t antlr.RuleContext; ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(IExprContext); ok { if j == i { -<<<<<<< HEAD t = ctx.(antlr.RuleContext) -======= - t = ctx.(antlr.RuleContext); ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) break } j++ @@ -7505,10 +4968,6 @@ func (s *MapInitializerListContext) ToStringTree(ruleNames []string, recog antlr return antlr.TreesStringTree(s, ruleNames, recog) } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *MapInitializerListContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterMapInitializerList(s) @@ -7531,12 +4990,6 @@ func (s *MapInitializerListContext) Accept(visitor antlr.ParseTreeVisitor) inter } } -<<<<<<< HEAD -======= - - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) { localctx = NewMapInitializerListContext(p, p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 26, CELParserRULE_mapInitializerList) @@ -7544,44 +4997,26 @@ func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) { p.EnterOuterAlt(localctx, 1) { -<<<<<<< HEAD p.SetState(220) var _x = p.OptExpr() -======= - p.SetState(216) - - var _x = p.OptExpr() - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) localctx.(*MapInitializerListContext)._optExpr = _x } localctx.(*MapInitializerListContext).keys = append(localctx.(*MapInitializerListContext).keys, 
localctx.(*MapInitializerListContext)._optExpr) { -<<<<<<< HEAD p.SetState(221) -======= - p.SetState(217) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _m = p.Match(CELParserCOLON) localctx.(*MapInitializerListContext).s21 = _m if p.HasError() { -<<<<<<< HEAD // Recognition error - abort rule goto errorExit -======= - // Recognition error - abort rule - goto errorExit ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } localctx.(*MapInitializerListContext).cols = append(localctx.(*MapInitializerListContext).cols, localctx.(*MapInitializerListContext).s21) { -<<<<<<< HEAD p.SetState(222) var _x = p.Expr() @@ -7590,17 +5025,6 @@ func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) { } localctx.(*MapInitializerListContext).values = append(localctx.(*MapInitializerListContext).values, localctx.(*MapInitializerListContext)._expr) p.SetState(230) -======= - p.SetState(218) - - var _x = p.Expr() - - - localctx.(*MapInitializerListContext)._expr = _x - } - localctx.(*MapInitializerListContext).values = append(localctx.(*MapInitializerListContext).values, localctx.(*MapInitializerListContext)._expr) - p.SetState(226) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.GetErrorHandler().Sync(p) if p.HasError() { goto errorExit @@ -7612,7 +5036,6 @@ func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) { for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { if _alt == 1 { { -<<<<<<< HEAD p.SetState(223) p.Match(CELParserCOMMA) if p.HasError() { @@ -7625,85 +5048,42 @@ func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) { var _x = p.OptExpr() -======= - p.SetState(219) - p.Match(CELParserCOMMA) - if p.HasError() { - // Recognition error - abort rule - goto errorExit - } - } - { - p.SetState(220) - - var _x = p.OptExpr() - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) localctx.(*MapInitializerListContext)._optExpr = _x } localctx.(*MapInitializerListContext).keys = append(localctx.(*MapInitializerListContext).keys, localctx.(*MapInitializerListContext)._optExpr) { -<<<<<<< HEAD p.SetState(225) -======= - p.SetState(221) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _m = p.Match(CELParserCOLON) localctx.(*MapInitializerListContext).s21 = _m if p.HasError() { -<<<<<<< HEAD // Recognition error - abort rule goto errorExit -======= - // Recognition error - abort rule - goto errorExit ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } localctx.(*MapInitializerListContext).cols = append(localctx.(*MapInitializerListContext).cols, localctx.(*MapInitializerListContext).s21) { -<<<<<<< HEAD p.SetState(226) var _x = p.Expr() -======= - p.SetState(222) - - var _x = p.Expr() - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) localctx.(*MapInitializerListContext)._expr = _x } localctx.(*MapInitializerListContext).values = append(localctx.(*MapInitializerListContext).values, localctx.(*MapInitializerListContext)._expr) -<<<<<<< HEAD } p.SetState(232) p.GetErrorHandler().Sync(p) if p.HasError() { goto errorExit } -======= - - } - p.SetState(228) - p.GetErrorHandler().Sync(p) - if p.HasError() { - goto errorExit - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 30, p.GetParserRuleContext()) if p.HasError() { goto errorExit } } -<<<<<<< HEAD errorExit: if p.HasError() { v := p.GetError() @@ -7913,9 +5293,6 @@ func (p *CELParser) EscapeIdent() (localctx 
IEscapeIdentContext) { p.SetError(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) goto errorExit } -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) errorExit: if p.HasError() { @@ -7930,10 +5307,6 @@ errorExit: goto errorExit // Trick to prevent compiler error if the label is not used } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // IOptExprContext is an interface to support dynamic dispatch. type IOptExprContext interface { antlr.ParserRuleContext @@ -7942,34 +5315,17 @@ type IOptExprContext interface { GetParser() antlr.Parser // GetOpt returns the opt token. -<<<<<<< HEAD GetOpt() antlr.Token // SetOpt sets the opt token. SetOpt(antlr.Token) -======= - GetOpt() antlr.Token - - - // SetOpt sets the opt token. - SetOpt(antlr.Token) - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // GetE returns the e rule contexts. GetE() IExprContext -<<<<<<< HEAD - // SetE sets the e rule contexts. - SetE(IExprContext) - -======= - // SetE sets the e rule contexts. SetE(IExprContext) - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Getter signatures Expr() IExprContext QUESTIONMARK() antlr.TerminalNode @@ -7981,13 +5337,8 @@ type IOptExprContext interface { type OptExprContext struct { antlr.BaseParserRuleContext parser antlr.Parser -<<<<<<< HEAD opt antlr.Token e IExprContext -======= - opt antlr.Token - e IExprContext ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func NewEmptyOptExprContext() *OptExprContext { @@ -7997,11 +5348,7 @@ func NewEmptyOptExprContext() *OptExprContext { return p } -<<<<<<< HEAD func InitEmptyOptExprContext(p *OptExprContext) { -======= -func InitEmptyOptExprContext(p *OptExprContext) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_optExpr } @@ -8023,7 +5370,6 @@ func (s *OptExprContext) GetParser() antlr.Parser { return s.parser } func (s *OptExprContext) GetOpt() antlr.Token { return s.opt } -<<<<<<< HEAD func (s *OptExprContext) SetOpt(v antlr.Token) { s.opt = v } func (s *OptExprContext) GetE() IExprContext { return s.e } @@ -8035,23 +5381,6 @@ func (s *OptExprContext) Expr() IExprContext { for _, ctx := range s.GetChildren() { if _, ok := ctx.(IExprContext); ok { t = ctx.(antlr.RuleContext) -======= - -func (s *OptExprContext) SetOpt(v antlr.Token) { s.opt = v } - - -func (s *OptExprContext) GetE() IExprContext { return s.e } - - -func (s *OptExprContext) SetE(v IExprContext) { s.e = v } - - -func (s *OptExprContext) Expr() IExprContext { - var t antlr.RuleContext; - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IExprContext); ok { - t = ctx.(antlr.RuleContext); ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) break } } @@ -8075,10 +5404,6 @@ func (s *OptExprContext) ToStringTree(ruleNames []string, recog antlr.Recognizer return antlr.TreesStringTree(s, ruleNames, recog) } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *OptExprContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterOptExpr(s) @@ -8101,7 +5426,6 @@ func (s *OptExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } -<<<<<<< HEAD func (p *CELParser) OptExpr() (localctx IOptExprContext) { localctx = NewOptExprContext(p, p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 30, CELParserRULE_optExpr) @@ -8109,52 +5433,27 @@ func (p *CELParser) 
OptExpr() (localctx IOptExprContext) { p.EnterOuterAlt(localctx, 1) p.SetState(238) -======= - - - -func (p *CELParser) OptExpr() (localctx IOptExprContext) { - localctx = NewOptExprContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 28, CELParserRULE_optExpr) - var _la int - - p.EnterOuterAlt(localctx, 1) - p.SetState(230) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.GetErrorHandler().Sync(p) if p.HasError() { goto errorExit } _la = p.GetTokenStream().LA(1) -<<<<<<< HEAD if _la == CELParserQUESTIONMARK { { p.SetState(237) -======= - - if _la == CELParserQUESTIONMARK { - { - p.SetState(229) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _m = p.Match(CELParserQUESTIONMARK) localctx.(*OptExprContext).opt = _m if p.HasError() { -<<<<<<< HEAD // Recognition error - abort rule goto errorExit -======= - // Recognition error - abort rule - goto errorExit ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } } { -<<<<<<< HEAD p.SetState(240) var _x = p.Expr() @@ -8162,18 +5461,6 @@ func (p *CELParser) OptExpr() (localctx IOptExprContext) { localctx.(*OptExprContext).e = _x } -======= - p.SetState(232) - - var _x = p.Expr() - - - localctx.(*OptExprContext).e = _x - } - - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) errorExit: if p.HasError() { v := p.GetError() @@ -8187,10 +5474,6 @@ errorExit: goto errorExit // Trick to prevent compiler error if the label is not used } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // ILiteralContext is an interface to support dynamic dispatch. type ILiteralContext interface { antlr.ParserRuleContext @@ -8213,11 +5496,7 @@ func NewEmptyLiteralContext() *LiteralContext { return p } -<<<<<<< HEAD func InitEmptyLiteralContext(p *LiteralContext) { -======= -func InitEmptyLiteralContext(p *LiteralContext) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_literal } @@ -8249,12 +5528,6 @@ func (s *LiteralContext) ToStringTree(ruleNames []string, recog antlr.Recognizer return antlr.TreesStringTree(s, ruleNames, recog) } -<<<<<<< HEAD -======= - - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type BytesContext struct { LiteralContext tok antlr.Token @@ -8270,15 +5543,8 @@ func NewBytesContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BytesCon return p } -<<<<<<< HEAD -func (s *BytesContext) GetTok() antlr.Token { return s.tok } - -======= - func (s *BytesContext) GetTok() antlr.Token { return s.tok } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *BytesContext) SetTok(v antlr.Token) { s.tok = v } func (s *BytesContext) GetRuleContext() antlr.RuleContext { @@ -8289,10 +5555,6 @@ func (s *BytesContext) BYTES() antlr.TerminalNode { return s.GetToken(CELParserBYTES, 0) } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *BytesContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterBytes(s) @@ -8315,10 +5577,6 @@ func (s *BytesContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type UintContext struct { LiteralContext tok antlr.Token @@ -8334,15 +5592,8 @@ func NewUintContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *UintConte return p } -<<<<<<< HEAD func (s *UintContext) GetTok() antlr.Token { return s.tok } 
-======= - -func (s *UintContext) GetTok() antlr.Token { return s.tok } - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *UintContext) SetTok(v antlr.Token) { s.tok = v } func (s *UintContext) GetRuleContext() antlr.RuleContext { @@ -8353,10 +5604,6 @@ func (s *UintContext) NUM_UINT() antlr.TerminalNode { return s.GetToken(CELParserNUM_UINT, 0) } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *UintContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterUint(s) @@ -8379,10 +5626,6 @@ func (s *UintContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type NullContext struct { LiteralContext tok antlr.Token @@ -8398,15 +5641,8 @@ func NewNullContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *NullConte return p } -<<<<<<< HEAD -func (s *NullContext) GetTok() antlr.Token { return s.tok } - -======= - func (s *NullContext) GetTok() antlr.Token { return s.tok } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *NullContext) SetTok(v antlr.Token) { s.tok = v } func (s *NullContext) GetRuleContext() antlr.RuleContext { @@ -8417,10 +5653,6 @@ func (s *NullContext) NUL() antlr.TerminalNode { return s.GetToken(CELParserNUL, 0) } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *NullContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterNull(s) @@ -8443,10 +5675,6 @@ func (s *NullContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type BoolFalseContext struct { LiteralContext tok antlr.Token @@ -8462,15 +5690,8 @@ func NewBoolFalseContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *Bool return p } -<<<<<<< HEAD func (s *BoolFalseContext) GetTok() antlr.Token { return s.tok } -======= - -func (s *BoolFalseContext) GetTok() antlr.Token { return s.tok } - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *BoolFalseContext) SetTok(v antlr.Token) { s.tok = v } func (s *BoolFalseContext) GetRuleContext() antlr.RuleContext { @@ -8481,10 +5702,6 @@ func (s *BoolFalseContext) CEL_FALSE() antlr.TerminalNode { return s.GetToken(CELParserCEL_FALSE, 0) } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *BoolFalseContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterBoolFalse(s) @@ -8507,10 +5724,6 @@ func (s *BoolFalseContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type StringContext struct { LiteralContext tok antlr.Token @@ -8526,15 +5739,8 @@ func NewStringContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *StringC return p } -<<<<<<< HEAD -func (s *StringContext) GetTok() antlr.Token { return s.tok } - -======= - func (s *StringContext) GetTok() antlr.Token { return s.tok } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *StringContext) SetTok(v antlr.Token) { s.tok = v } func (s *StringContext) GetRuleContext() antlr.RuleContext { @@ -8545,10 +5751,6 @@ func (s *StringContext) STRING() antlr.TerminalNode { return s.GetToken(CELParserSTRING, 0) } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista 
storage backend) func (s *StringContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterString(s) @@ -8571,18 +5773,10 @@ func (s *StringContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } -<<<<<<< HEAD type DoubleContext struct { LiteralContext sign antlr.Token tok antlr.Token -======= - -type DoubleContext struct { - LiteralContext - sign antlr.Token - tok antlr.Token ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func NewDoubleContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *DoubleContext { @@ -8595,18 +5789,10 @@ func NewDoubleContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *DoubleC return p } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *DoubleContext) GetSign() antlr.Token { return s.sign } func (s *DoubleContext) GetTok() antlr.Token { return s.tok } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *DoubleContext) SetSign(v antlr.Token) { s.sign = v } func (s *DoubleContext) SetTok(v antlr.Token) { s.tok = v } @@ -8623,10 +5809,6 @@ func (s *DoubleContext) MINUS() antlr.TerminalNode { return s.GetToken(CELParserMINUS, 0) } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *DoubleContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterDouble(s) @@ -8649,10 +5831,6 @@ func (s *DoubleContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type BoolTrueContext struct { LiteralContext tok antlr.Token @@ -8668,15 +5846,8 @@ func NewBoolTrueContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BoolT return p } -<<<<<<< HEAD func (s *BoolTrueContext) GetTok() antlr.Token { return s.tok } -======= - -func (s *BoolTrueContext) GetTok() antlr.Token { return s.tok } - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *BoolTrueContext) SetTok(v antlr.Token) { s.tok = v } func (s *BoolTrueContext) GetRuleContext() antlr.RuleContext { @@ -8687,10 +5858,6 @@ func (s *BoolTrueContext) CEL_TRUE() antlr.TerminalNode { return s.GetToken(CELParserCEL_TRUE, 0) } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *BoolTrueContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterBoolTrue(s) @@ -8713,18 +5880,10 @@ func (s *BoolTrueContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } -<<<<<<< HEAD type IntContext struct { LiteralContext sign antlr.Token tok antlr.Token -======= - -type IntContext struct { - LiteralContext - sign antlr.Token - tok antlr.Token ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func NewIntContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *IntContext { @@ -8737,18 +5896,10 @@ func NewIntContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *IntContext return p } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *IntContext) GetSign() antlr.Token { return s.sign } func (s *IntContext) GetTok() antlr.Token { return s.tok } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *IntContext) SetSign(v antlr.Token) { s.sign = v } func (s *IntContext) SetTok(v antlr.Token) { s.tok = v } @@ -8765,10 +5916,6 @@ func (s *IntContext) MINUS() antlr.TerminalNode { return 
s.GetToken(CELParserMINUS, 0) } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *IntContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterInt(s) @@ -8791,113 +5938,64 @@ func (s *IntContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } -<<<<<<< HEAD func (p *CELParser) Literal() (localctx ILiteralContext) { localctx = NewLiteralContext(p, p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 32, CELParserRULE_literal) var _la int p.SetState(256) -======= - - -func (p *CELParser) Literal() (localctx ILiteralContext) { - localctx = NewLiteralContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 30, CELParserRULE_literal) - var _la int - - p.SetState(248) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.GetErrorHandler().Sync(p) if p.HasError() { goto errorExit } -<<<<<<< HEAD switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 35, p.GetParserRuleContext()) { case 1: localctx = NewIntContext(p, localctx) p.EnterOuterAlt(localctx, 1) p.SetState(243) -======= - switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 34, p.GetParserRuleContext()) { - case 1: - localctx = NewIntContext(p, localctx) - p.EnterOuterAlt(localctx, 1) - p.SetState(235) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.GetErrorHandler().Sync(p) if p.HasError() { goto errorExit } _la = p.GetTokenStream().LA(1) -<<<<<<< HEAD if _la == CELParserMINUS { { p.SetState(242) -======= - - if _la == CELParserMINUS { - { - p.SetState(234) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _m = p.Match(CELParserMINUS) localctx.(*IntContext).sign = _m if p.HasError() { -<<<<<<< HEAD // Recognition error - abort rule goto errorExit -======= - // Recognition error - abort rule - goto errorExit ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } } { -<<<<<<< HEAD p.SetState(245) -======= - p.SetState(237) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _m = p.Match(CELParserNUM_INT) localctx.(*IntContext).tok = _m if p.HasError() { -<<<<<<< HEAD // Recognition error - abort rule goto errorExit } } -======= - // Recognition error - abort rule - goto errorExit - } - } - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) case 2: localctx = NewUintContext(p, localctx) p.EnterOuterAlt(localctx, 2) { -<<<<<<< HEAD p.SetState(246) -======= - p.SetState(238) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _m = p.Match(CELParserNUM_UINT) localctx.(*UintContext).tok = _m if p.HasError() { -<<<<<<< HEAD // Recognition error - abort rule goto errorExit } @@ -8907,208 +6005,110 @@ func (p *CELParser) Literal() (localctx ILiteralContext) { localctx = NewDoubleContext(p, localctx) p.EnterOuterAlt(localctx, 3) p.SetState(248) -======= - // Recognition error - abort rule - goto errorExit - } - } - - - case 3: - localctx = NewDoubleContext(p, localctx) - p.EnterOuterAlt(localctx, 3) - p.SetState(240) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.GetErrorHandler().Sync(p) if p.HasError() { goto errorExit } _la = p.GetTokenStream().LA(1) -<<<<<<< HEAD if _la == CELParserMINUS { { p.SetState(247) -======= - - if _la == CELParserMINUS { - { - p.SetState(239) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _m = p.Match(CELParserMINUS) localctx.(*DoubleContext).sign = _m if p.HasError() { -<<<<<<< HEAD // Recognition error - abort rule goto errorExit -======= - // 
Recognition error - abort rule - goto errorExit ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } } { -<<<<<<< HEAD p.SetState(250) -======= - p.SetState(242) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _m = p.Match(CELParserNUM_FLOAT) localctx.(*DoubleContext).tok = _m if p.HasError() { -<<<<<<< HEAD // Recognition error - abort rule goto errorExit } } -======= - // Recognition error - abort rule - goto errorExit - } - } - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) case 4: localctx = NewStringContext(p, localctx) p.EnterOuterAlt(localctx, 4) { -<<<<<<< HEAD p.SetState(251) -======= - p.SetState(243) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _m = p.Match(CELParserSTRING) localctx.(*StringContext).tok = _m if p.HasError() { -<<<<<<< HEAD // Recognition error - abort rule goto errorExit } } -======= - // Recognition error - abort rule - goto errorExit - } - } - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) case 5: localctx = NewBytesContext(p, localctx) p.EnterOuterAlt(localctx, 5) { -<<<<<<< HEAD p.SetState(252) -======= - p.SetState(244) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _m = p.Match(CELParserBYTES) localctx.(*BytesContext).tok = _m if p.HasError() { -<<<<<<< HEAD // Recognition error - abort rule goto errorExit } } -======= - // Recognition error - abort rule - goto errorExit - } - } - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) case 6: localctx = NewBoolTrueContext(p, localctx) p.EnterOuterAlt(localctx, 6) { -<<<<<<< HEAD p.SetState(253) -======= - p.SetState(245) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _m = p.Match(CELParserCEL_TRUE) localctx.(*BoolTrueContext).tok = _m if p.HasError() { -<<<<<<< HEAD // Recognition error - abort rule goto errorExit } } -======= - // Recognition error - abort rule - goto errorExit - } - } - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) case 7: localctx = NewBoolFalseContext(p, localctx) p.EnterOuterAlt(localctx, 7) { -<<<<<<< HEAD p.SetState(254) -======= - p.SetState(246) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _m = p.Match(CELParserCEL_FALSE) localctx.(*BoolFalseContext).tok = _m if p.HasError() { -<<<<<<< HEAD // Recognition error - abort rule goto errorExit } } -======= - // Recognition error - abort rule - goto errorExit - } - } - - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) case 8: localctx = NewNullContext(p, localctx) p.EnterOuterAlt(localctx, 8) { -<<<<<<< HEAD p.SetState(255) -======= - p.SetState(247) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _m = p.Match(CELParserNUL) localctx.(*NullContext).tok = _m if p.HasError() { -<<<<<<< HEAD // Recognition error - abort rule goto errorExit -======= - // Recognition error - abort rule - goto errorExit ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -9116,10 +6116,6 @@ func (p *CELParser) Literal() (localctx ILiteralContext) { goto errorExit } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) errorExit: if p.HasError() { v := p.GetError() @@ -9133,7 +6129,6 @@ errorExit: goto errorExit // Trick to prevent compiler error if the label is not used } -<<<<<<< HEAD func (p *CELParser) Sempred(localctx antlr.RuleContext, ruleIndex, predIndex int) bool { switch ruleIndex { case 4: @@ -9156,26 +6151,6 @@ func (p *CELParser) Sempred(localctx antlr.RuleContext, ruleIndex, predIndex int t = localctx.(*MemberContext) } return p.Member_Sempred(t, 
predIndex) -======= - -func (p *CELParser) Sempred(localctx antlr.RuleContext, ruleIndex, predIndex int) bool { - switch ruleIndex { - case 4: - var t *RelationContext = nil - if localctx != nil { t = localctx.(*RelationContext) } - return p.Relation_Sempred(t, predIndex) - - case 5: - var t *CalcContext = nil - if localctx != nil { t = localctx.(*CalcContext) } - return p.Calc_Sempred(t, predIndex) - - case 7: - var t *MemberContext = nil - if localctx != nil { t = localctx.(*MemberContext) } - return p.Member_Sempred(t, predIndex) - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: panic("No predicate with index: " + fmt.Sprint(ruleIndex)) @@ -9185,11 +6160,7 @@ func (p *CELParser) Sempred(localctx antlr.RuleContext, ruleIndex, predIndex int func (p *CELParser) Relation_Sempred(localctx antlr.RuleContext, predIndex int) bool { switch predIndex { case 0: -<<<<<<< HEAD return p.Precpred(p.GetParserRuleContext(), 1) -======= - return p.Precpred(p.GetParserRuleContext(), 1) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: panic("No predicate with index: " + fmt.Sprint(predIndex)) @@ -9199,17 +6170,10 @@ func (p *CELParser) Relation_Sempred(localctx antlr.RuleContext, predIndex int) func (p *CELParser) Calc_Sempred(localctx antlr.RuleContext, predIndex int) bool { switch predIndex { case 1: -<<<<<<< HEAD return p.Precpred(p.GetParserRuleContext(), 2) case 2: return p.Precpred(p.GetParserRuleContext(), 1) -======= - return p.Precpred(p.GetParserRuleContext(), 2) - - case 2: - return p.Precpred(p.GetParserRuleContext(), 1) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: panic("No predicate with index: " + fmt.Sprint(predIndex)) @@ -9219,7 +6183,6 @@ func (p *CELParser) Calc_Sempred(localctx antlr.RuleContext, predIndex int) bool func (p *CELParser) Member_Sempred(localctx antlr.RuleContext, predIndex int) bool { switch predIndex { case 3: -<<<<<<< HEAD return p.Precpred(p.GetParserRuleContext(), 3) case 4: @@ -9227,21 +6190,8 @@ func (p *CELParser) Member_Sempred(localctx antlr.RuleContext, predIndex int) bo case 5: return p.Precpred(p.GetParserRuleContext(), 1) -======= - return p.Precpred(p.GetParserRuleContext(), 3) - - case 4: - return p.Precpred(p.GetParserRuleContext(), 2) - - case 5: - return p.Precpred(p.GetParserRuleContext(), 1) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: panic("No predicate with index: " + fmt.Sprint(predIndex)) } } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_visitor.go b/vendor/github.com/google/cel-go/parser/gen/cel_visitor.go index fccb0b182d..7cefe5c571 100644 --- a/vendor/github.com/google/cel-go/parser/gen/cel_visitor.go +++ b/vendor/github.com/google/cel-go/parser/gen/cel_visitor.go @@ -1,16 +1,8 @@ -<<<<<<< HEAD // Code generated from /usr/local/google/home/jdtatum/github/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT. -======= -// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) package gen // CEL import "github.com/antlr4-go/antlr/v4" -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // A complete Visitor for a parse tree produced by CELParser. type CELVisitor interface { antlr.ParseTreeVisitor @@ -54,16 +46,11 @@ type CELVisitor interface { // Visit a parse tree produced by CELParser#Index. 
VisitIndex(ctx *IndexContext) interface{} -<<<<<<< HEAD // Visit a parse tree produced by CELParser#Ident. VisitIdent(ctx *IdentContext) interface{} // Visit a parse tree produced by CELParser#GlobalCall. VisitGlobalCall(ctx *GlobalCallContext) interface{} -======= - // Visit a parse tree produced by CELParser#IdentOrGlobalCall. - VisitIdentOrGlobalCall(ctx *IdentOrGlobalCallContext) interface{} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Visit a parse tree produced by CELParser#Nested. VisitNested(ctx *NestedContext) interface{} @@ -95,15 +82,12 @@ type CELVisitor interface { // Visit a parse tree produced by CELParser#mapInitializerList. VisitMapInitializerList(ctx *MapInitializerListContext) interface{} -<<<<<<< HEAD // Visit a parse tree produced by CELParser#SimpleIdentifier. VisitSimpleIdentifier(ctx *SimpleIdentifierContext) interface{} // Visit a parse tree produced by CELParser#EscapedIdentifier. VisitEscapedIdentifier(ctx *EscapedIdentifierContext) interface{} -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Visit a parse tree produced by CELParser#optExpr. VisitOptExpr(ctx *OptExprContext) interface{} @@ -130,9 +114,4 @@ type CELVisitor interface { // Visit a parse tree produced by CELParser#Null. VisitNull(ctx *NullContext) interface{} -<<<<<<< HEAD -} -======= - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/google/cel-go/parser/helper.go b/vendor/github.com/google/cel-go/parser/helper.go index 56ee89cacc..c13296dd5c 100644 --- a/vendor/github.com/google/cel-go/parser/helper.go +++ b/vendor/github.com/google/cel-go/parser/helper.go @@ -115,11 +115,7 @@ func (p *parserHelper) newObjectField(fieldID int64, field string, value ast.Exp func (p *parserHelper) newComprehension(ctx any, iterRange ast.Expr, -<<<<<<< HEAD iterVar, -======= - iterVar string, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) accuVar string, accuInit ast.Expr, condition ast.Expr, @@ -129,7 +125,6 @@ func (p *parserHelper) newComprehension(ctx any, p.newID(ctx), iterRange, iterVar, accuVar, accuInit, condition, step, result) } -<<<<<<< HEAD func (p *parserHelper) newComprehensionTwoVar(ctx any, iterRange ast.Expr, iterVar, iterVar2, @@ -142,8 +137,6 @@ func (p *parserHelper) newComprehensionTwoVar(ctx any, p.newID(ctx), iterRange, iterVar, iterVar2, accuVar, accuInit, condition, step, result) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (p *parserHelper) newID(ctx any) int64 { if id, isID := ctx.(int64); isID { return id @@ -159,24 +152,12 @@ func (p *parserHelper) id(ctx any) int64 { var offset ast.OffsetRange switch c := ctx.(type) { case antlr.ParserRuleContext: -<<<<<<< HEAD start := c.GetStart() offset.Start = p.sourceInfo.ComputeOffset(int32(start.GetLine()), int32(start.GetColumn())) offset.Stop = offset.Start + int32(len(c.GetText())) case antlr.Token: offset.Start = p.sourceInfo.ComputeOffset(int32(c.GetLine()), int32(c.GetColumn())) offset.Stop = offset.Start + int32(len(c.GetText())) -======= - start, stop := c.GetStart(), c.GetStop() - if stop == nil { - stop = start - } - offset.Start = p.sourceInfo.ComputeOffset(int32(start.GetLine()), int32(start.GetColumn())) - offset.Stop = p.sourceInfo.ComputeOffset(int32(stop.GetLine()), int32(stop.GetColumn())) - case antlr.Token: - offset.Start = p.sourceInfo.ComputeOffset(int32(c.GetLine()), int32(c.GetColumn())) - offset.Stop = offset.Start ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) case common.Location: offset.Start = 
p.sourceInfo.ComputeOffset(int32(c.Line()), int32(c.Column())) offset.Stop = offset.Start @@ -192,7 +173,6 @@ func (p *parserHelper) id(ctx any) int64 { return id } -<<<<<<< HEAD func (p *parserHelper) deleteID(id int64) { p.sourceInfo.ClearOffsetRange(id) if id == p.nextID-1 { @@ -200,19 +180,14 @@ func (p *parserHelper) deleteID(id int64) { } } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (p *parserHelper) getLocation(id int64) common.Location { return p.sourceInfo.GetStartLocation(id) } -<<<<<<< HEAD func (p *parserHelper) getLocationByOffset(offset int32) common.Location { return p.getSourceInfo().GetLocationByOffset(offset) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // buildMacroCallArg iterates the expression and returns a new expression // where all macros have been replaced by their IDs in MacroCalls func (p *parserHelper) buildMacroCallArg(expr ast.Expr) ast.Expr { @@ -420,15 +395,10 @@ func (e *exprHelper) Copy(expr ast.Expr) ast.Expr { cond := e.Copy(compre.LoopCondition()) step := e.Copy(compre.LoopStep()) result := e.Copy(compre.Result()) -<<<<<<< HEAD // All comprehensions can be represented by the two-variable comprehension since the // differentiation between one and two-variable is whether the iterVar2 value is non-empty. return e.exprFactory.NewComprehensionTwoVar(copyID, iterRange, compre.IterVar(), compre.IterVar2(), compre.AccuVar(), accuInit, cond, step, result) -======= - return e.exprFactory.NewComprehension(copyID, - iterRange, compre.IterVar(), compre.AccuVar(), accuInit, cond, step, result) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return e.exprFactory.NewUnspecifiedExpr(copyID) } @@ -476,7 +446,6 @@ func (e *exprHelper) NewComprehension( e.nextMacroID(), iterRange, iterVar, accuVar, accuInit, condition, step, result) } -<<<<<<< HEAD // NewComprehensionTwoVar implements the ExprHelper interface method. func (e *exprHelper) NewComprehensionTwoVar( iterRange ast.Expr, @@ -491,8 +460,6 @@ func (e *exprHelper) NewComprehensionTwoVar( e.nextMacroID(), iterRange, iterVar, iterVar2, accuVar, accuInit, condition, step, result) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // NewIdent implements the ExprHelper interface method. func (e *exprHelper) NewIdent(name string) ast.Expr { return e.exprFactory.NewIdent(e.nextMacroID(), name) @@ -503,14 +470,11 @@ func (e *exprHelper) NewAccuIdent() ast.Expr { return e.exprFactory.NewAccuIdent(e.nextMacroID()) } -<<<<<<< HEAD // AccuIdentName implements the ExprHelper interface method. func (e *exprHelper) AccuIdentName() string { return e.exprFactory.AccuIdentName() } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // NewGlobalCall implements the ExprHelper interface method. func (e *exprHelper) NewCall(function string, args ...ast.Expr) ast.Expr { return e.exprFactory.NewCall(e.nextMacroID(), function, args...) diff --git a/vendor/github.com/google/cel-go/parser/macro.go b/vendor/github.com/google/cel-go/parser/macro.go index 375c9355bd..6b3b648d34 100644 --- a/vendor/github.com/google/cel-go/parser/macro.go +++ b/vendor/github.com/google/cel-go/parser/macro.go @@ -170,20 +170,12 @@ type ExprHelper interface { // NewStructField creates a new struct field initializer from the field name and value. NewStructField(field string, init ast.Expr, optional bool) ast.EntryExpr -<<<<<<< HEAD // NewComprehension creates a new one-variable comprehension instruction. 
// // - iterRange represents the expression that resolves to a list or map where the elements or // keys (respectively) will be iterated over. // - iterVar is the variable name for the list element value, or the map key, depending on the // range type. -======= - // NewComprehension creates a new comprehension instruction. - // - // - iterRange represents the expression that resolves to a list or map where the elements or - // keys (respectively) will be iterated over. - // - iterVar is the iteration variable name. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // - accuVar is the accumulation variable name, typically parser.AccumulatorName. // - accuInit is the initial expression whose value will be set for the accuVar prior to // folding. @@ -195,7 +187,6 @@ type ExprHelper interface { // environment in the step and condition expressions. Presently, the name __result__ is commonly // used by built-in macros but this may change in the future. NewComprehension(iterRange ast.Expr, -<<<<<<< HEAD iterVar, accuVar string, accuInit, @@ -226,13 +217,6 @@ type ExprHelper interface { accuInit, condition, step, -======= - iterVar string, - accuVar string, - accuInit ast.Expr, - condition ast.Expr, - step ast.Expr, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) result ast.Expr) ast.Expr // NewIdent creates an identifier Expr value. @@ -241,12 +225,9 @@ type ExprHelper interface { // NewAccuIdent returns an accumulator identifier for use with comprehension results. NewAccuIdent() ast.Expr -<<<<<<< HEAD // AccuIdentName returns the name of the accumulator identifier. AccuIdentName() string -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // NewCall creates a function call Expr value for a global (free) function. NewCall(function string, args ...ast.Expr) ast.Expr @@ -281,7 +262,6 @@ var ( // ExistsOneMacro expands "range.exists_one(var, predicate)", which is true if for exactly one // element in range the predicate holds. -<<<<<<< HEAD // Deprecated: Use ExistsOneMacroNew ExistsOneMacro = NewReceiverMacro(operators.ExistsOne, 2, MakeExistsOne) @@ -289,10 +269,6 @@ var ( // element in range the predicate holds. ExistsOneMacroNew = NewReceiverMacro("existsOne", 2, MakeExistsOne) -======= - ExistsOneMacro = NewReceiverMacro(operators.ExistsOne, 2, MakeExistsOne) - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // MapMacro expands "range.map(var, function)" into a comprehension which applies the function // to each element in the range to produce a new list. MapMacro = NewReceiverMacro(operators.Map, 2, MakeMap) @@ -312,10 +288,7 @@ var ( AllMacro, ExistsMacro, ExistsOneMacro, -<<<<<<< HEAD ExistsOneMacroNew, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) MapMacro, MapFilterMacro, FilterMacro, @@ -328,14 +301,11 @@ var ( // AccumulatorName is the traditional variable name assigned to the fold accumulator variable. const AccumulatorName = "__result__" -<<<<<<< HEAD // HiddenAccumulatorName is a proposed update to the default fold accumlator variable. // @result is not normally accessible from source, preventing accidental or intentional collisions // in user expressions. 
const HiddenAccumulatorName = "@result" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type quantifierKind int const ( @@ -380,13 +350,10 @@ func MakeMap(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common if !found { return nil, eh.NewError(args[0].ID(), "argument is not an identifier") } -<<<<<<< HEAD accu := eh.AccuIdentName() if v == accu || v == AccumulatorName { return nil, eh.NewError(args[0].ID(), "iteration variable overwrites accumulator variable") } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var fn ast.Expr var filter ast.Expr @@ -406,11 +373,7 @@ func MakeMap(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common if filter != nil { step = eh.NewCall(operators.Conditional, filter, step, eh.NewAccuIdent()) } -<<<<<<< HEAD return eh.NewComprehension(target, v, accu, init, condition, step, eh.NewAccuIdent()), nil -======= - return eh.NewComprehension(target, v, AccumulatorName, init, condition, step, eh.NewAccuIdent()), nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // MakeFilter expands the input call arguments into a comprehension which produces a list which contains @@ -421,24 +384,17 @@ func MakeFilter(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *com if !found { return nil, eh.NewError(args[0].ID(), "argument is not an identifier") } -<<<<<<< HEAD accu := eh.AccuIdentName() if v == accu || v == AccumulatorName { return nil, eh.NewError(args[0].ID(), "iteration variable overwrites accumulator variable") } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) filter := args[1] init := eh.NewList() condition := eh.NewLiteral(types.True) step := eh.NewCall(operators.Add, eh.NewAccuIdent(), eh.NewList(args[0])) step = eh.NewCall(operators.Conditional, filter, step, eh.NewAccuIdent()) -<<<<<<< HEAD return eh.NewComprehension(target, v, accu, init, condition, step, eh.NewAccuIdent()), nil -======= - return eh.NewComprehension(target, v, AccumulatorName, init, condition, step, eh.NewAccuIdent()), nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // MakeHas expands the input call arguments into a presence test, e.g. 
has(.field) @@ -455,13 +411,10 @@ func makeQuantifier(kind quantifierKind, eh ExprHelper, target ast.Expr, args [] if !found { return nil, eh.NewError(args[0].ID(), "argument must be a simple name") } -<<<<<<< HEAD accu := eh.AccuIdentName() if v == accu || v == AccumulatorName { return nil, eh.NewError(args[0].ID(), "iteration variable overwrites accumulator variable") } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var init ast.Expr var condition ast.Expr @@ -481,7 +434,6 @@ func makeQuantifier(kind quantifierKind, eh ExprHelper, target ast.Expr, args [] step = eh.NewCall(operators.LogicalOr, eh.NewAccuIdent(), args[1]) result = eh.NewAccuIdent() case quantifierExistsOne: -<<<<<<< HEAD init = eh.NewLiteral(types.Int(0)) condition = eh.NewLiteral(types.True) step = eh.NewCall(operators.Conditional, args[1], @@ -491,19 +443,6 @@ func makeQuantifier(kind quantifierKind, eh ExprHelper, target ast.Expr, args [] return nil, eh.NewError(args[0].ID(), fmt.Sprintf("unrecognized quantifier '%v'", kind)) } return eh.NewComprehension(target, v, accu, init, condition, step, result), nil -======= - zeroExpr := eh.NewLiteral(types.Int(0)) - oneExpr := eh.NewLiteral(types.Int(1)) - init = zeroExpr - condition = eh.NewLiteral(types.True) - step = eh.NewCall(operators.Conditional, args[1], - eh.NewCall(operators.Add, eh.NewAccuIdent(), oneExpr), eh.NewAccuIdent()) - result = eh.NewCall(operators.Equals, eh.NewAccuIdent(), oneExpr) - default: - return nil, eh.NewError(args[0].ID(), fmt.Sprintf("unrecognized quantifier '%v'", kind)) - } - return eh.NewComprehension(target, v, AccumulatorName, init, condition, step, result), nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func extractIdent(e ast.Expr) (string, bool) { diff --git a/vendor/github.com/google/cel-go/parser/options.go b/vendor/github.com/google/cel-go/parser/options.go index be580150be..4eb30f83e0 100644 --- a/vendor/github.com/google/cel-go/parser/options.go +++ b/vendor/github.com/google/cel-go/parser/options.go @@ -26,11 +26,8 @@ type options struct { populateMacroCalls bool enableOptionalSyntax bool enableVariadicOperatorASTs bool -<<<<<<< HEAD enableIdentEscapeSyntax bool enableHiddenAccumulatorName bool -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Option configures the behavior of the parser. @@ -132,7 +129,6 @@ func EnableOptionalSyntax(optionalSyntax bool) Option { } } -<<<<<<< HEAD // EnableIdentEscapeSyntax enables backtick (`) escaped field identifiers. This // supports extended types of characters in identifiers, e.g. foo.`baz-bar`. func EnableIdentEscapeSyntax(enableIdentEscapeSyntax bool) Option { @@ -154,8 +150,6 @@ func EnableHiddenAccumulatorName(enabled bool) Option { } } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // EnableVariadicOperatorASTs enables a compact representation of chained like-kind commutative // operators. e.g. `a || b || c || d` -> `call(op='||', args=[a, b, c, d])` // diff --git a/vendor/github.com/google/cel-go/parser/parser.go b/vendor/github.com/google/cel-go/parser/parser.go index e222c19cf9..b5ec73ec64 100644 --- a/vendor/github.com/google/cel-go/parser/parser.go +++ b/vendor/github.com/google/cel-go/parser/parser.go @@ -17,10 +17,7 @@ package parser import ( -<<<<<<< HEAD "errors" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "fmt" "regexp" "strconv" @@ -44,10 +41,7 @@ type Parser struct { // NewParser builds and returns a new Parser using the provided options. 
func NewParser(opts ...Option) (*Parser, error) { p := &Parser{} -<<<<<<< HEAD p.enableHiddenAccumulatorName = true -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, opt := range opts { if err := opt(&p.options); err != nil { return nil, err @@ -96,15 +90,11 @@ func mustNewParser(opts ...Option) *Parser { // Parse parses the expression represented by source and returns the result. func (p *Parser) Parse(source common.Source) (*ast.AST, *common.Errors) { errs := common.NewErrors(source) -<<<<<<< HEAD accu := AccumulatorName if p.enableHiddenAccumulatorName { accu = HiddenAccumulatorName } fac := ast.NewExprFactoryWithAccumulator(accu) -======= - fac := ast.NewExprFactory() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) impl := parser{ errors: &parseErrors{errs}, exprFactory: fac, @@ -117,10 +107,7 @@ func (p *Parser) Parse(source common.Source) (*ast.AST, *common.Errors) { populateMacroCalls: p.populateMacroCalls, enableOptionalSyntax: p.enableOptionalSyntax, enableVariadicOperatorASTs: p.enableVariadicOperatorASTs, -<<<<<<< HEAD enableIdentEscapeSyntax: p.enableIdentEscapeSyntax, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } buf, ok := source.(runes.Buffer) if !ok { @@ -163,7 +150,6 @@ var reservedIds = map[string]struct{}{ "while": {}, } -<<<<<<< HEAD func unescapeIdent(in string) (string, error) { if len(in) <= 2 { return "", errors.New("invalid escaped identifier: underflow") @@ -185,8 +171,6 @@ func (p *parser) normalizeIdent(ctx gen.IEscapeIdentContext) (string, error) { return "", errors.New("unsupported ident kind") } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Parse converts a source input a parsed expression. // This function calls ParseWithMacros with AllMacros. // @@ -340,10 +324,7 @@ type parser struct { populateMacroCalls bool enableOptionalSyntax bool enableVariadicOperatorASTs bool -<<<<<<< HEAD enableIdentEscapeSyntax bool -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var _ gen.CELVisitor = (*parser)(nil) @@ -417,15 +398,10 @@ func (p *parser) Visit(tree antlr.ParseTree) any { return out case *gen.LogicalNotContext: return p.VisitLogicalNot(tree) -<<<<<<< HEAD case *gen.IdentContext: return p.VisitIdent(tree) case *gen.GlobalCallContext: return p.VisitGlobalCall(tree) -======= - case *gen.IdentOrGlobalCallContext: - return p.VisitIdentOrGlobalCall(tree) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) case *gen.SelectContext: p.checkAndIncrementRecursionDepth() out := p.VisitSelect(tree) @@ -593,14 +569,10 @@ func (p *parser) VisitSelect(ctx *gen.SelectContext) any { if ctx.GetId() == nil || ctx.GetOp() == nil { return p.helper.newExpr(ctx) } -<<<<<<< HEAD id, err := p.normalizeIdent(ctx.GetId()) if err != nil { p.reportError(ctx.GetId(), "%v", err) } -======= - id := ctx.GetId().GetText() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if ctx.GetOpt() != nil { if !p.enableOptionalSyntax { return p.reportError(ctx.GetOp(), "unsupported syntax '.?'") @@ -684,7 +656,6 @@ func (p *parser) VisitIFieldInitializerList(ctx gen.IFieldInitializerListContext p.reportError(optField, "unsupported syntax '?'") continue } -<<<<<<< HEAD // The field may be empty due to a prior error. fieldName, err := p.normalizeIdent(optField.EscapeIdent()) @@ -693,14 +664,6 @@ func (p *parser) VisitIFieldInitializerList(ctx gen.IFieldInitializerListContext continue } -======= - // The field may be empty due to a prior error. 
- id := optField.IDENTIFIER() - if id == nil { - return []ast.EntryExpr{} - } - fieldName := id.GetText() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) value := p.Visit(vals[i]).(ast.Expr) field := p.helper.newObjectField(initID, fieldName, value, optional) result[i] = field @@ -708,13 +671,8 @@ func (p *parser) VisitIFieldInitializerList(ctx gen.IFieldInitializerListContext return result } -<<<<<<< HEAD // Visit a parse tree produced by CELParser#Ident. func (p *parser) VisitIdent(ctx *gen.IdentContext) any { -======= -// Visit a parse tree produced by CELParser#IdentOrGlobalCall. -func (p *parser) VisitIdentOrGlobalCall(ctx *gen.IdentOrGlobalCallContext) any { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) identName := "" if ctx.GetLeadingDot() != nil { identName = "." @@ -729,7 +687,6 @@ func (p *parser) VisitIdentOrGlobalCall(ctx *gen.IdentOrGlobalCallContext) any { return p.reportError(ctx, "reserved identifier: %s", id) } identName += id -<<<<<<< HEAD return p.helper.newIdent(ctx.GetId(), identName) } @@ -754,15 +711,6 @@ func (p *parser) VisitGlobalCall(ctx *gen.GlobalCallContext) any { } -======= - if ctx.GetOp() != nil { - opID := p.helper.id(ctx.GetOp()) - return p.globalCallOrMacro(opID, identName, p.visitExprList(ctx.GetArgs())...) - } - return p.helper.newIdent(ctx.GetId(), identName) -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Visit a parse tree produced by CELParser#CreateList. func (p *parser) VisitCreateList(ctx *gen.CreateListContext) any { listID := p.helper.id(ctx.GetOp()) @@ -861,11 +809,7 @@ func (p *parser) VisitDouble(ctx *gen.DoubleContext) any { // Visit a parse tree produced by CELParser#String. func (p *parser) VisitString(ctx *gen.StringContext) any { -<<<<<<< HEAD s := p.unquote(ctx, ctx.GetTok().GetText(), false) -======= - s := p.unquote(ctx, ctx.GetText(), false) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return p.helper.newLiteralString(ctx, s) } @@ -965,12 +909,8 @@ func (p *parser) reportError(ctx any, format string, args ...any) ast.Expr { // ANTLR Parse listener implementations func (p *parser) SyntaxError(recognizer antlr.Recognizer, offendingSymbol any, line, column int, msg string, e antlr.RecognitionException) { -<<<<<<< HEAD offset := p.helper.sourceInfo.ComputeOffset(int32(line), int32(column)) l := p.helper.getLocationByOffset(offset) -======= - l := p.helper.source.NewLocation(line, column) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Hack to keep existing error messages consistent with previous versions of CEL when a reserved word // is used as an identifier. This behavior needs to be overhauled to provide consistent, normalized error // messages out of ANTLR to prevent future breaking changes related to error message content. @@ -1030,19 +970,12 @@ func (p *parser) expandMacro(exprID int64, function string, target ast.Expr, arg expr, err := macro.Expander()(eh, target, args) // An error indicates that the macro was matched, but the arguments were not well-formed. 
if err != nil { -<<<<<<< HEAD loc := err.Location if loc == nil { loc = p.helper.getLocation(exprID) } p.helper.deleteID(exprID) return p.reportError(loc, "%s", err.Message), true -======= - if err.Location != nil { - return p.reportError(err.Location, err.Message), true - } - return p.reportError(p.helper.getLocation(exprID), err.Message), true ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // A nil value from the macro indicates that the macro implementation decided that // an expansion should not be performed. @@ -1052,10 +985,7 @@ func (p *parser) expandMacro(exprID int64, function string, target ast.Expr, arg if p.populateMacroCalls { p.helper.addMacroCall(expr.ID(), function, target, args...) } -<<<<<<< HEAD p.helper.deleteID(exprID) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return expr, true } diff --git a/vendor/github.com/google/cel-go/parser/unescape.go b/vendor/github.com/google/cel-go/parser/unescape.go index 6dd6aac752..43cc9b901b 100644 --- a/vendor/github.com/google/cel-go/parser/unescape.go +++ b/vendor/github.com/google/cel-go/parser/unescape.go @@ -15,11 +15,7 @@ package parser import ( -<<<<<<< HEAD "errors" -======= - "fmt" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "strings" "unicode/utf8" ) @@ -34,11 +30,7 @@ func unescape(value string, isBytes bool) (string, error) { // Nothing to unescape / decode. if n < 2 { -<<<<<<< HEAD return value, errors.New("unable to unescape string") -======= - return value, fmt.Errorf("unable to unescape string") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Raw string preceded by the 'r|R' prefix. @@ -51,11 +43,7 @@ func unescape(value string, isBytes bool) (string, error) { // Quoted string of some form, must have same first and last char. if value[0] != value[n-1] || (value[0] != '"' && value[0] != '\'') { -<<<<<<< HEAD return value, errors.New("unable to unescape string") -======= - return value, fmt.Errorf("unable to unescape string") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Normalize the multi-line CEL string representation to a standard @@ -63,20 +51,12 @@ func unescape(value string, isBytes bool) (string, error) { if n >= 6 { if strings.HasPrefix(value, "'''") { if !strings.HasSuffix(value, "'''") { -<<<<<<< HEAD return value, errors.New("unable to unescape string") -======= - return value, fmt.Errorf("unable to unescape string") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } value = "\"" + value[3:n-3] + "\"" } else if strings.HasPrefix(value, `"""`) { if !strings.HasSuffix(value, `"""`) { -<<<<<<< HEAD return value, errors.New("unable to unescape string") -======= - return value, fmt.Errorf("unable to unescape string") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } value = "\"" + value[3:n-3] + "\"" } @@ -110,17 +90,10 @@ func unescape(value string, isBytes bool) (string, error) { // unescapeChar takes a string input and returns the following info: // -<<<<<<< HEAD // value - the escaped unicode rune at the front of the string. // encode - the value should be unicode-encoded // tail - the remainder of the input string. // err - error value, if the character could not be unescaped. -======= -// value - the escaped unicode rune at the front of the string. -// encode - the value should be unicode-encoded -// tail - the remainder of the input string. -// err - error value, if the character could not be unescaped. 
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // When encode is true the return value may still fit within a single byte, // but unicode encoding is attempted which is more expensive than when the @@ -140,11 +113,7 @@ func unescapeChar(s string, isBytes bool) (value rune, encode bool, tail string, // 2. Last character is the start of an escape sequence. if len(s) <= 1 { -<<<<<<< HEAD err = errors.New("unable to unescape string, found '\\' as last character") -======= - err = fmt.Errorf("unable to unescape string, found '\\' as last character") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return } @@ -188,53 +157,32 @@ func unescapeChar(s string, isBytes bool) (value rune, encode bool, tail string, case 'u': n = 4 if isBytes { -<<<<<<< HEAD err = errors.New("unable to unescape string") -======= - err = fmt.Errorf("unable to unescape string") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return } case 'U': n = 8 if isBytes { -<<<<<<< HEAD err = errors.New("unable to unescape string") -======= - err = fmt.Errorf("unable to unescape string") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return } } var v rune if len(s) < n { -<<<<<<< HEAD err = errors.New("unable to unescape string") -======= - err = fmt.Errorf("unable to unescape string") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return } for j := 0; j < n; j++ { x, ok := unhex(s[j]) if !ok { -<<<<<<< HEAD err = errors.New("unable to unescape string") -======= - err = fmt.Errorf("unable to unescape string") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return } v = v<<4 | x } s = s[n:] -<<<<<<< HEAD if !isBytes && !utf8.ValidRune(v) { err = errors.New("invalid unicode code point") -======= - if !isBytes && v > utf8.MaxRune { - err = fmt.Errorf("unable to unescape string") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return } value = v @@ -242,33 +190,20 @@ func unescapeChar(s string, isBytes bool) (value rune, encode bool, tail string, // 5. Octal escape sequences, must be three digits \[0-3][0-7][0-7] case '0', '1', '2', '3': if len(s) < 2 { -<<<<<<< HEAD err = errors.New("unable to unescape octal sequence in string") -======= - err = fmt.Errorf("unable to unescape octal sequence in string") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return } v := rune(c - '0') for j := 0; j < 2; j++ { x := s[j] if x < '0' || x > '7' { -<<<<<<< HEAD err = errors.New("unable to unescape octal sequence in string") -======= - err = fmt.Errorf("unable to unescape octal sequence in string") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return } v = v*8 + rune(x-'0') } -<<<<<<< HEAD if !isBytes && !utf8.ValidRune(v) { err = errors.New("invalid unicode code point") -======= - if !isBytes && v > utf8.MaxRune { - err = fmt.Errorf("unable to unescape string") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return } value = v @@ -277,11 +212,7 @@ func unescapeChar(s string, isBytes bool) (value rune, encode bool, tail string, // Unknown escape sequence. 
 	default:
-<<<<<<< HEAD
 		err = errors.New("unable to unescape string")
-=======
-		err = fmt.Errorf("unable to unescape string")
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 	tail = s
diff --git a/vendor/github.com/google/cel-go/parser/unparser.go b/vendor/github.com/google/cel-go/parser/unparser.go
index 8bcc30c44c..ffd5b18e47 100644
--- a/vendor/github.com/google/cel-go/parser/unparser.go
+++ b/vendor/github.com/google/cel-go/parser/unparser.go
@@ -17,20 +17,14 @@
 package parser
 
 import (
 	"errors"
 	"fmt"
-<<<<<<< HEAD
 	"regexp"
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"strconv"
 	"strings"
 
 	"github.com/google/cel-go/common/ast"
 	"github.com/google/cel-go/common/operators"
 	"github.com/google/cel-go/common/types"
-<<<<<<< HEAD
 	"github.com/google/cel-go/common/types/ref"
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 )
 
 // Unparse takes an input expression and source position information and generates a human-readable
@@ -73,7 +67,6 @@ func Unparse(expr ast.Expr, info *ast.SourceInfo, opts ...UnparserOption) (strin
 	return un.str.String(), nil
 }
 
-<<<<<<< HEAD
 var identifierPartPattern *regexp.Regexp = regexp.MustCompile(`^[A-Za-z_][0-9A-Za-z_]*$`)
 
 func maybeQuoteField(field string) string {
@@ -83,8 +76,6 @@ func maybeQuoteField(field string) string {
 	return field
 }
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // unparser visits an expression to reconstruct a human-readable string from an AST.
 type unparser struct {
 	str strings.Builder
@@ -283,7 +274,6 @@ func (un *unparser) visitCallUnary(expr ast.Expr) error {
 	return un.visitMaybeNested(args[0], nested)
 }
 
-<<<<<<< HEAD
 func (un *unparser) visitConstVal(val ref.Val) error {
 	optional := false
 	if optVal, ok := val.(*types.Optional); ok {
@@ -295,10 +285,6 @@ func (un *unparser) visitConst(expr ast.Expr) error {
 		un.str.WriteString("optional.of(")
 		val = optVal.GetValue()
 	}
-=======
-func (un *unparser) visitConst(expr ast.Expr) error {
-	val := expr.AsLiteral()
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	switch val := val.(type) {
 	case types.Bool:
 		un.str.WriteString(strconv.FormatBool(bool(val)))
@@ -327,7 +313,6 @@ func (un *unparser) visitConst(expr ast.Expr) error {
 		ui := strconv.FormatUint(uint64(val), 10)
 		un.str.WriteString(ui)
 		un.str.WriteString("u")
-<<<<<<< HEAD
 	case *types.Optional:
 		if err := un.visitConstVal(val); err != nil {
 			return err
@@ -343,9 +328,6 @@ func (un *unparser) visitConst(expr ast.Expr) error {
 	val := expr.AsLiteral()
 	if err := un.visitConstVal(val); err != nil {
-=======
-	default:
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		return fmt.Errorf("unsupported constant: %v", expr)
 	}
 	return nil
@@ -404,11 +386,7 @@ func (un *unparser) visitSelectInternal(operand ast.Expr, testOnly bool, op stri
 		return err
 	}
 	un.str.WriteString(op)
-<<<<<<< HEAD
 	un.str.WriteString(maybeQuoteField(field))
-=======
-	un.str.WriteString(field)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if testOnly {
 		un.str.WriteString(")")
 	}
@@ -426,11 +404,7 @@ func (un *unparser) visitStructMsg(expr ast.Expr) error {
 		if field.IsOptional() {
 			un.str.WriteString("?")
 		}
-<<<<<<< HEAD
 		un.str.WriteString(maybeQuoteField(f))
-=======
-		un.str.WriteString(f)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		un.str.WriteString(": ")
 		v := field.Value()
 		err := un.visit(v)
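The unparser hunks above pull in cel-go's field quoting: a field name is emitted bare only when it matches the identifier pattern, and is otherwise wrapped in backticks so the unparsed expression stays re-parseable. A minimal standalone sketch of that technique, inferred only from the regexp and call sites visible in this diff (not cel-go's exact implementation):

package main

import (
	"fmt"
	"regexp"
)

// identifierPartPattern mirrors the pattern added in the unparser diff above.
var identifierPartPattern = regexp.MustCompile(`^[A-Za-z_][0-9A-Za-z_]*$`)

// maybeQuoteField returns the field unchanged when it is a bare identifier
// and backtick-quotes it otherwise.
func maybeQuoteField(field string) string {
	if !identifierPartPattern.MatchString(field) {
		return "`" + field + "`"
	}
	return field
}

func main() {
	fmt.Println(maybeQuoteField("name"))    // name
	fmt.Println(maybeQuoteField("foo-bar")) // `foo-bar`
}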
diff --git a/vendor/github.com/google/certificate-transparency-go/AUTHORS b/vendor/github.com/google/certificate-transparency-go/AUTHORS
index c12bb6e120..ad514665ef 100644
--- a/vendor/github.com/google/certificate-transparency-go/AUTHORS
+++ b/vendor/github.com/google/certificate-transparency-go/AUTHORS
@@ -9,10 +9,6 @@
 # Please keep the list sorted.
 
 Alex Cohn
-<<<<<<< HEAD
-=======
-Comodo CA Limited
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 Ed Maste
 Elisha Silas
 Fiaz Hossain
@@ -27,10 +23,7 @@ Nicholas Galbreath
 Oliver Weidner
 PrimeKey Solutions AB
 Ruslan Kovalov
-<<<<<<< HEAD
 Sectigo Limited
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 Venafi, Inc.
 Vladimir Rutsky
 Ximin Luo
diff --git a/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md b/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md
index f73183ba71..5cb7b7d433 100644
--- a/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md
+++ b/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md
@@ -2,7 +2,6 @@
 
 ## HEAD
 
-<<<<<<< HEAD
 ## v1.3.1
 
 * Add AllLogListSignatureURL by @AlexLaroche in https://github.com/google/certificate-transparency-go/pull/1634
@@ -177,8 +176,6 @@ A new flag `http_idle_timeout` is added to set the HTTP server's idle timeout va
 * Bump the all-deps group with 3 updates by @dependabot in https://github.com/google/certificate-transparency-go/pull/1603
 * Bump distroless/base-debian12 from `6ae5fe6` to `8fe31fb` in /trillian/examples/deployment/docker/ctfe in the all-deps group by @dependabot in https://github.com/google/certificate-transparency-go/pull/1604
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 ## v1.2.1
 
 ### Fixes
diff --git a/vendor/github.com/google/certificate-transparency-go/CONTRIBUTORS b/vendor/github.com/google/certificate-transparency-go/CONTRIBUTORS
index 3c2475353a..3a98a7e1ef 100644
--- a/vendor/github.com/google/certificate-transparency-go/CONTRIBUTORS
+++ b/vendor/github.com/google/certificate-transparency-go/CONTRIBUTORS
@@ -52,11 +52,7 @@ Paul Lietar
 Pavel Kalinnikov
 Pierre Phaneuf
 Rob Percival
-<<<<<<< HEAD
 Rob Stradling
-=======
-Rob Stradling
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 Roger Ng
 Roland Shoemaker
 Ruslan Kovalov
diff --git a/vendor/github.com/google/certificate-transparency-go/README.md b/vendor/github.com/google/certificate-transparency-go/README.md
index 5e9003adf6..b528c55755 100644
--- a/vendor/github.com/google/certificate-transparency-go/README.md
+++ b/vendor/github.com/google/certificate-transparency-go/README.md
@@ -6,11 +6,7 @@
 
 This repository holds Go code related to
 [Certificate Transparency](https://www.certificate-transparency.org/) (CT). The
-<<<<<<< HEAD
 repository requires Go version 1.22.
-=======
-repository requires Go version 1.21.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 - [Repository Structure](#repository-structure)
 - [Trillian CT Personality](#trillian-ct-personality)
diff --git a/vendor/github.com/google/go-containerregistry/internal/redact/redact.go b/vendor/github.com/google/go-containerregistry/internal/redact/redact.go
index ee385fa08a..6d47570076 100644
--- a/vendor/github.com/google/go-containerregistry/internal/redact/redact.go
+++ b/vendor/github.com/google/go-containerregistry/internal/redact/redact.go
@@ -51,11 +51,7 @@ func Error(err error) error {
 	if perr != nil {
 		return err // If the URL can't be parsed, just return the original error.
 	}
-<<<<<<< HEAD
 	uerr.URL = URL(u) // Update the URL to the redacted URL.
-======= - uerr.URL = URL(u).String() // Update the URL to the redacted URL. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return uerr } @@ -77,11 +73,7 @@ var paramAllowlist = map[string]struct{}{ } // URL redacts potentially sensitive query parameter values from the URL's query string. -<<<<<<< HEAD func URL(u *url.URL) string { -======= -func URL(u *url.URL) *url.URL { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) qs := u.Query() for k, v := range qs { for i := range v { @@ -93,9 +85,5 @@ func URL(u *url.URL) *url.URL { } r := *u r.RawQuery = qs.Encode() -<<<<<<< HEAD return r.Redacted() -======= - return &r ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go b/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go index 0127a31344..6e8814d808 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go +++ b/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go @@ -84,11 +84,7 @@ func (dk *defaultKeychain) Resolve(target Resource) (Authenticator, error) { } // Resolve implements Keychain. -<<<<<<< HEAD func (dk *defaultKeychain) ResolveContext(_ context.Context, target Resource) (Authenticator, error) { -======= -func (dk *defaultKeychain) ResolveContext(ctx context.Context, target Resource) (Authenticator, error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) dk.mu.Lock() defer dk.mu.Unlock() @@ -208,11 +204,7 @@ func (w wrapper) Resolve(r Resource) (Authenticator, error) { return w.ResolveContext(context.Background(), r) } -<<<<<<< HEAD func (w wrapper) ResolveContext(_ context.Context, r Resource) (Authenticator, error) { -======= -func (w wrapper) ResolveContext(ctx context.Context, r Resource) (Authenticator, error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) u, p, err := w.h.Get(r.RegistryStr()) if err != nil { return Anonymous, nil diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/ref.go b/vendor/github.com/google/go-containerregistry/pkg/name/ref.go index 305b9bee98..0a04867723 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/name/ref.go +++ b/vendor/github.com/google/go-containerregistry/pkg/name/ref.go @@ -44,11 +44,7 @@ func ParseReference(s string, opts ...Option) (Reference, error) { if d, err := NewDigest(s, opts...); err == nil { return d, nil } -<<<<<<< HEAD return nil, newErrBadName("could not parse reference: %s", s) -======= - return nil, newErrBadName("could not parse reference: " + s) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type stringConst string diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go index 3e26ac702b..4207740c35 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go @@ -21,10 +21,7 @@ import ( "errors" "fmt" "io" -<<<<<<< HEAD "maps" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "path/filepath" "strings" "time" @@ -169,27 +166,16 @@ func Annotations(f partial.WithRawManifest, anns map[string]string) partial.With if img, ok := f.(v1.Image); ok { return &image{ base: img, -<<<<<<< HEAD annotations: maps.Clone(anns), -======= - annotations: anns, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } if idx, ok := f.(v1.ImageIndex); ok { return &index{ base: idx, -<<<<<<< HEAD 
annotations: maps.Clone(anns), } } return arbitraryRawManifest{a: f, anns: maps.Clone(anns)} -======= - annotations: anns, - } - } - return arbitraryRawManifest{a: f, anns: anns} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type arbitraryRawManifest struct { diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/referrers.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/referrers.go index 6dadba8090..4bc6f70a85 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/referrers.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/referrers.go @@ -61,11 +61,7 @@ func (f *fetcher) fetchReferrers(ctx context.Context, filter map[string]string, } defer resp.Body.Close() -<<<<<<< HEAD if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound, http.StatusBadRequest, http.StatusNotAcceptable); err != nil { -======= - if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound, http.StatusBadRequest); err != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil, err } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go index 5ebf6c8d4f..ea652d4ae8 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go @@ -24,15 +24,10 @@ import ( "net/http" "net/url" "strings" -<<<<<<< HEAD "sync" authchallenge "github.com/docker/distribution/registry/client/auth/challenge" -======= - - authchallenge "github.com/docker/distribution/registry/client/auth/challenge" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/google/go-containerregistry/internal/redact" "github.com/google/go-containerregistry/pkg/authn" "github.com/google/go-containerregistry/pkg/logs" @@ -105,10 +100,7 @@ func fromChallenge(reg name.Registry, auth authn.Authenticator, t http.RoundTrip } type bearerTransport struct { -<<<<<<< HEAD mx sync.RWMutex -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Wrapped by bearerTransport. inner http.RoundTripper // Basic credentials that we exchange for bearer tokens. @@ -150,14 +142,10 @@ func (bt *bearerTransport) RoundTrip(in *http.Request) (*http.Response, error) { // the registry with which we are interacting. // In case of redirect http.Client can use an empty Host, check URL too. if matchesHost(bt.registry.RegistryStr(), in, bt.scheme) { -<<<<<<< HEAD bt.mx.RLock() localToken := bt.bearer.RegistryToken bt.mx.RUnlock() hdr := fmt.Sprintf("Bearer %s", localToken) -======= - hdr := fmt.Sprintf("Bearer %s", bt.bearer.RegistryToken) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) in.Header.Set("Authorization", hdr) } return bt.inner.RoundTrip(in) @@ -174,19 +162,12 @@ func (bt *bearerTransport) RoundTrip(in *http.Request) (*http.Response, error) { res.Body.Close() newScopes := []string{} -<<<<<<< HEAD bt.mx.Lock() got := stringSet(bt.scopes) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, wac := range challenges { // TODO(jonjohnsonjr): Should we also update "realm" or "service"? if want, ok := wac.Parameters["scope"]; ok { // Add any scopes that we don't already request. 
-<<<<<<< HEAD -======= - got := stringSet(bt.scopes) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if _, ok := got[want]; !ok { newScopes = append(newScopes, want) } @@ -198,10 +179,7 @@ func (bt *bearerTransport) RoundTrip(in *http.Request) (*http.Response, error) { // otherwise the registry might just ignore it :/ newScopes = append(newScopes, bt.scopes...) bt.scopes = newScopes -<<<<<<< HEAD bt.mx.Unlock() -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // TODO(jonjohnsonjr): Teach transport.Error about "error" and "error_description" from challenge. @@ -226,13 +204,9 @@ func (bt *bearerTransport) refresh(ctx context.Context) error { } if auth.RegistryToken != "" { -<<<<<<< HEAD bt.mx.Lock() bt.bearer.RegistryToken = auth.RegistryToken bt.mx.Unlock() -======= - bt.bearer.RegistryToken = auth.RegistryToken ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } @@ -248,13 +222,9 @@ func (bt *bearerTransport) refresh(ctx context.Context) error { // Find a token to turn into a Bearer authenticator if response.Token != "" { -<<<<<<< HEAD bt.mx.Lock() bt.bearer.RegistryToken = response.Token bt.mx.Unlock() -======= - bt.bearer.RegistryToken = response.Token ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // If we obtained a refresh token from the oauth flow, use that for refresh() now. @@ -348,13 +318,9 @@ func (bt *bearerTransport) refreshOauth(ctx context.Context) ([]byte, error) { } v := url.Values{} -<<<<<<< HEAD bt.mx.RLock() v.Set("scope", strings.Join(bt.scopes, " ")) bt.mx.RUnlock() -======= - v.Set("scope", strings.Join(bt.scopes, " ")) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if bt.service != "" { v.Set("service", bt.service) } @@ -410,13 +376,9 @@ func (bt *bearerTransport) refreshBasic(ctx context.Context) ([]byte, error) { client := http.Client{Transport: b} v := u.Query() -<<<<<<< HEAD bt.mx.RLock() v["scope"] = bt.scopes bt.mx.RUnlock() -======= - v["scope"] = bt.scopes ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) v.Set("service", bt.service) u.RawQuery = v.Encode() diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go index 5c2b890a5d..f47c77a2ba 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go @@ -297,11 +297,8 @@ const ( ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_4 ValidatePeerCertificateChainReq_VerificationMode = 4 // Internal use only. ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_5 ValidatePeerCertificateChainReq_VerificationMode = 5 -<<<<<<< HEAD // Internal use only. ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_6 ValidatePeerCertificateChainReq_VerificationMode = 6 -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // Enum value maps for ValidatePeerCertificateChainReq_VerificationMode. 
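The bearer.go hunks above guard the transport's cached registry token and scope list with a sync.RWMutex: read paths take RLock before building the Authorization header, while refresh paths take the write lock. A minimal sketch of that pattern under hypothetical names (not the vendored type itself):

package main

import (
	"fmt"
	"sync"
)

// tokenCache serializes access to a bearer token shared across goroutines.
type tokenCache struct {
	mx    sync.RWMutex
	token string
}

// get takes the read lock, mirroring the RLock/RUnlock added around
// bt.bearer.RegistryToken in the RoundTrip path above.
func (c *tokenCache) get() string {
	c.mx.RLock()
	defer c.mx.RUnlock()
	return c.token
}

// set takes the write lock, mirroring the Lock/Unlock added in refresh().
func (c *tokenCache) set(t string) {
	c.mx.Lock()
	defer c.mx.Unlock()
	c.token = t
}

func main() {
	c := &tokenCache{}
	c.set("registry-token")
	fmt.Println("Bearer " + c.get())
}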
@@ -313,10 +310,7 @@ var ( 3: "RESERVED_CUSTOM_VERIFICATION_MODE_3", 4: "RESERVED_CUSTOM_VERIFICATION_MODE_4", 5: "RESERVED_CUSTOM_VERIFICATION_MODE_5", -<<<<<<< HEAD 6: "RESERVED_CUSTOM_VERIFICATION_MODE_6", -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } ValidatePeerCertificateChainReq_VerificationMode_value = map[string]int32{ "UNSPECIFIED": 0, @@ -325,10 +319,7 @@ var ( "RESERVED_CUSTOM_VERIFICATION_MODE_3": 3, "RESERVED_CUSTOM_VERIFICATION_MODE_4": 4, "RESERVED_CUSTOM_VERIFICATION_MODE_5": 5, -<<<<<<< HEAD "RESERVED_CUSTOM_VERIFICATION_MODE_6": 6, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } ) @@ -1991,13 +1982,8 @@ var file_internal_proto_v2_s2a_s2a_proto_rawDesc = []byte{ 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, -<<<<<<< HEAD 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x9d, 0x06, 0x0a, 0x1f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, -======= - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xf4, - 0x05, 0x0a, 0x1f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x52, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, @@ -2031,11 +2017,7 @@ var file_internal_proto_v2_s2a_s2a_proto_rawDesc = []byte{ 0x6e, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x6e, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, -<<<<<<< HEAD 0x69, 0x63, 0x79, 0x22, 0xea, 0x01, 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, -======= - 0x69, 0x63, 0x79, 0x22, 0xc1, 0x01, 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, @@ -2047,7 +2029,6 @@ var file_internal_proto_v2_s2a_s2a_proto_rawDesc = []byte{ 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x34, 0x10, 0x04, 0x12, 0x27, 0x0a, 0x23, 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, 0x44, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, -<<<<<<< HEAD 0x4f, 0x44, 0x45, 0x5f, 0x35, 0x10, 0x05, 0x12, 0x27, 0x0a, 0x23, 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, 0x44, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x36, 0x10, 0x06, @@ -2185,143 +2166,6 @@ var file_internal_proto_v2_s2a_s2a_proto_rawDesc = []byte{ 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -======= - 0x4f, 0x44, 0x45, 0x5f, 0x35, 0x10, 0x05, 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, - 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xb2, 0x02, 0x0a, 0x20, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x6c, 0x0a, 0x11, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, - 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, - 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x32, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, - 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, - 0x78, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x3d, 0x0a, 0x10, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, - 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, - 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a, - 0x07, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x02, 0x22, 0xa0, 0x05, 0x0a, 0x0a, 0x53, - 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x3d, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, - 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, - 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, - 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x62, 0x0a, 0x19, 0x61, 0x75, 0x74, 0x68, - 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, - 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, - 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, - 0x73, 0x6d, 0x52, 0x18, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x61, 0x0a, 0x19, - 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, - 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, - 0x77, 0x0a, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, - 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x72, 0x65, 0x71, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, - 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1d, 0x6f, 0x66, 0x66, 0x6c, 0x6f, - 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x6f, 0x66, 0x66, - 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, - 0x71, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, - 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x20, 0x6f, 0x66, 0x66, 0x6c, 0x6f, - 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x7d, 0x0a, 0x23, 0x76, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, - 0x65, 0x71, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, - 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1f, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, - 0x71, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0xb4, 0x04, - 0x0a, 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a, - 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x64, 0x0a, 0x1a, 0x67, - 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, - 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 
0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x17, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x12, 0x7a, 0x0a, 0x22, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, - 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, - 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, - 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x1e, 0x6f, - 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, - 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x83, 0x01, - 0x0a, 0x25, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, - 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, - 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, - 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, - 0x52, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, - 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x48, 0x00, 0x52, 0x20, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, - 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, - 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x42, 0x0c, 0x0a, 0x0a, 0x72, 0x65, 0x73, 0x70, 0x5f, 0x6f, - 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0xa2, 0x03, 0x0a, 0x12, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x1c, 0x0a, 0x18, 0x53, - 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, - 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, - 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, - 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x21, 0x0a, 0x1d, - 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, - 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, - 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 
0x5f, - 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, - 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, - 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, - 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x27, 0x0a, 0x23, 0x53, - 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, - 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x33, 0x38, 0x34, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, - 0x38, 0x34, 0x10, 0x05, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, - 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x35, - 0x32, 0x31, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x06, 0x12, 0x24, 0x0a, - 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, - 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, - 0x36, 0x10, 0x07, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, - 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, - 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x08, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, - 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, - 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x09, 0x12, - 0x18, 0x0a, 0x14, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, - 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x0a, 0x32, 0x57, 0x0a, 0x0a, 0x53, 0x32, 0x41, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, - 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x71, 0x1a, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, - 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, - 0x30, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, - 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var ( diff --git a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go b/vendor/github.com/google/s2a-go/internal/v2/s2av2.go index 9430b5b360..0cc78547e9 100644 --- a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go +++ b/vendor/github.com/google/s2a-go/internal/v2/s2av2.go @@ -64,21 +64,13 @@ type s2av2TransportCreds struct { localIdentities []*commonpb.Identity verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode fallbackClientHandshake fallback.ClientHandshake -<<<<<<< HEAD getS2AStream stream.GetS2AStream -======= - getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) serverAuthorizationPolicy []byte } // NewClientCreds returns a client-side transport credentials object that uses // the S2Av2 to 
establish a secure connection with a server. -<<<<<<< HEAD func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentity *commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, fallbackClientHandshakeFunc fallback.ClientHandshake, getS2AStream stream.GetS2AStream, serverAuthorizationPolicy []byte) (credentials.TransportCredentials, error) { -======= -func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentity *commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, fallbackClientHandshakeFunc fallback.ClientHandshake, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error), serverAuthorizationPolicy []byte) (credentials.TransportCredentials, error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Create an AccessTokenManager instance to use to authenticate to S2Av2. accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() @@ -109,11 +101,7 @@ func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCre // NewServerCreds returns a server-side transport credentials object that uses // the S2Av2 to establish a secure connection with a client. -<<<<<<< HEAD func NewServerCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentities []*commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, getS2AStream stream.GetS2AStream) (credentials.TransportCredentials, error) { -======= -func NewServerCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentities []*commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (credentials.TransportCredentials, error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Create an AccessTokenManager instance to use to authenticate to S2Av2. 
accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() creds := &s2av2TransportCreds{ @@ -318,14 +306,9 @@ func NewClientTLSConfig( tokenManager tokenmanager.AccessTokenManager, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, serverName string, -<<<<<<< HEAD serverAuthorizationPolicy []byte, getStream stream.GetS2AStream) (*tls.Config, error) { s2AStream, err := createStream(ctx, s2av2Address, transportCreds, getStream) -======= - serverAuthorizationPolicy []byte) (*tls.Config, error) { - s2AStream, err := createStream(ctx, s2av2Address, transportCreds, nil) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { grpclog.Infof("Failed to connect to S2Av2: %v", err) return nil, err @@ -368,11 +351,7 @@ func (x s2AGrpcStream) CloseSend() error { return x.stream.CloseSend() } -<<<<<<< HEAD func createStream(ctx context.Context, s2av2Address string, transportCreds credentials.TransportCredentials, getS2AStream stream.GetS2AStream) (stream.S2AStream, error) { -======= -func createStream(ctx context.Context, s2av2Address string, transportCreds credentials.TransportCredentials, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (stream.S2AStream, error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if getS2AStream != nil { return getS2AStream(ctx, s2av2Address) } diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go index 54898f4ad6..6ca75f5608 100644 --- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go +++ b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go @@ -75,11 +75,7 @@ func GetTLSConfigurationForClient(serverHostname string, s2AStream stream.S2AStr return nil, fmt.Errorf("failed to get TLS configuration from S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) } -<<<<<<< HEAD // Extract TLS configuration from SessionResp. -======= - // Extract TLS configiguration from SessionResp. 
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) tlsConfig := resp.GetGetTlsConfigurationResp().GetClientTlsConfiguration() var cert tls.Certificate diff --git a/vendor/github.com/google/s2a-go/s2a.go b/vendor/github.com/google/s2a-go/s2a.go index 139bb2d025..c52fccddf8 100644 --- a/vendor/github.com/google/s2a-go/s2a.go +++ b/vendor/github.com/google/s2a-go/s2a.go @@ -35,10 +35,7 @@ import ( "github.com/google/s2a-go/internal/tokenmanager" "github.com/google/s2a-go/internal/v2" "github.com/google/s2a-go/retry" -<<<<<<< HEAD "github.com/google/s2a-go/stream" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/protobuf/proto" @@ -334,10 +331,7 @@ func NewTLSClientConfigFactory(opts *ClientOptions) (TLSClientConfigFactory, err tokenManager: nil, verificationMode: getVerificationMode(opts.VerificationMode), serverAuthorizationPolicy: opts.serverAuthorizationPolicy, -<<<<<<< HEAD getStream: opts.getS2AStream, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }, nil } return &s2aTLSClientConfigFactory{ @@ -346,10 +340,7 @@ func NewTLSClientConfigFactory(opts *ClientOptions) (TLSClientConfigFactory, err tokenManager: tokenManager, verificationMode: getVerificationMode(opts.VerificationMode), serverAuthorizationPolicy: opts.serverAuthorizationPolicy, -<<<<<<< HEAD getStream: opts.getS2AStream, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }, nil } @@ -359,10 +350,7 @@ type s2aTLSClientConfigFactory struct { tokenManager tokenmanager.AccessTokenManager verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode serverAuthorizationPolicy []byte -<<<<<<< HEAD getStream stream.GetS2AStream -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (f *s2aTLSClientConfigFactory) Build( @@ -371,11 +359,7 @@ func (f *s2aTLSClientConfigFactory) Build( if opts != nil && opts.ServerName != "" { serverName = opts.ServerName } -<<<<<<< HEAD return v2.NewClientTLSConfig(ctx, f.s2av2Address, f.transportCreds, f.tokenManager, f.verificationMode, serverName, f.serverAuthorizationPolicy, f.getStream) -======= - return v2.NewClientTLSConfig(ctx, f.s2av2Address, f.transportCreds, f.tokenManager, f.verificationMode, serverName, f.serverAuthorizationPolicy) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func getVerificationMode(verificationMode VerificationModeType) s2av2pb.ValidatePeerCertificateChainReq_VerificationMode { @@ -390,11 +374,8 @@ func getVerificationMode(verificationMode VerificationModeType) s2av2pb.Validate return s2av2pb.ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_4 case ReservedCustomVerificationMode5: return s2av2pb.ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_5 -<<<<<<< HEAD case ReservedCustomVerificationMode6: return s2av2pb.ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_6 -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: return s2av2pb.ValidatePeerCertificateChainReq_UNSPECIFIED } diff --git a/vendor/github.com/google/s2a-go/s2a_options.go b/vendor/github.com/google/s2a-go/s2a_options.go index c5c35e935b..b7a277f9e3 100644 --- a/vendor/github.com/google/s2a-go/s2a_options.go +++ b/vendor/github.com/google/s2a-go/s2a_options.go @@ -19,10 +19,6 @@ package s2a import ( -<<<<<<< HEAD -======= - "context" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "crypto/tls" "errors" "sync" @@ -31,11 +27,7 @@ import ( 
"github.com/google/s2a-go/stream" "google.golang.org/grpc/credentials" -<<<<<<< HEAD s2av1pb "github.com/google/s2a-go/internal/proto/common_go_proto" -======= - s2apbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) s2apb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" ) @@ -43,7 +35,6 @@ import ( type Identity interface { // Name returns the name of the identity. Name() string -<<<<<<< HEAD Attributes() map[string]string } @@ -55,8 +46,6 @@ func (u *UnspecifiedID) Name() string { return "" } func (u *UnspecifiedID) Attributes() map[string]string { return u.Attr -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type spiffeID struct { @@ -65,17 +54,10 @@ type spiffeID struct { func (s *spiffeID) Name() string { return s.spiffeID } -<<<<<<< HEAD func (spiffeID) Attributes() map[string]string { return nil } // NewSpiffeID creates a SPIFFE ID from id. func NewSpiffeID(id string) Identity { return &spiffeID{spiffeID: id} } -======= -// NewSpiffeID creates a SPIFFE ID from id. -func NewSpiffeID(id string) Identity { - return &spiffeID{spiffeID: id} -} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type hostname struct { hostname string @@ -83,17 +65,10 @@ type hostname struct { func (h *hostname) Name() string { return h.hostname } -<<<<<<< HEAD func (hostname) Attributes() map[string]string { return nil } // NewHostname creates a hostname from name. func NewHostname(name string) Identity { return &hostname{hostname: name} } -======= -// NewHostname creates a hostname from name. -func NewHostname(name string) Identity { - return &hostname{hostname: name} -} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type uid struct { uid string @@ -101,17 +76,10 @@ type uid struct { func (h *uid) Name() string { return h.uid } -<<<<<<< HEAD func (uid) Attributes() map[string]string { return nil } // NewUID creates a UID from name. func NewUID(name string) Identity { return &uid{uid: name} } -======= -// NewUID creates a UID from name. -func NewUID(name string) Identity { - return &uid{uid: name} -} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // VerificationModeType specifies the mode that S2A must use to verify the peer // certificate chain. @@ -125,10 +93,7 @@ const ( ReservedCustomVerificationMode3 ReservedCustomVerificationMode4 ReservedCustomVerificationMode5 -<<<<<<< HEAD ReservedCustomVerificationMode6 -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // ClientOptions contains the client-side options used to establish a secure @@ -183,11 +148,7 @@ type ClientOptions struct { FallbackOpts *FallbackOptions // Generates an S2AStream interface for talking to the S2A server. -<<<<<<< HEAD getS2AStream stream.GetS2AStream -======= - getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Serialized user specified policy for server authorization. serverAuthorizationPolicy []byte @@ -241,11 +202,7 @@ type ServerOptions struct { VerificationMode VerificationModeType // Generates an S2AStream interface for talking to the S2A server. -<<<<<<< HEAD getS2AStream stream.GetS2AStream -======= - getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // DefaultServerOptions returns the default server options. 
@@ -256,17 +213,12 @@ func DefaultServerOptions(s2aAddress string) *ServerOptions { } } -<<<<<<< HEAD func toProtoIdentity(identity Identity) (*s2av1pb.Identity, error) { -======= -func toProtoIdentity(identity Identity) (*s2apbv1.Identity, error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if identity == nil { return nil, nil } switch id := identity.(type) { case *spiffeID: -<<<<<<< HEAD return &s2av1pb.Identity{ IdentityOneof: &s2av1pb.Identity_SpiffeId{SpiffeId: id.Name()}, Attributes: id.Attributes(), @@ -285,13 +237,6 @@ func toProtoIdentity(identity Identity) (*s2apbv1.Identity, error) { return &s2av1pb.Identity{ Attributes: id.Attributes(), }, nil -======= - return &s2apbv1.Identity{IdentityOneof: &s2apbv1.Identity_SpiffeId{SpiffeId: id.Name()}}, nil - case *hostname: - return &s2apbv1.Identity{IdentityOneof: &s2apbv1.Identity_Hostname{Hostname: id.Name()}}, nil - case *uid: - return &s2apbv1.Identity{IdentityOneof: &s2apbv1.Identity_Uid{Uid: id.Name()}}, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: return nil, errors.New("unrecognized identity type") } @@ -303,7 +248,6 @@ func toV2ProtoIdentity(identity Identity) (*s2apb.Identity, error) { } switch id := identity.(type) { case *spiffeID: -<<<<<<< HEAD return &s2apb.Identity{ IdentityOneof: &s2apb.Identity_SpiffeId{SpiffeId: id.Name()}, Attributes: id.Attributes(), @@ -322,13 +266,6 @@ func toV2ProtoIdentity(identity Identity) (*s2apb.Identity, error) { return &s2apb.Identity{ Attributes: id.Attributes(), }, nil -======= - return &s2apb.Identity{IdentityOneof: &s2apb.Identity_SpiffeId{SpiffeId: id.Name()}}, nil - case *hostname: - return &s2apb.Identity{IdentityOneof: &s2apb.Identity_Hostname{Hostname: id.Name()}}, nil - case *uid: - return &s2apb.Identity{IdentityOneof: &s2apb.Identity_Uid{Uid: id.Name()}}, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: return nil, errors.New("unrecognized identity type") } diff --git a/vendor/github.com/google/s2a-go/stream/s2a_stream.go b/vendor/github.com/google/s2a-go/stream/s2a_stream.go index 7f1d232dd7..ae2d5eb4c1 100644 --- a/vendor/github.com/google/s2a-go/stream/s2a_stream.go +++ b/vendor/github.com/google/s2a-go/stream/s2a_stream.go @@ -20,11 +20,8 @@ package stream import ( -<<<<<<< HEAD "context" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" ) @@ -37,9 +34,6 @@ type S2AStream interface { // Closes the channel to the S2A server. CloseSend() error } -<<<<<<< HEAD // GetS2AStream type is for generating an S2AStream interface for talking to the S2A server. 
type GetS2AStream func(ctx context.Context, s2av2Address string, opts ...string) (S2AStream, error) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json index 6c74a69143..a8c082dd61 100644 --- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -1,7 +1,3 @@ { -<<<<<<< HEAD "v2": "2.14.1" -======= - "v2": "2.14.0" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md index 45f6146368..17cced15ec 100644 --- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -1,6 +1,5 @@ # Changelog -<<<<<<< HEAD ## [2.14.1](https://github.com/googleapis/gax-go/compare/v2.14.0...v2.14.1) (2024-12-19) @@ -13,8 +12,6 @@ * fix godoc to refer to the proper envvar ([#387](https://github.com/googleapis/gax-go/issues/387)) ([dc6baf7](https://github.com/googleapis/gax-go/commit/dc6baf75c1a737233739630b5af6c9759f08abcd)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## [2.14.0](https://github.com/googleapis/gax-go/compare/v2.13.0...v2.14.0) (2024-11-13) diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go index d0b40e56b2..2b284a24a4 100644 --- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go +++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -30,8 +30,4 @@ package internal // Version is the current tagged release of the library. -<<<<<<< HEAD const Version = "2.14.1" -======= -const Version = "2.14.0" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/googleapis/gax-go/v2/internallog/internallog.go b/vendor/github.com/googleapis/gax-go/v2/internallog/internallog.go index fb696a3220..e47ab32acc 100644 --- a/vendor/github.com/googleapis/gax-go/v2/internallog/internallog.go +++ b/vendor/github.com/googleapis/gax-go/v2/internallog/internallog.go @@ -44,11 +44,7 @@ import ( // New returns a new [slog.Logger] default logger, or the provided logger if // non-nil. The returned logger will be a no-op logger unless the environment -<<<<<<< HEAD // variable GOOGLE_SDK_GO_LOGGING_LEVEL is set. -======= -// variable GOOGLE_SDK_DEBUG_LOGGING is set. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func New(l *slog.Logger) *slog.Logger { if l != nil { return l diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go index ee799210ca..2e50082ad1 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go @@ -94,11 +94,7 @@ func Int64(val string) (int64, error) { } // Int64Slice converts 'val' where individual integers are separated by -<<<<<<< HEAD // 'sep' into an int64 slice. -======= -// 'sep' into a int64 slice. 
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func Int64Slice(val, sep string) ([]int64, error) {
 	s := strings.Split(val, sep)
 	values := make([]int64, len(s))
@@ -122,11 +118,7 @@ func Int32(val string) (int32, error) {
 }
 
 // Int32Slice converts 'val' where individual integers are separated by
-<<<<<<< HEAD
 // 'sep' into an int32 slice.
-=======
-// 'sep' into a int32 slice.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func Int32Slice(val, sep string) ([]int32, error) {
 	s := strings.Split(val, sep)
 	values := make([]int32, len(s))
@@ -198,11 +190,7 @@ func Bytes(val string) ([]byte, error) {
 }
 
 // BytesSlice converts 'val' where individual bytes sequences, encoded in URL-safe
-<<<<<<< HEAD
 // base64 without padding, are separated by 'sep' into a slice of byte slices.
-=======
-// base64 without padding, are separated by 'sep' into a slice of bytes slices slice.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func BytesSlice(val, sep string) ([][]byte, error) {
 	s := strings.Split(val, sep)
 	values := make([][]byte, len(s))
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
index 355de3293a..41cd4f5030 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
@@ -81,7 +81,6 @@ func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.R
 	mux.errorHandler(ctx, mux, marshaler, w, r, err)
 }
 
-<<<<<<< HEAD
 // HTTPStreamError uses the mux-configured stream error handler to notify error to the client without closing the connection.
 func HTTPStreamError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
 	st := mux.streamErrorHandler(ctx, err)
@@ -97,8 +96,6 @@ func HTTPStreamError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w
 	}
 }
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // DefaultHTTPErrorHandler is the default error handler.
 // If "err" is a gRPC Status, the function replies with the status code mapped by HTTPStatusFromCode.
 // If "err" is a HTTPStatusError, the function replies with the status code provide by that struct. This is
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
index b03924697e..2fcd7af3c4 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
@@ -155,11 +155,7 @@ func buildPathsBlindly(name string, in interface{}) []string {
 	return paths
 }
 
-<<<<<<< HEAD
 // fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask
-=======
-// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 type fieldMaskPathItem struct {
 	// the list of prior fields leading up to node connected by dots
 	path string
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
index fa156307fd..0fa9076566 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
@@ -64,7 +64,6 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal
 	}
 
 	if !wroteHeader {
-<<<<<<< HEAD
 		var contentType string
 		if sct, ok := marshaler.(StreamContentType); ok {
 			contentType = sct.StreamContentType(respRw)
@@ -72,9 +71,6 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal
 			contentType = marshaler.ContentType(respRw)
 		}
 		w.Header().Set("Content-Type", contentType)
-=======
-		w.Header().Set("Content-Type", marshaler.ContentType(respRw))
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 
 	var buf []byte
@@ -204,11 +200,7 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha
 		w.Header().Set("Content-Length", strconv.Itoa(len(buf)))
 	}
 
-<<<<<<< HEAD
 	if _, err = w.Write(buf); err != nil && !errors.Is(err, http.ErrBodyNotAllowed) {
-=======
-	if _, err = w.Write(buf); err != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		grpclog.Errorf("Failed to write response: %v", err)
 	}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go
index 0cd66b13bc..b1dfc37af9 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go
@@ -48,7 +48,6 @@ type Delimited interface {
 	// Delimiter returns the record separator for the stream.
 	Delimiter() []byte
 }
-<<<<<<< HEAD
 
 // StreamContentType defines the streaming content type.
 type StreamContentType interface {
@@ -57,5 +56,3 @@ type StreamContentType interface {
 	// in the case of a streamed response.
 	StreamContentType(v interface{}) string
 }
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go
index f8c366e023..07c28112c8 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go
@@ -86,13 +86,8 @@ func (m marshalerRegistry) add(mime string, marshaler Marshaler) error {
 // It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces.
// // For example, you could allow the client to specify the use of the runtime.JSONPb marshaler -<<<<<<< HEAD // with an "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler // with an "application/json" Content-Type. -======= -// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler -// with a "application/json" Content-Type. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // "*" can be used to match any Content-Type. // This can be attached to a ServerMux with the marshaler option. func makeMarshalerMIMERegistry() marshalerRegistry { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go index 5a6897fd5d..f710036b35 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go @@ -40,11 +40,7 @@ func Float32P(val string) (*float32, error) { } // Int64P parses the given string representation of an integer -<<<<<<< HEAD // and returns a pointer to an int64 whose value is same as the parsed integer. -======= -// and returns a pointer to a int64 whose value is same as the parsed integer. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func Int64P(val string) (*int64, error) { i, err := Int64(val) if err != nil { @@ -54,11 +50,7 @@ func Int64P(val string) (*int64, error) { } // Int32P parses the given string representation of an integer -<<<<<<< HEAD // and returns a pointer to an int32 whose value is same as the parsed integer. -======= -// and returns a pointer to a int32 whose value is same as the parsed integer. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func Int32P(val string) (*int32, error) { i, err := Int32(val) if err != nil { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go index 8fa2f66bef..0a1ca7e06f 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go @@ -141,11 +141,7 @@ func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []strin } // Check if oneof already set -<<<<<<< HEAD if of := fieldDescriptor.ContainingOneof(); of != nil && !of.IsSynthetic() { -======= - if of := fieldDescriptor.ContainingOneof(); of != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if f := msgValue.WhichOneof(of); f != nil { return fmt.Errorf("field already set for oneof %q", of.FullName().Name()) } @@ -295,15 +291,11 @@ func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (p if err != nil { return protoreflect.Value{}, err } -<<<<<<< HEAD timestamp := timestamppb.New(t) if ok := timestamp.IsValid(); !ok { return protoreflect.Value{}, fmt.Errorf("%s before 0001-01-01", value) } msg = timestamp -======= - msg = timestamppb.New(t) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) case "google.protobuf.Duration": d, err := time.ParseDuration(value) if err != nil { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go index 2844a697aa..38ca39cc53 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go @@ -1,10 +1,6 @@ package 
utilities -<<<<<<< HEAD // OpCode is an opcode of compiled path patterns. -======= -// An OpCode is a opcode of compiled path patterns. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type OpCode int // These constants are the valid values of OpCode. diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go index 120cdae689..66aa5f2dcc 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go @@ -5,11 +5,7 @@ import ( "strings" ) -<<<<<<< HEAD // flagInterface is a cut down interface to `flag` -======= -// flagInterface is an cut down interface to `flag` ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type flagInterface interface { Var(value flag.Value, name string, usage string) } diff --git a/vendor/github.com/in-toto/archivista/pkg/api/client.go b/vendor/github.com/in-toto/archivista/pkg/api/client.go deleted file mode 100644 index bab00e2058..0000000000 --- a/vendor/github.com/in-toto/archivista/pkg/api/client.go +++ /dev/null @@ -1,126 +0,0 @@ -// client.go -package api - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "io" - "net/http" - "net/url" - - "github.com/in-toto/go-witness/dsse" -) - -// Client wraps HTTP calls to an Archivista service. -type Client struct { - baseURL string - httpClient *http.Client -} - -// NewClient creates a new Archivista API client. -func NewClient(baseURL string) (*Client, error) { - // Validate baseURL. - _, err := url.ParseRequestURI(baseURL) - if err != nil { - return nil, err - } - return &Client{ - baseURL: baseURL, - httpClient: &http.Client{}, - }, nil -} - -// UploadResponse represents the response from Archivista after a successful upload. - -// UploadDSSE uploads a DSSE envelope to Archivista. -// Note that this method now accepts a dsse.Envelope rather than a pointer to an UploadRequest. -func (c *Client) UploadDSSE(ctx context.Context, envelope dsse.Envelope) (*UploadResponse, error) { - uploadURL, err := url.JoinPath(c.baseURL, "upload") - if err != nil { - return nil, err - } - - bodyBytes, err := json.Marshal(envelope) - if err != nil { - return nil, err - } - - req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadURL, bytes.NewReader(bodyBytes)) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/json") - - resp, err := c.httpClient.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - respBytes, err := io.ReadAll(resp.Body) - if err != nil { - return nil, err - } - if resp.StatusCode != http.StatusOK { - return nil, errors.New(string(respBytes)) - } - - var uploadResp UploadResponse - if err := json.Unmarshal(respBytes, &uploadResp); err != nil { - return nil, err - } - return &uploadResp, nil -} - -// Artifact represents a retrieved artifact from Archivista. -type Artifact struct { - Payload []byte `json:"payload"` - Signature []byte `json:"signature"` -} - -// GetArtifact retrieves a DSSE envelope by key from Archivista, -// decodes it as a dsse.Envelope, and converts it into an Artifact. -// It uses the envelope's payload and (if available) the first signature. 
-func (c *Client) GetArtifact(ctx context.Context, key string) (*Artifact, error) { - downloadURL, err := url.JoinPath(c.baseURL, "download", key) - if err != nil { - return nil, err - } - - req, err := http.NewRequestWithContext(ctx, http.MethodGet, downloadURL, nil) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/json") - - resp, err := c.httpClient.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - respBytes, _ := io.ReadAll(resp.Body) - return nil, errors.New(string(respBytes)) - } - - // Decode the response into a DSSE envelope. - var envelope dsse.Envelope - if err := json.NewDecoder(resp.Body).Decode(&envelope); err != nil { - return nil, err - } - - // Ensure that at least one signature exists. - if len(envelope.Signatures) == 0 { - return nil, errors.New("no signatures in DSSE envelope") - } - - // Create an Artifact using the envelope's payload and the first signature. - artifact := &Artifact{ - Payload: envelope.Payload, - Signature: envelope.Signatures[0].Signature, - } - return artifact, nil -} diff --git a/vendor/github.com/in-toto/archivista/pkg/api/download.go b/vendor/github.com/in-toto/archivista/pkg/api/download.go index 12db87f07f..df76ae6918 100644 --- a/vendor/github.com/in-toto/archivista/pkg/api/download.go +++ b/vendor/github.com/in-toto/archivista/pkg/api/download.go @@ -1,8 +1,4 @@ -<<<<<<< HEAD -// Copyright 2023 The Witness Contributors -======= // Copyright 2023-2024 The Witness Contributors ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -30,11 +26,6 @@ import ( "github.com/in-toto/go-witness/dsse" ) -<<<<<<< HEAD -func Download(ctx context.Context, baseUrl string, gitoid string) (dsse.Envelope, error) { - buf := &bytes.Buffer{} - if err := DownloadWithWriter(ctx, baseUrl, gitoid, buf); err != nil { -======= func DownloadReadCloser(ctx context.Context, baseURL string, gitoid string) (io.ReadCloser, error) { return DownloadReadCloserWithHTTPClient(ctx, &http.Client{}, baseURL, gitoid) } @@ -69,7 +60,6 @@ func DownloadReadCloserWithHTTPClient(ctx context.Context, client *http.Client, func Download(ctx context.Context, baseURL string, gitoid string) (dsse.Envelope, error) { buf := &bytes.Buffer{} if err := DownloadWithWriter(ctx, baseURL, gitoid, buf); err != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return dsse.Envelope{}, err } @@ -82,26 +72,17 @@ func Download(ctx context.Context, baseURL string, gitoid string) (dsse.Envelope return env, nil } -<<<<<<< HEAD -func DownloadWithWriter(ctx context.Context, baseUrl, gitoid string, dst io.Writer) error { - downloadUrl, err := url.JoinPath(baseUrl, "download", gitoid) -======= func DownloadWithWriter(ctx context.Context, baseURL string, gitoid string, dst io.Writer) error { return DownloadWithWriterWithHTTPClient(ctx, &http.Client{}, baseURL, gitoid, dst) } func DownloadWithWriterWithHTTPClient(ctx context.Context, client *http.Client, baseURL string, gitoid string, dst io.Writer) error { downloadUrl, err := url.JoinPath(baseURL, "download", gitoid) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return err } -<<<<<<< HEAD - req, err := http.NewRequestWithContext(ctx, "GET", downloadUrl, nil) -======= req, err := http.NewRequestWithContext(ctx, http.MethodGet, downloadUrl, nil) ->>>>>>> 70e0318b1 ([WIP] add archivista 
storage backend) if err != nil { return err } diff --git a/vendor/github.com/in-toto/archivista/pkg/api/graphql.go b/vendor/github.com/in-toto/archivista/pkg/api/graphql.go index e7023cd6bf..a7f29d237c 100644 --- a/vendor/github.com/in-toto/archivista/pkg/api/graphql.go +++ b/vendor/github.com/in-toto/archivista/pkg/api/graphql.go @@ -1,8 +1,4 @@ -<<<<<<< HEAD -// Copyright 2023 The Witness Contributors -======= // Copyright 2023-2024 The Witness Contributors ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -29,23 +25,6 @@ import ( "net/url" ) -<<<<<<< HEAD -type graphQLError struct { - Message string `json:"message"` -} - -type graphQLResponse[T any] struct { - Data T `json:"data,omitempty"` - Errors []graphQLError `json:"errors,omitempty"` -} - -type graphQLRequestBody[TVars any] struct { - Query string `json:"query"` - Variables TVars `json:"variables,omitempty"` -} - -func GraphQlQuery[TRes any, TVars any](ctx context.Context, baseUrl, query string, vars TVars) (TRes, error) { -======= const RetrieveSubjectsQuery = `query($gitoid: String!) { subjects( where: { @@ -102,18 +81,13 @@ func GraphQlQuery[TRes any, TVars any](ctx context.Context, baseUrl, query strin } func GraphQlQueryWithHeaders[TRes any, TVars any](ctx context.Context, baseUrl, query string, vars TVars, headers map[string]string) (TRes, error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var response TRes queryUrl, err := url.JoinPath(baseUrl, "query") if err != nil { return response, err } -<<<<<<< HEAD - requestBody := graphQLRequestBody[TVars]{ -======= requestBody := GraphQLRequestBodyGeneric[TVars]{ ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Query: query, Variables: vars, } @@ -123,22 +97,15 @@ func GraphQlQueryWithHeaders[TRes any, TVars any](ctx context.Context, baseUrl, return response, err } -<<<<<<< HEAD - req, err := http.NewRequestWithContext(ctx, "POST", queryUrl, bytes.NewReader(reqBody)) -======= req, err := http.NewRequestWithContext(ctx, http.MethodPost, queryUrl, bytes.NewReader(reqBody)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return response, err } -<<<<<<< HEAD -======= for k, v := range headers { req.Header.Set(k, v) } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) req.Header.Set("Content-Type", "application/json") hc := &http.Client{} res, err := hc.Do(req) @@ -157,11 +124,7 @@ func GraphQlQueryWithHeaders[TRes any, TVars any](ctx context.Context, baseUrl, } dec := json.NewDecoder(res.Body) -<<<<<<< HEAD - gqlRes := graphQLResponse[TRes]{} -======= gqlRes := GraphQLResponseGeneric[TRes]{} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err := dec.Decode(&gqlRes); err != nil { return response, err } diff --git a/vendor/github.com/in-toto/archivista/pkg/api/upload.go b/vendor/github.com/in-toto/archivista/pkg/api/upload.go index 83ffca0144..3bf9934420 100644 --- a/vendor/github.com/in-toto/archivista/pkg/api/upload.go +++ b/vendor/github.com/in-toto/archivista/pkg/api/upload.go @@ -1,8 +1,4 @@ -<<<<<<< HEAD -// Copyright 2023 The Witness Contributors -======= // Copyright 2023-2024 The Archivista Contributors ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
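With the conflicts resolved in favor of the newer vendored API, the archivista api package exposes Store/Upload for writes, Download/DownloadReadCloser for reads, and ...WithHTTPClient variants that accept a caller-supplied *http.Client. A minimal round-trip sketch using the signatures visible in these hunks; the endpoint URL is hypothetical, and the Gitoid response field name follows the upstream package (an assumption, since UploadResponse's fields are elided here):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/in-toto/archivista/pkg/api"
	"github.com/in-toto/go-witness/dsse"
)

func main() {
	ctx := context.Background()
	base := "http://localhost:8082" // hypothetical Archivista endpoint

	var env dsse.Envelope // a signed attestation envelope, produced elsewhere

	// Store POSTs the envelope to <base>/upload; Upload remains as a
	// deprecated alias until v0.6.0, per the upload.go hunk that follows.
	resp, err := api.Store(ctx, base, env)
	if err != nil {
		log.Fatal(err)
	}

	// Download retrieves the envelope again from <base>/download/<gitoid>.
	got, err := api.Download(ctx, base, resp.Gitoid) // Gitoid: assumed field name
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("round-tripped envelope with %d signature(s)\n", len(got.Signatures))
}
```

For queries, the resolved graphql.go keeps the generic GraphQlQuery helper, adds GraphQlQueryWithHeaders for per-request headers, and ships RetrieveSubjectsQuery as a ready-made query string.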
@@ -37,39 +33,18 @@ type UploadResponse struct { // Deprecated: Use UploadResponse instead. It will be removed in version >= v0.6.0 type StoreResponse = UploadResponse -<<<<<<< HEAD -// Deprecated: Use Upload instead. It will be removed in version >= v0.6.0 -func Store(ctx context.Context, baseUrl string, envelope dsse.Envelope) (StoreResponse, error) { - return Upload(ctx, baseUrl, envelope) -} - -func Upload(ctx context.Context, baseUrl string, envelope dsse.Envelope) (StoreResponse, error) { -======= // Deprecated: Use Store instead. It will be removed in version >= v0.6.0 func Upload(ctx context.Context, baseURL string, envelope dsse.Envelope) (UploadResponse, error) { return Store(ctx, baseURL, envelope) } func Store(ctx context.Context, baseURL string, envelope dsse.Envelope) (StoreResponse, error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) buf := &bytes.Buffer{} enc := json.NewEncoder(buf) if err := enc.Encode(envelope); err != nil { return StoreResponse{}, err } -<<<<<<< HEAD - return UploadWithReader(ctx, baseUrl, buf) -} - -// Deprecated: Use UploadWithReader instead. It will be removed in version >= v0.6.0 -func StoreWithReader(ctx context.Context, baseUrl string, r io.Reader) (StoreResponse, error) { - return UploadWithReader(ctx, baseUrl, r) -} - -func UploadWithReader(ctx context.Context, baseUrl string, r io.Reader) (StoreResponse, error) { - uploadPath, err := url.JoinPath(baseUrl, "upload") -======= return StoreWithReader(ctx, baseURL, buf) } @@ -79,7 +54,6 @@ func StoreWithReader(ctx context.Context, baseURL string, r io.Reader) (StoreRes func StoreWithReaderWithHTTPClient(ctx context.Context, client *http.Client, baseURL string, r io.Reader) (StoreResponse, error) { uploadPath, err := url.JoinPath(baseURL, "upload") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return UploadResponse{}, err } diff --git a/vendor/github.com/jjti/go-spancheck/.gitignore b/vendor/github.com/jjti/go-spancheck/.gitignore index 0662f46ec6..04b66d911b 100644 --- a/vendor/github.com/jjti/go-spancheck/.gitignore +++ b/vendor/github.com/jjti/go-spancheck/.gitignore @@ -17,8 +17,5 @@ # Dependency directories (remove the comment below to include it) # vendor/ src/ -<<<<<<< HEAD -.vscode -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) +.vscode \ No newline at end of file diff --git a/vendor/github.com/jjti/go-spancheck/.golangci.yml b/vendor/github.com/jjti/go-spancheck/.golangci.yml index 7daf588696..5d6ab12875 100644 --- a/vendor/github.com/jjti/go-spancheck/.golangci.yml +++ b/vendor/github.com/jjti/go-spancheck/.golangci.yml @@ -17,10 +17,6 @@ linters: - errcheck - errname - errorlint -<<<<<<< HEAD -======= - - exhaustive # checks exhaustiveness of enum switch statements ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) - exportloopref # checks for pointers to enclosing loop variables - gci - gochecknoinits # checks that no init functions are present in Go code @@ -62,15 +58,6 @@ linters-settings: - standard # Standard section: captures all standard packages. - default # Default section: contains all imports that could not be matched to another section type. - prefix(github.com/jjti) -<<<<<<< HEAD -======= - exhaustive: - # Program elements to check for exhaustiveness. 
- # Default: [ switch ] - check: - - switch - - map ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) gocritic: settings: captLocal: diff --git a/vendor/github.com/jjti/go-spancheck/go.work b/vendor/github.com/jjti/go-spancheck/go.work index 9394068d30..ff04ca17e2 100644 --- a/vendor/github.com/jjti/go-spancheck/go.work +++ b/vendor/github.com/jjti/go-spancheck/go.work @@ -1,8 +1,4 @@ -<<<<<<< HEAD go 1.22.1 -======= -go 1.20 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) use ( . diff --git a/vendor/github.com/jjti/go-spancheck/go.work.sum b/vendor/github.com/jjti/go-spancheck/go.work.sum index ea65c356d4..c96d590d61 100644 --- a/vendor/github.com/jjti/go-spancheck/go.work.sum +++ b/vendor/github.com/jjti/go-spancheck/go.work.sum @@ -1,6 +1,5 @@ github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= -<<<<<<< HEAD golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= @@ -10,7 +9,3 @@ golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0= -======= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/jjti/go-spancheck/spancheck.go b/vendor/github.com/jjti/go-spancheck/spancheck.go index 20375c8e4e..49e5817285 100644 --- a/vendor/github.com/jjti/go-spancheck/spancheck.go +++ b/vendor/github.com/jjti/go-spancheck/spancheck.go @@ -309,14 +309,11 @@ outer: } seen[b] = true -<<<<<<< HEAD // Skip successors that are not nested within this current block. if _, ok := nestedBlockTypes[b.Kind]; !ok { continue } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Prune the search if the block uses v. if blockUses(pass, b) { continue @@ -338,7 +335,6 @@ outer: return search(defBlock.Succs) } -<<<<<<< HEAD var nestedBlockTypes = map[cfg.BlockKind]struct{}{ cfg.KindBody: {}, cfg.KindForBody: {}, @@ -354,8 +350,6 @@ var nestedBlockTypes = map[cfg.BlockKind]struct{}{ cfg.KindSwitchNextCase: {}, } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // usesCall reports whether stmts contain a use of the selName call on variable v. func usesCall( pass *analysis.Pass, @@ -366,19 +360,12 @@ func usesCall( startSpanMatchers []spanStartMatcher, depth int, ) bool { -<<<<<<< HEAD if depth > 1 { // for perf reasons, do not dive too deep thru func literals, just two levels deep. return false } cfgs := pass.ResultOf[ctrlflow.Analyzer].(*ctrlflow.CFGs) -======= - if depth > 1 { // for perf reasons, do not dive too deep thru func literals, just one level deep check. 
- return false - } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) found, reAssigned := false, false for _, subStmt := range stmts { stack := []ast.Node{} @@ -386,10 +373,6 @@ func usesCall( switch n := n.(type) { case *ast.FuncLit: if len(stack) > 0 { -<<<<<<< HEAD -======= - cfgs := pass.ResultOf[ctrlflow.Analyzer].(*ctrlflow.CFGs) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) g := cfgs.FuncLit(n) if g != nil && len(g.Blocks) > 0 { return usesCall(pass, g.Blocks[0].Nodes, sv, selName, ignoreCheckSig, startSpanMatchers, depth+1) @@ -405,7 +388,6 @@ func usesCall( return false } } -<<<<<<< HEAD case *ast.DeferStmt: if n.Call == nil { break @@ -432,8 +414,6 @@ func usesCall( } } } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) case nil: if len(stack) > 0 { stack = stack[:len(stack)-1] // pop diff --git a/vendor/github.com/julz/importas/analyzer.go b/vendor/github.com/julz/importas/analyzer.go index 302671b235..25bc09b82f 100644 --- a/vendor/github.com/julz/importas/analyzer.go +++ b/vendor/github.com/julz/importas/analyzer.go @@ -13,11 +13,7 @@ import ( ) var config = &Config{ -<<<<<<< HEAD RequiredAlias: make([][]string, 0), -======= - RequiredAlias: make(map[string]string), ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var Analyzer = &analysis.Analyzer{ @@ -133,7 +129,6 @@ func findEdits(node ast.Node, uses map[*ast.Ident]types.Object, importPath, orig // skip identifiers pointing to a different import statement. continue } -<<<<<<< HEAD pos := use.Pos() end := use.End() replacement := packageReplacement @@ -147,13 +142,6 @@ func findEdits(node ast.Node, uses map[*ast.Ident]types.Object, importPath, orig Pos: pos, End: end, NewText: []byte(replacement), -======= - - result = append(result, analysis.TextEdit{ - Pos: use.Pos(), - End: use.End(), - NewText: []byte(packageReplacement), ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }) } diff --git a/vendor/github.com/julz/importas/config.go b/vendor/github.com/julz/importas/config.go index d44cf23047..58be86c75f 100644 --- a/vendor/github.com/julz/importas/config.go +++ b/vendor/github.com/julz/importas/config.go @@ -4,7 +4,6 @@ import ( "errors" "fmt" "regexp" -<<<<<<< HEAD "sync" ) @@ -25,20 +24,6 @@ func (c *Config) CompileRegexp() error { rules := make([]*Rule, 0, len(c.RequiredAlias)) for _, aliases := range c.RequiredAlias { path, alias := aliases[0], aliases[1] -======= -) - -type Config struct { - RequiredAlias map[string]string - Rules []*Rule - DisallowUnaliased bool - DisallowExtraAliases bool -} - -func (c *Config) CompileRegexp() error { - rules := make([]*Rule, 0, len(c.RequiredAlias)) - for path, alias := range c.RequiredAlias { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) reg, err := regexp.Compile(fmt.Sprintf("^%s$", path)) if err != nil { return err @@ -49,23 +34,15 @@ func (c *Config) CompileRegexp() error { Alias: alias, }) } -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.Rules = rules return nil } func (c *Config) findRule(path string) *Rule { -<<<<<<< HEAD c.muRules.Lock() rules := c.Rules c.muRules.Unlock() for _, rule := range rules { -======= - for _, rule := range c.Rules { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if rule.Regexp.MatchString(path) { return rule } diff --git a/vendor/github.com/julz/importas/flags.go b/vendor/github.com/julz/importas/flags.go index e6a2d5ef58..cc3f1f3aae 100644 --- a/vendor/github.com/julz/importas/flags.go +++ 
b/vendor/github.com/julz/importas/flags.go @@ -7,23 +7,16 @@ import ( "strings" ) -<<<<<<< HEAD var errWrongAlias = errors.New("import flag must be of form path:alias") func flags(config *Config) flag.FlagSet { fs := flag.FlagSet{} fs.Var(&config.RequiredAlias, "alias", "required import alias in form path:alias") -======= -func flags(config *Config) flag.FlagSet { - fs := flag.FlagSet{} - fs.Var(stringMap(config.RequiredAlias), "alias", "required import alias in form path:alias") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) fs.BoolVar(&config.DisallowUnaliased, "no-unaliased", false, "do not allow unaliased imports of aliased packages") fs.BoolVar(&config.DisallowExtraAliases, "no-extra-aliases", false, "do not allow non-required aliases") return fs } -<<<<<<< HEAD type aliasList [][]string func (v *aliasList) Set(val string) error { @@ -37,20 +30,4 @@ func (v *aliasList) Set(val string) error { func (v *aliasList) String() string { return fmt.Sprintf("%v", ([][]string)(*v)) -======= -type stringMap map[string]string - -func (v stringMap) Set(val string) error { - spl := strings.SplitN(val, ":", 2) - if len(spl) != 2 { - return errors.New("import flag must be of form path:alias") - } - - v[spl[0]] = spl[1] - return nil -} - -func (v stringMap) String() string { - return fmt.Sprintf("%v", (map[string]string)(v)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/ldez/gomoddirectives/.golangci.yml b/vendor/github.com/ldez/gomoddirectives/.golangci.yml index c18bf11f61..7f25666569 100644 --- a/vendor/github.com/ldez/gomoddirectives/.golangci.yml +++ b/vendor/github.com/ldez/gomoddirectives/.golangci.yml @@ -1,4 +1,3 @@ -<<<<<<< HEAD linters: enable-all: true disable: @@ -21,19 +20,12 @@ linters: - exhaustive - exhaustruct - varnamelen -======= -run: - timeout: 2m ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) linters-settings: govet: enable-all: true -<<<<<<< HEAD disable: - fieldalignment -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) gocyclo: min-complexity: 12 goconst: @@ -79,42 +71,6 @@ linters-settings: rules: json: pascal -<<<<<<< HEAD -======= -linters: - enable-all: true - disable: - - deadcode # deprecated - - exhaustivestruct # deprecated - - golint # deprecated - - ifshort # deprecated - - interfacer # deprecated - - maligned # deprecated - - nosnakecase # deprecated - - scopelint # deprecated - - structcheck # deprecated - - varcheck # deprecated - - sqlclosecheck # not relevant (SQL) - - rowserrcheck # not relevant (SQL) - - execinquery # not relevant (SQL) - - cyclop # duplicate of gocyclo - - lll - - dupl - - prealloc - - bodyclose - - wsl - - nlreturn - - gomnd - - testpackage - - paralleltest - - tparallel - - goerr113 - - wrapcheck - - exhaustive - - exhaustruct - - varnamelen - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) issues: exclude-use-default: false max-issues-per-linter: 0 @@ -125,29 +81,20 @@ issues: exclude-rules: - path: "(.+)_test.go" linters: -<<<<<<< HEAD - funlen - goconst - maintidx - path: cmd/gomoddirectives/gomoddirectives.go linters: - forbidigo -======= - - funlen - - goconst - - path: cmd/gomoddirectives/gomoddirectives.go ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) text: 'use of `fmt.Println` forbidden' output: show-stats: true sort-results: true -<<<<<<< HEAD sort-order: - linter - file run: timeout: 2m -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/ldez/gomoddirectives/LICENSE 
b/vendor/github.com/ldez/gomoddirectives/LICENSE index d7cff29fdf..c1bf0c3288 100644 --- a/vendor/github.com/ldez/gomoddirectives/LICENSE +++ b/vendor/github.com/ldez/gomoddirectives/LICENSE @@ -175,11 +175,7 @@ END OF TERMS AND CONDITIONS -<<<<<<< HEAD Copyright 2024 Fernandez Ludovic -======= - Copyright 2021 Fernandez Ludovic ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/ldez/gomoddirectives/gomoddirectives.go b/vendor/github.com/ldez/gomoddirectives/gomoddirectives.go index 145af85289..22d01d627e 100644 --- a/vendor/github.com/ldez/gomoddirectives/gomoddirectives.go +++ b/vendor/github.com/ldez/gomoddirectives/gomoddirectives.go @@ -1,38 +1,25 @@ -<<<<<<< HEAD // Package gomoddirectives a linter that handle directives into `go.mod`. -======= -// Package gomoddirectives a linter that handle `replace`, `retract`, `exclude` directives into `go.mod`. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) package gomoddirectives import ( "fmt" "go/token" -<<<<<<< HEAD "regexp" "strings" "github.com/ldez/grignotin/gomod" "golang.org/x/mod/modfile" "golang.org/x/tools/go/analysis" -======= - "strings" - - "golang.org/x/mod/modfile" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const ( reasonRetract = "a comment is mandatory to explain why the version has been retracted" reasonExclude = "exclude directive is not allowed" -<<<<<<< HEAD reasonToolchain = "toolchain directive is not allowed" reasonToolchainPattern = "toolchain directive (%s) doesn't match the pattern '%s'" reasonTool = "tool directive is not allowed" reasonGoDebug = "godebug directive is not allowed" reasonGoVersion = "go directive (%s) doesn't match the pattern '%s'" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) reasonReplaceLocal = "local replacement are not allowed" reasonReplace = "replacement are not allowed" reasonReplaceIdentical = "the original module and the replacement are identical" @@ -65,7 +52,6 @@ type Options struct { ReplaceAllowLocal bool ExcludeForbidden bool RetractAllowNoExplanation bool -<<<<<<< HEAD ToolchainForbidden bool ToolchainPattern *regexp.Regexp ToolForbidden bool @@ -96,8 +82,6 @@ func AnalyzePass(pass *analysis.Pass, opts Options) ([]Result, error) { } return AnalyzeFile(f, opts), nil -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Analyze analyzes a project. @@ -112,7 +96,6 @@ func Analyze(opts Options) ([]Result, error) { // AnalyzeFile analyzes a mod file. 
func AnalyzeFile(file *modfile.File, opts Options) []Result { -<<<<<<< HEAD checks := []func(file *modfile.File, opts Options) []Result{ checkRetractDirectives, checkExcludeDirectives, @@ -227,70 +210,21 @@ func checkReplaceDirectives(file *modfile.File, opts Options) []Result { } uniqReplace[replace.Old.Path+replace.Old.Version] = struct{}{} -======= - var results []Result - - if !opts.RetractAllowNoExplanation { - for _, r := range file.Retract { - if r.Rationale != "" { - continue - } - - results = append(results, NewResult(file, r.Syntax, reasonRetract)) - } - } - - if opts.ExcludeForbidden { - for _, e := range file.Exclude { - results = append(results, NewResult(file, e.Syntax, reasonExclude)) - } - } - - uniqReplace := map[string]struct{}{} - - for _, r := range file.Replace { - reason := check(opts, r) - if reason != "" { - results = append(results, NewResult(file, r.Syntax, reason)) - continue - } - - if r.Old.Path == r.New.Path && r.Old.Version == r.New.Version { - results = append(results, NewResult(file, r.Syntax, reasonReplaceIdentical)) - continue - } - - if _, ok := uniqReplace[r.Old.Path+r.Old.Version]; ok { - results = append(results, NewResult(file, r.Syntax, reasonReplaceDuplicate)) - } - - uniqReplace[r.Old.Path+r.Old.Version] = struct{}{} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return results } -<<<<<<< HEAD func checkReplaceDirective(opts Options, r *modfile.Replace) string { if isLocal(r) { if opts.ReplaceAllowLocal { -======= -func check(o Options, r *modfile.Replace) string { - if isLocal(r) { - if o.ReplaceAllowLocal { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return "" } return fmt.Sprintf("%s: %s", reasonReplaceLocal, r.Old.Path) } -<<<<<<< HEAD for _, v := range opts.ReplaceAllowList { -======= - for _, v := range o.ReplaceAllowList { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if r.Old.Path == v { return "" } @@ -299,7 +233,6 @@ func check(o Options, r *modfile.Replace) string { return fmt.Sprintf("%s: %s", reasonReplace, r.Old.Path) } -<<<<<<< HEAD func checkGoDebugDirectives(file *modfile.File, opts Options) []Result { if !opts.GoDebugForbidden { return nil @@ -314,8 +247,6 @@ func checkGoDebugDirectives(file *modfile.File, opts Options) []Result { return results } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Filesystem paths found in "replace" directives are represented by a path with an empty version. // https://github.com/golang/mod/blob/bc388b264a244501debfb9caea700c6dcaff10e2/module/module.go#L122-L124 func isLocal(r *modfile.Replace) bool { diff --git a/vendor/github.com/ldez/gomoddirectives/module.go b/vendor/github.com/ldez/gomoddirectives/module.go index 413c288775..53cf1f59e1 100644 --- a/vendor/github.com/ldez/gomoddirectives/module.go +++ b/vendor/github.com/ldez/gomoddirectives/module.go @@ -1,7 +1,6 @@ package gomoddirectives import ( -<<<<<<< HEAD "errors" "fmt" "os" @@ -28,47 +27,6 @@ func GetModuleFile() (*modfile.File, error) { func parseGoMod(goMod string) (*modfile.File, error) { raw, err := os.ReadFile(filepath.Clean(goMod)) -======= - "bytes" - "encoding/json" - "errors" - "fmt" - "os" - "os/exec" - - "golang.org/x/mod/modfile" -) - -type modInfo struct { - Path string `json:"Path"` - Dir string `json:"Dir"` - GoMod string `json:"GoMod"` - GoVersion string `json:"GoVersion"` - Main bool `json:"Main"` -} - -// GetModuleFile gets module file. 
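The rewritten gomoddirectives above splits AnalyzeFile into per-directive checks (retract, exclude, replace, godebug, toolchain, tool) and widens Options accordingly. A hedged sketch of driving the analyzer directly, using only option names visible in the Options hunk; treat it as illustrative rather than the linter's canonical entry point:

```go
package main

import (
	"fmt"
	"log"

	"github.com/ldez/gomoddirectives"
)

func main() {
	// Analyze loads the current module's go.mod and applies the checks.
	results, err := gomoddirectives.Analyze(gomoddirectives.Options{
		ReplaceAllowLocal:  true, // permit filesystem replace directives
		ExcludeForbidden:   true, // report every exclude directive
		ToolchainForbidden: true, // report toolchain directives as well
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range results {
		fmt.Println(r) // e.g. "exclude directive is not allowed"
	}
}
```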
-func GetModuleFile() (*modfile.File, error) { - // https://github.com/golang/go/issues/44753#issuecomment-790089020 - cmd := exec.Command("go", "list", "-m", "-json") - - raw, err := cmd.Output() - if err != nil { - return nil, fmt.Errorf("command go list: %w: %s", err, string(raw)) - } - - var v modInfo - err = json.NewDecoder(bytes.NewBuffer(raw)).Decode(&v) - if err != nil { - return nil, fmt.Errorf("unmarshaling error: %w: %s", err, string(raw)) - } - - if v.GoMod == "" { - return nil, errors.New("working directory is not part of a module") - } - - raw, err = os.ReadFile(v.GoMod) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, fmt.Errorf("reading go.mod file: %w", err) } diff --git a/vendor/github.com/ldez/gomoddirectives/readme.md b/vendor/github.com/ldez/gomoddirectives/readme.md index 56aca698ce..04738bd81c 100644 --- a/vendor/github.com/ldez/gomoddirectives/readme.md +++ b/vendor/github.com/ldez/gomoddirectives/readme.md @@ -1,6 +1,5 @@ # gomoddirectives -<<<<<<< HEAD A linter that handle directives into `go.mod`. [![Sponsor](https://img.shields.io/badge/Sponsor%20me-%E2%9D%A4%EF%B8%8F-pink)](https://github.com/sponsors/ldez) @@ -191,19 +190,3 @@ module example.com/foo go 1.22.0 ``` -======= -[![Sponsor](https://img.shields.io/badge/Sponsor%20me-%E2%9D%A4%EF%B8%8F-pink)](https://github.com/sponsors/ldez) -[![Build Status](https://github.com/ldez/gomoddirectives/workflows/Main/badge.svg?branch=master)](https://github.com/ldez/gomoddirectives/actions) - -A linter that handle [`replace`](https://golang.org/ref/mod#go-mod-file-replace), [`retract`](https://golang.org/ref/mod#go-mod-file-retract), [`exclude`](https://golang.org/ref/mod#go-mod-file-exclude) directives into `go.mod`. - -Features: - -- ban all [`replace`](https://golang.org/ref/mod#go-mod-file-replace) directives -- allow only local [`replace`](https://golang.org/ref/mod#go-mod-file-replace) directives -- allow only some [`replace`](https://golang.org/ref/mod#go-mod-file-replace) directives -- force explanation for [`retract`](https://golang.org/ref/mod#go-mod-file-retract) directives -- ban all [`exclude`](https://golang.org/ref/mod#go-mod-file-exclude) directives -- detect duplicated [`replace`](https://golang.org/ref/mod#go-mod-file-replace) directives -- detect identical [`replace`](https://golang.org/ref/mod#go-mod-file-replace) directives ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/ldez/tagliatelle/.golangci.yml b/vendor/github.com/ldez/tagliatelle/.golangci.yml index 048fc4ab87..01c76dca99 100644 --- a/vendor/github.com/ldez/tagliatelle/.golangci.yml +++ b/vendor/github.com/ldez/tagliatelle/.golangci.yml @@ -1,4 +1,3 @@ -<<<<<<< HEAD linters: enable-all: true disable: @@ -24,12 +23,6 @@ linters: - nilnil - errchkjson - nonamedreturns -======= -run: - timeout: 5m - skip-files: [ ] - skip-dirs: [ ] ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) linters-settings: govet: @@ -37,11 +30,7 @@ linters-settings: disable: - fieldalignment gocyclo: -<<<<<<< HEAD min-complexity: 20 -======= - min-complexity: 15 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) goconst: min-len: 5 min-occurrences: 3 @@ -56,7 +45,6 @@ linters-settings: gofumpt: extra-rules: true depguard: -<<<<<<< HEAD rules: main: deny: @@ -64,13 +52,6 @@ linters-settings: desc: not allowed - pkg: "github.com/pkg/errors" desc: Should be replaced by standard lib errors package -======= - list-type: denylist - include-go-root: false - packages: - - 
github.com/sirupsen/logrus - - github.com/pkg/errors ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) gocritic: enabled-tags: - diagnostic @@ -85,7 +66,6 @@ linters-settings: hugeParam: sizeThreshold: 100 -<<<<<<< HEAD issues: exclude-use-default: false max-issues-per-linter: 0 @@ -95,48 +75,3 @@ issues: run: timeout: 5m -======= -linters: - enable-all: true - disable: - - deadcode # deprecated - - exhaustivestruct # deprecated - - golint # deprecated - - ifshort # deprecated - - interfacer # deprecated - - maligned # deprecated - - nosnakecase # deprecated - - scopelint # deprecated - - structcheck # deprecated - - varcheck # deprecated - - sqlclosecheck # not relevant (SQL) - - rowserrcheck # not relevant (SQL) - - execinquery # not relevant (SQL) - - cyclop # duplicate of gocyclo - - lll - - dupl - - wsl - - nlreturn - - gomnd - - goerr113 - - wrapcheck - - exhaustive - - exhaustruct - - testpackage - - tparallel - - paralleltest - - prealloc - - ifshort - - forcetypeassert - - varnamelen - - nilnil - - errchkjson - - nonamedreturns - -issues: - exclude-use-default: false - max-per-linter: 0 - max-same-issues: 0 - exclude: - - 'package-comments: should have a package comment' ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/ldez/tagliatelle/readme.md b/vendor/github.com/ldez/tagliatelle/readme.md index e664d9f4c6..52d10304b1 100644 --- a/vendor/github.com/ldez/tagliatelle/readme.md +++ b/vendor/github.com/ldez/tagliatelle/readme.md @@ -97,7 +97,6 @@ type Foo struct { } ``` -<<<<<<< HEAD ## What this linter is about This linter is about validating tags according to rules you define. @@ -106,17 +105,6 @@ The linter also allows to fix tags according to the rules you defined. This linter is not intended to validate the fact a tag in valid or not. ## How to use the linter -======= -## What this tool is about - -This tool is about validating tags according to rules you define. -The tool also allows to fix tags according to the rules you defined. - -This tool is not intended to validate the fact a tag in valid or not. -To do that, you can use `go vet`, or use [golangci-lint](https://golangci-lint.run) ["go vet"](https://golangci-lint.run/usage/linters/#govet) linter. - -## How to use the tool ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ### As a golangci-lint linter @@ -125,7 +113,6 @@ Define the rules, you want via your [golangci-lint](https://golangci-lint.run) c ```yaml linters-settings: tagliatelle: -<<<<<<< HEAD # Checks the struct tag name case. case: # Defines the association between tag name and case. @@ -269,19 +256,6 @@ linters-settings: overrides: - pkg: foo/bar ignore: true -======= - # Check the struck tag name case. - case: - # Use the struct field name to check the name of the struct tag. - # Default: false - use-field-name: true - rules: - # Any struct tag type can be used. - # Support string case: `camel`, `pascal`, `kebab`, `snake`, `upperSnake`, `goCamel`, `goPascal`, `goKebab`, `goSnake`, `upper`, `lower`, `header`. 
- json: camel - yaml: camel - xml: camel ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ``` More information here https://golangci-lint.run/usage/linters/#tagliatelle @@ -306,23 +280,14 @@ Here are the default rules for the well known and used tags, when using tagliate - `bson`: `camel` - `avro`: `snake` - `header`: `header` -<<<<<<< HEAD - `env`: `upperSnake` -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) - `envconfig`: `upperSnake` ### Custom Rules -<<<<<<< HEAD The linter is not limited to the tags used in example, **you can use it to validate any tag**. You can add your own tag, for example `whatever` and tells the linter you want to use `kebab`. -======= -The tool is not limited to the tags used in example, you can use it to validate any tag. - -You can add your own tag, for example `whatever` and tells the tool you want to use `kebab`. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) This option is only available via [golangci-lint](https://golangci-lint.run). @@ -331,7 +296,6 @@ linters-settings: tagliatelle: # Check the struck tag name case. case: -<<<<<<< HEAD rules: # Any struct tag type can be used. # Support string case: `camel`, `pascal`, `kebab`, `snake`, `goCamel`, `goPascal`, `goKebab`, `goSnake`, `upper`, `lower` @@ -343,16 +307,4 @@ linters-settings: # Use the struct field name to check the name of the struct tag. # Default: false use-field-name: true -======= - # Use the struct field name to check the name of the struct tag. - # Default: false - use-field-name: true - rules: - # Any struct tag type can be used. - # Support string case: `camel`, `pascal`, `kebab`, `snake`, `goCamel`, `goPascal`, `goKebab`, `goSnake`, `upper`, `lower` - json: camel - yaml: camel - xml: camel - whatever: kebab ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ``` diff --git a/vendor/github.com/ldez/tagliatelle/tagliatelle.go b/vendor/github.com/ldez/tagliatelle/tagliatelle.go index 13625b7eb5..99c7da2d04 100644 --- a/vendor/github.com/ldez/tagliatelle/tagliatelle.go +++ b/vendor/github.com/ldez/tagliatelle/tagliatelle.go @@ -6,7 +6,6 @@ import ( "errors" "fmt" "go/ast" -<<<<<<< HEAD "maps" "path" "path/filepath" @@ -15,12 +14,6 @@ import ( "strings" iradix "github.com/hashicorp/go-immutable-radix/v2" -======= - "reflect" - "strings" - - "github.com/ettle/strcase" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" @@ -28,7 +21,6 @@ import ( // Config the tagliatelle configuration. type Config struct { -<<<<<<< HEAD Base Overrides []Overrides } @@ -53,10 +45,6 @@ type ExtendedRule struct { Case string ExtraInitialisms bool InitialismOverrides map[string]bool -======= - Rules map[string]string - UseFieldName bool ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // New creates an analyzer. 
@@ -64,33 +52,18 @@ func New(config Config) *analysis.Analyzer { return &analysis.Analyzer{ Name: "tagliatelle", Doc: "Checks the struct tags.", -<<<<<<< HEAD Run: func(pass *analysis.Pass) (any, error) { if len(config.Rules) == 0 && len(config.ExtendedRules) == 0 && len(config.Overrides) == 0 { -======= - Run: func(pass *analysis.Pass) (interface{}, error) { - if len(config.Rules) == 0 { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil, nil } return run(pass, config) }, -<<<<<<< HEAD Requires: []*analysis.Analyzer{inspect.Analyzer}, } } func run(pass *analysis.Pass, config Config) (any, error) { -======= - Requires: []*analysis.Analyzer{ - inspect.Analyzer, - }, - } -} - -func run(pass *analysis.Pass, config Config) (interface{}, error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) isp, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) if !ok { return nil, errors.New("missing inspect analyser") @@ -100,7 +73,6 @@ func run(pass *analysis.Pass, config Config) (interface{}, error) { (*ast.StructType)(nil), } -<<<<<<< HEAD cfg := config.Base if pass.Module != nil { radixTree := createRadixTree(config, pass.Module.Path) @@ -111,8 +83,6 @@ func run(pass *analysis.Pass, config Config) (interface{}, error) { return nil, nil } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) isp.Preorder(nodeFilter, func(n ast.Node) { node, ok := n.(*ast.StructType) if !ok { @@ -120,22 +90,14 @@ func run(pass *analysis.Pass, config Config) (interface{}, error) { } for _, field := range node.Fields.List { -<<<<<<< HEAD analyze(pass, cfg, node, field) -======= - analyze(pass, config, node, field) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } }) return nil, nil } -<<<<<<< HEAD func analyze(pass *analysis.Pass, config Base, n *ast.StructType, field *ast.Field) { -======= -func analyze(pass *analysis.Pass, config Config, n *ast.StructType, field *ast.Field) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if n.Fields == nil || n.Fields.NumFields() < 1 { // skip empty structs return @@ -152,7 +114,6 @@ func analyze(pass *analysis.Pass, config Config, n *ast.StructType, field *ast.F return } -<<<<<<< HEAD cleanRules(config) if slices.Contains(config.IgnoredFields, fieldName) { @@ -221,51 +182,6 @@ func report(pass *analysis.Pass, config Base, key, convName, fieldName string, n if value != converter(expected) { pass.Reportf(field.Tag.Pos(), "%s(%s): got '%s' want '%s'", key, convName, value, converter(expected)) -======= - for key, convName := range config.Rules { - if convName == "" { - continue - } - - value, flags, ok := lookupTagValue(field.Tag, key) - if !ok { - // skip when no struct tag for the key - continue - } - - if value == "-" { - // skip when skipped :) - continue - } - - // TODO(ldez): need to be rethink. - // This is an exception because of a bug. - // https://github.com/ldez/tagliatelle/issues/8 - // For now, tagliatelle should try to remain neutral in terms of format. 
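The report call kept on the HEAD side formats findings as "%s(%s): got '%s' want '%s'". For orientation only (not part of the patch), a struct tag that would trip a json: camel rule:

```go
package demo

// With a `json: camel` rule configured, tagliatelle reports the tag
// below as: json(camel): got 'user_name' want 'userName'.
type User struct {
	UserName string `json:"user_name"` // linted: want `userName`
}
```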
- if hasTagFlag(flags, "inline") { - // skip for inline children (no name to lint) - continue - } - - if value == "" { - value = fieldName - } - - converter, err := getConverter(convName) - if err != nil { - pass.Reportf(n.Pos(), "%s(%s): %v", key, convName, err) - continue - } - - expected := value - if config.UseFieldName { - expected = fieldName - } - - if value != converter(expected) { - pass.Reportf(field.Tag.Pos(), "%s(%s): got '%s' want '%s'", key, convName, value, converter(expected)) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -325,7 +241,6 @@ func hasTagFlag(flags []string, query string) bool { return false } -<<<<<<< HEAD func createRadixTree(config Config, modPath string) *iradix.Tree[Base] { r := iradix.New[Base]() @@ -385,39 +300,3 @@ func cleanRules(config Base) { delete(config.Rules, k) } } -======= -func getConverter(c string) (func(s string) string, error) { - switch c { - case "camel": - return strcase.ToCamel, nil - case "pascal": - return strcase.ToPascal, nil - case "kebab": - return strcase.ToKebab, nil - case "snake": - return strcase.ToSnake, nil - case "goCamel": - return strcase.ToGoCamel, nil - case "goPascal": - return strcase.ToGoPascal, nil - case "goKebab": - return strcase.ToGoKebab, nil - case "goSnake": - return strcase.ToGoSnake, nil - case "header": - return toHeader, nil - case "upper": - return strings.ToUpper, nil - case "upperSnake": - return strcase.ToSNAKE, nil - case "lower": - return strings.ToLower, nil - default: - return nil, fmt.Errorf("unsupported case: %s", c) - } -} - -func toHeader(s string) string { - return strcase.ToCase(s, strcase.TitleCase, '-') -} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/magiconair/properties/CHANGELOG.md b/vendor/github.com/magiconair/properties/CHANGELOG.md deleted file mode 100644 index 842e8e24fb..0000000000 --- a/vendor/github.com/magiconair/properties/CHANGELOG.md +++ /dev/null @@ -1,205 +0,0 @@ -## Changelog - -### [1.8.7](https://github.com/magiconair/properties/tree/v1.8.7) - 08 Dec 2022 - - * [PR #65](https://github.com/magiconair/properties/pull/65): Speedup Merge - - Thanks to [@AdityaVallabh](https://github.com/AdityaVallabh) for the patch. - - * [PR #66](https://github.com/magiconair/properties/pull/66): use github actions - -### [1.8.6](https://github.com/magiconair/properties/tree/v1.8.6) - 23 Feb 2022 - - * [PR #57](https://github.com/magiconair/properties/pull/57):Fix "unreachable code" lint error - - Thanks to [@ellie](https://github.com/ellie) for the patch. - - * [PR #63](https://github.com/magiconair/properties/pull/63): Make TestMustGetParsedDuration backwards compatible - - This patch ensures that the `TestMustGetParsedDuration` still works with `go1.3` to make the - author happy until it affects real users. - - Thanks to [@maage](https://github.com/maage) for the patch. - -### [1.8.5](https://github.com/magiconair/properties/tree/v1.8.5) - 24 Mar 2021 - - * [PR #55](https://github.com/magiconair/properties/pull/55): Fix: Encoding Bug in Comments - - When reading comments \ are loaded correctly, but when writing they are then - replaced by \\. This leads to wrong comments when writing and reading multiple times. - - Thanks to [@doxsch](https://github.com/doxsch) for the patch. 
- -### [1.8.4](https://github.com/magiconair/properties/tree/v1.8.4) - 23 Sep 2020 - - * [PR #50](https://github.com/magiconair/properties/pull/50): enhance error message for circular references - - Thanks to [@sriv](https://github.com/sriv) for the patch. - -### [1.8.3](https://github.com/magiconair/properties/tree/v1.8.3) - 14 Sep 2020 - - * [PR #49](https://github.com/magiconair/properties/pull/49): Include the key in error message causing the circular reference - - The change is include the key in the error message which is causing the circular - reference when parsing/loading the properties files. - - Thanks to [@haroon-sheikh](https://github.com/haroon-sheikh) for the patch. - -### [1.8.2](https://github.com/magiconair/properties/tree/v1.8.2) - 25 Aug 2020 - - * [PR #36](https://github.com/magiconair/properties/pull/36): Escape backslash on write - - This patch ensures that backslashes are escaped on write. Existing applications which - rely on the old behavior may need to be updated. - - Thanks to [@apesternikov](https://github.com/apesternikov) for the patch. - - * [PR #42](https://github.com/magiconair/properties/pull/42): Made Content-Type check whitespace agnostic in LoadURL() - - Thanks to [@aliras1](https://github.com/aliras1) for the patch. - - * [PR #41](https://github.com/magiconair/properties/pull/41): Make key/value separator configurable on Write() - - Thanks to [@mkjor](https://github.com/mkjor) for the patch. - - * [PR #40](https://github.com/magiconair/properties/pull/40): Add method to return a sorted list of keys - - Thanks to [@mkjor](https://github.com/mkjor) for the patch. - -### [1.8.1](https://github.com/magiconair/properties/tree/v1.8.1) - 10 May 2019 - - * [PR #35](https://github.com/magiconair/properties/pull/35): Close body always after request - - This patch ensures that in `LoadURL` the response body is always closed. - - Thanks to [@liubog2008](https://github.com/liubog2008) for the patch. - -### [1.8](https://github.com/magiconair/properties/tree/v1.8) - 15 May 2018 - - * [PR #26](https://github.com/magiconair/properties/pull/26): Disable expansion during loading - - This adds the option to disable property expansion during loading. - - Thanks to [@kmala](https://github.com/kmala) for the patch. - -### [1.7.6](https://github.com/magiconair/properties/tree/v1.7.6) - 14 Feb 2018 - - * [PR #29](https://github.com/magiconair/properties/pull/29): Reworked expansion logic to handle more complex cases. - - See PR for an example. - - Thanks to [@yobert](https://github.com/yobert) for the fix. - -### [1.7.5](https://github.com/magiconair/properties/tree/v1.7.5) - 13 Feb 2018 - - * [PR #28](https://github.com/magiconair/properties/pull/28): Support duplicate expansions in the same value - - Values which expand the same key multiple times (e.g. `key=${a} ${a}`) will no longer fail - with a `circular reference error`. - - Thanks to [@yobert](https://github.com/yobert) for the fix. - -### [1.7.4](https://github.com/magiconair/properties/tree/v1.7.4) - 31 Oct 2017 - - * [Issue #23](https://github.com/magiconair/properties/issues/23): Ignore blank lines with whitespaces - - * [PR #24](https://github.com/magiconair/properties/pull/24): Update keys when DisableExpansion is enabled - - Thanks to [@mgurov](https://github.com/mgurov) for the fix. 
- -### [1.7.3](https://github.com/magiconair/properties/tree/v1.7.3) - 10 Jul 2017 - - * [Issue #17](https://github.com/magiconair/properties/issues/17): Add [SetValue()](http://godoc.org/github.com/magiconair/properties#Properties.SetValue) method to set values generically - * [Issue #22](https://github.com/magiconair/properties/issues/22): Add [LoadMap()](http://godoc.org/github.com/magiconair/properties#LoadMap) function to load properties from a string map - -### [1.7.2](https://github.com/magiconair/properties/tree/v1.7.2) - 20 Mar 2017 - - * [Issue #15](https://github.com/magiconair/properties/issues/15): Drop gocheck dependency - * [PR #21](https://github.com/magiconair/properties/pull/21): Add [Map()](http://godoc.org/github.com/magiconair/properties#Properties.Map) and [FilterFunc()](http://godoc.org/github.com/magiconair/properties#Properties.FilterFunc) - -### [1.7.1](https://github.com/magiconair/properties/tree/v1.7.1) - 13 Jan 2017 - - * [Issue #14](https://github.com/magiconair/properties/issues/14): Decouple TestLoadExpandedFile from `$USER` - * [PR #12](https://github.com/magiconair/properties/pull/12): Load from files and URLs - * [PR #16](https://github.com/magiconair/properties/pull/16): Keep gofmt happy - * [PR #18](https://github.com/magiconair/properties/pull/18): Fix Delete() function - -### [1.7.0](https://github.com/magiconair/properties/tree/v1.7.0) - 20 Mar 2016 - - * [Issue #10](https://github.com/magiconair/properties/issues/10): Add [LoadURL,LoadURLs,MustLoadURL,MustLoadURLs](http://godoc.org/github.com/magiconair/properties#LoadURL) method to load properties from a URL. - * [Issue #11](https://github.com/magiconair/properties/issues/11): Add [LoadString,MustLoadString](http://godoc.org/github.com/magiconair/properties#LoadString) method to load properties from an UTF8 string. - * [PR #8](https://github.com/magiconair/properties/pull/8): Add [MustFlag](http://godoc.org/github.com/magiconair/properties#Properties.MustFlag) method to provide overrides via command line flags. (@pascaldekloe) - -### [1.6.0](https://github.com/magiconair/properties/tree/v1.6.0) - 11 Dec 2015 - - * Add [Decode](http://godoc.org/github.com/magiconair/properties#Properties.Decode) method to populate struct from properties via tags. - -### [1.5.6](https://github.com/magiconair/properties/tree/v1.5.6) - 18 Oct 2015 - - * Vendored in gopkg.in/check.v1 - -### [1.5.5](https://github.com/magiconair/properties/tree/v1.5.5) - 31 Jul 2015 - - * [PR #6](https://github.com/magiconair/properties/pull/6): Add [Delete](http://godoc.org/github.com/magiconair/properties#Properties.Delete) method to remove keys including comments. (@gerbenjacobs) - -### [1.5.4](https://github.com/magiconair/properties/tree/v1.5.4) - 23 Jun 2015 - - * [Issue #5](https://github.com/magiconair/properties/issues/5): Allow disabling of property expansion [DisableExpansion](http://godoc.org/github.com/magiconair/properties#Properties.DisableExpansion). When property expansion is disabled Properties become a simple key/value store and don't check for circular references. 
- -### [1.5.3](https://github.com/magiconair/properties/tree/v1.5.3) - 02 Jun 2015 - - * [Issue #4](https://github.com/magiconair/properties/issues/4): Maintain key order in [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) and [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp) - -### [1.5.2](https://github.com/magiconair/properties/tree/v1.5.2) - 10 Apr 2015 - - * [Issue #3](https://github.com/magiconair/properties/issues/3): Don't print comments in [WriteComment()](http://godoc.org/github.com/magiconair/properties#Properties.WriteComment) if they are all empty - * Add clickable links to README - -### [1.5.1](https://github.com/magiconair/properties/tree/v1.5.1) - 08 Dec 2014 - - * Added [GetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.GetParsedDuration) and [MustGetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.MustGetParsedDuration) for values specified compatible with - [time.ParseDuration()](http://golang.org/pkg/time/#ParseDuration). - -### [1.5.0](https://github.com/magiconair/properties/tree/v1.5.0) - 18 Nov 2014 - - * Added support for single and multi-line comments (reading, writing and updating) - * The order of keys is now preserved - * Calling [Set()](http://godoc.org/github.com/magiconair/properties#Properties.Set) with an empty key now silently ignores the call and does not create a new entry - * Added a [MustSet()](http://godoc.org/github.com/magiconair/properties#Properties.MustSet) method - * Migrated test library from launchpad.net/gocheck to [gopkg.in/check.v1](http://gopkg.in/check.v1) - -### [1.4.2](https://github.com/magiconair/properties/tree/v1.4.2) - 15 Nov 2014 - - * [Issue #2](https://github.com/magiconair/properties/issues/2): Fixed goroutine leak in parser which created two lexers but cleaned up only one - -### [1.4.1](https://github.com/magiconair/properties/tree/v1.4.1) - 13 Nov 2014 - - * [Issue #1](https://github.com/magiconair/properties/issues/1): Fixed bug in Keys() method which returned an empty string - -### [1.4.0](https://github.com/magiconair/properties/tree/v1.4.0) - 23 Sep 2014 - - * Added [Keys()](http://godoc.org/github.com/magiconair/properties#Properties.Keys) to get the keys - * Added [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp) and [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) to get a subset of the properties - -### [1.3.0](https://github.com/magiconair/properties/tree/v1.3.0) - 18 Mar 2014 - -* Added support for time.Duration -* Made MustXXX() failure beha[ior configurable (log.Fatal, panic](https://github.com/magiconair/properties/tree/vior configurable (log.Fatal, panic) - custom) -* Changed default of MustXXX() failure from panic to log.Fatal - -### [1.2.0](https://github.com/magiconair/properties/tree/v1.2.0) - 05 Mar 2014 - -* Added MustGet... 
functions -* Added support for int and uint with range checks on 32 bit platforms - -### [1.1.0](https://github.com/magiconair/properties/tree/v1.1.0) - 20 Jan 2014 - -* Renamed from goproperties to properties -* Added support for expansion of environment vars in - filenames and value expressions -* Fixed bug where value expressions were not at the - start of the string - -### [1.0.0](https://github.com/magiconair/properties/tree/v1.0.0) - 7 Jan 2014 - -* Initial release diff --git a/vendor/github.com/magiconair/properties/README.md b/vendor/github.com/magiconair/properties/README.md index 1509ade854..4872685f46 100644 --- a/vendor/github.com/magiconair/properties/README.md +++ b/vendor/github.com/magiconair/properties/README.md @@ -1,18 +1,9 @@ [![](https://img.shields.io/github/tag/magiconair/properties.svg?style=flat-square&label=release)](https://github.com/magiconair/properties/releases) -<<<<<<< HEAD -======= -[![Travis CI Status](https://img.shields.io/travis/magiconair/properties.svg?branch=master&style=flat-square&label=travis)](https://travis-ci.org/magiconair/properties) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) [![License](https://img.shields.io/badge/License-BSD%202--Clause-orange.svg?style=flat-square)](https://raw.githubusercontent.com/magiconair/properties/master/LICENSE) [![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties) # Overview -<<<<<<< HEAD -======= -#### Please run `git pull --tags` to update the tags. See [below](#updated-git-tags) why. - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) properties is a Go library for reading and writing properties files. It supports reading from multiple files or URLs and Spring style recursive @@ -105,33 +96,3 @@ $ go get -u github.com/magiconair/properties ## ToDo * Dump contents with passwords and secrets obscured -<<<<<<< HEAD -======= - -## Updated Git tags - -#### 13 Feb 2018 - -I realized that all of the git tags I had pushed before v1.7.5 were lightweight tags -and I've only recently learned that this doesn't play well with `git describe` 😞 - -I have replaced all lightweight tags with signed tags using this script which should -retain the commit date, name and email address. Please run `git pull --tags` to update them. - -Worst case you have to reclone the repo. - -```shell -#!/bin/bash -tag=$1 -echo "Updating $tag" -date=$(git show ${tag}^0 --format=%aD | head -1) -email=$(git show ${tag}^0 --format=%aE | head -1) -name=$(git show ${tag}^0 --format=%aN | head -1) -GIT_COMMITTER_DATE="$date" GIT_COMMITTER_NAME="$name" GIT_COMMITTER_EMAIL="$email" git tag -s -f ${tag} ${tag}^0 -m ${tag} -``` - -I apologize for the inconvenience. 
- -Frank - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/magiconair/properties/decode.go b/vendor/github.com/magiconair/properties/decode.go index 70443fd8e0..f5e252f8d9 100644 --- a/vendor/github.com/magiconair/properties/decode.go +++ b/vendor/github.com/magiconair/properties/decode.go @@ -189,21 +189,12 @@ func dec(p *Properties, key string, def *string, opts map[string]string, v refle for i := 0; i < v.NumField(); i++ { fv := v.Field(i) fk, def, opts := keydef(t.Field(i)) -<<<<<<< HEAD if fk == "-" { continue } if !fv.CanSet() { return fmt.Errorf("cannot set %s", t.Field(i).Name) } -======= - if !fv.CanSet() { - return fmt.Errorf("cannot set %s", t.Field(i).Name) - } - if fk == "-" { - continue - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if key != "" { fk = key + "." + fk } diff --git a/vendor/github.com/magiconair/properties/load.go b/vendor/github.com/magiconair/properties/load.go index 03d1062064..6567e0c719 100644 --- a/vendor/github.com/magiconair/properties/load.go +++ b/vendor/github.com/magiconair/properties/load.go @@ -6,11 +6,7 @@ package properties import ( "fmt" -<<<<<<< HEAD "io" -======= - "io/ioutil" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "net/http" "os" "strings" @@ -56,7 +52,6 @@ func (l *Loader) LoadBytes(buf []byte) (*Properties, error) { return l.loadBytes(buf, l.Encoding) } -<<<<<<< HEAD // LoadReader reads an io.Reader into a Properties struct. func (l *Loader) LoadReader(r io.Reader) (*Properties, error) { if buf, err := io.ReadAll(r); err != nil { @@ -66,8 +61,6 @@ func (l *Loader) LoadReader(r io.Reader) (*Properties, error) { } } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // LoadAll reads the content of multiple URLs or files in the given order into // a Properties struct. If IgnoreMissing is true then a 404 status code or // missing file will not be reported as error. Encoding sets the encoding for @@ -107,11 +100,7 @@ func (l *Loader) LoadAll(names []string) (*Properties, error) { // If IgnoreMissing is true then a missing file will not be // reported as error. func (l *Loader) LoadFile(filename string) (*Properties, error) { -<<<<<<< HEAD data, err := os.ReadFile(filename) -======= - data, err := ioutil.ReadFile(filename) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { if l.IgnoreMissing && os.IsNotExist(err) { LogPrintf("properties: %s not found. skipping", filename) @@ -146,11 +135,7 @@ func (l *Loader) LoadURL(url string) (*Properties, error) { return nil, fmt.Errorf("properties: %s returned %d", url, resp.StatusCode) } -<<<<<<< HEAD body, err := io.ReadAll(resp.Body) -======= - body, err := ioutil.ReadAll(resp.Body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, fmt.Errorf("properties: %s error reading response. %s", url, err) } @@ -209,15 +194,12 @@ func LoadFile(filename string, enc Encoding) (*Properties, error) { return l.LoadAll([]string{filename}) } -<<<<<<< HEAD // LoadReader reads an io.Reader into a Properties struct. func LoadReader(r io.Reader, enc Encoding) (*Properties, error) { l := &Loader{Encoding: enc} return l.LoadReader(r) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // LoadFiles reads multiple files in the given order into // a Properties struct. If 'ignoreMissing' is true then // non-existent files will not be reported as error. 
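The hunks above keep the new `LoadReader` helper; a minimal usage sketch, assuming the package's exported `UTF8` encoding constant:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/magiconair/properties"
)

func main() {
	// LoadReader reads any io.Reader into a Properties struct; the
	// encoding argument selects UTF-8 or ISO-8859-1 decoding.
	p, err := properties.LoadReader(strings.NewReader("key = value"), properties.UTF8)
	if err != nil {
		panic(err)
	}
	fmt.Println(p.MustGet("key")) // value
}
```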
@@ -257,15 +239,12 @@ func MustLoadString(s string) *Properties { return must(LoadString(s)) } -<<<<<<< HEAD // MustLoadSReader reads an io.Reader into a Properties struct and // panics on error. func MustLoadReader(r io.Reader, enc Encoding) *Properties { return must(LoadReader(r, enc)) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // MustLoadFile reads a file into a Properties struct and // panics on error. func MustLoadFile(filename string, enc Encoding) *Properties { diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md index 1232f36cfb..ae634d1cc0 100644 --- a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md +++ b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md @@ -1,11 +1,8 @@ -<<<<<<< HEAD ## 1.5.1 * Wrap errors so they're compatible with `errors.Is` and `errors.As` [GH-282] * Fix map of slices not decoding properly in certain cases. [GH-266] -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## 1.5.0 * New option `IgnoreUntaggedFields` to ignore decoding to any fields diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go index 44a7b7f97c..c1f99da032 100644 --- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -271,15 +271,11 @@ func TextUnmarshallerHookFunc() DecodeHookFuncType { if !ok { return data, nil } -<<<<<<< HEAD str, ok := data.(string) if !ok { str = reflect.Indirect(reflect.ValueOf(&data)).Elem().String() } if err := unmarshaller.UnmarshalText([]byte(str)); err != nil { -======= - if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil, err } return result, nil diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go index 6045ea0749..7581806a79 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -458,11 +458,7 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e var err error input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal) if err != nil { -<<<<<<< HEAD return fmt.Errorf("error decoding '%s': %w", name, err) -======= - return fmt.Errorf("error decoding '%s': %s", name, err) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -1127,11 +1123,8 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) if valSlice.IsNil() || d.config.ZeroFields { // Make a new slice to hold our result, same size as the original data. valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) -<<<<<<< HEAD } else if valSlice.Len() > dataVal.Len() { valSlice = valSlice.Slice(0, dataVal.Len()) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Accumulate any errors diff --git a/vendor/github.com/nunnatsa/ginkgolinter/README.md b/vendor/github.com/nunnatsa/ginkgolinter/README.md index 7435a290f7..83c436359f 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/README.md +++ b/vendor/github.com/nunnatsa/ginkgolinter/README.md @@ -180,11 +180,7 @@ var _ = Describe("checking something", Focus, func() { These container, or the `Focus` spec, must not be part of the final source code, and should only be used locally by the developer. 
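A minimal sketch of the kind of focused container this rule reports, using ginkgo v2's `FDescribe` (the spec body is illustrative):

```go
package mypackage_test

import (
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// FDescribe focuses this container: only the specs inside it run, which is
// exactly what the forbid-focus-container rule flags.
var _ = FDescribe("checking something", func() {
	It("works", func() {
		Expect(1 + 1).To(Equal(2))
	})
})
```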
-<<<<<<< HEAD ***This rule is disabled by default***. Use the `--forbid-focus-container` command line flag to enable it. -======= -***This rule is disabled by default***. Use the `--forbid-focus-container=true` command line flag to enable it. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ### Comparing values from different types [BUG] @@ -193,11 +189,7 @@ The `Equal` and the `BeIdentical` matchers also check the type, not only the val The following code will fail in runtime: ```go x := 5 // x is int -<<<<<<< HEAD Expect(x).Should(Equal(uint(5)) // x and uint(5) are with different -======= -Expect(x).Should(Eqaul(uint(5)) // x and uint(5) are with different ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ``` When using negative checks, it's even worse, because we get a false positive: ``` @@ -210,11 +202,7 @@ using casting, or use the `BeEquivalentTo` matcher. The linter can't guess what is the best solution in each case, and so it won't auto-fix this warning. -<<<<<<< HEAD To suppress this warning entirely, use the `--suppress-type-compare-assertion` command line parameter. -======= -To suppress this warning entirely, use the `--suppress-type-compare-assertion=true` command line parameter. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) To suppress a specific file or line, use the `// ginkgo-linter:ignore-type-compare-warning` comment (see [below](#suppress-warning-from-the-code)) @@ -246,11 +234,7 @@ flag **is** set. ***Note***: This rule work with best-effort approach. It can't find many cases, like const defined not in the same package, or when using variables. -<<<<<<< HEAD The timeout and polling intervals may be passed as optional arguments to the `Eventually` or `Consistently` functions, or -======= -The timeout and polling intervals may be passed as optional arguments to the `Eventually` or `Constanly` functions, or ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) using the `WithTimeout` or , `Within` methods (timeout), and `WithPolling` or `ProbeEvery` methods (polling). This rule checks if the async (`Eventually` or `Consistently`) timeout duration, is not shorter than the polling interval. @@ -290,11 +274,7 @@ a Gomega object as their first parameter, and returns nothing, e.g. this is a va ***Note***: This rule **does not** support auto-fix. ### Avoid Spec Pollution: Don't Initialize Variables in Container Nodes [BUG/STYLE]: -<<<<<<< HEAD ***Note***: Only applied when the `--forbid-spec-pollution` flag is set (disabled by default). -======= -***Note***: Only applied when the `--forbid-spec-pollution=true` flag is set (disabled by default). ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) According to [ginkgo documentation](https://onsi.github.io/ginkgo/#avoid-spec-pollution-dont-initialize-variables-in-container-nodes), no variable should be assigned within a container node (`Describe`, `Context`, `When` or their `F`, `P` or `X` forms) @@ -471,21 +451,13 @@ Expect("abc").ShouldNot(BeEmpty()) // => Expect("abc").ToNot(BeEmpty()) ``` This rule support auto fixing. -<<<<<<< HEAD ***This rule is disabled by default***. Use the `--force-expect-to` command line flag to enable it. -======= -***This rule is disabled by default***. Use the `--force-expect-to=true` command line flag to enable it. 
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ### Async timing interval: multiple timeout or polling intervals [STYLE] ***Note***: Only applied when the `suppress-async-assertion` flag is **not set** *and* the `validate-async-intervals` flag **is** set. -<<<<<<< HEAD The timeout and polling intervals may be passed as optional arguments to the `Eventually` or `Consistently` functions, or -======= -The timeout and polling intervals may be passed as optional arguments to the `Eventually` or `Constanly` functions, or ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) using the `WithTimeout` or , `Within` methods (timeout), and `WithPolling` or `ProbeEvery` methods (polling). The linter checks that there is up to one polling argument and up to one timeout argument. @@ -503,11 +475,7 @@ Eventually(aFunc, time.Second*10, time.Millisecond * 500).WithPolling(time.Milli ***Note***: Only applied when the `suppress-async-assertion` flag is **not set** *and* the `validate-async-intervals` flag **is** set. -<<<<<<< HEAD gomega supports a few formats for timeout and polling intervals, when using the old format (the last two parameters of Eventually and Consistently): -======= -gomega supports a few formats for timeout and polling intervals, when using the old format (the last two parameters of Eventually and Constantly): ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) * a `time.Duration` value * any kind of numeric value (int(8/16/32/64), uint(8/16/32/64) or float(32/64), as the number of seconds. * duration string like `"12s"` @@ -554,17 +522,12 @@ will trigger a warning with a suggestion to replace the mather to ```go Expect(myErrorFunc()).To(Succeed()) ``` -<<<<<<< HEAD ***This rule is disabled by default***. Use the `--force-succeed` command line flag to enable it. -======= -***This rule is disabled by default***. Use the `--force-succeed=true` command line flag to enable it. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ***Note***: This rule **does** support auto-fix, when the `--fix` command line parameter is used. 
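The flag renames running through this README (dropping the explicit `=true`) line up with the analyzer switching from a custom `flag.Value` type to plain `bool` flags, as the `analyzer.go` and `types/config.go` hunks further below show. A minimal sketch of why that matters on the command line, using only the standard `flag` package (the flag set name and usage string are illustrative):

```go
package main

import (
	"flag"
	"fmt"
)

func main() {
	// With BoolVar the flag package treats this as a boolean flag, so both
	// `-forbid-focus-container` and `-forbid-focus-container=true` parse.
	// A custom flag.Value without IsBoolFlag() (like the deleted Boolean
	// type below) always needs the explicit `=value` form.
	var forbidFocus bool
	fs := flag.NewFlagSet("ginkgolinter", flag.ContinueOnError)
	fs.BoolVar(&forbidFocus, "forbid-focus-container", false,
		"trigger a warning for focused containers")

	if err := fs.Parse([]string{"-forbid-focus-container"}); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(forbidFocus) // true
}
```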
## Suppress the linter ### Suppress warning from command line -<<<<<<< HEAD * Use the `--suppress-len-assertion` flag to suppress the wrong length and cap assertions warning * Use the `--suppress-nil-assertion` flag to suppress the wrong nil assertion warning * Use the `--suppress-err-assertion` flag to suppress the wrong error assertion warning @@ -573,16 +536,6 @@ will trigger a warning with a suggestion to replace the mather to * Use the `--forbid-focus-container` flag to activate the focused container assertion (deactivated by default) * Use the `--suppress-type-compare-assertion` to suppress the type compare assertion warning * Use the `--allow-havelen-0` flag to avoid warnings about `HaveLen(0)`; Note: this parameter is only supported from -======= -* Use the `--suppress-len-assertion=true` flag to suppress the wrong length and cap assertions warning -* Use the `--suppress-nil-assertion=true` flag to suppress the wrong nil assertion warning -* Use the `--suppress-err-assertion=true` flag to suppress the wrong error assertion warning -* Use the `--suppress-compare-assertion=true` flag to suppress the wrong comparison assertion warning -* Use the `--suppress-async-assertion=true` flag to suppress the function call in async assertion warning -* Use the `--forbid-focus-container=true` flag to activate the focused container assertion (deactivated by default) -* Use the `--suppress-type-compare-assertion=true` to suppress the type compare assertion warning -* Use the `--allow-havelen-0=true` flag to avoid warnings about `HaveLen(0)`; Note: this parameter is only supported from ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) command line, and not from a comment. ### Suppress warning from the code @@ -606,11 +559,7 @@ To suppress the wrong async assertion warning, add a comment with (only) `ginkgo-linter:ignore-async-assert-warning`. -<<<<<<< HEAD To suppress the focus container warning, add a comment with (only) -======= -To supress the focus container warning, add a comment with (only) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) `ginkgo-linter:ignore-focus-container-warning` @@ -623,17 +572,10 @@ Notice that this comment will not work for an anonymous variable container like // ginkgo-linter:ignore-focus-container-warning (not working!!) var _ = FDescribe(...) ``` -<<<<<<< HEAD In this case, use the file comment (see below). There are two options to use these comments: 1. If the comment is at the top of the file, suppress the warning for the whole file; e.g.: -======= -In this case, use the file comment (see bellow). - -There are two options to use these comments: -1. 
If the comment is at the top of the file, supress the warning for the whole file; e.g.: ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ```go package mypackage diff --git a/vendor/github.com/nunnatsa/ginkgolinter/analyzer.go b/vendor/github.com/nunnatsa/ginkgolinter/analyzer.go index c63892f396..ac762cd9b6 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/analyzer.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/analyzer.go @@ -37,7 +37,6 @@ func NewAnalyzer() *analysis.Analyzer { a := NewAnalyzerWithConfig(config) -<<<<<<< HEAD a.Flags.Init("ginkgolinter", flag.ExitOnError) a.Flags.BoolVar(&config.SuppressLen, "suppress-len-assertion", config.SuppressLen, "Suppress warning for wrong length assertions") a.Flags.BoolVar(&config.SuppressNil, "suppress-nil-assertion", config.SuppressNil, "Suppress warning for wrong nil assertions") @@ -51,23 +50,6 @@ func NewAnalyzer() *analysis.Analyzer { a.Flags.BoolVar(&config.ForbidFocus, "forbid-focus-container", config.ForbidFocus, "trigger a warning for ginkgo focus containers like FDescribe, FContext, FWhen or FIt; default = false.") a.Flags.BoolVar(&config.ForbidSpecPollution, "forbid-spec-pollution", config.ForbidSpecPollution, "trigger a warning for variable assignments in ginkgo containers like Describe, Context and When, instead of in BeforeEach(); default = false.") a.Flags.BoolVar(&config.ForceSucceedForFuncs, "force-succeed", config.ForceSucceedForFuncs, "force using the Succeed matcher for error functions, and the HaveOccurred matcher for non-function error values") -======= - var ignored bool - a.Flags.Init("ginkgolinter", flag.ExitOnError) - a.Flags.Var(&config.SuppressLen, "suppress-len-assertion", "Suppress warning for wrong length assertions") - a.Flags.Var(&config.SuppressNil, "suppress-nil-assertion", "Suppress warning for wrong nil assertions") - a.Flags.Var(&config.SuppressErr, "suppress-err-assertion", "Suppress warning for wrong error assertions") - a.Flags.Var(&config.SuppressCompare, "suppress-compare-assertion", "Suppress warning for wrong comparison assertions") - a.Flags.Var(&config.SuppressAsync, "suppress-async-assertion", "Suppress warning for function call in async assertion, like Eventually") - a.Flags.Var(&config.ValidateAsyncIntervals, "validate-async-intervals", "best effort validation of async intervals (timeout and polling); ignored the suppress-async-assertion flag is true") - a.Flags.Var(&config.SuppressTypeCompare, "suppress-type-compare-assertion", "Suppress warning for comparing values from different types, like int32 and uint32") - a.Flags.Var(&config.AllowHaveLen0, "allow-havelen-0", "Do not warn for HaveLen(0); default = false") - a.Flags.Var(&config.ForceExpectTo, "force-expect-to", "force using `Expect` with `To`, `ToNot` or `NotTo`. reject using `Expect` with `Should` or `ShouldNot`; default = false (not forced)") - a.Flags.BoolVar(&ignored, "suppress-focus-container", true, "Suppress warning for ginkgo focus containers like FDescribe, FContext, FWhen or FIt. 
Deprecated and ignored: use --forbid-focus-container instead") - a.Flags.Var(&config.ForbidFocus, "forbid-focus-container", "trigger a warning for ginkgo focus containers like FDescribe, FContext, FWhen or FIt; default = false.") - a.Flags.Var(&config.ForbidSpecPollution, "forbid-spec-pollution", "trigger a warning for variable assignments in ginkgo containers like Describe, Context and When, instead of in BeforeEach(); default = false.") - a.Flags.Var(&config.ForceSucceedForFuncs, "force-succeed", "force using the Succeed matcher for error functions, and the HaveOccurred matcher for non-function error values") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return a } diff --git a/vendor/github.com/nunnatsa/ginkgolinter/doc.go b/vendor/github.com/nunnatsa/ginkgolinter/doc.go index de1225acf3..2a935e9b34 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/doc.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/doc.go @@ -94,11 +94,7 @@ For example: Eventually(func() bool { return true }, time.Second*10, 500*time.Millisecond).ProbeEvery(time.Millisecond * 500).Should(BeTrue()) * async timing interval: non-time.Duration intervals [Style] -<<<<<<< HEAD gomega supports a few formats for timeout and polling intervals, when using the old format (the last two parameters of Eventually and Consistently): -======= -gomega supports a few formats for timeout and polling intervals, when using the old format (the last two parameters of Eventually and Constantly): ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) * time.Duration * any kind of numeric value, as number of seconds * duration string like "12s" diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actual.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actual.go index b278afac52..5bd6dd6e7e 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actual.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actual.go @@ -21,18 +21,8 @@ type Actual struct { actualOffset int } -<<<<<<< HEAD func New(origExpr, cloneExpr *ast.CallExpr, orig *ast.CallExpr, clone *ast.CallExpr, pass *analysis.Pass, timePkg string, info *gomegahandler.GomegaBasicInfo) (*Actual, bool) { arg, actualOffset := getActualArgPayload(orig, clone, pass, info) -======= -func New(origExpr, cloneExpr *ast.CallExpr, orig *ast.CallExpr, clone *ast.CallExpr, pass *analysis.Pass, handler gomegahandler.Handler, timePkg string, errMethodExists bool) (*Actual, bool) { - funcName, ok := handler.GetActualFuncName(orig) - if !ok { - return nil, false - } - - arg, actualOffset := getActualArgPayload(orig, clone, pass, funcName, errMethodExists) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if arg == nil { return nil, false } @@ -50,11 +40,7 @@ func New(origExpr, cloneExpr *ast.CallExpr, orig *ast.CallExpr, clone *ast.CallE isTuple = tpl.Len() > 1 } -<<<<<<< HEAD isAsyncExpr := gomegainfo.IsAsyncActualMethod(info.MethodName) -======= - isAsyncExpr := gomegainfo.IsAsyncActualMethod(funcName) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var asyncArg *AsyncArg if isAsyncExpr { diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actualarg.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actualarg.go index efe1ee1f0d..5b6cfbbc44 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actualarg.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actualarg.go @@ -8,10 
+8,7 @@ import ( "golang.org/x/tools/go/analysis" "github.com/nunnatsa/ginkgolinter/internal/expression/value" -<<<<<<< HEAD "github.com/nunnatsa/ginkgolinter/internal/gomegahandler" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/nunnatsa/ginkgolinter/internal/gomegainfo" "github.com/nunnatsa/ginkgolinter/internal/reverseassertion" ) @@ -44,24 +41,15 @@ func (a ArgType) Is(val ArgType) bool { return a&val != 0 } -<<<<<<< HEAD func getActualArgPayload(origActualExpr, actualExprClone *ast.CallExpr, pass *analysis.Pass, info *gomegahandler.GomegaBasicInfo) (ArgPayload, int) { origArgExpr, argExprClone, actualOffset, isGomegaExpr := getActualArg(origActualExpr, actualExprClone, info.MethodName, pass) -======= -func getActualArgPayload(origActualExpr, actualExprClone *ast.CallExpr, pass *analysis.Pass, actualMethodName string, errMethodExists bool) (ArgPayload, int) { - origArgExpr, argExprClone, actualOffset, isGomegaExpr := getActualArg(origActualExpr, actualExprClone, actualMethodName, pass) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !isGomegaExpr { return nil, 0 } var arg ArgPayload -<<<<<<< HEAD if info.HasErrorMethod { -======= - if errMethodExists { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) arg = &ErrorMethodPayload{} } else if value.IsExprError(pass, origArgExpr) { arg = newErrPayload(origArgExpr, argExprClone, pass) diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/expression.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/expression.go index 86db59cfce..6e8e0db6ac 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/expression.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/expression.go @@ -27,12 +27,8 @@ type GomegaExpression struct { origAssertionFuncName string actualFuncName string -<<<<<<< HEAD isAsync bool isUsingGomegaVar bool -======= - isAsync bool ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) actual *actual.Actual matcher *matcher.Matcher @@ -41,13 +37,8 @@ type GomegaExpression struct { } func New(origExpr *ast.CallExpr, pass *analysis.Pass, handler gomegahandler.Handler, timePkg string) (*GomegaExpression, bool) { -<<<<<<< HEAD info, ok := handler.GetGomegaBasicInfo(origExpr) if !ok || !gomegainfo.IsActualMethod(info.MethodName) { -======= - actualMethodName, ok := handler.GetActualFuncName(origExpr) - if !ok || !gomegainfo.IsActualMethod(actualMethodName) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil, false } @@ -55,24 +46,14 @@ func New(origExpr *ast.CallExpr, pass *analysis.Pass, handler gomegahandler.Hand if !ok || !gomegainfo.IsAssertionFunc(origSel.Sel.Name) { return &GomegaExpression{ orig: origExpr, -<<<<<<< HEAD actualFuncName: info.MethodName, -======= - actualFuncName: actualMethodName, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }, true } exprClone := astcopy.CallExpr(origExpr) selClone := exprClone.Fun.(*ast.SelectorExpr) -<<<<<<< HEAD origActual := handler.GetActualExpr(origSel) -======= - errMethodExists := false - - origActual := handler.GetActualExpr(origSel, &errMethodExists) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if origActual == nil { return nil, false } @@ -82,11 +63,7 @@ func New(origExpr *ast.CallExpr, pass *analysis.Pass, handler gomegahandler.Hand return nil, false } -<<<<<<< HEAD actl, ok := actual.New(origExpr, exprClone, origActual, actualClone, pass, timePkg, info) -======= - actl, ok := actual.New(origExpr, exprClone, origActual, 
actualClone, pass, handler, timePkg, errMethodExists) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !ok { return nil, false } @@ -111,16 +88,10 @@ func New(origExpr *ast.CallExpr, pass *analysis.Pass, handler gomegahandler.Hand assertionFuncName: origSel.Sel.Name, origAssertionFuncName: origSel.Sel.Name, -<<<<<<< HEAD actualFuncName: info.MethodName, isAsync: actl.IsAsync(), isUsingGomegaVar: info.UseGomegaVar, -======= - actualFuncName: actualMethodName, - - isAsync: actl.IsAsync(), ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) actual: actl, matcher: mtchr, @@ -164,13 +135,10 @@ func (e *GomegaExpression) IsAsync() bool { return e.isAsync } -<<<<<<< HEAD func (e *GomegaExpression) IsUsingGomegaVar() bool { return e.isUsingGomegaVar } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (e *GomegaExpression) ReverseAssertionFuncLogic() { assertionFunc := e.clone.Fun.(*ast.SelectorExpr).Sel newName := reverseassertion.ChangeAssertionLogic(assertionFunc.Name) diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/matcher/matcher.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/matcher/matcher.go index 5444ea1505..7a983cc9e8 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/matcher/matcher.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/matcher/matcher.go @@ -41,22 +41,13 @@ func New(origMatcher, matcherClone *ast.CallExpr, pass *analysis.Pass, handler g reverse := false var assertFuncName string for { -<<<<<<< HEAD info, ok := handler.GetGomegaBasicInfo(origMatcher) -======= - ok := false - assertFuncName, ok = handler.GetActualFuncName(origMatcher) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !ok { return nil, false } -<<<<<<< HEAD if info.MethodName != "Not" { assertFuncName = info.MethodName -======= - if assertFuncName != "Not" { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) break } diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/value/value.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/value/value.go index ef855830f7..ba74722d27 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/value/value.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/value/value.go @@ -194,13 +194,10 @@ func IsExprError(pass *analysis.Pass, expr ast.Expr) bool { return interfaces.ImplementsError(actualArgType) case *gotypes.Pointer: -<<<<<<< HEAD if interfaces.ImplementsError(t) { return true } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if tt, ok := t.Elem().(*gotypes.Named); ok { return interfaces.ImplementsError(tt) } diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/ginkgohandler/handling.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/ginkgohandler/handling.go index ead016b18c..322bbc4533 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/ginkgohandler/handling.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/ginkgohandler/handling.go @@ -20,19 +20,11 @@ const ( func handleGinkgoSpecs(expr ast.Expr, config types.Config, pass *analysis.Pass, ginkgoHndlr Handler) bool { goDeeper := false if exp, ok := expr.(*ast.CallExpr); ok { -<<<<<<< HEAD if config.ForbidFocus && checkFocusContainer(pass, ginkgoHndlr, exp) { goDeeper = true } if config.ForbidSpecPollution && checkAssignmentsInContainer(pass, ginkgoHndlr, exp) { -======= - if bool(config.ForbidFocus) && checkFocusContainer(pass, ginkgoHndlr, exp) { - goDeeper = true 
- } - - if bool(config.ForbidSpecPollution) && checkAssignmentsInContainer(pass, ginkgoHndlr, exp) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) goDeeper = true } } diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/dothandler.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/dothandler.go index 50a90eacc3..8ab87c76e9 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/dothandler.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/dothandler.go @@ -14,7 +14,6 @@ type dotHandler struct { pass *analysis.Pass } -<<<<<<< HEAD // GetGomegaBasicInfo returns the name of the gomega function, e.g. `Expect` + some additional info func (h dotHandler) GetGomegaBasicInfo(expr *ast.CallExpr) (*GomegaBasicInfo, bool) { info := &GomegaBasicInfo{} @@ -43,26 +42,6 @@ func (h dotHandler) GetGomegaBasicInfo(expr *ast.CallExpr) (*GomegaBasicInfo, bo return nil, false } } -======= -// GetActualFuncName returns the name of the gomega function, e.g. `Expect` -func (h dotHandler) GetActualFuncName(expr *ast.CallExpr) (string, bool) { - switch actualFunc := expr.Fun.(type) { - case *ast.Ident: - return actualFunc.Name, true - case *ast.SelectorExpr: - if h.isGomegaVar(actualFunc.X) { - return actualFunc.Sel.Name, true - } - - if x, ok := actualFunc.X.(*ast.CallExpr); ok { - return h.GetActualFuncName(x) - } - - case *ast.CallExpr: - return h.GetActualFuncName(actualFunc) - } - return "", false ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // ReplaceFunction replaces the function with another one, for fix suggestions @@ -82,11 +61,7 @@ func (dotHandler) GetNewWrapperMatcher(name string, existing *ast.CallExpr) *ast } } -<<<<<<< HEAD func (h dotHandler) GetActualExpr(assertionFunc *ast.SelectorExpr) *ast.CallExpr { -======= -func (h dotHandler) GetActualExpr(assertionFunc *ast.SelectorExpr, errMethodExists *bool) *ast.CallExpr { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) actualExpr, ok := assertionFunc.X.(*ast.CallExpr) if !ok { return nil @@ -101,15 +76,7 @@ func (h dotHandler) GetActualExpr(assertionFunc *ast.SelectorExpr, errMethodExis return actualExpr } } else { -<<<<<<< HEAD return h.GetActualExpr(fun) -======= - if fun.Sel.Name == "Error" { - *errMethodExists = true - } - - return h.GetActualExpr(fun, errMethodExists) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } return nil diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/handler.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/handler.go index 970c7c4e13..881ec87896 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/handler.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/handler.go @@ -14,34 +14,23 @@ const ( // in imported with "." name, custom name or without any name. type Handler interface { // GetActualFuncName returns the name of the gomega function, e.g. 
`Expect` -<<<<<<< HEAD GetGomegaBasicInfo(*ast.CallExpr) (*GomegaBasicInfo, bool) // ReplaceFunction replaces the function with another one, for fix suggestions ReplaceFunction(*ast.CallExpr, *ast.Ident) GetActualExpr(assertionFunc *ast.SelectorExpr) *ast.CallExpr -======= - GetActualFuncName(*ast.CallExpr) (string, bool) - // ReplaceFunction replaces the function with another one, for fix suggestions - ReplaceFunction(*ast.CallExpr, *ast.Ident) - - GetActualExpr(assertionFunc *ast.SelectorExpr, errMethodExists *bool) *ast.CallExpr ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) GetActualExprClone(origFunc, funcClone *ast.SelectorExpr) *ast.CallExpr GetNewWrapperMatcher(name string, existing *ast.CallExpr) *ast.CallExpr } -<<<<<<< HEAD type GomegaBasicInfo struct { MethodName string UseGomegaVar bool HasErrorMethod bool } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // GetGomegaHandler returns a gomegar handler according to the way gomega was imported in the specific file func GetGomegaHandler(file *ast.File, pass *analysis.Pass) Handler { for _, imp := range file.Imports { diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/namedhandler.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/namedhandler.go index 62e27e15b8..61c471f4c2 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/namedhandler.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/namedhandler.go @@ -18,7 +18,6 @@ type nameHandler struct { pass *analysis.Pass } -<<<<<<< HEAD // GetGomegaBasicInfo returns the name of the gomega function, e.g. `Expect` + some additional info func (g nameHandler) GetGomegaBasicInfo(expr *ast.CallExpr) (*GomegaBasicInfo, bool) { info := &GomegaBasicInfo{} @@ -52,30 +51,6 @@ func (g nameHandler) GetGomegaBasicInfo(expr *ast.CallExpr) (*GomegaBasicInfo, b return nil, false } } -======= -// GetActualFuncName returns the name of the gomega function, e.g. 
`Expect` -func (g nameHandler) GetActualFuncName(expr *ast.CallExpr) (string, bool) { - selector, ok := expr.Fun.(*ast.SelectorExpr) - if !ok { - return "", false - } - - switch x := selector.X.(type) { - case *ast.Ident: - if x.Name != g.name { - if !g.isGomegaVar(x) { - return "", false - } - } - - return selector.Sel.Name, true - - case *ast.CallExpr: - return g.GetActualFuncName(x) - } - - return "", false ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // ReplaceFunction replaces the function with another one, for fix suggestions @@ -87,11 +62,7 @@ func (g nameHandler) isGomegaVar(x ast.Expr) bool { return gomegainfo.IsGomegaVar(x, g.pass) } -<<<<<<< HEAD func (g nameHandler) GetActualExpr(assertionFunc *ast.SelectorExpr) *ast.CallExpr { -======= -func (g nameHandler) GetActualExpr(assertionFunc *ast.SelectorExpr, errMethodExists *bool) *ast.CallExpr { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) actualExpr, ok := assertionFunc.X.(*ast.CallExpr) if !ok { return nil @@ -109,14 +80,7 @@ func (g nameHandler) GetActualExpr(assertionFunc *ast.SelectorExpr, errMethodExi return actualExpr } } else { -<<<<<<< HEAD return g.GetActualExpr(fun) -======= - if fun.Sel.Name == "Error" { - *errMethodExists = true - } - return g.GetActualExpr(fun, errMethodExists) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } return nil diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/asyncfunccallrule.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/asyncfunccallrule.go index a7b5bb0003..307cd2d125 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/asyncfunccallrule.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/asyncfunccallrule.go @@ -19,19 +19,11 @@ const valueInEventually = "use a function call in %[1]s. 
This actually checks no type AsyncFuncCallRule struct{} func (r AsyncFuncCallRule) isApplied(gexp *expression.GomegaExpression, config types.Config) bool { -<<<<<<< HEAD if config.SuppressAsync || !gexp.IsAsync() { return false } if asyncArg := gexp.GetAsyncActualArg(); asyncArg != nil { -======= - if bool(config.SuppressAsync) || !gexp.IsAsync() { - return false - } - - if asyncArg := gexp.GetAsyncActualArg(); asyncRules != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return !asyncArg.IsValid() } diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/asynctimeintervalsrule.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/asynctimeintervalsrule.go index e754be8f0c..ca5c326195 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/asynctimeintervalsrule.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/asynctimeintervalsrule.go @@ -20,11 +20,7 @@ const ( type AsyncTimeIntervalsRule struct{} func (r AsyncTimeIntervalsRule) isApplied(gexp *expression.GomegaExpression, config types.Config) bool { -<<<<<<< HEAD return !config.SuppressAsync && config.ValidateAsyncIntervals && gexp.IsAsync() -======= - return !bool(config.SuppressAsync) && bool(config.ValidateAsyncIntervals) && gexp.IsAsync() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (r AsyncTimeIntervalsRule) Apply(gexp *expression.GomegaExpression, config types.Config, reportBuilder *reports.Builder) bool { diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/equaldifferenttypesrule.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/equaldifferenttypesrule.go index 437b8a199d..4b6eafdda0 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/equaldifferenttypesrule.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/equaldifferenttypesrule.go @@ -14,11 +14,7 @@ const compareDifferentTypes = "use %[1]s with different types: Comparing %[2]s w type EqualDifferentTypesRule struct{} func (r EqualDifferentTypesRule) isApplied(config types.Config) bool { -<<<<<<< HEAD return !config.SuppressTypeCompare -======= - return !bool(config.SuppressTypeCompare) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (r EqualDifferentTypesRule) Apply(gexp *expression.GomegaExpression, config types.Config, reportBuilder *reports.Builder) bool { diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/equalnilrule.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/equalnilrule.go index 275b226910..f27dfb0d88 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/equalnilrule.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/equalnilrule.go @@ -12,11 +12,7 @@ import ( type EqualNilRule struct{} func (r EqualNilRule) isApplied(gexp *expression.GomegaExpression, config types.Config) bool { -<<<<<<< HEAD return !config.SuppressNil && -======= - return !bool(config.SuppressNil) && ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) gexp.MatcherTypeIs(matcher.EqualValueMatcherType) } diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/havelen0.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/havelen0.go index 25ab42f39a..159fb615a0 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/havelen0.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/havelen0.go @@ -10,11 +10,7 @@ import ( type HaveLen0 struct{} func (r *HaveLen0) isApplied(gexp *expression.GomegaExpression, config types.Config) bool { -<<<<<<< HEAD 
return gexp.MatcherTypeIs(matcher.HaveLenZeroMatcherType) && !config.AllowHaveLen0 -======= - return gexp.MatcherTypeIs(matcher.HaveLenZeroMatcherType) && !bool(config.AllowHaveLen0) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (r *HaveLen0) Apply(gexp *expression.GomegaExpression, config types.Config, reportBuilder *reports.Builder) bool { diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/haveoccurredrule.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/haveoccurredrule.go index 0067c812ea..317e22ed3d 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/haveoccurredrule.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/haveoccurredrule.go @@ -24,11 +24,7 @@ func (r HaveOccurredRule) Apply(gexp *expression.GomegaExpression, config types. return true } -<<<<<<< HEAD if config.ForceSucceedForFuncs && gexp.GetActualArg().(*actual.ErrPayload).IsFunc() { -======= - if bool(config.ForceSucceedForFuncs) && gexp.GetActualArg().(*actual.ErrPayload).IsFunc() { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) gexp.ReverseAssertionFuncLogic() gexp.SetMatcherSucceed() reportBuilder.AddIssue(true, "prefer using the Succeed matcher for error function, instead of HaveOccurred") diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/nilcomparerule.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/nilcomparerule.go index faa3ff2323..6677dce3bb 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/nilcomparerule.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/nilcomparerule.go @@ -42,15 +42,9 @@ func (r NilCompareRule) isApplied(gexp *expression.GomegaExpression, config type return false, false } -<<<<<<< HEAD isErr := actl.IsError() && !config.SuppressErr if !isErr && config.SuppressNil { -======= - isErr := actl.IsError() && !bool(config.SuppressErr) - - if !isErr && bool(config.SuppressNil) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return isErr, false } diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/succeedrule.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/succeedrule.go index f18c39991b..45a8d948b4 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/succeedrule.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/succeedrule.go @@ -28,11 +28,7 @@ func (r SucceedRule) Apply(gexp *expression.GomegaExpression, config types.Confi return true } -<<<<<<< HEAD if config.ForceSucceedForFuncs && !gexp.GetActualArg().(*actual.ErrPayload).IsFunc() { -======= - if bool(config.ForceSucceedForFuncs) && !gexp.GetActualArg().(*actual.ErrPayload).IsFunc() { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) gexp.ReverseAssertionFuncLogic() gexp.SetMatcherHaveOccurred() diff --git a/vendor/github.com/nunnatsa/ginkgolinter/types/boolean.go b/vendor/github.com/nunnatsa/ginkgolinter/types/boolean.go deleted file mode 100644 index be510c4e95..0000000000 --- a/vendor/github.com/nunnatsa/ginkgolinter/types/boolean.go +++ /dev/null @@ -1,32 +0,0 @@ -package types - -import ( - "errors" - "strings" -) - -// Boolean is a bool, implementing the flag.Value interface, to be used as a flag var. 
-type Boolean bool - -func (b *Boolean) Set(value string) error { - if b == nil { - return errors.New("trying to set nil parameter") - } - switch strings.ToLower(value) { - case "true": - *b = true - case "false": - *b = false - default: - return errors.New(value + " is not a Boolean value") - - } - return nil -} - -func (b Boolean) String() string { - if b { - return "true" - } - return "false" -} diff --git a/vendor/github.com/nunnatsa/ginkgolinter/types/config.go b/vendor/github.com/nunnatsa/ginkgolinter/types/config.go index 6c81f365b2..81a9ebe327 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/types/config.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/types/config.go @@ -17,7 +17,6 @@ const ( ) type Config struct { -<<<<<<< HEAD SuppressLen bool SuppressNil bool SuppressErr bool @@ -34,24 +33,6 @@ type Config struct { func (s *Config) AllTrue() bool { return s.SuppressLen && s.SuppressNil && s.SuppressErr && s.SuppressCompare && s.SuppressAsync && !s.ForbidFocus -======= - SuppressLen Boolean - SuppressNil Boolean - SuppressErr Boolean - SuppressCompare Boolean - SuppressAsync Boolean - ForbidFocus Boolean - SuppressTypeCompare Boolean - AllowHaveLen0 Boolean - ForceExpectTo Boolean - ValidateAsyncIntervals Boolean - ForbidSpecPollution Boolean - ForceSucceedForFuncs Boolean -} - -func (s *Config) AllTrue() bool { - return bool(s.SuppressLen && s.SuppressNil && s.SuppressErr && s.SuppressCompare && s.SuppressAsync && !s.ForbidFocus) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (s *Config) Clone() Config { diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 14a88daef8..1448439b7f 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -45,11 +45,7 @@ func ResponseFormat(h http.Header) Format { mediatype, params, err := mime.ParseMediaType(ct) if err != nil { -<<<<<<< HEAD return FmtUnknown -======= - return fmtUnknown ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } const textType = "text/plain" @@ -57,7 +53,6 @@ func ResponseFormat(h http.Header) Format { switch mediatype { case ProtoType: if p, ok := params["proto"]; ok && p != ProtoProtocol { -<<<<<<< HEAD return FmtUnknown } if e, ok := params["encoding"]; ok && e != "delimited" { @@ -73,23 +68,6 @@ func ResponseFormat(h http.Header) Format { } return FmtUnknown -======= - return fmtUnknown - } - if e, ok := params["encoding"]; ok && e != "delimited" { - return fmtUnknown - } - return fmtProtoDelim - - case textType: - if v, ok := params["version"]; ok && v != TextVersion { - return fmtUnknown - } - return fmtText - } - - return fmtUnknown ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // NewDecoder returns a new decoder based on the given input format. 
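The hunk above re-exports the format constants (`FmtUnknown` and friends); a minimal sketch of the intended call pattern, pairing `ResponseFormat` with `NewDecoder` (the endpoint URL is assumed to serve Prometheus-style metrics):

```go
package main

import (
	"fmt"
	"net/http"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	resp, err := http.Get("http://localhost:9090/metrics") // illustrative endpoint
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// ResponseFormat inspects the Content-Type header and returns one of the
	// exported Fmt* constants (FmtUnknown if it cannot be negotiated).
	format := expfmt.ResponseFormat(resp.Header)

	dec := expfmt.NewDecoder(resp.Body, format)
	for {
		var mf dto.MetricFamily
		if err := dec.Decode(&mf); err != nil {
			break // io.EOF once the stream is exhausted
		}
		fmt.Println(mf.GetName())
	}
}
```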
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index c0237d02da..d7f3d76f55 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -68,11 +68,7 @@ func Negotiate(h http.Header) Format { if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { switch Format(escapeParam) { case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: -<<<<<<< HEAD escapingScheme = Format("; escaping=" + escapeParam) -======= - escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: // If the escaping parameter is unknown, ignore it. } @@ -81,7 +77,6 @@ func Negotiate(h http.Header) Format { if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": -<<<<<<< HEAD return FmtProtoDelim + escapingScheme case "text": return FmtProtoText + escapingScheme @@ -94,20 +89,6 @@ func Negotiate(h http.Header) Format { } } return FmtText + escapingScheme -======= - return fmtProtoDelim + escapingScheme - case "text": - return fmtProtoText + escapingScheme - case "compact-text": - return fmtProtoCompact + escapingScheme - } - } - if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return fmtText + escapingScheme - } - } - return fmtText + escapingScheme ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // NegotiateIncludingOpenMetrics works like Negotiate but includes @@ -120,11 +101,7 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { switch Format(escapeParam) { case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: -<<<<<<< HEAD escapingScheme = Format("; escaping=" + escapeParam) -======= - escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: // If the escaping parameter is unknown, ignore it. 
} @@ -133,7 +110,6 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": -<<<<<<< HEAD return FmtProtoDelim + escapingScheme case "text": return FmtProtoText + escapingScheme @@ -143,22 +119,10 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { return FmtText + escapingScheme -======= - return fmtProtoDelim + escapingScheme - case "text": - return fmtProtoText + escapingScheme - case "compact-text": - return fmtProtoCompact + escapingScheme - } - } - if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return fmtText + escapingScheme ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") { switch ver { case OpenMetricsVersion_1_0_0: -<<<<<<< HEAD return FmtOpenMetrics_1_0_0 + escapingScheme default: return FmtOpenMetrics_0_0_1 + escapingScheme @@ -166,15 +130,6 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { } } return FmtText + escapingScheme -======= - return fmtOpenMetrics_1_0_0 + escapingScheme - default: - return fmtOpenMetrics_0_0_1 + escapingScheme - } - } - } - return fmtText + escapingScheme ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // NewEncoder returns a new encoder based on content type negotiation. All diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index ff84a6ecab..b26886560d 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -15,11 +15,7 @@ package expfmt import ( -<<<<<<< HEAD "errors" -======= - "fmt" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "strings" "github.com/prometheus/common/model" @@ -36,23 +32,15 @@ type Format string // it on the wire, new content-type strings will have to be agreed upon and // added here. const ( -<<<<<<< HEAD TextVersion = "0.0.4" ProtoType = `application/vnd.google.protobuf` ProtoProtocol = `io.prometheus.client.MetricFamily` // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" -======= - TextVersion = "0.0.4" - ProtoType = `application/vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` - protoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) OpenMetricsType = `application/openmetrics-text` OpenMetricsVersion_0_0_1 = "0.0.1" OpenMetricsVersion_1_0_0 = "1.0.0" -<<<<<<< HEAD // The Content-Type values for the different wire protocols. Do not do direct // comparisons to these constants, instead use the comparison functions. // Deprecated: Use expfmt.NewFormat(expfmt.TypeUnknown) instead. @@ -69,18 +57,6 @@ const ( FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` -======= - // The Content-Type values for the different wire protocols. Note that these - // values are now unexported. If code was relying on comparisons to these - // constants, instead use FormatType(). 
- fmtUnknown Format = `` - fmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` - fmtProtoDelim Format = protoFmt + ` encoding=delimited` - fmtProtoText Format = protoFmt + ` encoding=text` - fmtProtoCompact Format = protoFmt + ` encoding=compact-text` - fmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` - fmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const ( @@ -110,7 +86,6 @@ const ( func NewFormat(t FormatType) Format { switch t { case TypeProtoCompact: -<<<<<<< HEAD return FmtProtoCompact case TypeProtoDelim: return FmtProtoDelim @@ -122,19 +97,6 @@ func NewFormat(t FormatType) Format { return FmtOpenMetrics_1_0_0 default: return FmtUnknown -======= - return fmtProtoCompact - case TypeProtoDelim: - return fmtProtoDelim - case TypeProtoText: - return fmtProtoText - case TypeTextPlain: - return fmtText - case TypeOpenMetrics: - return fmtOpenMetrics_1_0_0 - default: - return fmtUnknown ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -142,7 +104,6 @@ func NewFormat(t FormatType) Format { // specified version number. func NewOpenMetricsFormat(version string) (Format, error) { if version == OpenMetricsVersion_0_0_1 { -<<<<<<< HEAD return FmtOpenMetrics_0_0_1, nil } if version == OpenMetricsVersion_1_0_0 { @@ -172,14 +133,6 @@ func (f Format) WithEscapingScheme(s model.EscapingScheme) Format { } terms = append(terms, model.EscapingKey+"="+s.String()) return Format(strings.Join(terms, "; ")) -======= - return fmtOpenMetrics_0_0_1, nil - } - if version == OpenMetricsVersion_1_0_0 { - return fmtOpenMetrics_1_0_0, nil - } - return fmtUnknown, fmt.Errorf("unknown open metrics version string") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // FormatType deduces an overall FormatType for the given format. diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index a29481261d..a21ed4ec1f 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -38,11 +38,7 @@ type EncoderOption func(*encoderOption) // WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder // to include _created lines (See -<<<<<<< HEAD // https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#counter-1). -======= -// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#counter-1). ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Created timestamps can improve the accuracy of series reset detection, but // come with a bandwidth cost. // @@ -106,11 +102,7 @@ func WithUnit() EncoderOption { // // - According to the OM specs, the `# UNIT` line is optional, but if populated, // the unit has to be present in the metric name as its suffix: -<<<<<<< HEAD // (see https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#unit). -======= -// (see https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#unit). 
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // However, in order to accommodate any potential scenario where such a change in the // metric name is not desirable, the users are here given the choice of either explicitly // opt in, in case they wish for the unit to be included in the output AND in the metric name @@ -160,13 +152,8 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") { compliantName = name[:len(name)-6] } -<<<<<<< HEAD if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, "_"+*in.Unit) { compliantName = compliantName + "_" + *in.Unit -======= - if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, fmt.Sprintf("_%s", *in.Unit)) { - compliantName = compliantName + fmt.Sprintf("_%s", *in.Unit) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Comments, first HELP, then TYPE. @@ -490,11 +477,7 @@ func writeOpenMetricsNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces, quoted. -<<<<<<< HEAD if !model.IsValidLegacyMetricName(name) { -======= - if !model.IsValidLegacyMetricName(model.LabelValue(name)) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) metricInsideBraces = true err := w.WriteByte(separator) written++ diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index a702476fb8..4b86434b33 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -354,11 +354,7 @@ func writeNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces. -<<<<<<< HEAD if !model.IsValidLegacyMetricName(name) { -======= - if !model.IsValidLegacyMetricName(model.LabelValue(name)) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) metricInsideBraces = true err := w.WriteByte(separator) written++ @@ -502,11 +498,7 @@ func writeInt(w enhancedWriter, i int64) (int, error) { // writeName writes a string as-is if it complies with the legacy naming // scheme, or escapes it in double quotes if not. func writeName(w enhancedWriter, name string) (int, error) { -<<<<<<< HEAD if model.IsValidLegacyMetricName(name) { -======= - if model.IsValidLegacyMetricName(model.LabelValue(name)) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return w.WriteString(name) } var written int diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 0aa066c462..b4607fe4d2 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -22,15 +22,9 @@ import ( "math" "strconv" "strings" -<<<<<<< HEAD "unicode/utf8" dto "github.com/prometheus/client_model/go" -======= - - dto "github.com/prometheus/client_model/go" - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "google.golang.org/protobuf/proto" "github.com/prometheus/common/model" @@ -66,10 +60,7 @@ type TextParser struct { currentMF *dto.MetricFamily currentMetric *dto.Metric currentLabelPair *dto.LabelPair -<<<<<<< HEAD currentLabelPairs []*dto.LabelPair // Temporarily stores label pairs while parsing a metric line. 
 	// The remaining member variables are only used for summaries/histograms.
 	currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
@@ -84,12 +75,9 @@ type TextParser struct {
 	// count and sum of that summary/histogram.
 	currentIsSummaryCount, currentIsSummarySum     bool
 	currentIsHistogramCount, currentIsHistogramSum bool
+	// These indicate if the metric name from the current line being parsed is inside
+	// braces and if that metric name was found respectively.
+	currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool
 }

 // TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
@@ -153,21 +141,15 @@ func (p *TextParser) reset(in io.Reader) {
 	}
 	p.currentQuantile = math.NaN()
 	p.currentBucket = math.NaN()
+	p.currentMF = nil
 }

 // startOfLine represents the state where the next byte read from p.buf is the
 // start of a line (or whitespace leading up to it).
 func (p *TextParser) startOfLine() stateFn {
 	p.lineCount++
+	p.currentMetricIsInsideBraces = false
+	p.currentMetricInsideBracesIsPresent = false
 	if p.skipBlankTab(); p.err != nil {
 		// This is the only place that we expect to see io.EOF,
 		// which is not an error but the signal that we are done.
@@ -183,12 +165,9 @@ func (p *TextParser) startOfLine() stateFn {
 	case '#':
 		return p.startComment
 	case '\n':
 		return p.startOfLine // Empty line, start the next one.
+	case '{':
+		p.currentMetricIsInsideBraces = true
+		return p.readingLabels
 	}
 	return p.readingMetricName
 }
@@ -306,11 +285,8 @@ func (p *TextParser) startLabelName() stateFn {
 		return nil // Unexpected end of input.
 	}
 	if p.currentByte == '}' {
+		p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
+		p.currentLabelPairs = nil
 		if p.skipBlankTab(); p.err != nil {
 			return nil // Unexpected end of input.
 		}
@@ -323,7 +299,6 @@ func (p *TextParser) startLabelName() stateFn {
 		p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
 		return nil
 	}
+	if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
@@ -363,8 +338,6 @@ func (p *TextParser) startLabelName() stateFn {
+		p.currentLabelPairs = nil
+		return nil
+	}
 	p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
 	if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
 		p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
@@ -374,35 +347,17 @@ func (p *TextParser) startLabelName() stateFn {
 	// labels to 'real' labels.
 	if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
 		!(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
-		p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
+		p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair)
 	}
-	if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
-		return nil // Unexpected end of input.
-	}
-	if p.currentByte != '=' {
-		p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
-		return nil
-	}
 	// Check for duplicate label names.
 	labels := make(map[string]struct{})
-	for _, l := range p.currentMetric.Label {
+	for _, l := range p.currentLabelPairs {
 		lName := l.GetName()
 		if _, exists := labels[lName]; !exists {
 			labels[lName] = struct{}{}
 		} else {
 			p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName()))
+			p.currentLabelPairs = nil
 			return nil
 		}
 	}
@@ -435,10 +390,7 @@ func (p *TextParser) startLabelValue() stateFn {
 		if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
 			// Create a more helpful error message.
 			p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
+			p.currentLabelPairs = nil
 			return nil
 		}
 	} else {
@@ -465,25 +417,19 @@ func (p *TextParser) startLabelValue() stateFn {
 		return p.startLabelName

 	case '}':
+		if p.currentMF == nil {
+			p.parseError("invalid metric name")
+			return nil
+		}
+		p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
+		p.currentLabelPairs = nil
 		if p.skipBlankTab(); p.err != nil {
 			return nil // Unexpected end of input.
 		}
 		return p.readingValue
 	default:
 		p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
+		p.currentLabelPairs = nil
 		return nil
 	}
 }
@@ -692,11 +638,8 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
 				p.currentToken.WriteByte(p.currentByte)
 			case 'n':
 				p.currentToken.WriteByte('\n')
+			case '"':
+				p.currentToken.WriteByte('"')
 			default:
 				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
 				return
@@ -722,7 +665,6 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
 // but not into p.currentToken.
 func (p *TextParser) readTokenAsMetricName() {
 	p.currentToken.Reset()
+	// A UTF-8 metric name must be quoted and may have escaped characters.
+	quoted := false
+	escaped := false
@@ -762,15 +704,6 @@ func (p *TextParser) readTokenAsMetricName() {
-	if !isValidMetricNameStart(p.currentByte) {
-		return
-	}
-	for {
-		p.currentToken.WriteByte(p.currentByte)
 		p.currentByte, p.err = p.buf.ReadByte()
-		if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
+		if !isValidMetricNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == ' ') {
 			return
 		}
 	}
 }

@@ -782,7 +715,6 @@ func (p *TextParser) readTokenAsMetricName() {
 // but not into p.currentToken.
 func (p *TextParser) readTokenAsLabelName() {
 	p.currentToken.Reset()
+	// A UTF-8 label name must be quoted and may have escaped characters.
+	quoted := false
+	escaped := false
@@ -822,15 +754,6 @@ func (p *TextParser) readTokenAsLabelName() {
-	if !isValidLabelNameStart(p.currentByte) {
-		return
-	}
-	for {
-		p.currentToken.WriteByte(p.currentByte)
 		p.currentByte, p.err = p.buf.ReadByte()
-		if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
+		if !isValidLabelNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == '=') {
 			return
 		}
 	}
 }
@@ -856,10 +779,7 @@ func (p *TextParser) readTokenAsLabelValue() {
 			p.currentToken.WriteByte('\n')
 		default:
 			p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+			p.currentLabelPairs = nil
 			return
 		}
 		escaped = false
@@ -918,32 +838,19 @@ func (p *TextParser) setOrCreateCurrentMF() {
 }

 func isValidLabelNameStart(b byte) bool {
-	return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
+	return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == '"'
 }

-func isValidLabelNameContinuation(b byte) bool {
-	return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
+func isValidLabelNameContinuation(b byte, quoted bool) bool {
+	return isValidLabelNameStart(b) || (b >= '0' && b <= '9') || (quoted && utf8.ValidString(string(b)))
 }

 func isValidMetricNameStart(b byte) bool {
 	return isValidLabelNameStart(b) || b == ':'
 }

-func isValidMetricNameContinuation(b byte) bool {
-	return isValidLabelNameContinuation(b) || b == ':'
+func isValidMetricNameContinuation(b byte, quoted bool) bool {
+	return isValidLabelNameContinuation(b, quoted) || b == ':'
 }

 func isBlankOrTab(b byte) bool {
@@ -988,11 +895,7 @@ func histogramMetricName(name string) string {

 func parseFloat(s string) (float64, error) {
 	if strings.ContainsAny(s, "pP_") {
-		return 0, fmt.Errorf("unsupported character in float")
+		return 0, errors.New("unsupported character in float")
 	}
 	return strconv.ParseFloat(s, 64)
 }
diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go
index cfe5680f8d..bd3a39e3e1 100644
--- a/vendor/github.com/prometheus/common/model/alert.go
+++ b/vendor/github.com/prometheus/common/model/alert.go
@@ -14,10 +14,7 @@ package model

 import (
+	"errors"
 	"fmt"
 	"time"
 )
@@ -93,27 +90,16 @@ func (a *Alert) StatusAt(ts time.Time) AlertStatus {

 // Validate checks whether the alert data is inconsistent.
 func (a *Alert) Validate() error {
 	if a.StartsAt.IsZero() {
-		return fmt.Errorf("start time missing")
+		return errors.New("start time missing")
 	}
 	if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
-		return fmt.Errorf("start time must be before end time")
+		return errors.New("start time must be before end time")
 	}
 	if err := a.Labels.Validate(); err != nil {
 		return fmt.Errorf("invalid label set: %w", err)
 	}
 	if len(a.Labels) == 0 {
-		return fmt.Errorf("at least one label pair required")
+		return errors.New("at least one label pair required")
 	}
 	if err := a.Annotations.Validate(); err != nil {
 		return fmt.Errorf("invalid annotations: %w", err)
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
index 8a85d43451..73b7aa3e60 100644
--- a/vendor/github.com/prometheus/common/model/labels.go
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -97,37 +97,21 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
 // therewith.
 type LabelName string

-// IsValid returns true iff name matches the pattern of LabelNameRE for legacy
-// names, and iff it's valid UTF-8 if NameValidationScheme is set to
-// UTF8Validation. For the legacy matching, it does not use LabelNameRE for the
-// check but a much faster hardcoded implementation.
+// IsValid returns true iff the name matches the pattern of LabelNameRE when
+// NameValidationScheme is set to LegacyValidation, or valid UTF-8 if
+// NameValidationScheme is set to UTF8Validation.
 func (ln LabelName) IsValid() bool {
 	if len(ln) == 0 {
 		return false
 	}
 	switch NameValidationScheme {
 	case LegacyValidation:
-		for i, b := range ln {
-			if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
-				return false
-			}
-		}
+		return ln.IsValidLegacy()
 	case UTF8Validation:
 		return utf8.ValidString(string(ln))
 	default:
 		panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme))
 	}
+}
+
+// IsValidLegacy returns true iff name matches the pattern of LabelNameRE for
@@ -142,8 +126,6 @@ func (ln LabelName) IsValidLegacy() bool {
+			return false
+		}
+	}
 	return true
 }
diff --git a/vendor/github.com/prometheus/common/model/labelset_string.go b/vendor/github.com/prometheus/common/model/labelset_string.go
index 3cbeefec53..abb2c90018 100644
--- a/vendor/github.com/prometheus/common/model/labelset_string.go
+++ b/vendor/github.com/prometheus/common/model/labelset_string.go
@@ -11,11 +11,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-//go:build go1.21
-
 package model

 import (
diff --git a/vendor/github.com/prometheus/common/model/labelset_string_go120.go b/vendor/github.com/prometheus/common/model/labelset_string_go120.go
deleted file mode 100644
index c4212685e7..0000000000
--- a/vendor/github.com/prometheus/common/model/labelset_string_go120.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !go1.21
-
-package model
-
-import (
-	"fmt"
-	"sort"
-	"strings"
-)
-
-// String was optimized using functions not available for go 1.20
-// or lower. We keep the old implementation for compatibility with client_golang.
-// Once client golang drops support for go 1.20 (scheduled for August 2024), this
-// file can be removed.
-func (l LabelSet) String() string {
-	labelNames := make([]string, 0, len(l))
-	for name := range l {
-		labelNames = append(labelNames, string(name))
-	}
-	sort.Strings(labelNames)
-	lstrs := make([]string, 0, len(l))
-	for _, name := range labelNames {
-		lstrs = append(lstrs, fmt.Sprintf("%s=%q", name, l[LabelName(name)]))
-	}
-	return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
-}
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
index eddea85d80..5766107cf9 100644
--- a/vendor/github.com/prometheus/common/model/metric.go
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -14,17 +14,11 @@
 package model

 import (
+	"errors"
 	"fmt"
 	"regexp"
 	"sort"
+	"strconv"
 	"strings"
 	"unicode/utf8"

@@ -34,7 +28,6 @@ import (

 var (
 	// NameValidationScheme determines the method of name validation to be used by
+	// all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8
+	// mode in isolation from other components that don't support UTF-8 may result
+	// in bugs or other undefined behavior. This value can be set to
@@ -50,20 +43,6 @@ var (
+	// is used in content negotiation to indicate that a system supports UTF-8 and
+	// has that feature enabled.
+	NameEscapingScheme = UnderscoreEscaping
-	// all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 mode
-	// in isolation from other components that don't support UTF-8 may result in
-	// bugs or other undefined behavior. This value is intended to be set by
-	// UTF-8-aware binaries as part of their startup. To avoid need for locking,
-	// this value should be set once, ideally in an init(), before multiple
-	// goroutines are started.
-	NameValidationScheme = LegacyValidation
-
-	// NameEscapingScheme defines the default way that names will be
-	// escaped when presented to systems that do not support UTF-8 names. If the
-	// Content-Type "escaping" term is specified, that will override this value.
-	NameEscapingScheme = ValueEncodingEscaping
 )

 // ValidationScheme is a Go enum for determining how metric and label names will
@@ -187,11 +166,7 @@ func (m Metric) FastFingerprint() Fingerprint {
 func IsValidMetricName(n LabelValue) bool {
 	switch NameValidationScheme {
 	case LegacyValidation:
-		return IsValidLegacyMetricName(n)
+		return IsValidLegacyMetricName(string(n))
 	case UTF8Validation:
 		if len(n) == 0 {
 			return false
@@ -206,11 +181,7 @@ func IsValidMetricName(n LabelValue) bool {
 // legacy validation scheme regardless of the value of NameValidationScheme.
 // This function, however, does not use MetricNameRE for the check but a much
 // faster hardcoded implementation.
-func IsValidLegacyMetricName(n LabelValue) bool {
+func IsValidLegacyMetricName(n string) bool {
 	if len(n) == 0 {
 		return false
 	}
@@ -242,11 +213,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF
 	}

 	// If the name is nil, copy as-is, don't try to escape.
-	if v.Name == nil || IsValidLegacyMetricName(LabelValue(v.GetName())) {
+	if v.Name == nil || IsValidLegacyMetricName(v.GetName()) {
 		out.Name = v.Name
 	} else {
 		out.Name = proto.String(EscapeName(v.GetName(), scheme))
@@ -268,11 +235,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF

 	for _, l := range m.Label {
 		if l.GetName() == MetricNameLabel {
-			if l.Value == nil || IsValidLegacyMetricName(LabelValue(l.GetValue())) {
+			if l.Value == nil || IsValidLegacyMetricName(l.GetValue()) {
 				escaped.Label = append(escaped.Label, l)
 				continue
 			}
@@ -282,11 +245,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF
 			})
 			continue
 		}
-		if l.Name == nil || IsValidLegacyMetricName(LabelValue(l.GetName())) {
+		if l.Name == nil || IsValidLegacyMetricName(l.GetName()) {
 			escaped.Label = append(escaped.Label, l)
 			continue
 		}
@@ -302,30 +261,16 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF

 func metricNeedsEscaping(m *dto.Metric) bool {
 	for _, l := range m.Label {
-		if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(LabelValue(l.GetValue())) {
+		if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(l.GetValue()) {
 			return true
 		}
-		if !IsValidLegacyMetricName(LabelValue(l.GetName())) {
+		if !IsValidLegacyMetricName(l.GetName()) {
 			return true
 		}
 	}
 	return false
 }

-const (
-	lowerhex = "0123456789abcdef"
-)
-
 // EscapeName escapes the incoming name according to the provided escaping
 // scheme. Depending on the rules of escaping, this may cause no change in the
 // string that is returned. (Especially NoEscaping, which by definition is a
 // noop).
@@ -339,11 +284,7 @@ func EscapeName(name string, scheme EscapingScheme) string {
 	case NoEscaping:
 		return name
 	case UnderscoreEscaping:
-		if IsValidLegacyMetricName(LabelValue(name)) {
+		if IsValidLegacyMetricName(name) {
 			return name
 		}
 		for i, b := range name {
@@ -364,25 +305,16 @@ func EscapeName(name string, scheme EscapingScheme) string {
 			} else if isValidLegacyRune(b, i) {
 				escaped.WriteRune(b)
 			} else {
-				escaped.WriteRune('_')
+				escaped.WriteString("__")
 			}
 		}
 		return escaped.String()
 	case ValueEncodingEscaping:
-		if IsValidLegacyMetricName(LabelValue(name)) {
+		if IsValidLegacyMetricName(name) {
 			return name
 		}
 		escaped.WriteString("U__")
 		for i, b := range name {
@@ -392,23 +324,6 @@ func EscapeName(name string, scheme EscapingScheme) string {
-			if isValidLegacyRune(b, i) {
-				escaped.WriteRune(b)
-			} else if !utf8.ValidRune(b) {
-				escaped.WriteString("_FFFD_")
-			} else if b < 0x100 {
-				escaped.WriteRune('_')
-				for s := 4; s >= 0; s -= 4 {
-					escaped.WriteByte(lowerhex[b>>uint(s)&0xF])
-				}
-				escaped.WriteRune('_')
-			} else if b < 0x10000 {
-				escaped.WriteRune('_')
-				for s := 12; s >= 0; s -= 4 {
-					escaped.WriteByte(lowerhex[b>>uint(s)&0xF])
-				}
-				escaped.WriteRune('_')
+			if b == '_' {
+				escaped.WriteString("__")
+			} else if isValidLegacyRune(b, i) {
+				escaped.WriteRune(b)
+			} else if !utf8.ValidRune(b) {
+				escaped.WriteString("_FFFD_")
+			} else {
+				escaped.WriteRune('_')
+				escaped.WriteString(strconv.FormatInt(int64(b), 16))
+				escaped.WriteRune('_')
 			}
 		}
@@ -466,14 +381,9 @@ func UnescapeName(name string, scheme EscapingScheme) string {
 			// We think we are in a UTF-8 code, process it.
 			var utf8Val uint
 			for j := 0; i < len(escapedName); j++ {
-				// This is too many characters for a utf8 value.
-				if j > 4 {
+				// This is too many characters for a utf8 value based on the MaxRune
+				// value of '\U0010FFFF'.
+				if j >= 6 {
 					return name
 				}
 				// Found a closing underscore, convert to a rune, check validity, and append.
@@ -526,11 +436,7 @@ func (e EscapingScheme) String() string {

 func ToEscapingScheme(s string) (EscapingScheme, error) {
 	if s == "" {
-		return NoEscaping, fmt.Errorf("got empty string instead of escaping scheme")
+		return NoEscaping, errors.New("got empty string instead of escaping scheme")
 	}
 	switch s {
 	case AllowUTF8:
@@ -542,10 +448,6 @@ func ToEscapingScheme(s string) (EscapingScheme, error) {
 	case EscapeValues:
 		return ValueEncodingEscaping, nil
 	default:
-		return NoEscaping, fmt.Errorf("unknown format scheme " + s)
+		return NoEscaping, fmt.Errorf("unknown format scheme %s", s)
 	}
 }
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
index 32c7095723..8f91a9702e 100644
--- a/vendor/github.com/prometheus/common/model/silence.go
+++ b/vendor/github.com/prometheus/common/model/silence.go
@@ -15,10 +15,7 @@ package model

 import (
 	"encoding/json"
+	"errors"
 	"fmt"
 	"regexp"
 	"time"
@@ -38,11 +35,7 @@ func (m *Matcher) UnmarshalJSON(b []byte) error {
 	}

 	if len(m.Name) == 0 {
-		return fmt.Errorf("label name in matcher must not be empty")
+		return errors.New("label name in matcher must not be empty")
 	}
 	if m.IsRegex {
 		if _, err := regexp.Compile(m.Value); err != nil {
@@ -85,11 +78,7 @@ type Silence struct {
 // Validate returns true iff all fields of the silence have valid values.
 func (s *Silence) Validate() error {
 	if len(s.Matchers) == 0 {
-		return fmt.Errorf("at least one matcher required")
+		return errors.New("at least one matcher required")
 	}
 	for _, m := range s.Matchers {
 		if err := m.Validate(); err != nil {
@@ -97,7 +86,6 @@ func (s *Silence) Validate() error {
 		}
 	}
 	if s.StartsAt.IsZero() {
-		return fmt.Errorf("start time missing")
+		return errors.New("start time missing")
 	}
 	if s.EndsAt.IsZero() {
-		return fmt.Errorf("end time missing")
+		return errors.New("end time missing")
 	}
 	if s.EndsAt.Before(s.StartsAt) {
-		return fmt.Errorf("start time must be before end time")
+		return errors.New("start time must be before end time")
 	}
 	if s.CreatedBy == "" {
-		return fmt.Errorf("creator information missing")
+		return errors.New("creator information missing")
 	}
 	if s.Comment == "" {
-		return fmt.Errorf("comment missing")
+		return errors.New("comment missing")
 	}
 	if s.CreatedAt.IsZero() {
-		return fmt.Errorf("creation timestamp missing")
+		return errors.New("creation timestamp missing")
 	}
 	return nil
 }
diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go
index 40b5822730..6bfc757d18 100644
--- a/vendor/github.com/prometheus/common/model/value_float.go
+++ b/vendor/github.com/prometheus/common/model/value_float.go
@@ -15,10 +15,7 @@ package model

 import (
 	"encoding/json"
+	"errors"
 	"fmt"
 	"math"
 	"strconv"
@@ -43,11 +40,7 @@ func (v SampleValue) MarshalJSON() ([]byte, error) {

 // UnmarshalJSON implements json.Unmarshaler.
 func (v *SampleValue) UnmarshalJSON(b []byte) error {
 	if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
-		return fmt.Errorf("sample value must be a quoted string")
+		return errors.New("sample value must be a quoted string")
 	}
 	f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
 	if err != nil {
diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go
index 45ac9f202a..895e6a3e83 100644
--- a/vendor/github.com/prometheus/common/model/value_histogram.go
+++ b/vendor/github.com/prometheus/common/model/value_histogram.go
@@ -15,10 +15,7 @@ package model

 import (
 	"encoding/json"
+	"errors"
 	"fmt"
 	"strconv"
 	"strings"
@@ -36,11 +33,7 @@ func (v FloatString) MarshalJSON() ([]byte, error) {

 func (v *FloatString) UnmarshalJSON(b []byte) error {
 	if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
-		return fmt.Errorf("float value must be a quoted string")
+		return errors.New("float value must be a quoted string")
 	}
 	f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
 	if err != nil {
@@ -149,11 +142,7 @@ type SampleHistogramPair struct {

 func (s SampleHistogramPair) MarshalJSON() ([]byte, error) {
 	if s.Histogram == nil {
-		return nil, fmt.Errorf("histogram is nil")
+		return nil, errors.New("histogram is nil")
 	}
 	t, err := json.Marshal(s.Timestamp)
 	if err != nil {
@@ -176,11 +165,7 @@ func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error {
 		return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
 	}
 	if s.Histogram == nil {
-		return fmt.Errorf("histogram is null")
+		return errors.New("histogram is null")
 	}
 	return nil
 }
diff --git a/vendor/github.com/raeperd/recvcheck/.gitignore b/vendor/github.com/raeperd/recvcheck/.gitignore
index fbde17613f..4212673324 100644
--- a/vendor/github.com/raeperd/recvcheck/.gitignore
+++ b/vendor/github.com/raeperd/recvcheck/.gitignore
@@ -1,8 +1,3 @@
+.idea/
 coverage.txt
-cmd/recvcheck/recvcheck
+/recvcheck
diff --git a/vendor/github.com/raeperd/recvcheck/Makefile b/vendor/github.com/raeperd/recvcheck/Makefile
index 54fb68be88..d78605a3bd 100644
--- a/vendor/github.com/raeperd/recvcheck/Makefile
+++ b/vendor/github.com/raeperd/recvcheck/Makefile
@@ -1,4 +1,3 @@
-all: build test lint
-
-download:
-	go mod download
-
-build: download
-	go build -C cmd/recvcheck
+.PHONY: clean lint test build
+default: clean lint test build
@@ -10,17 +9,6 @@ test:
+build:
+	go build -ldflags "-s -w" -trimpath ./cmd/recvcheck/
+
-test:
+test: clean
 	go test -race -coverprofile=coverage.txt .

 lint:
diff --git a/vendor/github.com/raeperd/recvcheck/README.md b/vendor/github.com/raeperd/recvcheck/README.md
index 90e342c926..067aa3c580 100644
--- a/vendor/github.com/raeperd/recvcheck/README.md
+++ b/vendor/github.com/raeperd/recvcheck/README.md
@@ -1,5 +1,4 @@
 # recvcheck
-[![.github/workflows/build.yaml](https://github.com/raeperd/recvcheck/actions/workflows/build.yaml/badge.svg)](https://github.com/raeperd/recvcheck/actions/workflows/build.yaml) [![Go Report Card](https://goreportcard.com/badge/github.com/raeperd/recvcheck)](https://goreportcard.com/report/github.com/raeperd/recvcheck) [![codecov](https://codecov.io/gh/raeperd/recvcheck/graph/badge.svg?token=fPYgEHlq1e)](https://codecov.io/gh/raeperd/recvcheck)
+[![.github/workflows/build.yaml](https://github.com/raeperd/recvcheck/actions/workflows/build.yaml/badge.svg)](https://github.com/raeperd/recvcheck/actions/workflows/build.yaml) [![Go Report Card](https://goreportcard.com/badge/github.com/raeperd/recvcheck)](https://goreportcard.com/report/github.com/raeperd/recvcheck)
 Golang linter for check receiver type in method

-## Motivtation
+## Motivation
@@ -8,16 +7,6 @@ From [Go Wiki: Go Code Review Comments - The Go Programming Language](https://go.dev/wiki/CodeReviewComments#receiver-type)
 > Don’t mix receiver types. Choose either pointers or struct types for all available method

-Following code from [Dave Chenney](https://dave.cheney.net/2015/11/18/wednesday-pop-quiz-spot-the-race) causes data race. Could you find it?
+Following code from [Dave Cheney](https://dave.cheney.net/2015/11/18/wednesday-pop-quiz-spot-the-race) causes data race. Could you find it?

 This linter does it for you.
 ```go
diff --git a/vendor/github.com/raeperd/recvcheck/analyzer.go b/vendor/github.com/raeperd/recvcheck/analyzer.go
index 1082fbbb41..11fb38e72e 100644
--- a/vendor/github.com/raeperd/recvcheck/analyzer.go
+++ b/vendor/github.com/raeperd/recvcheck/analyzer.go
@@ -8,7 +8,6 @@ import (
 	"golang.org/x/tools/go/ast/inspector"
 )

+// NewAnalyzer returns a new analyzer to check for receiver type consistency.
+func NewAnalyzer(s Settings) *analysis.Analyzer {
+	a := &analyzer{
@@ -61,16 +60,6 @@ type analyzer struct {
 }

-var Analyzer = &analysis.Analyzer{
-	Name:     "recvcheck",
-	Doc:      "checks for receiver type consistency",
-	Run:      run,
-	Requires: []*analysis.Analyzer{inspect.Analyzer},
-}
-
-func run(pass *analysis.Pass) (any, error) {
+func (r *analyzer) run(pass *analysis.Pass) (any, error) {
 	inspector := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)

 	structs := map[string]*structType{}
@@ -80,7 +69,6 @@ func run(pass *analysis.Pass) (any, error) {
 			return
 		}

+		recv, isStar := recvTypeIdent(funcDecl.Recv.List[0].Type)
+		if recv == nil {
+			return
+		}
@@ -93,31 +81,10 @@ func run(pass *analysis.Pass) (any, error) {
+		st, ok := structs[recv.Name]
+		if !ok {
+			structs[recv.Name] = &structType{}
-		var recv *ast.Ident
-		var isStar bool
-		switch recvType := funcDecl.Recv.List[0].Type.(type) {
-		case *ast.StarExpr:
-			isStar = true
-			if recv, ok = recvType.X.(*ast.Ident); !ok {
-				return
-			}
-		case *ast.Ident:
-			recv = recvType
-		default:
-			return
-		}
-
-		var st *structType
-		st, ok = structs[recv.Name]
-		if !ok {
-			structs[recv.Name] = &structType{recv: recv.Name}
 			st = structs[recv.Name]
 		}

 		if isStar {
-			st.numStarMethod++
+			st.starUsed = true
 		} else {
-			st.numTypeMethod++
+			st.typeUsed = true
 		}
 	})

@@ -127,24 +94,12 @@ func run(pass *analysis.Pass) (any, error) {
-	for _, st := range structs {
-		if st.numStarMethod > 0 && st.numTypeMethod > 0 {
-			pass.Reportf(pass.Pkg.Scope().Lookup(st.recv).Pos(), "the methods of %q use pointer receiver and non-pointer receiver.", st.recv)
+	for recv, st := range structs {
+		if st.starUsed && st.typeUsed {
+			pass.Reportf(pass.Pkg.Scope().Lookup(recv).Pos(), "the methods of %q use pointer receiver and non-pointer receiver.", recv)
 		}
 	}

 	return nil, nil
 }

+func (r *analyzer) isExcluded(recv *ast.Ident, f *ast.FuncDecl) bool {
+	if f.Name == nil || f.Name.Name == "" {
+		return true
+	}
@@ -177,10 +132,4 @@ func recvTypeIdent(r ast.Expr) (*ast.Ident, bool) {
+	}
+
+	return nil, false
-
-type structType struct {
-	recv          string
-	numStarMethod int
-	numTypeMethod int
 }
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitignore b/vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitignore
deleted file mode 100644
index 3c0af38259..0000000000
--- a/vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-.vscode
-.idea
-*.swp
-cmd/jv/jv
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitmodules b/vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitmodules
deleted file mode 100644
index 314da31c5e..0000000000
--- a/vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitmodules
+++ /dev/null
@@ -1,3 +0,0 @@
-[submodule "testdata/JSON-Schema-Test-Suite"]
-	path = testdata/JSON-Schema-Test-Suite
-	url = https://github.com/json-schema-org/JSON-Schema-Test-Suite.git
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/LICENSE b/vendor/github.com/santhosh-tekuri/jsonschema/v5/LICENSE
deleted file mode 100644
index 19dc35b243..0000000000
--- a/vendor/github.com/santhosh-tekuri/jsonschema/v5/LICENSE
+++ /dev/null
@@ -1,175 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE,
REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. \ No newline at end of file diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/README.md b/vendor/github.com/santhosh-tekuri/jsonschema/v5/README.md deleted file mode 100644 index b0d05054ca..0000000000 --- a/vendor/github.com/santhosh-tekuri/jsonschema/v5/README.md +++ /dev/null @@ -1,220 +0,0 @@ -# jsonschema v5.3.1 - -[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) -[![GoDoc](https://godoc.org/github.com/santhosh-tekuri/jsonschema?status.svg)](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5) -[![Go Report Card](https://goreportcard.com/badge/github.com/santhosh-tekuri/jsonschema/v5)](https://goreportcard.com/report/github.com/santhosh-tekuri/jsonschema/v5) -[![Build Status](https://github.com/santhosh-tekuri/jsonschema/actions/workflows/go.yaml/badge.svg?branch=master)](https://github.com/santhosh-tekuri/jsonschema/actions/workflows/go.yaml) -[![codecov](https://codecov.io/gh/santhosh-tekuri/jsonschema/branch/master/graph/badge.svg?token=JMVj1pFT2l)](https://codecov.io/gh/santhosh-tekuri/jsonschema) - -Package jsonschema provides json-schema compilation and validation. 
- -[Benchmarks](https://dev.to/vearutop/benchmarking-correctness-and-performance-of-go-json-schema-validators-3247) - -### Features: - - implements - [draft 2020-12](https://json-schema.org/specification-links.html#2020-12), - [draft 2019-09](https://json-schema.org/specification-links.html#draft-2019-09-formerly-known-as-draft-8), - [draft-7](https://json-schema.org/specification-links.html#draft-7), - [draft-6](https://json-schema.org/specification-links.html#draft-6), - [draft-4](https://json-schema.org/specification-links.html#draft-4) - - fully compliant with [JSON-Schema-Test-Suite](https://github.com/json-schema-org/JSON-Schema-Test-Suite), (excluding some optional) - - list of optional tests that are excluded can be found in schema_test.go(variable [skipTests](https://github.com/santhosh-tekuri/jsonschema/blob/master/schema_test.go#L24)) - - validates schemas against meta-schema - - full support of remote references - - support of recursive references between schemas - - detects infinite loop in schemas - - thread safe validation - - rich, intuitive hierarchial error messages with json-pointers to exact location - - supports output formats flag, basic and detailed - - supports enabling format and content Assertions in draft2019-09 or above - - change `Compiler.AssertFormat`, `Compiler.AssertContent` to `true` - - compiled schema can be introspected. easier to develop tools like generating go structs given schema - - supports user-defined keywords via [extensions](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-Extension) - - implements following formats (supports [user-defined](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-UserDefinedFormat)) - - date-time, date, time, duration, period (supports leap-second) - - uuid, hostname, email - - ip-address, ipv4, ipv6 - - uri, uriref, uri-template(limited validation) - - json-pointer, relative-json-pointer - - regex, format - - implements following contentEncoding (supports [user-defined](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-UserDefinedContent)) - - base64 - - implements following contentMediaType (supports [user-defined](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-UserDefinedContent)) - - application/json - - can load from files/http/https/[string](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-FromString)/[]byte/io.Reader (supports [user-defined](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-UserDefinedLoader)) - - -see examples in [godoc](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5) - -The schema is compiled against the version specified in `$schema` property. -If "$schema" property is missing, it uses latest draft which currently implemented -by this library. - -You can force to use specific version, when `$schema` is missing, as follows: - -```go -compiler := jsonschema.NewCompiler() -compiler.Draft = jsonschema.Draft4 -``` - -This package supports loading json-schema from filePath and fileURL. - -To load json-schema from HTTPURL, add following import: - -```go -import _ "github.com/santhosh-tekuri/jsonschema/v5/httploader" -``` - -## Rich Errors - -The ValidationError returned by Validate method contains detailed context to understand why and where the error is. 
- -schema.json: -```json -{ - "$ref": "t.json#/definitions/employee" -} -``` - -t.json: -```json -{ - "definitions": { - "employee": { - "type": "string" - } - } -} -``` - -doc.json: -```json -1 -``` - -assuming `err` is the ValidationError returned when `doc.json` validated with `schema.json`, -```go -fmt.Printf("%#v\n", err) // using %#v prints errors hierarchy -``` -Prints: -``` -[I#] [S#] doesn't validate with file:///Users/santhosh/jsonschema/schema.json# - [I#] [S#/$ref] doesn't validate with 'file:///Users/santhosh/jsonschema/t.json#/definitions/employee' - [I#] [S#/definitions/employee/type] expected string, but got number -``` - -Here `I` stands for instance document and `S` stands for schema document. -The json-fragments that caused error in instance and schema documents are represented using json-pointer notation. -Nested causes are printed with indent. - -To output `err` in `flag` output format: -```go -b, _ := json.MarshalIndent(err.FlagOutput(), "", " ") -fmt.Println(string(b)) -``` -Prints: -```json -{ - "valid": false -} -``` -To output `err` in `basic` output format: -```go -b, _ := json.MarshalIndent(err.BasicOutput(), "", " ") -fmt.Println(string(b)) -``` -Prints: -```json -{ - "valid": false, - "errors": [ - { - "keywordLocation": "", - "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/schema.json#", - "instanceLocation": "", - "error": "doesn't validate with file:///Users/santhosh/jsonschema/schema.json#" - }, - { - "keywordLocation": "/$ref", - "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/schema.json#/$ref", - "instanceLocation": "", - "error": "doesn't validate with 'file:///Users/santhosh/jsonschema/t.json#/definitions/employee'" - }, - { - "keywordLocation": "/$ref/type", - "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/t.json#/definitions/employee/type", - "instanceLocation": "", - "error": "expected string, but got number" - } - ] -} -``` -To output `err` in `detailed` output format: -```go -b, _ := json.MarshalIndent(err.DetailedOutput(), "", " ") -fmt.Println(string(b)) -``` -Prints: -```json -{ - "valid": false, - "keywordLocation": "", - "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/schema.json#", - "instanceLocation": "", - "errors": [ - { - "valid": false, - "keywordLocation": "/$ref", - "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/schema.json#/$ref", - "instanceLocation": "", - "errors": [ - { - "valid": false, - "keywordLocation": "/$ref/type", - "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/t.json#/definitions/employee/type", - "instanceLocation": "", - "error": "expected string, but got number" - } - ] - } - ] -} -``` - -## CLI - -to install `go install github.com/santhosh-tekuri/jsonschema/cmd/jv@latest` - -```bash -jv [-draft INT] [-output FORMAT] [-assertformat] [-assertcontent] []... - -assertcontent - enable content assertions with draft >= 2019 - -assertformat - enable format assertions with draft >= 2019 - -draft int - draft used when '$schema' attribute is missing. valid values 4, 5, 7, 2019, 2020 (default 2020) - -output string - output format. valid values flag, basic, detailed -``` - -if no `` arguments are passed, it simply validates the ``. -if `$schema` attribute is missing in schema, it uses latest version. this can be overridden by passing `-draft` flag - -exit-code is 1, if there are any validation errors - -`jv` can also validate yaml files. It also accepts schema from yaml files. 
- -## Validating YAML Documents - -since yaml supports non-string keys, such yaml documents are rendered as invalid json documents. - -most yaml parser use `map[interface{}]interface{}` for object, -whereas json parser uses `map[string]interface{}`. - -so we need to manually convert them to `map[string]interface{}`. -below code shows such conversion by `toStringKeys` function. - -https://play.golang.org/p/Hhax3MrtD8r - -NOTE: if you are using `gopkg.in/yaml.v3`, then you do not need such conversion. since this library -returns `map[string]interface{}` if all keys are strings. \ No newline at end of file diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/compiler.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/compiler.go deleted file mode 100644 index fdb68e6480..0000000000 --- a/vendor/github.com/santhosh-tekuri/jsonschema/v5/compiler.go +++ /dev/null @@ -1,812 +0,0 @@ -package jsonschema - -import ( - "encoding/json" - "fmt" - "io" - "math/big" - "regexp" - "strconv" - "strings" -) - -// A Compiler represents a json-schema compiler. -type Compiler struct { - // Draft represents the draft used when '$schema' attribute is missing. - // - // This defaults to latest supported draft (currently 2020-12). - Draft *Draft - resources map[string]*resource - - // Extensions is used to register extensions. - extensions map[string]extension - - // ExtractAnnotations tells whether schema annotations has to be extracted - // in compiled Schema or not. - ExtractAnnotations bool - - // LoadURL loads the document at given absolute URL. - // - // If nil, package global LoadURL is used. - LoadURL func(s string) (io.ReadCloser, error) - - // Formats can be registered by adding to this map. Key is format name, - // value is function that knows how to validate that format. - Formats map[string]func(interface{}) bool - - // AssertFormat for specifications >= draft2019-09. - AssertFormat bool - - // Decoders can be registered by adding to this map. Key is encoding name, - // value is function that knows how to decode string in that format. - Decoders map[string]func(string) ([]byte, error) - - // MediaTypes can be registered by adding to this map. Key is mediaType name, - // value is function that knows how to validate that mediaType. - MediaTypes map[string]func([]byte) error - - // AssertContent for specifications >= draft2019-09. - AssertContent bool -} - -// Compile parses json-schema at given url returns, if successful, -// a Schema object that can be used to match against json. -// -// Returned error can be *SchemaError -func Compile(url string) (*Schema, error) { - return NewCompiler().Compile(url) -} - -// MustCompile is like Compile but panics if the url cannot be compiled to *Schema. -// It simplifies safe initialization of global variables holding compiled Schemas. -func MustCompile(url string) *Schema { - return NewCompiler().MustCompile(url) -} - -// CompileString parses and compiles the given schema with given base url. -func CompileString(url, schema string) (*Schema, error) { - c := NewCompiler() - if err := c.AddResource(url, strings.NewReader(schema)); err != nil { - return nil, err - } - return c.Compile(url) -} - -// MustCompileString is like CompileString but panics on error. -// It simplified safe initialization of global variables holding compiled Schema. 
-func MustCompileString(url, schema string) *Schema { - c := NewCompiler() - if err := c.AddResource(url, strings.NewReader(schema)); err != nil { - panic(err) - } - return c.MustCompile(url) -} - -// NewCompiler returns a json-schema Compiler object. -// if '$schema' attribute is missing, it is treated as draft7. to change this -// behavior change Compiler.Draft value -func NewCompiler() *Compiler { - return &Compiler{ - Draft: latest, - resources: make(map[string]*resource), - Formats: make(map[string]func(interface{}) bool), - Decoders: make(map[string]func(string) ([]byte, error)), - MediaTypes: make(map[string]func([]byte) error), - extensions: make(map[string]extension), - } -} - -// AddResource adds in-memory resource to the compiler. -// -// Note that url must not have fragment -func (c *Compiler) AddResource(url string, r io.Reader) error { - res, err := newResource(url, r) - if err != nil { - return err - } - c.resources[res.url] = res - return nil -} - -// MustCompile is like Compile but panics if the url cannot be compiled to *Schema. -// It simplifies safe initialization of global variables holding compiled Schemas. -func (c *Compiler) MustCompile(url string) *Schema { - s, err := c.Compile(url) - if err != nil { - panic(fmt.Sprintf("jsonschema: %#v", err)) - } - return s -} - -// Compile parses json-schema at given url returns, if successful, -// a Schema object that can be used to match against json. -// -// error returned will be of type *SchemaError -func (c *Compiler) Compile(url string) (*Schema, error) { - // make url absolute - u, err := toAbs(url) - if err != nil { - return nil, &SchemaError{url, err} - } - url = u - - sch, err := c.compileURL(url, nil, "#") - if err != nil { - err = &SchemaError{url, err} - } - return sch, err -} - -func (c *Compiler) findResource(url string) (*resource, error) { - if _, ok := c.resources[url]; !ok { - // load resource - var rdr io.Reader - if sch, ok := vocabSchemas[url]; ok { - rdr = strings.NewReader(sch) - } else { - loadURL := LoadURL - if c.LoadURL != nil { - loadURL = c.LoadURL - } - r, err := loadURL(url) - if err != nil { - return nil, err - } - defer r.Close() - rdr = r - } - if err := c.AddResource(url, rdr); err != nil { - return nil, err - } - } - - r := c.resources[url] - if r.draft != nil { - return r, nil - } - - // set draft - r.draft = c.Draft - if m, ok := r.doc.(map[string]interface{}); ok { - if sch, ok := m["$schema"]; ok { - sch, ok := sch.(string) - if !ok { - return nil, fmt.Errorf("jsonschema: invalid $schema in %s", url) - } - if !isURI(sch) { - return nil, fmt.Errorf("jsonschema: $schema must be uri in %s", url) - } - r.draft = findDraft(sch) - if r.draft == nil { - sch, _ := split(sch) - if sch == url { - return nil, fmt.Errorf("jsonschema: unsupported draft in %s", url) - } - mr, err := c.findResource(sch) - if err != nil { - return nil, err - } - r.draft = mr.draft - } - } - } - - id, err := r.draft.resolveID(r.url, r.doc) - if err != nil { - return nil, err - } - if id != "" { - r.url = id - } - - if err := r.fillSubschemas(c, r); err != nil { - return nil, err - } - - return r, nil -} - -func (c *Compiler) compileURL(url string, stack []schemaRef, ptr string) (*Schema, error) { - // if url points to a draft, return Draft.meta - if d := findDraft(url); d != nil && d.meta != nil { - return d.meta, nil - } - - b, f := split(url) - r, err := c.findResource(b) - if err != nil { - return nil, err - } - return c.compileRef(r, stack, ptr, r, f) -} - -func (c *Compiler) compileRef(r *resource, stack 
[]schemaRef, refPtr string, res *resource, ref string) (*Schema, error) { - base := r.baseURL(res.floc) - ref, err := resolveURL(base, ref) - if err != nil { - return nil, err - } - - u, f := split(ref) - sr := r.findResource(u) - if sr == nil { - // external resource - return c.compileURL(ref, stack, refPtr) - } - - // ensure root resource is always compiled first. - // this is required to get schema.meta from root resource - if r.schema == nil { - r.schema = newSchema(r.url, r.floc, r.draft, r.doc) - if _, err := c.compile(r, nil, schemaRef{"#", r.schema, false}, r); err != nil { - return nil, err - } - } - - sr, err = r.resolveFragment(c, sr, f) - if err != nil { - return nil, err - } - if sr == nil { - return nil, fmt.Errorf("jsonschema: %s not found", ref) - } - - if sr.schema != nil { - if err := checkLoop(stack, schemaRef{refPtr, sr.schema, false}); err != nil { - return nil, err - } - return sr.schema, nil - } - - sr.schema = newSchema(r.url, sr.floc, r.draft, sr.doc) - return c.compile(r, stack, schemaRef{refPtr, sr.schema, false}, sr) -} - -func (c *Compiler) compileDynamicAnchors(r *resource, res *resource) error { - if r.draft.version < 2020 { - return nil - } - - rr := r.listResources(res) - rr = append(rr, res) - for _, sr := range rr { - if m, ok := sr.doc.(map[string]interface{}); ok { - if _, ok := m["$dynamicAnchor"]; ok { - sch, err := c.compileRef(r, nil, "IGNORED", r, sr.floc) - if err != nil { - return err - } - res.schema.dynamicAnchors = append(res.schema.dynamicAnchors, sch) - } - } - } - return nil -} - -func (c *Compiler) compile(r *resource, stack []schemaRef, sref schemaRef, res *resource) (*Schema, error) { - if err := c.compileDynamicAnchors(r, res); err != nil { - return nil, err - } - - switch v := res.doc.(type) { - case bool: - res.schema.Always = &v - return res.schema, nil - default: - return res.schema, c.compileMap(r, stack, sref, res) - } -} - -func (c *Compiler) compileMap(r *resource, stack []schemaRef, sref schemaRef, res *resource) error { - m := res.doc.(map[string]interface{}) - - if err := checkLoop(stack, sref); err != nil { - return err - } - stack = append(stack, sref) - - var s = res.schema - var err error - - if r == res { // root schema - if sch, ok := m["$schema"]; ok { - sch := sch.(string) - if d := findDraft(sch); d != nil { - s.meta = d.meta - } else { - if s.meta, err = c.compileRef(r, stack, "$schema", res, sch); err != nil { - return err - } - } - } - } - - if ref, ok := m["$ref"]; ok { - s.Ref, err = c.compileRef(r, stack, "$ref", res, ref.(string)) - if err != nil { - return err - } - if r.draft.version < 2019 { - // All other properties in a "$ref" object MUST be ignored - return nil - } - } - - if r.draft.version >= 2019 { - if r == res { // root schema - if vocab, ok := m["$vocabulary"]; ok { - for url, reqd := range vocab.(map[string]interface{}) { - if reqd, ok := reqd.(bool); ok && !reqd { - continue - } - if !r.draft.isVocab(url) { - return fmt.Errorf("jsonschema: unsupported vocab %q in %s", url, res) - } - s.vocab = append(s.vocab, url) - } - } else { - s.vocab = r.draft.defaultVocab - } - } - - if ref, ok := m["$recursiveRef"]; ok { - s.RecursiveRef, err = c.compileRef(r, stack, "$recursiveRef", res, ref.(string)) - if err != nil { - return err - } - } - } - if r.draft.version >= 2020 { - if dref, ok := m["$dynamicRef"]; ok { - s.DynamicRef, err = c.compileRef(r, stack, "$dynamicRef", res, dref.(string)) - if err != nil { - return err - } - if dref, ok := dref.(string); ok { - _, frag := split(dref) - if frag != "#" 
&& !strings.HasPrefix(frag, "#/") { - // frag is anchor - s.dynamicRefAnchor = frag[1:] - } - } - } - } - - loadInt := func(pname string) int { - if num, ok := m[pname]; ok { - i, _ := num.(json.Number).Float64() - return int(i) - } - return -1 - } - - loadRat := func(pname string) *big.Rat { - if num, ok := m[pname]; ok { - r, _ := new(big.Rat).SetString(string(num.(json.Number))) - return r - } - return nil - } - - if r.draft.version < 2019 || r.schema.meta.hasVocab("validation") { - if t, ok := m["type"]; ok { - switch t := t.(type) { - case string: - s.Types = []string{t} - case []interface{}: - s.Types = toStrings(t) - } - } - - if e, ok := m["enum"]; ok { - s.Enum = e.([]interface{}) - allPrimitives := true - for _, item := range s.Enum { - switch jsonType(item) { - case "object", "array": - allPrimitives = false - break - } - } - s.enumError = "enum failed" - if allPrimitives { - if len(s.Enum) == 1 { - s.enumError = fmt.Sprintf("value must be %#v", s.Enum[0]) - } else { - strEnum := make([]string, len(s.Enum)) - for i, item := range s.Enum { - strEnum[i] = fmt.Sprintf("%#v", item) - } - s.enumError = fmt.Sprintf("value must be one of %s", strings.Join(strEnum, ", ")) - } - } - } - - s.Minimum = loadRat("minimum") - if exclusive, ok := m["exclusiveMinimum"]; ok { - if exclusive, ok := exclusive.(bool); ok { - if exclusive { - s.Minimum, s.ExclusiveMinimum = nil, s.Minimum - } - } else { - s.ExclusiveMinimum = loadRat("exclusiveMinimum") - } - } - - s.Maximum = loadRat("maximum") - if exclusive, ok := m["exclusiveMaximum"]; ok { - if exclusive, ok := exclusive.(bool); ok { - if exclusive { - s.Maximum, s.ExclusiveMaximum = nil, s.Maximum - } - } else { - s.ExclusiveMaximum = loadRat("exclusiveMaximum") - } - } - - s.MultipleOf = loadRat("multipleOf") - - s.MinProperties, s.MaxProperties = loadInt("minProperties"), loadInt("maxProperties") - - if req, ok := m["required"]; ok { - s.Required = toStrings(req.([]interface{})) - } - - s.MinItems, s.MaxItems = loadInt("minItems"), loadInt("maxItems") - - if unique, ok := m["uniqueItems"]; ok { - s.UniqueItems = unique.(bool) - } - - s.MinLength, s.MaxLength = loadInt("minLength"), loadInt("maxLength") - - if pattern, ok := m["pattern"]; ok { - s.Pattern = regexp.MustCompile(pattern.(string)) - } - - if r.draft.version >= 2019 { - s.MinContains, s.MaxContains = loadInt("minContains"), loadInt("maxContains") - if s.MinContains == -1 { - s.MinContains = 1 - } - - if deps, ok := m["dependentRequired"]; ok { - deps := deps.(map[string]interface{}) - s.DependentRequired = make(map[string][]string, len(deps)) - for pname, pvalue := range deps { - s.DependentRequired[pname] = toStrings(pvalue.([]interface{})) - } - } - } - } - - compile := func(stack []schemaRef, ptr string) (*Schema, error) { - return c.compileRef(r, stack, ptr, res, r.url+res.floc+"/"+ptr) - } - - loadSchema := func(pname string, stack []schemaRef) (*Schema, error) { - if _, ok := m[pname]; ok { - return compile(stack, escape(pname)) - } - return nil, nil - } - - loadSchemas := func(pname string, stack []schemaRef) ([]*Schema, error) { - if pvalue, ok := m[pname]; ok { - pvalue := pvalue.([]interface{}) - schemas := make([]*Schema, len(pvalue)) - for i := range pvalue { - sch, err := compile(stack, escape(pname)+"/"+strconv.Itoa(i)) - if err != nil { - return nil, err - } - schemas[i] = sch - } - return schemas, nil - } - return nil, nil - } - - if r.draft.version < 2019 || r.schema.meta.hasVocab("applicator") { - if s.Not, err = loadSchema("not", stack); err != nil { - return 
err - } - if s.AllOf, err = loadSchemas("allOf", stack); err != nil { - return err - } - if s.AnyOf, err = loadSchemas("anyOf", stack); err != nil { - return err - } - if s.OneOf, err = loadSchemas("oneOf", stack); err != nil { - return err - } - - if props, ok := m["properties"]; ok { - props := props.(map[string]interface{}) - s.Properties = make(map[string]*Schema, len(props)) - for pname := range props { - s.Properties[pname], err = compile(nil, "properties/"+escape(pname)) - if err != nil { - return err - } - } - } - - if regexProps, ok := m["regexProperties"]; ok { - s.RegexProperties = regexProps.(bool) - } - - if patternProps, ok := m["patternProperties"]; ok { - patternProps := patternProps.(map[string]interface{}) - s.PatternProperties = make(map[*regexp.Regexp]*Schema, len(patternProps)) - for pattern := range patternProps { - s.PatternProperties[regexp.MustCompile(pattern)], err = compile(nil, "patternProperties/"+escape(pattern)) - if err != nil { - return err - } - } - } - - if additionalProps, ok := m["additionalProperties"]; ok { - switch additionalProps := additionalProps.(type) { - case bool: - s.AdditionalProperties = additionalProps - case map[string]interface{}: - s.AdditionalProperties, err = compile(nil, "additionalProperties") - if err != nil { - return err - } - } - } - - if deps, ok := m["dependencies"]; ok { - deps := deps.(map[string]interface{}) - s.Dependencies = make(map[string]interface{}, len(deps)) - for pname, pvalue := range deps { - switch pvalue := pvalue.(type) { - case []interface{}: - s.Dependencies[pname] = toStrings(pvalue) - default: - s.Dependencies[pname], err = compile(stack, "dependencies/"+escape(pname)) - if err != nil { - return err - } - } - } - } - - if r.draft.version >= 6 { - if s.PropertyNames, err = loadSchema("propertyNames", nil); err != nil { - return err - } - if s.Contains, err = loadSchema("contains", nil); err != nil { - return err - } - } - - if r.draft.version >= 7 { - if m["if"] != nil { - if s.If, err = loadSchema("if", stack); err != nil { - return err - } - if s.Then, err = loadSchema("then", stack); err != nil { - return err - } - if s.Else, err = loadSchema("else", stack); err != nil { - return err - } - } - } - if r.draft.version >= 2019 { - if deps, ok := m["dependentSchemas"]; ok { - deps := deps.(map[string]interface{}) - s.DependentSchemas = make(map[string]*Schema, len(deps)) - for pname := range deps { - s.DependentSchemas[pname], err = compile(stack, "dependentSchemas/"+escape(pname)) - if err != nil { - return err - } - } - } - } - - if r.draft.version >= 2020 { - if s.PrefixItems, err = loadSchemas("prefixItems", nil); err != nil { - return err - } - if s.Items2020, err = loadSchema("items", nil); err != nil { - return err - } - } else { - if items, ok := m["items"]; ok { - switch items.(type) { - case []interface{}: - s.Items, err = loadSchemas("items", nil) - if err != nil { - return err - } - if additionalItems, ok := m["additionalItems"]; ok { - switch additionalItems := additionalItems.(type) { - case bool: - s.AdditionalItems = additionalItems - case map[string]interface{}: - s.AdditionalItems, err = compile(nil, "additionalItems") - if err != nil { - return err - } - } - } - default: - s.Items, err = compile(nil, "items") - if err != nil { - return err - } - } - } - } - - } - - // unevaluatedXXX keywords were in "applicator" vocab in 2019, but moved to new vocab "unevaluated" in 2020 - if (r.draft.version == 2019 && r.schema.meta.hasVocab("applicator")) || (r.draft.version >= 2020 && 
r.schema.meta.hasVocab("unevaluated")) { - if s.UnevaluatedProperties, err = loadSchema("unevaluatedProperties", nil); err != nil { - return err - } - if s.UnevaluatedItems, err = loadSchema("unevaluatedItems", nil); err != nil { - return err - } - if r.draft.version >= 2020 { - // any item in an array that passes validation of the contains schema is considered "evaluated" - s.ContainsEval = true - } - } - - if format, ok := m["format"]; ok { - s.Format = format.(string) - if r.draft.version < 2019 || c.AssertFormat || r.schema.meta.hasVocab("format-assertion") { - if format, ok := c.Formats[s.Format]; ok { - s.format = format - } else { - s.format, _ = Formats[s.Format] - } - } - } - - if c.ExtractAnnotations { - if title, ok := m["title"]; ok { - s.Title = title.(string) - } - if description, ok := m["description"]; ok { - s.Description = description.(string) - } - s.Default = m["default"] - } - - if r.draft.version >= 6 { - if c, ok := m["const"]; ok { - s.Constant = []interface{}{c} - } - } - - if r.draft.version >= 7 { - if encoding, ok := m["contentEncoding"]; ok { - s.ContentEncoding = encoding.(string) - if decoder, ok := c.Decoders[s.ContentEncoding]; ok { - s.decoder = decoder - } else { - s.decoder, _ = Decoders[s.ContentEncoding] - } - } - if mediaType, ok := m["contentMediaType"]; ok { - s.ContentMediaType = mediaType.(string) - if mediaType, ok := c.MediaTypes[s.ContentMediaType]; ok { - s.mediaType = mediaType - } else { - s.mediaType, _ = MediaTypes[s.ContentMediaType] - } - if s.ContentSchema, err = loadSchema("contentSchema", stack); err != nil { - return err - } - } - if c.ExtractAnnotations { - if comment, ok := m["$comment"]; ok { - s.Comment = comment.(string) - } - if readOnly, ok := m["readOnly"]; ok { - s.ReadOnly = readOnly.(bool) - } - if writeOnly, ok := m["writeOnly"]; ok { - s.WriteOnly = writeOnly.(bool) - } - if examples, ok := m["examples"]; ok { - s.Examples = examples.([]interface{}) - } - } - } - - if r.draft.version >= 2019 { - if !c.AssertContent { - s.decoder = nil - s.mediaType = nil - s.ContentSchema = nil - } - if c.ExtractAnnotations { - if deprecated, ok := m["deprecated"]; ok { - s.Deprecated = deprecated.(bool) - } - } - } - - for name, ext := range c.extensions { - es, err := ext.compiler.Compile(CompilerContext{c, r, stack, res}, m) - if err != nil { - return err - } - if es != nil { - if s.Extensions == nil { - s.Extensions = make(map[string]ExtSchema) - } - s.Extensions[name] = es - } - } - - return nil -} - -func (c *Compiler) validateSchema(r *resource, v interface{}, vloc string) error { - validate := func(meta *Schema) error { - if meta == nil { - return nil - } - return meta.validateValue(v, vloc) - } - - if err := validate(r.draft.meta); err != nil { - return err - } - for _, ext := range c.extensions { - if err := validate(ext.meta); err != nil { - return err - } - } - return nil -} - -func toStrings(arr []interface{}) []string { - s := make([]string, len(arr)) - for i, v := range arr { - s[i] = v.(string) - } - return s -} - -// SchemaRef captures schema and the path referring to it. 
-type schemaRef struct { - path string // relative-json-pointer to schema - schema *Schema // target schema - discard bool // true when scope left -} - -func (sr schemaRef) String() string { - return fmt.Sprintf("(%s)%v", sr.path, sr.schema) -} - -func checkLoop(stack []schemaRef, sref schemaRef) error { - for _, ref := range stack { - if ref.schema == sref.schema { - return infiniteLoopError(stack, sref) - } - } - return nil -} - -func keywordLocation(stack []schemaRef, path string) string { - var loc string - for _, ref := range stack[1:] { - loc += "/" + ref.path - } - if path != "" { - loc = loc + "/" + path - } - return loc -} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/content.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/content.go deleted file mode 100644 index 7570b8b5a9..0000000000 --- a/vendor/github.com/santhosh-tekuri/jsonschema/v5/content.go +++ /dev/null @@ -1,29 +0,0 @@ -package jsonschema - -import ( - "encoding/base64" - "encoding/json" -) - -// Decoders is a registry of functions that know how to decode -// a string encoded in a specific format. - -// New Decoders can be registered by adding to this map. Key is encoding name, -// value is function that knows how to decode string in that format. -var Decoders = map[string]func(string) ([]byte, error){ - "base64": base64.StdEncoding.DecodeString, -} - -// MediaTypes is a registry of functions that know how to validate -// whether the bytes represent data of that mediaType. - -// New mediaTypes can be registered by adding to this map. Key is mediaType name, -// value is function that knows how to validate that mediaType. -var MediaTypes = map[string]func([]byte) error{ - "application/json": validateJSON, -} - -func validateJSON(b []byte) error { - var v interface{} - return json.Unmarshal(b, &v) -} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/doc.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/doc.go deleted file mode 100644 index a124262a51..0000000000 --- a/vendor/github.com/santhosh-tekuri/jsonschema/v5/doc.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Package jsonschema provides json-schema compilation and validation. - -Features: - - implements draft 2020-12, 2019-09, draft-7, draft-6, draft-4 - - fully compliant with JSON-Schema-Test-Suite (excluding some optional tests) - - the list of excluded optional tests can be found in schema_test.go (variable skipTests) - - validates schemas against meta-schema - - full support of remote references - - support of recursive references between schemas - - detects infinite loops in schemas - - thread safe validation - - rich, intuitive hierarchical error messages with json-pointers to the exact location - - supports output formats: flag, basic and detailed - - supports enabling format and content assertions in draft2019-09 or above - - change Compiler.AssertFormat, Compiler.AssertContent to true - - compiled schema can be introspected,
making it easier to develop tools like generating go structs for a given schema - - supports user-defined keywords via extensions - - implements the following formats (supports user-defined) - - date-time, date, time, duration (supports leap-second) - - uuid, hostname, email - - ip-address, ipv4, ipv6 - - uri, uriref, uri-template (limited validation) - - json-pointer, relative-json-pointer - - regex, format - - implements the following contentEncoding (supports user-defined) - - base64 - - implements the following contentMediaType (supports user-defined) - - application/json - - can load from files/http/https/string/[]byte/io.Reader (supports user-defined) - -The schema is compiled against the version specified in the "$schema" property. -If the "$schema" property is missing, the latest draft implemented by this library -is used. - -You can force a specific draft to be used when "$schema" is missing, as follows: - - compiler := jsonschema.NewCompiler() - compiler.Draft = jsonschema.Draft4 - -This package supports loading json-schema from filePath and fileURL. - -To load a json-schema from an HTTP URL, add the following import: - - import _ "github.com/santhosh-tekuri/jsonschema/v5/httploader" - -You can validate YAML documents; see https://play.golang.org/p/sJy1qY7dXgA -*/ -package jsonschema diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/draft.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/draft.go deleted file mode 100644 index 154fa5837d..0000000000 --- a/vendor/github.com/santhosh-tekuri/jsonschema/v5/draft.go +++ /dev/null @@ -1,1454 +0,0 @@ -package jsonschema - -import ( - "fmt" - "strconv" - "strings" -) - -// A Draft represents a json-schema draft. -type Draft struct { - version int - meta *Schema - id string // property name used to represent schema id. - boolSchema bool // is boolean valid schema - vocab []string // built-in vocab - defaultVocab []string // vocabs when $vocabulary is not used - subschemas map[string]position -} - -func (d *Draft) URL() string { - switch d.version { - case 2020: - return "https://json-schema.org/draft/2020-12/schema" - case 2019: - return "https://json-schema.org/draft/2019-09/schema" - case 7: - return "https://json-schema.org/draft-07/schema" - case 6: - return "https://json-schema.org/draft-06/schema" - case 4: - return "https://json-schema.org/draft-04/schema" - } - return "" -} - -func (d *Draft) String() string { - return fmt.Sprintf("Draft%d", d.version) -} - -func (d *Draft) loadMeta(url, schema string) { - c := NewCompiler() - c.AssertFormat = true - if err := c.AddResource(url, strings.NewReader(schema)); err != nil { - panic(err) - } - d.meta = c.MustCompile(url) - d.meta.meta = d.meta -} - -func (d *Draft) getID(sch interface{}) string { - m, ok := sch.(map[string]interface{}) - if !ok { - return "" - } - if _, ok := m["$ref"]; ok && d.version <= 7 { - // $ref prevents a sibling id from changing the base uri - return "" - } - v, ok := m[d.id] - if !ok { - return "" - } - id, ok := v.(string) - if !ok { - return "" - } - return id -} - -func (d *Draft) resolveID(base string, sch interface{}) (string, error) { - id, _ := split(d.getID(sch)) // strip fragment - if id == "" { - return "", nil - } - url, err := resolveURL(base, id) - url, _ = split(url) // strip fragment - return url, err -} - -func (d *Draft) anchors(sch interface{}) []string { - m, ok := sch.(map[string]interface{}) - if !ok { - return nil - } - - var anchors []string - - // before draft2019, anchor is specified in id - _, f := split(d.getID(m)) - if f != "#" { - anchors = append(anchors,
f[1:]) - } - - if v, ok := m["$anchor"]; ok && d.version >= 2019 { - anchors = append(anchors, v.(string)) - } - if v, ok := m["$dynamicAnchor"]; ok && d.version >= 2020 { - anchors = append(anchors, v.(string)) - } - return anchors -} - -// listSubschemas collects subschemas in r into rr. -func (d *Draft) listSubschemas(r *resource, base string, rr map[string]*resource) error { - add := func(loc string, sch interface{}) error { - url, err := d.resolveID(base, sch) - if err != nil { - return err - } - floc := r.floc + "/" + loc - sr := &resource{url: url, floc: floc, doc: sch} - rr[floc] = sr - - base := base - if url != "" { - base = url - } - return d.listSubschemas(sr, base, rr) - } - - sch, ok := r.doc.(map[string]interface{}) - if !ok { - return nil - } - for kw, pos := range d.subschemas { - v, ok := sch[kw] - if !ok { - continue - } - if pos&self != 0 { - switch v := v.(type) { - case map[string]interface{}: - if err := add(kw, v); err != nil { - return err - } - case bool: - if d.boolSchema { - if err := add(kw, v); err != nil { - return err - } - } - } - } - if pos&item != 0 { - if v, ok := v.([]interface{}); ok { - for i, item := range v { - if err := add(kw+"/"+strconv.Itoa(i), item); err != nil { - return err - } - } - } - } - if pos&prop != 0 { - if v, ok := v.(map[string]interface{}); ok { - for pname, pval := range v { - if err := add(kw+"/"+escape(pname), pval); err != nil { - return err - } - } - } - } - } - return nil -} - -// isVocab tells whether url is built-in vocab. -func (d *Draft) isVocab(url string) bool { - for _, v := range d.vocab { - if url == v { - return true - } - } - return false -} - -type position uint - -const ( - self position = 1 << iota - prop - item -) - -// supported drafts -var ( - Draft4 = &Draft{version: 4, id: "id", boolSchema: false} - Draft6 = &Draft{version: 6, id: "$id", boolSchema: true} - Draft7 = &Draft{version: 7, id: "$id", boolSchema: true} - Draft2019 = &Draft{ - version: 2019, - id: "$id", - boolSchema: true, - vocab: []string{ - "https://json-schema.org/draft/2019-09/vocab/core", - "https://json-schema.org/draft/2019-09/vocab/applicator", - "https://json-schema.org/draft/2019-09/vocab/validation", - "https://json-schema.org/draft/2019-09/vocab/meta-data", - "https://json-schema.org/draft/2019-09/vocab/format", - "https://json-schema.org/draft/2019-09/vocab/content", - }, - defaultVocab: []string{ - "https://json-schema.org/draft/2019-09/vocab/core", - "https://json-schema.org/draft/2019-09/vocab/applicator", - "https://json-schema.org/draft/2019-09/vocab/validation", - }, - } - Draft2020 = &Draft{ - version: 2020, - id: "$id", - boolSchema: true, - vocab: []string{ - "https://json-schema.org/draft/2020-12/vocab/core", - "https://json-schema.org/draft/2020-12/vocab/applicator", - "https://json-schema.org/draft/2020-12/vocab/unevaluated", - "https://json-schema.org/draft/2020-12/vocab/validation", - "https://json-schema.org/draft/2020-12/vocab/meta-data", - "https://json-schema.org/draft/2020-12/vocab/format-annotation", - "https://json-schema.org/draft/2020-12/vocab/format-assertion", - "https://json-schema.org/draft/2020-12/vocab/content", - }, - defaultVocab: []string{ - "https://json-schema.org/draft/2020-12/vocab/core", - "https://json-schema.org/draft/2020-12/vocab/applicator", - "https://json-schema.org/draft/2020-12/vocab/unevaluated", - "https://json-schema.org/draft/2020-12/vocab/validation", - }, - } - - latest = Draft2020 -) - -func findDraft(url string) *Draft { - if strings.HasPrefix(url, "http://") { - url = "https://" 
+ strings.TrimPrefix(url, "http://") - } - if strings.HasSuffix(url, "#") || strings.HasSuffix(url, "#/") { - url = url[:strings.IndexByte(url, '#')] - } - switch url { - case "https://json-schema.org/schema": - return latest - case "https://json-schema.org/draft/2020-12/schema": - return Draft2020 - case "https://json-schema.org/draft/2019-09/schema": - return Draft2019 - case "https://json-schema.org/draft-07/schema": - return Draft7 - case "https://json-schema.org/draft-06/schema": - return Draft6 - case "https://json-schema.org/draft-04/schema": - return Draft4 - } - return nil -} - -func init() { - subschemas := map[string]position{ - // type agnostic - "definitions": prop, - "not": self, - "allOf": item, - "anyOf": item, - "oneOf": item, - // object - "properties": prop, - "additionalProperties": self, - "patternProperties": prop, - // array - "items": self | item, - "additionalItems": self, - "dependencies": prop, - } - Draft4.subschemas = clone(subschemas) - - subschemas["propertyNames"] = self - subschemas["contains"] = self - Draft6.subschemas = clone(subschemas) - - subschemas["if"] = self - subschemas["then"] = self - subschemas["else"] = self - Draft7.subschemas = clone(subschemas) - - subschemas["$defs"] = prop - subschemas["dependentSchemas"] = prop - subschemas["unevaluatedProperties"] = self - subschemas["unevaluatedItems"] = self - subschemas["contentSchema"] = self - Draft2019.subschemas = clone(subschemas) - - subschemas["prefixItems"] = item - Draft2020.subschemas = clone(subschemas) - - Draft4.loadMeta("http://json-schema.org/draft-04/schema", `{ - "$schema": "http://json-schema.org/draft-04/schema#", - "description": "Core schema meta-schema", - "definitions": { - "schemaArray": { - "type": "array", - "minItems": 1, - "items": { "$ref": "#" } - }, - "positiveInteger": { - "type": "integer", - "minimum": 0 - }, - "positiveIntegerDefault0": { - "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] - }, - "simpleTypes": { - "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] - }, - "stringArray": { - "type": "array", - "items": { "type": "string" }, - "minItems": 1, - "uniqueItems": true - } - }, - "type": "object", - "properties": { - "id": { - "type": "string", - "format": "uriref" - }, - "$schema": { - "type": "string", - "format": "uri" - }, - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "default": {}, - "multipleOf": { - "type": "number", - "minimum": 0, - "exclusiveMinimum": true - }, - "maximum": { - "type": "number" - }, - "exclusiveMaximum": { - "type": "boolean", - "default": false - }, - "minimum": { - "type": "number" - }, - "exclusiveMinimum": { - "type": "boolean", - "default": false - }, - "maxLength": { "$ref": "#/definitions/positiveInteger" }, - "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, - "pattern": { - "type": "string", - "format": "regex" - }, - "additionalItems": { - "anyOf": [ - { "type": "boolean" }, - { "$ref": "#" } - ], - "default": {} - }, - "items": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/schemaArray" } - ], - "default": {} - }, - "maxItems": { "$ref": "#/definitions/positiveInteger" }, - "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, - "uniqueItems": { - "type": "boolean", - "default": false - }, - "maxProperties": { "$ref": "#/definitions/positiveInteger" }, - "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, - "required": { "$ref": "#/definitions/stringArray" }, - 
"additionalProperties": { - "anyOf": [ - { "type": "boolean" }, - { "$ref": "#" } - ], - "default": {} - }, - "definitions": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "properties": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "patternProperties": { - "type": "object", - "regexProperties": true, - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "regexProperties": { "type": "boolean" }, - "dependencies": { - "type": "object", - "additionalProperties": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/stringArray" } - ] - } - }, - "enum": { - "type": "array", - "minItems": 1, - "uniqueItems": true - }, - "type": { - "anyOf": [ - { "$ref": "#/definitions/simpleTypes" }, - { - "type": "array", - "items": { "$ref": "#/definitions/simpleTypes" }, - "minItems": 1, - "uniqueItems": true - } - ] - }, - "allOf": { "$ref": "#/definitions/schemaArray" }, - "anyOf": { "$ref": "#/definitions/schemaArray" }, - "oneOf": { "$ref": "#/definitions/schemaArray" }, - "not": { "$ref": "#" }, - "format": { "type": "string" }, - "$ref": { "type": "string" } - }, - "dependencies": { - "exclusiveMaximum": [ "maximum" ], - "exclusiveMinimum": [ "minimum" ] - }, - "default": {} - }`) - Draft6.loadMeta("http://json-schema.org/draft-06/schema", `{ - "$schema": "http://json-schema.org/draft-06/schema#", - "$id": "http://json-schema.org/draft-06/schema#", - "title": "Core schema meta-schema", - "definitions": { - "schemaArray": { - "type": "array", - "minItems": 1, - "items": { "$ref": "#" } - }, - "nonNegativeInteger": { - "type": "integer", - "minimum": 0 - }, - "nonNegativeIntegerDefault0": { - "allOf": [ - { "$ref": "#/definitions/nonNegativeInteger" }, - { "default": 0 } - ] - }, - "simpleTypes": { - "enum": [ - "array", - "boolean", - "integer", - "null", - "number", - "object", - "string" - ] - }, - "stringArray": { - "type": "array", - "items": { "type": "string" }, - "uniqueItems": true, - "default": [] - } - }, - "type": ["object", "boolean"], - "properties": { - "$id": { - "type": "string", - "format": "uri-reference" - }, - "$schema": { - "type": "string", - "format": "uri" - }, - "$ref": { - "type": "string", - "format": "uri-reference" - }, - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "default": {}, - "multipleOf": { - "type": "number", - "exclusiveMinimum": 0 - }, - "maximum": { - "type": "number" - }, - "exclusiveMaximum": { - "type": "number" - }, - "minimum": { - "type": "number" - }, - "exclusiveMinimum": { - "type": "number" - }, - "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, - "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, - "pattern": { - "type": "string", - "format": "regex" - }, - "additionalItems": { "$ref": "#" }, - "items": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/schemaArray" } - ], - "default": {} - }, - "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, - "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, - "uniqueItems": { - "type": "boolean", - "default": false - }, - "contains": { "$ref": "#" }, - "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, - "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, - "required": { "$ref": "#/definitions/stringArray" }, - "additionalProperties": { "$ref": "#" }, - "definitions": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "properties": { - 
"type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "patternProperties": { - "type": "object", - "regexProperties": true, - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "dependencies": { - "type": "object", - "additionalProperties": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/stringArray" } - ] - } - }, - "propertyNames": { "$ref": "#" }, - "const": {}, - "enum": { - "type": "array", - "minItems": 1, - "uniqueItems": true - }, - "type": { - "anyOf": [ - { "$ref": "#/definitions/simpleTypes" }, - { - "type": "array", - "items": { "$ref": "#/definitions/simpleTypes" }, - "minItems": 1, - "uniqueItems": true - } - ] - }, - "format": { "type": "string" }, - "allOf": { "$ref": "#/definitions/schemaArray" }, - "anyOf": { "$ref": "#/definitions/schemaArray" }, - "oneOf": { "$ref": "#/definitions/schemaArray" }, - "not": { "$ref": "#" } - }, - "default": {} - }`) - Draft7.loadMeta("http://json-schema.org/draft-07/schema", `{ - "$schema": "http://json-schema.org/draft-07/schema#", - "$id": "http://json-schema.org/draft-07/schema#", - "title": "Core schema meta-schema", - "definitions": { - "schemaArray": { - "type": "array", - "minItems": 1, - "items": { "$ref": "#" } - }, - "nonNegativeInteger": { - "type": "integer", - "minimum": 0 - }, - "nonNegativeIntegerDefault0": { - "allOf": [ - { "$ref": "#/definitions/nonNegativeInteger" }, - { "default": 0 } - ] - }, - "simpleTypes": { - "enum": [ - "array", - "boolean", - "integer", - "null", - "number", - "object", - "string" - ] - }, - "stringArray": { - "type": "array", - "items": { "type": "string" }, - "uniqueItems": true, - "default": [] - } - }, - "type": ["object", "boolean"], - "properties": { - "$id": { - "type": "string", - "format": "uri-reference" - }, - "$schema": { - "type": "string", - "format": "uri" - }, - "$ref": { - "type": "string", - "format": "uri-reference" - }, - "$comment": { - "type": "string" - }, - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "default": true, - "readOnly": { - "type": "boolean", - "default": false - }, - "writeOnly": { - "type": "boolean", - "default": false - }, - "examples": { - "type": "array", - "items": true - }, - "multipleOf": { - "type": "number", - "exclusiveMinimum": 0 - }, - "maximum": { - "type": "number" - }, - "exclusiveMaximum": { - "type": "number" - }, - "minimum": { - "type": "number" - }, - "exclusiveMinimum": { - "type": "number" - }, - "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, - "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, - "pattern": { - "type": "string", - "format": "regex" - }, - "additionalItems": { "$ref": "#" }, - "items": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/schemaArray" } - ], - "default": true - }, - "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, - "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, - "uniqueItems": { - "type": "boolean", - "default": false - }, - "contains": { "$ref": "#" }, - "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, - "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, - "required": { "$ref": "#/definitions/stringArray" }, - "additionalProperties": { "$ref": "#" }, - "definitions": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "properties": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "patternProperties": { - "type": "object", - 
"additionalProperties": { "$ref": "#" }, - "propertyNames": { "format": "regex" }, - "default": {} - }, - "dependencies": { - "type": "object", - "additionalProperties": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/stringArray" } - ] - } - }, - "propertyNames": { "$ref": "#" }, - "const": true, - "enum": { - "type": "array", - "items": true, - "minItems": 1, - "uniqueItems": true - }, - "type": { - "anyOf": [ - { "$ref": "#/definitions/simpleTypes" }, - { - "type": "array", - "items": { "$ref": "#/definitions/simpleTypes" }, - "minItems": 1, - "uniqueItems": true - } - ] - }, - "format": { "type": "string" }, - "contentMediaType": { "type": "string" }, - "contentEncoding": { "type": "string" }, - "if": { "$ref": "#" }, - "then": { "$ref": "#" }, - "else": { "$ref": "#" }, - "allOf": { "$ref": "#/definitions/schemaArray" }, - "anyOf": { "$ref": "#/definitions/schemaArray" }, - "oneOf": { "$ref": "#/definitions/schemaArray" }, - "not": { "$ref": "#" } - }, - "default": true - }`) - Draft2019.loadMeta("https://json-schema.org/draft/2019-09/schema", `{ - "$schema": "https://json-schema.org/draft/2019-09/schema", - "$id": "https://json-schema.org/draft/2019-09/schema", - "$vocabulary": { - "https://json-schema.org/draft/2019-09/vocab/core": true, - "https://json-schema.org/draft/2019-09/vocab/applicator": true, - "https://json-schema.org/draft/2019-09/vocab/validation": true, - "https://json-schema.org/draft/2019-09/vocab/meta-data": true, - "https://json-schema.org/draft/2019-09/vocab/format": false, - "https://json-schema.org/draft/2019-09/vocab/content": true - }, - "$recursiveAnchor": true, - - "title": "Core and Validation specifications meta-schema", - "allOf": [ - {"$ref": "meta/core"}, - {"$ref": "meta/applicator"}, - {"$ref": "meta/validation"}, - {"$ref": "meta/meta-data"}, - {"$ref": "meta/format"}, - {"$ref": "meta/content"} - ], - "type": ["object", "boolean"], - "properties": { - "definitions": { - "$comment": "While no longer an official keyword as it is replaced by $defs, this keyword is retained in the meta-schema to prevent incompatible extensions as it remains in common use.", - "type": "object", - "additionalProperties": { "$recursiveRef": "#" }, - "default": {} - }, - "dependencies": { - "$comment": "\"dependencies\" is no longer a keyword, but schema authors should avoid redefining it to facilitate a smooth transition to \"dependentSchemas\" and \"dependentRequired\"", - "type": "object", - "additionalProperties": { - "anyOf": [ - { "$recursiveRef": "#" }, - { "$ref": "meta/validation#/$defs/stringArray" } - ] - } - } - } - }`) - Draft2020.loadMeta("https://json-schema.org/draft/2020-12/schema", `{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://json-schema.org/draft/2020-12/schema", - "$vocabulary": { - "https://json-schema.org/draft/2020-12/vocab/core": true, - "https://json-schema.org/draft/2020-12/vocab/applicator": true, - "https://json-schema.org/draft/2020-12/vocab/unevaluated": true, - "https://json-schema.org/draft/2020-12/vocab/validation": true, - "https://json-schema.org/draft/2020-12/vocab/meta-data": true, - "https://json-schema.org/draft/2020-12/vocab/format-annotation": true, - "https://json-schema.org/draft/2020-12/vocab/content": true - }, - "$dynamicAnchor": "meta", - - "title": "Core and Validation specifications meta-schema", - "allOf": [ - {"$ref": "meta/core"}, - {"$ref": "meta/applicator"}, - {"$ref": "meta/unevaluated"}, - {"$ref": "meta/validation"}, - {"$ref": "meta/meta-data"}, - {"$ref": 
"meta/format-annotation"}, - {"$ref": "meta/content"} - ], - "type": ["object", "boolean"], - "$comment": "This meta-schema also defines keywords that have appeared in previous drafts in order to prevent incompatible extensions as they remain in common use.", - "properties": { - "definitions": { - "$comment": "\"definitions\" has been replaced by \"$defs\".", - "type": "object", - "additionalProperties": { "$dynamicRef": "#meta" }, - "deprecated": true, - "default": {} - }, - "dependencies": { - "$comment": "\"dependencies\" has been split and replaced by \"dependentSchemas\" and \"dependentRequired\" in order to serve their differing semantics.", - "type": "object", - "additionalProperties": { - "anyOf": [ - { "$dynamicRef": "#meta" }, - { "$ref": "meta/validation#/$defs/stringArray" } - ] - }, - "deprecated": true, - "default": {} - }, - "$recursiveAnchor": { - "$comment": "\"$recursiveAnchor\" has been replaced by \"$dynamicAnchor\".", - "$ref": "meta/core#/$defs/anchorString", - "deprecated": true - }, - "$recursiveRef": { - "$comment": "\"$recursiveRef\" has been replaced by \"$dynamicRef\".", - "$ref": "meta/core#/$defs/uriReferenceString", - "deprecated": true - } - } - }`) -} - -var vocabSchemas = map[string]string{ - "https://json-schema.org/draft/2019-09/meta/core": `{ - "$schema": "https://json-schema.org/draft/2019-09/schema", - "$id": "https://json-schema.org/draft/2019-09/meta/core", - "$vocabulary": { - "https://json-schema.org/draft/2019-09/vocab/core": true - }, - "$recursiveAnchor": true, - - "title": "Core vocabulary meta-schema", - "type": ["object", "boolean"], - "properties": { - "$id": { - "type": "string", - "format": "uri-reference", - "$comment": "Non-empty fragments not allowed.", - "pattern": "^[^#]*#?$" - }, - "$schema": { - "type": "string", - "format": "uri" - }, - "$anchor": { - "type": "string", - "pattern": "^[A-Za-z][-A-Za-z0-9.:_]*$" - }, - "$ref": { - "type": "string", - "format": "uri-reference" - }, - "$recursiveRef": { - "type": "string", - "format": "uri-reference" - }, - "$recursiveAnchor": { - "type": "boolean", - "default": false - }, - "$vocabulary": { - "type": "object", - "propertyNames": { - "type": "string", - "format": "uri" - }, - "additionalProperties": { - "type": "boolean" - } - }, - "$comment": { - "type": "string" - }, - "$defs": { - "type": "object", - "additionalProperties": { "$recursiveRef": "#" }, - "default": {} - } - } - }`, - "https://json-schema.org/draft/2019-09/meta/applicator": `{ - "$schema": "https://json-schema.org/draft/2019-09/schema", - "$id": "https://json-schema.org/draft/2019-09/meta/applicator", - "$vocabulary": { - "https://json-schema.org/draft/2019-09/vocab/applicator": true - }, - "$recursiveAnchor": true, - - "title": "Applicator vocabulary meta-schema", - "type": ["object", "boolean"], - "properties": { - "additionalItems": { "$recursiveRef": "#" }, - "unevaluatedItems": { "$recursiveRef": "#" }, - "items": { - "anyOf": [ - { "$recursiveRef": "#" }, - { "$ref": "#/$defs/schemaArray" } - ] - }, - "contains": { "$recursiveRef": "#" }, - "additionalProperties": { "$recursiveRef": "#" }, - "unevaluatedProperties": { "$recursiveRef": "#" }, - "properties": { - "type": "object", - "additionalProperties": { "$recursiveRef": "#" }, - "default": {} - }, - "patternProperties": { - "type": "object", - "additionalProperties": { "$recursiveRef": "#" }, - "propertyNames": { "format": "regex" }, - "default": {} - }, - "dependentSchemas": { - "type": "object", - "additionalProperties": { - "$recursiveRef": "#" - } - }, - 
"propertyNames": { "$recursiveRef": "#" }, - "if": { "$recursiveRef": "#" }, - "then": { "$recursiveRef": "#" }, - "else": { "$recursiveRef": "#" }, - "allOf": { "$ref": "#/$defs/schemaArray" }, - "anyOf": { "$ref": "#/$defs/schemaArray" }, - "oneOf": { "$ref": "#/$defs/schemaArray" }, - "not": { "$recursiveRef": "#" } - }, - "$defs": { - "schemaArray": { - "type": "array", - "minItems": 1, - "items": { "$recursiveRef": "#" } - } - } - }`, - "https://json-schema.org/draft/2019-09/meta/validation": `{ - "$schema": "https://json-schema.org/draft/2019-09/schema", - "$id": "https://json-schema.org/draft/2019-09/meta/validation", - "$vocabulary": { - "https://json-schema.org/draft/2019-09/vocab/validation": true - }, - "$recursiveAnchor": true, - - "title": "Validation vocabulary meta-schema", - "type": ["object", "boolean"], - "properties": { - "multipleOf": { - "type": "number", - "exclusiveMinimum": 0 - }, - "maximum": { - "type": "number" - }, - "exclusiveMaximum": { - "type": "number" - }, - "minimum": { - "type": "number" - }, - "exclusiveMinimum": { - "type": "number" - }, - "maxLength": { "$ref": "#/$defs/nonNegativeInteger" }, - "minLength": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, - "pattern": { - "type": "string", - "format": "regex" - }, - "maxItems": { "$ref": "#/$defs/nonNegativeInteger" }, - "minItems": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, - "uniqueItems": { - "type": "boolean", - "default": false - }, - "maxContains": { "$ref": "#/$defs/nonNegativeInteger" }, - "minContains": { - "$ref": "#/$defs/nonNegativeInteger", - "default": 1 - }, - "maxProperties": { "$ref": "#/$defs/nonNegativeInteger" }, - "minProperties": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, - "required": { "$ref": "#/$defs/stringArray" }, - "dependentRequired": { - "type": "object", - "additionalProperties": { - "$ref": "#/$defs/stringArray" - } - }, - "const": true, - "enum": { - "type": "array", - "items": true - }, - "type": { - "anyOf": [ - { "$ref": "#/$defs/simpleTypes" }, - { - "type": "array", - "items": { "$ref": "#/$defs/simpleTypes" }, - "minItems": 1, - "uniqueItems": true - } - ] - } - }, - "$defs": { - "nonNegativeInteger": { - "type": "integer", - "minimum": 0 - }, - "nonNegativeIntegerDefault0": { - "$ref": "#/$defs/nonNegativeInteger", - "default": 0 - }, - "simpleTypes": { - "enum": [ - "array", - "boolean", - "integer", - "null", - "number", - "object", - "string" - ] - }, - "stringArray": { - "type": "array", - "items": { "type": "string" }, - "uniqueItems": true, - "default": [] - } - } - }`, - "https://json-schema.org/draft/2019-09/meta/meta-data": `{ - "$schema": "https://json-schema.org/draft/2019-09/schema", - "$id": "https://json-schema.org/draft/2019-09/meta/meta-data", - "$vocabulary": { - "https://json-schema.org/draft/2019-09/vocab/meta-data": true - }, - "$recursiveAnchor": true, - - "title": "Meta-data vocabulary meta-schema", - - "type": ["object", "boolean"], - "properties": { - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "default": true, - "deprecated": { - "type": "boolean", - "default": false - }, - "readOnly": { - "type": "boolean", - "default": false - }, - "writeOnly": { - "type": "boolean", - "default": false - }, - "examples": { - "type": "array", - "items": true - } - } - }`, - "https://json-schema.org/draft/2019-09/meta/format": `{ - "$schema": "https://json-schema.org/draft/2019-09/schema", - "$id": "https://json-schema.org/draft/2019-09/meta/format", - "$vocabulary": { - 
"https://json-schema.org/draft/2019-09/vocab/format": true - }, - "$recursiveAnchor": true, - - "title": "Format vocabulary meta-schema", - "type": ["object", "boolean"], - "properties": { - "format": { "type": "string" } - } - }`, - "https://json-schema.org/draft/2019-09/meta/content": `{ - "$schema": "https://json-schema.org/draft/2019-09/schema", - "$id": "https://json-schema.org/draft/2019-09/meta/content", - "$vocabulary": { - "https://json-schema.org/draft/2019-09/vocab/content": true - }, - "$recursiveAnchor": true, - - "title": "Content vocabulary meta-schema", - - "type": ["object", "boolean"], - "properties": { - "contentMediaType": { "type": "string" }, - "contentEncoding": { "type": "string" }, - "contentSchema": { "$recursiveRef": "#" } - } - }`, - "https://json-schema.org/draft/2020-12/meta/core": `{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://json-schema.org/draft/2020-12/meta/core", - "$vocabulary": { - "https://json-schema.org/draft/2020-12/vocab/core": true - }, - "$dynamicAnchor": "meta", - - "title": "Core vocabulary meta-schema", - "type": ["object", "boolean"], - "properties": { - "$id": { - "$ref": "#/$defs/uriReferenceString", - "$comment": "Non-empty fragments not allowed.", - "pattern": "^[^#]*#?$" - }, - "$schema": { "$ref": "#/$defs/uriString" }, - "$ref": { "$ref": "#/$defs/uriReferenceString" }, - "$anchor": { "$ref": "#/$defs/anchorString" }, - "$dynamicRef": { "$ref": "#/$defs/uriReferenceString" }, - "$dynamicAnchor": { "$ref": "#/$defs/anchorString" }, - "$vocabulary": { - "type": "object", - "propertyNames": { "$ref": "#/$defs/uriString" }, - "additionalProperties": { - "type": "boolean" - } - }, - "$comment": { - "type": "string" - }, - "$defs": { - "type": "object", - "additionalProperties": { "$dynamicRef": "#meta" } - } - }, - "$defs": { - "anchorString": { - "type": "string", - "pattern": "^[A-Za-z_][-A-Za-z0-9._]*$" - }, - "uriString": { - "type": "string", - "format": "uri" - }, - "uriReferenceString": { - "type": "string", - "format": "uri-reference" - } - } - }`, - "https://json-schema.org/draft/2020-12/meta/applicator": `{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://json-schema.org/draft/2020-12/meta/applicator", - "$vocabulary": { - "https://json-schema.org/draft/2020-12/vocab/applicator": true - }, - "$dynamicAnchor": "meta", - - "title": "Applicator vocabulary meta-schema", - "type": ["object", "boolean"], - "properties": { - "prefixItems": { "$ref": "#/$defs/schemaArray" }, - "items": { "$dynamicRef": "#meta" }, - "contains": { "$dynamicRef": "#meta" }, - "additionalProperties": { "$dynamicRef": "#meta" }, - "properties": { - "type": "object", - "additionalProperties": { "$dynamicRef": "#meta" }, - "default": {} - }, - "patternProperties": { - "type": "object", - "additionalProperties": { "$dynamicRef": "#meta" }, - "propertyNames": { "format": "regex" }, - "default": {} - }, - "dependentSchemas": { - "type": "object", - "additionalProperties": { "$dynamicRef": "#meta" }, - "default": {} - }, - "propertyNames": { "$dynamicRef": "#meta" }, - "if": { "$dynamicRef": "#meta" }, - "then": { "$dynamicRef": "#meta" }, - "else": { "$dynamicRef": "#meta" }, - "allOf": { "$ref": "#/$defs/schemaArray" }, - "anyOf": { "$ref": "#/$defs/schemaArray" }, - "oneOf": { "$ref": "#/$defs/schemaArray" }, - "not": { "$dynamicRef": "#meta" } - }, - "$defs": { - "schemaArray": { - "type": "array", - "minItems": 1, - "items": { "$dynamicRef": "#meta" } - } - } - }`, - 
"https://json-schema.org/draft/2020-12/meta/unevaluated": `{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://json-schema.org/draft/2020-12/meta/unevaluated", - "$vocabulary": { - "https://json-schema.org/draft/2020-12/vocab/unevaluated": true - }, - "$dynamicAnchor": "meta", - - "title": "Unevaluated applicator vocabulary meta-schema", - "type": ["object", "boolean"], - "properties": { - "unevaluatedItems": { "$dynamicRef": "#meta" }, - "unevaluatedProperties": { "$dynamicRef": "#meta" } - } - }`, - "https://json-schema.org/draft/2020-12/meta/validation": `{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://json-schema.org/draft/2020-12/meta/validation", - "$vocabulary": { - "https://json-schema.org/draft/2020-12/vocab/validation": true - }, - "$dynamicAnchor": "meta", - - "title": "Validation vocabulary meta-schema", - "type": ["object", "boolean"], - "properties": { - "type": { - "anyOf": [ - { "$ref": "#/$defs/simpleTypes" }, - { - "type": "array", - "items": { "$ref": "#/$defs/simpleTypes" }, - "minItems": 1, - "uniqueItems": true - } - ] - }, - "const": true, - "enum": { - "type": "array", - "items": true - }, - "multipleOf": { - "type": "number", - "exclusiveMinimum": 0 - }, - "maximum": { - "type": "number" - }, - "exclusiveMaximum": { - "type": "number" - }, - "minimum": { - "type": "number" - }, - "exclusiveMinimum": { - "type": "number" - }, - "maxLength": { "$ref": "#/$defs/nonNegativeInteger" }, - "minLength": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, - "pattern": { - "type": "string", - "format": "regex" - }, - "maxItems": { "$ref": "#/$defs/nonNegativeInteger" }, - "minItems": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, - "uniqueItems": { - "type": "boolean", - "default": false - }, - "maxContains": { "$ref": "#/$defs/nonNegativeInteger" }, - "minContains": { - "$ref": "#/$defs/nonNegativeInteger", - "default": 1 - }, - "maxProperties": { "$ref": "#/$defs/nonNegativeInteger" }, - "minProperties": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, - "required": { "$ref": "#/$defs/stringArray" }, - "dependentRequired": { - "type": "object", - "additionalProperties": { - "$ref": "#/$defs/stringArray" - } - } - }, - "$defs": { - "nonNegativeInteger": { - "type": "integer", - "minimum": 0 - }, - "nonNegativeIntegerDefault0": { - "$ref": "#/$defs/nonNegativeInteger", - "default": 0 - }, - "simpleTypes": { - "enum": [ - "array", - "boolean", - "integer", - "null", - "number", - "object", - "string" - ] - }, - "stringArray": { - "type": "array", - "items": { "type": "string" }, - "uniqueItems": true, - "default": [] - } - } - }`, - "https://json-schema.org/draft/2020-12/meta/meta-data": `{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://json-schema.org/draft/2020-12/meta/meta-data", - "$vocabulary": { - "https://json-schema.org/draft/2020-12/vocab/meta-data": true - }, - "$dynamicAnchor": "meta", - - "title": "Meta-data vocabulary meta-schema", - - "type": ["object", "boolean"], - "properties": { - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "default": true, - "deprecated": { - "type": "boolean", - "default": false - }, - "readOnly": { - "type": "boolean", - "default": false - }, - "writeOnly": { - "type": "boolean", - "default": false - }, - "examples": { - "type": "array", - "items": true - } - } - }`, - "https://json-schema.org/draft/2020-12/meta/format-annotation": `{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": 
"https://json-schema.org/draft/2020-12/meta/format-annotation", - "$vocabulary": { - "https://json-schema.org/draft/2020-12/vocab/format-annotation": true - }, - "$dynamicAnchor": "meta", - - "title": "Format vocabulary meta-schema for annotation results", - "type": ["object", "boolean"], - "properties": { - "format": { "type": "string" } - } - }`, - "https://json-schema.org/draft/2020-12/meta/format-assertion": `{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://json-schema.org/draft/2020-12/meta/format-assertion", - "$vocabulary": { - "https://json-schema.org/draft/2020-12/vocab/format-assertion": true - }, - "$dynamicAnchor": "meta", - - "title": "Format vocabulary meta-schema for assertion results", - "type": ["object", "boolean"], - "properties": { - "format": { "type": "string" } - } - }`, - "https://json-schema.org/draft/2020-12/meta/content": `{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://json-schema.org/draft/2020-12/meta/content", - "$vocabulary": { - "https://json-schema.org/draft/2020-12/vocab/content": true - }, - "$dynamicAnchor": "meta", - - "title": "Content vocabulary meta-schema", - - "type": ["object", "boolean"], - "properties": { - "contentEncoding": { "type": "string" }, - "contentMediaType": { "type": "string" }, - "contentSchema": { "$dynamicRef": "#meta" } - } - }`, -} - -func clone(m map[string]position) map[string]position { - mm := make(map[string]position) - for k, v := range m { - mm[k] = v - } - return mm -} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/errors.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/errors.go deleted file mode 100644 index deaded89f7..0000000000 --- a/vendor/github.com/santhosh-tekuri/jsonschema/v5/errors.go +++ /dev/null @@ -1,129 +0,0 @@ -package jsonschema - -import ( - "fmt" - "strings" -) - -// InvalidJSONTypeError is the error type returned by ValidateInterface. -// this tells that specified go object is not valid jsonType. -type InvalidJSONTypeError string - -func (e InvalidJSONTypeError) Error() string { - return fmt.Sprintf("jsonschema: invalid jsonType: %s", string(e)) -} - -// InfiniteLoopError is returned by Compile/Validate. -// this gives url#keywordLocation that lead to infinity loop. -type InfiniteLoopError string - -func (e InfiniteLoopError) Error() string { - return "jsonschema: infinite loop " + string(e) -} - -func infiniteLoopError(stack []schemaRef, sref schemaRef) InfiniteLoopError { - var path string - for _, ref := range stack { - if path == "" { - path += ref.schema.Location - } else { - path += "/" + ref.path - } - } - return InfiniteLoopError(path + "/" + sref.path) -} - -// SchemaError is the error type returned by Compile. -type SchemaError struct { - // SchemaURL is the url to json-schema that filed to compile. - // This is helpful, if your schema refers to external schemas - SchemaURL string - - // Err is the error that occurred during compilation. 
- // It could be a ValidationError, because compilation validates the - // given schema against the json meta-schema - Err error -} - -func (se *SchemaError) Unwrap() error { - return se.Err -} - -func (se *SchemaError) Error() string { - s := fmt.Sprintf("jsonschema %s compilation failed", se.SchemaURL) - if se.Err != nil { - return fmt.Sprintf("%s: %v", s, strings.TrimPrefix(se.Err.Error(), "jsonschema: ")) - } - return s -} - -func (se *SchemaError) GoString() string { - if _, ok := se.Err.(*ValidationError); ok { - return fmt.Sprintf("jsonschema %s compilation failed\n%#v", se.SchemaURL, se.Err) - } - return se.Error() -} - -// ValidationError is the error type returned by Validate. -type ValidationError struct { - KeywordLocation string // validation path of validating keyword or schema - AbsoluteKeywordLocation string // absolute location of validating keyword or schema - InstanceLocation string // location of the json value within the instance being validated - Message string // describes error - Causes []*ValidationError // nested validation errors -} - -func (ve *ValidationError) add(causes ...error) error { - for _, cause := range causes { - ve.Causes = append(ve.Causes, cause.(*ValidationError)) - } - return ve -} - -func (ve *ValidationError) causes(err error) error { - if err := err.(*ValidationError); err.Message == "" { - ve.Causes = err.Causes - } else { - ve.add(err) - } - return ve -} - -func (ve *ValidationError) Error() string { - leaf := ve - for len(leaf.Causes) > 0 { - leaf = leaf.Causes[0] - } - u, _ := split(ve.AbsoluteKeywordLocation) - return fmt.Sprintf("jsonschema: %s does not validate with %s: %s", quote(leaf.InstanceLocation), u+"#"+leaf.KeywordLocation, leaf.Message) -} - -func (ve *ValidationError) GoString() string { - sloc := ve.AbsoluteKeywordLocation - sloc = sloc[strings.IndexByte(sloc, '#')+1:] - msg := fmt.Sprintf("[I#%s] [S#%s] %s", ve.InstanceLocation, sloc, ve.Message) - for _, c := range ve.Causes { - for _, line := range strings.Split(c.GoString(), "\n") { - msg += "\n " + line - } - } - return msg -} - -func joinPtr(ptr1, ptr2 string) string { - if len(ptr1) == 0 { - return ptr2 - } - if len(ptr2) == 0 { - return ptr1 - } - return ptr1 + "/" + ptr2 -} - -// quote returns a single-quoted string. -func quote(s string) string { - s = fmt.Sprintf("%q", s) - s = strings.ReplaceAll(s, `\"`, `"`) - s = strings.ReplaceAll(s, `'`, `\'`) - return "'" + s[1:len(s)-1] + "'" -} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/extension.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/extension.go deleted file mode 100644 index 452ba118c5..0000000000 --- a/vendor/github.com/santhosh-tekuri/jsonschema/v5/extension.go +++ /dev/null @@ -1,116 +0,0 @@ -package jsonschema - -// ExtCompiler compiles custom keyword(s) into ExtSchema. -type ExtCompiler interface { - // Compile compiles the custom keywords in schema m and returns its compiled representation. - // If the schema m does not contain the keywords defined by this extension, - // a nil compiled representation should be returned. - Compile(ctx CompilerContext, m map[string]interface{}) (ExtSchema, error) -} - -// ExtSchema is the schema representation of custom keyword(s). -type ExtSchema interface { - // Validate validates the json value v with this ExtSchema. - // Returned error must be *ValidationError. - Validate(ctx ValidationContext, v interface{}) error -} - -type extension struct { - meta *Schema - compiler ExtCompiler -} - -// RegisterExtension registers custom keyword(s) into this compiler.
-// -// name is extension name, used only to avoid name collisions. -// meta captures the metaschema for the new keywords. -// This is used to validate the schema before calling ext.Compile. -func (c *Compiler) RegisterExtension(name string, meta *Schema, ext ExtCompiler) { - c.extensions[name] = extension{meta, ext} -} - -// CompilerContext --- - -// CompilerContext provides additional context required in compiling for extension. -type CompilerContext struct { - c *Compiler - r *resource - stack []schemaRef - res *resource -} - -// Compile compiles given value at ptr into *Schema. This is useful in implementing -// keyword like allOf/not/patternProperties. -// -// schPath is the relative-json-pointer to the schema to be compiled from parent schema. -// -// applicableOnSameInstance tells whether current schema and the given schema -// are applied on same instance value. this is used to detect infinite loop in schema. -func (ctx CompilerContext) Compile(schPath string, applicableOnSameInstance bool) (*Schema, error) { - var stack []schemaRef - if applicableOnSameInstance { - stack = ctx.stack - } - return ctx.c.compileRef(ctx.r, stack, schPath, ctx.res, ctx.r.url+ctx.res.floc+"/"+schPath) -} - -// CompileRef compiles the schema referenced by ref uri -// -// refPath is the relative-json-pointer to ref. -// -// applicableOnSameInstance tells whether current schema and the given schema -// are applied on same instance value. this is used to detect infinite loop in schema. -func (ctx CompilerContext) CompileRef(ref string, refPath string, applicableOnSameInstance bool) (*Schema, error) { - var stack []schemaRef - if applicableOnSameInstance { - stack = ctx.stack - } - return ctx.c.compileRef(ctx.r, stack, refPath, ctx.res, ref) -} - -// ValidationContext --- - -// ValidationContext provides additional context required in validating for extension. -type ValidationContext struct { - result validationResult - validate func(sch *Schema, schPath string, v interface{}, vpath string) error - validateInplace func(sch *Schema, schPath string) error - validationError func(keywordPath string, format string, a ...interface{}) *ValidationError -} - -// EvaluatedProp marks given property of object as evaluated. -func (ctx ValidationContext) EvaluatedProp(prop string) { - delete(ctx.result.unevalProps, prop) -} - -// EvaluatedItem marks given index of array as evaluated. -func (ctx ValidationContext) EvaluatedItem(index int) { - delete(ctx.result.unevalItems, index) -} - -// Validate validates schema s with value v. Extension must use this method instead of -// *Schema.ValidateInterface method. This will be useful in implementing keywords like -// allOf/oneOf -// -// spath is relative-json-pointer to s -// vpath is relative-json-pointer to v. -func (ctx ValidationContext) Validate(s *Schema, spath string, v interface{}, vpath string) error { - if vpath == "" { - return ctx.validateInplace(s, spath) - } - return ctx.validate(s, spath, v, vpath) -} - -// Error used to construct validation error by extensions. -// -// keywordPath is relative-json-pointer to keyword. -func (ctx ValidationContext) Error(keywordPath string, format string, a ...interface{}) *ValidationError { - return ctx.validationError(keywordPath, format, a...) -} - -// Group is used by extensions to group multiple errors as causes to parent error. -// This is useful in implementing keywords like allOf where each schema specified -// in allOf can result a validationError. 
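The extension hooks above are easiest to follow with a concrete keyword. Below is a sketch modeled on the library's documented `powerOf` example; `powerOfCompiler`, `powerOfSchema`, and the metaschema are illustrative names, not part of the package:

```go
package main

import (
	"encoding/json"
	"strings"

	"github.com/santhosh-tekuri/jsonschema/v5"
)

// Metaschema for the new keyword; the compiler validates schemas against
// it before calling Compile, per the contract described above.
var powerOfMeta = jsonschema.MustCompileString("powerOf.json", `{
	"properties": {
		"powerOf": { "type": "integer", "exclusiveMinimum": 1 }
	}
}`)

type powerOfCompiler struct{}

func (powerOfCompiler) Compile(ctx jsonschema.CompilerContext, m map[string]interface{}) (jsonschema.ExtSchema, error) {
	if pow, ok := m["powerOf"]; ok {
		// schema documents are decoded with UseNumber, so this is a json.Number
		n, err := pow.(json.Number).Int64()
		if err != nil {
			return nil, err
		}
		return powerOfSchema(n), nil
	}
	return nil, nil // keyword absent: nil compiled representation, as documented above
}

type powerOfSchema int64

// Validate returns *ValidationError, built via ctx.Error as required.
func (s powerOfSchema) Validate(ctx jsonschema.ValidationContext, v interface{}) error {
	n, ok := v.(json.Number)
	if !ok {
		return nil // the keyword applies to numbers only
	}
	got, err := n.Int64()
	if err != nil || got < 1 {
		return ctx.Error("powerOf", "%v is not a positive integer", v)
	}
	for base := int64(s); got%base == 0; got /= base {
	}
	if got != 1 {
		return ctx.Error("powerOf", "%v is not a power of %d", v, int64(s))
	}
	return nil
}

func main() {
	c := jsonschema.NewCompiler()
	c.RegisterExtension("powerOf", powerOfMeta, powerOfCompiler{})
	if err := c.AddResource("schema.json", strings.NewReader(`{"powerOf": 2}`)); err != nil {
		panic(err)
	}
	sch := c.MustCompile("schema.json")
	_ = sch.Validate(json.Number("8")) // nil: 8 = 2^3
}
```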
-func (ValidationError) Group(parent *ValidationError, causes ...error) error { - return parent.add(causes...) -} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/format.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/format.go deleted file mode 100644 index 05686073f0..0000000000 --- a/vendor/github.com/santhosh-tekuri/jsonschema/v5/format.go +++ /dev/null @@ -1,567 +0,0 @@ -package jsonschema - -import ( - "errors" - "net" - "net/mail" - "net/url" - "regexp" - "strconv" - "strings" - "time" -) - -// Formats is a registry of functions, which know how to validate -// a specific format. -// -// New Formats can be registered by adding to this map. Key is format name, -// value is function that knows how to validate that format. -var Formats = map[string]func(interface{}) bool{ - "date-time": isDateTime, - "date": isDate, - "time": isTime, - "duration": isDuration, - "period": isPeriod, - "hostname": isHostname, - "email": isEmail, - "ip-address": isIPV4, - "ipv4": isIPV4, - "ipv6": isIPV6, - "uri": isURI, - "iri": isURI, - "uri-reference": isURIReference, - "uriref": isURIReference, - "iri-reference": isURIReference, - "uri-template": isURITemplate, - "regex": isRegex, - "json-pointer": isJSONPointer, - "relative-json-pointer": isRelativeJSONPointer, - "uuid": isUUID, -} - -// isDateTime tells whether given string is a valid date representation -// as defined by RFC 3339, section 5.6. -// -// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6, for details -func isDateTime(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - if len(s) < 20 { // yyyy-mm-ddThh:mm:ssZ - return false - } - if s[10] != 'T' && s[10] != 't' { - return false - } - return isDate(s[:10]) && isTime(s[11:]) -} - -// isDate tells whether given string is a valid full-date production -// as defined by RFC 3339, section 5.6. -// -// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6, for details -func isDate(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - _, err := time.Parse("2006-01-02", s) - return err == nil -} - -// isTime tells whether given string is a valid full-time production -// as defined by RFC 3339, section 5.6. -// -// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6, for details -func isTime(v interface{}) bool { - str, ok := v.(string) - if !ok { - return true - } - - // golang time package does not support leap seconds. - // so we are parsing it manually here. - - // hh:mm:ss - // 01234567 - if len(str) < 9 || str[2] != ':' || str[5] != ':' { - return false - } - isInRange := func(str string, min, max int) (int, bool) { - n, err := strconv.Atoi(str) - if err != nil { - return 0, false - } - if n < min || n > max { - return 0, false - } - return n, true - } - var h, m, s int - if h, ok = isInRange(str[0:2], 0, 23); !ok { - return false - } - if m, ok = isInRange(str[3:5], 0, 59); !ok { - return false - } - if s, ok = isInRange(str[6:8], 0, 60); !ok { - return false - } - str = str[8:] - - // parse secfrac if present - if str[0] == '.' 
{ - // dot following more than one digit - str = str[1:] - var numDigits int - for str != "" { - if str[0] < '0' || str[0] > '9' { - break - } - numDigits++ - str = str[1:] - } - if numDigits == 0 { - return false - } - } - - if len(str) == 0 { - return false - } - - if str[0] == 'z' || str[0] == 'Z' { - if len(str) != 1 { - return false - } - } else { - // time-numoffset - // +hh:mm - // 012345 - if len(str) != 6 || str[3] != ':' { - return false - } - - var sign int - if str[0] == '+' { - sign = -1 - } else if str[0] == '-' { - sign = +1 - } else { - return false - } - - var zh, zm int - if zh, ok = isInRange(str[1:3], 0, 23); !ok { - return false - } - if zm, ok = isInRange(str[4:6], 0, 59); !ok { - return false - } - - // apply timezone offset - hm := (h*60 + m) + sign*(zh*60+zm) - if hm < 0 { - hm += 24 * 60 - } - h, m = hm/60, hm%60 - } - - // check leapsecond - if s == 60 { // leap second - if h != 23 || m != 59 { - return false - } - } - - return true -} - -// isDuration tells whether given string is a valid duration format -// from the ISO 8601 ABNF as given in Appendix A of RFC 3339. -// -// see https://datatracker.ietf.org/doc/html/rfc3339#appendix-A, for details -func isDuration(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - if len(s) == 0 || s[0] != 'P' { - return false - } - s = s[1:] - parseUnits := func() (units string, ok bool) { - for len(s) > 0 && s[0] != 'T' { - digits := false - for { - if len(s) == 0 { - break - } - if s[0] < '0' || s[0] > '9' { - break - } - digits = true - s = s[1:] - } - if !digits || len(s) == 0 { - return units, false - } - units += s[:1] - s = s[1:] - } - return units, true - } - units, ok := parseUnits() - if !ok { - return false - } - if units == "W" { - return len(s) == 0 // P_W - } - if len(units) > 0 { - if strings.Index("YMD", units) == -1 { - return false - } - if len(s) == 0 { - return true // "P" dur-date - } - } - if len(s) == 0 || s[0] != 'T' { - return false - } - s = s[1:] - units, ok = parseUnits() - return ok && len(s) == 0 && len(units) > 0 && strings.Index("HMS", units) != -1 -} - -// isPeriod tells whether given string is a valid period format -// from the ISO 8601 ABNF as given in Appendix A of RFC 3339. -// -// see https://datatracker.ietf.org/doc/html/rfc3339#appendix-A, for details -func isPeriod(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - slash := strings.IndexByte(s, '/') - if slash == -1 { - return false - } - start, end := s[:slash], s[slash+1:] - if isDateTime(start) { - return isDateTime(end) || isDuration(end) - } - return isDuration(start) && isDateTime(end) -} - -// isHostname tells whether given string is a valid representation -// for an Internet host name, as defined by RFC 1034 section 3.1 and -// RFC 1123 section 2.1. -// -// See https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names, for details. 
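Per the `Formats` registry comment above, new formats are added by writing to the map. A minimal sketch; the "even-length" format name is invented for illustration. Note that in drafts 2019-09/2020-12 `format` is annotation-only by default, so the compiler's `AssertFormat` flag must be set for it to assert:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/santhosh-tekuri/jsonschema/v5"
)

func main() {
	// register the custom format before compiling any schema that uses it
	jsonschema.Formats["even-length"] = func(v interface{}) bool {
		s, ok := v.(string)
		if !ok {
			return true // non-strings pass, matching the built-ins in this file
		}
		return len(s)%2 == 0
	}

	c := jsonschema.NewCompiler()
	c.AssertFormat = true // format is annotation-only by default in 2019-09/2020-12
	if err := c.AddResource("s.json", strings.NewReader(`{"format": "even-length"}`)); err != nil {
		panic(err)
	}
	sch := c.MustCompile("s.json")
	fmt.Println(sch.Validate("abcd")) // <nil>
	fmt.Println(sch.Validate("abc"))  // validation error
}
```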
-func isHostname(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - // entire hostname (including the delimiting dots but not a trailing dot) has a maximum of 253 ASCII characters - s = strings.TrimSuffix(s, ".") - if len(s) > 253 { - return false - } - - // Hostnames are composed of series of labels concatenated with dots, as are all domain names - for _, label := range strings.Split(s, ".") { - // Each label must be from 1 to 63 characters long - if labelLen := len(label); labelLen < 1 || labelLen > 63 { - return false - } - - // labels must not start with a hyphen - // RFC 1123 section 2.1: restriction on the first character - // is relaxed to allow either a letter or a digit - if first := s[0]; first == '-' { - return false - } - - // must not end with a hyphen - if label[len(label)-1] == '-' { - return false - } - - // labels may contain only the ASCII letters 'a' through 'z' (in a case-insensitive manner), - // the digits '0' through '9', and the hyphen ('-') - for _, c := range label { - if valid := (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || (c == '-'); !valid { - return false - } - } - } - - return true -} - -// isEmail tells whether given string is a valid Internet email address -// as defined by RFC 5322, section 3.4.1. -// -// See https://en.wikipedia.org/wiki/Email_address, for details. -func isEmail(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - // entire email address to be no more than 254 characters long - if len(s) > 254 { - return false - } - - // email address is generally recognized as having two parts joined with an at-sign - at := strings.LastIndexByte(s, '@') - if at == -1 { - return false - } - local := s[0:at] - domain := s[at+1:] - - // local part may be up to 64 characters long - if len(local) > 64 { - return false - } - - // domain if enclosed in brackets, must match an IP address - if len(domain) >= 2 && domain[0] == '[' && domain[len(domain)-1] == ']' { - ip := domain[1 : len(domain)-1] - if strings.HasPrefix(ip, "IPv6:") { - return isIPV6(strings.TrimPrefix(ip, "IPv6:")) - } - return isIPV4(ip) - } - - // domain must match the requirements for a hostname - if !isHostname(domain) { - return false - } - - _, err := mail.ParseAddress(s) - return err == nil -} - -// isIPV4 tells whether given string is a valid representation of an IPv4 address -// according to the "dotted-quad" ABNF syntax as defined in RFC 2673, section 3.2. -func isIPV4(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - groups := strings.Split(s, ".") - if len(groups) != 4 { - return false - } - for _, group := range groups { - n, err := strconv.Atoi(group) - if err != nil { - return false - } - if n < 0 || n > 255 { - return false - } - if n != 0 && group[0] == '0' { - return false // leading zeroes should be rejected, as they are treated as octals - } - } - return true -} - -// isIPV6 tells whether given string is a valid representation of an IPv6 address -// as defined in RFC 2373, section 2.2. -func isIPV6(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - if !strings.Contains(s, ":") { - return false - } - return net.ParseIP(s) != nil -} - -// isURI tells whether given string is valid URI, according to RFC 3986. 
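The individual checkers are unexported but reachable through the `Formats` map, which also surfaces two conventions of the deleted code: non-strings pass (type checking is left to the `type` keyword), and nonzero octets with leading zeros are rejected. A quick sketch:

```go
package main

import (
	"fmt"

	"github.com/santhosh-tekuri/jsonschema/v5"
)

func main() {
	ipv4 := jsonschema.Formats["ipv4"]
	fmt.Println(ipv4("127.0.0.1"))   // true
	fmt.Println(ipv4("127.010.0.1")) // false: leading zeros would read as octal
	fmt.Println(ipv4(42))            // true: non-strings are left to the "type" keyword
}
```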
-func isURI(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - u, err := urlParse(s) - return err == nil && u.IsAbs() -} - -func urlParse(s string) (*url.URL, error) { - u, err := url.Parse(s) - if err != nil { - return nil, err - } - - // if hostname is ipv6, validate it - hostname := u.Hostname() - if strings.IndexByte(hostname, ':') != -1 { - if strings.IndexByte(u.Host, '[') == -1 || strings.IndexByte(u.Host, ']') == -1 { - return nil, errors.New("ipv6 address is not enclosed in brackets") - } - if !isIPV6(hostname) { - return nil, errors.New("invalid ipv6 address") - } - } - return u, nil -} - -// isURIReference tells whether given string is a valid URI Reference -// (either a URI or a relative-reference), according to RFC 3986. -func isURIReference(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - _, err := urlParse(s) - return err == nil && !strings.Contains(s, `\`) -} - -// isURITemplate tells whether given string is a valid URI Template -// according to RFC6570. -// -// Current implementation does minimal validation. -func isURITemplate(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - u, err := urlParse(s) - if err != nil { - return false - } - for _, item := range strings.Split(u.RawPath, "/") { - depth := 0 - for _, ch := range item { - switch ch { - case '{': - depth++ - if depth != 1 { - return false - } - case '}': - depth-- - if depth != 0 { - return false - } - } - } - if depth != 0 { - return false - } - } - return true -} - -// isRegex tells whether given string is a valid regular expression, -// according to the ECMA 262 regular expression dialect. -// -// The implementation uses go-lang regexp package. -func isRegex(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - _, err := regexp.Compile(s) - return err == nil -} - -// isJSONPointer tells whether given string is a valid JSON Pointer. -// -// Note: It returns false for JSON Pointer URI fragments. -func isJSONPointer(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - if s != "" && !strings.HasPrefix(s, "/") { - return false - } - for _, item := range strings.Split(s, "/") { - for i := 0; i < len(item); i++ { - if item[i] == '~' { - if i == len(item)-1 { - return false - } - switch item[i+1] { - case '0', '1': - // valid - default: - return false - } - } - } - } - return true -} - -// isRelativeJSONPointer tells whether given string is a valid Relative JSON Pointer. -// -// see https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01#section-3 -func isRelativeJSONPointer(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - if s == "" { - return false - } - if s[0] == '0' { - s = s[1:] - } else if s[0] >= '0' && s[0] <= '9' { - for s != "" && s[0] >= '0' && s[0] <= '9' { - s = s[1:] - } - } else { - return false - } - return s == "#" || isJSONPointer(s) -} - -// isUUID tells whether given string is a valid uuid format -// as specified in RFC4122. 
-// -// see https://datatracker.ietf.org/doc/html/rfc4122#page-4, for details -func isUUID(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - parseHex := func(n int) bool { - for n > 0 { - if len(s) == 0 { - return false - } - hex := (s[0] >= '0' && s[0] <= '9') || (s[0] >= 'a' && s[0] <= 'f') || (s[0] >= 'A' && s[0] <= 'F') - if !hex { - return false - } - s = s[1:] - n-- - } - return true - } - groups := []int{8, 4, 4, 4, 12} - for i, numDigits := range groups { - if !parseHex(numDigits) { - return false - } - if i == len(groups)-1 { - break - } - if len(s) == 0 || s[0] != '-' { - return false - } - s = s[1:] - } - return len(s) == 0 -} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/httploader/httploader.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/httploader/httploader.go deleted file mode 100644 index 4198cfe37c..0000000000 --- a/vendor/github.com/santhosh-tekuri/jsonschema/v5/httploader/httploader.go +++ /dev/null @@ -1,38 +0,0 @@ -// Package httploader implements loader.Loader for http/https url. -// -// The package is typically only imported for the side effect of -// registering its Loaders. -// -// To use httploader, link this package into your program: -// -// import _ "github.com/santhosh-tekuri/jsonschema/v5/httploader" -package httploader - -import ( - "fmt" - "io" - "net/http" - - "github.com/santhosh-tekuri/jsonschema/v5" -) - -// Client is the default HTTP Client used to Get the resource. -var Client = http.DefaultClient - -// Load loads resource from given http(s) url. -func Load(url string) (io.ReadCloser, error) { - resp, err := Client.Get(url) - if err != nil { - return nil, err - } - if resp.StatusCode != http.StatusOK { - _ = resp.Body.Close() - return nil, fmt.Errorf("%s returned status code %d", url, resp.StatusCode) - } - return resp.Body, nil -} - -func init() { - jsonschema.Loaders["http"] = Load - jsonschema.Loaders["https"] = Load -} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/loader.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/loader.go deleted file mode 100644 index c94195c335..0000000000 --- a/vendor/github.com/santhosh-tekuri/jsonschema/v5/loader.go +++ /dev/null @@ -1,60 +0,0 @@ -package jsonschema - -import ( - "fmt" - "io" - "net/url" - "os" - "path/filepath" - "runtime" - "strings" -) - -func loadFileURL(s string) (io.ReadCloser, error) { - u, err := url.Parse(s) - if err != nil { - return nil, err - } - f := u.Path - if runtime.GOOS == "windows" { - f = strings.TrimPrefix(f, "/") - f = filepath.FromSlash(f) - } - return os.Open(f) -} - -// Loaders is a registry of functions, which know how to load -// absolute url of specific schema. -// -// New loaders can be registered by adding to this map. Key is schema, -// value is function that knows how to load url of that schema -var Loaders = map[string]func(url string) (io.ReadCloser, error){ - "file": loadFileURL, -} - -// LoaderNotFoundError is the error type returned by Load function. -// It tells that no Loader is registered for that URL Scheme. -type LoaderNotFoundError string - -func (e LoaderNotFoundError) Error() string { - return fmt.Sprintf("jsonschema: no Loader found for %s", string(e)) -} - -// LoadURL loads document at given absolute URL. The default implementation -// uses Loaders registry to lookup by schema and uses that loader. -// -// Users can change this variable, if they would like to take complete -// responsibility of loading given URL. Used by Compiler if its LoadURL -// field is nil. 
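`Loaders` follows the same registry pattern, keyed by URL scheme. A sketch that serves schemas from memory; the `mem` scheme and the `memSchemas` map are invented for illustration:

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/santhosh-tekuri/jsonschema/v5"
)

var memSchemas = map[string]string{
	"mem://demo/schema.json": `{"type": "string"}`,
}

func init() {
	// register a loader for the invented "mem" scheme
	jsonschema.Loaders["mem"] = func(url string) (io.ReadCloser, error) {
		s, ok := memSchemas[url]
		if !ok {
			return nil, fmt.Errorf("mem: %s not found", url)
		}
		return io.NopCloser(strings.NewReader(s)), nil
	}
}

func main() {
	sch := jsonschema.MustCompile("mem://demo/schema.json")
	fmt.Println(sch.Validate("hello")) // <nil>
}
```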
-var LoadURL = func(s string) (io.ReadCloser, error) { - u, err := url.Parse(s) - if err != nil { - return nil, err - } - loader, ok := Loaders[u.Scheme] - if !ok { - return nil, LoaderNotFoundError(s) - - } - return loader(s) -} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/output.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/output.go deleted file mode 100644 index d65ae2a929..0000000000 --- a/vendor/github.com/santhosh-tekuri/jsonschema/v5/output.go +++ /dev/null @@ -1,77 +0,0 @@ -package jsonschema - -// Flag is output format with simple boolean property valid. -type Flag struct { - Valid bool `json:"valid"` -} - -// FlagOutput returns output in flag format -func (ve *ValidationError) FlagOutput() Flag { - return Flag{} -} - -// Basic --- - -// Basic is output format with flat list of output units. -type Basic struct { - Valid bool `json:"valid"` - Errors []BasicError `json:"errors"` -} - -// BasicError is output unit in basic format. -type BasicError struct { - KeywordLocation string `json:"keywordLocation"` - AbsoluteKeywordLocation string `json:"absoluteKeywordLocation"` - InstanceLocation string `json:"instanceLocation"` - Error string `json:"error"` -} - -// BasicOutput returns output in basic format -func (ve *ValidationError) BasicOutput() Basic { - var errors []BasicError - var flatten func(*ValidationError) - flatten = func(ve *ValidationError) { - errors = append(errors, BasicError{ - KeywordLocation: ve.KeywordLocation, - AbsoluteKeywordLocation: ve.AbsoluteKeywordLocation, - InstanceLocation: ve.InstanceLocation, - Error: ve.Message, - }) - for _, cause := range ve.Causes { - flatten(cause) - } - } - flatten(ve) - return Basic{Errors: errors} -} - -// Detailed --- - -// Detailed is output format based on structure of schema. -type Detailed struct { - Valid bool `json:"valid"` - KeywordLocation string `json:"keywordLocation"` - AbsoluteKeywordLocation string `json:"absoluteKeywordLocation"` - InstanceLocation string `json:"instanceLocation"` - Error string `json:"error,omitempty"` - Errors []Detailed `json:"errors,omitempty"` -} - -// DetailedOutput returns output in detailed format -func (ve *ValidationError) DetailedOutput() Detailed { - var errors []Detailed - for _, cause := range ve.Causes { - errors = append(errors, cause.DetailedOutput()) - } - var message = ve.Message - if len(ve.Causes) > 0 { - message = "" - } - return Detailed{ - KeywordLocation: ve.KeywordLocation, - AbsoluteKeywordLocation: ve.AbsoluteKeywordLocation, - InstanceLocation: ve.InstanceLocation, - Error: message, - Errors: errors, - } -} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/resource.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/resource.go deleted file mode 100644 index 18349daac7..0000000000 --- a/vendor/github.com/santhosh-tekuri/jsonschema/v5/resource.go +++ /dev/null @@ -1,280 +0,0 @@ -package jsonschema - -import ( - "encoding/json" - "fmt" - "io" - "net/url" - "path/filepath" - "runtime" - "strconv" - "strings" -) - -type resource struct { - url string // base url of resource. can be empty - floc string // fragment with json-pointer from root resource - doc interface{} - draft *Draft - subresources map[string]*resource // key is floc. 
only applicable for root resource - schema *Schema -} - -func (r *resource) String() string { - return r.url + r.floc -} - -func newResource(url string, r io.Reader) (*resource, error) { - if strings.IndexByte(url, '#') != -1 { - panic(fmt.Sprintf("BUG: newResource(%q)", url)) - } - doc, err := unmarshal(r) - if err != nil { - return nil, fmt.Errorf("jsonschema: invalid json %s: %v", url, err) - } - url, err = toAbs(url) - if err != nil { - return nil, err - } - return &resource{ - url: url, - floc: "#", - doc: doc, - }, nil -} - -// fillSubschemas fills subschemas in res into r.subresources -func (r *resource) fillSubschemas(c *Compiler, res *resource) error { - if err := c.validateSchema(r, res.doc, res.floc[1:]); err != nil { - return err - } - - if r.subresources == nil { - r.subresources = make(map[string]*resource) - } - if err := r.draft.listSubschemas(res, r.baseURL(res.floc), r.subresources); err != nil { - return err - } - - // ensure subresource.url uniqueness - url2floc := make(map[string]string) - for _, sr := range r.subresources { - if sr.url != "" { - if floc, ok := url2floc[sr.url]; ok { - return fmt.Errorf("jsonschema: %q and %q in %s have same canonical-uri", floc[1:], sr.floc[1:], r.url) - } - url2floc[sr.url] = sr.floc - } - } - - return nil -} - -// listResources lists all subresources in res -func (r *resource) listResources(res *resource) []*resource { - var result []*resource - prefix := res.floc + "/" - for _, sr := range r.subresources { - if strings.HasPrefix(sr.floc, prefix) { - result = append(result, sr) - } - } - return result -} - -func (r *resource) findResource(url string) *resource { - if r.url == url { - return r - } - for _, res := range r.subresources { - if res.url == url { - return res - } - } - return nil -} - -// resolve fragment f with sr as base -func (r *resource) resolveFragment(c *Compiler, sr *resource, f string) (*resource, error) { - if f == "#" || f == "#/" { - return sr, nil - } - - // resolve by anchor - if !strings.HasPrefix(f, "#/") { - // check in given resource - for _, anchor := range r.draft.anchors(sr.doc) { - if anchor == f[1:] { - return sr, nil - } - } - - // check in subresources that has same base url - prefix := sr.floc + "/" - for _, res := range r.subresources { - if strings.HasPrefix(res.floc, prefix) && r.baseURL(res.floc) == sr.url { - for _, anchor := range r.draft.anchors(res.doc) { - if anchor == f[1:] { - return res, nil - } - } - } - } - return nil, nil - } - - // resolve by ptr - floc := sr.floc + f[1:] - if res, ok := r.subresources[floc]; ok { - return res, nil - } - - // non-standrad location - doc := r.doc - for _, item := range strings.Split(floc[2:], "/") { - item = strings.Replace(item, "~1", "/", -1) - item = strings.Replace(item, "~0", "~", -1) - item, err := url.PathUnescape(item) - if err != nil { - return nil, err - } - switch d := doc.(type) { - case map[string]interface{}: - if _, ok := d[item]; !ok { - return nil, nil - } - doc = d[item] - case []interface{}: - index, err := strconv.Atoi(item) - if err != nil { - return nil, err - } - if index < 0 || index >= len(d) { - return nil, nil - } - doc = d[index] - default: - return nil, nil - } - } - - id, err := r.draft.resolveID(r.baseURL(floc), doc) - if err != nil { - return nil, err - } - res := &resource{url: id, floc: floc, doc: doc} - r.subresources[floc] = res - if err := r.fillSubschemas(c, res); err != nil { - return nil, err - } - return res, nil -} - -func (r *resource) baseURL(floc string) string { - for { - if sr, ok := 
r.subresources[floc]; ok { - if sr.url != "" { - return sr.url - } - } - slash := strings.LastIndexByte(floc, '/') - if slash == -1 { - break - } - floc = floc[:slash] - } - return r.url -} - -// url helpers --- - -func toAbs(s string) (string, error) { - // if windows absolute file path, convert to file url - // because: net/url parses driver name as scheme - if runtime.GOOS == "windows" && len(s) >= 3 && s[1:3] == `:\` { - s = "file:///" + filepath.ToSlash(s) - } - - u, err := url.Parse(s) - if err != nil { - return "", err - } - if u.IsAbs() { - return s, nil - } - - // s is filepath - if s, err = filepath.Abs(s); err != nil { - return "", err - } - if runtime.GOOS == "windows" { - s = "file:///" + filepath.ToSlash(s) - } else { - s = "file://" + s - } - u, err = url.Parse(s) // to fix spaces in filepath - return u.String(), err -} - -func resolveURL(base, ref string) (string, error) { - if ref == "" { - return base, nil - } - if strings.HasPrefix(ref, "urn:") { - return ref, nil - } - - refURL, err := url.Parse(ref) - if err != nil { - return "", err - } - if refURL.IsAbs() { - return ref, nil - } - - if strings.HasPrefix(base, "urn:") { - base, _ = split(base) - return base + ref, nil - } - - baseURL, err := url.Parse(base) - if err != nil { - return "", err - } - return baseURL.ResolveReference(refURL).String(), nil -} - -func split(uri string) (string, string) { - hash := strings.IndexByte(uri, '#') - if hash == -1 { - return uri, "#" - } - f := uri[hash:] - if f == "#/" { - f = "#" - } - return uri[0:hash], f -} - -func (s *Schema) url() string { - u, _ := split(s.Location) - return u -} - -func (s *Schema) loc() string { - _, f := split(s.Location) - return f[1:] -} - -func unmarshal(r io.Reader) (interface{}, error) { - decoder := json.NewDecoder(r) - decoder.UseNumber() - var doc interface{} - if err := decoder.Decode(&doc); err != nil { - return nil, err - } - if t, _ := decoder.Token(); t != nil { - return nil, fmt.Errorf("invalid character %v after top-level value", t) - } - return doc, nil -} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/schema.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/schema.go deleted file mode 100644 index 688f0a6fee..0000000000 --- a/vendor/github.com/santhosh-tekuri/jsonschema/v5/schema.go +++ /dev/null @@ -1,900 +0,0 @@ -package jsonschema - -import ( - "bytes" - "encoding/json" - "fmt" - "hash/maphash" - "math/big" - "net/url" - "regexp" - "sort" - "strconv" - "strings" - "unicode/utf8" -) - -// A Schema represents compiled version of json-schema. -type Schema struct { - Location string // absolute location - - Draft *Draft // draft used by schema. - meta *Schema - vocab []string - dynamicAnchors []*Schema - - // type agnostic validations - Format string - format func(interface{}) bool - Always *bool // always pass/fail. used when booleans are used as schemas in draft-07. - Ref *Schema - RecursiveAnchor bool - RecursiveRef *Schema - DynamicAnchor string - DynamicRef *Schema - dynamicRefAnchor string - Types []string // allowed types. - Constant []interface{} // first element in slice is constant value. note: slice is used to capture nil constant. - Enum []interface{} // allowed values. - enumError string // error message for enum fail. captured here to avoid constructing error message every time. - Not *Schema - AllOf []*Schema - AnyOf []*Schema - OneOf []*Schema - If *Schema - Then *Schema // nil, when If is nil. - Else *Schema // nil, when If is nil. - - // object validations - MinProperties int // -1 if not specified. 
- MaxProperties int // -1 if not specified. - Required []string // list of required properties. - Properties map[string]*Schema - PropertyNames *Schema - RegexProperties bool // property names must be valid regex. used only in draft4 as workaround in metaschema. - PatternProperties map[*regexp.Regexp]*Schema - AdditionalProperties interface{} // nil or bool or *Schema. - Dependencies map[string]interface{} // map value is *Schema or []string. - DependentRequired map[string][]string - DependentSchemas map[string]*Schema - UnevaluatedProperties *Schema - - // array validations - MinItems int // -1 if not specified. - MaxItems int // -1 if not specified. - UniqueItems bool - Items interface{} // nil or *Schema or []*Schema - AdditionalItems interface{} // nil or bool or *Schema. - PrefixItems []*Schema - Items2020 *Schema // items keyword reintroduced in draft 2020-12 - Contains *Schema - ContainsEval bool // whether any item in an array that passes validation of the contains schema is considered "evaluated" - MinContains int // 1 if not specified - MaxContains int // -1 if not specified - UnevaluatedItems *Schema - - // string validations - MinLength int // -1 if not specified. - MaxLength int // -1 if not specified. - Pattern *regexp.Regexp - ContentEncoding string - decoder func(string) ([]byte, error) - ContentMediaType string - mediaType func([]byte) error - ContentSchema *Schema - - // number validators - Minimum *big.Rat - ExclusiveMinimum *big.Rat - Maximum *big.Rat - ExclusiveMaximum *big.Rat - MultipleOf *big.Rat - - // annotations. captured only when Compiler.ExtractAnnotations is true. - Title string - Description string - Default interface{} - Comment string - ReadOnly bool - WriteOnly bool - Examples []interface{} - Deprecated bool - - // user defined extensions - Extensions map[string]ExtSchema -} - -func (s *Schema) String() string { - return s.Location -} - -func newSchema(url, floc string, draft *Draft, doc interface{}) *Schema { - // fill with default values - s := &Schema{ - Location: url + floc, - Draft: draft, - MinProperties: -1, - MaxProperties: -1, - MinItems: -1, - MaxItems: -1, - MinContains: 1, - MaxContains: -1, - MinLength: -1, - MaxLength: -1, - } - - if doc, ok := doc.(map[string]interface{}); ok { - if ra, ok := doc["$recursiveAnchor"]; ok { - if ra, ok := ra.(bool); ok { - s.RecursiveAnchor = ra - } - } - if da, ok := doc["$dynamicAnchor"]; ok { - if da, ok := da.(string); ok { - s.DynamicAnchor = da - } - } - } - return s -} - -func (s *Schema) hasVocab(name string) bool { - if s == nil { // during bootstrap - return true - } - if name == "core" { - return true - } - for _, url := range s.vocab { - if url == "https://json-schema.org/draft/2019-09/vocab/"+name { - return true - } - if url == "https://json-schema.org/draft/2020-12/vocab/"+name { - return true - } - } - return false -} - -// Validate validates given doc, against the json-schema s. -// -// the v must be the raw json value. for number precision -// unmarshal with json.UseNumber(). -// -// returns *ValidationError if v does not confirm with schema s. -// returns InfiniteLoopError if it detects loop during validation. -// returns InvalidJSONTypeError if it detects any non json value in v. 
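As the Validate contract above asks, instances should be decoded with `UseNumber` to preserve number precision; on failure, the output formats from `output.go` shown earlier can render the error. A minimal sketch of that call pattern:

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"strings"

	"github.com/santhosh-tekuri/jsonschema/v5"
)

func main() {
	sch := jsonschema.MustCompileString("s.json", `{"type": "integer"}`)

	dec := json.NewDecoder(strings.NewReader(`"not a number"`))
	dec.UseNumber() // preserve number precision, per Validate's contract
	var v interface{}
	if err := dec.Decode(&v); err != nil {
		panic(err)
	}

	if err := sch.Validate(v); err != nil {
		var ve *jsonschema.ValidationError
		if errors.As(err, &ve) {
			out, _ := json.MarshalIndent(ve.DetailedOutput(), "", "  ")
			fmt.Println(string(out)) // "detailed" output format from output.go
		}
	}
}
```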
-func (s *Schema) Validate(v interface{}) (err error) { - return s.validateValue(v, "") -} - -func (s *Schema) validateValue(v interface{}, vloc string) (err error) { - defer func() { - if r := recover(); r != nil { - switch r := r.(type) { - case InfiniteLoopError, InvalidJSONTypeError: - err = r.(error) - default: - panic(r) - } - } - }() - if _, err := s.validate(nil, 0, "", v, vloc); err != nil { - ve := ValidationError{ - KeywordLocation: "", - AbsoluteKeywordLocation: s.Location, - InstanceLocation: vloc, - Message: fmt.Sprintf("doesn't validate with %s", s.Location), - } - return ve.causes(err) - } - return nil -} - -// validate validates given value v with this schema. -func (s *Schema) validate(scope []schemaRef, vscope int, spath string, v interface{}, vloc string) (result validationResult, err error) { - validationError := func(keywordPath string, format string, a ...interface{}) *ValidationError { - return &ValidationError{ - KeywordLocation: keywordLocation(scope, keywordPath), - AbsoluteKeywordLocation: joinPtr(s.Location, keywordPath), - InstanceLocation: vloc, - Message: fmt.Sprintf(format, a...), - } - } - - sref := schemaRef{spath, s, false} - if err := checkLoop(scope[len(scope)-vscope:], sref); err != nil { - panic(err) - } - scope = append(scope, sref) - vscope++ - - // populate result - switch v := v.(type) { - case map[string]interface{}: - result.unevalProps = make(map[string]struct{}) - for pname := range v { - result.unevalProps[pname] = struct{}{} - } - case []interface{}: - result.unevalItems = make(map[int]struct{}) - for i := range v { - result.unevalItems[i] = struct{}{} - } - } - - validate := func(sch *Schema, schPath string, v interface{}, vpath string) error { - vloc := vloc - if vpath != "" { - vloc += "/" + vpath - } - _, err := sch.validate(scope, 0, schPath, v, vloc) - return err - } - - validateInplace := func(sch *Schema, schPath string) error { - vr, err := sch.validate(scope, vscope, schPath, v, vloc) - if err == nil { - // update result - for pname := range result.unevalProps { - if _, ok := vr.unevalProps[pname]; !ok { - delete(result.unevalProps, pname) - } - } - for i := range result.unevalItems { - if _, ok := vr.unevalItems[i]; !ok { - delete(result.unevalItems, i) - } - } - } - return err - } - - if s.Always != nil { - if !*s.Always { - return result, validationError("", "not allowed") - } - return result, nil - } - - if len(s.Types) > 0 { - vType := jsonType(v) - matched := false - for _, t := range s.Types { - if vType == t { - matched = true - break - } else if t == "integer" && vType == "number" { - num, _ := new(big.Rat).SetString(fmt.Sprint(v)) - if num.IsInt() { - matched = true - break - } - } - } - if !matched { - return result, validationError("type", "expected %s, but got %s", strings.Join(s.Types, " or "), vType) - } - } - - var errors []error - - if len(s.Constant) > 0 { - if !equals(v, s.Constant[0]) { - switch jsonType(s.Constant[0]) { - case "object", "array": - errors = append(errors, validationError("const", "const failed")) - default: - errors = append(errors, validationError("const", "value must be %#v", s.Constant[0])) - } - } - } - - if len(s.Enum) > 0 { - matched := false - for _, item := range s.Enum { - if equals(v, item) { - matched = true - break - } - } - if !matched { - errors = append(errors, validationError("enum", s.enumError)) - } - } - - if s.format != nil && !s.format(v) { - var val = v - if v, ok := v.(string); ok { - val = quote(v) - } - errors = append(errors, validationError("format", "%v is not valid 
%s", val, quote(s.Format))) - } - - switch v := v.(type) { - case map[string]interface{}: - if s.MinProperties != -1 && len(v) < s.MinProperties { - errors = append(errors, validationError("minProperties", "minimum %d properties allowed, but found %d properties", s.MinProperties, len(v))) - } - if s.MaxProperties != -1 && len(v) > s.MaxProperties { - errors = append(errors, validationError("maxProperties", "maximum %d properties allowed, but found %d properties", s.MaxProperties, len(v))) - } - if len(s.Required) > 0 { - var missing []string - for _, pname := range s.Required { - if _, ok := v[pname]; !ok { - missing = append(missing, quote(pname)) - } - } - if len(missing) > 0 { - errors = append(errors, validationError("required", "missing properties: %s", strings.Join(missing, ", "))) - } - } - - for pname, sch := range s.Properties { - if pvalue, ok := v[pname]; ok { - delete(result.unevalProps, pname) - if err := validate(sch, "properties/"+escape(pname), pvalue, escape(pname)); err != nil { - errors = append(errors, err) - } - } - } - - if s.PropertyNames != nil { - for pname := range v { - if err := validate(s.PropertyNames, "propertyNames", pname, escape(pname)); err != nil { - errors = append(errors, err) - } - } - } - - if s.RegexProperties { - for pname := range v { - if !isRegex(pname) { - errors = append(errors, validationError("", "patternProperty %s is not valid regex", quote(pname))) - } - } - } - for pattern, sch := range s.PatternProperties { - for pname, pvalue := range v { - if pattern.MatchString(pname) { - delete(result.unevalProps, pname) - if err := validate(sch, "patternProperties/"+escape(pattern.String()), pvalue, escape(pname)); err != nil { - errors = append(errors, err) - } - } - } - } - if s.AdditionalProperties != nil { - if allowed, ok := s.AdditionalProperties.(bool); ok { - if !allowed && len(result.unevalProps) > 0 { - errors = append(errors, validationError("additionalProperties", "additionalProperties %s not allowed", result.unevalPnames())) - } - } else { - schema := s.AdditionalProperties.(*Schema) - for pname := range result.unevalProps { - if pvalue, ok := v[pname]; ok { - if err := validate(schema, "additionalProperties", pvalue, escape(pname)); err != nil { - errors = append(errors, err) - } - } - } - } - result.unevalProps = nil - } - for dname, dvalue := range s.Dependencies { - if _, ok := v[dname]; ok { - switch dvalue := dvalue.(type) { - case *Schema: - if err := validateInplace(dvalue, "dependencies/"+escape(dname)); err != nil { - errors = append(errors, err) - } - case []string: - for i, pname := range dvalue { - if _, ok := v[pname]; !ok { - errors = append(errors, validationError("dependencies/"+escape(dname)+"/"+strconv.Itoa(i), "property %s is required, if %s property exists", quote(pname), quote(dname))) - } - } - } - } - } - for dname, dvalue := range s.DependentRequired { - if _, ok := v[dname]; ok { - for i, pname := range dvalue { - if _, ok := v[pname]; !ok { - errors = append(errors, validationError("dependentRequired/"+escape(dname)+"/"+strconv.Itoa(i), "property %s is required, if %s property exists", quote(pname), quote(dname))) - } - } - } - } - for dname, sch := range s.DependentSchemas { - if _, ok := v[dname]; ok { - if err := validateInplace(sch, "dependentSchemas/"+escape(dname)); err != nil { - errors = append(errors, err) - } - } - } - - case []interface{}: - if s.MinItems != -1 && len(v) < s.MinItems { - errors = append(errors, validationError("minItems", "minimum %d items required, but found %d items", s.MinItems, 
len(v))) - } - if s.MaxItems != -1 && len(v) > s.MaxItems { - errors = append(errors, validationError("maxItems", "maximum %d items required, but found %d items", s.MaxItems, len(v))) - } - if s.UniqueItems { - if len(v) <= 20 { - outer1: - for i := 1; i < len(v); i++ { - for j := 0; j < i; j++ { - if equals(v[i], v[j]) { - errors = append(errors, validationError("uniqueItems", "items at index %d and %d are equal", j, i)) - break outer1 - } - } - } - } else { - m := make(map[uint64][]int) - var h maphash.Hash - outer2: - for i, item := range v { - h.Reset() - hash(item, &h) - k := h.Sum64() - if err != nil { - panic(err) - } - arr, ok := m[k] - if ok { - for _, j := range arr { - if equals(v[j], item) { - errors = append(errors, validationError("uniqueItems", "items at index %d and %d are equal", j, i)) - break outer2 - } - } - } - arr = append(arr, i) - m[k] = arr - } - } - } - - // items + additionalItems - switch items := s.Items.(type) { - case *Schema: - for i, item := range v { - if err := validate(items, "items", item, strconv.Itoa(i)); err != nil { - errors = append(errors, err) - } - } - result.unevalItems = nil - case []*Schema: - for i, item := range v { - if i < len(items) { - delete(result.unevalItems, i) - if err := validate(items[i], "items/"+strconv.Itoa(i), item, strconv.Itoa(i)); err != nil { - errors = append(errors, err) - } - } else if sch, ok := s.AdditionalItems.(*Schema); ok { - delete(result.unevalItems, i) - if err := validate(sch, "additionalItems", item, strconv.Itoa(i)); err != nil { - errors = append(errors, err) - } - } else { - break - } - } - if additionalItems, ok := s.AdditionalItems.(bool); ok { - if additionalItems { - result.unevalItems = nil - } else if len(v) > len(items) { - errors = append(errors, validationError("additionalItems", "only %d items are allowed, but found %d items", len(items), len(v))) - } - } - } - - // prefixItems + items - for i, item := range v { - if i < len(s.PrefixItems) { - delete(result.unevalItems, i) - if err := validate(s.PrefixItems[i], "prefixItems/"+strconv.Itoa(i), item, strconv.Itoa(i)); err != nil { - errors = append(errors, err) - } - } else if s.Items2020 != nil { - delete(result.unevalItems, i) - if err := validate(s.Items2020, "items", item, strconv.Itoa(i)); err != nil { - errors = append(errors, err) - } - } else { - break - } - } - - // contains + minContains + maxContains - if s.Contains != nil && (s.MinContains != -1 || s.MaxContains != -1) { - matched := 0 - var causes []error - for i, item := range v { - if err := validate(s.Contains, "contains", item, strconv.Itoa(i)); err != nil { - causes = append(causes, err) - } else { - matched++ - if s.ContainsEval { - delete(result.unevalItems, i) - } - } - } - if s.MinContains != -1 && matched < s.MinContains { - errors = append(errors, validationError("minContains", "valid must be >= %d, but got %d", s.MinContains, matched).add(causes...)) - } - if s.MaxContains != -1 && matched > s.MaxContains { - errors = append(errors, validationError("maxContains", "valid must be <= %d, but got %d", s.MaxContains, matched)) - } - } - - case string: - // minLength + maxLength - if s.MinLength != -1 || s.MaxLength != -1 { - length := utf8.RuneCount([]byte(v)) - if s.MinLength != -1 && length < s.MinLength { - errors = append(errors, validationError("minLength", "length must be >= %d, but got %d", s.MinLength, length)) - } - if s.MaxLength != -1 && length > s.MaxLength { - errors = append(errors, validationError("maxLength", "length must be <= %d, but got %d", s.MaxLength, 
length)) - } - } - - if s.Pattern != nil && !s.Pattern.MatchString(v) { - errors = append(errors, validationError("pattern", "does not match pattern %s", quote(s.Pattern.String()))) - } - - // contentEncoding + contentMediaType - if s.decoder != nil || s.mediaType != nil { - decoded := s.ContentEncoding == "" - var content []byte - if s.decoder != nil { - b, err := s.decoder(v) - if err != nil { - errors = append(errors, validationError("contentEncoding", "value is not %s encoded", s.ContentEncoding)) - } else { - content, decoded = b, true - } - } - if decoded && s.mediaType != nil { - if s.decoder == nil { - content = []byte(v) - } - if err := s.mediaType(content); err != nil { - errors = append(errors, validationError("contentMediaType", "value is not of mediatype %s", quote(s.ContentMediaType))) - } - } - if decoded && s.ContentSchema != nil { - contentJSON, err := unmarshal(bytes.NewReader(content)) - if err != nil { - errors = append(errors, validationError("contentSchema", "value is not valid json")) - } else { - err := validate(s.ContentSchema, "contentSchema", contentJSON, "") - if err != nil { - errors = append(errors, err) - } - } - } - } - - case json.Number, float32, float64, int, int8, int32, int64, uint, uint8, uint32, uint64: - // lazy convert to *big.Rat to avoid allocation - var numVal *big.Rat - num := func() *big.Rat { - if numVal == nil { - numVal, _ = new(big.Rat).SetString(fmt.Sprint(v)) - } - return numVal - } - f64 := func(r *big.Rat) float64 { - f, _ := r.Float64() - return f - } - if s.Minimum != nil && num().Cmp(s.Minimum) < 0 { - errors = append(errors, validationError("minimum", "must be >= %v but found %v", f64(s.Minimum), v)) - } - if s.ExclusiveMinimum != nil && num().Cmp(s.ExclusiveMinimum) <= 0 { - errors = append(errors, validationError("exclusiveMinimum", "must be > %v but found %v", f64(s.ExclusiveMinimum), v)) - } - if s.Maximum != nil && num().Cmp(s.Maximum) > 0 { - errors = append(errors, validationError("maximum", "must be <= %v but found %v", f64(s.Maximum), v)) - } - if s.ExclusiveMaximum != nil && num().Cmp(s.ExclusiveMaximum) >= 0 { - errors = append(errors, validationError("exclusiveMaximum", "must be < %v but found %v", f64(s.ExclusiveMaximum), v)) - } - if s.MultipleOf != nil { - if q := new(big.Rat).Quo(num(), s.MultipleOf); !q.IsInt() { - errors = append(errors, validationError("multipleOf", "%v not multipleOf %v", v, f64(s.MultipleOf))) - } - } - } - - // $ref + $recursiveRef + $dynamicRef - validateRef := func(sch *Schema, refPath string) error { - if sch != nil { - if err := validateInplace(sch, refPath); err != nil { - var url = sch.Location - if s.url() == sch.url() { - url = sch.loc() - } - return validationError(refPath, "doesn't validate with %s", quote(url)).causes(err) - } - } - return nil - } - if err := validateRef(s.Ref, "$ref"); err != nil { - errors = append(errors, err) - } - if s.RecursiveRef != nil { - sch := s.RecursiveRef - if sch.RecursiveAnchor { - // recursiveRef based on scope - for _, e := range scope { - if e.schema.RecursiveAnchor { - sch = e.schema - break - } - } - } - if err := validateRef(sch, "$recursiveRef"); err != nil { - errors = append(errors, err) - } - } - if s.DynamicRef != nil { - sch := s.DynamicRef - if s.dynamicRefAnchor != "" && sch.DynamicAnchor == s.dynamicRefAnchor { - // dynamicRef based on scope - for i := len(scope) - 1; i >= 0; i-- { - sr := scope[i] - if sr.discard { - break - } - for _, da := range sr.schema.dynamicAnchors { - if da.DynamicAnchor == s.DynamicRef.DynamicAnchor && da != 
s.DynamicRef { - sch = da - break - } - } - } - } - if err := validateRef(sch, "$dynamicRef"); err != nil { - errors = append(errors, err) - } - } - - if s.Not != nil && validateInplace(s.Not, "not") == nil { - errors = append(errors, validationError("not", "not failed")) - } - - for i, sch := range s.AllOf { - schPath := "allOf/" + strconv.Itoa(i) - if err := validateInplace(sch, schPath); err != nil { - errors = append(errors, validationError(schPath, "allOf failed").add(err)) - } - } - - if len(s.AnyOf) > 0 { - matched := false - var causes []error - for i, sch := range s.AnyOf { - if err := validateInplace(sch, "anyOf/"+strconv.Itoa(i)); err == nil { - matched = true - } else { - causes = append(causes, err) - } - } - if !matched { - errors = append(errors, validationError("anyOf", "anyOf failed").add(causes...)) - } - } - - if len(s.OneOf) > 0 { - matched := -1 - var causes []error - for i, sch := range s.OneOf { - if err := validateInplace(sch, "oneOf/"+strconv.Itoa(i)); err == nil { - if matched == -1 { - matched = i - } else { - errors = append(errors, validationError("oneOf", "valid against schemas at indexes %d and %d", matched, i)) - break - } - } else { - causes = append(causes, err) - } - } - if matched == -1 { - errors = append(errors, validationError("oneOf", "oneOf failed").add(causes...)) - } - } - - // if + then + else - if s.If != nil { - err := validateInplace(s.If, "if") - // "if" leaves dynamic scope - scope[len(scope)-1].discard = true - if err == nil { - if s.Then != nil { - if err := validateInplace(s.Then, "then"); err != nil { - errors = append(errors, validationError("then", "if-then failed").add(err)) - } - } - } else { - if s.Else != nil { - if err := validateInplace(s.Else, "else"); err != nil { - errors = append(errors, validationError("else", "if-else failed").add(err)) - } - } - } - // restore dynamic scope - scope[len(scope)-1].discard = false - } - - for _, ext := range s.Extensions { - if err := ext.Validate(ValidationContext{result, validate, validateInplace, validationError}, v); err != nil { - errors = append(errors, err) - } - } - - // unevaluatedProperties + unevaluatedItems - switch v := v.(type) { - case map[string]interface{}: - if s.UnevaluatedProperties != nil { - for pname := range result.unevalProps { - if pvalue, ok := v[pname]; ok { - if err := validate(s.UnevaluatedProperties, "unevaluatedProperties", pvalue, escape(pname)); err != nil { - errors = append(errors, err) - } - } - } - result.unevalProps = nil - } - case []interface{}: - if s.UnevaluatedItems != nil { - for i := range result.unevalItems { - if err := validate(s.UnevaluatedItems, "unevaluatedItems", v[i], strconv.Itoa(i)); err != nil { - errors = append(errors, err) - } - } - result.unevalItems = nil - } - } - - switch len(errors) { - case 0: - return result, nil - case 1: - return result, errors[0] - default: - return result, validationError("", "").add(errors...) // empty message, used just for wrapping - } -} - -type validationResult struct { - unevalProps map[string]struct{} - unevalItems map[int]struct{} -} - -func (vr validationResult) unevalPnames() string { - pnames := make([]string, 0, len(vr.unevalProps)) - for pname := range vr.unevalProps { - pnames = append(pnames, quote(pname)) - } - return strings.Join(pnames, ", ") -} - -// jsonType returns the json type of given value v. 
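One consequence of the const/enum handling above: numbers are compared as `big.Rat`, so `1` and `1.0` are the same JSON number. A quick sketch:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/santhosh-tekuri/jsonschema/v5"
)

func main() {
	sch := jsonschema.MustCompileString("n.json", `{"const": 1}`)
	fmt.Println(sch.Validate(json.Number("1.0"))) // <nil>: equal as big.Rat
	fmt.Println(sch.Validate(json.Number("1.5"))) // validation error
}
```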
-// -// It panics if the given value is not valid json value -func jsonType(v interface{}) string { - switch v.(type) { - case nil: - return "null" - case bool: - return "boolean" - case json.Number, float32, float64, int, int8, int32, int64, uint, uint8, uint32, uint64: - return "number" - case string: - return "string" - case []interface{}: - return "array" - case map[string]interface{}: - return "object" - } - panic(InvalidJSONTypeError(fmt.Sprintf("%T", v))) -} - -// equals tells if given two json values are equal or not. -func equals(v1, v2 interface{}) bool { - v1Type := jsonType(v1) - if v1Type != jsonType(v2) { - return false - } - switch v1Type { - case "array": - arr1, arr2 := v1.([]interface{}), v2.([]interface{}) - if len(arr1) != len(arr2) { - return false - } - for i := range arr1 { - if !equals(arr1[i], arr2[i]) { - return false - } - } - return true - case "object": - obj1, obj2 := v1.(map[string]interface{}), v2.(map[string]interface{}) - if len(obj1) != len(obj2) { - return false - } - for k, v1 := range obj1 { - if v2, ok := obj2[k]; ok { - if !equals(v1, v2) { - return false - } - } else { - return false - } - } - return true - case "number": - num1, _ := new(big.Rat).SetString(fmt.Sprint(v1)) - num2, _ := new(big.Rat).SetString(fmt.Sprint(v2)) - return num1.Cmp(num2) == 0 - default: - return v1 == v2 - } -} - -func hash(v interface{}, h *maphash.Hash) { - switch v := v.(type) { - case nil: - h.WriteByte(0) - case bool: - h.WriteByte(1) - if v { - h.WriteByte(1) - } else { - h.WriteByte(0) - } - case json.Number, float32, float64, int, int8, int32, int64, uint, uint8, uint32, uint64: - h.WriteByte(2) - num, _ := new(big.Rat).SetString(fmt.Sprint(v)) - h.Write(num.Num().Bytes()) - h.Write(num.Denom().Bytes()) - case string: - h.WriteByte(3) - h.WriteString(v) - case []interface{}: - h.WriteByte(4) - for _, item := range v { - hash(item, h) - } - case map[string]interface{}: - h.WriteByte(5) - props := make([]string, 0, len(v)) - for prop := range v { - props = append(props, prop) - } - sort.Slice(props, func(i, j int) bool { - return props[i] < props[j] - }) - for _, prop := range props { - hash(prop, h) - hash(v[prop], h) - } - default: - panic(InvalidJSONTypeError(fmt.Sprintf("%T", v))) - } -} - -// escape converts given token to valid json-pointer token -func escape(token string) string { - token = strings.ReplaceAll(token, "~", "~0") - token = strings.ReplaceAll(token, "/", "~1") - return url.PathEscape(token) -} diff --git a/vendor/github.com/sashamelentyev/usestdlibvars/pkg/analyzer/analyzer.go b/vendor/github.com/sashamelentyev/usestdlibvars/pkg/analyzer/analyzer.go index f9f0587344..c9e2dabd11 100644 --- a/vendor/github.com/sashamelentyev/usestdlibvars/pkg/analyzer/analyzer.go +++ b/vendor/github.com/sashamelentyev/usestdlibvars/pkg/analyzer/analyzer.go @@ -2,10 +2,7 @@ package analyzer import ( "flag" -<<<<<<< HEAD "fmt" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go/ast" "go/token" "strings" @@ -368,11 +365,7 @@ func checkHTTPMethod(pass *analysis.Pass, basicLit *ast.BasicLit) { key := strings.ToUpper(currentVal) if newVal, ok := mapping.HTTPMethod[key]; ok { -<<<<<<< HEAD report(pass, basicLit, currentVal, newVal) -======= - report(pass, basicLit.Pos(), currentVal, newVal) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -380,11 +373,7 @@ func checkHTTPStatusCode(pass *analysis.Pass, basicLit *ast.BasicLit) { currentVal := getBasicLitValue(basicLit) if newVal, ok := mapping.HTTPStatusCode[currentVal]; ok { -<<<<<<< HEAD 
report(pass, basicLit, currentVal, newVal) -======= - report(pass, basicLit.Pos(), currentVal, newVal) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -392,11 +381,7 @@ func checkTimeWeekday(pass *analysis.Pass, basicLit *ast.BasicLit) { currentVal := getBasicLitValue(basicLit) if newVal, ok := mapping.TimeWeekday[currentVal]; ok { -<<<<<<< HEAD report(pass, basicLit, currentVal, newVal) -======= - report(pass, basicLit.Pos(), currentVal, newVal) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -404,11 +389,7 @@ func checkTimeMonth(pass *analysis.Pass, basicLit *ast.BasicLit) { currentVal := getBasicLitValue(basicLit) if newVal, ok := mapping.TimeMonth[currentVal]; ok { -<<<<<<< HEAD report(pass, basicLit, currentVal, newVal) -======= - report(pass, basicLit.Pos(), currentVal, newVal) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -416,11 +397,7 @@ func checkTimeLayout(pass *analysis.Pass, basicLit *ast.BasicLit) { currentVal := getBasicLitValue(basicLit) if newVal, ok := mapping.TimeLayout[currentVal]; ok { -<<<<<<< HEAD report(pass, basicLit, currentVal, newVal) -======= - report(pass, basicLit.Pos(), currentVal, newVal) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -428,11 +405,7 @@ func checkCryptoHash(pass *analysis.Pass, basicLit *ast.BasicLit) { currentVal := getBasicLitValue(basicLit) if newVal, ok := mapping.CryptoHash[currentVal]; ok { -<<<<<<< HEAD report(pass, basicLit, currentVal, newVal) -======= - report(pass, basicLit.Pos(), currentVal, newVal) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -440,11 +413,7 @@ func checkRPCDefaultPath(pass *analysis.Pass, basicLit *ast.BasicLit) { currentVal := getBasicLitValue(basicLit) if newVal, ok := mapping.RPCDefaultPath[currentVal]; ok { -<<<<<<< HEAD report(pass, basicLit, currentVal, newVal) -======= - report(pass, basicLit.Pos(), currentVal, newVal) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -454,11 +423,7 @@ func checkSQLIsolationLevel(pass *analysis.Pass, basicLit *ast.BasicLit) { currentVal := getBasicLitValue(basicLit) if newVal, ok := mapping.SQLIsolationLevel[currentVal]; ok { -<<<<<<< HEAD report(pass, basicLit, currentVal, newVal) -======= - report(pass, basicLit.Pos(), currentVal, newVal) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -466,11 +431,7 @@ func checkTLSSignatureScheme(pass *analysis.Pass, basicLit *ast.BasicLit) { currentVal := getBasicLitValue(basicLit) if newVal, ok := mapping.TLSSignatureScheme[currentVal]; ok { -<<<<<<< HEAD report(pass, basicLit, currentVal, newVal) -======= - report(pass, basicLit.Pos(), currentVal, newVal) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -478,11 +439,7 @@ func checkConstantKind(pass *analysis.Pass, basicLit *ast.BasicLit) { currentVal := getBasicLitValue(basicLit) if newVal, ok := mapping.ConstantKind[currentVal]; ok { -<<<<<<< HEAD report(pass, basicLit, currentVal, newVal) -======= - report(pass, basicLit.Pos(), currentVal, newVal) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -558,7 +515,6 @@ func getBasicLitValue(basicLit *ast.BasicLit) string { return val.String() } -<<<<<<< HEAD func report(pass *analysis.Pass, rg analysis.Range, currentVal, newVal string) { pass.Report(analysis.Diagnostic{ Pos: rg.Pos(), @@ -571,8 +527,4 @@ func report(pass *analysis.Pass, rg analysis.Range, currentVal, newVal string) { }}, }}, }) -======= -func report(pass *analysis.Pass, pos token.Pos, currentVal, newVal string) { - 
pass.Reportf(pos, "%q can be replaced by %s", currentVal, newVal) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go index 2616f75c23..d04246747c 100644 --- a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go @@ -43,12 +43,8 @@ func (ev *EnvelopeVerifier) Verify(ctx context.Context, e *Envelope) ([]Accepted // If *any* signature is found to be incorrect, it is skipped var acceptedKeys []AcceptedKey usedKeyids := make(map[string]string) -<<<<<<< HEAD unverified_providers := make([]Verifier, len(ev.providers)) copy(unverified_providers, ev.providers) -======= - unverified_providers := ev.providers ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, s := range e.Signatures { sig, err := b64Decode(s.Sig) if err != nil { diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ecdsa.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ecdsa.go index af0d52d97f..691091af99 100644 --- a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ecdsa.go +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ecdsa.go @@ -11,14 +11,10 @@ import ( "os" ) -<<<<<<< HEAD const ( ECDSAKeyType = "ecdsa" ECDSAKeyScheme = "ecdsa-sha2-nistp256" ) -======= -const ECDSAKeyType = "ecdsa" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // ECDSASignerVerifier is a dsse.SignerVerifier compliant interface to sign and // verify signatures using ECDSA keys. @@ -96,14 +92,11 @@ func (sv *ECDSASignerVerifier) Public() crypto.PublicKey { // LoadECDSAKeyFromFile returns an SSLibKey instance for an ECDSA key stored in // a file in the custom securesystemslib format. -<<<<<<< HEAD // // Deprecated: use LoadKey(). The custom serialization format has been // deprecated. Use // https://github.com/secure-systems-lab/securesystemslib/blob/main/docs/migrate_key.py // to convert your key. -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func LoadECDSAKeyFromFile(path string) (*SSLibKey, error) { contents, err := os.ReadFile(path) if err != nil { diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ed25519.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ed25519.go index 34dc8a710b..d954e14b74 100644 --- a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ed25519.go +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ed25519.go @@ -88,14 +88,11 @@ func (sv *ED25519SignerVerifier) Public() crypto.PublicKey { // LoadED25519KeyFromFile returns an SSLibKey instance for an ED25519 key stored // in a file in the custom securesystemslib format. -<<<<<<< HEAD // // Deprecated: use LoadKey(). The custom serialization format has been // deprecated. Use // https://github.com/secure-systems-lab/securesystemslib/blob/main/docs/migrate_key.py // to convert your key. 
-======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func LoadED25519KeyFromFile(path string) (*SSLibKey, error) { contents, err := os.ReadFile(path) if err != nil { diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/rsa.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/rsa.go index 07a7710be6..2abfcb27c4 100644 --- a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/rsa.go +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/rsa.go @@ -94,14 +94,11 @@ func (sv *RSAPSSSignerVerifier) Public() crypto.PublicKey { // LoadRSAPSSKeyFromFile returns an SSLibKey instance for an RSA key stored in a // file. -<<<<<<< HEAD // // Deprecated: use LoadKey(). The custom serialization format has been // deprecated. Use // https://github.com/secure-systems-lab/securesystemslib/blob/main/docs/migrate_key.py // to convert your key. -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func LoadRSAPSSKeyFromFile(path string) (*SSLibKey, error) { contents, err := os.ReadFile(path) if err != nil { @@ -111,7 +108,6 @@ func LoadRSAPSSKeyFromFile(path string) (*SSLibKey, error) { return LoadRSAPSSKeyFromBytes(contents) } -<<<<<<< HEAD // LoadRSAPSSKeyFromBytes is a function that takes a byte array as input. This // byte array should represent a PEM encoded RSA key, as PEM encoding is // required. The function returns an SSLibKey instance, which is a struct that @@ -119,11 +115,6 @@ func LoadRSAPSSKeyFromFile(path string) (*SSLibKey, error) { // // Deprecated: use LoadKey() for all key types, RSA is no longer the only key // that uses PEM serialization. -======= -// LoadRSAPSSKeyFromBytes is a function that takes a byte array as input. This byte array should represent a PEM encoded RSA key, as PEM encoding is required. -// The function returns an SSLibKey instance, which is a struct that holds the key data. - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func LoadRSAPSSKeyFromBytes(contents []byte) (*SSLibKey, error) { pemData, keyObj, err := decodeAndParsePEM(contents) if err != nil { diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/signerverifier.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/signerverifier.go index 03ae596d43..3a8259dfda 100644 --- a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/signerverifier.go +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/signerverifier.go @@ -1,7 +1,6 @@ package signerverifier import ( -<<<<<<< HEAD "crypto/ecdsa" "crypto/ed25519" "crypto/rsa" @@ -9,9 +8,6 @@ import ( "encoding/hex" "errors" "strings" -======= - "errors" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) var KeyIDHashAlgorithms = []string{"sha256", "sha512"} @@ -22,10 +18,7 @@ var ( ErrUnknownKeyType = errors.New("unknown key type") ErrInvalidThreshold = errors.New("threshold is either less than 1 or greater than number of provided public keys") ErrInvalidKey = errors.New("key object has no value") -<<<<<<< HEAD ErrInvalidPEM = errors.New("unable to parse PEM block") -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const ( @@ -48,7 +41,6 @@ type KeyVal struct { Identity string `json:"identity,omitempty"` Issuer string `json:"issuer,omitempty"` } -<<<<<<< HEAD // LoadKey returns an SSLibKey object when provided a PEM encoded key. // Currently, RSA, ED25519, and ECDSA keys are supported. 
@@ -152,5 +144,3 @@ func LoadKey(keyBytes []byte) (*SSLibKey, error) { return key, nil } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/utils.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/utils.go index c82c9dde5b..e8a30b59fa 100644 --- a/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/utils.go +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/utils.go @@ -27,12 +27,9 @@ var ( // LoadKeyFromSSLibBytes returns a pointer to a Key instance created from the // contents of the bytes. The key contents are expected to be in the custom // securesystemslib format. -<<<<<<< HEAD // // Deprecated: use LoadKey() for all key types, RSA is no longer the only key // that uses PEM serialization. -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func LoadKeyFromSSLibBytes(contents []byte) (*SSLibKey, error) { var key *SSLibKey if err := json.Unmarshal(contents, &key); err != nil { diff --git a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/fulcio/fulcio.go b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/fulcio/fulcio.go index 5b388c8ee0..4a6d753cc8 100644 --- a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/fulcio/fulcio.go +++ b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/fulcio/fulcio.go @@ -24,10 +24,7 @@ import ( "os" "strings" -<<<<<<< HEAD "github.com/go-jose/go-jose/v3/jwt" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/sigstore/cosign/v2/cmd/cosign/cli/options" "github.com/sigstore/cosign/v2/cmd/cosign/cli/sign/privacy" "github.com/sigstore/cosign/v2/internal/pkg/cosign/fulcio/fulcioroots" @@ -37,10 +34,6 @@ import ( "github.com/sigstore/sigstore/pkg/cryptoutils" "github.com/sigstore/sigstore/pkg/oauthflow" "github.com/sigstore/sigstore/pkg/signature" -<<<<<<< HEAD -======= - "go.step.sm/crypto/jose" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "golang.org/x/term" ) @@ -216,11 +209,7 @@ func NewClient(fulcioURL string) (api.LegacyClient, error) { // or a path to an identity token via the --identity-token flag func idToken(s string) (string, error) { // If this is a valid raw token or is empty, just return it -<<<<<<< HEAD if _, err := jwt.ParseSigned(s); err == nil || s == "" { -======= - if _, err := jose.ParseSigned(s); err == nil || s == "" { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return s, nil } diff --git a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/copy.go b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/copy.go index c94ad478d9..9408186371 100644 --- a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/copy.go +++ b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/copy.go @@ -35,11 +35,7 @@ func (o *CopyOptions) AddFlags(cmd *cobra.Command) { o.Registry.AddFlags(cmd) cmd.Flags().StringVar(&o.CopyOnly, "only", "", -<<<<<<< HEAD "custom string array to only copy specific items, this flag is comma delimited. ex: --only=sig,att,sbom") -======= - "custom string array to only copy specific items, this flag is comma delimited. 
ex: --only=sbom,sign,att") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) cmd.Flags().BoolVar(&o.SignatureOnly, "sig-only", false, "[DEPRECATED] only copy the image signature") diff --git a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/deprecate.go b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/deprecate.go index 38bfd617eb..39900375f9 100644 --- a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/deprecate.go +++ b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/deprecate.go @@ -19,11 +19,8 @@ const SBOMAttachmentDeprecation = "WARNING: SBOM attachments are deprecated " + "and support will be removed in a Cosign release soon after 2024-02-22 " + "(see https://github.com/sigstore/cosign/issues/2755). " + "Instead, please use SBOM attestations." -<<<<<<< HEAD const RootWithoutChecksumDeprecation = "WARNING: Fetching initial root from URL " + "without providing its checksum is deprecated and will be disallowed in " + "a future Cosign release. Please provide the initial root checksum " + "via the --root-checksum argument." -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/initialize.go b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/initialize.go index 38f7d3c05a..9af970e0ad 100644 --- a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/initialize.go +++ b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/initialize.go @@ -22,14 +22,9 @@ import ( // InitializeOptions is the top level wrapper for the initialize command. type InitializeOptions struct { -<<<<<<< HEAD Mirror string Root string RootChecksum string -======= - Mirror string - Root string ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var _ Interface = (*InitializeOptions)(nil) @@ -42,10 +37,7 @@ func (o *InitializeOptions) AddFlags(cmd *cobra.Command) { cmd.Flags().StringVar(&o.Root, "root", "", "path to trusted initial root. defaults to embedded root") _ = cmd.Flags().SetAnnotation("root", cobra.BashCompSubdirsInDir, []string{}) -<<<<<<< HEAD cmd.Flags().StringVar(&o.RootChecksum, "root-checksum", "", "checksum of the initial root, required if root is downloaded via http(s). expects sha256 by default, can be changed to sha512 by providing sha512:") -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/registry.go b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/registry.go index 99d90d7293..9d22b4ea8b 100644 --- a/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/registry.go +++ b/vendor/github.com/sigstore/cosign/v2/cmd/cosign/cli/options/registry.go @@ -17,18 +17,12 @@ package options import ( "context" "crypto/tls" -<<<<<<< HEAD "crypto/x509" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "errors" "fmt" "io" "net/http" -<<<<<<< HEAD "os" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ecr "github.com/awslabs/amazon-ecr-credential-helper/ecr-login" "github.com/chrismellard/docker-credential-acr-env/pkg/credhelper" @@ -53,13 +47,10 @@ type RegistryOptions struct { RefOpts ReferenceOptions Keychain Keychain AuthConfig authn.AuthConfig -<<<<<<< HEAD RegistryCACert string RegistryClientCert string RegistryClientKey string RegistryServerName string -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // RegistryClientOpts allows overriding the result of GetRegistryClientOpts. 
RegistryClientOpts []remote.Option @@ -87,7 +78,6 @@ func (o *RegistryOptions) AddFlags(cmd *cobra.Command) { cmd.Flags().StringVar(&o.AuthConfig.RegistryToken, "registry-token", "", "registry bearer auth token") -<<<<<<< HEAD cmd.Flags().StringVar(&o.RegistryCACert, "registry-cacert", "", "path to the X.509 CA certificate file in PEM format to be used for the connection to the registry") @@ -100,8 +90,6 @@ func (o *RegistryOptions) AddFlags(cmd *cobra.Command) { cmd.Flags().StringVar(&o.RegistryServerName, "registry-server-name", "", "SAN name to use as the 'ServerName' tls.Config field to verify the mTLS connection to the registry") -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) o.RefOpts.AddFlags(cmd) } @@ -161,16 +149,11 @@ func (o *RegistryOptions) GetRegistryClientOpts(ctx context.Context) []remote.Op opts = append(opts, remote.WithAuthFromKeychain(authn.DefaultKeychain)) } -<<<<<<< HEAD tlsConfig, err := o.getTLSConfig() if err == nil { tr := http.DefaultTransport.(*http.Transport).Clone() tr.TLSClientConfig = tlsConfig opts = append(opts, remote.WithTransport(tr)) -======= - if o.AllowInsecure { - opts = append(opts, remote.WithTransport(&http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}})) // #nosec G402 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Reuse a remote.Pusher and a remote.Puller for all operations that use these opts. @@ -231,7 +214,6 @@ func (o *RegistryExperimentalOptions) AddFlags(cmd *cobra.Command) { cmd.Flags().Var(&o.RegistryReferrersMode, "registry-referrers-mode", "mode for fetching references from the registry. allowed: legacy, oci-1-1") } -<<<<<<< HEAD func (o *RegistryOptions) getTLSConfig() (*tls.Config, error) { var tlsConfig tls.Config @@ -270,5 +252,3 @@ func (o *RegistryOptions) getTLSConfig() (*tls.Config, error) { return &tlsConfig, nil } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/blob/load.go b/vendor/github.com/sigstore/cosign/v2/pkg/blob/load.go index 0a3b24e7df..8ee624e93a 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/blob/load.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/blob/load.go @@ -15,12 +15,9 @@ package blob import ( -<<<<<<< HEAD "crypto/sha256" "crypto/sha512" "encoding/hex" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "fmt" "io" "net/http" @@ -78,7 +75,6 @@ func LoadFileOrURL(fileRef string) ([]byte, error) { } return raw, nil } -<<<<<<< HEAD func LoadFileOrURLWithChecksum(fileRef string, checksum string) ([]byte, error) { checksumParts := strings.Split(checksum, ":") @@ -111,5 +107,3 @@ func LoadFileOrURLWithChecksum(fileRef string, checksum string) ([]byte, error) return fileContent, nil } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/git/gitlab/gitlab.go b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/git/gitlab/gitlab.go index c5424e6102..b124691321 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/git/gitlab/gitlab.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/git/gitlab/gitlab.go @@ -24,11 +24,7 @@ import ( "github.com/sigstore/cosign/v2/internal/ui" "github.com/sigstore/cosign/v2/pkg/cosign" "github.com/sigstore/cosign/v2/pkg/cosign/env" -<<<<<<< HEAD gitlab "gitlab.com/gitlab-org/api/client-go" -======= - "github.com/xanzy/go-gitlab" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const ( diff --git 
a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/verify.go b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/verify.go index cad3d24ef9..3a6ee79b46 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/cosign/verify.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/cosign/verify.go @@ -34,11 +34,7 @@ import ( "strings" "time" -<<<<<<< HEAD "errors" -======= - "github.com/pkg/errors" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/digitorus/timestamp" "github.com/go-openapi/runtime" @@ -156,11 +152,8 @@ type CheckOpts struct { TSARootCertificates []*x509.Certificate // TSAIntermediateCertificates are the set of intermediates for chain building TSAIntermediateCertificates []*x509.Certificate -<<<<<<< HEAD // UseSignedTimestamps enables timestamp verification using a TSA UseSignedTimestamps bool -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // IgnoreTlog skip tlog verification IgnoreTlog bool @@ -675,7 +668,6 @@ func verifyInternal(ctx context.Context, sig oci.Signature, h v1.Hash, bundleVerified bool, err error) { var acceptableRFC3161Time, acceptableRekorBundleTime *time.Time // Timestamps for the signature we accept, or nil if not applicable. -<<<<<<< HEAD var acceptableRFC3161Timestamp *timestamp.Timestamp if co.UseSignedTimestamps { acceptableRFC3161Timestamp, err = VerifyRFC3161Timestamp(sig, co) @@ -685,14 +677,6 @@ func verifyInternal(ctx context.Context, sig oci.Signature, h v1.Hash, if acceptableRFC3161Timestamp != nil { acceptableRFC3161Time = &acceptableRFC3161Timestamp.Time } -======= - acceptableRFC3161Timestamp, err := VerifyRFC3161Timestamp(sig, co) - if err != nil { - return false, fmt.Errorf("unable to verify RFC3161 timestamp bundle: %w", err) - } - if acceptableRFC3161Timestamp != nil { - acceptableRFC3161Time = &acceptableRFC3161Timestamp.Time ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if !co.IgnoreTlog { diff --git a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/options.go b/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/options.go index 1c7243ba96..6eeaadd010 100644 --- a/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/options.go +++ b/vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/options.go @@ -113,7 +113,6 @@ func WithRemoteOptions(opts ...remote.Option) Option { } } -<<<<<<< HEAD // WithMoreRemoteOptions is a functional option for adding to the default // remote options already specified func WithMoreRemoteOptions(opts ...remote.Option) Option { @@ -122,8 +121,6 @@ func WithMoreRemoteOptions(opts ...remote.Option) Option { } } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // WithTargetRepository is a functional option for overriding the default // target repository hosting the signature and attestation tags. func WithTargetRepository(repo name.Repository) Option { diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1/sigstore_bundle.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1/sigstore_bundle.pb.go index ab346e1e99..80198530de 100644 --- a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1/sigstore_bundle.pb.go +++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1/sigstore_bundle.pb.go @@ -14,13 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -<<<<<<< HEAD // protoc-gen-go v1.36.3 // protoc v5.29.3 -======= -// protoc-gen-go v1.28.1 -// protoc v3.21.6 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // source: sigstore_bundle.proto package v1 @@ -47,40 +42,22 @@ const ( // Currently only RFC3161 signatures are provided. More formats may be added // in the future. type TimestampVerificationData struct { -<<<<<<< HEAD state protoimpl.MessageState `protogen:"open.v1"` -======= - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // A list of RFC3161 signed timestamps provided by the user. // This can be used when the entry has not been stored on a // transparency log, or in conjunction for a stronger trust model. // Clients MUST verify the hashed message in the message imprint // against the signature in the bundle. Rfc3161Timestamps []*v1.RFC3161SignedTimestamp `protobuf:"bytes,1,rep,name=rfc3161_timestamps,json=rfc3161Timestamps,proto3" json:"rfc3161_timestamps,omitempty"` -<<<<<<< HEAD unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *TimestampVerificationData) Reset() { *x = TimestampVerificationData{} -<<<<<<< HEAD mi := &file_sigstore_bundle_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_sigstore_bundle_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *TimestampVerificationData) String() string { @@ -91,11 +68,7 @@ func (*TimestampVerificationData) ProtoMessage() {} func (x *TimestampVerificationData) ProtoReflect() protoreflect.Message { mi := &file_sigstore_bundle_proto_msgTypes[0] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -127,14 +100,7 @@ func (x *TimestampVerificationData) GetRfc3161Timestamps() []*v1.RFC3161SignedTi // the key identifier, it MUST match the `keyid` field of the signature the // extension is attached to. type VerificationMaterial struct { -<<<<<<< HEAD state protoimpl.MessageState `protogen:"open.v1"` -======= - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The key material for verification purposes. // // This allows key material to be conveyed in one of three forms: @@ -169,11 +135,7 @@ type VerificationMaterial struct { // When used in a `0.3` bundle with the PGI and "keyless" signing, // form (3) MUST be used. // -<<<<<<< HEAD // Types that are valid to be assigned to Content: -======= - // Types that are assignable to Content: ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // *VerificationMaterial_PublicKey // *VerificationMaterial_X509CertificateChain @@ -190,26 +152,15 @@ type VerificationMaterial struct { // Timestamp may also come from // tlog_entries.inclusion_promise.signed_entry_timestamp. 
TimestampVerificationData *TimestampVerificationData `protobuf:"bytes,4,opt,name=timestamp_verification_data,json=timestampVerificationData,proto3" json:"timestamp_verification_data,omitempty"` -<<<<<<< HEAD unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *VerificationMaterial) Reset() { *x = VerificationMaterial{} -<<<<<<< HEAD mi := &file_sigstore_bundle_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_sigstore_bundle_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *VerificationMaterial) String() string { @@ -220,11 +171,7 @@ func (*VerificationMaterial) ProtoMessage() {} func (x *VerificationMaterial) ProtoReflect() protoreflect.Message { mi := &file_sigstore_bundle_proto_msgTypes[1] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -239,57 +186,36 @@ func (*VerificationMaterial) Descriptor() ([]byte, []int) { return file_sigstore_bundle_proto_rawDescGZIP(), []int{1} } -<<<<<<< HEAD func (x *VerificationMaterial) GetContent() isVerificationMaterial_Content { if x != nil { return x.Content -======= -func (m *VerificationMaterial) GetContent() isVerificationMaterial_Content { - if m != nil { - return m.Content ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return nil } func (x *VerificationMaterial) GetPublicKey() *v1.PublicKeyIdentifier { -<<<<<<< HEAD if x != nil { if x, ok := x.Content.(*VerificationMaterial_PublicKey); ok { return x.PublicKey } -======= - if x, ok := x.GetContent().(*VerificationMaterial_PublicKey); ok { - return x.PublicKey ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return nil } func (x *VerificationMaterial) GetX509CertificateChain() *v1.X509CertificateChain { -<<<<<<< HEAD if x != nil { if x, ok := x.Content.(*VerificationMaterial_X509CertificateChain); ok { return x.X509CertificateChain } -======= - if x, ok := x.GetContent().(*VerificationMaterial_X509CertificateChain); ok { - return x.X509CertificateChain ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return nil } func (x *VerificationMaterial) GetCertificate() *v1.X509Certificate { -<<<<<<< HEAD if x != nil { if x, ok := x.Content.(*VerificationMaterial_Certificate); ok { return x.Certificate } -======= - if x, ok := x.GetContent().(*VerificationMaterial_Certificate); ok { - return x.Certificate ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return nil } @@ -331,14 +257,7 @@ func (*VerificationMaterial_X509CertificateChain) isVerificationMaterial_Content func (*VerificationMaterial_Certificate) isVerificationMaterial_Content() {} type Bundle struct { -<<<<<<< HEAD state protoimpl.MessageState `protogen:"open.v1"` -======= - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // MUST be application/vnd.dev.sigstore.bundle.v0.3+json when // when encoded as JSON. 
// Clients must to be able to accept media type using the previously @@ -357,7 +276,6 @@ type Bundle struct { // MUST be exactly the same in the verification material and in the // DSSE envelope. VerificationMaterial *VerificationMaterial `protobuf:"bytes,2,opt,name=verification_material,json=verificationMaterial,proto3" json:"verification_material,omitempty"` -<<<<<<< HEAD // Types that are valid to be assigned to Content: // // *Bundle_MessageSignature @@ -365,28 +283,13 @@ type Bundle struct { Content isBundle_Content `protobuf_oneof:"content"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache -======= - // Types that are assignable to Content: - // - // *Bundle_MessageSignature - // *Bundle_DsseEnvelope - Content isBundle_Content `protobuf_oneof:"content"` ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Bundle) Reset() { *x = Bundle{} -<<<<<<< HEAD mi := &file_sigstore_bundle_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_sigstore_bundle_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Bundle) String() string { @@ -397,11 +300,7 @@ func (*Bundle) ProtoMessage() {} func (x *Bundle) ProtoReflect() protoreflect.Message { mi := &file_sigstore_bundle_proto_msgTypes[2] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -430,43 +329,27 @@ func (x *Bundle) GetVerificationMaterial() *VerificationMaterial { return nil } -<<<<<<< HEAD func (x *Bundle) GetContent() isBundle_Content { if x != nil { return x.Content -======= -func (m *Bundle) GetContent() isBundle_Content { - if m != nil { - return m.Content ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return nil } func (x *Bundle) GetMessageSignature() *v1.MessageSignature { -<<<<<<< HEAD if x != nil { if x, ok := x.Content.(*Bundle_MessageSignature); ok { return x.MessageSignature } -======= - if x, ok := x.GetContent().(*Bundle_MessageSignature); ok { - return x.MessageSignature ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return nil } func (x *Bundle) GetDsseEnvelope() *dsse.Envelope { -<<<<<<< HEAD if x != nil { if x, ok := x.Content.(*Bundle_DsseEnvelope); ok { return x.DsseEnvelope } -======= - if x, ok := x.GetContent().(*Bundle_DsseEnvelope); ok { - return x.DsseEnvelope ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return nil } @@ -485,17 +368,10 @@ type Bundle_DsseEnvelope struct { // supported and expected type. This is part of the DSSE // protocol which is defined here: // -<<<<<<< HEAD // DSSE envelopes in a bundle MUST have exactly one signature. // This is a limitation from the DSSE spec, as it can contain // multiple signatures. There are two primary reasons: // 1. It simplifies the verification logic and policy -======= - // DSSE envelopes in a bundle MUST have exactly one signture. - // This is a limitation from the DSSE spec, as it can contain - // multiple signatures. There are two primary reasons: - // 1. It simplfies the verification logic and policy ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // 2. 
The bundle (currently) can only contain a single // instance of the required verification materials // @@ -603,11 +479,7 @@ func file_sigstore_bundle_proto_rawDescGZIP() []byte { } var file_sigstore_bundle_proto_msgTypes = make([]protoimpl.MessageInfo, 3) -<<<<<<< HEAD var file_sigstore_bundle_proto_goTypes = []any{ -======= -var file_sigstore_bundle_proto_goTypes = []interface{}{ ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) (*TimestampVerificationData)(nil), // 0: dev.sigstore.bundle.v1.TimestampVerificationData (*VerificationMaterial)(nil), // 1: dev.sigstore.bundle.v1.VerificationMaterial (*Bundle)(nil), // 2: dev.sigstore.bundle.v1.Bundle @@ -641,58 +513,12 @@ func file_sigstore_bundle_proto_init() { if File_sigstore_bundle_proto != nil { return } -<<<<<<< HEAD file_sigstore_bundle_proto_msgTypes[1].OneofWrappers = []any{ -======= - if !protoimpl.UnsafeEnabled { - file_sigstore_bundle_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TimestampVerificationData); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sigstore_bundle_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VerificationMaterial); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sigstore_bundle_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bundle); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_sigstore_bundle_proto_msgTypes[1].OneofWrappers = []interface{}{ ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) (*VerificationMaterial_PublicKey)(nil), (*VerificationMaterial_X509CertificateChain)(nil), (*VerificationMaterial_Certificate)(nil), } -<<<<<<< HEAD file_sigstore_bundle_proto_msgTypes[2].OneofWrappers = []any{ -======= - file_sigstore_bundle_proto_msgTypes[2].OneofWrappers = []interface{}{ ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) (*Bundle_MessageSignature)(nil), (*Bundle_DsseEnvelope)(nil), } diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go index 6c8639f035..2c5c99efde 100644 --- a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go +++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go @@ -14,13 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -<<<<<<< HEAD // protoc-gen-go v1.36.3 // protoc v5.29.3 -======= -// protoc-gen-go v1.28.1 -// protoc v3.21.6 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // source: sigstore_common.proto package v1 @@ -128,7 +123,6 @@ const ( PublicKeyDetails_PUBLIC_KEY_DETAILS_UNSPECIFIED PublicKeyDetails = 0 // RSA // -<<<<<<< HEAD // Deprecated: Marked as deprecated in sigstore_common.proto. PublicKeyDetails_PKCS1_RSA_PKCS1V5 PublicKeyDetails = 1 // See RFC8017 // Deprecated: Marked as deprecated in sigstore_common.proto. @@ -136,15 +130,6 @@ const ( // Deprecated: Marked as deprecated in sigstore_common.proto. PublicKeyDetails_PKIX_RSA_PKCS1V5 PublicKeyDetails = 3 // Deprecated: Marked as deprecated in sigstore_common.proto. -======= - // Deprecated: Do not use. 
- PublicKeyDetails_PKCS1_RSA_PKCS1V5 PublicKeyDetails = 1 // See RFC8017 - // Deprecated: Do not use. - PublicKeyDetails_PKCS1_RSA_PSS PublicKeyDetails = 2 // See RFC8017 - // Deprecated: Do not use. - PublicKeyDetails_PKIX_RSA_PKCS1V5 PublicKeyDetails = 3 - // Deprecated: Do not use. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) PublicKeyDetails_PKIX_RSA_PSS PublicKeyDetails = 4 // RSA public key in PKIX format, PKCS#1v1.5 signature PublicKeyDetails_PKIX_RSA_PKCS1V15_2048_SHA256 PublicKeyDetails = 9 @@ -156,11 +141,7 @@ const ( PublicKeyDetails_PKIX_RSA_PSS_4096_SHA256 PublicKeyDetails = 18 // ECDSA // -<<<<<<< HEAD // Deprecated: Marked as deprecated in sigstore_common.proto. -======= - // Deprecated: Do not use. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) PublicKeyDetails_PKIX_ECDSA_P256_HMAC_SHA_256 PublicKeyDetails = 6 // See RFC6979 PublicKeyDetails_PKIX_ECDSA_P256_SHA_256 PublicKeyDetails = 5 // See NIST FIPS 186-4 PublicKeyDetails_PKIX_ECDSA_P384_SHA_384 PublicKeyDetails = 12 @@ -316,7 +297,6 @@ func (SubjectAlternativeNameType) EnumDescriptor() ([]byte, []int) { // HashOutput captures a digest of a 'message' (generic octet sequence) // and the corresponding hash algorithm used. type HashOutput struct { -<<<<<<< HEAD state protoimpl.MessageState `protogen:"open.v1"` Algorithm HashAlgorithm `protobuf:"varint,1,opt,name=algorithm,proto3,enum=dev.sigstore.common.v1.HashAlgorithm" json:"algorithm,omitempty"` // This is the raw octets of the message digest as computed by @@ -324,31 +304,13 @@ type HashOutput struct { Digest []byte `protobuf:"bytes,2,opt,name=digest,proto3" json:"digest,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache -======= - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Algorithm HashAlgorithm `protobuf:"varint,1,opt,name=algorithm,proto3,enum=dev.sigstore.common.v1.HashAlgorithm" json:"algorithm,omitempty"` - // This is the raw octets of the message digest as computed by - // the hash algorithm. - Digest []byte `protobuf:"bytes,2,opt,name=digest,proto3" json:"digest,omitempty"` ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *HashOutput) Reset() { *x = HashOutput{} -<<<<<<< HEAD mi := &file_sigstore_common_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_sigstore_common_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *HashOutput) String() string { @@ -359,11 +321,7 @@ func (*HashOutput) ProtoMessage() {} func (x *HashOutput) ProtoReflect() protoreflect.Message { mi := &file_sigstore_common_proto_msgTypes[0] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -394,14 +352,7 @@ func (x *HashOutput) GetDigest() []byte { // MessageSignature stores the computed signature over a message. type MessageSignature struct { -<<<<<<< HEAD state protoimpl.MessageState `protogen:"open.v1"` -======= - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Message digest can be used to identify the artifact. 
// Clients MUST NOT attempt to use this digest to verify the associated // signature; it is intended solely for identification. @@ -414,28 +365,16 @@ type MessageSignature struct { // algorithm. // When using a key pair, the algorithm MUST be part of the public // key, which MUST be communicated out-of-band. -<<<<<<< HEAD Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache -======= - Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *MessageSignature) Reset() { *x = MessageSignature{} -<<<<<<< HEAD mi := &file_sigstore_common_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_sigstore_common_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *MessageSignature) String() string { @@ -446,11 +385,7 @@ func (*MessageSignature) ProtoMessage() {} func (x *MessageSignature) ProtoReflect() protoreflect.Message { mi := &file_sigstore_common_proto_msgTypes[1] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -481,35 +416,18 @@ func (x *MessageSignature) GetSignature() []byte { // LogId captures the identity of a transparency log. type LogId struct { -<<<<<<< HEAD state protoimpl.MessageState `protogen:"open.v1"` // The unique identity of the log, represented by its public key. KeyId []byte `protobuf:"bytes,1,opt,name=key_id,json=keyId,proto3" json:"key_id,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache -======= - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The unique identity of the log, represented by its public key. - KeyId []byte `protobuf:"bytes,1,opt,name=key_id,json=keyId,proto3" json:"key_id,omitempty"` ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *LogId) Reset() { *x = LogId{} -<<<<<<< HEAD mi := &file_sigstore_common_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_sigstore_common_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *LogId) String() string { @@ -520,11 +438,7 @@ func (*LogId) ProtoMessage() {} func (x *LogId) ProtoReflect() protoreflect.Message { mi := &file_sigstore_common_proto_msgTypes[2] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -548,37 +462,19 @@ func (x *LogId) GetKeyId() []byte { // This message holds a RFC 3161 timestamp. type RFC3161SignedTimestamp struct { -<<<<<<< HEAD state protoimpl.MessageState `protogen:"open.v1"` // Signed timestamp is the DER encoded TimeStampResponse. 
// See https://www.rfc-editor.org/rfc/rfc3161.html#section-2.4.2 SignedTimestamp []byte `protobuf:"bytes,1,opt,name=signed_timestamp,json=signedTimestamp,proto3" json:"signed_timestamp,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache -======= - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Signed timestamp is the DER encoded TimeStampResponse. - // See https://www.rfc-editor.org/rfc/rfc3161.html#section-2.4.2 - SignedTimestamp []byte `protobuf:"bytes,1,opt,name=signed_timestamp,json=signedTimestamp,proto3" json:"signed_timestamp,omitempty"` ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *RFC3161SignedTimestamp) Reset() { *x = RFC3161SignedTimestamp{} -<<<<<<< HEAD mi := &file_sigstore_common_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_sigstore_common_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *RFC3161SignedTimestamp) String() string { @@ -589,11 +485,7 @@ func (*RFC3161SignedTimestamp) ProtoMessage() {} func (x *RFC3161SignedTimestamp) ProtoReflect() protoreflect.Message { mi := &file_sigstore_common_proto_msgTypes[3] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -616,42 +508,23 @@ func (x *RFC3161SignedTimestamp) GetSignedTimestamp() []byte { } type PublicKey struct { -<<<<<<< HEAD state protoimpl.MessageState `protogen:"open.v1"` -======= - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // DER-encoded public key, encoding method is specified by the // key_details attribute. RawBytes []byte `protobuf:"bytes,1,opt,name=raw_bytes,json=rawBytes,proto3,oneof" json:"raw_bytes,omitempty"` // Key encoding and signature algorithm to use for this key. KeyDetails PublicKeyDetails `protobuf:"varint,2,opt,name=key_details,json=keyDetails,proto3,enum=dev.sigstore.common.v1.PublicKeyDetails" json:"key_details,omitempty"` // Optional validity period for this key, *inclusive* of the endpoints. 
-<<<<<<< HEAD ValidFor *TimeRange `protobuf:"bytes,3,opt,name=valid_for,json=validFor,proto3,oneof" json:"valid_for,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache -======= - ValidFor *TimeRange `protobuf:"bytes,3,opt,name=valid_for,json=validFor,proto3,oneof" json:"valid_for,omitempty"` ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *PublicKey) Reset() { *x = PublicKey{} -<<<<<<< HEAD mi := &file_sigstore_common_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_sigstore_common_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *PublicKey) String() string { @@ -662,11 +535,7 @@ func (*PublicKey) ProtoMessage() {} func (x *PublicKey) ProtoReflect() protoreflect.Message { mi := &file_sigstore_common_proto_msgTypes[4] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -705,14 +574,7 @@ func (x *PublicKey) GetValidFor() *TimeRange { // PublicKeyIdentifier can be used to identify an (out of band) delivered // key, to verify a signature. type PublicKeyIdentifier struct { -<<<<<<< HEAD state protoimpl.MessageState `protogen:"open.v1"` -======= - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Optional unauthenticated hint on which key to use. // The format of the hint must be agreed upon out of band by the // signer and the verifiers, and so is not subject to this @@ -722,28 +584,16 @@ type PublicKeyIdentifier struct { // Implementors are RECOMMENDED to derive the value from the public // key as described in RFC 6962. 
// See: -<<<<<<< HEAD Hint string `protobuf:"bytes,1,opt,name=hint,proto3" json:"hint,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache -======= - Hint string `protobuf:"bytes,1,opt,name=hint,proto3" json:"hint,omitempty"` ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *PublicKeyIdentifier) Reset() { *x = PublicKeyIdentifier{} -<<<<<<< HEAD mi := &file_sigstore_common_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_sigstore_common_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *PublicKeyIdentifier) String() string { @@ -754,11 +604,7 @@ func (*PublicKeyIdentifier) ProtoMessage() {} func (x *PublicKeyIdentifier) ProtoReflect() protoreflect.Message { mi := &file_sigstore_common_proto_msgTypes[5] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -782,33 +628,17 @@ func (x *PublicKeyIdentifier) GetHint() string { // An ASN.1 OBJECT IDENTIFIER type ObjectIdentifier struct { -<<<<<<< HEAD state protoimpl.MessageState `protogen:"open.v1"` Id []int32 `protobuf:"varint,1,rep,packed,name=id,proto3" json:"id,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache -======= - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id []int32 `protobuf:"varint,1,rep,packed,name=id,proto3" json:"id,omitempty"` ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ObjectIdentifier) Reset() { *x = ObjectIdentifier{} -<<<<<<< HEAD mi := &file_sigstore_common_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_sigstore_common_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ObjectIdentifier) String() string { @@ -819,11 +649,7 @@ func (*ObjectIdentifier) ProtoMessage() {} func (x *ObjectIdentifier) ProtoReflect() protoreflect.Message { mi := &file_sigstore_common_proto_msgTypes[6] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -847,35 +673,18 @@ func (x *ObjectIdentifier) GetId() []int32 { // An OID and the corresponding (byte) value. 
type ObjectIdentifierValuePair struct { -<<<<<<< HEAD state protoimpl.MessageState `protogen:"open.v1"` Oid *ObjectIdentifier `protobuf:"bytes,1,opt,name=oid,proto3" json:"oid,omitempty"` Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache -======= - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Oid *ObjectIdentifier `protobuf:"bytes,1,opt,name=oid,proto3" json:"oid,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ObjectIdentifierValuePair) Reset() { *x = ObjectIdentifierValuePair{} -<<<<<<< HEAD mi := &file_sigstore_common_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_sigstore_common_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ObjectIdentifierValuePair) String() string { @@ -886,11 +695,7 @@ func (*ObjectIdentifierValuePair) ProtoMessage() {} func (x *ObjectIdentifierValuePair) ProtoReflect() protoreflect.Message { mi := &file_sigstore_common_proto_msgTypes[7] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -920,35 +725,18 @@ func (x *ObjectIdentifierValuePair) GetValue() []byte { } type DistinguishedName struct { -<<<<<<< HEAD state protoimpl.MessageState `protogen:"open.v1"` Organization string `protobuf:"bytes,1,opt,name=organization,proto3" json:"organization,omitempty"` CommonName string `protobuf:"bytes,2,opt,name=common_name,json=commonName,proto3" json:"common_name,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache -======= - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Organization string `protobuf:"bytes,1,opt,name=organization,proto3" json:"organization,omitempty"` - CommonName string `protobuf:"bytes,2,opt,name=common_name,json=commonName,proto3" json:"common_name,omitempty"` ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *DistinguishedName) Reset() { *x = DistinguishedName{} -<<<<<<< HEAD mi := &file_sigstore_common_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_sigstore_common_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *DistinguishedName) String() string { @@ -959,11 +747,7 @@ func (*DistinguishedName) ProtoMessage() {} func (x *DistinguishedName) ProtoReflect() protoreflect.Message { mi := &file_sigstore_common_proto_msgTypes[8] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -993,35 +777,18 @@ func (x *DistinguishedName) GetCommonName() string { } type X509Certificate struct { -<<<<<<< HEAD state protoimpl.MessageState `protogen:"open.v1"` 
// DER-encoded X.509 certificate. RawBytes []byte `protobuf:"bytes,1,opt,name=raw_bytes,json=rawBytes,proto3" json:"raw_bytes,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache -======= - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // DER-encoded X.509 certificate. - RawBytes []byte `protobuf:"bytes,1,opt,name=raw_bytes,json=rawBytes,proto3" json:"raw_bytes,omitempty"` ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *X509Certificate) Reset() { *x = X509Certificate{} -<<<<<<< HEAD mi := &file_sigstore_common_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_sigstore_common_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *X509Certificate) String() string { @@ -1032,11 +799,7 @@ func (*X509Certificate) ProtoMessage() {} func (x *X509Certificate) ProtoReflect() protoreflect.Message { mi := &file_sigstore_common_proto_msgTypes[9] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1059,7 +822,6 @@ func (x *X509Certificate) GetRawBytes() []byte { } type SubjectAlternativeName struct { -<<<<<<< HEAD state protoimpl.MessageState `protogen:"open.v1"` Type SubjectAlternativeNameType `protobuf:"varint,1,opt,name=type,proto3,enum=dev.sigstore.common.v1.SubjectAlternativeNameType" json:"type,omitempty"` // Types that are valid to be assigned to Identity: @@ -1069,33 +831,13 @@ type SubjectAlternativeName struct { Identity isSubjectAlternativeName_Identity `protobuf_oneof:"identity"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache -======= - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type SubjectAlternativeNameType `protobuf:"varint,1,opt,name=type,proto3,enum=dev.sigstore.common.v1.SubjectAlternativeNameType" json:"type,omitempty"` - // Types that are assignable to Identity: - // - // *SubjectAlternativeName_Regexp - // *SubjectAlternativeName_Value - Identity isSubjectAlternativeName_Identity `protobuf_oneof:"identity"` ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *SubjectAlternativeName) Reset() { *x = SubjectAlternativeName{} -<<<<<<< HEAD mi := &file_sigstore_common_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_sigstore_common_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *SubjectAlternativeName) String() string { @@ -1106,11 +848,7 @@ func (*SubjectAlternativeName) ProtoMessage() {} func (x *SubjectAlternativeName) ProtoReflect() protoreflect.Message { mi := &file_sigstore_common_proto_msgTypes[10] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1132,43 +870,27 @@ func (x *SubjectAlternativeName) GetType() SubjectAlternativeNameType { 
return SubjectAlternativeNameType_SUBJECT_ALTERNATIVE_NAME_TYPE_UNSPECIFIED } -<<<<<<< HEAD func (x *SubjectAlternativeName) GetIdentity() isSubjectAlternativeName_Identity { if x != nil { return x.Identity -======= -func (m *SubjectAlternativeName) GetIdentity() isSubjectAlternativeName_Identity { - if m != nil { - return m.Identity ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return nil } func (x *SubjectAlternativeName) GetRegexp() string { -<<<<<<< HEAD if x != nil { if x, ok := x.Identity.(*SubjectAlternativeName_Regexp); ok { return x.Regexp } -======= - if x, ok := x.GetIdentity().(*SubjectAlternativeName_Regexp); ok { - return x.Regexp ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return "" } func (x *SubjectAlternativeName) GetValue() string { -<<<<<<< HEAD if x != nil { if x, ok := x.Identity.(*SubjectAlternativeName_Value); ok { return x.Value } -======= - if x, ok := x.GetIdentity().(*SubjectAlternativeName_Value); ok { - return x.Value ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return "" } @@ -1198,41 +920,22 @@ func (*SubjectAlternativeName_Value) isSubjectAlternativeName_Identity() {} // certificate within a TUF root of trust or multiple untrusted certificates for // the purpose of chain building. type X509CertificateChain struct { -<<<<<<< HEAD state protoimpl.MessageState `protogen:"open.v1"` -======= - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // One or more DER-encoded certificates. // // In some contexts (such as `VerificationMaterial.x509_certificate_chain`), this sequence // has an imposed order. Unless explicitly specified, there is otherwise no // guaranteed order. -<<<<<<< HEAD Certificates []*X509Certificate `protobuf:"bytes,1,rep,name=certificates,proto3" json:"certificates,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache -======= - Certificates []*X509Certificate `protobuf:"bytes,1,rep,name=certificates,proto3" json:"certificates,omitempty"` ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *X509CertificateChain) Reset() { *x = X509CertificateChain{} -<<<<<<< HEAD mi := &file_sigstore_common_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_sigstore_common_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *X509CertificateChain) String() string { @@ -1243,11 +946,7 @@ func (*X509CertificateChain) ProtoMessage() {} func (x *X509CertificateChain) ProtoReflect() protoreflect.Message { mi := &file_sigstore_common_proto_msgTypes[11] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1274,35 +973,18 @@ func (x *X509CertificateChain) GetCertificates() []*X509Certificate { // End is optional to be able to capture a period that has started but // has no known end. 
type TimeRange struct { -<<<<<<< HEAD state protoimpl.MessageState `protogen:"open.v1"` Start *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=start,proto3" json:"start,omitempty"` End *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=end,proto3,oneof" json:"end,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache -======= - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Start *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=start,proto3" json:"start,omitempty"` - End *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=end,proto3,oneof" json:"end,omitempty"` ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *TimeRange) Reset() { *x = TimeRange{} -<<<<<<< HEAD mi := &file_sigstore_common_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_sigstore_common_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *TimeRange) String() string { @@ -1313,11 +995,7 @@ func (*TimeRange) ProtoMessage() {} func (x *TimeRange) ProtoReflect() protoreflect.Message { mi := &file_sigstore_common_proto_msgTypes[12] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1511,11 +1189,7 @@ func file_sigstore_common_proto_rawDescGZIP() []byte { var file_sigstore_common_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_sigstore_common_proto_msgTypes = make([]protoimpl.MessageInfo, 13) -<<<<<<< HEAD var file_sigstore_common_proto_goTypes = []any{ -======= -var file_sigstore_common_proto_goTypes = []interface{}{ ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) (HashAlgorithm)(0), // 0: dev.sigstore.common.v1.HashAlgorithm (PublicKeyDetails)(0), // 1: dev.sigstore.common.v1.PublicKeyDetails (SubjectAlternativeNameType)(0), // 2: dev.sigstore.common.v1.SubjectAlternativeNameType @@ -1556,179 +1230,12 @@ func file_sigstore_common_proto_init() { if File_sigstore_common_proto != nil { return } -<<<<<<< HEAD file_sigstore_common_proto_msgTypes[4].OneofWrappers = []any{} file_sigstore_common_proto_msgTypes[10].OneofWrappers = []any{ (*SubjectAlternativeName_Regexp)(nil), (*SubjectAlternativeName_Value)(nil), } file_sigstore_common_proto_msgTypes[12].OneofWrappers = []any{} -======= - if !protoimpl.UnsafeEnabled { - file_sigstore_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HashOutput); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sigstore_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MessageSignature); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sigstore_common_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LogId); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sigstore_common_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*RFC3161SignedTimestamp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sigstore_common_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PublicKey); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sigstore_common_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PublicKeyIdentifier); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sigstore_common_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ObjectIdentifier); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sigstore_common_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ObjectIdentifierValuePair); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sigstore_common_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DistinguishedName); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sigstore_common_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*X509Certificate); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sigstore_common_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SubjectAlternativeName); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sigstore_common_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*X509CertificateChain); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sigstore_common_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TimeRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_sigstore_common_proto_msgTypes[4].OneofWrappers = []interface{}{} - file_sigstore_common_proto_msgTypes[10].OneofWrappers = []interface{}{ - (*SubjectAlternativeName_Regexp)(nil), - (*SubjectAlternativeName_Value)(nil), - } - file_sigstore_common_proto_msgTypes[12].OneofWrappers = []interface{}{} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/dsse/envelope.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/dsse/envelope.pb.go index dea54e9b03..16e581ebe0 100644 --- a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/dsse/envelope.pb.go +++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/dsse/envelope.pb.go @@ -14,13 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -<<<<<<< HEAD // protoc-gen-go v1.36.3 // protoc v5.29.3 -======= -// protoc-gen-go v1.28.1 -// protoc v3.21.6 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // source: envelope.proto package dsse @@ -41,14 +36,7 @@ const ( // An authenticated message of arbitrary type. type Envelope struct { -<<<<<<< HEAD state protoimpl.MessageState `protogen:"open.v1"` -======= - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Message to be signed. (In JSON, this is encoded as base64.) // REQUIRED. Payload []byte `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` @@ -66,28 +54,16 @@ type Envelope struct { // "DSSEv1" = ASCII [0x44, 0x53, 0x53, 0x45, 0x76, 0x31] // LEN(s) = ASCII decimal encoding of the byte length of s, with no leading zeros // REQUIRED (length >= 1). -<<<<<<< HEAD Signatures []*Signature `protobuf:"bytes,3,rep,name=signatures,proto3" json:"signatures,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache -======= - Signatures []*Signature `protobuf:"bytes,3,rep,name=signatures,proto3" json:"signatures,omitempty"` ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Envelope) Reset() { *x = Envelope{} -<<<<<<< HEAD mi := &file_envelope_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_envelope_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Envelope) String() string { @@ -98,11 +74,7 @@ func (*Envelope) ProtoMessage() {} func (x *Envelope) ProtoReflect() protoreflect.Message { mi := &file_envelope_proto_msgTypes[0] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -139,41 +111,22 @@ func (x *Envelope) GetSignatures() []*Signature { } type Signature struct { -<<<<<<< HEAD state protoimpl.MessageState `protogen:"open.v1"` -======= - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Signature itself. (In JSON, this is encoded as base64.) // REQUIRED. Sig []byte `protobuf:"bytes,1,opt,name=sig,proto3" json:"sig,omitempty"` // *Unauthenticated* hint identifying which public key was used. // OPTIONAL. 
-<<<<<<< HEAD Keyid string `protobuf:"bytes,2,opt,name=keyid,proto3" json:"keyid,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache -======= - Keyid string `protobuf:"bytes,2,opt,name=keyid,proto3" json:"keyid,omitempty"` ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Signature) Reset() { *x = Signature{} -<<<<<<< HEAD mi := &file_envelope_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_envelope_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Signature) String() string { @@ -184,11 +137,7 @@ func (*Signature) ProtoMessage() {} func (x *Signature) ProtoReflect() protoreflect.Message { mi := &file_envelope_proto_msgTypes[1] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -253,11 +202,7 @@ func file_envelope_proto_rawDescGZIP() []byte { } var file_envelope_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -<<<<<<< HEAD var file_envelope_proto_goTypes = []any{ -======= -var file_envelope_proto_goTypes = []interface{}{ ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) (*Envelope)(nil), // 0: io.intoto.Envelope (*Signature)(nil), // 1: io.intoto.Signature } @@ -275,35 +220,6 @@ func file_envelope_proto_init() { if File_envelope_proto != nil { return } -<<<<<<< HEAD -======= - if !protoimpl.UnsafeEnabled { - file_envelope_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Envelope); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_envelope_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Signature); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1/sigstore_rekor.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1/sigstore_rekor.pb.go index fec52e4e05..5874bc29eb 100644 --- a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1/sigstore_rekor.pb.go +++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1/sigstore_rekor.pb.go @@ -14,13 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -<<<<<<< HEAD // protoc-gen-go v1.36.3 // protoc v5.29.3 -======= -// protoc-gen-go v1.28.1 -// protoc v3.21.6 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // source: sigstore_rekor.proto package v1 @@ -43,40 +38,21 @@ const ( // KindVersion contains the entry's kind and api version. type KindVersion struct { -<<<<<<< HEAD state protoimpl.MessageState `protogen:"open.v1"` -======= - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Kind is the type of entry being stored in the log. 
// See here for a list: https://github.com/sigstore/rekor/tree/main/pkg/types Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` // The specific api version of the type. -<<<<<<< HEAD Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache -======= - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *KindVersion) Reset() { *x = KindVersion{} -<<<<<<< HEAD mi := &file_sigstore_rekor_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_sigstore_rekor_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *KindVersion) String() string { @@ -87,11 +63,7 @@ func (*KindVersion) ProtoMessage() {} func (x *KindVersion) ProtoReflect() protoreflect.Message { mi := &file_sigstore_rekor_proto_msgTypes[0] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -131,33 +103,17 @@ func (x *KindVersion) GetVersion() string { // and https://github.com/C2SP/C2SP/blob/main/tlog-checkpoint.md. // An example implementation can be found in https://github.com/sigstore/rekor/blob/main/pkg/util/signed_note.go type Checkpoint struct { -<<<<<<< HEAD state protoimpl.MessageState `protogen:"open.v1"` Envelope string `protobuf:"bytes,1,opt,name=envelope,proto3" json:"envelope,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache -======= - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Envelope string `protobuf:"bytes,1,opt,name=envelope,proto3" json:"envelope,omitempty"` ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Checkpoint) Reset() { *x = Checkpoint{} -<<<<<<< HEAD mi := &file_sigstore_rekor_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_sigstore_rekor_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Checkpoint) String() string { @@ -168,11 +124,7 @@ func (*Checkpoint) ProtoMessage() {} func (x *Checkpoint) ProtoReflect() protoreflect.Message { mi := &file_sigstore_rekor_proto_msgTypes[1] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -197,14 +149,7 @@ func (x *Checkpoint) GetEnvelope() string { // InclusionProof is the proof returned from the transparency log. Can // be used for offline or online verification against the log. type InclusionProof struct { -<<<<<<< HEAD state protoimpl.MessageState `protogen:"open.v1"` -======= - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The index of the entry in the tree it was written to. 
LogIndex int64 `protobuf:"varint,1,opt,name=log_index,json=logIndex,proto3" json:"log_index,omitempty"` // The hash digest stored at the root of the merkle tree at the time @@ -220,28 +165,16 @@ type InclusionProof struct { Hashes [][]byte `protobuf:"bytes,4,rep,name=hashes,proto3" json:"hashes,omitempty"` // Signature of the tree head, as of the time of this proof was // generated. See above info on 'Checkpoint' for more details. -<<<<<<< HEAD Checkpoint *Checkpoint `protobuf:"bytes,5,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache -======= - Checkpoint *Checkpoint `protobuf:"bytes,5,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"` ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *InclusionProof) Reset() { *x = InclusionProof{} -<<<<<<< HEAD mi := &file_sigstore_rekor_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_sigstore_rekor_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *InclusionProof) String() string { @@ -252,11 +185,7 @@ func (*InclusionProof) ProtoMessage() {} func (x *InclusionProof) ProtoReflect() protoreflect.Message { mi := &file_sigstore_rekor_proto_msgTypes[2] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -317,33 +246,17 @@ func (x *InclusionProof) GetCheckpoint() *Checkpoint { // This is used to verify the integration timestamp's value and that the log // has promised to include the entry. 
type InclusionPromise struct { -<<<<<<< HEAD state protoimpl.MessageState `protogen:"open.v1"` SignedEntryTimestamp []byte `protobuf:"bytes,1,opt,name=signed_entry_timestamp,json=signedEntryTimestamp,proto3" json:"signed_entry_timestamp,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache -======= - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - SignedEntryTimestamp []byte `protobuf:"bytes,1,opt,name=signed_entry_timestamp,json=signedEntryTimestamp,proto3" json:"signed_entry_timestamp,omitempty"` ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *InclusionPromise) Reset() { *x = InclusionPromise{} -<<<<<<< HEAD mi := &file_sigstore_rekor_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_sigstore_rekor_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *InclusionPromise) String() string { @@ -354,11 +267,7 @@ func (*InclusionPromise) ProtoMessage() {} func (x *InclusionPromise) ProtoReflect() protoreflect.Message { mi := &file_sigstore_rekor_proto_msgTypes[3] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -389,14 +298,7 @@ func (x *InclusionPromise) GetSignedEntryTimestamp() []byte { // the response from Rekor) is similar to a Signed Certificate Timestamp // as described here https://www.rfc-editor.org/rfc/rfc6962.html#section-3.2. type TransparencyLogEntry struct { -<<<<<<< HEAD state protoimpl.MessageState `protogen:"open.v1"` -======= - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The global index of the entry, used when querying the log by index. LogIndex int64 `protobuf:"varint,1,opt,name=log_index,json=logIndex,proto3" json:"log_index,omitempty"` // The unique identifier of the log. @@ -406,7 +308,6 @@ type TransparencyLogEntry struct { // verification. KindVersion *KindVersion `protobuf:"bytes,3,opt,name=kind_version,json=kindVersion,proto3" json:"kind_version,omitempty"` // The UNIX timestamp from the log when the entry was persisted. -<<<<<<< HEAD // The integration time MUST NOT be trusted if inclusion_promise // is omitted. IntegratedTime int64 `protobuf:"varint,4,opt,name=integrated_time,json=integratedTime,proto3" json:"integrated_time,omitempty"` @@ -417,13 +318,6 @@ type TransparencyLogEntry struct { // or the current system time for long-lived certificates). // MUST be verified if no other suitable source of time is present, // and SHOULD be verified otherwise. -======= - IntegratedTime int64 `protobuf:"varint,4,opt,name=integrated_time,json=integratedTime,proto3" json:"integrated_time,omitempty"` - // The inclusion promise/signed entry timestamp from the log. - // Required for v0.1 bundles, and MUST be verified. - // Optional for >= v0.2 bundles, and SHOULD be verified when present. - // Also may be used as a signed timestamp. 
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) InclusionPromise *InclusionPromise `protobuf:"bytes,5,opt,name=inclusion_promise,json=inclusionPromise,proto3" json:"inclusion_promise,omitempty"` // The inclusion proof can be used for offline or online verification // that the entry was appended to the log, and that the log has not been @@ -448,26 +342,15 @@ type TransparencyLogEntry struct { // If not set, clients are responsible for constructing an equivalent // payload from other sources to verify the signature. CanonicalizedBody []byte `protobuf:"bytes,7,opt,name=canonicalized_body,json=canonicalizedBody,proto3" json:"canonicalized_body,omitempty"` -<<<<<<< HEAD unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *TransparencyLogEntry) Reset() { *x = TransparencyLogEntry{} -<<<<<<< HEAD mi := &file_sigstore_rekor_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_sigstore_rekor_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *TransparencyLogEntry) String() string { @@ -478,11 +361,7 @@ func (*TransparencyLogEntry) ProtoMessage() {} func (x *TransparencyLogEntry) ProtoReflect() protoreflect.Message { mi := &file_sigstore_rekor_proto_msgTypes[4] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -634,11 +513,7 @@ func file_sigstore_rekor_proto_rawDescGZIP() []byte { } var file_sigstore_rekor_proto_msgTypes = make([]protoimpl.MessageInfo, 5) -<<<<<<< HEAD var file_sigstore_rekor_proto_goTypes = []any{ -======= -var file_sigstore_rekor_proto_goTypes = []interface{}{ ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) (*KindVersion)(nil), // 0: dev.sigstore.rekor.v1.KindVersion (*Checkpoint)(nil), // 1: dev.sigstore.rekor.v1.Checkpoint (*InclusionProof)(nil), // 2: dev.sigstore.rekor.v1.InclusionProof @@ -664,71 +539,6 @@ func file_sigstore_rekor_proto_init() { if File_sigstore_rekor_proto != nil { return } -<<<<<<< HEAD -======= - if !protoimpl.UnsafeEnabled { - file_sigstore_rekor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*KindVersion); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sigstore_rekor_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Checkpoint); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sigstore_rekor_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*InclusionProof); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sigstore_rekor_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*InclusionPromise); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sigstore_rekor_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - 
switch v := v.(*TransparencyLogEntry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/hashedrekord_v0_0_1_schema.json b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/hashedrekord_v0_0_1_schema.json index 0aba7f3926..3d536eb49e 100644 --- a/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/hashedrekord_v0_0_1_schema.json +++ b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/hashedrekord_v0_0_1_schema.json @@ -47,11 +47,7 @@ }, "required": [ "algorithm", "value" ] } -<<<<<<< HEAD } -======= - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } }, "required": [ "signature", "data" ] diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/README.md b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/README.md index 6a36d799da..6bfe65b04e 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/README.md +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/README.md @@ -50,7 +50,6 @@ cosign verify --key azurekms://[Key Vault Name].vault.azure.net/[Key Name] [Cont ## Authentication -<<<<<<< HEAD This module uses the [`DefaultCredential` type](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential) to authenticate. This type supports the following authentication methods: @@ -62,19 +61,6 @@ to authenticate. This type supports the following authentication methods: See the [official documentation]( https://learn.microsoft.com/en-us/azure/developer/go/azure-sdk-authentication?tabs=bash) for more information. -======= -There are multiple authentication methods supported for Azure Key Vault and by default they will be evaluated in the following order: - -1. Client credentials (FromEnvironment) -1. Client certificate (FromEnvironment) -1. Username password (FromEnvironment) -1. MSI (FromEnvironment) -1. CLI (FromCLI) - -You can force either `FromEnvironment` or `FromCLI` by configuring the environment variable `AZURE_AUTH_METHOD` to either `environment` or `cli`. - -For backward compatibility, if you configure `AZURE_TENANT_ID`, `AZURE_CLIENT_ID` and `AZURE_CLIENT_SECRET`, `FromEnvironment` will be used. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) If you would like to use a cloud other than the Azure public cloud, configure `AZURE_ENVIRONMENT`. 
The following values are accepted: - `AZUREUSGOVERNMENT`, `AZUREUSGOVERNMENTCLOUD` uses the Azure US Government Cloud diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/client.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/client.go index 1080a11f28..38c788c8c6 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/client.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/client.go @@ -33,10 +33,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" -<<<<<<< HEAD -======= - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "github.com/go-jose/go-jose/v3" "github.com/jellydator/ttlcache/v3" @@ -120,7 +116,6 @@ func newAzureKMS(keyResourceID string) (*azureVaultClient, error) { return nil, err } -<<<<<<< HEAD opts := getAzClientOpts() cred, err := azidentity.NewDefaultAzureCredential(&azidentity.DefaultAzureCredentialOptions{ClientOptions: opts}) if err != nil { @@ -128,9 +123,6 @@ func newAzureKMS(keyResourceID string) (*azureVaultClient, error) { } client, err := azkeys.NewClient(vaultURL, cred, nil) -======= - client, err := getKeysClient(vaultURL) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, fmt.Errorf("new azure kms client: %w", err) } @@ -148,47 +140,6 @@ func newAzureKMS(keyResourceID string) (*azureVaultClient, error) { return azClient, nil } -<<<<<<< HEAD -======= -type authenticationMethod string - -const ( - unknownAuthenticationMethod = "unknown" - environmentAuthenticationMethod = "environment" - cliAuthenticationMethod = "cli" -) - -// getAuthMethod returns the an authenticationMethod to use to get an Azure Authorizer. -// If no environment variables are set, unknownAuthMethod will be used. -// If the environment variable 'AZURE_AUTH_METHOD' is set to either environment or cli, use it. -// If the environment variables 'AZURE_TENANT_ID', 'AZURE_CLIENT_ID' and 'AZURE_CLIENT_SECRET' are set, use environment. -func getAuthenticationMethod() authenticationMethod { - tenantID := os.Getenv("AZURE_TENANT_ID") - clientID := os.Getenv("AZURE_CLIENT_ID") - clientSecret := os.Getenv("AZURE_CLIENT_SECRET") - authMethod := os.Getenv("AZURE_AUTH_METHOD") - - if authMethod != "" { - switch strings.ToLower(authMethod) { - case "environment": - return environmentAuthenticationMethod - case "cli": - return cliAuthenticationMethod - } - } - - if tenantID != "" && clientID != "" && clientSecret != "" { - return environmentAuthenticationMethod - } - - return unknownAuthenticationMethod -} - -type azureCredential interface { - GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func getAzClientOpts() azcore.ClientOptions { envName := os.Getenv("AZURE_ENVIRONMENT") switch envName { @@ -203,77 +154,6 @@ func getAzClientOpts() azcore.ClientOptions { } } -<<<<<<< HEAD -======= -// getAzureCredential takes an authenticationMethod and returns an Azure credential or an error. -// If the method is unknown, Environment will be tested and if it returns an error CLI will be tested. -// If the method is specified, the specified method will be used and no other will be tested. -// This means the following default order of methods will be used if nothing else is defined: -// 1. Client credentials (FromEnvironment) -// 2. Client certificate (FromEnvironment) -// 3. 
Username password (FromEnvironment) -// 4. MSI (FromEnvironment) -// 5. CLI (FromCLI) -func getAzureCredential(method authenticationMethod) (azureCredential, error) { - clientOpts := getAzClientOpts() - - switch method { - case environmentAuthenticationMethod: - envCred, err := azidentity.NewEnvironmentCredential(&azidentity.EnvironmentCredentialOptions{ClientOptions: clientOpts}) - if err == nil { - return envCred, nil - } - - o := &azidentity.ManagedIdentityCredentialOptions{ClientOptions: clientOpts} - if ID, ok := os.LookupEnv(azureClientID); ok { - o.ID = azidentity.ClientID(ID) - } - msiCred, err := azidentity.NewManagedIdentityCredential(o) - if err == nil { - return msiCred, nil - } - - return nil, fmt.Errorf("failed to create default azure credential from env auth method: %w", err) - case cliAuthenticationMethod: - cred, err := azidentity.NewAzureCLICredential(nil) - if err != nil { - return nil, fmt.Errorf("failed to create default Azure credential from env auth method: %w", err) - } - return cred, nil - case unknownAuthenticationMethod: - break - default: - return nil, fmt.Errorf("you should never reach this") - } - - envCreds, err := azidentity.NewEnvironmentCredential(&azidentity.EnvironmentCredentialOptions{ClientOptions: clientOpts}) - if err == nil { - return envCreds, nil - } - - cliCreds, err := azidentity.NewAzureCLICredential(nil) - if err != nil { - return nil, fmt.Errorf("failed to create default Azure credential from env auth method: %w", err) - } - return cliCreds, nil -} - -func getKeysClient(vaultURL string) (*azkeys.Client, error) { - authMethod := getAuthenticationMethod() - cred, err := getAzureCredential(authMethod) - if err != nil { - return nil, err - } - - client, err := azkeys.NewClient(vaultURL, cred, nil) - if err != nil { - return nil, err - } - - return client, nil -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (a *azureVaultClient) fetchPublicKey(ctx context.Context) (crypto.PublicKey, error) { keyBundle, err := a.getKey(ctx) if err != nil { diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/kms.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/kms.go index 41fc883d82..7baf9504b8 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/kms.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/kms.go @@ -19,7 +19,6 @@ package kms import ( "context" "crypto" -<<<<<<< HEAD "errors" "fmt" "os/exec" @@ -27,12 +26,6 @@ import ( "github.com/sigstore/sigstore/pkg/signature" "github.com/sigstore/sigstore/pkg/signature/kms/cliplugin" -======= - "fmt" - "strings" - - "github.com/sigstore/sigstore/pkg/signature" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // ProviderNotFoundError indicates that no matching KMS provider was found @@ -58,7 +51,6 @@ func AddProvider(keyResourceID string, init ProviderInit) { var providersMap = map[string]ProviderInit{} // Get returns a KMS SignerVerifier for the given resource string and hash function. -<<<<<<< HEAD // If no matching built-in provider is found, it will try to use the plugin system as a provider. // It returns a ProviderNotFoundError in these situations: // - keyResourceID doesn't match any of our hard-coded providers' schemas, @@ -80,17 +72,6 @@ func Get(ctx context.Context, keyResourceID string, hashFunc crypto.Hash, opts . return nil, fmt.Errorf("%w: %w", &ProviderNotFoundError{ref: keyResourceID}, err) } return sv, err -======= -// If no matching provider is found, Get returns a ProviderNotFoundError. 
It -// also returns an error if initializing the SignerVerifier fails. -func Get(ctx context.Context, keyResourceID string, hashFunc crypto.Hash, opts ...signature.RPCOption) (SignerVerifier, error) { - for ref, pi := range providersMap { - if strings.HasPrefix(keyResourceID, ref) { - return pi(ctx, keyResourceID, hashFunc, opts...) - } - } - return nil, &ProviderNotFoundError{ref: keyResourceID} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // SupportedProviders returns list of initialized providers diff --git a/vendor/github.com/sigstore/timestamp-authority/pkg/verification/verify.go b/vendor/github.com/sigstore/timestamp-authority/pkg/verification/verify.go index 88aaf389eb..82bebdd2f8 100644 --- a/vendor/github.com/sigstore/timestamp-authority/pkg/verification/verify.go +++ b/vendor/github.com/sigstore/timestamp-authority/pkg/verification/verify.go @@ -250,11 +250,7 @@ func verifyTSRWithChain(ts *timestamp.Timestamp, opts VerifyOpts) error { return fmt.Errorf("error parsing hashed message: %w", err) } -<<<<<<< HEAD if len(opts.Roots) == 0 { -======= - if opts.Roots == nil || len(opts.Roots) == 0 { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return fmt.Errorf("no root certificates provided for verifying the certificate chain") } diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go index 9cd8df117c..7c058de374 100644 --- a/vendor/github.com/spf13/pflag/flag.go +++ b/vendor/github.com/spf13/pflag/flag.go @@ -160,11 +160,7 @@ type FlagSet struct { args []string // arguments after flags argsLenAtDash int // len(args) when a '--' was located when parsing, or -1 if no -- errorHandling ErrorHandling -<<<<<<< HEAD output io.Writer // nil means stderr; use Output() accessor -======= - output io.Writer // nil means stderr; use out() accessor ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) interspersed bool // allow interspersed option/non-option args normalizeNameFunc func(f *FlagSet, name string) NormalizedName @@ -259,27 +255,20 @@ func (f *FlagSet) normalizeFlagName(name string) NormalizedName { return n(f, name) } -<<<<<<< HEAD // Output returns the destination for usage and error messages. os.Stderr is returned if // output was not set or was set to nil. func (f *FlagSet) Output() io.Writer { -======= -func (f *FlagSet) out() io.Writer { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if f.output == nil { return os.Stderr } return f.output } -<<<<<<< HEAD // Name returns the name of the flag set. func (f *FlagSet) Name() string { return f.name } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // SetOutput sets the destination for usage and error messages. // If output is nil, os.Stderr is used. 
func (f *FlagSet) SetOutput(output io.Writer) { @@ -376,11 +365,7 @@ func (f *FlagSet) ShorthandLookup(name string) *Flag { } if len(name) > 1 { msg := fmt.Sprintf("can not look up shorthand which is more than one ASCII character: %q", name) -<<<<<<< HEAD fmt.Fprintf(f.Output(), msg) -======= - fmt.Fprintf(f.out(), msg) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) panic(msg) } c := name[0] @@ -504,11 +489,7 @@ func (f *FlagSet) Set(name, value string) error { } if flag.Deprecated != "" { -<<<<<<< HEAD fmt.Fprintf(f.Output(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) -======= - fmt.Fprintf(f.out(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return nil } @@ -549,11 +530,7 @@ func Set(name, value string) error { // otherwise, the default values of all defined flags in the set. func (f *FlagSet) PrintDefaults() { usages := f.FlagUsages() -<<<<<<< HEAD fmt.Fprint(f.Output(), usages) -======= - fmt.Fprint(f.out(), usages) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // defaultIsZeroValue returns true if the default value for this flag represents @@ -788,11 +765,7 @@ func PrintDefaults() { // defaultUsage is the default function to print a usage message. func defaultUsage(f *FlagSet) { -<<<<<<< HEAD fmt.Fprintf(f.Output(), "Usage of %s:\n", f.name) -======= - fmt.Fprintf(f.out(), "Usage of %s:\n", f.name) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) f.PrintDefaults() } @@ -878,11 +851,7 @@ func (f *FlagSet) AddFlag(flag *Flag) { _, alreadyThere := f.formal[normalizedFlagName] if alreadyThere { msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name) -<<<<<<< HEAD fmt.Fprintln(f.Output(), msg) -======= - fmt.Fprintln(f.out(), msg) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) panic(msg) // Happens only if flags are declared with identical names } if f.formal == nil { @@ -898,11 +867,7 @@ func (f *FlagSet) AddFlag(flag *Flag) { } if len(flag.Shorthand) > 1 { msg := fmt.Sprintf("%q shorthand is more than one ASCII character", flag.Shorthand) -<<<<<<< HEAD fmt.Fprintf(f.Output(), msg) -======= - fmt.Fprintf(f.out(), msg) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) panic(msg) } if f.shorthands == nil { @@ -912,11 +877,7 @@ func (f *FlagSet) AddFlag(flag *Flag) { used, alreadyThere := f.shorthands[c] if alreadyThere { msg := fmt.Sprintf("unable to redefine %q shorthand in %q flagset: it's already used for %q flag", c, f.name, used.Name) -<<<<<<< HEAD fmt.Fprintf(f.Output(), msg) -======= - fmt.Fprintf(f.out(), msg) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) panic(msg) } f.shorthands[c] = flag @@ -955,11 +916,7 @@ func VarP(value Value, name, shorthand, usage string) { func (f *FlagSet) failf(format string, a ...interface{}) error { err := fmt.Errorf(format, a...) 
if f.errorHandling != ContinueOnError { -<<<<<<< HEAD fmt.Fprintln(f.Output(), err) -======= - fmt.Fprintln(f.out(), err) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) f.usage() } return err @@ -1110,11 +1067,7 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse } if flag.ShorthandDeprecated != "" { -<<<<<<< HEAD fmt.Fprintf(f.Output(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated) -======= - fmt.Fprintf(f.out(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } err = fn(flag, value) diff --git a/vendor/github.com/spf13/pflag/ip.go b/vendor/github.com/spf13/pflag/ip.go index 41c2bce29a..06b8bcb572 100644 --- a/vendor/github.com/spf13/pflag/ip.go +++ b/vendor/github.com/spf13/pflag/ip.go @@ -16,12 +16,9 @@ func newIPValue(val net.IP, p *net.IP) *ipValue { func (i *ipValue) String() string { return net.IP(*i).String() } func (i *ipValue) Set(s string) error { -<<<<<<< HEAD if s == "" { return nil } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ip := net.ParseIP(strings.TrimSpace(s)) if ip == nil { return fmt.Errorf("failed to parse IP: %q", s) diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go index 0ab2b13385..d1ff0a96ba 100644 --- a/vendor/github.com/spf13/pflag/string_array.go +++ b/vendor/github.com/spf13/pflag/string_array.go @@ -31,15 +31,7 @@ func (s *stringArrayValue) Append(val string) error { func (s *stringArrayValue) Replace(val []string) error { out := make([]string, len(val)) for i, d := range val { -<<<<<<< HEAD out[i] = d -======= - var err error - out[i] = d - if err != nil { - return err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } *s.value = out return nil diff --git a/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/bundle.go b/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/bundle.go index 7f211724b7..ebd3cacd47 100644 --- a/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/bundle.go +++ b/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/bundle.go @@ -3,10 +3,7 @@ package jwtbundle import ( "crypto" "encoding/json" -<<<<<<< HEAD "errors" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "io" "os" "sync" @@ -73,11 +70,7 @@ func Parse(trustDomain spiffeid.TrustDomain, bundleBytes []byte) (*Bundle, error bundle := New(trustDomain) for i, key := range jwks.Keys { if err := bundle.AddJWTAuthority(key.KeyID, key.Key); err != nil { -<<<<<<< HEAD return nil, jwtbundleErr.New("error adding authority %d of JWKS: %v", i, errors.Unwrap(err)) -======= - return nil, jwtbundleErr.New("error adding authority %d of JWKS: %v", i, errs.Unwrap(err)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } diff --git a/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/doc.go b/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/doc.go index 44c67b1fd3..394878e1b2 100644 --- a/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/doc.go +++ b/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/doc.go @@ -5,29 +5,17 @@ // // You can create a new bundle for a specific trust domain: // -<<<<<<< HEAD // td := spiffeid.RequireTrustDomainFromString("example.org") -======= -// td := spiffeid.RequireTrustDomain("example.org") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // bundle := jwtbundle.New(td) // // Or you can load it from disk: // -<<<<<<< HEAD 
// td := spiffeid.RequireTrustDomainFromString("example.org") -======= -// td := spiffeid.RequireTrustDomain("example.org") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // bundle := jwtbundle.Load(td, "bundle.jwks") // // The bundle can be initialized with JWT authorities: // -<<<<<<< HEAD // td := spiffeid.RequireTrustDomainFromString("example.org") -======= -// td := spiffeid.RequireTrustDomain("example.org") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // var jwtAuthorities map[string]crypto.PublicKey = ... // bundle := jwtbundle.FromJWTAuthorities(td, jwtAuthorities) // diff --git a/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/bundle.go b/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/bundle.go index 9f807acfe2..13b103e24c 100644 --- a/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/bundle.go +++ b/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/bundle.go @@ -4,10 +4,7 @@ import ( "crypto" "crypto/x509" "encoding/json" -<<<<<<< HEAD "errors" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "io" "os" "sync" @@ -110,11 +107,7 @@ func Parse(trustDomain spiffeid.TrustDomain, bundleBytes []byte) (*Bundle, error bundle.AddX509Authority(key.Certificates[0]) case jwtSVIDUse: if err := bundle.AddJWTAuthority(key.KeyID, key.Key); err != nil { -<<<<<<< HEAD return nil, spiffebundleErr.New("error adding authority %d of JWKS: %v", i, errors.Unwrap(err)) -======= - return nil, spiffebundleErr.New("error adding authority %d of JWKS: %v", i, errs.Unwrap(err)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } } diff --git a/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/doc.go b/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/doc.go index 02ac3bf510..db9dcde31f 100644 --- a/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/doc.go +++ b/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/doc.go @@ -5,29 +5,17 @@ // // You can create a new bundle for a specific trust domain: // -<<<<<<< HEAD // td := spiffeid.RequireTrustDomainFromString("example.org") -======= -// td := spiffeid.RequireTrustDomain("example.org") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // bundle := spiffebundle.New(td) // // Or you can load it from disk: // -<<<<<<< HEAD // td := spiffeid.RequireTrustDomainFromString("example.org") -======= -// td := spiffeid.RequireTrustDomain("example.org") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // bundle := spiffebundle.Load(td, "bundle.json") // // The bundle can be initialized with X.509 or JWT authorities: // -<<<<<<< HEAD // td := spiffeid.RequireTrustDomainFromString("example.org") -======= -// td := spiffeid.RequireTrustDomain("example.org") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // var x509Authorities []*x509.Certificate = ... 
// bundle := spiffebundle.FromX509Authorities(td, x509Authorities) diff --git a/vendor/github.com/spiffe/go-spiffe/v2/bundle/x509bundle/doc.go b/vendor/github.com/spiffe/go-spiffe/v2/bundle/x509bundle/doc.go index 40869fb265..889554f822 100644 --- a/vendor/github.com/spiffe/go-spiffe/v2/bundle/x509bundle/doc.go +++ b/vendor/github.com/spiffe/go-spiffe/v2/bundle/x509bundle/doc.go @@ -5,29 +5,17 @@ // // You can create a new bundle for a specific trust domain: // -<<<<<<< HEAD // td := spiffeid.RequireTrustDomainFromString("example.org") -======= -// td := spiffeid.RequireTrustDomain("example.org") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // bundle := x509bundle.New(td) // // Or you can load it from disk: // -<<<<<<< HEAD // td := spiffeid.RequireTrustDomainFromString("example.org") -======= -// td := spiffeid.RequireTrustDomain("example.org") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // bundle := x509bundle.Load(td, "bundle.pem") // // The bundle can be initialized with X.509 authorities: // -<<<<<<< HEAD // td := spiffeid.RequireTrustDomainFromString("example.org") -======= -// td := spiffeid.RequireTrustDomain("example.org") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // var x509Authorities []*x509.Certificate = ... // bundle := x509bundle.FromX509Authorities(td, x509Authorities) // diff --git a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/addr.go b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/addr.go index 147f5feb1b..a0039b114c 100644 --- a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/addr.go +++ b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/addr.go @@ -22,7 +22,6 @@ func GetDefaultAddress() (string, bool) { // a Workload API endpoint exposed as either a Unix // Domain Socket or TCP socket. func ValidateAddress(addr string) error { -<<<<<<< HEAD _, err := TargetFromAddress(addr) return err } @@ -30,15 +29,6 @@ func ValidateAddress(addr string) error { // TargetFromAddress parses the endpoint address and returns a gRPC target // string for dialing. func TargetFromAddress(addr string) (string, error) { -======= - _, err := parseTargetFromStringAddr(addr) - return err -} - -// parseTargetFromStringAddr parses the endpoint address and returns a gRPC target -// string for dialing. 
-func parseTargetFromStringAddr(addr string) (string, error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) u, err := url.Parse(addr) if err != nil { return "", errors.New("workload endpoint socket is not a valid URI: " + err.Error()) diff --git a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client_posix.go b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client_posix.go index 9e11950062..58738b42e4 100644 --- a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client_posix.go +++ b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client_posix.go @@ -24,10 +24,6 @@ func (c *Client) setAddress() error { } var err error -<<<<<<< HEAD c.config.address, err = TargetFromAddress(c.config.address) -======= - c.config.address, err = parseTargetFromStringAddr(c.config.address) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err } diff --git a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client_windows.go b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client_windows.go index 49f8031a51..0a14266682 100644 --- a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client_windows.go +++ b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client_windows.go @@ -45,11 +45,7 @@ func (c *Client) setAddress() error { c.config.dialOptions = append(c.config.dialOptions, grpc.WithContextDialer(winio.DialPipeContext)) } -<<<<<<< HEAD c.config.address, err = TargetFromAddress(c.config.address) -======= - c.config.address, err = parseTargetFromStringAddr(c.config.address) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return err } diff --git a/vendor/github.com/stbenjam/no-sprintf-host-port/pkg/analyzer/analyzer.go b/vendor/github.com/stbenjam/no-sprintf-host-port/pkg/analyzer/analyzer.go index 6dcd54c9b0..ce57a4c210 100644 --- a/vendor/github.com/stbenjam/no-sprintf-host-port/pkg/analyzer/analyzer.go +++ b/vendor/github.com/stbenjam/no-sprintf-host-port/pkg/analyzer/analyzer.go @@ -29,11 +29,7 @@ func run(pass *analysis.Pass) (interface{}, error) { callExpr := node.(*ast.CallExpr) if p, f, ok := getCallExprFunction(callExpr); ok && p == "fmt" && f == "Sprintf" { if err := checkForHostPortConstruction(callExpr); err != nil { -<<<<<<< HEAD pass.Reportf(node.Pos(), "%s", err.Error()) -======= - pass.Reportf(node.Pos(), err.Error()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } }) @@ -56,11 +52,7 @@ func getCallExprFunction(callExpr *ast.CallExpr) (pkg string, fn string, result // getStringLiteral returns the value at a position if it's a string literal. func getStringLiteral(args []ast.Expr, pos int) (string, bool) { -<<<<<<< HEAD if len(args) < pos+1 { -======= - if len(args) < pos + 1 { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return "", false } @@ -80,15 +72,9 @@ func getStringLiteral(args []ast.Expr, pos int) (string, bool) { // essentially scheme://%s:, or scheme://user:pass@%s:. // // Matching requirements: -<<<<<<< HEAD // - Scheme as per RFC3986 is ALPHA *( ALPHA / DIGIT / "+" / "-" / "." ) // - A format string substitution in the host portion, preceded by an optional username/password@ // - A colon indicating a port will be specified -======= -// - Scheme as per RFC3986 is ALPHA *( ALPHA / DIGIT / "+" / "-" / "." 
) -// - A format string substitution in the host portion, preceded by an optional username/password@ -// - A colon indicating a port will be specified ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func checkForHostPortConstruction(sprintf *ast.CallExpr) error { fs, ok := getStringLiteral(sprintf.Args, 0) if !ok { @@ -107,8 +93,4 @@ func checkForHostPortConstruction(sprintf *ast.CallExpr) error { } return nil -<<<<<<< HEAD } -======= -} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go index 8bb51e9da1..3bb5e02ab5 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go @@ -51,12 +51,9 @@ const ( DefaultImagePullBackOffTimeout = 0 * time.Minute -<<<<<<< HEAD // Default maximum resolution timeout used by the resolution controller before timing out when exceeded DefaultMaximumResolutionTimeout = 1 * time.Minute -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) defaultTimeoutMinutesKey = "default-timeout-minutes" defaultServiceAccountKey = "default-service-account" defaultManagedByLabelValueKey = "default-managed-by-label-value" @@ -69,10 +66,7 @@ const ( defaultResolverTypeKey = "default-resolver-type" defaultContainerResourceRequirementsKey = "default-container-resource-requirements" defaultImagePullBackOffTimeout = "default-imagepullbackoff-timeout" -<<<<<<< HEAD defaultMaximumResolutionTimeout = "default-maximum-resolution-timeout" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // DefaultConfig holds all the default configurations for the config. @@ -93,10 +87,7 @@ type Defaults struct { DefaultResolverType string DefaultContainerResourceRequirements map[string]corev1.ResourceRequirements DefaultImagePullBackOffTimeout time.Duration -<<<<<<< HEAD DefaultMaximumResolutionTimeout time.Duration -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // GetDefaultsConfigName returns the name of the configmap containing all @@ -128,10 +119,7 @@ func (cfg *Defaults) Equals(other *Defaults) bool { other.DefaultMaxMatrixCombinationsCount == cfg.DefaultMaxMatrixCombinationsCount && other.DefaultResolverType == cfg.DefaultResolverType && other.DefaultImagePullBackOffTimeout == cfg.DefaultImagePullBackOffTimeout && -<<<<<<< HEAD other.DefaultMaximumResolutionTimeout == cfg.DefaultMaximumResolutionTimeout && -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) reflect.DeepEqual(other.DefaultForbiddenEnv, cfg.DefaultForbiddenEnv) } @@ -145,20 +133,13 @@ func NewDefaultsFromMap(cfgMap map[string]string) (*Defaults, error) { DefaultMaxMatrixCombinationsCount: DefaultMaxMatrixCombinationsCount, DefaultResolverType: DefaultResolverTypeValue, DefaultImagePullBackOffTimeout: DefaultImagePullBackOffTimeout, -<<<<<<< HEAD DefaultMaximumResolutionTimeout: DefaultMaximumResolutionTimeout, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if defaultTimeoutMin, ok := cfgMap[defaultTimeoutMinutesKey]; ok { timeout, err := strconv.ParseInt(defaultTimeoutMin, 10, 0) if err != nil { -<<<<<<< HEAD return nil, fmt.Errorf("failed parsing default config %q", defaultTimeoutMinutesKey) -======= - return nil, fmt.Errorf("failed parsing tracing config %q", defaultTimeoutMinutesKey) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } tc.DefaultTimeoutMinutes = int(timeout) } @@ -198,11 
+179,7 @@ func NewDefaultsFromMap(cfgMap map[string]string) (*Defaults, error) { if defaultMaxMatrixCombinationsCount, ok := cfgMap[defaultMaxMatrixCombinationsCountKey]; ok { matrixCombinationsCount, err := strconv.ParseInt(defaultMaxMatrixCombinationsCount, 10, 0) if err != nil { -<<<<<<< HEAD return nil, fmt.Errorf("failed parsing default config %q", defaultMaxMatrixCombinationsCountKey) -======= - return nil, fmt.Errorf("failed parsing tracing config %q", defaultMaxMatrixCombinationsCountKey) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } tc.DefaultMaxMatrixCombinationsCount = int(matrixCombinationsCount) } @@ -230,16 +207,11 @@ func NewDefaultsFromMap(cfgMap map[string]string) (*Defaults, error) { if defaultImagePullBackOff, ok := cfgMap[defaultImagePullBackOffTimeout]; ok { timeout, err := time.ParseDuration(defaultImagePullBackOff) if err != nil { -<<<<<<< HEAD return nil, fmt.Errorf("failed parsing default config %q", defaultImagePullBackOffTimeout) -======= - return nil, fmt.Errorf("failed parsing tracing config %q", defaultImagePullBackOffTimeout) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } tc.DefaultImagePullBackOffTimeout = timeout } -<<<<<<< HEAD if defaultMaximumResolutionTimeout, ok := cfgMap[defaultMaximumResolutionTimeout]; ok { timeout, err := time.ParseDuration(defaultMaximumResolutionTimeout) if err != nil { @@ -248,8 +220,6 @@ func NewDefaultsFromMap(cfgMap map[string]string) (*Defaults, error) { tc.DefaultMaximumResolutionTimeout = timeout } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return &tc, nil } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/param_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/param_types.go index e001f21515..286ed06d80 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/param_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/param_types.go @@ -146,11 +146,7 @@ func (ps ParamSpecs) ValidateNoDuplicateNames() *apis.FieldError { return errs } -<<<<<<< HEAD // validateParamEnums validates feature flag, duplication and allowed types for Param Enum -======= -// validateParamEnum validates feature flag, duplication and allowed types for Param Enum ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (ps ParamSpecs) validateParamEnums(ctx context.Context) *apis.FieldError { var errs *apis.FieldError for _, p := range ps { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/stepaction_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/stepaction_validation.go index 474691bdaa..66b8daab06 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/stepaction_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/stepaction_validation.go @@ -15,10 +15,7 @@ package v1alpha1 import ( "context" -<<<<<<< HEAD "fmt" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "strings" "github.com/tektoncd/pipeline/pkg/apis/config" @@ -132,7 +129,6 @@ func validateParameterVariables(ctx context.Context, sas StepActionSpec, params stringParameterNames := sets.NewString(stringParams.GetNames()...) arrayParameterNames := sets.NewString(arrayParams.GetNames()...) 
errs = errs.Also(v1.ValidateNameFormat(stringParameterNames.Insert(arrayParameterNames.List()...), objectParams)) -<<<<<<< HEAD errs = errs.Also(validateStepActionArrayUsage(sas, "params", arrayParameterNames)) return errs.Also(validateDefaultParameterReferences(params)) } @@ -207,9 +203,6 @@ func validateDefaultParameterReferences(params v1.ParamSpecs) *apis.FieldError { } return errs -======= - return errs.Also(validateStepActionArrayUsage(sas, "params", arrayParameterNames)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // validateObjectUsage validates the usage of individual attributes of an object param and the usage of the entire object diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go index 557ef24d3c..9844a84435 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go @@ -137,11 +137,7 @@ func (ps ParamSpecs) validateNoDuplicateNames() *apis.FieldError { return errs } -<<<<<<< HEAD // validateParamEnums validates feature flag, duplication and allowed types for Param Enum -======= -// validateParamEnum validates feature flag, duplication and allowed types for Param Enum ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (ps ParamSpecs) validateParamEnums(ctx context.Context) *apis.FieldError { var errs *apis.FieldError for _, p := range ps { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go index ea2ad966b1..a4c0725338 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go @@ -640,11 +640,7 @@ func validateTaskResultsVariables(ctx context.Context, steps []Step, results []T resultsNames.Insert(r.Name) } for idx, step := range steps { -<<<<<<< HEAD errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariablesWithDetail(step.Script, "results", resultsNames).ViaField("script").ViaFieldIndex("steps", idx)) -======= - errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(step.Script, "results", resultsNames).ViaField("script").ViaFieldIndex("steps", idx)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return errs } @@ -794,11 +790,7 @@ func validateStepVariables(ctx context.Context, step Step, prefix string, vars s errs := substitution.ValidateNoReferencesToUnknownVariables(step.Name, prefix, vars).ViaField("name") errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(step.Image, prefix, vars).ViaField("image")) errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(step.WorkingDir, prefix, vars).ViaField("workingDir")) -<<<<<<< HEAD errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariablesWithDetail(step.Script, prefix, vars).ViaField("script")) -======= - errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(step.Script, prefix, vars).ViaField("script")) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for i, cmd := range step.Command { errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(cmd, prefix, vars).ViaFieldIndex("command", i)) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go 
b/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go index 6958cae036..df428c31bf 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go @@ -54,7 +54,6 @@ var intIndexRegex = regexp.MustCompile(intIndex) // - prefix: the prefix of the substitutable variable, e.g. "params" or "context.pipeline" // - vars: names of known variables func ValidateNoReferencesToUnknownVariables(value, prefix string, vars sets.String) *apis.FieldError { -<<<<<<< HEAD return validateNoReferencesToUnknownVariables(value, prefix, vars, false) } @@ -65,8 +64,6 @@ func ValidateNoReferencesToUnknownVariablesWithDetail(value, prefix string, vars } func validateNoReferencesToUnknownVariables(value, prefix string, vars sets.String, withDetail bool) *apis.FieldError { -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if vs, present, errString := ExtractVariablesFromString(value, prefix); present { if errString != "" { return &apis.FieldError{ @@ -77,7 +74,6 @@ func validateNoReferencesToUnknownVariables(value, prefix string, vars sets.Stri for _, v := range vs { v = TrimArrayIndex(v) if !vars.Has(v) { -<<<<<<< HEAD var msg string if withDetail { msg = fmt.Sprintf("non-existent variable `%s` in %q", v, value) @@ -86,10 +82,6 @@ func validateNoReferencesToUnknownVariables(value, prefix string, vars sets.Stri } return &apis.FieldError{ Message: msg, -======= - return &apis.FieldError{ - Message: fmt.Sprintf("non-existent variable in %q", value), ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Empty path is required to make the `ViaField`, … work Paths: []string{""}, } diff --git a/vendor/github.com/tektoncd/pipeline/test/e2e-tests.sh b/vendor/github.com/tektoncd/pipeline/test/e2e-tests.sh index 16d8b4a319..49f7c72a51 100644 --- a/vendor/github.com/tektoncd/pipeline/test/e2e-tests.sh +++ b/vendor/github.com/tektoncd/pipeline/test/e2e-tests.sh @@ -44,15 +44,10 @@ fi header "Setting up environment" -<<<<<<< HEAD set -x install_pipeline_crd export SYSTEM_NAMESPACE=tekton-pipelines set +x -======= -install_pipeline_crd -export SYSTEM_NAMESPACE=tekton-pipelines ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) failed=0 diff --git a/vendor/github.com/tektoncd/pipeline/test/featureflags.go b/vendor/github.com/tektoncd/pipeline/test/featureflags.go index d79115f4a9..be0944eb71 100644 --- a/vendor/github.com/tektoncd/pipeline/test/featureflags.go +++ b/vendor/github.com/tektoncd/pipeline/test/featureflags.go @@ -70,11 +70,7 @@ func requireAnyGate(gates map[string]string) func(context.Context, *testing.T, * } } -<<<<<<< HEAD // requireAllGates returns a setup func that will skip the current -======= -// requireAllgates returns a setup func that will skip the current ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // test if all of the feature-flags in the given map don't match // what's in the feature-flags ConfigMap. It will fatally fail // the test if it cannot get the feature-flag configmap. 
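The substitution.go hunk above keeps the HEAD-side refactor: both exported validators now funnel into one unexported helper carrying a `withDetail` flag, and only `ValidateNoReferencesToUnknownVariablesWithDetail` names the offending variable in the error, which is the variant the task_validation.go hunks switch step scripts to. A minimal, self-contained sketch of that flag-driven message pattern follows; `fieldError` and the pre-extracted `found` slice are stand-ins (assumptions) for Tekton's `apis.FieldError` and `ExtractVariablesFromString`:

```go
package main

import "fmt"

// fieldError stands in for knative.dev/pkg/apis.FieldError in this sketch.
type fieldError struct{ Message string }

// validateVars mirrors the vendored refactor: a single helper whose
// withDetail flag decides how specific the error message is.
func validateVars(value string, known map[string]bool, found []string, withDetail bool) *fieldError {
	for _, v := range found {
		if known[v] {
			continue
		}
		if withDetail {
			// Detailed form: name the unknown variable as well as the value.
			return &fieldError{Message: fmt.Sprintf("non-existent variable `%s` in %q", v, value)}
		}
		return &fieldError{Message: fmt.Sprintf("non-existent variable in %q", value)}
	}
	return nil
}

func main() {
	known := map[string]bool{"params.image": true}
	// In the real code the variable names are extracted from value first.
	for _, detail := range []bool{true, false} {
		if err := validateVars("$(results.digest)", known, []string{"results.digest"}, detail); err != nil {
			fmt.Println(err.Message)
		}
	}
}
```

The detailed form matters most for step scripts, where the quoted value can be an entire shell script rather than a single field.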
diff --git a/vendor/github.com/tektoncd/pipeline/test/presubmit-tests.sh b/vendor/github.com/tektoncd/pipeline/test/presubmit-tests.sh index cccbf807f0..ba72739e3d 100644 --- a/vendor/github.com/tektoncd/pipeline/test/presubmit-tests.sh +++ b/vendor/github.com/tektoncd/pipeline/test/presubmit-tests.sh @@ -29,23 +29,6 @@ export DISABLE_YAML_LINTING=1 source $(git rev-parse --show-toplevel)/vendor/github.com/tektoncd/plumbing/scripts/presubmit-tests.sh -<<<<<<< HEAD -======= -function check_go_lint() { - header "Testing if golint has been done" - - # deadline of 5m, and show all the issues - GOFLAGS="-mod=mod" make golangci-lint-check - - if [[ $? != 0 ]]; then - results_banner "Go Lint" 1 - exit 1 - fi - - results_banner "Go Lint" 0 -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) function check_yaml_lint() { header "Testing if yamllint has been done" @@ -80,10 +63,6 @@ EOF } function post_build_tests() { -<<<<<<< HEAD -======= - check_go_lint ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) check_yaml_lint ko_resolve } diff --git a/vendor/github.com/tektoncd/plumbing/.gitignore b/vendor/github.com/tektoncd/plumbing/.gitignore index 608903f21b..acab30f0bd 100644 --- a/vendor/github.com/tektoncd/plumbing/.gitignore +++ b/vendor/github.com/tektoncd/plumbing/.gitignore @@ -13,12 +13,9 @@ **/.bin **/.DS_Store -<<<<<<< HEAD # Release **/source.tar.gz tekton/**/.ko.yaml tekton/**/vendor/ -.release-*/ -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) +.release-*/ \ No newline at end of file diff --git a/vendor/github.com/tektoncd/plumbing/OWNERS b/vendor/github.com/tektoncd/plumbing/OWNERS index 7235cb16d0..500e269817 100644 --- a/vendor/github.com/tektoncd/plumbing/OWNERS +++ b/vendor/github.com/tektoncd/plumbing/OWNERS @@ -3,7 +3,6 @@ approvers: - abayer - afrittoli -<<<<<<< HEAD - AlanGreene - bobcatfish - chitrangpatel @@ -17,13 +16,3 @@ approvers: # Alumni ❤️ # - nikhil-thomas # - dlorenc -======= -- bobcatfish -- dibyom -- dlorenc -- jerop -- nikhil-thomas -- savitaashture -- vdemeester -- wlynch ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/tetafro/godot/.goreleaser.yml b/vendor/github.com/tetafro/godot/.goreleaser.yml index bfb25164e7..2f0c2466a5 100644 --- a/vendor/github.com/tetafro/godot/.goreleaser.yml +++ b/vendor/github.com/tetafro/godot/.goreleaser.yml @@ -1,8 +1,5 @@ -<<<<<<< HEAD version: 2 -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) builds: - dir: ./cmd/godot checksum: diff --git a/vendor/github.com/tetafro/godot/getters.go b/vendor/github.com/tetafro/godot/getters.go index d5b3ba1a53..eff836b405 100644 --- a/vendor/github.com/tetafro/godot/getters.go +++ b/vendor/github.com/tetafro/godot/getters.go @@ -6,10 +6,7 @@ import ( "go/ast" "go/token" "os" -<<<<<<< HEAD "path/filepath" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "regexp" "strings" ) @@ -40,16 +37,10 @@ func newParsedFile(file *ast.File, fset *token.FileSet) (*parsedFile, error) { file: file, } -<<<<<<< HEAD -======= - var err error - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Read original file. This is necessary for making a replacements for // inline comments. I couldn't find a better way to get original line // with code and comment without reading the file. Function `Format` // from "go/format" won't help here if the original file is not gofmt-ed. 
-<<<<<<< HEAD filename := getFilename(fset, file) @@ -60,37 +51,10 @@ func newParsedFile(file *ast.File, fset *token.FileSet) (*parsedFile, error) { var err error pf.lines, err = readFile(filename) -======= - pf.lines, err = readFile(file, fset) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, fmt.Errorf("read file: %w", err) } -<<<<<<< HEAD -======= - // Dirty hack. For some cases Go generates temporary files during - // compilation process if there is a cgo block in the source file. Some of - // these temporary files are just copies of original source files but with - // new generated comments at the top. Because of them the content differs - // from AST. For some reason it differs only in golangci-lint. I failed to - // find out the exact description of the process, so let's just skip files - // generated by cgo. - if isCgoGenerated(pf.lines) { - return nil, errUnsuitableInput - } - - // Check consistency to avoid checking slice indexes in each function. - // Note that `PositionFor` is used with `adjusted=false` to skip `//line` - // directives that can set references to other files (e.g. templates) - // instead of the real ones, and break consistency here. - // Issue: https://github.com/tetafro/godot/issues/32 - lastComment := pf.file.Comments[len(pf.file.Comments)-1] - if p := pf.fset.PositionFor(lastComment.End(), false); len(pf.lines) < p.Line { - return nil, fmt.Errorf("inconsistency between file and AST: %s", p.Filename) - } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return &pf, nil } @@ -267,21 +231,12 @@ func getText(comment *ast.CommentGroup, exclude []*regexp.Regexp) (s string) { } // readFile reads file and returns its lines as strings. -<<<<<<< HEAD func readFile(filename string) ([]string, error) { f, err := os.ReadFile(filepath.Clean(filename)) if err != nil { return nil, err //nolint:wrapcheck } -======= -func readFile(file *ast.File, fset *token.FileSet) ([]string, error) { - fname := fset.File(file.Package) - f, err := os.ReadFile(fname.Name()) - if err != nil { - return nil, err //nolint:wrapcheck - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return strings.Split(string(f), "\n"), nil } @@ -307,7 +262,6 @@ func matchAny(s string, rr []*regexp.Regexp) bool { return false } -<<<<<<< HEAD func getFilename(fset *token.FileSet, file *ast.File) string { filename := fset.PositionFor(file.Pos(), true).Filename if !strings.HasSuffix(filename, ".go") { @@ -315,13 +269,4 @@ func getFilename(fset *token.FileSet, file *ast.File) string { } return filename -======= -func isCgoGenerated(lines []string) bool { - for i := range lines { - if strings.Contains(lines[i], "Code generated by cmd/cgo") { - return true - } - } - return false ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/github.com/timakin/bodyclose/passes/bodyclose/bodyclose.go b/vendor/github.com/timakin/bodyclose/passes/bodyclose/bodyclose.go index 0cccd0ab19..ae860d728c 100644 --- a/vendor/github.com/timakin/bodyclose/passes/bodyclose/bodyclose.go +++ b/vendor/github.com/timakin/bodyclose/passes/bodyclose/bodyclose.go @@ -23,11 +23,7 @@ var Analyzer = &analysis.Analyzer{ } const ( -<<<<<<< HEAD Doc = "checks whether HTTP response body is closed successfully" -======= - Doc = "bodyclose checks whether HTTP response body is closed successfully" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) nethttpPath = "net/http" closeMethod = "Close" @@ -118,7 +114,6 @@ func (r *runner) isopen(b *ssa.BasicBlock, i int) bool { 
if len(*call.Referrers()) == 0 { return true } -<<<<<<< HEAD if instr, ok := b.Instrs[i].(*ssa.Call); ok { // httptest.ResponseRecorder is not needed closing the response body because no-op. @@ -131,8 +126,6 @@ func (r *runner) isopen(b *ssa.BasicBlock, i int) bool { } } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) cRefs := *call.Referrers() for _, cRef := range cRefs { val, ok := r.getResVal(cRef) @@ -168,7 +161,6 @@ func (r *runner) isopen(b *ssa.BasicBlock, i int) bool { return r.calledInFunc(f, called) } -<<<<<<< HEAD // Case when calling Close() from struct field or method if s, ok := aref.(*ssa.Store); ok { if f, ok := s.Addr.(*ssa.FieldAddr); ok { @@ -185,8 +177,6 @@ func (r *runner) isopen(b *ssa.BasicBlock, i int) bool { } } } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } case *ssa.Call, *ssa.Defer: // Indirect function call // Hacky way to extract CommonCall @@ -233,7 +223,6 @@ func (r *runner) isopen(b *ssa.BasicBlock, i int) bool { } } } -<<<<<<< HEAD case *ssa.Phi: // Called in the higher-level block if resRef.Referrers() == nil { return true @@ -262,8 +251,6 @@ func (r *runner) isopen(b *ssa.BasicBlock, i int) bool { } } } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } } @@ -276,13 +263,9 @@ func (r *runner) getReqCall(instr ssa.Instruction) (*ssa.Call, bool) { if !ok { return nil, false } -<<<<<<< HEAD callType := call.Type().String() if !strings.Contains(callType, r.resTyp.String()) || strings.Contains(callType, "net/http.ResponseController") { -======= - if !strings.Contains(call.Type().String(), r.resTyp.String()) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil, false } return call, true diff --git a/vendor/github.com/tomarrell/wrapcheck/v2/wrapcheck/wrapcheck.go b/vendor/github.com/tomarrell/wrapcheck/v2/wrapcheck/wrapcheck.go index 5ccac50fd7..127f7efd84 100644 --- a/vendor/github.com/tomarrell/wrapcheck/v2/wrapcheck/wrapcheck.go +++ b/vendor/github.com/tomarrell/wrapcheck/v2/wrapcheck/wrapcheck.go @@ -44,13 +44,10 @@ type WrapcheckConfig struct { // list to your config. IgnoreSigs []string `mapstructure:"ignoreSigs" yaml:"ignoreSigs"` -<<<<<<< HEAD // ExtraIgnoreSigs defines an additional list of signatures to ignore, on // top of IgnoreSigs. ExtraIgnoreSigs []string `mapstructure:"extraIgnoreSigs" yaml:"extraIgnoreSigs"` -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // IgnoreSigRegexps defines a list of regular expressions which if matched // to the signature of the function call returning the error, will be ignored. This // allows you to specify functions that wrapcheck will not report as @@ -283,11 +280,7 @@ func reportUnwrapped( // Check for ignored signatures fnSig := pass.TypesInfo.ObjectOf(sel.Sel).String() -<<<<<<< HEAD if contains(cfg.IgnoreSigs, fnSig) || contains(cfg.ExtraIgnoreSigs, fnSig) { -======= - if contains(cfg.IgnoreSigs, fnSig) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return } else if containsMatch(regexpsSig, fnSig) { return diff --git a/vendor/github.com/ultraware/funlen/README.md b/vendor/github.com/ultraware/funlen/README.md index 019836afd5..8bbe7eab68 100644 --- a/vendor/github.com/ultraware/funlen/README.md +++ b/vendor/github.com/ultraware/funlen/README.md @@ -16,7 +16,6 @@ The default values are used internally, but might to be adjusted for your specif Funlen is included in [golangci-lint](https://github.com/golangci/golangci-lint/). Install it and enable funlen. 
-<<<<<<< HEAD ## Configuration Available configuration options: @@ -37,8 +36,6 @@ linters-settings: ignore-comments: false ``` -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) # Exclude for tests golangci-lint offers a way to exclude linters in certain cases. More info can be found here: https://golangci-lint.run/usage/configuration/#issues-configuration. diff --git a/vendor/github.com/ultraware/funlen/main.go b/vendor/github.com/ultraware/funlen/main.go deleted file mode 100644 index b68ddb926f..0000000000 --- a/vendor/github.com/ultraware/funlen/main.go +++ /dev/null @@ -1,124 +0,0 @@ -package funlen - -import ( - "fmt" - "go/ast" - "go/token" - "reflect" -) - -const ( - defaultLineLimit = 60 - defaultStmtLimit = 40 -) - -// Run runs this linter on the provided code -func Run(file *ast.File, fset *token.FileSet, lineLimit int, stmtLimit int, ignoreComments bool) []Message { - if lineLimit == 0 { - lineLimit = defaultLineLimit - } - if stmtLimit == 0 { - stmtLimit = defaultStmtLimit - } - - cmap := ast.NewCommentMap(fset, file, file.Comments) - - var msgs []Message - for _, f := range file.Decls { - decl, ok := f.(*ast.FuncDecl) - if !ok || decl.Body == nil { // decl.Body can be nil for e.g. cgo - continue - } - - if stmtLimit > 0 { - if stmts := parseStmts(decl.Body.List); stmts > stmtLimit { - msgs = append(msgs, makeStmtMessage(fset, decl.Name, stmts, stmtLimit)) - continue - } - } - - if lineLimit > 0 { - if lines := getLines(fset, decl, cmap.Filter(decl), ignoreComments); lines > lineLimit { - msgs = append(msgs, makeLineMessage(fset, decl.Name, lines, lineLimit)) - } - } - } - - return msgs -} - -// Message contains a message -type Message struct { - Pos token.Position - Message string -} - -func makeLineMessage(fset *token.FileSet, funcInfo *ast.Ident, lines, lineLimit int) Message { - return Message{ - fset.Position(funcInfo.Pos()), - fmt.Sprintf("Function '%s' is too long (%d > %d)\n", funcInfo.Name, lines, lineLimit), - } -} - -func makeStmtMessage(fset *token.FileSet, funcInfo *ast.Ident, stmts, stmtLimit int) Message { - return Message{ - fset.Position(funcInfo.Pos()), - fmt.Sprintf("Function '%s' has too many statements (%d > %d)\n", funcInfo.Name, stmts, stmtLimit), - } -} - -func getLines(fset *token.FileSet, f *ast.FuncDecl, cmap ast.CommentMap, ignoreComments bool) int { // nolint: interfacer - var lineCount int - var commentCount int - - lineCount = fset.Position(f.End()).Line - fset.Position(f.Pos()).Line - 1 - - if !ignoreComments { - return lineCount - } - - for _, c := range cmap.Comments() { - // If the CommenGroup's lines are inside the function - // count how many comments are in the CommentGroup - if (fset.Position(c.Pos()).Line > fset.Position(f.Pos()).Line) && - (fset.Position(c.End()).Line < fset.Position(f.End()).Line) { - commentCount += len(c.List) - } - } - - return lineCount - commentCount -} - -func parseStmts(s []ast.Stmt) (total int) { - for _, v := range s { - total++ - switch stmt := v.(type) { - case *ast.BlockStmt: - total += parseStmts(stmt.List) - 1 - case *ast.ForStmt, *ast.RangeStmt, *ast.IfStmt, - *ast.SwitchStmt, *ast.TypeSwitchStmt, *ast.SelectStmt: - total += parseBodyListStmts(stmt) - case *ast.CaseClause: - total += parseStmts(stmt.Body) - case *ast.AssignStmt: - total += checkInlineFunc(stmt.Rhs[0]) - case *ast.GoStmt: - total += checkInlineFunc(stmt.Call.Fun) - case *ast.DeferStmt: - total += checkInlineFunc(stmt.Call.Fun) - } - } - return -} - -func checkInlineFunc(stmt ast.Expr) int { - if block, ok := 
stmt.(*ast.FuncLit); ok { - return parseStmts(block.Body.List) - } - return 0 -} - -func parseBodyListStmts(t interface{}) int { - i := reflect.ValueOf(t).Elem().FieldByName(`Body`).Elem().FieldByName(`List`).Interface() - return parseStmts(i.([]ast.Stmt)) -} diff --git a/vendor/github.com/ultraware/whitespace/README.md b/vendor/github.com/ultraware/whitespace/README.md index 4a69805049..660c13d78c 100644 --- a/vendor/github.com/ultraware/whitespace/README.md +++ b/vendor/github.com/ultraware/whitespace/README.md @@ -4,10 +4,6 @@ Whitespace is a linter that checks for unnecessary newlines at the start and end ## Installation guide -<<<<<<< HEAD To install as a standalone linter, run `go install github.com/ultraware/whitespace/cmd/whitespace@latest`. -======= -To install as a standalone linter, run `go install github.com/ultraware/whitespace`. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Whitespace is also included in [golangci-lint](https://github.com/golangci/golangci-lint/). Install it and enable whitespace. diff --git a/vendor/github.com/ultraware/whitespace/whitespace.go b/vendor/github.com/ultraware/whitespace/whitespace.go index bec0531110..44e68124c3 100644 --- a/vendor/github.com/ultraware/whitespace/whitespace.go +++ b/vendor/github.com/ultraware/whitespace/whitespace.go @@ -9,58 +9,8 @@ import ( "golang.org/x/tools/go/analysis" ) -<<<<<<< HEAD // Settings contains settings for edge-cases. type Settings struct { -======= -// MessageType describes what should happen to fix the warning. -type MessageType uint8 - -// List of MessageTypes. -const ( - MessageTypeRemove MessageType = iota + 1 - MessageTypeAdd -) - -// RunningMode describes the mode the linter is run in. This can be either -// native or golangci-lint. -type RunningMode uint8 - -const ( - RunningModeNative RunningMode = iota - RunningModeGolangCI -) - -// Message contains a message and diagnostic information. -type Message struct { - // Diagnostic is what position the diagnostic should be put at. This isn't - // always the same as the fix start, f.ex. when we fix trailing newlines we - // put the diagnostic at the right bracket but we fix between the end of the - // last statement and the bracket. - Diagnostic token.Pos - - // FixStart is the span start of the fix. - FixStart token.Pos - - // FixEnd is the span end of the fix. - FixEnd token.Pos - - // LineNumbers represent the actual line numbers in the file. This is set - // when finding the diagnostic to make it easier to suggest fixes in - // golangci-lint. - LineNumbers []int - - // MessageType represents the type of message it is. - MessageType MessageType - - // Message is the diagnostic to show. - Message string -} - -// Settings contains settings for edge-cases. 
-type Settings struct { - Mode RunningMode ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) MultiIf bool MultiFunc bool } @@ -91,25 +41,16 @@ func flags(settings *Settings) flag.FlagSet { return *flags } -<<<<<<< HEAD func Run(pass *analysis.Pass, settings *Settings) { for _, file := range pass.Files { filename := pass.Fset.Position(file.Pos()).Filename -======= -func Run(pass *analysis.Pass, settings *Settings) []Message { - messages := []Message{} - - for _, file := range pass.Files { - filename := pass.Fset.Position(file.Pos()).Filename ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !strings.HasSuffix(filename, ".go") { continue } fileMessages := runFile(file, pass.Fset, *settings) -<<<<<<< HEAD for _, message := range fileMessages { pass.Report(message) } @@ -118,38 +59,6 @@ func Run(pass *analysis.Pass, settings *Settings) []Message { func runFile(file *ast.File, fset *token.FileSet, settings Settings) []analysis.Diagnostic { var messages []analysis.Diagnostic -======= - if settings.Mode == RunningModeGolangCI { - messages = append(messages, fileMessages...) - continue - } - - for _, message := range fileMessages { - pass.Report(analysis.Diagnostic{ - Pos: message.Diagnostic, - Category: "whitespace", - Message: message.Message, - SuggestedFixes: []analysis.SuggestedFix{ - { - TextEdits: []analysis.TextEdit{ - { - Pos: message.FixStart, - End: message.FixEnd, - NewText: []byte("\n"), - }, - }, - }, - }, - }) - } - } - - return messages -} - -func runFile(file *ast.File, fset *token.FileSet, settings Settings) []Message { - var messages []Message ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, f := range file.Decls { decl, ok := f.(*ast.FuncDecl) @@ -169,11 +78,7 @@ func runFile(file *ast.File, fset *token.FileSet, settings Settings) []Message { type visitor struct { comments []*ast.CommentGroup fset *token.FileSet -<<<<<<< HEAD messages []analysis.Diagnostic -======= - messages []Message ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) wantNewline map[*ast.BlockStmt]bool settings Settings } @@ -207,7 +112,6 @@ func (v *visitor) Visit(node ast.Node) ast.Visitor { startMsg := checkStart(v.fset, opening, first) if wantNewline && startMsg == nil && len(stmt.List) >= 1 { -<<<<<<< HEAD v.messages = append(v.messages, analysis.Diagnostic{ Pos: opening, Message: "multi-line statement should be followed by a newline", @@ -218,15 +122,6 @@ func (v *visitor) Visit(node ast.Node) ast.Visitor { NewText: []byte("\n"), }}, }}, -======= - v.messages = append(v.messages, Message{ - Diagnostic: opening, - FixStart: stmt.List[0].Pos(), - FixEnd: stmt.List[0].Pos(), - LineNumbers: []int{v.fset.PositionFor(stmt.List[0].Pos(), false).Line}, - MessageType: MessageTypeAdd, - Message: "multi-line statement should be followed by a newline", ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }) } else if !wantNewline && startMsg != nil { v.messages = append(v.messages, *startMsg) @@ -249,11 +144,7 @@ func checkMultiLine(v *visitor, body *ast.BlockStmt, stmtStart ast.Node) { } func posLine(fset *token.FileSet, pos token.Pos) int { -<<<<<<< HEAD return fset.Position(pos).Line -======= - return fset.PositionFor(pos, false).Line ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func firstAndLast(comments []*ast.CommentGroup, fset *token.FileSet, stmt *ast.BlockStmt) (token.Pos, ast.Node, ast.Node) { @@ -300,17 +191,12 @@ func firstAndLast(comments []*ast.CommentGroup, fset *token.FileSet, stmt *ast.B return openingPos, first, last } -<<<<<<< HEAD 
func checkStart(fset *token.FileSet, start token.Pos, first ast.Node) *analysis.Diagnostic { -======= -func checkStart(fset *token.FileSet, start token.Pos, first ast.Node) *Message { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if first == nil { return nil } if posLine(fset, start)+1 < posLine(fset, first.Pos()) { -<<<<<<< HEAD return &analysis.Diagnostic{ Pos: start, Message: "unnecessary leading newline", @@ -321,32 +207,18 @@ func checkStart(fset *token.FileSet, start token.Pos, first ast.Node) *Message { NewText: []byte("\n"), }}, }}, -======= - return &Message{ - Diagnostic: start, - FixStart: start, - FixEnd: first.Pos(), - LineNumbers: linesBetween(fset, start, first.Pos()), - MessageType: MessageTypeRemove, - Message: "unnecessary leading newline", ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } return nil } -<<<<<<< HEAD func checkEnd(fset *token.FileSet, end token.Pos, last ast.Node) *analysis.Diagnostic { -======= -func checkEnd(fset *token.FileSet, end token.Pos, last ast.Node) *Message { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if last == nil { return nil } if posLine(fset, end)-1 > posLine(fset, last.End()) { -<<<<<<< HEAD return &analysis.Diagnostic{ Pos: end, Message: "unnecessary trailing newline", @@ -357,32 +229,8 @@ func checkEnd(fset *token.FileSet, end token.Pos, last ast.Node) *Message { NewText: []byte("\n"), }}, }}, -======= - return &Message{ - Diagnostic: end, - FixStart: last.End(), - FixEnd: end, - LineNumbers: linesBetween(fset, last.End(), end), - MessageType: MessageTypeRemove, - Message: "unnecessary trailing newline", ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } return nil } -<<<<<<< HEAD -======= - -func linesBetween(fset *token.FileSet, a, b token.Pos) []int { - lines := []int{} - aPosition := fset.PositionFor(a, false) - bPosition := fset.PositionFor(b, false) - - for i := aPosition.Line + 1; i < bPosition.Line; i++ { - lines = append(lines, i) - } - - return lines -} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/uudashr/gocognit/README.md b/vendor/github.com/uudashr/gocognit/README.md index 925b9ce33a..415e81e739 100644 --- a/vendor/github.com/uudashr/gocognit/README.md +++ b/vendor/github.com/uudashr/gocognit/README.md @@ -147,24 +147,14 @@ The following structures receive a nesting increment commensurate with their nes ## Installation -<<<<<<< HEAD ```shell go install github.com/uudashr/gocognit/cmd/gocognit@latest -======= -``` -$ go install github.com/uudashr/gocognit/cmd/gocognit@latest ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ``` or -<<<<<<< HEAD ```shell go get github.com/uudashr/gocognit/cmd/gocognit -======= -``` -$ go get github.com/uudashr/gocognit/cmd/gocognit ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ``` ## Usage @@ -179,7 +169,6 @@ Usage: Flags: -<<<<<<< HEAD -over N show functions with complexity > N only and return exit code 1 if the output is non-empty -top N show the top N most complex functions only @@ -191,16 +180,6 @@ Flags: -f format string the format to use (default "{{.Complexity}} {{.PkgName}} {{.FuncName}} {{.Pos}}") -ignore expr ignore files matching the given regexp -======= - -over N show functions with complexity > N only - and return exit code 1 if the output is non-empty - -top N show the top N most complex functions only - -avg show the average complexity over all functions, - not depending on whether -over or -top are set - -json encode the output as JSON - -f format string the 
format to use - (default "{{.PkgName}}.{{.FuncName}}:{{.Complexity}}:{{.Pos}}") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) The (default) output fields for each line are: @@ -215,7 +194,6 @@ or equal to The struct being passed to the template is: type Stat struct { -<<<<<<< HEAD PkgName string FuncName string Complexity int @@ -234,12 +212,6 @@ The struct being passed to the template is: Offset int Line int Column int -======= - PkgName string - FuncName string - Complexity int - Pos token.Position ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } ``` @@ -268,7 +240,6 @@ func IgnoreMe() { } ``` -<<<<<<< HEAD ## Diagnostic To understand how the complexity are calculated, we can enable the diagnostic by using `-d` flag. @@ -339,8 +310,6 @@ It will show the diagnostic output in JSON format ``` -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## Related project - [Gocyclo](https://github.com/fzipp/gocyclo) where the code are based on. - [Cognitive Complexity: A new way of measuring understandability](https://www.sonarsource.com/docs/CognitiveComplexity.pdf) white paper by G. Ann Campbell. diff --git a/vendor/github.com/uudashr/gocognit/doc.go b/vendor/github.com/uudashr/gocognit/doc.go index 036fbe86fe..797b192282 100644 --- a/vendor/github.com/uudashr/gocognit/doc.go +++ b/vendor/github.com/uudashr/gocognit/doc.go @@ -1,7 +1,3 @@ -<<<<<<< HEAD // Package gocognit defines Analyzer other utilities to checks and calculate // the complexity of function based on "cognitive complexity" methods. -======= -// Package gocognit defines Analyzer other utilities to checks and calculate the complexity of function based on "cognitive complexity" methods. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) package gocognit diff --git a/vendor/github.com/uudashr/gocognit/gocognit.go b/vendor/github.com/uudashr/gocognit/gocognit.go index d9759a8d48..e51ee2a042 100644 --- a/vendor/github.com/uudashr/gocognit/gocognit.go +++ b/vendor/github.com/uudashr/gocognit/gocognit.go @@ -4,10 +4,7 @@ import ( "fmt" "go/ast" "go/token" -<<<<<<< HEAD "strconv" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" @@ -16,7 +13,6 @@ import ( // Stat is statistic of the complexity. type Stat struct { -<<<<<<< HEAD PkgName string FuncName string Complexity int @@ -69,12 +65,6 @@ func (d Diagnostic) String() string { } return fmt.Sprintf("+%d (nesting=%d)", d.Inc, d.Nesting) -======= - PkgName string - FuncName string - Complexity int - Pos token.Position ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (s Stat) String() string { @@ -83,14 +73,11 @@ func (s Stat) String() string { // ComplexityStats builds the complexity statistics. func ComplexityStats(f *ast.File, fset *token.FileSet, stats []Stat) []Stat { -<<<<<<< HEAD return ComplexityStatsWithDiagnostic(f, fset, stats, false) } // ComplexityStatsWithDiagnostic builds the complexity statistics with diagnostic. 
func ComplexityStatsWithDiagnostic(f *ast.File, fset *token.FileSet, stats []Stat, enableDiagnostics bool) []Stat { -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, decl := range f.Decls { if fn, ok := decl.(*ast.FuncDecl); ok { d := parseDirective(fn.Doc) @@ -98,7 +85,6 @@ func ComplexityStatsWithDiagnostic(f *ast.File, fset *token.FileSet, stats []Sta continue } -<<<<<<< HEAD res := ScanComplexity(fn, enableDiagnostics) stats = append(stats, Stat{ @@ -136,19 +122,6 @@ func generateDiagnostics(fset *token.FileSet, diags []diagnostic) []Diagnostic { return out } -======= - stats = append(stats, Stat{ - PkgName: f.Name.Name, - FuncName: funcName(fn), - Complexity: Complexity(fn), - Pos: fset.Position(fn.Pos()), - }) - } - } - return stats -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type directive struct { Ignore bool } @@ -173,23 +146,16 @@ func funcName(fn *ast.FuncDecl) string { if fn.Recv != nil { if fn.Recv.NumFields() > 0 { typ := fn.Recv.List[0].Type -<<<<<<< HEAD return fmt.Sprintf("(%s).%s", recvString(typ), fn.Name) } } -======= - return fmt.Sprintf("(%s).%s", recvString(typ), fn.Name) - } - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return fn.Name.Name } // Complexity calculates the cognitive complexity of a function. func Complexity(fn *ast.FuncDecl) int { -<<<<<<< HEAD res := ScanComplexity(fn, false) return res.Complexity @@ -220,14 +186,6 @@ type diagnostic struct { Nesting int Text string Pos token.Pos -======= - v := complexityVisitor{ - name: fn.Name, - } - - ast.Walk(&v, fn) - return v.complexity ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type complexityVisitor struct { @@ -236,12 +194,9 @@ type complexityVisitor struct { nesting int elseNodes map[ast.Node]bool calculatedExprs map[ast.Expr]bool -<<<<<<< HEAD diagnosticsEnabled bool diagnostics []diagnostic -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (v *complexityVisitor) incNesting() { @@ -252,7 +207,6 @@ func (v *complexityVisitor) decNesting() { v.nesting-- } -<<<<<<< HEAD func (v *complexityVisitor) incComplexity(text string, pos token.Pos) { v.complexity++ @@ -280,14 +234,6 @@ func (v *complexityVisitor) nestIncComplexity(text string, pos token.Pos) { Text: text, Pos: pos, }) -======= -func (v *complexityVisitor) incComplexity() { - v.complexity++ -} - -func (v *complexityVisitor) nestIncComplexity() { - v.complexity += (v.nesting + 1) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (v *complexityVisitor) markAsElseNode(n ast.Node) { @@ -346,19 +292,12 @@ func (v *complexityVisitor) Visit(n ast.Node) ast.Visitor { case *ast.CallExpr: return v.visitCallExpr(n) } -<<<<<<< HEAD -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return v } func (v *complexityVisitor) visitIfStmt(n *ast.IfStmt) ast.Visitor { -<<<<<<< HEAD v.incIfComplexity(n, "if", n.Pos()) -======= - v.incIfComplexity(n) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if n := n.Init; n != nil { ast.Walk(v, n) @@ -366,26 +305,12 @@ func (v *complexityVisitor) visitIfStmt(n *ast.IfStmt) ast.Visitor { ast.Walk(v, n.Cond) -<<<<<<< HEAD v.incNesting() ast.Walk(v, n.Body) v.decNesting() if _, ok := n.Else.(*ast.BlockStmt); ok { v.incComplexity("else", n.Else.Pos()) -======= - pure := !v.markedAsElseNode(n) // pure `if` statement, not an `else if` - if pure { - v.incNesting() - ast.Walk(v, n.Body) - v.decNesting() - } else { - ast.Walk(v, n.Body) - } - - if _, ok := n.Else.(*ast.BlockStmt); ok { - 
v.incComplexity() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ast.Walk(v, n.Else) } else if _, ok := n.Else.(*ast.IfStmt); ok { @@ -397,11 +322,7 @@ func (v *complexityVisitor) visitIfStmt(n *ast.IfStmt) ast.Visitor { } func (v *complexityVisitor) visitSwitchStmt(n *ast.SwitchStmt) ast.Visitor { -<<<<<<< HEAD v.nestIncComplexity("switch", n.Pos()) -======= - v.nestIncComplexity() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if n := n.Init; n != nil { ast.Walk(v, n) @@ -414,19 +335,12 @@ func (v *complexityVisitor) visitSwitchStmt(n *ast.SwitchStmt) ast.Visitor { v.incNesting() ast.Walk(v, n.Body) v.decNesting() -<<<<<<< HEAD -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } func (v *complexityVisitor) visitTypeSwitchStmt(n *ast.TypeSwitchStmt) ast.Visitor { -<<<<<<< HEAD v.nestIncComplexity("switch", n.Pos()) -======= - v.nestIncComplexity() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if n := n.Init; n != nil { ast.Walk(v, n) @@ -439,36 +353,22 @@ func (v *complexityVisitor) visitTypeSwitchStmt(n *ast.TypeSwitchStmt) ast.Visit v.incNesting() ast.Walk(v, n.Body) v.decNesting() -<<<<<<< HEAD -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } func (v *complexityVisitor) visitSelectStmt(n *ast.SelectStmt) ast.Visitor { -<<<<<<< HEAD v.nestIncComplexity("select", n.Pos()) -======= - v.nestIncComplexity() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) v.incNesting() ast.Walk(v, n.Body) v.decNesting() -<<<<<<< HEAD -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } func (v *complexityVisitor) visitForStmt(n *ast.ForStmt) ast.Visitor { -<<<<<<< HEAD v.nestIncComplexity("for", n.Pos()) -======= - v.nestIncComplexity() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if n := n.Init; n != nil { ast.Walk(v, n) @@ -485,19 +385,12 @@ func (v *complexityVisitor) visitForStmt(n *ast.ForStmt) ast.Visitor { v.incNesting() ast.Walk(v, n.Body) v.decNesting() -<<<<<<< HEAD -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } func (v *complexityVisitor) visitRangeStmt(n *ast.RangeStmt) ast.Visitor { -<<<<<<< HEAD v.nestIncComplexity("for", n.Pos()) -======= - v.nestIncComplexity() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if n := n.Key; n != nil { ast.Walk(v, n) @@ -512,10 +405,7 @@ func (v *complexityVisitor) visitRangeStmt(n *ast.RangeStmt) ast.Visitor { v.incNesting() ast.Walk(v, n.Body) v.decNesting() -<<<<<<< HEAD -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } @@ -525,23 +415,15 @@ func (v *complexityVisitor) visitFuncLit(n *ast.FuncLit) ast.Visitor { v.incNesting() ast.Walk(v, n.Body) v.decNesting() -<<<<<<< HEAD -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } func (v *complexityVisitor) visitBranchStmt(n *ast.BranchStmt) ast.Visitor { if n.Label != nil { -<<<<<<< HEAD v.incComplexity(n.Tok.String(), n.Pos()) } -======= - v.incComplexity() - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return v } @@ -552,19 +434,12 @@ func (v *complexityVisitor) visitBinaryExpr(n *ast.BinaryExpr) ast.Visitor { var lastOp token.Token for _, op := range ops { if lastOp != op { -<<<<<<< HEAD v.incComplexity(op.String(), n.OpPos) -======= - v.incComplexity() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) lastOp = op } } } -<<<<<<< HEAD -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return v } @@ -573,61 +448,38 @@ func (v 
*complexityVisitor) visitCallExpr(n *ast.CallExpr) ast.Visitor { obj, name := callIdent.Obj, callIdent.Name if obj == v.name.Obj && name == v.name.Name { // called by same function directly (direct recursion) -<<<<<<< HEAD v.incComplexity(name, n.Pos()) } } -======= - v.incComplexity() - } - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return v } func (v *complexityVisitor) collectBinaryOps(exp ast.Expr) []token.Token { v.markCalculated(exp) -<<<<<<< HEAD -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if exp, ok := exp.(*ast.BinaryExpr); ok { return mergeBinaryOps(v.collectBinaryOps(exp.X), exp.Op, v.collectBinaryOps(exp.Y)) } return nil } -<<<<<<< HEAD func (v *complexityVisitor) incIfComplexity(n *ast.IfStmt, text string, pos token.Pos) { if v.markedAsElseNode(n) { v.incComplexity(text, pos) } else { v.nestIncComplexity(text, pos) -======= -func (v *complexityVisitor) incIfComplexity(n *ast.IfStmt) { - if v.markedAsElseNode(n) { - v.incComplexity() - } else { - v.nestIncComplexity() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } func mergeBinaryOps(x []token.Token, op token.Token, y []token.Token) []token.Token { var out []token.Token out = append(out, x...) -<<<<<<< HEAD if isBinaryLogicalOp(op) { out = append(out, op) } -======= - if isBinaryLogicalOp(op) { - out = append(out, op) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) out = append(out, y...) return out } diff --git a/vendor/github.com/uudashr/gocognit/recv.go b/vendor/github.com/uudashr/gocognit/recv.go index 6b7a6b3479..eaf3c9762d 100644 --- a/vendor/github.com/uudashr/gocognit/recv.go +++ b/vendor/github.com/uudashr/gocognit/recv.go @@ -20,9 +20,6 @@ func recvString(recv ast.Expr) string { case *ast.IndexListExpr: return recvString(t.X) } -<<<<<<< HEAD -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return "BADRECV" } diff --git a/vendor/github.com/uudashr/gocognit/recv_pre118.go b/vendor/github.com/uudashr/gocognit/recv_pre118.go index 0bbc62434f..a47ba1bd5a 100644 --- a/vendor/github.com/uudashr/gocognit/recv_pre118.go +++ b/vendor/github.com/uudashr/gocognit/recv_pre118.go @@ -16,9 +16,6 @@ func recvString(recv ast.Expr) string { case *ast.StarExpr: return "*" + recvString(t.X) } -<<<<<<< HEAD -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return "BADRECV" } diff --git a/vendor/github.com/uudashr/iface/opaque/opaque.go b/vendor/github.com/uudashr/iface/opaque/opaque.go index e0b53f43b2..59e406322f 100644 --- a/vendor/github.com/uudashr/iface/opaque/opaque.go +++ b/vendor/github.com/uudashr/iface/opaque/opaque.go @@ -274,17 +274,11 @@ func (r *runner) run(pass *analysis.Pass) (interface{}, error) { stmtTypName = removePkgPrefix(stmtTypName) } -<<<<<<< HEAD msg := fmt.Sprintf("%s function return %s interface at the %s result, abstract a single concrete implementation of %s", -======= - pass.Reportf(result.Pos(), - "%s function return %s interface at the %s result, abstract a single concrete implementation of %s", ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) funcDecl.Name.Name, retTypeName, positionStr(currentIdx), stmtTypName) -<<<<<<< HEAD pass.Report(analysis.Diagnostic{ Pos: result.Pos(), @@ -302,8 +296,6 @@ func (r *runner) run(pass *analysis.Pass) (interface{}, error) { }, }, }) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } }) diff --git a/vendor/github.com/uudashr/iface/unused/unused.go b/vendor/github.com/uudashr/iface/unused/unused.go index b4080adb0c..9c301ae673 100644 --- 
a/vendor/github.com/uudashr/iface/unused/unused.go +++ b/vendor/github.com/uudashr/iface/unused/unused.go @@ -48,12 +48,8 @@ func (r *runner) run(pass *analysis.Pass) (interface{}, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) // Collect all interface type declarations -<<<<<<< HEAD ifaceDecls := make(map[string]*ast.TypeSpec) genDecls := make(map[string]*ast.GenDecl) // ifaceName -> GenDecl -======= - ifaceDecls := make(map[string]token.Pos) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) nodeFilter := []ast.Node{ (*ast.GenDecl)(nil), @@ -85,11 +81,7 @@ func (r *runner) run(pass *analysis.Pass) (interface{}, error) { _, ok = ts.Type.(*ast.InterfaceType) if !ok { -<<<<<<< HEAD continue -======= - return ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if r.debug { @@ -102,12 +94,8 @@ func (r *runner) run(pass *analysis.Pass) (interface{}, error) { continue } -<<<<<<< HEAD ifaceDecls[ts.Name.Name] = ts genDecls[ts.Name.Name] = decl -======= - ifaceDecls[ts.Name.Name] = ts.Pos() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } }) @@ -131,33 +119,24 @@ func (r *runner) run(pass *analysis.Pass) (interface{}, error) { return } -<<<<<<< HEAD ts, ok := ifaceDecls[ident.Name] if !ok { return } if ts.Pos() == ident.Pos() { -======= - pos := ifaceDecls[ident.Name] - if pos == ident.Pos() { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The identifier is the interface type declaration return } delete(ifaceDecls, ident.Name) -<<<<<<< HEAD delete(genDecls, ident.Name) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }) if r.debug { fmt.Printf("Package %s %s\n", pass.Pkg.Path(), pass.Pkg.Name()) } -<<<<<<< HEAD for name, ts := range ifaceDecls { decl := genDecls[name] @@ -185,10 +164,6 @@ func (r *runner) run(pass *analysis.Pass) (interface{}, error) { }, }, }) -======= - for name, pos := range ifaceDecls { - pass.Reportf(pos, "interface %s is declared but not used within the package", name) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return nil, nil diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go index 3b6eaab29c..6a6b3e0182 100644 --- a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go +++ b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go @@ -56,14 +56,11 @@ func (tr *Reader) RawBytes() []byte { } -<<<<<<< HEAD // ExpectedPadding returns the number of bytes of padding expected after the last header returned by Next() func (tr *Reader) ExpectedPadding() int64 { return tr.pad } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // NewReader creates a new Reader reading from r. 
func NewReader(r io.Reader) *Reader { return &Reader{r: r, curr: &regFileReader{r, 0}} } diff --git a/vendor/github.com/xanzy/go-gitlab/.gitignore b/vendor/github.com/xanzy/go-gitlab/.gitignore deleted file mode 100644 index 76a9f4df79..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/.gitignore +++ /dev/null @@ -1,33 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -# IDE specific files and folders -.idea -*.iml -*.swp -*.swo - -# vendor -vendor diff --git a/vendor/github.com/xanzy/go-gitlab/.golangci.yml b/vendor/github.com/xanzy/go-gitlab/.golangci.yml deleted file mode 100644 index 7c05feeefc..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/.golangci.yml +++ /dev/null @@ -1,56 +0,0 @@ -# This file contains all available configuration options -# with their default values. - -# Options for analysis running -run: - concurrency: 4 - timeout: 10m - issues-exit-code: 1 - tests: true - -# Output configuration options -output: - formats: - - format: line-number - -# All available settings of specific linters -linters-settings: - misspell: - locale: US - ignore-words: - - noteable - -linters: - enable: - - asciicheck - - dogsled - - errorlint - - exportloopref - - goconst - - gosimple - - govet - - ineffassign - - megacheck - - misspell - - nakedret - - nolintlint - - staticcheck - - typecheck - - unconvert - - unused - - whitespace - disable: - - errcheck - disable-all: false - fast: false - -issues: - # List of regexps of issue texts to exclude. - exclude: - - "^.*, make it a constant$" - - # Maximum issues count per one linter (set to 0 to disable) - max-issues-per-linter: 0 - - # Maximum count of issues with the same text (set to 0 to disable) - max-same-issues: 0 diff --git a/vendor/github.com/xanzy/go-gitlab/CONTRIBUTING.md b/vendor/github.com/xanzy/go-gitlab/CONTRIBUTING.md deleted file mode 100644 index 32bd822745..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/CONTRIBUTING.md +++ /dev/null @@ -1,53 +0,0 @@ -# How to Contribute - -We want to make contributing to this project as easy as possible. - -## Reporting Issues - -If you have an issue, please report it on the [issue tracker](https://github.com/xanzy/go-gitlab/issues). - -When you are up for writing a PR to solve the issue you encountered, it's not -needed to first open a separate issue. In that case only opening a PR with a -description of the issue you are trying to solve is just fine. - -## Contributing Code - -Pull requests are always welcome. When in doubt if your contribution fits within -the rest of the project, feel free to first open an issue to discuss your idea. - -This is not needed when fixing a bug or adding an enhancement, as long as the -enhancement you are trying to add can be found in the public GitLab API docs as -this project only supports what is in the public API docs. - -## Coding style - -We try to follow the Go best practices, where it makes sense, and use -[`gofumpt`](https://github.com/mvdan/gofumpt) to format code in this project. -As a general rule of thumb we prefer to keep line width for comments below 80 -chars and for code (where possible and sensible) below 100 chars. - -Before making a PR, please look at the rest this package and try to make sure -your contribution is consistent with the rest of the coding style.
- -New struct field or methods should be placed (as much as possible) in the same -order as the ordering used in the public API docs. The idea is that this makes it -easier to find things. - -### Setting up your local development environment to Contribute to `go-gitlab` - -1. [Fork](https://github.com/xanzy/go-gitlab/fork), then clone the repository. - ```sh - git clone https://github.com/<your_username>/go-gitlab.git - # or via ssh - git clone git@github.com:<your_username>/go-gitlab.git - ``` -1. Install dependencies: - ```sh - make setup - ``` -1. Make your changes on your feature branch -1. Run the tests and `gofumpt` - ```sh - make test && make fmt - ``` -1. Open up your pull request diff --git a/vendor/github.com/xanzy/go-gitlab/LICENSE b/vendor/github.com/xanzy/go-gitlab/LICENSE deleted file mode 100644 index 8dada3edaf..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner.
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/xanzy/go-gitlab/Makefile b/vendor/github.com/xanzy/go-gitlab/Makefile deleted file mode 100644 index 749cb2953e..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/Makefile +++ /dev/null @@ -1,22 +0,0 @@ -##@ General - -.PHONY: help -help: ## Display this help - @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) - -##@ Development - -fmt: ## Format code - @gofumpt -l -w . - -lint: ## Run linter - @golangci-lint run - -setup: ## Setup your local environment - go mod tidy - @go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest - @go install mvdan.cc/gofumpt@latest -.PHONY: setup - -test: ## Run tests - go test ./... -race diff --git a/vendor/github.com/xanzy/go-gitlab/README.md b/vendor/github.com/xanzy/go-gitlab/README.md deleted file mode 100644 index fa5a049a3b..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/README.md +++ /dev/null @@ -1,208 +0,0 @@ -# go-gitlab - -A GitLab API client enabling Go programs to interact with GitLab in a simple and uniform way - -[![Build Status](https://github.com/xanzy/go-gitlab/workflows/Lint%20and%20Test/badge.svg)](https://github.com/xanzy/go-gitlab/actions?workflow=Lint%20and%20Test) -[![Sourcegraph](https://sourcegraph.com/github.com/xanzy/go-gitlab/-/badge.svg)](https://sourcegraph.com/github.com/xanzy/go-gitlab?badge) -[![GoDoc](https://godoc.org/github.com/xanzy/go-gitlab?status.svg)](https://godoc.org/github.com/xanzy/go-gitlab) -[![Go Report Card](https://goreportcard.com/badge/github.com/xanzy/go-gitlab)](https://goreportcard.com/report/github.com/xanzy/go-gitlab) -[![Coverage](https://github.com/xanzy/go-gitlab/wiki/coverage.svg)](https://raw.githack.com/wiki/xanzy/go-gitlab/coverage.html) - -## NOTE - -Release v0.6.0 (released on 25-08-2017) no longer supports the older V3 GitLab API. If -you need V3 support, please use the `f-api-v3` branch. This release contains some backwards -incompatible changes that were needed to fully support the V4 GitLab API. - -## Coverage - -This API client package covers most of the existing GitLab API calls and is updated regularly -to add new and/or missing endpoints. 
Currently, the following services are supported: - -- [x] Applications -- [x] Award Emojis -- [x] Branches -- [x] Broadcast Messages -- [x] Commits -- [x] Container Registry -- [x] Custom Attributes -- [x] Deploy Keys -- [x] Deployments -- [x] Discussions (threaded comments) -- [x] Environments -- [x] Epic Issues -- [x] Epics -- [x] Error Tracking -- [x] Events -- [x] Feature Flags -- [x] Geo Nodes -- [x] Generic Packages -- [x] GitLab CI Config Templates -- [x] Gitignores Templates -- [x] Group Access Requests -- [x] Group Issue Boards -- [x] Group Members -- [x] Group Milestones -- [x] Group Wikis -- [x] Group-Level Variables -- [x] Groups -- [x] Instance Clusters -- [x] Invites -- [x] Issue Boards -- [x] Issues -- [x] Jobs -- [x] Keys -- [x] Labels -- [x] License -- [x] Markdown -- [x] Merge Request Approvals -- [x] Merge Requests -- [x] Namespaces -- [x] Notes (comments) -- [x] Notification Settings -- [x] Open Source License Templates -- [x] Packages -- [x] Pages -- [x] Pages Domains -- [x] Personal Access Tokens -- [x] Pipeline Schedules -- [x] Pipeline Triggers -- [x] Pipelines -- [x] Plan limits -- [x] Project Access Requests -- [x] Project Badges -- [x] Project Clusters -- [x] Project Import/export -- [x] Project Members -- [x] Project Milestones -- [x] Project Repository Storage Moves -- [x] Project Snippets -- [x] Project Vulnerabilities -- [x] Project-Level Variables -- [x] Projects (including setting Webhooks) -- [x] Protected Branches -- [x] Protected Environments -- [x] Protected Tags -- [x] Repositories -- [x] Repository Files -- [x] Repository Submodules -- [x] Runners -- [x] Search -- [x] Services -- [x] Settings -- [x] Sidekiq Metrics -- [x] System Hooks -- [x] Tags -- [x] Todos -- [x] Topics -- [x] Users -- [x] Validate CI Configuration -- [x] Version -- [x] Wikis - -## Usage - -```go -import "github.com/xanzy/go-gitlab" -``` - -Construct a new GitLab client, then use the various services on the client to -access different parts of the GitLab API. For example, to list all -users: - -```go -git, err := gitlab.NewClient("yourtokengoeshere") -if err != nil { - log.Fatalf("Failed to create client: %v", err) -} -users, _, err := git.Users.ListUsers(&gitlab.ListUsersOptions{}) -``` - -There are a few `With...` option functions that can be used to customize -the API client. For example, to set a custom base URL: - -```go -git, err := gitlab.NewClient("yourtokengoeshere", gitlab.WithBaseURL("https://git.mydomain.com/api/v4")) -if err != nil { - log.Fatalf("Failed to create client: %v", err) -} -users, _, err := git.Users.ListUsers(&gitlab.ListUsersOptions{}) -``` - -Some API methods have optional parameters that can be passed. 
For example, -to list all projects for user "svanharmelen": - -```go -git, err := gitlab.NewClient("yourtokengoeshere") -if err != nil { - log.Fatalf("Failed to create client: %v", err) -} -opt := &gitlab.ListProjectsOptions{Search: gitlab.Ptr("svanharmelen")} -projects, _, err := git.Projects.ListProjects(opt) -``` - -### Examples - -The [examples](https://github.com/xanzy/go-gitlab/tree/master/examples) directory -contains a couple of clear examples, one of which is partially listed here as well: - -```go -package main - -import ( - "log" - - "github.com/xanzy/go-gitlab" -) - -func main() { - git, err := gitlab.NewClient("yourtokengoeshere") - if err != nil { - log.Fatalf("Failed to create client: %v", err) - } - - // Create new project - p := &gitlab.CreateProjectOptions{ - Name: gitlab.Ptr("My Project"), - Description: gitlab.Ptr("Just a test project to play with"), - MergeRequestsAccessLevel: gitlab.Ptr(gitlab.EnabledAccessControl), - SnippetsAccessLevel: gitlab.Ptr(gitlab.EnabledAccessControl), - Visibility: gitlab.Ptr(gitlab.PublicVisibility), - } - project, _, err := git.Projects.CreateProject(p) - if err != nil { - log.Fatal(err) - } - - // Add a new snippet - s := &gitlab.CreateProjectSnippetOptions{ - Title: gitlab.Ptr("Dummy Snippet"), - FileName: gitlab.Ptr("snippet.go"), - Content: gitlab.Ptr("package main...."), - Visibility: gitlab.Ptr(gitlab.PublicVisibility), - } - _, _, err = git.ProjectSnippets.CreateSnippet(project.ID, s) - if err != nil { - log.Fatal(err) - } -} -``` - -For complete usage of go-gitlab, see the full [package docs](https://godoc.org/github.com/xanzy/go-gitlab). - -## ToDo - -- The biggest thing this package still needs is tests :disappointed: - -## Issues - -- If you have an issue: report it on the [issue tracker](https://github.com/xanzy/go-gitlab/issues) - -## Author - -Sander van Harmelen () - -## Contributing - -Contributions are always welcome. For more information, check out the [contributing guide](https://github.com/xanzy/go-gitlab/blob/master/CONTRIBUTING.md) - -## License - -Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/vendor/github.com/xanzy/go-gitlab/access_requests.go b/vendor/github.com/xanzy/go-gitlab/access_requests.go deleted file mode 100644 index 2e07187e58..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/access_requests.go +++ /dev/null @@ -1,253 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// AccessRequest represents an access request for a group or project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/access_requests.html -type AccessRequest struct { - ID int `json:"id"` - Username string `json:"username"` - Name string `json:"name"` - State string `json:"state"` - CreatedAt *time.Time `json:"created_at"` - RequestedAt *time.Time `json:"requested_at"` - AccessLevel AccessLevelValue `json:"access_level"` -} - -// AccessRequestsService handles communication with the project/group -// access requests related methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/access_requests.html -type AccessRequestsService struct { - client *Client -} - -// ListAccessRequestsOptions represents the available -// ListProjectAccessRequests() or ListGroupAccessRequests() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/access_requests.html#list-access-requests-for-a-group-or-project -type ListAccessRequestsOptions ListOptions - -// ListProjectAccessRequests gets a list of access requests -// viewable by the authenticated user. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/access_requests.html#list-access-requests-for-a-group-or-project -func (s *AccessRequestsService) ListProjectAccessRequests(pid interface{}, opt *ListAccessRequestsOptions, options ...RequestOptionFunc) ([]*AccessRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/access_requests", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ars []*AccessRequest - resp, err := s.client.Do(req, &ars) - if err != nil { - return nil, resp, err - } - - return ars, resp, nil -} - -// ListGroupAccessRequests gets a list of access requests -// viewable by the authenticated user. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/access_requests.html#list-access-requests-for-a-group-or-project -func (s *AccessRequestsService) ListGroupAccessRequests(gid interface{}, opt *ListAccessRequestsOptions, options ...RequestOptionFunc) ([]*AccessRequest, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/access_requests", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ars []*AccessRequest - resp, err := s.client.Do(req, &ars) - if err != nil { - return nil, resp, err - } - - return ars, resp, nil -} - -// RequestProjectAccess requests access for the authenticated user -// to a group or project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/access_requests.html#request-access-to-a-group-or-project -func (s *AccessRequestsService) RequestProjectAccess(pid interface{}, options ...RequestOptionFunc) (*AccessRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/access_requests", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - ar := new(AccessRequest) - resp, err := s.client.Do(req, ar) - if err != nil { - return nil, resp, err - } - - return ar, resp, nil -} - -// RequestGroupAccess requests access for the authenticated user -// to a group or project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/access_requests.html#request-access-to-a-group-or-project -func (s *AccessRequestsService) RequestGroupAccess(gid interface{}, options ...RequestOptionFunc) (*AccessRequest, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/access_requests", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - ar := new(AccessRequest) - resp, err := s.client.Do(req, ar) - if err != nil { - return nil, resp, err - } - - return ar, resp, nil -} - -// ApproveAccessRequestOptions represents the available -// ApproveProjectAccessRequest() and ApproveGroupAccessRequest() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/access_requests.html#approve-an-access-request -type ApproveAccessRequestOptions struct { - AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` -} - -// ApproveProjectAccessRequest approves an access request for the given user. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/access_requests.html#approve-an-access-request -func (s *AccessRequestsService) ApproveProjectAccessRequest(pid interface{}, user int, opt *ApproveAccessRequestOptions, options ...RequestOptionFunc) (*AccessRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/access_requests/%d/approve", PathEscape(project), user) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - ar := new(AccessRequest) - resp, err := s.client.Do(req, ar) - if err != nil { - return nil, resp, err - } - - return ar, resp, nil -} - -// ApproveGroupAccessRequest approves an access request for the given user. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/access_requests.html#approve-an-access-request -func (s *AccessRequestsService) ApproveGroupAccessRequest(gid interface{}, user int, opt *ApproveAccessRequestOptions, options ...RequestOptionFunc) (*AccessRequest, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/access_requests/%d/approve", PathEscape(group), user) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - ar := new(AccessRequest) - resp, err := s.client.Do(req, ar) - if err != nil { - return nil, resp, err - } - - return ar, resp, nil -} - -// DenyProjectAccessRequest denies an access request for the given user. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/access_requests.html#deny-an-access-request -func (s *AccessRequestsService) DenyProjectAccessRequest(pid interface{}, user int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/access_requests/%d", PathEscape(project), user) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// DenyGroupAccessRequest denies an access request for the given user. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/access_requests.html#deny-an-access-request -func (s *AccessRequestsService) DenyGroupAccessRequest(gid interface{}, user int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/access_requests/%d", PathEscape(group), user) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/appearance.go b/vendor/github.com/xanzy/go-gitlab/appearance.go deleted file mode 100644 index f21893c0e0..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/appearance.go +++ /dev/null @@ -1,110 +0,0 @@ -// -// Copyright 2023, 徐晓伟 -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import "net/http" - -// AppearanceService handles communication with appearance of the Gitlab API. -// -// Gitlab API docs : https://docs.gitlab.com/ee/api/appearance.html -type AppearanceService struct { - client *Client -} - -// Appearance represents a GitLab appearance. -// -// Gitlab API docs : https://docs.gitlab.com/ee/api/appearance.html -type Appearance struct { - Title string `json:"title"` - Description string `json:"description"` - PWAName string `json:"pwa_name"` - PWAShortName string `json:"pwa_short_name"` - PWADescription string `json:"pwa_description"` - PWAIcon string `json:"pwa_icon"` - Logo string `json:"logo"` - HeaderLogo string `json:"header_logo"` - Favicon string `json:"favicon"` - NewProjectGuidelines string `json:"new_project_guidelines"` - ProfileImageGuidelines string `json:"profile_image_guidelines"` - HeaderMessage string `json:"header_message"` - FooterMessage string `json:"footer_message"` - MessageBackgroundColor string `json:"message_background_color"` - MessageFontColor string `json:"message_font_color"` - EmailHeaderAndFooterEnabled bool `json:"email_header_and_footer_enabled"` -} - -// GetAppearance gets the current appearance configuration of the GitLab instance. -// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/appearance.html#get-current-appearance-configuration -func (s *AppearanceService) GetAppearance(options ...RequestOptionFunc) (*Appearance, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "application/appearance", nil, options) - if err != nil { - return nil, nil, err - } - - as := new(Appearance) - resp, err := s.client.Do(req, as) - if err != nil { - return nil, resp, err - } - - return as, resp, nil -} - -// ChangeAppearanceOptions represents the available ChangeAppearance() options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/appearance.html#change-appearance-configuration -type ChangeAppearanceOptions struct { - Title *string `url:"title,omitempty" json:"title,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - PWAName *string `url:"pwa_name,omitempty" json:"pwa_name,omitempty"` - PWAShortName *string `url:"pwa_short_name,omitempty" json:"pwa_short_name,omitempty"` - PWADescription *string `url:"pwa_description,omitempty" json:"pwa_description,omitempty"` - PWAIcon *string `url:"pwa_icon,omitempty" json:"pwa_icon,omitempty"` - Logo *string `url:"logo,omitempty" json:"logo,omitempty"` - HeaderLogo *string `url:"header_logo,omitempty" json:"header_logo,omitempty"` - Favicon *string `url:"favicon,omitempty" json:"favicon,omitempty"` - NewProjectGuidelines *string `url:"new_project_guidelines,omitempty" json:"new_project_guidelines,omitempty"` - ProfileImageGuidelines *string `url:"profile_image_guidelines,omitempty" json:"profile_image_guidelines,omitempty"` - HeaderMessage *string `url:"header_message,omitempty" json:"header_message,omitempty"` - FooterMessage *string `url:"footer_message,omitempty" json:"footer_message,omitempty"` - MessageBackgroundColor *string `url:"message_background_color,omitempty" json:"message_background_color,omitempty"` - MessageFontColor *string `url:"message_font_color,omitempty" json:"message_font_color,omitempty"` - EmailHeaderAndFooterEnabled *bool `url:"email_header_and_footer_enabled,omitempty" json:"email_header_and_footer_enabled,omitempty"` - URL *string `url:"url,omitempty" json:"url,omitempty"` -} - -// ChangeAppearance changes the appearance configuration. -// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/appearance.html#change-appearance-configuration -func (s *AppearanceService) ChangeAppearance(opt *ChangeAppearanceOptions, options ...RequestOptionFunc) (*Appearance, *Response, error) { - req, err := s.client.NewRequest(http.MethodPut, "application/appearance", opt, options) - if err != nil { - return nil, nil, err - } - - as := new(Appearance) - resp, err := s.client.Do(req, as) - if err != nil { - return nil, resp, err - } - - return as, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/applications.go b/vendor/github.com/xanzy/go-gitlab/applications.go deleted file mode 100644 index 5335f6cffc..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/applications.go +++ /dev/null @@ -1,106 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" -) - -// ApplicationsService handles communication with administrable applications -// of the Gitlab API. 
-// -// Gitlab API docs : https://docs.gitlab.com/ee/api/applications.html -type ApplicationsService struct { - client *Client -} - -// Application represents a GitLab application. -type Application struct { - ID int `json:"id"` - ApplicationID string `json:"application_id"` - ApplicationName string `json:"application_name"` - Secret string `json:"secret"` - CallbackURL string `json:"callback_url"` - Confidential bool `json:"confidential"` -} - -// CreateApplicationOptions represents the available CreateApplication() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/applications.html#create-an-application -type CreateApplicationOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - RedirectURI *string `url:"redirect_uri,omitempty" json:"redirect_uri,omitempty"` - Scopes *string `url:"scopes,omitempty" json:"scopes,omitempty"` - Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` -} - -// CreateApplication creates a new application owned by the authenticated user. -// -// Gitlab API docs : https://docs.gitlab.com/ee/api/applications.html#create-an-application -func (s *ApplicationsService) CreateApplication(opt *CreateApplicationOptions, options ...RequestOptionFunc) (*Application, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "applications", opt, options) - if err != nil { - return nil, nil, err - } - - a := new(Application) - resp, err := s.client.Do(req, a) - if err != nil { - return nil, resp, err - } - - return a, resp, nil -} - -// ListApplicationsOptions represents the available -// ListApplications() options. -type ListApplicationsOptions ListOptions - -// ListApplications gets a list of applications administrable by the authenticated user. -// -// Gitlab API docs : https://docs.gitlab.com/ee/api/applications.html#list-all-applications -func (s *ApplicationsService) ListApplications(opt *ListApplicationsOptions, options ...RequestOptionFunc) ([]*Application, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "applications", opt, options) - if err != nil { - return nil, nil, err - } - - var as []*Application - resp, err := s.client.Do(req, &as) - if err != nil { - return nil, resp, err - } - - return as, resp, nil -} - -// DeleteApplication removes a specific application. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/applications.html#delete-an-application -func (s *ApplicationsService) DeleteApplication(application int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("applications/%d", application) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/audit_events.go b/vendor/github.com/xanzy/go-gitlab/audit_events.go deleted file mode 100644 index de312e5606..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/audit_events.go +++ /dev/null @@ -1,202 +0,0 @@ -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// AuditEvent represents an audit event for a group, a project or the instance. 
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/audit_events.html -type AuditEvent struct { - ID int `json:"id"` - AuthorID int `json:"author_id"` - EntityID int `json:"entity_id"` - EntityType string `json:"entity_type"` - Details AuditEventDetails `json:"details"` - CreatedAt *time.Time `json:"created_at"` - EventType string `json:"event_type"` -} - -// AuditEventDetails represents the details portion of an audit event for -// a group, a project or the instance. The exact fields that are returned -// for an audit event depend on the action being recorded. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/audit_events.html -type AuditEventDetails struct { - With string `json:"with"` - Add string `json:"add"` - As string `json:"as"` - Change string `json:"change"` - From string `json:"from"` - To string `json:"to"` - Remove string `json:"remove"` - CustomMessage string `json:"custom_message"` - AuthorName string `json:"author_name"` - AuthorEmail string `json:"author_email"` - AuthorClass string `json:"author_class"` - TargetID interface{} `json:"target_id"` - TargetType string `json:"target_type"` - TargetDetails string `json:"target_details"` - IPAddress string `json:"ip_address"` - EntityPath string `json:"entity_path"` - FailedLogin string `json:"failed_login"` -} - -// AuditEventsService handles communication with the project/group/instance -// audit event related methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/audit_events.html -type AuditEventsService struct { - client *Client -} - -// ListAuditEventsOptions represents the available ListProjectAuditEvents(), -// ListGroupAuditEvents() or ListInstanceAuditEvents() options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/audit_events.html -type ListAuditEventsOptions struct { - ListOptions - CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"` - CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` -} - -// ListInstanceAuditEvents gets a list of audit events for instance. -// Authentication as Administrator is required. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/audit_events.html#retrieve-all-instance-audit-events -func (s *AuditEventsService) ListInstanceAuditEvents(opt *ListAuditEventsOptions, options ...RequestOptionFunc) ([]*AuditEvent, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "audit_events", opt, options) - if err != nil { - return nil, nil, err - } - - var aes []*AuditEvent - resp, err := s.client.Do(req, &aes) - if err != nil { - return nil, resp, err - } - - return aes, resp, nil -} - -// GetInstanceAuditEvent gets a specific instance audit event. -// Authentication as Administrator is required. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/audit_events.html#retrieve-single-instance-audit-event -func (s *AuditEventsService) GetInstanceAuditEvent(event int, options ...RequestOptionFunc) (*AuditEvent, *Response, error) { - u := fmt.Sprintf("audit_events/%d", event) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ae := new(AuditEvent) - resp, err := s.client.Do(req, ae) - if err != nil { - return nil, resp, err - } - - return ae, resp, nil -} - -// ListGroupAuditEvents gets a list of audit events for the specified group -// viewable by the authenticated user. 
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/audit_events.html#retrieve-all-group-audit-events -func (s *AuditEventsService) ListGroupAuditEvents(gid interface{}, opt *ListAuditEventsOptions, options ...RequestOptionFunc) ([]*AuditEvent, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/audit_events", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var aes []*AuditEvent - resp, err := s.client.Do(req, &aes) - if err != nil { - return nil, resp, err - } - - return aes, resp, nil -} - -// GetGroupAuditEvent gets a specific group audit event. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/audit_events.html#retrieve-a-specific-group-audit-event -func (s *AuditEventsService) GetGroupAuditEvent(gid interface{}, event int, options ...RequestOptionFunc) (*AuditEvent, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/audit_events/%d", PathEscape(group), event) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ae := new(AuditEvent) - resp, err := s.client.Do(req, ae) - if err != nil { - return nil, resp, err - } - - return ae, resp, nil -} - -// ListProjectAuditEvents gets a list of audit events for the specified project -// viewable by the authenticated user. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/audit_events.html#retrieve-all-project-audit-events -func (s *AuditEventsService) ListProjectAuditEvents(pid interface{}, opt *ListAuditEventsOptions, options ...RequestOptionFunc) ([]*AuditEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/audit_events", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var aes []*AuditEvent - resp, err := s.client.Do(req, &aes) - if err != nil { - return nil, resp, err - } - - return aes, resp, nil -} - -// GetProjectAuditEvent gets a specific project audit event. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/audit_events.html#retrieve-a-specific-project-audit-event -func (s *AuditEventsService) GetProjectAuditEvent(pid interface{}, event int, options ...RequestOptionFunc) (*AuditEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/audit_events/%d", PathEscape(project), event) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ae := new(AuditEvent) - resp, err := s.client.Do(req, ae) - if err != nil { - return nil, resp, err - } - - return ae, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/avatar.go b/vendor/github.com/xanzy/go-gitlab/avatar.go deleted file mode 100644 index 1a7b923f3d..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/avatar.go +++ /dev/null @@ -1,64 +0,0 @@ -// -// Copyright 2021, Pavel Kostohrys -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "net/http" -) - -// AvatarRequestsService handles communication with the avatar related methods -// of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/avatar.html -type AvatarRequestsService struct { - client *Client -} - -// Avatar represents a GitLab avatar. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/avatar.html -type Avatar struct { - AvatarURL string `json:"avatar_url"` -} - -// GetAvatarOptions represents the available GetAvatar() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/avatar.html#get-a-single-avatar-url -type GetAvatarOptions struct { - Email *string `url:"email,omitempty" json:"email,omitempty"` - Size *int `url:"size,omitempty" json:"size,omitempty"` -} - -// GetAvatar gets the avatar URL for a user with the given email address. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/avatar.html#get-a-single-avatar-url -func (s *AvatarRequestsService) GetAvatar(opt *GetAvatarOptions, options ...RequestOptionFunc) (*Avatar, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "avatar", opt, options) - if err != nil { - return nil, nil, err - } - - avatar := new(Avatar) - response, err := s.client.Do(req, avatar) - if err != nil { - return nil, response, err - } - - return avatar, response, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/award_emojis.go b/vendor/github.com/xanzy/go-gitlab/award_emojis.go deleted file mode 100644 index f7673a3a5a..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/award_emojis.go +++ /dev/null @@ -1,468 +0,0 @@ -// -// Copyright 2021, Arkbriar -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// AwardEmojiService handles communication with the emoji awards related methods -// of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/award_emoji.html -type AwardEmojiService struct { - client *Client -} - -// AwardEmoji represents a GitLab Award Emoji. 
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/award_emoji.html -type AwardEmoji struct { - ID int `json:"id"` - Name string `json:"name"` - User struct { - Name string `json:"name"` - Username string `json:"username"` - ID int `json:"id"` - State string `json:"state"` - AvatarURL string `json:"avatar_url"` - WebURL string `json:"web_url"` - } `json:"user"` - CreatedAt *time.Time `json:"created_at"` - UpdatedAt *time.Time `json:"updated_at"` - AwardableID int `json:"awardable_id"` - AwardableType string `json:"awardable_type"` -} - -const ( - awardMergeRequest = "merge_requests" - awardIssue = "issues" - awardSnippets = "snippets" -) - -// ListAwardEmojiOptions represents the available options for listing emoji -// for each resource. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/award_emoji.html -type ListAwardEmojiOptions ListOptions - -// ListMergeRequestAwardEmoji gets a list of all award emoji on the merge request. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/award_emoji.html#list-an-awardables-award-emojis -func (s *AwardEmojiService) ListMergeRequestAwardEmoji(pid interface{}, mergeRequestIID int, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) { - return s.listAwardEmoji(pid, awardMergeRequest, mergeRequestIID, opt, options...) -} - -// ListIssueAwardEmoji gets a list of all award emoji on the issue. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/award_emoji.html#list-an-awardables-award-emojis -func (s *AwardEmojiService) ListIssueAwardEmoji(pid interface{}, issueIID int, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) { - return s.listAwardEmoji(pid, awardIssue, issueIID, opt, options...) -} - -// ListSnippetAwardEmoji gets a list of all award emoji on the snippet. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/award_emoji.html#list-an-awardables-award-emojis -func (s *AwardEmojiService) ListSnippetAwardEmoji(pid interface{}, snippetID int, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) { - return s.listAwardEmoji(pid, awardSnippets, snippetID, opt, options...) -} - -func (s *AwardEmojiService) listAwardEmoji(pid interface{}, resource string, resourceID int, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/%s/%d/award_emoji", - PathEscape(project), - resource, - resourceID, - ) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var as []*AwardEmoji - resp, err := s.client.Do(req, &as) - if err != nil { - return nil, resp, err - } - - return as, resp, nil -} - -// GetMergeRequestAwardEmoji gets an award emoji from a merge request. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/award_emoji.html#get-single-award-emoji -func (s *AwardEmojiService) GetMergeRequestAwardEmoji(pid interface{}, mergeRequestIID, awardID int, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { - return s.getAwardEmoji(pid, awardMergeRequest, mergeRequestIID, awardID, options...) -} - -// GetIssueAwardEmoji gets an award emoji from an issue. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/award_emoji.html#get-single-award-emoji -func (s *AwardEmojiService) GetIssueAwardEmoji(pid interface{}, issueIID, awardID int, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { - return s.getAwardEmoji(pid, awardIssue, issueIID, awardID, options...) -} - -// GetSnippetAwardEmoji gets an award emoji from a snippet. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/award_emoji.html#get-single-award-emoji -func (s *AwardEmojiService) GetSnippetAwardEmoji(pid interface{}, snippetID, awardID int, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { - return s.getAwardEmoji(pid, awardSnippets, snippetID, awardID, options...) -} - -func (s *AwardEmojiService) getAwardEmoji(pid interface{}, resource string, resourceID, awardID int, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/%s/%d/award_emoji/%d", - PathEscape(project), - resource, - resourceID, - awardID, - ) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - a := new(AwardEmoji) - resp, err := s.client.Do(req, &a) - if err != nil { - return nil, resp, err - } - - return a, resp, nil -} - -// CreateAwardEmojiOptions represents the available options for awarding emoji -// on a resource. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/award_emoji.html#award-a-new-emoji -type CreateAwardEmojiOptions struct { - Name string `json:"name"` -} - -// CreateMergeRequestAwardEmoji creates an award emoji on a merge request. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/award_emoji.html#award-a-new-emoji -func (s *AwardEmojiService) CreateMergeRequestAwardEmoji(pid interface{}, mergeRequestIID int, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { - return s.createAwardEmoji(pid, awardMergeRequest, mergeRequestIID, opt, options...) -} - -// CreateIssueAwardEmoji creates an award emoji on an issue. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/award_emoji.html#award-a-new-emoji -func (s *AwardEmojiService) CreateIssueAwardEmoji(pid interface{}, issueIID int, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { - return s.createAwardEmoji(pid, awardIssue, issueIID, opt, options...) -} - -// CreateSnippetAwardEmoji creates an award emoji on a snippet. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/award_emoji.html#award-a-new-emoji -func (s *AwardEmojiService) CreateSnippetAwardEmoji(pid interface{}, snippetID int, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { - return s.createAwardEmoji(pid, awardSnippets, snippetID, opt, options...) -} - -func (s *AwardEmojiService) createAwardEmoji(pid interface{}, resource string, resourceID int, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/%s/%d/award_emoji", - PathEscape(project), - resource, - resourceID, - ) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - a := new(AwardEmoji) - resp, err := s.client.Do(req, &a) - if err != nil { - return nil, resp, err - } - - return a, resp, nil -} - -// DeleteIssueAwardEmoji deletes an award emoji on an issue. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/award_emoji.html#delete-an-award-emoji -func (s *AwardEmojiService) DeleteIssueAwardEmoji(pid interface{}, issueIID, awardID int, options ...RequestOptionFunc) (*Response, error) { - return s.deleteAwardEmoji(pid, awardIssue, issueIID, awardID, options...) -} - -// DeleteMergeRequestAwardEmoji deletes an award emoji on a merge request. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/award_emoji.html#delete-an-award-emoji -func (s *AwardEmojiService) DeleteMergeRequestAwardEmoji(pid interface{}, mergeRequestIID, awardID int, options ...RequestOptionFunc) (*Response, error) { - return s.deleteAwardEmoji(pid, awardMergeRequest, mergeRequestIID, awardID, options...) -} - -// DeleteSnippetAwardEmoji deletes an award emoji on a snippet. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/award_emoji.html#delete-an-award-emoji -func (s *AwardEmojiService) DeleteSnippetAwardEmoji(pid interface{}, snippetID, awardID int, options ...RequestOptionFunc) (*Response, error) { - return s.deleteAwardEmoji(pid, awardSnippets, snippetID, awardID, options...) -} - -// deleteAwardEmoji deletes an award emoji on the specified resource. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/award_emoji.html#delete-an-award-emoji -func (s *AwardEmojiService) deleteAwardEmoji(pid interface{}, resource string, resourceID, awardID int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/%s/%d/award_emoji/%d", PathEscape(project), resource, - resourceID, awardID) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - return s.client.Do(req, nil) -} - -// ListIssuesAwardEmojiOnNote gets a list of all award emoji on a note from the -// issue. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/award_emoji.html#list-a-comments-award-emojis -func (s *AwardEmojiService) ListIssuesAwardEmojiOnNote(pid interface{}, issueID, noteID int, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) { - return s.listAwardEmojiOnNote(pid, awardIssue, issueID, noteID, opt, options...) -} - -// ListMergeRequestAwardEmojiOnNote gets a list of all award emoji on a note -// from the merge request. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/award_emoji.html#list-a-comments-award-emojis -func (s *AwardEmojiService) ListMergeRequestAwardEmojiOnNote(pid interface{}, mergeRequestIID, noteID int, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) { - return s.listAwardEmojiOnNote(pid, awardMergeRequest, mergeRequestIID, noteID, opt, options...) -} - -// ListSnippetAwardEmojiOnNote gets a list of all award emoji on a note from the -// snippet. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/award_emoji.html#list-a-comments-award-emojis -func (s *AwardEmojiService) ListSnippetAwardEmojiOnNote(pid interface{}, snippetIID, noteID int, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) { - return s.listAwardEmojiOnNote(pid, awardSnippets, snippetIID, noteID, opt, options...) 
-} - -func (s *AwardEmojiService) listAwardEmojiOnNote(pid interface{}, resource string, resourceID, noteID int, opt *ListAwardEmojiOptions, options ...RequestOptionFunc) ([]*AwardEmoji, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/%s/%d/notes/%d/award_emoji", PathEscape(project), resource, - resourceID, noteID) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var as []*AwardEmoji - resp, err := s.client.Do(req, &as) - if err != nil { - return nil, resp, err - } - - return as, resp, nil -} - -// GetIssuesAwardEmojiOnNote gets an award emoji on a note from an issue. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/award_emoji.html#get-an-award-emoji-for-a-comment -func (s *AwardEmojiService) GetIssuesAwardEmojiOnNote(pid interface{}, issueID, noteID, awardID int, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { - return s.getSingleNoteAwardEmoji(pid, awardIssue, issueID, noteID, awardID, options...) -} - -// GetMergeRequestAwardEmojiOnNote gets an award emoji on a note from a -// merge request. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/award_emoji.html#get-an-award-emoji-for-a-comment -func (s *AwardEmojiService) GetMergeRequestAwardEmojiOnNote(pid interface{}, mergeRequestIID, noteID, awardID int, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { - return s.getSingleNoteAwardEmoji(pid, awardMergeRequest, mergeRequestIID, noteID, awardID, - options...) -} - -// GetSnippetAwardEmojiOnNote gets an award emoji on a note from a snippet. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/award_emoji.html#get-an-award-emoji-for-a-comment -func (s *AwardEmojiService) GetSnippetAwardEmojiOnNote(pid interface{}, snippetIID, noteID, awardID int, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { - return s.getSingleNoteAwardEmoji(pid, awardSnippets, snippetIID, noteID, awardID, options...) -} - -func (s *AwardEmojiService) getSingleNoteAwardEmoji(pid interface{}, resource string, resourceID, noteID, awardID int, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/%s/%d/notes/%d/award_emoji/%d", - PathEscape(project), - resource, - resourceID, - noteID, - awardID, - ) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - a := new(AwardEmoji) - resp, err := s.client.Do(req, &a) - if err != nil { - return nil, resp, err - } - - return a, resp, nil -} - -// CreateIssuesAwardEmojiOnNote creates an award emoji on a note from an issue. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/award_emoji.html#award-a-new-emoji-on-a-comment -func (s *AwardEmojiService) CreateIssuesAwardEmojiOnNote(pid interface{}, issueID, noteID int, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { - return s.createAwardEmojiOnNote(pid, awardIssue, issueID, noteID, opt, options...) -} - -// CreateMergeRequestAwardEmojiOnNote creates an award emoji on a note from a -// merge request. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/award_emoji.html#award-a-new-emoji-on-a-comment -func (s *AwardEmojiService) CreateMergeRequestAwardEmojiOnNote(pid interface{}, mergeRequestIID, noteID int, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { - return s.createAwardEmojiOnNote(pid, awardMergeRequest, mergeRequestIID, noteID, opt, options...) -} - -// CreateSnippetAwardEmojiOnNote creates an award emoji on a note from a snippet. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/award_emoji.html#award-a-new-emoji-on-a-comment -func (s *AwardEmojiService) CreateSnippetAwardEmojiOnNote(pid interface{}, snippetIID, noteID int, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { - return s.createAwardEmojiOnNote(pid, awardSnippets, snippetIID, noteID, opt, options...) -} - -// createAwardEmojiOnNote awards a new emoji on a note. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/award_emoji.html#award-a-new-emoji-on-a-comment -func (s *AwardEmojiService) createAwardEmojiOnNote(pid interface{}, resource string, resourceID, noteID int, opt *CreateAwardEmojiOptions, options ...RequestOptionFunc) (*AwardEmoji, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/%s/%d/notes/%d/award_emoji", - PathEscape(project), - resource, - resourceID, - noteID, - ) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - a := new(AwardEmoji) - resp, err := s.client.Do(req, &a) - if err != nil { - return nil, resp, err - } - - return a, resp, nil -} - -// DeleteIssuesAwardEmojiOnNote deletes an award emoji on a note from an issue. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/award_emoji.html#delete-an-award-emoji-from-a-comment -func (s *AwardEmojiService) DeleteIssuesAwardEmojiOnNote(pid interface{}, issueID, noteID, awardID int, options ...RequestOptionFunc) (*Response, error) { - return s.deleteAwardEmojiOnNote(pid, awardIssue, issueID, noteID, awardID, options...) -} - -// DeleteMergeRequestAwardEmojiOnNote deletes an award emoji on a note from a -// merge request. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/award_emoji.html#delete-an-award-emoji-from-a-comment -func (s *AwardEmojiService) DeleteMergeRequestAwardEmojiOnNote(pid interface{}, mergeRequestIID, noteID, awardID int, options ...RequestOptionFunc) (*Response, error) { - return s.deleteAwardEmojiOnNote(pid, awardMergeRequest, mergeRequestIID, noteID, awardID, - options...) -} - -// DeleteSnippetAwardEmojiOnNote deletes an award emoji on a note from a snippet. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/award_emoji.html#delete-an-award-emoji-from-a-comment -func (s *AwardEmojiService) DeleteSnippetAwardEmojiOnNote(pid interface{}, snippetIID, noteID, awardID int, options ...RequestOptionFunc) (*Response, error) { - return s.deleteAwardEmojiOnNote(pid, awardSnippets, snippetIID, noteID, awardID, options...) 
-} - -func (s *AwardEmojiService) deleteAwardEmojiOnNote(pid interface{}, resource string, resourceID, noteID, awardID int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/%s/%d/notes/%d/award_emoji/%d", - PathEscape(project), - resource, - resourceID, - noteID, - awardID, - ) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/boards.go b/vendor/github.com/xanzy/go-gitlab/boards.go deleted file mode 100644 index 22e2cd7d9a..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/boards.go +++ /dev/null @@ -1,367 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" -) - -// IssueBoardsService handles communication with the issue board related -// methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html -type IssueBoardsService struct { - client *Client -} - -// IssueBoard represents a GitLab issue board. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html -type IssueBoard struct { - ID int `json:"id"` - Name string `json:"name"` - Project *Project `json:"project"` - Milestone *Milestone `json:"milestone"` - Assignee *struct { - ID int `json:"id"` - Username string `json:"username"` - Name string `json:"name"` - State string `json:"state"` - AvatarURL string `json:"avatar_url"` - WebURL string `json:"web_url"` - } `json:"assignee"` - Lists []*BoardList `json:"lists"` - Weight int `json:"weight"` - Labels []*LabelDetails `json:"labels"` -} - -func (b IssueBoard) String() string { - return Stringify(b) -} - -// BoardList represents a GitLab board list. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html -type BoardList struct { - ID int `json:"id"` - Assignee *struct { - ID int `json:"id"` - Name string `json:"name"` - Username string `json:"username"` - } `json:"assignee"` - Iteration *ProjectIteration `json:"iteration"` - Label *Label `json:"label"` - MaxIssueCount int `json:"max_issue_count"` - MaxIssueWeight int `json:"max_issue_weight"` - Milestone *Milestone `json:"milestone"` - Position int `json:"position"` -} - -func (b BoardList) String() string { - return Stringify(b) -} - -// CreateIssueBoardOptions represents the available CreateIssueBoard() options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html#create-an-issue-board -type CreateIssueBoardOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` -} - -// CreateIssueBoard creates a new issue board. 
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html#create-an-issue-board
-func (s *IssueBoardsService) CreateIssueBoard(pid interface{}, opt *CreateIssueBoardOptions, options ...RequestOptionFunc) (*IssueBoard, *Response, error) {
-    project, err := parseID(pid)
-    if err != nil {
-        return nil, nil, err
-    }
-    u := fmt.Sprintf("projects/%s/boards", PathEscape(project))
-
-    req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    board := new(IssueBoard)
-    resp, err := s.client.Do(req, board)
-    if err != nil {
-        return nil, resp, err
-    }
-
-    return board, resp, nil
-}
-
-// UpdateIssueBoardOptions represents the available UpdateIssueBoard() options.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html#update-an-issue-board
-type UpdateIssueBoardOptions struct {
-    Name        *string       `url:"name,omitempty" json:"name,omitempty"`
-    AssigneeID  *int          `url:"assignee_id,omitempty" json:"assignee_id,omitempty"`
-    MilestoneID *int          `url:"milestone_id,omitempty" json:"milestone_id,omitempty"`
-    Labels      *LabelOptions `url:"labels,omitempty" json:"labels,omitempty"`
-    Weight      *int          `url:"weight,omitempty" json:"weight,omitempty"`
-}
-
-// UpdateIssueBoard updates an issue board.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html#update-an-issue-board
-func (s *IssueBoardsService) UpdateIssueBoard(pid interface{}, board int, opt *UpdateIssueBoardOptions, options ...RequestOptionFunc) (*IssueBoard, *Response, error) {
-    project, err := parseID(pid)
-    if err != nil {
-        return nil, nil, err
-    }
-    u := fmt.Sprintf("projects/%s/boards/%d", PathEscape(project), board)
-
-    req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    is := new(IssueBoard)
-    resp, err := s.client.Do(req, is)
-    if err != nil {
-        return nil, resp, err
-    }
-
-    return is, resp, nil
-}
-
-// DeleteIssueBoard deletes an issue board.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html#delete-an-issue-board
-func (s *IssueBoardsService) DeleteIssueBoard(pid interface{}, board int, options ...RequestOptionFunc) (*Response, error) {
-    project, err := parseID(pid)
-    if err != nil {
-        return nil, err
-    }
-    u := fmt.Sprintf("projects/%s/boards/%d", PathEscape(project), board)
-
-    req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-    if err != nil {
-        return nil, err
-    }
-
-    return s.client.Do(req, nil)
-}
-
-// ListIssueBoardsOptions represents the available ListIssueBoards() options.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html#list-project-issue-boards
-type ListIssueBoardsOptions ListOptions
-
-// ListIssueBoards gets a list of all issue boards in a project.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html#list-project-issue-boards
-func (s *IssueBoardsService) ListIssueBoards(pid interface{}, opt *ListIssueBoardsOptions, options ...RequestOptionFunc) ([]*IssueBoard, *Response, error) {
-    project, err := parseID(pid)
-    if err != nil {
-        return nil, nil, err
-    }
-    u := fmt.Sprintf("projects/%s/boards", PathEscape(project))
-
-    req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    var is []*IssueBoard
-    resp, err := s.client.Do(req, &is)
-    if err != nil {
-        return nil, resp, err
-    }
-
-    return is, resp, nil
-}
-
-// GetIssueBoard gets a single issue board of a project.
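
As a reference for readers tracking this removal, here is a minimal sketch of how the boards service above was typically driven. The host, token, and project path are placeholders; the gitlab.NewClient constructor and the Boards accessor on gitlab.Client are assumed from the rest of this package, and error handling is trimmed to the essentials.

package main

import (
    "fmt"
    "log"

    gitlab "github.com/xanzy/go-gitlab"
)

func main() {
    // Placeholder host and token.
    git, err := gitlab.NewClient("glpat-example", gitlab.WithBaseURL("https://gitlab.example.com"))
    if err != nil {
        log.Fatal(err)
    }

    // pid may be a numeric project ID or a "group/project" path; parseID
    // above accepts both.
    boards, _, err := git.Boards.ListIssueBoards("my-group/my-project", &gitlab.ListIssueBoardsOptions{PerPage: 20})
    if err != nil {
        log.Fatal(err)
    }
    for _, b := range boards {
        fmt.Println(b.ID, b.Name)
    }
}
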
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html#show-a-single-issue-board -func (s *IssueBoardsService) GetIssueBoard(pid interface{}, board int, options ...RequestOptionFunc) (*IssueBoard, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/boards/%d", PathEscape(project), board) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ib := new(IssueBoard) - resp, err := s.client.Do(req, ib) - if err != nil { - return nil, resp, err - } - - return ib, resp, nil -} - -// GetIssueBoardListsOptions represents the available GetIssueBoardLists() options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html#list-board-lists-in-a-project-issue-board -type GetIssueBoardListsOptions ListOptions - -// GetIssueBoardLists gets a list of the issue board's lists. Does not include -// backlog and closed lists. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html#list-board-lists-in-a-project-issue-board -func (s *IssueBoardsService) GetIssueBoardLists(pid interface{}, board int, opt *GetIssueBoardListsOptions, options ...RequestOptionFunc) ([]*BoardList, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/boards/%d/lists", PathEscape(project), board) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var bl []*BoardList - resp, err := s.client.Do(req, &bl) - if err != nil { - return nil, resp, err - } - - return bl, resp, nil -} - -// GetIssueBoardList gets a single issue board list. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html#show-a-single-board-list -func (s *IssueBoardsService) GetIssueBoardList(pid interface{}, board, list int, options ...RequestOptionFunc) (*BoardList, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/boards/%d/lists/%d", - PathEscape(project), - board, - list, - ) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - bl := new(BoardList) - resp, err := s.client.Do(req, bl) - if err != nil { - return nil, resp, err - } - - return bl, resp, nil -} - -// CreateIssueBoardListOptions represents the available CreateIssueBoardList() -// options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html#create-a-board-list -type CreateIssueBoardListOptions struct { - LabelID *int `url:"label_id,omitempty" json:"label_id,omitempty"` - AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` - MilestoneID *int `url:"milestone_id,omitempty" json:"milestone_id,omitempty"` - IterationID *int `url:"iteration_id,omitempty" json:"iteration_id,omitempty"` -} - -// CreateIssueBoardList creates a new issue board list. 
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html#create-a-board-list -func (s *IssueBoardsService) CreateIssueBoardList(pid interface{}, board int, opt *CreateIssueBoardListOptions, options ...RequestOptionFunc) (*BoardList, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/boards/%d/lists", PathEscape(project), board) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - bl := new(BoardList) - resp, err := s.client.Do(req, bl) - if err != nil { - return nil, resp, err - } - - return bl, resp, nil -} - -// UpdateIssueBoardListOptions represents the available UpdateIssueBoardList() -// options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html#reorder-a-list-in-a-board -type UpdateIssueBoardListOptions struct { - Position *int `url:"position" json:"position"` -} - -// UpdateIssueBoardList updates the position of an existing issue board list. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/boards.html#reorder-a-list-in-a-board -func (s *IssueBoardsService) UpdateIssueBoardList(pid interface{}, board, list int, opt *UpdateIssueBoardListOptions, options ...RequestOptionFunc) (*BoardList, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/boards/%d/lists/%d", - PathEscape(project), - board, - list, - ) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - bl := new(BoardList) - resp, err := s.client.Do(req, bl) - if err != nil { - return nil, resp, err - } - - return bl, resp, nil -} - -// DeleteIssueBoardList soft deletes an issue board list. Only for admins and -// project owners. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/boards.html#delete-a-board-list-from-a-board -func (s *IssueBoardsService) DeleteIssueBoardList(pid interface{}, board, list int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/boards/%d/lists/%d", - PathEscape(project), - board, - list, - ) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/branches.go b/vendor/github.com/xanzy/go-gitlab/branches.go deleted file mode 100644 index 2ff5b81ea8..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/branches.go +++ /dev/null @@ -1,252 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "net/url" -) - -// BranchesService handles communication with the branch related methods -// of the GitLab API. 
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/branches.html -type BranchesService struct { - client *Client -} - -// Branch represents a GitLab branch. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/branches.html -type Branch struct { - Commit *Commit `json:"commit"` - Name string `json:"name"` - Protected bool `json:"protected"` - Merged bool `json:"merged"` - Default bool `json:"default"` - CanPush bool `json:"can_push"` - DevelopersCanPush bool `json:"developers_can_push"` - DevelopersCanMerge bool `json:"developers_can_merge"` - WebURL string `json:"web_url"` -} - -func (b Branch) String() string { - return Stringify(b) -} - -// ListBranchesOptions represents the available ListBranches() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/branches.html#list-repository-branches -type ListBranchesOptions struct { - ListOptions - Search *string `url:"search,omitempty" json:"search,omitempty"` - Regex *string `url:"regex,omitempty" json:"regex,omitempty"` -} - -// ListBranches gets a list of repository branches from a project, sorted by -// name alphabetically. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/branches.html#list-repository-branches -func (s *BranchesService) ListBranches(pid interface{}, opts *ListBranchesOptions, options ...RequestOptionFunc) ([]*Branch, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/branches", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opts, options) - if err != nil { - return nil, nil, err - } - - var b []*Branch - resp, err := s.client.Do(req, &b) - if err != nil { - return nil, resp, err - } - - return b, resp, nil -} - -// GetBranch gets a single project repository branch. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/branches.html#get-single-repository-branch -func (s *BranchesService) GetBranch(pid interface{}, branch string, options ...RequestOptionFunc) (*Branch, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/branches/%s", PathEscape(project), url.PathEscape(branch)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - b := new(Branch) - resp, err := s.client.Do(req, b) - if err != nil { - return nil, resp, err - } - - return b, resp, nil -} - -// ProtectBranchOptions represents the available ProtectBranch() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/branches.html#protect-repository-branch -type ProtectBranchOptions struct { - DevelopersCanPush *bool `url:"developers_can_push,omitempty" json:"developers_can_push,omitempty"` - DevelopersCanMerge *bool `url:"developers_can_merge,omitempty" json:"developers_can_merge,omitempty"` -} - -// ProtectBranch protects a single project repository branch. This is an -// idempotent function, protecting an already protected repository branch -// still returns a 200 OK status code. 
-//
-// Deprecated: This endpoint has been replaced by
-// ProtectedBranchesService.ProtectRepositoryBranches()
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/branches.html#protect-repository-branch
-func (s *BranchesService) ProtectBranch(pid interface{}, branch string, opts *ProtectBranchOptions, options ...RequestOptionFunc) (*Branch, *Response, error) {
-    project, err := parseID(pid)
-    if err != nil {
-        return nil, nil, err
-    }
-    u := fmt.Sprintf("projects/%s/repository/branches/%s/protect", PathEscape(project), url.PathEscape(branch))
-
-    req, err := s.client.NewRequest(http.MethodPut, u, opts, options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    b := new(Branch)
-    resp, err := s.client.Do(req, b)
-    if err != nil {
-        return nil, resp, err
-    }
-
-    return b, resp, nil
-}
-
-// UnprotectBranch unprotects a single project repository branch. This is an
-// idempotent function, unprotecting an already unprotected repository branch
-// still returns a 200 OK status code.
-//
-// Deprecated: This endpoint has been replaced by
-// ProtectedBranchesService.UnprotectRepositoryBranches()
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/branches.html#unprotect-repository-branch
-func (s *BranchesService) UnprotectBranch(pid interface{}, branch string, options ...RequestOptionFunc) (*Branch, *Response, error) {
-    project, err := parseID(pid)
-    if err != nil {
-        return nil, nil, err
-    }
-    u := fmt.Sprintf("projects/%s/repository/branches/%s/unprotect", PathEscape(project), url.PathEscape(branch))
-
-    req, err := s.client.NewRequest(http.MethodPut, u, nil, options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    b := new(Branch)
-    resp, err := s.client.Do(req, b)
-    if err != nil {
-        return nil, resp, err
-    }
-
-    return b, resp, nil
-}
-
-// CreateBranchOptions represents the available CreateBranch() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/branches.html#create-repository-branch
-type CreateBranchOptions struct {
-    Branch *string `url:"branch,omitempty" json:"branch,omitempty"`
-    Ref    *string `url:"ref,omitempty" json:"ref,omitempty"`
-}
-
-// CreateBranch creates a branch from a commit SHA or an existing branch.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/branches.html#create-repository-branch
-func (s *BranchesService) CreateBranch(pid interface{}, opt *CreateBranchOptions, options ...RequestOptionFunc) (*Branch, *Response, error) {
-    project, err := parseID(pid)
-    if err != nil {
-        return nil, nil, err
-    }
-    u := fmt.Sprintf("projects/%s/repository/branches", PathEscape(project))
-
-    req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    b := new(Branch)
-    resp, err := s.client.Do(req, b)
-    if err != nil {
-        return nil, resp, err
-    }
-
-    return b, resp, nil
-}
-
-// DeleteBranch deletes an existing branch.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/branches.html#delete-repository-branch
-func (s *BranchesService) DeleteBranch(pid interface{}, branch string, options ...RequestOptionFunc) (*Response, error) {
-    project, err := parseID(pid)
-    if err != nil {
-        return nil, err
-    }
-    u := fmt.Sprintf("projects/%s/repository/branches/%s", PathEscape(project), url.PathEscape(branch))
-
-    req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-    if err != nil {
-        return nil, err
-    }
-
-    return s.client.Do(req, nil)
-}
-
-// DeleteMergedBranches deletes all branches that are merged into the project's default branch.
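
A minimal sketch of the branch lifecycle covered by the functions above: create a branch from main, then delete it. Host, token, and project path are placeholders; gitlab.NewClient, gitlab.String, and the Branches accessor on gitlab.Client are assumed from the rest of this package.

package main

import (
    "fmt"
    "log"

    gitlab "github.com/xanzy/go-gitlab"
)

func main() {
    git, err := gitlab.NewClient("glpat-example", gitlab.WithBaseURL("https://gitlab.example.com")) // placeholders
    if err != nil {
        log.Fatal(err)
    }

    // Cut a branch from main, then remove it again.
    b, _, err := git.Branches.CreateBranch("my-group/my-project", &gitlab.CreateBranchOptions{
        Branch: gitlab.String("feature/docs"),
        Ref:    gitlab.String("main"),
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("created", b.Name, "at", b.Commit.ID)

    if _, err := git.Branches.DeleteBranch("my-group/my-project", b.Name); err != nil {
        log.Fatal(err)
    }
}
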
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/branches.html#delete-merged-branches
-func (s *BranchesService) DeleteMergedBranches(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
-    project, err := parseID(pid)
-    if err != nil {
-        return nil, err
-    }
-    u := fmt.Sprintf("projects/%s/repository/merged_branches", PathEscape(project))
-
-    req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-    if err != nil {
-        return nil, err
-    }
-
-    return s.client.Do(req, nil)
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/broadcast_messages.go b/vendor/github.com/xanzy/go-gitlab/broadcast_messages.go
deleted file mode 100644
index 3d0c61d9fc..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/broadcast_messages.go
+++ /dev/null
@@ -1,191 +0,0 @@
-//
-// Copyright 2021, Sander van Harmelen
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package gitlab
-
-import (
-    "fmt"
-    "net/http"
-    "time"
-)
-
-// BroadcastMessagesService handles communication with the broadcast
-// messages methods of the GitLab API.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/broadcast_messages.html
-type BroadcastMessagesService struct {
-    client *Client
-}
-
-// BroadcastMessage represents a GitLab broadcast message.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/broadcast_messages.html#get-all-broadcast-messages
-type BroadcastMessage struct {
-    Message            string             `json:"message"`
-    StartsAt           *time.Time         `json:"starts_at"`
-    EndsAt             *time.Time         `json:"ends_at"`
-    Font               string             `json:"font"`
-    ID                 int                `json:"id"`
-    Active             bool               `json:"active"`
-    TargetAccessLevels []AccessLevelValue `json:"target_access_levels"`
-    TargetPath         string             `json:"target_path"`
-    BroadcastType      string             `json:"broadcast_type"`
-    Dismissable        bool               `json:"dismissable"`
-
-    // Deprecated: This parameter was removed in GitLab 15.6.
-    Color string `json:"color"`
-}
-
-// ListBroadcastMessagesOptions represents the available ListBroadcastMessages()
-// options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/broadcast_messages.html#get-all-broadcast-messages
-type ListBroadcastMessagesOptions ListOptions
-
-// ListBroadcastMessages gets a list of all broadcasted messages.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/broadcast_messages.html#get-all-broadcast-messages
-func (s *BroadcastMessagesService) ListBroadcastMessages(opt *ListBroadcastMessagesOptions, options ...RequestOptionFunc) ([]*BroadcastMessage, *Response, error) {
-    req, err := s.client.NewRequest(http.MethodGet, "broadcast_messages", opt, options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    var bs []*BroadcastMessage
-    resp, err := s.client.Do(req, &bs)
-    if err != nil {
-        return nil, resp, err
-    }
-
-    return bs, resp, nil
-}
-
-// GetBroadcastMessage gets a single broadcast message.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/broadcast_messages.html#get-a-specific-broadcast-message
-func (s *BroadcastMessagesService) GetBroadcastMessage(broadcast int, options ...RequestOptionFunc) (*BroadcastMessage, *Response, error) {
-    u := fmt.Sprintf("broadcast_messages/%d", broadcast)
-
-    req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    b := new(BroadcastMessage)
-    resp, err := s.client.Do(req, &b)
-    if err != nil {
-        return nil, resp, err
-    }
-
-    return b, resp, nil
-}
-
-// CreateBroadcastMessageOptions represents the available CreateBroadcastMessage()
-// options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/broadcast_messages.html#create-a-broadcast-message
-type CreateBroadcastMessageOptions struct {
-    Message            *string            `url:"message" json:"message"`
-    StartsAt           *time.Time         `url:"starts_at,omitempty" json:"starts_at,omitempty"`
-    EndsAt             *time.Time         `url:"ends_at,omitempty" json:"ends_at,omitempty"`
-    Font               *string            `url:"font,omitempty" json:"font,omitempty"`
-    TargetAccessLevels []AccessLevelValue `url:"target_access_levels,omitempty" json:"target_access_levels,omitempty"`
-    TargetPath         *string            `url:"target_path,omitempty" json:"target_path,omitempty"`
-    BroadcastType      *string            `url:"broadcast_type,omitempty" json:"broadcast_type,omitempty"`
-    Dismissable        *bool              `url:"dismissable,omitempty" json:"dismissable,omitempty"`
-
-    // Deprecated: This parameter was removed in GitLab 15.6.
-    Color *string `url:"color,omitempty" json:"color,omitempty"`
-}
-
-// CreateBroadcastMessage creates a message to broadcast.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/broadcast_messages.html#create-a-broadcast-message
-func (s *BroadcastMessagesService) CreateBroadcastMessage(opt *CreateBroadcastMessageOptions, options ...RequestOptionFunc) (*BroadcastMessage, *Response, error) {
-    req, err := s.client.NewRequest(http.MethodPost, "broadcast_messages", opt, options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    b := new(BroadcastMessage)
-    resp, err := s.client.Do(req, &b)
-    if err != nil {
-        return nil, resp, err
-    }
-
-    return b, resp, nil
-}
-
-// UpdateBroadcastMessageOptions represents the available UpdateBroadcastMessage()
-// options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/broadcast_messages.html#update-a-broadcast-message
-type UpdateBroadcastMessageOptions struct {
-    Message            *string            `url:"message,omitempty" json:"message,omitempty"`
-    StartsAt           *time.Time         `url:"starts_at,omitempty" json:"starts_at,omitempty"`
-    EndsAt             *time.Time         `url:"ends_at,omitempty" json:"ends_at,omitempty"`
-    Font               *string            `url:"font,omitempty" json:"font,omitempty"`
-    TargetAccessLevels []AccessLevelValue `url:"target_access_levels,omitempty" json:"target_access_levels,omitempty"`
-    TargetPath         *string            `url:"target_path,omitempty" json:"target_path,omitempty"`
-    BroadcastType      *string            `url:"broadcast_type,omitempty" json:"broadcast_type,omitempty"`
-    Dismissable        *bool              `url:"dismissable,omitempty" json:"dismissable,omitempty"`
-
-    // Deprecated: This parameter was removed in GitLab 15.6.
-    Color *string `url:"color,omitempty" json:"color,omitempty"`
-}
-
-// UpdateBroadcastMessage updates a broadcasted message.
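
For reference, creating a broadcast message with the options type above might look like the sketch below. The broadcast messages API generally requires an administrator token; host and token are placeholders, and the BroadcastMessage accessor on gitlab.Client is assumed from the package's client wiring.

package main

import (
    "fmt"
    "log"
    "time"

    gitlab "github.com/xanzy/go-gitlab"
)

func main() {
    git, err := gitlab.NewClient("glpat-admin-example", gitlab.WithBaseURL("https://gitlab.example.com")) // placeholders
    if err != nil {
        log.Fatal(err)
    }

    starts := time.Now().Add(time.Hour)
    ends := starts.Add(2 * time.Hour)
    msg, _, err := git.BroadcastMessage.CreateBroadcastMessage(&gitlab.CreateBroadcastMessageOptions{
        Message:  gitlab.String("Maintenance window starting soon"),
        StartsAt: &starts,
        EndsAt:   &ends,
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("created broadcast", msg.ID)
}
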
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/broadcast_messages.html#update-a-broadcast-message
-func (s *BroadcastMessagesService) UpdateBroadcastMessage(broadcast int, opt *UpdateBroadcastMessageOptions, options ...RequestOptionFunc) (*BroadcastMessage, *Response, error) {
-    u := fmt.Sprintf("broadcast_messages/%d", broadcast)
-
-    req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    b := new(BroadcastMessage)
-    resp, err := s.client.Do(req, &b)
-    if err != nil {
-        return nil, resp, err
-    }
-
-    return b, resp, nil
-}
-
-// DeleteBroadcastMessage deletes a broadcasted message.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/broadcast_messages.html#delete-a-broadcast-message
-func (s *BroadcastMessagesService) DeleteBroadcastMessage(broadcast int, options ...RequestOptionFunc) (*Response, error) {
-    u := fmt.Sprintf("broadcast_messages/%d", broadcast)
-
-    req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-    if err != nil {
-        return nil, err
-    }
-
-    return s.client.Do(req, nil)
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/ci_yml_templates.go b/vendor/github.com/xanzy/go-gitlab/ci_yml_templates.go
deleted file mode 100644
index 992fe11802..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/ci_yml_templates.go
+++ /dev/null
@@ -1,95 +0,0 @@
-//
-// Copyright 2021, Sander van Harmelen
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package gitlab
-
-import (
-    "fmt"
-    "net/http"
-)
-
-// CIYMLTemplatesService handles communication with the GitLab
-// CI YML templates related methods of the GitLab API.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/templates/gitlab_ci_ymls.html
-type CIYMLTemplatesService struct {
-    client *Client
-}
-
-// CIYMLTemplate represents a GitLab CI YML template.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/templates/gitlab_ci_ymls.html
-type CIYMLTemplate struct {
-    Name    string `json:"name"`
-    Content string `json:"content"`
-}
-
-// CIYMLTemplateListItem represents a GitLab CI YML template from the list.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/templates/gitlab_ci_ymls.html
-type CIYMLTemplateListItem struct {
-    Key  string `json:"key"`
-    Name string `json:"name"`
-}
-
-// ListCIYMLTemplatesOptions represents the available ListAllTemplates() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/templates/gitlab_ci_ymls.html#list-gitlab-ci-yaml-templates
-type ListCIYMLTemplatesOptions ListOptions
-
-// ListAllTemplates gets all GitLab CI YML templates.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/templates/gitlab_ci_ymls.html#list-gitlab-ci-yaml-templates
-func (s *CIYMLTemplatesService) ListAllTemplates(opt *ListCIYMLTemplatesOptions, options ...RequestOptionFunc) ([]*CIYMLTemplateListItem, *Response, error) {
-    req, err := s.client.NewRequest(http.MethodGet, "templates/gitlab_ci_ymls", opt, options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    var cts []*CIYMLTemplateListItem
-    resp, err := s.client.Do(req, &cts)
-    if err != nil {
-        return nil, resp, err
-    }
-
-    return cts, resp, nil
-}
-
-// GetTemplate gets a single GitLab CI YML template.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/templates/gitlab_ci_ymls.html#single-gitlab-ci-yaml-template
-func (s *CIYMLTemplatesService) GetTemplate(key string, options ...RequestOptionFunc) (*CIYMLTemplate, *Response, error) {
-    u := fmt.Sprintf("templates/gitlab_ci_ymls/%s", PathEscape(key))
-
-    req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    ct := new(CIYMLTemplate)
-    resp, err := s.client.Do(req, ct)
-    if err != nil {
-        return nil, resp, err
-    }
-
-    return ct, resp, nil
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/client_options.go b/vendor/github.com/xanzy/go-gitlab/client_options.go
deleted file mode 100644
index 2ff7bab9bf..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/client_options.go
+++ /dev/null
@@ -1,142 +0,0 @@
-//
-// Copyright 2021, Sander van Harmelen
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package gitlab
-
-import (
-    "net/http"
-    "time"
-
-    retryablehttp "github.com/hashicorp/go-retryablehttp"
-)
-
-// ClientOptionFunc can be used to customize a new GitLab API client.
-type ClientOptionFunc func(*Client) error
-
-// WithBaseURL sets the base URL for API requests to a custom endpoint.
-func WithBaseURL(urlStr string) ClientOptionFunc {
-    return func(c *Client) error {
-        return c.setBaseURL(urlStr)
-    }
-}
-
-// WithCustomBackoff can be used to configure a custom backoff policy.
-func WithCustomBackoff(backoff retryablehttp.Backoff) ClientOptionFunc {
-    return func(c *Client) error {
-        c.client.Backoff = backoff
-        return nil
-    }
-}
-
-// WithCustomLeveledLogger can be used to configure a custom retryablehttp
-// leveled logger.
-func WithCustomLeveledLogger(leveledLogger retryablehttp.LeveledLogger) ClientOptionFunc {
-    return func(c *Client) error {
-        c.client.Logger = leveledLogger
-        return nil
-    }
-}
-
-// WithCustomLimiter injects a custom rate limiter to the client.
-func WithCustomLimiter(limiter RateLimiter) ClientOptionFunc {
-    return func(c *Client) error {
-        c.configureLimiterOnce.Do(func() {})
-        c.limiter = limiter
-        return nil
-    }
-}
-
-// WithCustomLogger can be used to configure a custom retryablehttp logger.
-func WithCustomLogger(logger retryablehttp.Logger) ClientOptionFunc {
-    return func(c *Client) error {
-        c.client.Logger = logger
-        return nil
-    }
-}
-
-// WithCustomRetry can be used to configure a custom retry policy.
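
The options in this file compose at client construction time. A sketch follows, using the retry options defined in this same file; the host and token are placeholders, and gitlab.NewClient is assumed from the rest of this package.

package main

import (
    "log"
    "time"

    gitlab "github.com/xanzy/go-gitlab"
)

func main() {
    // Placeholder host and token; each ClientOptionFunc runs once, in order,
    // against the new client.
    git, err := gitlab.NewClient("glpat-example",
        gitlab.WithBaseURL("https://gitlab.example.com"),
        gitlab.WithCustomRetryMax(5),
        gitlab.WithCustomRetryWaitMinMax(time.Second, 30*time.Second),
    )
    if err != nil {
        log.Fatal(err)
    }
    _ = git // the configured client is then used as usual
}
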
-func WithCustomRetry(checkRetry retryablehttp.CheckRetry) ClientOptionFunc { - return func(c *Client) error { - c.client.CheckRetry = checkRetry - return nil - } -} - -// WithCustomRetryMax can be used to configure a custom maximum number of retries. -func WithCustomRetryMax(retryMax int) ClientOptionFunc { - return func(c *Client) error { - c.client.RetryMax = retryMax - return nil - } -} - -// WithCustomRetryWaitMinMax can be used to configure a custom minimum and -// maximum time to wait between retries. -func WithCustomRetryWaitMinMax(waitMin, waitMax time.Duration) ClientOptionFunc { - return func(c *Client) error { - c.client.RetryWaitMin = waitMin - c.client.RetryWaitMax = waitMax - return nil - } -} - -// WithErrorHandler can be used to configure a custom error handler. -func WithErrorHandler(handler retryablehttp.ErrorHandler) ClientOptionFunc { - return func(c *Client) error { - c.client.ErrorHandler = handler - return nil - } -} - -// WithHTTPClient can be used to configure a custom HTTP client. -func WithHTTPClient(httpClient *http.Client) ClientOptionFunc { - return func(c *Client) error { - c.client.HTTPClient = httpClient - return nil - } -} - -// WithRequestLogHook can be used to configure a custom request log hook. -func WithRequestLogHook(hook retryablehttp.RequestLogHook) ClientOptionFunc { - return func(c *Client) error { - c.client.RequestLogHook = hook - return nil - } -} - -// WithResponseLogHook can be used to configure a custom response log hook. -func WithResponseLogHook(hook retryablehttp.ResponseLogHook) ClientOptionFunc { - return func(c *Client) error { - c.client.ResponseLogHook = hook - return nil - } -} - -// WithoutRetries disables the default retry logic. -func WithoutRetries() ClientOptionFunc { - return func(c *Client) error { - c.disableRetries = true - return nil - } -} - -// WithRequestOptions can be used to configure default request options applied to every request. -func WithRequestOptions(options ...RequestOptionFunc) ClientOptionFunc { - return func(c *Client) error { - c.defaultRequestOptions = append(c.defaultRequestOptions, options...) - return nil - } -} diff --git a/vendor/github.com/xanzy/go-gitlab/cluster_agents.go b/vendor/github.com/xanzy/go-gitlab/cluster_agents.go deleted file mode 100644 index 1153feae68..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/cluster_agents.go +++ /dev/null @@ -1,294 +0,0 @@ -// -// Copyright 2022, Timo Furrer -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// ClusterAgentsService handles communication with the cluster agents related -// methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/cluster_agents.html -type ClusterAgentsService struct { - client *Client -} - -// Agent represents a GitLab agent for Kubernetes. 
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/cluster_agents.html
-type Agent struct {
-    ID              int           `json:"id"`
-    Name            string        `json:"name"`
-    CreatedAt       *time.Time    `json:"created_at"`
-    CreatedByUserID int           `json:"created_by_user_id"`
-    ConfigProject   ConfigProject `json:"config_project"`
-}
-
-type ConfigProject struct {
-    ID                int        `json:"id"`
-    Description       string     `json:"description"`
-    Name              string     `json:"name"`
-    NameWithNamespace string     `json:"name_with_namespace"`
-    Path              string     `json:"path"`
-    PathWithNamespace string     `json:"path_with_namespace"`
-    CreatedAt         *time.Time `json:"created_at"`
-}
-
-func (a Agent) String() string {
-    return Stringify(a)
-}
-
-// AgentToken represents a GitLab agent token.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/cluster_agents.html#list-tokens-for-an-agent
-type AgentToken struct {
-    ID              int        `json:"id"`
-    Name            string     `json:"name"`
-    Description     string     `json:"description"`
-    AgentID         int        `json:"agent_id"`
-    Status          string     `json:"status"`
-    CreatedAt       *time.Time `json:"created_at"`
-    CreatedByUserID int        `json:"created_by_user_id"`
-    LastUsedAt      *time.Time `json:"last_used_at"`
-    Token           string     `json:"token"`
-}
-
-func (a AgentToken) String() string {
-    return Stringify(a)
-}
-
-// ListAgentsOptions represents the available ListAgents() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/cluster_agents.html#list-the-agents-for-a-project
-type ListAgentsOptions ListOptions
-
-// ListAgents returns a list of agents registered for the project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/cluster_agents.html#list-the-agents-for-a-project
-func (s *ClusterAgentsService) ListAgents(pid interface{}, opt *ListAgentsOptions, options ...RequestOptionFunc) ([]*Agent, *Response, error) {
-    project, err := parseID(pid)
-    if err != nil {
-        return nil, nil, err
-    }
-    uri := fmt.Sprintf("projects/%s/cluster_agents", PathEscape(project))
-
-    req, err := s.client.NewRequest(http.MethodGet, uri, opt, options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    var as []*Agent
-    resp, err := s.client.Do(req, &as)
-    if err != nil {
-        return nil, resp, err
-    }
-
-    return as, resp, nil
-}
-
-// GetAgent gets the details of a single agent.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/cluster_agents.html#get-details-about-an-agent
-func (s *ClusterAgentsService) GetAgent(pid interface{}, id int, options ...RequestOptionFunc) (*Agent, *Response, error) {
-    project, err := parseID(pid)
-    if err != nil {
-        return nil, nil, err
-    }
-    uri := fmt.Sprintf("projects/%s/cluster_agents/%d", PathEscape(project), id)
-
-    req, err := s.client.NewRequest(http.MethodGet, uri, nil, options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    a := new(Agent)
-    resp, err := s.client.Do(req, a)
-    if err != nil {
-        return nil, resp, err
-    }
-
-    return a, resp, nil
-}
-
-// RegisterAgentOptions represents the available RegisterAgent()
-// options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/cluster_agents.html#register-an-agent-with-a-project
-type RegisterAgentOptions struct {
-    Name *string `url:"name,omitempty" json:"name,omitempty"`
-}
-
-// RegisterAgent registers an agent to the project.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/cluster_agents.html#register-an-agent-with-a-project -func (s *ClusterAgentsService) RegisterAgent(pid interface{}, opt *RegisterAgentOptions, options ...RequestOptionFunc) (*Agent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - uri := fmt.Sprintf("projects/%s/cluster_agents", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, uri, opt, options) - if err != nil { - return nil, nil, err - } - - a := new(Agent) - resp, err := s.client.Do(req, a) - if err != nil { - return nil, resp, err - } - - return a, resp, nil -} - -// DeleteAgent deletes an existing agent registration. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/cluster_agents.html#delete-a-registered-agent -func (s *ClusterAgentsService) DeleteAgent(pid interface{}, id int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - uri := fmt.Sprintf("projects/%s/cluster_agents/%d", PathEscape(project), id) - - req, err := s.client.NewRequest(http.MethodDelete, uri, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// ListAgentTokensOptions represents the available ListAgentTokens() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/cluster_agents.html#list-tokens-for-an-agent -type ListAgentTokensOptions ListOptions - -// ListAgentTokens returns a list of tokens for an agent. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/cluster_agents.html#list-tokens-for-an-agent -func (s *ClusterAgentsService) ListAgentTokens(pid interface{}, aid int, opt *ListAgentTokensOptions, options ...RequestOptionFunc) ([]*AgentToken, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - uri := fmt.Sprintf("projects/%s/cluster_agents/%d/tokens", PathEscape(project), aid) - - req, err := s.client.NewRequest(http.MethodGet, uri, opt, options) - if err != nil { - return nil, nil, err - } - - var ats []*AgentToken - resp, err := s.client.Do(req, &ats) - if err != nil { - return nil, resp, err - } - - return ats, resp, nil -} - -// GetAgentToken gets a single agent token. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/cluster_agents.html#get-a-single-agent-token -func (s *ClusterAgentsService) GetAgentToken(pid interface{}, aid int, id int, options ...RequestOptionFunc) (*AgentToken, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - uri := fmt.Sprintf("projects/%s/cluster_agents/%d/tokens/%d", PathEscape(project), aid, id) - - req, err := s.client.NewRequest(http.MethodGet, uri, nil, options) - if err != nil { - return nil, nil, err - } - - at := new(AgentToken) - resp, err := s.client.Do(req, at) - if err != nil { - return nil, resp, err - } - - return at, resp, nil -} - -// CreateAgentTokenOptions represents the available CreateAgentToken() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/cluster_agents.html#create-an-agent-token -type CreateAgentTokenOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` -} - -// CreateAgentToken creates a new token for an agent. 
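
A sketch of the typical agent bootstrap flow with the functions above and the token-creation call that follows: register an agent, then mint a token for it. Host, token, and project path are placeholders; gitlab.NewClient, gitlab.String, and the ClusterAgents accessor on gitlab.Client are assumed from the rest of this package.

package main

import (
    "fmt"
    "log"

    gitlab "github.com/xanzy/go-gitlab"
)

func main() {
    git, err := gitlab.NewClient("glpat-example", gitlab.WithBaseURL("https://gitlab.example.com")) // placeholders
    if err != nil {
        log.Fatal(err)
    }

    agent, _, err := git.ClusterAgents.RegisterAgent("my-group/my-project", &gitlab.RegisterAgentOptions{
        Name: gitlab.String("prod-cluster"),
    })
    if err != nil {
        log.Fatal(err)
    }

    tok, _, err := git.ClusterAgents.CreateAgentToken("my-group/my-project", agent.ID, &gitlab.CreateAgentTokenOptions{
        Name:        gitlab.String("bootstrap"),
        Description: gitlab.String("token used to install the agent"),
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(tok.Token) // the secret is typically only returned on creation
}
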
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/cluster_agents.html#create-an-agent-token
-func (s *ClusterAgentsService) CreateAgentToken(pid interface{}, aid int, opt *CreateAgentTokenOptions, options ...RequestOptionFunc) (*AgentToken, *Response, error) {
-    project, err := parseID(pid)
-    if err != nil {
-        return nil, nil, err
-    }
-    uri := fmt.Sprintf("projects/%s/cluster_agents/%d/tokens", PathEscape(project), aid)
-
-    req, err := s.client.NewRequest(http.MethodPost, uri, opt, options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    at := new(AgentToken)
-    resp, err := s.client.Do(req, at)
-    if err != nil {
-        return nil, resp, err
-    }
-
-    return at, resp, nil
-}
-
-// RevokeAgentToken revokes an agent token.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/cluster_agents.html#revoke-an-agent-token
-func (s *ClusterAgentsService) RevokeAgentToken(pid interface{}, aid int, id int, options ...RequestOptionFunc) (*Response, error) {
-    project, err := parseID(pid)
-    if err != nil {
-        return nil, err
-    }
-    uri := fmt.Sprintf("projects/%s/cluster_agents/%d/tokens/%d", PathEscape(project), aid, id)
-
-    req, err := s.client.NewRequest(http.MethodDelete, uri, nil, options)
-    if err != nil {
-        return nil, err
-    }
-
-    return s.client.Do(req, nil)
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/commits.go b/vendor/github.com/xanzy/go-gitlab/commits.go
deleted file mode 100644
index c1a9ef3d60..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/commits.go
+++ /dev/null
@@ -1,610 +0,0 @@
-//
-// Copyright 2021, Sander van Harmelen
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package gitlab
-
-import (
-    "fmt"
-    "net/http"
-    "net/url"
-    "time"
-)
-
-// CommitsService handles communication with the commit related methods
-// of the GitLab API.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html
-type CommitsService struct {
-    client *Client
-}
-
-// Commit represents a GitLab commit.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html
-type Commit struct {
-    ID               string            `json:"id"`
-    ShortID          string            `json:"short_id"`
-    Title            string            `json:"title"`
-    AuthorName       string            `json:"author_name"`
-    AuthorEmail      string            `json:"author_email"`
-    AuthoredDate     *time.Time        `json:"authored_date"`
-    CommitterName    string            `json:"committer_name"`
-    CommitterEmail   string            `json:"committer_email"`
-    CommittedDate    *time.Time        `json:"committed_date"`
-    CreatedAt        *time.Time        `json:"created_at"`
-    Message          string            `json:"message"`
-    ParentIDs        []string          `json:"parent_ids"`
-    Stats            *CommitStats      `json:"stats"`
-    Status           *BuildStateValue  `json:"status"`
-    LastPipeline     *PipelineInfo     `json:"last_pipeline"`
-    ProjectID        int               `json:"project_id"`
-    Trailers         map[string]string `json:"trailers"`
-    ExtendedTrailers map[string]string `json:"extended_trailers"`
-    WebURL           string            `json:"web_url"`
-}
-
-// CommitStats represents the number of added and deleted lines in a commit.
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html -type CommitStats struct { - Additions int `json:"additions"` - Deletions int `json:"deletions"` - Total int `json:"total"` -} - -func (c Commit) String() string { - return Stringify(c) -} - -// ListCommitsOptions represents the available ListCommits() options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#list-repository-commits -type ListCommitsOptions struct { - ListOptions - RefName *string `url:"ref_name,omitempty" json:"ref_name,omitempty"` - Since *time.Time `url:"since,omitempty" json:"since,omitempty"` - Until *time.Time `url:"until,omitempty" json:"until,omitempty"` - Path *string `url:"path,omitempty" json:"path,omitempty"` - Author *string `url:"author,omitempty" json:"author,omitempty"` - All *bool `url:"all,omitempty" json:"all,omitempty"` - WithStats *bool `url:"with_stats,omitempty" json:"with_stats,omitempty"` - FirstParent *bool `url:"first_parent,omitempty" json:"first_parent,omitempty"` - Trailers *bool `url:"trailers,omitempty" json:"trailers,omitempty"` -} - -// ListCommits gets a list of repository commits in a project. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#list-repository-commits -func (s *CommitsService) ListCommits(pid interface{}, opt *ListCommitsOptions, options ...RequestOptionFunc) ([]*Commit, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/commits", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var c []*Commit - resp, err := s.client.Do(req, &c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} - -// CommitRef represents the reference of branches/tags in a commit. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/commits.html#get-references-a-commit-is-pushed-to -type CommitRef struct { - Type string `json:"type"` - Name string `json:"name"` -} - -// GetCommitRefsOptions represents the available GetCommitRefs() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/commits.html#get-references-a-commit-is-pushed-to -type GetCommitRefsOptions struct { - ListOptions - Type *string `url:"type,omitempty" json:"type,omitempty"` -} - -// GetCommitRefs gets all references (from branches or tags) a commit is pushed to -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/commits.html#get-references-a-commit-is-pushed-to -func (s *CommitsService) GetCommitRefs(pid interface{}, sha string, opt *GetCommitRefsOptions, options ...RequestOptionFunc) ([]*CommitRef, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/commits/%s/refs", PathEscape(project), url.PathEscape(sha)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var cs []*CommitRef - resp, err := s.client.Do(req, &cs) - if err != nil { - return nil, resp, err - } - - return cs, resp, nil -} - -// GetCommitOptions represents the available GetCommit() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/commits.html#get-a-single-commit -type GetCommitOptions struct { - Stats *bool `url:"stats,omitempty" json:"stats,omitempty"` -} - -// GetCommit gets a specific commit identified by the commit hash or name of a -// branch or tag. 
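
For reference, a sketch of listing the last week of commits with per-commit stats via ListCommits above. Host, token, and project path are placeholders; gitlab.NewClient, gitlab.String, gitlab.Bool, and the Commits accessor on gitlab.Client are assumed, and Stats is only populated when WithStats is set.

package main

import (
    "fmt"
    "log"
    "time"

    gitlab "github.com/xanzy/go-gitlab"
)

func main() {
    git, err := gitlab.NewClient("glpat-example", gitlab.WithBaseURL("https://gitlab.example.com")) // placeholders
    if err != nil {
        log.Fatal(err)
    }

    since := time.Now().AddDate(0, 0, -7)
    commits, _, err := git.Commits.ListCommits("my-group/my-project", &gitlab.ListCommitsOptions{
        RefName:   gitlab.String("main"),
        Since:     &since,
        WithStats: gitlab.Bool(true),
    })
    if err != nil {
        log.Fatal(err)
    }
    for _, c := range commits {
        if c.Stats != nil { // stats are attached only when WithStats is set
            fmt.Printf("%s %s (+%d/-%d)\n", c.ShortID, c.Title, c.Stats.Additions, c.Stats.Deletions)
        }
    }
}
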
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#get-a-single-commit -func (s *CommitsService) GetCommit(pid interface{}, sha string, opt *GetCommitOptions, options ...RequestOptionFunc) (*Commit, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - if sha == "" { - return nil, nil, fmt.Errorf("SHA must be a non-empty string") - } - u := fmt.Sprintf("projects/%s/repository/commits/%s", PathEscape(project), url.PathEscape(sha)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - c := new(Commit) - resp, err := s.client.Do(req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} - -// CreateCommitOptions represents the available options for a new commit. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#create-a-commit-with-multiple-files-and-actions -type CreateCommitOptions struct { - Branch *string `url:"branch,omitempty" json:"branch,omitempty"` - CommitMessage *string `url:"commit_message,omitempty" json:"commit_message,omitempty"` - StartBranch *string `url:"start_branch,omitempty" json:"start_branch,omitempty"` - StartSHA *string `url:"start_sha,omitempty" json:"start_sha,omitempty"` - StartProject *string `url:"start_project,omitempty" json:"start_project,omitempty"` - Actions []*CommitActionOptions `url:"actions" json:"actions"` - AuthorEmail *string `url:"author_email,omitempty" json:"author_email,omitempty"` - AuthorName *string `url:"author_name,omitempty" json:"author_name,omitempty"` - Stats *bool `url:"stats,omitempty" json:"stats,omitempty"` - Force *bool `url:"force,omitempty" json:"force,omitempty"` -} - -// CommitActionOptions represents the available options for a new single -// file action. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#create-a-commit-with-multiple-files-and-actions -type CommitActionOptions struct { - Action *FileActionValue `url:"action,omitempty" json:"action,omitempty"` - FilePath *string `url:"file_path,omitempty" json:"file_path,omitempty"` - PreviousPath *string `url:"previous_path,omitempty" json:"previous_path,omitempty"` - Content *string `url:"content,omitempty" json:"content,omitempty"` - Encoding *string `url:"encoding,omitempty" json:"encoding,omitempty"` - LastCommitID *string `url:"last_commit_id,omitempty" json:"last_commit_id,omitempty"` - ExecuteFilemode *bool `url:"execute_filemode,omitempty" json:"execute_filemode,omitempty"` -} - -// CreateCommit creates a commit with multiple files and actions. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#create-a-commit-with-multiple-files-and-actions -func (s *CommitsService) CreateCommit(pid interface{}, opt *CreateCommitOptions, options ...RequestOptionFunc) (*Commit, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/commits", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - c := new(Commit) - resp, err := s.client.Do(req, &c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} - -// Diff represents a GitLab diff. 
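
CreateCommit above is the one endpoint in this file that takes a batch of file actions, so a sketch of a two-action commit is worth spelling out. Host, token, project path, and file names are placeholders; gitlab.FileAction with the gitlab.FileCreate and gitlab.FileMove constants, gitlab.String, and the Commits accessor on gitlab.Client are assumed from the rest of this package.

package main

import (
    "fmt"
    "log"

    gitlab "github.com/xanzy/go-gitlab"
)

func main() {
    git, err := gitlab.NewClient("glpat-example", gitlab.WithBaseURL("https://gitlab.example.com")) // placeholders
    if err != nil {
        log.Fatal(err)
    }

    // One commit carrying two actions: create a file and rename another.
    commit, _, err := git.Commits.CreateCommit("my-group/my-project", &gitlab.CreateCommitOptions{
        Branch:        gitlab.String("main"),
        CommitMessage: gitlab.String("Add greeting; rename README"),
        Actions: []*gitlab.CommitActionOptions{
            {
                Action:   gitlab.FileAction(gitlab.FileCreate),
                FilePath: gitlab.String("hello.txt"),
                Content:  gitlab.String("hello\n"),
            },
            {
                Action:       gitlab.FileAction(gitlab.FileMove),
                FilePath:     gitlab.String("README.md"),
                PreviousPath: gitlab.String("README"),
            },
        },
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("created", commit.ShortID)
}
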
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html
-type Diff struct {
-    Diff        string `json:"diff"`
-    NewPath     string `json:"new_path"`
-    OldPath     string `json:"old_path"`
-    AMode       string `json:"a_mode"`
-    BMode       string `json:"b_mode"`
-    NewFile     bool   `json:"new_file"`
-    RenamedFile bool   `json:"renamed_file"`
-    DeletedFile bool   `json:"deleted_file"`
-}
-
-func (d Diff) String() string {
-    return Stringify(d)
-}
-
-// GetCommitDiffOptions represents the available GetCommitDiff() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/commits.html#get-the-diff-of-a-commit
-type GetCommitDiffOptions struct {
-    ListOptions
-    Unidiff *bool `url:"unidiff,omitempty" json:"unidiff,omitempty"`
-}
-
-// GetCommitDiff gets the diff of a commit in a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/commits.html#get-the-diff-of-a-commit
-func (s *CommitsService) GetCommitDiff(pid interface{}, sha string, opt *GetCommitDiffOptions, options ...RequestOptionFunc) ([]*Diff, *Response, error) {
-    project, err := parseID(pid)
-    if err != nil {
-        return nil, nil, err
-    }
-    u := fmt.Sprintf("projects/%s/repository/commits/%s/diff", PathEscape(project), url.PathEscape(sha))
-
-    req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    var d []*Diff
-    resp, err := s.client.Do(req, &d)
-    if err != nil {
-        return nil, resp, err
-    }
-
-    return d, resp, nil
-}
-
-// CommitComment represents a GitLab commit comment.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html
-type CommitComment struct {
-    Note     string `json:"note"`
-    Path     string `json:"path"`
-    Line     int    `json:"line"`
-    LineType string `json:"line_type"`
-    Author   Author `json:"author"`
-}
-
-// Author represents a GitLab commit author.
-type Author struct {
-    ID        int        `json:"id"`
-    Username  string     `json:"username"`
-    Email     string     `json:"email"`
-    Name      string     `json:"name"`
-    State     string     `json:"state"`
-    Blocked   bool       `json:"blocked"`
-    CreatedAt *time.Time `json:"created_at"`
-}
-
-func (c CommitComment) String() string {
-    return Stringify(c)
-}
-
-// GetCommitCommentsOptions represents the available GetCommitComments() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/commits.html#get-the-comments-of-a-commit
-type GetCommitCommentsOptions ListOptions
-
-// GetCommitComments gets the comments of a commit in a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/commits.html#get-the-comments-of-a-commit
-func (s *CommitsService) GetCommitComments(pid interface{}, sha string, opt *GetCommitCommentsOptions, options ...RequestOptionFunc) ([]*CommitComment, *Response, error) {
-    project, err := parseID(pid)
-    if err != nil {
-        return nil, nil, err
-    }
-    u := fmt.Sprintf("projects/%s/repository/commits/%s/comments", PathEscape(project), url.PathEscape(sha))
-
-    req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    var c []*CommitComment
-    resp, err := s.client.Do(req, &c)
-    if err != nil {
-        return nil, resp, err
-    }
-
-    return c, resp, nil
-}
-
-// PostCommitCommentOptions represents the available PostCommitComment()
-// options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/commits.html#post-comment-to-commit
-type PostCommitCommentOptions struct {
-    Note     *string `url:"note,omitempty" json:"note,omitempty"`
-    Path     *string `url:"path" json:"path"`
-    Line     *int    `url:"line" json:"line"`
-    LineType *string `url:"line_type" json:"line_type"`
-}
-
-// PostCommitComment adds a comment to a commit. Optionally you can post
-// a comment on a specific line of a commit; in that case path, line, and
-// line_type are required.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/commits.html#post-comment-to-commit
-func (s *CommitsService) PostCommitComment(pid interface{}, sha string, opt *PostCommitCommentOptions, options ...RequestOptionFunc) (*CommitComment, *Response, error) {
-    project, err := parseID(pid)
-    if err != nil {
-        return nil, nil, err
-    }
-    u := fmt.Sprintf("projects/%s/repository/commits/%s/comments", PathEscape(project), url.PathEscape(sha))
-
-    req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    c := new(CommitComment)
-    resp, err := s.client.Do(req, c)
-    if err != nil {
-        return nil, resp, err
-    }
-
-    return c, resp, nil
-}
-
-// GetCommitStatusesOptions represents the available GetCommitStatuses() options.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#list-the-statuses-of-a-commit
-type GetCommitStatusesOptions struct {
-    ListOptions
-    Ref   *string `url:"ref,omitempty" json:"ref,omitempty"`
-    Stage *string `url:"stage,omitempty" json:"stage,omitempty"`
-    Name  *string `url:"name,omitempty" json:"name,omitempty"`
-    All   *bool   `url:"all,omitempty" json:"all,omitempty"`
-}
-
-// CommitStatus represents a GitLab commit status.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#commit-status
-type CommitStatus struct {
-    ID           int        `json:"id"`
-    SHA          string     `json:"sha"`
-    Ref          string     `json:"ref"`
-    Status       string     `json:"status"`
-    CreatedAt    *time.Time `json:"created_at"`
-    StartedAt    *time.Time `json:"started_at"`
-    FinishedAt   *time.Time `json:"finished_at"`
-    Name         string     `json:"name"`
-    AllowFailure bool       `json:"allow_failure"`
-    Coverage     float64    `json:"coverage"`
-    PipelineId   int        `json:"pipeline_id"`
-    Author       Author     `json:"author"`
-    Description  string     `json:"description"`
-    TargetURL    string     `json:"target_url"`
-}
-
-// GetCommitStatuses gets the statuses of a commit in a project.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#list-the-statuses-of-a-commit
-func (s *CommitsService) GetCommitStatuses(pid interface{}, sha string, opt *GetCommitStatusesOptions, options ...RequestOptionFunc) ([]*CommitStatus, *Response, error) {
-    project, err := parseID(pid)
-    if err != nil {
-        return nil, nil, err
-    }
-    u := fmt.Sprintf("projects/%s/repository/commits/%s/statuses", PathEscape(project), url.PathEscape(sha))
-
-    req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    var cs []*CommitStatus
-    resp, err := s.client.Do(req, &cs)
-    if err != nil {
-        return nil, resp, err
-    }
-
-    return cs, resp, nil
-}
-
-// SetCommitStatusOptions represents the available SetCommitStatus() options.
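
A sketch of reporting an external CI result with the options type above and the SetCommitStatus call that follows. Host, token, project path, SHA, and URLs are placeholders; gitlab.Success is the BuildStateValue constant for a passing state, and gitlab.String and the Commits accessor on gitlab.Client are assumed from the rest of this package.

package main

import (
    "fmt"
    "log"

    gitlab "github.com/xanzy/go-gitlab"
)

func main() {
    git, err := gitlab.NewClient("glpat-example", gitlab.WithBaseURL("https://gitlab.example.com")) // placeholders
    if err != nil {
        log.Fatal(err)
    }

    status, _, err := git.Commits.SetCommitStatus("my-group/my-project", "e83c5163", &gitlab.SetCommitStatusOptions{
        State:       gitlab.Success, // BuildStateValue constant for "success"
        Name:        gitlab.String("ci/lint"),
        TargetURL:   gitlab.String("https://ci.example.com/builds/42"),
        Description: gitlab.String("all checks passed"),
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(status.Name, status.Status)
}
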
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#set-the-pipeline-status-of-a-commit
-type SetCommitStatusOptions struct {
-    State       BuildStateValue `url:"state" json:"state"`
-    Ref         *string         `url:"ref,omitempty" json:"ref,omitempty"`
-    Name        *string         `url:"name,omitempty" json:"name,omitempty"`
-    Context     *string         `url:"context,omitempty" json:"context,omitempty"`
-    TargetURL   *string         `url:"target_url,omitempty" json:"target_url,omitempty"`
-    Description *string         `url:"description,omitempty" json:"description,omitempty"`
-    Coverage    *float64        `url:"coverage,omitempty" json:"coverage,omitempty"`
-    PipelineID  *int            `url:"pipeline_id,omitempty" json:"pipeline_id,omitempty"`
-}
-
-// SetCommitStatus sets the status of a commit in a project.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#set-the-pipeline-status-of-a-commit
-func (s *CommitsService) SetCommitStatus(pid interface{}, sha string, opt *SetCommitStatusOptions, options ...RequestOptionFunc) (*CommitStatus, *Response, error) {
-    project, err := parseID(pid)
-    if err != nil {
-        return nil, nil, err
-    }
-    u := fmt.Sprintf("projects/%s/statuses/%s", PathEscape(project), url.PathEscape(sha))
-
-    req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    cs := new(CommitStatus)
-    resp, err := s.client.Do(req, &cs)
-    if err != nil {
-        return nil, resp, err
-    }
-
-    return cs, resp, nil
-}
-
-// ListMergeRequestsByCommit gets merge requests associated with a commit.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/commits.html#list-merge-requests-associated-with-a-commit
-func (s *CommitsService) ListMergeRequestsByCommit(pid interface{}, sha string, options ...RequestOptionFunc) ([]*MergeRequest, *Response, error) {
-    project, err := parseID(pid)
-    if err != nil {
-        return nil, nil, err
-    }
-    u := fmt.Sprintf("projects/%s/repository/commits/%s/merge_requests", PathEscape(project), url.PathEscape(sha))
-
-    req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    var mrs []*MergeRequest
-    resp, err := s.client.Do(req, &mrs)
-    if err != nil {
-        return nil, resp, err
-    }
-
-    return mrs, resp, nil
-}
-
-// CherryPickCommitOptions represents the available CherryPickCommit() options.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#cherry-pick-a-commit
-type CherryPickCommitOptions struct {
-    Branch  *string `url:"branch,omitempty" json:"branch,omitempty"`
-    DryRun  *bool   `url:"dry_run,omitempty" json:"dry_run,omitempty"`
-    Message *string `url:"message,omitempty" json:"message,omitempty"`
-}
-
-// CherryPickCommit cherry picks a commit to a given branch.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#cherry-pick-a-commit
-func (s *CommitsService) CherryPickCommit(pid interface{}, sha string, opt *CherryPickCommitOptions, options ...RequestOptionFunc) (*Commit, *Response, error) {
-    project, err := parseID(pid)
-    if err != nil {
-        return nil, nil, err
-    }
-    u := fmt.Sprintf("projects/%s/repository/commits/%s/cherry_pick", PathEscape(project), url.PathEscape(sha))
-
-    req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    c := new(Commit)
-    resp, err := s.client.Do(req, &c)
-    if err != nil {
-        return nil, resp, err
-    }
-
-    return c, resp, nil
-}
-
-// RevertCommitOptions represents the available RevertCommit() options.
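
Cherry-picking above supports a dry run, which is handy to surface conflicts before writing anything; a sketch follows. Host, token, project path, SHA, and branch name are placeholders; gitlab.String, gitlab.Bool, and the Commits accessor on gitlab.Client are assumed from the rest of this package.

package main

import (
    "fmt"
    "log"

    gitlab "github.com/xanzy/go-gitlab"
)

func main() {
    git, err := gitlab.NewClient("glpat-example", gitlab.WithBaseURL("https://gitlab.example.com")) // placeholders
    if err != nil {
        log.Fatal(err)
    }

    const sha = "e83c5163" // placeholder commit SHA
    pick := &gitlab.CherryPickCommitOptions{
        Branch: gitlab.String("release-1.2"),
        DryRun: gitlab.Bool(true),
    }
    // Dry-run first; a conflict comes back as an error without any write.
    if _, _, err := git.Commits.CherryPickCommit("my-group/my-project", sha, pick); err != nil {
        log.Fatal(err)
    }
    pick.DryRun = gitlab.Bool(false)
    c, _, err := git.Commits.CherryPickCommit("my-group/my-project", sha, pick)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("picked as", c.ShortID)
}
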
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#revert-a-commit -type RevertCommitOptions struct { - Branch *string `url:"branch,omitempty" json:"branch,omitempty"` -} - -// RevertCommit reverts a commit in a given branch. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#revert-a-commit -func (s *CommitsService) RevertCommit(pid interface{}, sha string, opt *RevertCommitOptions, options ...RequestOptionFunc) (*Commit, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/commits/%s/revert", PathEscape(project), url.PathEscape(sha)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - c := new(Commit) - resp, err := s.client.Do(req, &c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} - -// GPGSignature represents a GitLab commit's GPG signature. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/commits.html#get-gpg-signature-of-a-commit -type GPGSignature struct { - KeyID int `json:"gpg_key_id"` - KeyPrimaryKeyID string `json:"gpg_key_primary_keyid"` - KeyUserName string `json:"gpg_key_user_name"` - KeyUserEmail string `json:"gpg_key_user_email"` - VerificationStatus string `json:"verification_status"` - KeySubkeyID int `json:"gpg_key_subkey_id"` -} - -// GetGPGSignature gets a GPG signature of a commit. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/commits.html#get-gpg-signature-of-a-commit -func (s *CommitsService) GetGPGSignature(pid interface{}, sha string, options ...RequestOptionFunc) (*GPGSignature, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/commits/%s/signature", PathEscape(project), url.PathEscape(sha)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - sig := new(GPGSignature) - resp, err := s.client.Do(req, &sig) - if err != nil { - return nil, resp, err - } - - return sig, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/container_registry.go b/vendor/github.com/xanzy/go-gitlab/container_registry.go deleted file mode 100644 index bec477df65..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/container_registry.go +++ /dev/null @@ -1,311 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// ContainerRegistryService handles communication with the container registry -// related methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/container_registry.html -type ContainerRegistryService struct { - client *Client -} - -// RegistryRepository represents a GitLab container registry repository. 
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/container_registry.html -type RegistryRepository struct { - ID int `json:"id"` - Name string `json:"name"` - Path string `json:"path"` - ProjectID int `json:"project_id"` - Location string `json:"location"` - CreatedAt *time.Time `json:"created_at"` - CleanupPolicyStartedAt *time.Time `json:"cleanup_policy_started_at"` - Status *ContainerRegistryStatus `json:"status"` - TagsCount int `json:"tags_count"` - Tags []*RegistryRepositoryTag `json:"tags"` -} - -func (s RegistryRepository) String() string { - return Stringify(s) -} - -// RegistryRepositoryTag represents a GitLab registry image tag. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/container_registry.html -type RegistryRepositoryTag struct { - Name string `json:"name"` - Path string `json:"path"` - Location string `json:"location"` - Revision string `json:"revision"` - ShortRevision string `json:"short_revision"` - Digest string `json:"digest"` - CreatedAt *time.Time `json:"created_at"` - TotalSize int `json:"total_size"` -} - -func (s RegistryRepositoryTag) String() string { - return Stringify(s) -} - -// ListRegistryRepositoriesOptions represents the available -// ListRegistryRepositories() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/container_registry.html#list-registry-repositories -type ListRegistryRepositoriesOptions struct { - ListOptions - - // Deprecated: These options are deprecated for ListGroupRegistryRepositories calls. (Removed in GitLab 15.0) - Tags *bool `url:"tags,omitempty" json:"tags,omitempty"` - TagsCount *bool `url:"tags_count,omitempty" json:"tags_count,omitempty"` -} - -// ListProjectRegistryRepositories gets a list of registry repositories in a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/container_registry.html#within-a-project -func (s *ContainerRegistryService) ListProjectRegistryRepositories(pid interface{}, opt *ListRegistryRepositoriesOptions, options ...RequestOptionFunc) ([]*RegistryRepository, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/registry/repositories", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var repos []*RegistryRepository - resp, err := s.client.Do(req, &repos) - if err != nil { - return nil, resp, err - } - - return repos, resp, nil -} - -// ListGroupRegistryRepositories gets a list of registry repositories in a group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/container_registry.html#within-a-group -func (s *ContainerRegistryService) ListGroupRegistryRepositories(gid interface{}, opt *ListRegistryRepositoriesOptions, options ...RequestOptionFunc) ([]*RegistryRepository, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/registry/repositories", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var repos []*RegistryRepository - resp, err := s.client.Do(req, &repos) - if err != nil { - return nil, resp, err - } - - return repos, resp, nil -} - -// GetSingleRegistryRepositoryOptions represents the available -// GetSingleRegistryRepository() options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/container_registry.html#get-details-of-a-single-repository -type GetSingleRegistryRepositoryOptions struct { - Tags *bool `url:"tags,omitempty" json:"tags,omitempty"` - TagsCount *bool `url:"tags_count,omitempty" json:"tags_count,omitempty"` -} - -// GetSingleRegistryRepository gets the details of a single registry repository. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/container_registry.html#get-details-of-a-single-repository -func (s *ContainerRegistryService) GetSingleRegistryRepository(pid interface{}, opt *GetSingleRegistryRepositoryOptions, options ...RequestOptionFunc) (*RegistryRepository, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("registry/repositories/%s", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - repo := new(RegistryRepository) - resp, err := s.client.Do(req, repo) - if err != nil { - return nil, resp, err - } - - return repo, resp, nil -} - -// DeleteRegistryRepository deletes a repository in a registry. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/container_registry.html#delete-registry-repository -func (s *ContainerRegistryService) DeleteRegistryRepository(pid interface{}, repository int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/registry/repositories/%d", PathEscape(project), repository) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// ListRegistryRepositoryTagsOptions represents the available -// ListRegistryRepositoryTags() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/container_registry.html#list-registry-repository-tags -type ListRegistryRepositoryTagsOptions ListOptions - -// ListRegistryRepositoryTags gets a list of tags for a given registry repository. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/container_registry.html#list-registry-repository-tags -func (s *ContainerRegistryService) ListRegistryRepositoryTags(pid interface{}, repository int, opt *ListRegistryRepositoryTagsOptions, options ...RequestOptionFunc) ([]*RegistryRepositoryTag, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/registry/repositories/%d/tags", - PathEscape(project), - repository, - ) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var tags []*RegistryRepositoryTag - resp, err := s.client.Do(req, &tags) - if err != nil { - return nil, resp, err - } - - return tags, resp, nil -} - -// GetRegistryRepositoryTagDetail gets the details of a registry repository tag. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/container_registry.html#get-details-of-a-registry-repository-tag -func (s *ContainerRegistryService) GetRegistryRepositoryTagDetail(pid interface{}, repository int, tagName string, options ...RequestOptionFunc) (*RegistryRepositoryTag, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/registry/repositories/%d/tags/%s", - PathEscape(project), - repository, - tagName, - ) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - tag := new(RegistryRepositoryTag) - resp, err := s.client.Do(req, &tag) - if err != nil { - return nil, resp, err - } - - return tag, resp, nil -} - -// DeleteRegistryRepositoryTag deletes a registry repository tag. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/container_registry.html#delete-a-registry-repository-tag -func (s *ContainerRegistryService) DeleteRegistryRepositoryTag(pid interface{}, repository int, tagName string, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/registry/repositories/%d/tags/%s", - PathEscape(project), - repository, - tagName, - ) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// DeleteRegistryRepositoryTagsOptions represents the available -// DeleteRegistryRepositoryTags() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/container_registry.html#delete-registry-repository-tags-in-bulk -type DeleteRegistryRepositoryTagsOptions struct { - NameRegexpDelete *string `url:"name_regex_delete,omitempty" json:"name_regex_delete,omitempty"` - NameRegexpKeep *string `url:"name_regex_keep,omitempty" json:"name_regex_keep,omitempty"` - KeepN *int `url:"keep_n,omitempty" json:"keep_n,omitempty"` - OlderThan *string `url:"older_than,omitempty" json:"older_than,omitempty"` - - // Deprecated: NameRegexp is deprecated in favor of NameRegexpDelete. - NameRegexp *string `url:"name_regex,omitempty" json:"name_regex,omitempty"` -} - -// DeleteRegistryRepositoryTags deletes repository tags in bulk based on -// given criteria. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/container_registry.html#delete-registry-repository-tags-in-bulk -func (s *ContainerRegistryService) DeleteRegistryRepositoryTags(pid interface{}, repository int, opt *DeleteRegistryRepositoryTagsOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/registry/repositories/%d/tags", - PathEscape(project), - repository, - ) - - req, err := s.client.NewRequest(http.MethodDelete, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/custom_attributes.go b/vendor/github.com/xanzy/go-gitlab/custom_attributes.go deleted file mode 100644 index 244059db7c..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/custom_attributes.go +++ /dev/null @@ -1,188 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" -) - -// CustomAttributesService handles communication with the group, project and -// user custom attributes related methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/custom_attributes.html -type CustomAttributesService struct { - client *Client -} - -// CustomAttribute struct is used to unmarshal responses to API calls. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/custom_attributes.html -type CustomAttribute struct { - Key string `json:"key"` - Value string `json:"value"` -} - -// ListCustomUserAttributes lists the custom attributes of the specified user. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/custom_attributes.html#list-custom-attributes -func (s *CustomAttributesService) ListCustomUserAttributes(user int, options ...RequestOptionFunc) ([]*CustomAttribute, *Response, error) { - return s.listCustomAttributes("users", user, options...) -} - -// ListCustomGroupAttributes lists the custom attributes of the specified group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/custom_attributes.html#list-custom-attributes -func (s *CustomAttributesService) ListCustomGroupAttributes(group int, options ...RequestOptionFunc) ([]*CustomAttribute, *Response, error) { - return s.listCustomAttributes("groups", group, options...) -} - -// ListCustomProjectAttributes lists the custom attributes of the specified project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/custom_attributes.html#list-custom-attributes -func (s *CustomAttributesService) ListCustomProjectAttributes(project int, options ...RequestOptionFunc) ([]*CustomAttribute, *Response, error) { - return s.listCustomAttributes("projects", project, options...) 
-} - -func (s *CustomAttributesService) listCustomAttributes(resource string, id int, options ...RequestOptionFunc) ([]*CustomAttribute, *Response, error) { - u := fmt.Sprintf("%s/%d/custom_attributes", resource, id) - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var cas []*CustomAttribute - resp, err := s.client.Do(req, &cas) - if err != nil { - return nil, resp, err - } - return cas, resp, nil -} - -// GetCustomUserAttribute returns the user attribute with a specific key. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/custom_attributes.html#single-custom-attribute -func (s *CustomAttributesService) GetCustomUserAttribute(user int, key string, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) { - return s.getCustomAttribute("users", user, key, options...) -} - -// GetCustomGroupAttribute returns the group attribute with a specific key. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/custom_attributes.html#single-custom-attribute -func (s *CustomAttributesService) GetCustomGroupAttribute(group int, key string, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) { - return s.getCustomAttribute("groups", group, key, options...) -} - -// GetCustomProjectAttribute returns the project attribute with a specific key. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/custom_attributes.html#single-custom-attribute -func (s *CustomAttributesService) GetCustomProjectAttribute(project int, key string, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) { - return s.getCustomAttribute("projects", project, key, options...) -} - -func (s *CustomAttributesService) getCustomAttribute(resource string, id int, key string, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) { - u := fmt.Sprintf("%s/%d/custom_attributes/%s", resource, id, key) - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var ca *CustomAttribute - resp, err := s.client.Do(req, &ca) - if err != nil { - return nil, resp, err - } - return ca, resp, nil -} - -// SetCustomUserAttribute sets the custom attributes of the specified user. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/custom_attributes.html#set-custom-attribute -func (s *CustomAttributesService) SetCustomUserAttribute(user int, c CustomAttribute, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) { - return s.setCustomAttribute("users", user, c, options...) -} - -// SetCustomGroupAttribute sets the custom attributes of the specified group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/custom_attributes.html#set-custom-attribute -func (s *CustomAttributesService) SetCustomGroupAttribute(group int, c CustomAttribute, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) { - return s.setCustomAttribute("groups", group, c, options...) -} - -// SetCustomProjectAttribute sets the custom attributes of the specified project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/custom_attributes.html#set-custom-attribute -func (s *CustomAttributesService) SetCustomProjectAttribute(project int, c CustomAttribute, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) { - return s.setCustomAttribute("projects", project, c, options...) 
-} - -func (s *CustomAttributesService) setCustomAttribute(resource string, id int, c CustomAttribute, options ...RequestOptionFunc) (*CustomAttribute, *Response, error) { - u := fmt.Sprintf("%s/%d/custom_attributes/%s", resource, id, c.Key) - req, err := s.client.NewRequest(http.MethodPut, u, c, options) - if err != nil { - return nil, nil, err - } - - ca := new(CustomAttribute) - resp, err := s.client.Do(req, ca) - if err != nil { - return nil, resp, err - } - return ca, resp, nil -} - -// DeleteCustomUserAttribute removes the custom attribute of the specified user. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/custom_attributes.html#delete-custom-attribute -func (s *CustomAttributesService) DeleteCustomUserAttribute(user int, key string, options ...RequestOptionFunc) (*Response, error) { - return s.deleteCustomAttribute("users", user, key, options...) -} - -// DeleteCustomGroupAttribute removes the custom attribute of the specified group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/custom_attributes.html#delete-custom-attribute -func (s *CustomAttributesService) DeleteCustomGroupAttribute(group int, key string, options ...RequestOptionFunc) (*Response, error) { - return s.deleteCustomAttribute("groups", group, key, options...) -} - -// DeleteCustomProjectAttribute removes the custom attribute of the specified project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/custom_attributes.html#delete-custom-attribute -func (s *CustomAttributesService) DeleteCustomProjectAttribute(project int, key string, options ...RequestOptionFunc) (*Response, error) { - return s.deleteCustomAttribute("projects", project, key, options...) -} - -func (s *CustomAttributesService) deleteCustomAttribute(resource string, id int, key string, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("%s/%d/custom_attributes/%s", resource, id, key) - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/deploy_keys.go b/vendor/github.com/xanzy/go-gitlab/deploy_keys.go deleted file mode 100644 index e343bef980..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/deploy_keys.go +++ /dev/null @@ -1,275 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// DeployKeysService handles communication with the keys related methods -// of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/deploy_keys.html -type DeployKeysService struct { - client *Client -} - -// InstanceDeployKey represents a GitLab deploy key with the associated -// projects it has write access to. 
-type InstanceDeployKey struct { - ID int `json:"id"` - Title string `json:"title"` - CreatedAt *time.Time `json:"created_at"` - Key string `json:"key"` - Fingerprint string `json:"fingerprint"` - ProjectsWithWriteAccess []*DeployKeyProject `json:"projects_with_write_access"` -} - -func (k InstanceDeployKey) String() string { - return Stringify(k) -} - -// DeployKeyProject refers to a project an InstanceDeployKey has write access to. -type DeployKeyProject struct { - ID int `json:"id"` - Description string `json:"description"` - Name string `json:"name"` - NameWithNamespace string `json:"name_with_namespace"` - Path string `json:"path"` - PathWithNamespace string `json:"path_with_namespace"` - CreatedAt *time.Time `json:"created_at"` -} - -func (k DeployKeyProject) String() string { - return Stringify(k) -} - -// ProjectDeployKey represents a GitLab project deploy key. -type ProjectDeployKey struct { - ID int `json:"id"` - Title string `json:"title"` - Key string `json:"key"` - CreatedAt *time.Time `json:"created_at"` - CanPush bool `json:"can_push"` -} - -func (k ProjectDeployKey) String() string { - return Stringify(k) -} - -// ListInstanceDeployKeysOptions represents the available ListAllDeployKeys() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deploy_keys.html#list-all-deploy-keys -type ListInstanceDeployKeysOptions struct { - ListOptions - Public *bool `url:"public,omitempty" json:"public,omitempty"` -} - -// ListAllDeployKeys gets a list of all deploy keys. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deploy_keys.html#list-all-deploy-keys -func (s *DeployKeysService) ListAllDeployKeys(opt *ListInstanceDeployKeysOptions, options ...RequestOptionFunc) ([]*InstanceDeployKey, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "deploy_keys", opt, options) - if err != nil { - return nil, nil, err - } - - var ks []*InstanceDeployKey - resp, err := s.client.Do(req, &ks) - if err != nil { - return nil, resp, err - } - - return ks, resp, nil -} - -// ListProjectDeployKeysOptions represents the available ListProjectDeployKeys() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deploy_keys.html#list-deploy-keys-for-project -type ListProjectDeployKeysOptions ListOptions - -// ListProjectDeployKeys gets a list of a project's deploy keys. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deploy_keys.html#list-deploy-keys-for-project -func (s *DeployKeysService) ListProjectDeployKeys(pid interface{}, opt *ListProjectDeployKeysOptions, options ...RequestOptionFunc) ([]*ProjectDeployKey, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/deploy_keys", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ks []*ProjectDeployKey - resp, err := s.client.Do(req, &ks) - if err != nil { - return nil, resp, err - } - - return ks, resp, nil -} - -// GetDeployKey gets a single deploy key. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deploy_keys.html#get-a-single-deploy-key -func (s *DeployKeysService) GetDeployKey(pid interface{}, deployKey int, options ...RequestOptionFunc) (*ProjectDeployKey, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/deploy_keys/%d", PathEscape(project), deployKey) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - k := new(ProjectDeployKey) - resp, err := s.client.Do(req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil -} - -// AddDeployKeyOptions represents the available AddDeployKey() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deploy_keys.html#add-deploy-key -type AddDeployKeyOptions struct { - Title *string `url:"title,omitempty" json:"title,omitempty"` - Key *string `url:"key,omitempty" json:"key,omitempty"` - CanPush *bool `url:"can_push,omitempty" json:"can_push,omitempty"` -} - -// AddDeployKey creates a new deploy key for a project. If the deploy key -// already exists in another project, it will be joined to the project, but -// only if the original one is accessible by the same user. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deploy_keys.html#add-deploy-key -func (s *DeployKeysService) AddDeployKey(pid interface{}, opt *AddDeployKeyOptions, options ...RequestOptionFunc) (*ProjectDeployKey, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/deploy_keys", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - k := new(ProjectDeployKey) - resp, err := s.client.Do(req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil -} - -// DeleteDeployKey deletes a deploy key from a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deploy_keys.html#delete-deploy-key -func (s *DeployKeysService) DeleteDeployKey(pid interface{}, deployKey int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/deploy_keys/%d", PathEscape(project), deployKey) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// EnableDeployKey enables a deploy key. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deploy_keys.html#enable-a-deploy-key -func (s *DeployKeysService) EnableDeployKey(pid interface{}, deployKey int, options ...RequestOptionFunc) (*ProjectDeployKey, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/deploy_keys/%d/enable", PathEscape(project), deployKey) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - k := new(ProjectDeployKey) - resp, err := s.client.Do(req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil -} - -// UpdateDeployKeyOptions represents the available UpdateDeployKey() options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deploy_keys.html#update-deploy-key -type UpdateDeployKeyOptions struct { - Title *string `url:"title,omitempty" json:"title,omitempty"` - CanPush *bool `url:"can_push,omitempty" json:"can_push,omitempty"` -} - -// UpdateDeployKey updates a deploy key for a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deploy_keys.html#update-deploy-key -func (s *DeployKeysService) UpdateDeployKey(pid interface{}, deployKey int, opt *UpdateDeployKeyOptions, options ...RequestOptionFunc) (*ProjectDeployKey, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/deploy_keys/%d", PathEscape(project), deployKey) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - k := new(ProjectDeployKey) - resp, err := s.client.Do(req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/deploy_tokens.go b/vendor/github.com/xanzy/go-gitlab/deploy_tokens.go deleted file mode 100644 index f744b0ec90..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/deploy_tokens.go +++ /dev/null @@ -1,290 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// DeployTokensService handles communication with the deploy tokens related methods -// of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/deploy_tokens.html -type DeployTokensService struct { - client *Client -} - -// DeployToken represents a GitLab deploy token. -type DeployToken struct { - ID int `json:"id"` - Name string `json:"name"` - Username string `json:"username"` - ExpiresAt *time.Time `json:"expires_at"` - Revoked bool `json:"revoked"` - Expired bool `json:"expired"` - Token string `json:"token,omitempty"` - Scopes []string `json:"scopes"` -} - -func (k DeployToken) String() string { - return Stringify(k) -} - -// ListAllDeployTokens gets a list of all deploy tokens. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deploy_tokens.html#list-all-deploy-tokens -func (s *DeployTokensService) ListAllDeployTokens(options ...RequestOptionFunc) ([]*DeployToken, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "deploy_tokens", nil, options) - if err != nil { - return nil, nil, err - } - - var ts []*DeployToken - resp, err := s.client.Do(req, &ts) - if err != nil { - return nil, resp, err - } - - return ts, resp, nil -} - -// ListProjectDeployTokensOptions represents the available ListProjectDeployTokens() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deploy_tokens.html#list-project-deploy-tokens -type ListProjectDeployTokensOptions ListOptions - -// ListProjectDeployTokens gets a list of a project's deploy tokens. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deploy_tokens.html#list-project-deploy-tokens -func (s *DeployTokensService) ListProjectDeployTokens(pid interface{}, opt *ListProjectDeployTokensOptions, options ...RequestOptionFunc) ([]*DeployToken, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/deploy_tokens", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ts []*DeployToken - resp, err := s.client.Do(req, &ts) - if err != nil { - return nil, resp, err - } - - return ts, resp, nil -} - -// GetProjectDeployToken gets a single deploy token. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deploy_tokens.html#get-a-project-deploy-token -func (s *DeployTokensService) GetProjectDeployToken(pid interface{}, deployToken int, options ...RequestOptionFunc) (*DeployToken, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/deploy_tokens/%d", PathEscape(project), deployToken) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - t := new(DeployToken) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// CreateProjectDeployTokenOptions represents the available CreateProjectDeployToken() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deploy_tokens.html#create-a-project-deploy-token -type CreateProjectDeployTokenOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - ExpiresAt *time.Time `url:"expires_at,omitempty" json:"expires_at,omitempty"` - Username *string `url:"username,omitempty" json:"username,omitempty"` - Scopes *[]string `url:"scopes,omitempty" json:"scopes,omitempty"` -} - -// CreateProjectDeployToken creates a new deploy token for a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deploy_tokens.html#create-a-project-deploy-token -func (s *DeployTokensService) CreateProjectDeployToken(pid interface{}, opt *CreateProjectDeployTokenOptions, options ...RequestOptionFunc) (*DeployToken, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/deploy_tokens", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - t := new(DeployToken) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// DeleteProjectDeployToken removes a deploy token from the project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deploy_tokens.html#delete-a-project-deploy-token -func (s *DeployTokensService) DeleteProjectDeployToken(pid interface{}, deployToken int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/deploy_tokens/%d", PathEscape(project), deployToken) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// ListGroupDeployTokensOptions represents the available ListGroupDeployTokens() -// options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deploy_tokens.html#list-group-deploy-tokens -type ListGroupDeployTokensOptions ListOptions - -// ListGroupDeployTokens gets a list of a group’s deploy tokens. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deploy_tokens.html#list-group-deploy-tokens -func (s *DeployTokensService) ListGroupDeployTokens(gid interface{}, opt *ListGroupDeployTokensOptions, options ...RequestOptionFunc) ([]*DeployToken, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/deploy_tokens", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ts []*DeployToken - resp, err := s.client.Do(req, &ts) - if err != nil { - return nil, resp, err - } - - return ts, resp, nil -} - -// GetGroupDeployToken gets a single deploy token. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deploy_tokens.html#get-a-group-deploy-token -func (s *DeployTokensService) GetGroupDeployToken(gid interface{}, deployToken int, options ...RequestOptionFunc) (*DeployToken, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/deploy_tokens/%d", PathEscape(group), deployToken) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - t := new(DeployToken) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// CreateGroupDeployTokenOptions represents the available CreateGroupDeployToken() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deploy_tokens.html#create-a-group-deploy-token -type CreateGroupDeployTokenOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - ExpiresAt *time.Time `url:"expires_at,omitempty" json:"expires_at,omitempty"` - Username *string `url:"username,omitempty" json:"username,omitempty"` - Scopes *[]string `url:"scopes,omitempty" json:"scopes,omitempty"` -} - -// CreateGroupDeployToken creates a new deploy token for a group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deploy_tokens.html#create-a-group-deploy-token -func (s *DeployTokensService) CreateGroupDeployToken(gid interface{}, opt *CreateGroupDeployTokenOptions, options ...RequestOptionFunc) (*DeployToken, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/deploy_tokens", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - t := new(DeployToken) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// DeleteGroupDeployToken removes a deploy token from the group. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deploy_tokens.html#delete-a-group-deploy-token -func (s *DeployTokensService) DeleteGroupDeployToken(gid interface{}, deployToken int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/deploy_tokens/%d", PathEscape(group), deployToken) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/deployments.go b/vendor/github.com/xanzy/go-gitlab/deployments.go deleted file mode 100644 index 05301acfc8..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/deployments.go +++ /dev/null @@ -1,260 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// DeploymentsService handles communication with the deployment related methods -// of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/deployments.html -type DeploymentsService struct { - client *Client -} - -// Deployment represents a GitLab deployment. -type Deployment struct { - ID int `json:"id"` - IID int `json:"iid"` - Ref string `json:"ref"` - SHA string `json:"sha"` - Status string `json:"status"` - CreatedAt *time.Time `json:"created_at"` - UpdatedAt *time.Time `json:"updated_at"` - User *ProjectUser `json:"user"` - Environment *Environment `json:"environment"` - Deployable struct { - ID int `json:"id"` - Status string `json:"status"` - Stage string `json:"stage"` - Name string `json:"name"` - Ref string `json:"ref"` - Tag bool `json:"tag"` - Coverage float64 `json:"coverage"` - CreatedAt *time.Time `json:"created_at"` - StartedAt *time.Time `json:"started_at"` - FinishedAt *time.Time `json:"finished_at"` - Duration float64 `json:"duration"` - User *User `json:"user"` - Commit *Commit `json:"commit"` - Pipeline struct { - ID int `json:"id"` - SHA string `json:"sha"` - Ref string `json:"ref"` - Status string `json:"status"` - CreatedAt *time.Time `json:"created_at"` - UpdatedAt *time.Time `json:"updated_at"` - } `json:"pipeline"` - Runner *Runner `json:"runner"` - } `json:"deployable"` -} - -// ListProjectDeploymentsOptions represents the available ListProjectDeployments() options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deployments.html#list-project-deployments -type ListProjectDeploymentsOptions struct { - ListOptions - OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` - Sort *string `url:"sort,omitempty" json:"sort,omitempty"` - Environment *string `url:"environment,omitempty" json:"environment,omitempty"` - Status *string `url:"status,omitempty" json:"status,omitempty"` - - // Only for GitLab versions less than 14 - UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"` - UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"` - - // Only for GitLab 14 or higher - FinishedAfter *time.Time `url:"finished_after,omitempty" json:"finished_after,omitempty"` - FinishedBefore *time.Time `url:"finished_before,omitempty" json:"finished_before,omitempty"` -} - -// ListProjectDeployments gets a list of deployments in a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deployments.html#list-project-deployments -func (s *DeploymentsService) ListProjectDeployments(pid interface{}, opts *ListProjectDeploymentsOptions, options ...RequestOptionFunc) ([]*Deployment, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/deployments", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opts, options) - if err != nil { - return nil, nil, err - } - - var ds []*Deployment - resp, err := s.client.Do(req, &ds) - if err != nil { - return nil, resp, err - } - - return ds, resp, nil -} - -// GetProjectDeployment gets a deployment for a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deployments.html#get-a-specific-deployment -func (s *DeploymentsService) GetProjectDeployment(pid interface{}, deployment int, options ...RequestOptionFunc) (*Deployment, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/deployments/%d", PathEscape(project), deployment) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - d := new(Deployment) - resp, err := s.client.Do(req, d) - if err != nil { - return nil, resp, err - } - - return d, resp, nil -} - -// CreateProjectDeploymentOptions represents the available -// CreateProjectDeployment() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deployments.html#create-a-deployment -type CreateProjectDeploymentOptions struct { - Environment *string `url:"environment,omitempty" json:"environment,omitempty"` - Ref *string `url:"ref,omitempty" json:"ref,omitempty"` - SHA *string `url:"sha,omitempty" json:"sha,omitempty"` - Tag *bool `url:"tag,omitempty" json:"tag,omitempty"` - Status *DeploymentStatusValue `url:"status,omitempty" json:"status,omitempty"` -} - -// CreateProjectDeployment creates a project deployment. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deployments.html#create-a-deployment -func (s *DeploymentsService) CreateProjectDeployment(pid interface{}, opt *CreateProjectDeploymentOptions, options ...RequestOptionFunc) (*Deployment, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/deployments", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - d := new(Deployment) - resp, err := s.client.Do(req, &d) - if err != nil { - return nil, resp, err - } - - return d, resp, nil -} - -// UpdateProjectDeploymentOptions represents the available -// UpdateProjectDeployment() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deployments.html#update-a-deployment -type UpdateProjectDeploymentOptions struct { - Status *DeploymentStatusValue `url:"status,omitempty" json:"status,omitempty"` -} - -// UpdateProjectDeployment updates a project deployment. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deployments.html#update-a-deployment -func (s *DeploymentsService) UpdateProjectDeployment(pid interface{}, deployment int, opt *UpdateProjectDeploymentOptions, options ...RequestOptionFunc) (*Deployment, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/deployments/%d", PathEscape(project), deployment) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - d := new(Deployment) - resp, err := s.client.Do(req, &d) - if err != nil { - return nil, resp, err - } - - return d, resp, nil -} - -// ApproveOrRejectProjectDeploymentOptions represents the available -// ApproveOrRejectProjectDeployment() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deployments.html#approve-or-reject-a-blocked-deployment -type ApproveOrRejectProjectDeploymentOptions struct { - Status *DeploymentApprovalStatus `url:"status,omitempty" json:"status,omitempty"` - Comment *string `url:"comment,omitempty" json:"comment,omitempty"` - RepresentedAs *string `url:"represented_as,omitempty" json:"represented_as,omitempty"` -} - -// ApproveOrRejectProjectDeployment approves or rejects a blocked deployment. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deployments.html#approve-or-reject-a-blocked-deployment -func (s *DeploymentsService) ApproveOrRejectProjectDeployment(pid interface{}, deployment int, - opt *ApproveOrRejectProjectDeploymentOptions, options ...RequestOptionFunc, -) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/deployments/%d/approval", PathEscape(project), deployment) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// DeleteProjectDeployment deletes a project deployment. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deployments.html#delete-a-specific-deployment -func (s *DeploymentsService) DeleteProjectDeployment(pid interface{}, deployment int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/deployments/%d", PathEscape(project), deployment) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/deployments_merge_requests.go b/vendor/github.com/xanzy/go-gitlab/deployments_merge_requests.go deleted file mode 100644 index 8417f9177f..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/deployments_merge_requests.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2022, Daniela Filipe Bento -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package gitlab - -import ( - "fmt" - "net/http" -) - -// DeploymentMergeRequestsService handles communication with the deployment's -// merge requests related methods of the GitLab API. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deployments.html#list-of-merge-requests-associated-with-a-deployment -type DeploymentMergeRequestsService struct { - client *Client -} - -// ListDeploymentMergeRequests gets the merge requests associated with a deployment. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/deployments.html#list-of-merge-requests-associated-with-a-deployment -func (s *DeploymentMergeRequestsService) ListDeploymentMergeRequests(pid interface{}, deployment int, opts *ListMergeRequestsOptions, options ...RequestOptionFunc) ([]*MergeRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/deployments/%d/merge_requests", PathEscape(project), deployment) - - req, err := s.client.NewRequest(http.MethodGet, u, opts, options) - if err != nil { - return nil, nil, err - } - - var mrs []*MergeRequest - resp, err := s.client.Do(req, &mrs) - if err != nil { - return nil, resp, err - } - - return mrs, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/discussions.go b/vendor/github.com/xanzy/go-gitlab/discussions.go deleted file mode 100644 index 7f791c585f..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/discussions.go +++ /dev/null @@ -1,1143 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// DiscussionsService handles communication with the discussions related -// methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/discussions.html -type DiscussionsService struct { - client *Client -} - -// Discussion represents a GitLab discussion. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/discussions.html -type Discussion struct { - ID string `json:"id"` - IndividualNote bool `json:"individual_note"` - Notes []*Note `json:"notes"` -} - -func (d Discussion) String() string { - return Stringify(d) -} - -// ListIssueDiscussionsOptions represents the available ListIssueDiscussions() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/discussions.html#list-project-issue-discussion-items -type ListIssueDiscussionsOptions ListOptions - -// ListIssueDiscussions gets a list of all discussions for a single -// issue. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/discussions.html#list-project-issue-discussion-items -func (s *DiscussionsService) ListIssueDiscussions(pid interface{}, issue int, opt *ListIssueDiscussionsOptions, options ...RequestOptionFunc) ([]*Discussion, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/discussions", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ds []*Discussion - resp, err := s.client.Do(req, &ds) - if err != nil { - return nil, resp, err - } - - return ds, resp, nil -} - -// GetIssueDiscussion returns a single discussion for a specific project issue. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/discussions.html#get-single-issue-discussion-item -func (s *DiscussionsService) GetIssueDiscussion(pid interface{}, issue int, discussion string, options ...RequestOptionFunc) (*Discussion, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/discussions/%s", - PathEscape(project), - issue, - discussion, - ) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - d := new(Discussion) - resp, err := s.client.Do(req, d) - if err != nil { - return nil, resp, err - } - - return d, resp, nil -} - -// CreateIssueDiscussionOptions represents the available CreateIssueDiscussion() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/discussions.html#create-new-issue-thread -type CreateIssueDiscussionOptions struct { - Body *string `url:"body,omitempty" json:"body,omitempty"` - CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"` -} - -// CreateIssueDiscussion creates a new discussion for a single project issue. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/discussions.html#create-new-issue-thread -func (s *DiscussionsService) CreateIssueDiscussion(pid interface{}, issue int, opt *CreateIssueDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/discussions", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - d := new(Discussion) - resp, err := s.client.Do(req, d) - if err != nil { - return nil, resp, err - } - - return d, resp, nil -} - -// AddIssueDiscussionNoteOptions represents the available AddIssueDiscussionNote() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/discussions.html#add-note-to-existing-issue-thread -type AddIssueDiscussionNoteOptions struct { - Body *string `url:"body,omitempty" json:"body,omitempty"` - CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"` -} - -// AddIssueDiscussionNote adds a new note to an existing issue discussion. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/discussions.html#add-note-to-existing-issue-thread -func (s *DiscussionsService) AddIssueDiscussionNote(pid interface{}, issue int, discussion string, opt *AddIssueDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/discussions/%s/notes", - PathEscape(project), - issue, - discussion, - ) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - n := new(Note) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil -} - -// UpdateIssueDiscussionNoteOptions represents the available -// UpdateIssueDiscussionNote() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/discussions.html#modify-existing-issue-thread-note -type UpdateIssueDiscussionNoteOptions struct { - Body *string `url:"body,omitempty" json:"body,omitempty"` - CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"` -} - -// UpdateIssueDiscussionNote modifies an existing discussion note of an issue. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/discussions.html#modify-existing-issue-thread-note -func (s *DiscussionsService) UpdateIssueDiscussionNote(pid interface{}, issue int, discussion string, note int, opt *UpdateIssueDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/discussions/%s/notes/%d", - PathEscape(project), - issue, - discussion, - note, - ) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - n := new(Note) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil -} - -// DeleteIssueDiscussionNote deletes an existing discussion note of an issue. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/discussions.html#delete-an-issue-thread-note -func (s *DiscussionsService) DeleteIssueDiscussionNote(pid interface{}, issue int, discussion string, note int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/discussions/%s/notes/%d", - PathEscape(project), - issue, - discussion, - note, - ) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// ListSnippetDiscussionsOptions represents the available ListSnippetDiscussions() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/discussions.html#list-project-snippet-discussion-items -type ListSnippetDiscussionsOptions ListOptions - -// ListSnippetDiscussions gets a list of all discussions for a single -// snippet. Snippet discussions are comments users can post to a snippet. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/discussions.html#list-project-snippet-discussion-items -func (s *DiscussionsService) ListSnippetDiscussions(pid interface{}, snippet int, opt *ListSnippetDiscussionsOptions, options ...RequestOptionFunc) ([]*Discussion, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/snippets/%d/discussions", PathEscape(project), snippet) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ds []*Discussion - resp, err := s.client.Do(req, &ds) - if err != nil { - return nil, resp, err - } - - return ds, resp, nil -} - -// GetSnippetDiscussion returns a single discussion for a given snippet. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/discussions.html#get-single-snippet-discussion-item -func (s *DiscussionsService) GetSnippetDiscussion(pid interface{}, snippet int, discussion string, options ...RequestOptionFunc) (*Discussion, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/snippets/%d/discussions/%s", - PathEscape(project), - snippet, - discussion, - ) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - d := new(Discussion) - resp, err := s.client.Do(req, d) - if err != nil { - return nil, resp, err - } - - return d, resp, nil -} - -// CreateSnippetDiscussionOptions represents the available -// CreateSnippetDiscussion() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/discussions.html#create-new-snippet-thread -type CreateSnippetDiscussionOptions struct { - Body *string `url:"body,omitempty" json:"body,omitempty"` - CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"` -} - -// CreateSnippetDiscussion creates a new discussion for a single snippet. -// Snippet discussions are comments users can post to a snippet. 
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/discussions.html#create-new-snippet-thread
-func (s *DiscussionsService) CreateSnippetDiscussion(pid interface{}, snippet int, opt *CreateSnippetDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/snippets/%d/discussions", PathEscape(project), snippet)
-
-	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	d := new(Discussion)
-	resp, err := s.client.Do(req, d)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return d, resp, nil
-}
-
-// AddSnippetDiscussionNoteOptions represents the available
-// AddSnippetDiscussionNote() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/discussions.html#add-note-to-existing-snippet-thread
-type AddSnippetDiscussionNoteOptions struct {
-	Body      *string    `url:"body,omitempty" json:"body,omitempty"`
-	CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"`
-}
-
-// AddSnippetDiscussionNote adds a new note to an existing discussion of a
-// project snippet.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/discussions.html#add-note-to-existing-snippet-thread
-func (s *DiscussionsService) AddSnippetDiscussionNote(pid interface{}, snippet int, discussion string, opt *AddSnippetDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/snippets/%d/discussions/%s/notes",
-		PathEscape(project),
-		snippet,
-		discussion,
-	)
-
-	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	n := new(Note)
-	resp, err := s.client.Do(req, n)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return n, resp, nil
-}
-
-// UpdateSnippetDiscussionNoteOptions represents the available
-// UpdateSnippetDiscussionNote() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/discussions.html#modify-existing-snippet-thread-note
-type UpdateSnippetDiscussionNoteOptions struct {
-	Body      *string    `url:"body,omitempty" json:"body,omitempty"`
-	CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"`
-}
-
-// UpdateSnippetDiscussionNote modifies an existing discussion note of a snippet.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/discussions.html#modify-existing-snippet-thread-note
-func (s *DiscussionsService) UpdateSnippetDiscussionNote(pid interface{}, snippet int, discussion string, note int, opt *UpdateSnippetDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/snippets/%d/discussions/%s/notes/%d",
-		PathEscape(project),
-		snippet,
-		discussion,
-		note,
-	)
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	n := new(Note)
-	resp, err := s.client.Do(req, n)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return n, resp, nil
-}
-
-// DeleteSnippetDiscussionNote deletes an existing discussion note of a snippet.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/discussions.html#delete-a-snippet-thread-note
-func (s *DiscussionsService) DeleteSnippetDiscussionNote(pid interface{}, snippet int, discussion string, note int, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/snippets/%d/discussions/%s/notes/%d",
-		PathEscape(project),
-		snippet,
-		discussion,
-		note,
-	)
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// ListGroupEpicDiscussionsOptions represents the available
-// ListGroupEpicDiscussions() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/discussions.html#list-group-epic-discussion-items
-type ListGroupEpicDiscussionsOptions ListOptions
-
-// ListGroupEpicDiscussions gets a list of all discussions for a single
-// epic. Epic discussions are comments users can post to an epic.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/discussions.html#list-group-epic-discussion-items
-func (s *DiscussionsService) ListGroupEpicDiscussions(gid interface{}, epic int, opt *ListGroupEpicDiscussionsOptions, options ...RequestOptionFunc) ([]*Discussion, *Response, error) {
-	group, err := parseID(gid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("groups/%s/epics/%d/discussions",
-		PathEscape(group),
-		epic,
-	)
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var ds []*Discussion
-	resp, err := s.client.Do(req, &ds)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return ds, resp, nil
-}
-
-// GetEpicDiscussion returns a single discussion for a given epic.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/discussions.html#get-single-epic-discussion-item
-func (s *DiscussionsService) GetEpicDiscussion(gid interface{}, epic int, discussion string, options ...RequestOptionFunc) (*Discussion, *Response, error) {
-	group, err := parseID(gid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("groups/%s/epics/%d/discussions/%s",
-		PathEscape(group),
-		epic,
-		discussion,
-	)
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	d := new(Discussion)
-	resp, err := s.client.Do(req, d)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return d, resp, nil
-}
-
-// CreateEpicDiscussionOptions represents the available CreateEpicDiscussion()
-// options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/discussions.html#create-new-epic-thread
-type CreateEpicDiscussionOptions struct {
-	Body      *string    `url:"body,omitempty" json:"body,omitempty"`
-	CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"`
-}
-
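For reference, the epic-discussion helpers removed in these hunks were typically driven like this. A minimal caller sketch; the token, group path, and epic IID are placeholder assumptions, and the method signatures match the deleted code above:

package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	client, err := gitlab.NewClient("glpat-example-token") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// Start a new thread on epic 42 in the "my-group" group.
	disc, _, err := client.Discussions.CreateEpicDiscussion("my-group", 42, &gitlab.CreateEpicDiscussionOptions{
		Body: gitlab.String("Kick-off comment for this epic"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Reply to the thread that was just created.
	note, _, err := client.Discussions.AddEpicDiscussionNote("my-group", 42, disc.ID, &gitlab.AddEpicDiscussionNoteOptions{
		Body: gitlab.String("A follow-up note on the same thread"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("created note %d in discussion %s\n", note.ID, disc.ID)
}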
-// CreateEpicDiscussion creates a new discussion for a single epic. Epic
-// discussions are comments users can post to an epic.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/discussions.html#create-new-epic-thread
-func (s *DiscussionsService) CreateEpicDiscussion(gid interface{}, epic int, opt *CreateEpicDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) {
-	group, err := parseID(gid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("groups/%s/epics/%d/discussions",
-		PathEscape(group),
-		epic,
-	)
-
-	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	d := new(Discussion)
-	resp, err := s.client.Do(req, d)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return d, resp, nil
-}
-
-// AddEpicDiscussionNoteOptions represents the available
-// AddEpicDiscussionNote() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/discussions.html#add-note-to-existing-epic-thread
-type AddEpicDiscussionNoteOptions struct {
-	Body      *string    `url:"body,omitempty" json:"body,omitempty"`
-	CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"`
-}
-
-// AddEpicDiscussionNote adds a new note to an existing discussion of an epic.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/discussions.html#add-note-to-existing-epic-thread
-func (s *DiscussionsService) AddEpicDiscussionNote(gid interface{}, epic int, discussion string, opt *AddEpicDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) {
-	group, err := parseID(gid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("groups/%s/epics/%d/discussions/%s/notes",
-		PathEscape(group),
-		epic,
-		discussion,
-	)
-
-	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	n := new(Note)
-	resp, err := s.client.Do(req, n)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return n, resp, nil
-}
-
-// UpdateEpicDiscussionNoteOptions represents the available
-// UpdateEpicDiscussionNote() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/discussions.html#modify-existing-epic-thread-note
-type UpdateEpicDiscussionNoteOptions struct {
-	Body      *string    `url:"body,omitempty" json:"body,omitempty"`
-	CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"`
-}
-
-// UpdateEpicDiscussionNote modifies an existing discussion note of an epic.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/discussions.html#modify-existing-epic-thread-note
-func (s *DiscussionsService) UpdateEpicDiscussionNote(gid interface{}, epic int, discussion string, note int, opt *UpdateEpicDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) {
-	group, err := parseID(gid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("groups/%s/epics/%d/discussions/%s/notes/%d",
-		PathEscape(group),
-		epic,
-		discussion,
-		note,
-	)
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	n := new(Note)
-	resp, err := s.client.Do(req, n)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return n, resp, nil
-}
-
-// DeleteEpicDiscussionNote deletes an existing discussion note of an epic.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/discussions.html#delete-an-epic-thread-note -func (s *DiscussionsService) DeleteEpicDiscussionNote(gid interface{}, epic int, discussion string, note int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/epics/%d/discussions/%s/notes/%d", - PathEscape(group), - epic, - discussion, - note, - ) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// ListMergeRequestDiscussionsOptions represents the available -// ListMergeRequestDiscussions() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/discussions.html#list-project-merge-request-discussion-items -type ListMergeRequestDiscussionsOptions ListOptions - -// ListMergeRequestDiscussions gets a list of all discussions for a single -// merge request. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/discussions.html#list-project-merge-request-discussion-items -func (s *DiscussionsService) ListMergeRequestDiscussions(pid interface{}, mergeRequest int, opt *ListMergeRequestDiscussionsOptions, options ...RequestOptionFunc) ([]*Discussion, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/discussions", - PathEscape(project), - mergeRequest, - ) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ds []*Discussion - resp, err := s.client.Do(req, &ds) - if err != nil { - return nil, resp, err - } - - return ds, resp, nil -} - -// GetMergeRequestDiscussion returns a single discussion for a given merge -// request. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/discussions.html#get-single-merge-request-discussion-item -func (s *DiscussionsService) GetMergeRequestDiscussion(pid interface{}, mergeRequest int, discussion string, options ...RequestOptionFunc) (*Discussion, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/discussions/%s", - PathEscape(project), - mergeRequest, - discussion, - ) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - d := new(Discussion) - resp, err := s.client.Do(req, d) - if err != nil { - return nil, resp, err - } - - return d, resp, nil -} - -// CreateMergeRequestDiscussionOptions represents the available -// CreateMergeRequestDiscussion() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/discussions.html#create-new-merge-request-thread -type CreateMergeRequestDiscussionOptions struct { - Body *string `url:"body,omitempty" json:"body,omitempty"` - CommitID *string `url:"commit_id,omitempty" json:"commit_id,omitempty"` - CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"` - Position *PositionOptions `url:"position,omitempty" json:"position,omitempty"` -} - -// PositionOptions represents the position option of a discussion. 
-type PositionOptions struct { - BaseSHA *string `url:"base_sha,omitempty" json:"base_sha,omitempty"` - HeadSHA *string `url:"head_sha,omitempty" json:"head_sha,omitempty"` - StartSHA *string `url:"start_sha,omitempty" json:"start_sha,omitempty"` - NewPath *string `url:"new_path,omitempty" json:"new_path,omitempty"` - OldPath *string `url:"old_path,omitempty" json:"old_path,omitempty"` - PositionType *string `url:"position_type,omitempty" json:"position_type"` - NewLine *int `url:"new_line,omitempty" json:"new_line,omitempty"` - OldLine *int `url:"old_line,omitempty" json:"old_line,omitempty"` - LineRange *LineRangeOptions `url:"line_range,omitempty" json:"line_range,omitempty"` - Width *int `url:"width,omitempty" json:"width,omitempty"` - Height *int `url:"height,omitempty" json:"height,omitempty"` - X *float64 `url:"x,omitempty" json:"x,omitempty"` - Y *float64 `url:"y,omitempty" json:"y,omitempty"` -} - -// LineRangeOptions represents the line range option of a discussion. -type LineRangeOptions struct { - Start *LinePositionOptions `url:"start,omitempty" json:"start,omitempty"` - End *LinePositionOptions `url:"end,omitempty" json:"end,omitempty"` -} - -// LinePositionOptions represents the line position option of a discussion. -type LinePositionOptions struct { - LineCode *string `url:"line_code,omitempty" json:"line_code,omitempty"` - Type *string `url:"type,omitempty" json:"type,omitempty"` -} - -// CreateMergeRequestDiscussion creates a new discussion for a single merge -// request. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/discussions.html#create-new-merge-request-thread -func (s *DiscussionsService) CreateMergeRequestDiscussion(pid interface{}, mergeRequest int, opt *CreateMergeRequestDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/discussions", - PathEscape(project), - mergeRequest, - ) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - d := new(Discussion) - resp, err := s.client.Do(req, d) - if err != nil { - return nil, resp, err - } - - return d, resp, nil -} - -// ResolveMergeRequestDiscussionOptions represents the available -// ResolveMergeRequestDiscussion() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/discussions.html#resolve-a-merge-request-thread -type ResolveMergeRequestDiscussionOptions struct { - Resolved *bool `url:"resolved,omitempty" json:"resolved,omitempty"` -} - -// ResolveMergeRequestDiscussion resolves/unresolves whole discussion of a merge -// request. 
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/discussions.html#resolve-a-merge-request-thread
-func (s *DiscussionsService) ResolveMergeRequestDiscussion(pid interface{}, mergeRequest int, discussion string, opt *ResolveMergeRequestDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/merge_requests/%d/discussions/%s",
-		PathEscape(project),
-		mergeRequest,
-		discussion,
-	)
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	d := new(Discussion)
-	resp, err := s.client.Do(req, d)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return d, resp, nil
-}
-
-// AddMergeRequestDiscussionNoteOptions represents the available
-// AddMergeRequestDiscussionNote() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/discussions.html#add-note-to-existing-merge-request-thread
-type AddMergeRequestDiscussionNoteOptions struct {
-	Body      *string    `url:"body,omitempty" json:"body,omitempty"`
-	CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"`
-}
-
-// AddMergeRequestDiscussionNote adds a new note to an existing discussion of
-// a merge request.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/discussions.html#add-note-to-existing-merge-request-thread
-func (s *DiscussionsService) AddMergeRequestDiscussionNote(pid interface{}, mergeRequest int, discussion string, opt *AddMergeRequestDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/merge_requests/%d/discussions/%s/notes",
-		PathEscape(project),
-		mergeRequest,
-		discussion,
-	)
-
-	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	n := new(Note)
-	resp, err := s.client.Do(req, n)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return n, resp, nil
-}
-
-// UpdateMergeRequestDiscussionNoteOptions represents the available
-// UpdateMergeRequestDiscussionNote() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/discussions.html#modify-an-existing-merge-request-thread-note
-type UpdateMergeRequestDiscussionNoteOptions struct {
-	Body      *string    `url:"body,omitempty" json:"body,omitempty"`
-	CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"`
-	Resolved  *bool      `url:"resolved,omitempty" json:"resolved,omitempty"`
-}
-
-// UpdateMergeRequestDiscussionNote modifies an existing discussion note of a
-// merge request.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/discussions.html#modify-an-existing-merge-request-thread-note -func (s *DiscussionsService) UpdateMergeRequestDiscussionNote(pid interface{}, mergeRequest int, discussion string, note int, opt *UpdateMergeRequestDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/discussions/%s/notes/%d", - PathEscape(project), - mergeRequest, - discussion, - note, - ) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - n := new(Note) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil -} - -// DeleteMergeRequestDiscussionNote deletes an existing discussion of a merge -// request. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/discussions.html#delete-a-merge-request-thread-note -func (s *DiscussionsService) DeleteMergeRequestDiscussionNote(pid interface{}, mergeRequest int, discussion string, note int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/discussions/%s/notes/%d", - PathEscape(project), - mergeRequest, - discussion, - note, - ) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// ListCommitDiscussionsOptions represents the available -// ListCommitDiscussions() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/discussions.html#list-project-commit-discussion-items -type ListCommitDiscussionsOptions ListOptions - -// ListCommitDiscussions gets a list of all discussions for a single -// commit. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/discussions.html#list-project-commit-discussion-items -func (s *DiscussionsService) ListCommitDiscussions(pid interface{}, commit string, opt *ListCommitDiscussionsOptions, options ...RequestOptionFunc) ([]*Discussion, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/commits/%s/discussions", - PathEscape(project), - commit, - ) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ds []*Discussion - resp, err := s.client.Do(req, &ds) - if err != nil { - return nil, resp, err - } - - return ds, resp, nil -} - -// GetCommitDiscussion returns a single discussion for a specific project -// commit. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/discussions.html#get-single-commit-discussion-item -func (s *DiscussionsService) GetCommitDiscussion(pid interface{}, commit string, discussion string, options ...RequestOptionFunc) (*Discussion, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/commits/%s/discussions/%s", - PathEscape(project), - commit, - discussion, - ) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - d := new(Discussion) - resp, err := s.client.Do(req, d) - if err != nil { - return nil, resp, err - } - - return d, resp, nil -} - -// CreateCommitDiscussionOptions represents the available -// CreateCommitDiscussion() options. 
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/discussions.html#create-new-commit-thread
-type CreateCommitDiscussionOptions struct {
-	Body      *string       `url:"body,omitempty" json:"body,omitempty"`
-	CreatedAt *time.Time    `url:"created_at,omitempty" json:"created_at,omitempty"`
-	Position  *NotePosition `url:"position,omitempty" json:"position,omitempty"`
-}
-
-// CreateCommitDiscussion creates a new discussion for a single project commit.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/discussions.html#create-new-commit-thread
-func (s *DiscussionsService) CreateCommitDiscussion(pid interface{}, commit string, opt *CreateCommitDiscussionOptions, options ...RequestOptionFunc) (*Discussion, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/repository/commits/%s/discussions",
-		PathEscape(project),
-		commit,
-	)
-
-	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	d := new(Discussion)
-	resp, err := s.client.Do(req, d)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return d, resp, nil
-}
-
-// AddCommitDiscussionNoteOptions represents the available
-// AddCommitDiscussionNote() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/discussions.html#add-note-to-existing-commit-thread
-type AddCommitDiscussionNoteOptions struct {
-	Body      *string    `url:"body,omitempty" json:"body,omitempty"`
-	CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"`
-}
-
-// AddCommitDiscussionNote adds a new note to an existing discussion of a
-// project commit.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/discussions.html#add-note-to-existing-commit-thread
-func (s *DiscussionsService) AddCommitDiscussionNote(pid interface{}, commit string, discussion string, opt *AddCommitDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/repository/commits/%s/discussions/%s/notes",
-		PathEscape(project),
-		commit,
-		discussion,
-	)
-
-	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	n := new(Note)
-	resp, err := s.client.Do(req, n)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return n, resp, nil
-}
-
-// UpdateCommitDiscussionNoteOptions represents the available
-// UpdateCommitDiscussionNote() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/discussions.html#modify-an-existing-commit-thread-note
-type UpdateCommitDiscussionNoteOptions struct {
-	Body      *string    `url:"body,omitempty" json:"body,omitempty"`
-	CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"`
-}
-
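For reference, a sketch of driving the commit-discussion helpers being removed here. The project ID, commit SHA, and token are placeholder assumptions; the calls mirror the signatures in the deleted code:

package main

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	client, err := gitlab.NewClient("glpat-example-token") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	sha := "6104942438c14ec7bd21c6cd5bd995272b3faff6" // placeholder commit SHA

	// Open a new thread on the commit in project 1234.
	disc, _, err := client.Discussions.CreateCommitDiscussion(1234, sha, &gitlab.CreateCommitDiscussionOptions{
		Body: gitlab.String("Please double-check this change"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Add a follow-up note to the same thread.
	if _, _, err := client.Discussions.AddCommitDiscussionNote(1234, sha, disc.ID, &gitlab.AddCommitDiscussionNoteOptions{
		Body: gitlab.String("Verified, looks good"),
	}); err != nil {
		log.Fatal(err)
	}
}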
-// UpdateCommitDiscussionNote modifies an existing discussion note of a commit.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/discussions.html#modify-an-existing-commit-thread-note
-func (s *DiscussionsService) UpdateCommitDiscussionNote(pid interface{}, commit string, discussion string, note int, opt *UpdateCommitDiscussionNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/repository/commits/%s/discussions/%s/notes/%d",
-		PathEscape(project),
-		commit,
-		discussion,
-		note,
-	)
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	n := new(Note)
-	resp, err := s.client.Do(req, n)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return n, resp, nil
-}
-
-// DeleteCommitDiscussionNote deletes an existing discussion note of a commit.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/discussions.html#delete-a-commit-thread-note
-func (s *DiscussionsService) DeleteCommitDiscussionNote(pid interface{}, commit string, discussion string, note int, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/repository/commits/%s/discussions/%s/notes/%d",
-		PathEscape(project),
-		commit,
-		discussion,
-		note,
-	)
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/dockerfile_templates.go b/vendor/github.com/xanzy/go-gitlab/dockerfile_templates.go
deleted file mode 100644
index 3608c5a4de..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/dockerfile_templates.go
+++ /dev/null
@@ -1,93 +0,0 @@
-//
-// Copyright 2022, FantasyTeddy
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package gitlab
-
-import (
-	"fmt"
-	"net/http"
-	"net/url"
-)
-
-// DockerfileTemplatesService handles communication with the Dockerfile
-// templates related methods of the GitLab API.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/templates/dockerfiles.html
-type DockerfileTemplatesService struct {
-	client *Client
-}
-
-// DockerfileTemplate represents a GitLab Dockerfile template.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/templates/dockerfiles.html
-type DockerfileTemplate struct {
-	Name    string `json:"name"`
-	Content string `json:"content"`
-}
-
-// DockerfileTemplateListItem represents a GitLab Dockerfile template from the list.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/templates/dockerfiles.html
-type DockerfileTemplateListItem struct {
-	Key  string `json:"key"`
-	Name string `json:"name"`
-}
-
-// ListDockerfileTemplatesOptions represents the available ListTemplates() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/templates/dockerfiles.html#list-dockerfile-templates
-type ListDockerfileTemplatesOptions ListOptions
-
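The removed Dockerfile-template service is a thin wrapper over two read-only endpoints. A minimal sketch of a caller; the client field name (client.DockerfileTemplate) and the template key "Binary" are assumptions that may vary by library version:

package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	client, err := gitlab.NewClient("glpat-example-token") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// List the available template keys, one page at a time.
	templates, _, err := client.DockerfileTemplate.ListTemplates(&gitlab.ListDockerfileTemplatesOptions{PerPage: 20})
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range templates {
		fmt.Println(t.Key)
	}

	// Fetch the full content of one template by key.
	tmpl, _, err := client.DockerfileTemplate.GetTemplate("Binary")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tmpl.Content)
}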
-// ListTemplates gets a list of available Dockerfile templates.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/templates/dockerfiles.html#list-dockerfile-templates
-func (s *DockerfileTemplatesService) ListTemplates(opt *ListDockerfileTemplatesOptions, options ...RequestOptionFunc) ([]*DockerfileTemplateListItem, *Response, error) {
-	req, err := s.client.NewRequest(http.MethodGet, "templates/dockerfiles", opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var gs []*DockerfileTemplateListItem
-	resp, err := s.client.Do(req, &gs)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return gs, resp, nil
-}
-
-// GetTemplate gets a single Dockerfile template.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/templates/dockerfiles.html#single-dockerfile-template
-func (s *DockerfileTemplatesService) GetTemplate(key string, options ...RequestOptionFunc) (*DockerfileTemplate, *Response, error) {
-	u := fmt.Sprintf("templates/dockerfiles/%s", url.PathEscape(key))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	g := new(DockerfileTemplate)
-	resp, err := s.client.Do(req, g)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return g, resp, nil
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/dora_metrics.go b/vendor/github.com/xanzy/go-gitlab/dora_metrics.go
deleted file mode 100644
index a2ad418eb1..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/dora_metrics.go
+++ /dev/null
@@ -1,110 +0,0 @@
-//
-// Copyright 2021, Sander van Harmelen
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package gitlab
-
-import (
-	"fmt"
-	"net/http"
-)
-
-// DORAMetricsService handles communication with the DORA metrics related methods
-// of the GitLab API.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/dora/metrics.html
-type DORAMetricsService struct {
-	client *Client
-}
-
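A sketch of querying project-level DORA metrics with the removed service. Note that, unlike most methods in this package, the options struct is passed by value. The DORAMetrics client field and the metric/interval constants are assumed names from the library and may differ between versions:

package main

import (
	"fmt"
	"log"
	"time"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	client, err := gitlab.NewClient("glpat-example-token") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	metric := gitlab.DORAMetricDeploymentFrequency // assumed constant name
	interval := gitlab.DORAMetricIntervalMonthly   // assumed constant name
	start := gitlab.ISOTime(time.Date(2024, time.January, 1, 0, 0, 0, 0, time.UTC))

	// The options are passed by value here, per the deleted signature above.
	metrics, _, err := client.DORAMetrics.GetProjectDORAMetrics(1234, gitlab.GetDORAMetricsOptions{
		Metric:    &metric,
		Interval:  &interval,
		StartDate: &start,
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range metrics {
		fmt.Printf("%s: %v\n", m.Date, m.Value)
	}
}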
-// DORAMetric represents a single DORA metric data point.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/dora/metrics.html
-type DORAMetric struct {
-	Date  string  `json:"date"`
-	Value float64 `json:"value"`
-}
-
-// String gets a string representation of a DORAMetric data point.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/dora/metrics.html
-func (m DORAMetric) String() string {
-	return Stringify(m)
-}
-
-// GetDORAMetricsOptions represents the request body options for getting
-// DORA metrics.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/dora/metrics.html
-type GetDORAMetricsOptions struct {
-	Metric           *DORAMetricType     `url:"metric,omitempty" json:"metric,omitempty"`
-	EndDate          *ISOTime            `url:"end_date,omitempty" json:"end_date,omitempty"`
-	EnvironmentTiers *[]string           `url:"environment_tiers,comma,omitempty" json:"environment_tiers,omitempty"`
-	Interval         *DORAMetricInterval `url:"interval,omitempty" json:"interval,omitempty"`
-	StartDate        *ISOTime            `url:"start_date,omitempty" json:"start_date,omitempty"`
-
-	// Deprecated: Use EnvironmentTiers instead.
-	EnvironmentTier *string `url:"environment_tier,omitempty" json:"environment_tier,omitempty"`
-}
-
-// GetProjectDORAMetrics gets the DORA metrics for a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/dora/metrics.html#get-project-level-dora-metrics
-func (s *DORAMetricsService) GetProjectDORAMetrics(pid interface{}, opt GetDORAMetricsOptions, options ...RequestOptionFunc) ([]DORAMetric, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/dora/metrics", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var metrics []DORAMetric
-	resp, err := s.client.Do(req, &metrics)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return metrics, resp, err
-}
-
-// GetGroupDORAMetrics gets the DORA metrics for a group.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/dora/metrics.html#get-group-level-dora-metrics
-func (s *DORAMetricsService) GetGroupDORAMetrics(gid interface{}, opt GetDORAMetricsOptions, options ...RequestOptionFunc) ([]DORAMetric, *Response, error) {
-	group, err := parseID(gid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("groups/%s/dora/metrics", PathEscape(group))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var metrics []DORAMetric
-	resp, err := s.client.Do(req, &metrics)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return metrics, resp, err
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/draft_notes.go b/vendor/github.com/xanzy/go-gitlab/draft_notes.go
deleted file mode 100644
index 376e4d0c86..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/draft_notes.go
+++ /dev/null
@@ -1,233 +0,0 @@
-//
-// Copyright 2021, Sander van Harmelen
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// - -package gitlab - -import ( - "fmt" - "net/http" -) - -type DraftNote struct { - ID int `json:"id"` - AuthorID int `json:"author_id"` - MergeRequestID int `json:"merge_request_id"` - ResolveDiscussion bool `json:"resolve_discussion"` - DiscussionID string `json:"discussion_id"` - Note string `json:"note"` - CommitID string `json:"commit_id"` - LineCode string `json:"line_code"` - Position *NotePosition `json:"position"` -} - -// DraftNotesService handles communication with the draft notes related methods -// of the GitLab API. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/draft_notes.html#list-all-merge-request-draft-notes -type DraftNotesService struct { - client *Client -} - -// ListDraftNotesOptions represents the available ListDraftNotes() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/draft_notes.html#list-all-merge-request-draft-notes -type ListDraftNotesOptions struct { - ListOptions - OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` - Sort *string `url:"sort,omitempty" json:"sort,omitempty"` -} - -// ListDraftNotes gets a list of all draft notes for a merge request. -// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/draft_notes.html#list-all-merge-request-draft-notes -func (s *DraftNotesService) ListDraftNotes(pid interface{}, mergeRequest int, opt *ListDraftNotesOptions, options ...RequestOptionFunc) ([]*DraftNote, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/draft_notes", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var n []*DraftNote - resp, err := s.client.Do(req, &n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil -} - -// GetDraftNote gets a single draft note for a merge request. -// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/draft_notes.html#get-a-single-draft-note -func (s *DraftNotesService) GetDraftNote(pid interface{}, mergeRequest int, note int, options ...RequestOptionFunc) (*DraftNote, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/draft_notes/%d", PathEscape(project), mergeRequest, note) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - n := new(DraftNote) - resp, err := s.client.Do(req, &n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil -} - -// CreateDraftNoteOptions represents the available CreateDraftNote() -// options. -// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/draft_notes.html#create-a-draft-note -type CreateDraftNoteOptions struct { - Note *string `url:"note" json:"note"` - CommitID *string `url:"commit_id,omitempty" json:"commit_id,omitempty"` - InReplyToDiscussionID *string `url:"in_reply_to_discussion_id,omitempty" json:"in_reply_to_discussion_id,omitempty"` - ResolveDiscussion *bool `url:"resolve_discussion,omitempty" json:"resolve_discussion,omitempty"` - Position *PositionOptions `url:"position,omitempty" json:"position,omitempty"` -} - -// CreateDraftNote creates a draft note for a merge request. 
-// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/draft_notes.html#create-a-draft-note -func (s *DraftNotesService) CreateDraftNote(pid interface{}, mergeRequest int, opt *CreateDraftNoteOptions, options ...RequestOptionFunc) (*DraftNote, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/draft_notes", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - n := new(DraftNote) - resp, err := s.client.Do(req, &n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil -} - -// UpdateDraftNoteOptions represents the available UpdateDraftNote() -// options. -// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/draft_notes.html#create-a-draft-note -type UpdateDraftNoteOptions struct { - Note *string `url:"note,omitempty" json:"note,omitempty"` - Position *PositionOptions `url:"position,omitempty" json:"position,omitempty"` -} - -// UpdateDraftNote updates a draft note for a merge request. -// -// Gitlab API docs: https://docs.gitlab.com/ee/api/draft_notes.html#create-a-draft-note -func (s *DraftNotesService) UpdateDraftNote(pid interface{}, mergeRequest int, note int, opt *UpdateDraftNoteOptions, options ...RequestOptionFunc) (*DraftNote, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/draft_notes/%d", PathEscape(project), mergeRequest, note) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - n := new(DraftNote) - resp, err := s.client.Do(req, &n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil -} - -// DeleteDraftNote deletes a single draft note for a merge request. -// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/draft_notes.html#delete-a-draft-note -func (s *DraftNotesService) DeleteDraftNote(pid interface{}, mergeRequest int, note int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/draft_notes/%d", PathEscape(project), mergeRequest, note) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// PublishDraftNote publishes a single draft note for a merge request. -// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/draft_notes.html#publish-a-draft-note -func (s *DraftNotesService) PublishDraftNote(pid interface{}, mergeRequest int, note int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/draft_notes/%d/publish", PathEscape(project), mergeRequest, note) - - req, err := s.client.NewRequest(http.MethodPut, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// PublishAllDraftNotes publishes all draft notes for a merge request that belong to the user. 
-// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/draft_notes.html#publish-a-draft-note -func (s *DraftNotesService) PublishAllDraftNotes(pid interface{}, mergeRequest int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/draft_notes/bulk_publish", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/environments.go b/vendor/github.com/xanzy/go-gitlab/environments.go deleted file mode 100644 index b6d902f86f..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/environments.go +++ /dev/null @@ -1,238 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// EnvironmentsService handles communication with the environment related methods -// of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/environments.html -type EnvironmentsService struct { - client *Client -} - -// Environment represents a GitLab environment. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/environments.html -type Environment struct { - ID int `json:"id"` - Name string `json:"name"` - Slug string `json:"slug"` - State string `json:"state"` - Tier string `json:"tier"` - ExternalURL string `json:"external_url"` - Project *Project `json:"project"` - CreatedAt *time.Time `json:"created_at"` - UpdatedAt *time.Time `json:"updated_at"` - LastDeployment *Deployment `json:"last_deployment"` -} - -func (env Environment) String() string { - return Stringify(env) -} - -// ListEnvironmentsOptions represents the available ListEnvironments() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/environments.html#list-environments -type ListEnvironmentsOptions struct { - ListOptions - Name *string `url:"name,omitempty" json:"name,omitempty"` - Search *string `url:"search,omitempty" json:"search,omitempty"` - States *string `url:"states,omitempty" json:"states,omitempty"` -} - -// ListEnvironments gets a list of environments from a project, sorted by name -// alphabetically. 
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/environments.html#list-environments
-func (s *EnvironmentsService) ListEnvironments(pid interface{}, opts *ListEnvironmentsOptions, options ...RequestOptionFunc) ([]*Environment, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/environments", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opts, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var envs []*Environment
-	resp, err := s.client.Do(req, &envs)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return envs, resp, nil
-}
-
-// GetEnvironment gets a specific environment from a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/environments.html#get-a-specific-environment
-func (s *EnvironmentsService) GetEnvironment(pid interface{}, environment int, options ...RequestOptionFunc) (*Environment, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/environments/%d", PathEscape(project), environment)
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	env := new(Environment)
-	resp, err := s.client.Do(req, env)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return env, resp, nil
-}
-
-// CreateEnvironmentOptions represents the available CreateEnvironment() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/environments.html#create-a-new-environment
-type CreateEnvironmentOptions struct {
-	Name        *string `url:"name,omitempty" json:"name,omitempty"`
-	ExternalURL *string `url:"external_url,omitempty" json:"external_url,omitempty"`
-	Tier        *string `url:"tier,omitempty" json:"tier,omitempty"`
-}
-
-// CreateEnvironment adds an environment to a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/environments.html#create-a-new-environment
-func (s *EnvironmentsService) CreateEnvironment(pid interface{}, opt *CreateEnvironmentOptions, options ...RequestOptionFunc) (*Environment, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/environments", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	env := new(Environment)
-	resp, err := s.client.Do(req, env)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return env, resp, nil
-}
-
-// EditEnvironmentOptions represents the available EditEnvironment() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/environments.html#update-an-existing-environment
-type EditEnvironmentOptions struct {
-	Name        *string `url:"name,omitempty" json:"name,omitempty"`
-	ExternalURL *string `url:"external_url,omitempty" json:"external_url,omitempty"`
-	Tier        *string `url:"tier,omitempty" json:"tier,omitempty"`
-}
-
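A sketch of the environment lifecycle using this removed service. The project ID, names, and URL are placeholders; the calls match the deleted signatures, and per the GitLab API, Force on stop skips any configured on_stop action:

package main

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	client, err := gitlab.NewClient("glpat-example-token") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// Create a staging environment with an external URL.
	env, _, err := client.Environments.CreateEnvironment(1234, &gitlab.CreateEnvironmentOptions{
		Name:        gitlab.String("staging"),
		ExternalURL: gitlab.String("https://staging.example.com"),
		Tier:        gitlab.String("staging"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Later, stop it; Force stops the environment without running on_stop.
	if _, _, err := client.Environments.StopEnvironment(1234, env.ID, &gitlab.StopEnvironmentOptions{
		Force: gitlab.Bool(true),
	}); err != nil {
		log.Fatal(err)
	}
}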
-// EditEnvironment updates an existing project environment.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/environments.html#update-an-existing-environment
-func (s *EnvironmentsService) EditEnvironment(pid interface{}, environment int, opt *EditEnvironmentOptions, options ...RequestOptionFunc) (*Environment, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/environments/%d", PathEscape(project), environment)
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	env := new(Environment)
-	resp, err := s.client.Do(req, env)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return env, resp, nil
-}
-
-// DeleteEnvironment removes an environment from a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/environments.html#delete-an-environment
-func (s *EnvironmentsService) DeleteEnvironment(pid interface{}, environment int, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/environments/%d", PathEscape(project), environment)
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// StopEnvironmentOptions represents the available StopEnvironment() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/environments.html#stop-an-environment
-type StopEnvironmentOptions struct {
-	Force *bool `url:"force,omitempty" json:"force,omitempty"`
-}
-
-// StopEnvironment stops an environment within a specific project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/environments.html#stop-an-environment
-func (s *EnvironmentsService) StopEnvironment(pid interface{}, environmentID int, opt *StopEnvironmentOptions, options ...RequestOptionFunc) (*Environment, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/environments/%d/stop", PathEscape(project), environmentID)
-
-	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	env := new(Environment)
-	resp, err := s.client.Do(req, env)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return env, resp, nil
}
diff --git a/vendor/github.com/xanzy/go-gitlab/epic_issues.go b/vendor/github.com/xanzy/go-gitlab/epic_issues.go
deleted file mode 100644
index 545357bd78..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/epic_issues.go
+++ /dev/null
@@ -1,152 +0,0 @@
-//
-// Copyright 2021, Sander van Harmelen
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package gitlab
-
-import (
-	"fmt"
-	"net/http"
-)
-
-// EpicIssuesService handles communication with the epic issue related methods
-// of the GitLab API.
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/epic_issues.html -type EpicIssuesService struct { - client *Client -} - -// EpicIssueAssignment contains both the epic and issue objects returned from -// Gitlab with the assignment ID. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/epic_issues.html -type EpicIssueAssignment struct { - ID int `json:"id"` - Epic *Epic `json:"epic"` - Issue *Issue `json:"issue"` -} - -// ListEpicIssues get a list of epic issues. -// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/epic_issues.html#list-issues-for-an-epic -func (s *EpicIssuesService) ListEpicIssues(gid interface{}, epic int, opt *ListOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epics/%d/issues", PathEscape(group), epic) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var is []*Issue - resp, err := s.client.Do(req, &is) - if err != nil { - return nil, resp, err - } - - return is, resp, nil -} - -// AssignEpicIssue assigns an existing issue to an epic. -// -// Gitlab API Docs: -// https://docs.gitlab.com/ee/api/epic_issues.html#assign-an-issue-to-the-epic -func (s *EpicIssuesService) AssignEpicIssue(gid interface{}, epic, issue int, options ...RequestOptionFunc) (*EpicIssueAssignment, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epics/%d/issues/%d", PathEscape(group), epic, issue) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - a := new(EpicIssueAssignment) - resp, err := s.client.Do(req, a) - if err != nil { - return nil, resp, err - } - - return a, resp, nil -} - -// RemoveEpicIssue removes an issue from an epic. -// -// Gitlab API Docs: -// https://docs.gitlab.com/ee/api/epic_issues.html#remove-an-issue-from-the-epic -func (s *EpicIssuesService) RemoveEpicIssue(gid interface{}, epic, epicIssue int, options ...RequestOptionFunc) (*EpicIssueAssignment, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epics/%d/issues/%d", PathEscape(group), epic, epicIssue) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, nil, err - } - - a := new(EpicIssueAssignment) - resp, err := s.client.Do(req, a) - if err != nil { - return nil, resp, err - } - - return a, resp, nil -} - -// UpdateEpicIsssueAssignmentOptions describes the UpdateEpicIssueAssignment() -// options. -// -// Gitlab API Docs: -// https://docs.gitlab.com/ee/api/epic_issues.html#update-epic---issue-association -type UpdateEpicIsssueAssignmentOptions struct { - *ListOptions - MoveBeforeID *int `url:"move_before_id,omitempty" json:"move_before_id,omitempty"` - MoveAfterID *int `url:"move_after_id,omitempty" json:"move_after_id,omitempty"` -} - -// UpdateEpicIssueAssignment moves an issue before or after another issue in an -// epic issue list. 
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/epic_issues.html#update-epic---issue-association
-func (s *EpicIssuesService) UpdateEpicIssueAssignment(gid interface{}, epic, epicIssue int, opt *UpdateEpicIsssueAssignmentOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) {
-	group, err := parseID(gid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("groups/%s/epics/%d/issues/%d", PathEscape(group), epic, epicIssue)
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var is []*Issue
-	resp, err := s.client.Do(req, &is)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return is, resp, nil
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/epics.go b/vendor/github.com/xanzy/go-gitlab/epics.go
deleted file mode 100644
index 684ffb3343..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/epics.go
+++ /dev/null
@@ -1,275 +0,0 @@
-//
-// Copyright 2021, Sander van Harmelen
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package gitlab
-
-import (
-	"fmt"
-	"net/http"
-	"time"
-)
-
-// EpicsService handles communication with the epic related methods
-// of the GitLab API.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/epics.html
-type EpicsService struct {
-	client *Client
-}
-
-// EpicAuthor represents an author of the epic.
-type EpicAuthor struct {
-	ID        int    `json:"id"`
-	State     string `json:"state"`
-	WebURL    string `json:"web_url"`
-	Name      string `json:"name"`
-	AvatarURL string `json:"avatar_url"`
-	Username  string `json:"username"`
-}
-
-// Epic represents a GitLab epic.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/epics.html
-type Epic struct {
-	ID                      int         `json:"id"`
-	IID                     int         `json:"iid"`
-	GroupID                 int         `json:"group_id"`
-	ParentID                int         `json:"parent_id"`
-	Title                   string      `json:"title"`
-	Description             string      `json:"description"`
-	State                   string      `json:"state"`
-	Confidential            bool        `json:"confidential"`
-	WebURL                  string      `json:"web_url"`
-	Author                  *EpicAuthor `json:"author"`
-	StartDate               *ISOTime    `json:"start_date"`
-	StartDateIsFixed        bool        `json:"start_date_is_fixed"`
-	StartDateFixed          *ISOTime    `json:"start_date_fixed"`
-	StartDateFromMilestones *ISOTime    `json:"start_date_from_milestones"`
-	DueDate                 *ISOTime    `json:"due_date"`
-	DueDateIsFixed          bool        `json:"due_date_is_fixed"`
-	DueDateFixed            *ISOTime    `json:"due_date_fixed"`
-	DueDateFromMilestones   *ISOTime    `json:"due_date_from_milestones"`
-	CreatedAt               *time.Time  `json:"created_at"`
-	UpdatedAt               *time.Time  `json:"updated_at"`
-	ClosedAt                *time.Time  `json:"closed_at"`
-	Labels                  []string    `json:"labels"`
-	Upvotes                 int         `json:"upvotes"`
-	Downvotes               int         `json:"downvotes"`
-	UserNotesCount          int         `json:"user_notes_count"`
-	URL                     string      `json:"url"`
-}
-
-func (e Epic) String() string {
-	return Stringify(e)
-}
-
-// ListGroupEpicsOptions represents the available ListGroupEpics() options.
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/epics.html#list-epics-for-a-group -type ListGroupEpicsOptions struct { - ListOptions - AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` - Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` - WithLabelDetails *bool `url:"with_labels_details,omitempty" json:"with_labels_details,omitempty"` - OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` - Sort *string `url:"sort,omitempty" json:"sort,omitempty"` - Search *string `url:"search,omitempty" json:"search,omitempty"` - State *string `url:"state,omitempty" json:"state,omitempty"` - CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"` - CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` - UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"` - UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"` - IncludeAncestorGroups *bool `url:"include_ancestor_groups,omitempty" json:"include_ancestor_groups,omitempty"` - IncludeDescendantGroups *bool `url:"include_descendant_groups,omitempty" json:"include_descendant_groups,omitempty"` - MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"` -} - -// ListGroupEpics gets a list of group epics. This function accepts pagination -// parameters page and per_page to return the list of group epics. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/epics.html#list-epics-for-a-group -func (s *EpicsService) ListGroupEpics(gid interface{}, opt *ListGroupEpicsOptions, options ...RequestOptionFunc) ([]*Epic, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epics", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var es []*Epic - resp, err := s.client.Do(req, &es) - if err != nil { - return nil, resp, err - } - - return es, resp, nil -} - -// GetEpic gets a single group epic. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/epics.html#single-epic -func (s *EpicsService) GetEpic(gid interface{}, epic int, options ...RequestOptionFunc) (*Epic, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epics/%d", PathEscape(group), epic) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - e := new(Epic) - resp, err := s.client.Do(req, e) - if err != nil { - return nil, resp, err - } - - return e, resp, nil -} - -// GetEpicLinks gets all child epics of an epic. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/epic_links.html -func (s *EpicsService) GetEpicLinks(gid interface{}, epic int, options ...RequestOptionFunc) ([]*Epic, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epics/%d/epics", PathEscape(group), epic) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var e []*Epic - resp, err := s.client.Do(req, &e) - if err != nil { - return nil, resp, err - } - - return e, resp, nil -} - -// CreateEpicOptions represents the available CreateEpic() options. 
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/epics.html#new-epic -type CreateEpicOptions struct { - Title *string `url:"title,omitempty" json:"title,omitempty"` - Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - Color *string `url:"color,omitempty" json:"color,omitempty"` - Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` - CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"` - StartDateIsFixed *bool `url:"start_date_is_fixed,omitempty" json:"start_date_is_fixed,omitempty"` - StartDateFixed *ISOTime `url:"start_date_fixed,omitempty" json:"start_date_fixed,omitempty"` - DueDateIsFixed *bool `url:"due_date_is_fixed,omitempty" json:"due_date_is_fixed,omitempty"` - DueDateFixed *ISOTime `url:"due_date_fixed,omitempty" json:"due_date_fixed,omitempty"` - ParentID *int `url:"parent_id,omitempty" json:"parent_id,omitempty"` -} - -// CreateEpic creates a new group epic. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/epics.html#new-epic -func (s *EpicsService) CreateEpic(gid interface{}, opt *CreateEpicOptions, options ...RequestOptionFunc) (*Epic, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epics", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - e := new(Epic) - resp, err := s.client.Do(req, e) - if err != nil { - return nil, resp, err - } - - return e, resp, nil -} - -// UpdateEpicOptions represents the available UpdateEpic() options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/epics.html#update-epic -type UpdateEpicOptions struct { - AddLabels *LabelOptions `url:"add_labels,omitempty" json:"add_labels,omitempty"` - Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - DueDateFixed *ISOTime `url:"due_date_fixed,omitempty" json:"due_date_fixed,omitempty"` - DueDateIsFixed *bool `url:"due_date_is_fixed,omitempty" json:"due_date_is_fixed,omitempty"` - Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` - ParentID *int `url:"parent_id,omitempty" json:"parent_id,omitempty"` - RemoveLabels *LabelOptions `url:"remove_labels,omitempty" json:"remove_labels,omitempty"` - StartDateFixed *ISOTime `url:"start_date_fixed,omitempty" json:"start_date_fixed,omitempty"` - StartDateIsFixed *bool `url:"start_date_is_fixed,omitempty" json:"start_date_is_fixed,omitempty"` - StateEvent *string `url:"state_event,omitempty" json:"state_event,omitempty"` - Title *string `url:"title,omitempty" json:"title,omitempty"` - UpdatedAt *time.Time `url:"updated_at,omitempty" json:"updated_at,omitempty"` - Color *string `url:"color,omitempty" json:"color,omitempty"` -} - -// UpdateEpic updates an existing group epic. This function is also used -// to mark an epic as closed. 
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/epics.html#update-epic -func (s *EpicsService) UpdateEpic(gid interface{}, epic int, opt *UpdateEpicOptions, options ...RequestOptionFunc) (*Epic, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epics/%d", PathEscape(group), epic) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - e := new(Epic) - resp, err := s.client.Do(req, e) - if err != nil { - return nil, resp, err - } - - return e, resp, nil -} - -// DeleteEpic deletes a single group epic. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/epics.html#delete-epic -func (s *EpicsService) DeleteEpic(gid interface{}, epic int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/epics/%d", PathEscape(group), epic) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/error_tracking.go b/vendor/github.com/xanzy/go-gitlab/error_tracking.go deleted file mode 100644 index fed334a0f8..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/error_tracking.go +++ /dev/null @@ -1,196 +0,0 @@ -// -// Copyright 2022, Ryan Glab -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" -) - -// ErrorTrackingService handles communication with the error tracking -// methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/error_tracking.html -type ErrorTrackingService struct { - client *Client -} - -// ErrorTrackingClientKey represents an error tracking client key. -// -// GitLab docs: -// https://docs.gitlab.com/ee/api/error_tracking.html#error-tracking-client-keys -type ErrorTrackingClientKey struct { - ID int `json:"id"` - Active bool `json:"active"` - PublicKey string `json:"public_key"` - SentryDsn string `json:"sentry_dsn"` -} - -func (p ErrorTrackingClientKey) String() string { - return Stringify(p) -} - -// ErrorTrackingSettings represents error tracking settings for a GitLab project. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/error_tracking.html#error-tracking-project-settings -type ErrorTrackingSettings struct { - Active bool `json:"active"` - ProjectName string `json:"project_name"` - SentryExternalURL string `json:"sentry_external_url"` - APIURL string `json:"api_url"` - Integrated bool `json:"integrated"` -} - -func (p ErrorTrackingSettings) String() string { - return Stringify(p) -} - -// GetErrorTrackingSettings gets error tracking settings. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/error_tracking.html#get-error-tracking-settings -func (s *ErrorTrackingService) GetErrorTrackingSettings(pid interface{}, options ...RequestOptionFunc) (*ErrorTrackingSettings, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/error_tracking/settings", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ets := new(ErrorTrackingSettings) - resp, err := s.client.Do(req, ets) - if err != nil { - return nil, resp, err - } - - return ets, resp, nil -} - -// EnableDisableErrorTrackingOptions represents the available -// EnableDisableErrorTracking() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/error_tracking.html#enable-or-disable-the-error-tracking-project-settings -type EnableDisableErrorTrackingOptions struct { - Active *bool `url:"active,omitempty" json:"active,omitempty"` - Integrated *bool `url:"integrated,omitempty" json:"integrated,omitempty"` -} - -// EnableDisableErrorTracking allows you to enable or disable the error tracking -// settings for a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/error_tracking.html#enable-or-disable-the-error-tracking-project-settings -func (s *ErrorTrackingService) EnableDisableErrorTracking(pid interface{}, opt *EnableDisableErrorTrackingOptions, options ...RequestOptionFunc) (*ErrorTrackingSettings, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/error_tracking/settings", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPatch, u, opt, options) - if err != nil { - return nil, nil, err - } - - ets := new(ErrorTrackingSettings) - resp, err := s.client.Do(req, &ets) - if err != nil { - return nil, resp, err - } - - return ets, resp, nil -} - -// ListClientKeysOptions represents the available ListClientKeys() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/error_tracking.html#list-project-client-keys -type ListClientKeysOptions ListOptions - -// ListClientKeys lists error tracking project client keys. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/error_tracking.html#list-project-client-keys -func (s *ErrorTrackingService) ListClientKeys(pid interface{}, opt *ListClientKeysOptions, options ...RequestOptionFunc) ([]*ErrorTrackingClientKey, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/error_tracking/client_keys", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var cks []*ErrorTrackingClientKey - resp, err := s.client.Do(req, &cks) - if err != nil { - return nil, resp, err - } - - return cks, resp, nil -} - -// CreateClientKey creates a new client key for a project. 
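// Editor's sketch (not part of the vendored file being removed): toggling the
// integrated error-tracking backend via the service deleted above. The token
// and the "group/project" path are placeholders.
package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("glpat-example") // hypothetical token
	if err != nil {
		log.Fatal(err)
	}

	// Enable error tracking with the GitLab-integrated backend.
	settings, _, err := git.ErrorTracking.EnableDisableErrorTracking(
		"group/project",
		&gitlab.EnableDisableErrorTrackingOptions{
			Active:     gitlab.Bool(true),
			Integrated: gitlab.Bool(true),
		},
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("error tracking active=%t integrated=%t\n",
		settings.Active, settings.Integrated)
}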
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/error_tracking.html#create-a-client-key -func (s *ErrorTrackingService) CreateClientKey(pid interface{}, options ...RequestOptionFunc) (*ErrorTrackingClientKey, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/error_tracking/client_keys", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - ck := new(ErrorTrackingClientKey) - resp, err := s.client.Do(req, ck) - if err != nil { - return nil, resp, err - } - - return ck, resp, nil -} - -// DeleteClientKey removes a client key from the project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/error_tracking.html#delete-a-client-key -func (s *ErrorTrackingService) DeleteClientKey(pid interface{}, keyID int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/error_tracking/client_keys/%d", PathEscape(project), keyID) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/event_parsing.go b/vendor/github.com/xanzy/go-gitlab/event_parsing.go deleted file mode 100644 index 0f474211d3..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/event_parsing.go +++ /dev/null @@ -1,312 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "encoding/json" - "fmt" - "net/http" -) - -// EventType represents a Gitlab event type. -type EventType string - -// List of available event types. 
-const ( - EventConfidentialIssue EventType = "Confidential Issue Hook" - EventConfidentialNote EventType = "Confidential Note Hook" - EventTypeBuild EventType = "Build Hook" - EventTypeDeployment EventType = "Deployment Hook" - EventTypeFeatureFlag EventType = "Feature Flag Hook" - EventTypeIssue EventType = "Issue Hook" - EventTypeJob EventType = "Job Hook" - EventTypeMember EventType = "Member Hook" - EventTypeMergeRequest EventType = "Merge Request Hook" - EventTypeNote EventType = "Note Hook" - EventTypePipeline EventType = "Pipeline Hook" - EventTypePush EventType = "Push Hook" - EventTypeRelease EventType = "Release Hook" - EventTypeResourceAccessToken EventType = "Resource Access Token Hook" - EventTypeServiceHook EventType = "Service Hook" - EventTypeSubGroup EventType = "Subgroup Hook" - EventTypeSystemHook EventType = "System Hook" - EventTypeTagPush EventType = "Tag Push Hook" - EventTypeWikiPage EventType = "Wiki Page Hook" -) - -const ( - eventObjectKindPush = "push" - eventObjectKindTagPush = "tag_push" - eventObjectKindMergeRequest = "merge_request" -) - -const ( - noteableTypeCommit = "Commit" - noteableTypeIssue = "Issue" - noteableTypeMergeRequest = "MergeRequest" - noteableTypeSnippet = "Snippet" -) - -type noteEvent struct { - ObjectKind string `json:"object_kind"` - ObjectAttributes struct { - NoteableType string `json:"noteable_type"` - } `json:"object_attributes"` -} - -type serviceEvent struct { - ObjectKind string `json:"object_kind"` -} - -const eventTokenHeader = "X-Gitlab-Token" - -// HookEventToken returns the token for the given request. -func HookEventToken(r *http.Request) string { - return r.Header.Get(eventTokenHeader) -} - -const eventTypeHeader = "X-Gitlab-Event" - -// HookEventType returns the event type for the given request. -func HookEventType(r *http.Request) EventType { - return EventType(r.Header.Get(eventTypeHeader)) -} - -// ParseHook tries to parse both web- and system hooks. -// -// Example usage: -// -// func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { -// payload, err := ioutil.ReadAll(r.Body) -// if err != nil { ... } -// event, err := gitlab.ParseHook(gitlab.HookEventType(r), payload) -// if err != nil { ... } -// switch event := event.(type) { -// case *gitlab.PushEvent: -// processPushEvent(event) -// case *gitlab.MergeEvent: -// processMergeEvent(event) -// ... -// } -// } -func ParseHook(eventType EventType, payload []byte) (event interface{}, err error) { - switch eventType { - case EventTypeSystemHook: - return ParseSystemhook(payload) - default: - return ParseWebhook(eventType, payload) - } -} - -// ParseSystemhook parses the event payload. For recognized event types, a -// value of the corresponding struct type will be returned. An error will be -// returned for unrecognized event types. -// -// Example usage: -// -// func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { -// payload, err := ioutil.ReadAll(r.Body) -// if err != nil { ... } -// event, err := gitlab.ParseSystemhook(payload) -// if err != nil { ... } -// switch event := event.(type) { -// case *gitlab.PushSystemEvent: -// processPushSystemEvent(event) -// case *gitlab.MergeSystemEvent: -// processMergeSystemEvent(event) -// ... 
-// } -// } -func ParseSystemhook(payload []byte) (event interface{}, err error) { - e := &systemHookEvent{} - err = json.Unmarshal(payload, e) - if err != nil { - return nil, err - } - - switch e.EventName { - case eventObjectKindPush: - event = &PushSystemEvent{} - case eventObjectKindTagPush: - event = &TagPushSystemEvent{} - case "repository_update": - event = &RepositoryUpdateSystemEvent{} - case - "project_create", - "project_update", - "project_destroy", - "project_transfer", - "project_rename": - event = &ProjectSystemEvent{} - case - "group_create", - "group_destroy", - "group_rename": - event = &GroupSystemEvent{} - case "key_create", "key_destroy": - event = &KeySystemEvent{} - case - "user_create", - "user_destroy", - "user_rename", - "user_failed_login": - event = &UserSystemEvent{} - case - "user_add_to_group", - "user_remove_from_group", - "user_update_for_group": - event = &UserGroupSystemEvent{} - case - "user_add_to_team", - "user_remove_from_team", - "user_update_for_team": - event = &UserTeamSystemEvent{} - default: - switch e.ObjectKind { - case string(MergeRequestEventTargetType): - event = &MergeEvent{} - default: - return nil, fmt.Errorf("unexpected system hook type %s", e.EventName) - } - } - - if err := json.Unmarshal(payload, event); err != nil { - return nil, err - } - - return event, nil -} - -// WebhookEventType returns the event type for the given request. -func WebhookEventType(r *http.Request) EventType { - return EventType(r.Header.Get(eventTypeHeader)) -} - -// ParseWebhook parses the event payload. For recognized event types, a -// value of the corresponding struct type will be returned. An error will -// be returned for unrecognized event types. -// -// Example usage: -// -// func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { -// payload, err := ioutil.ReadAll(r.Body) -// if err != nil { ... } -// event, err := gitlab.ParseWebhook(gitlab.HookEventType(r), payload) -// if err != nil { ... } -// switch event := event.(type) { -// case *gitlab.PushEvent: -// processPushEvent(event) -// case *gitlab.MergeEvent: -// processMergeEvent(event) -// ... 
-// } -// } -func ParseWebhook(eventType EventType, payload []byte) (event interface{}, err error) { - switch eventType { - case EventTypeBuild: - event = &BuildEvent{} - case EventTypeDeployment: - event = &DeploymentEvent{} - case EventTypeFeatureFlag: - event = &FeatureFlagEvent{} - case EventTypeIssue, EventConfidentialIssue: - event = &IssueEvent{} - case EventTypeJob: - event = &JobEvent{} - case EventTypeMember: - event = &MemberEvent{} - case EventTypeMergeRequest: - event = &MergeEvent{} - case EventTypeNote, EventConfidentialNote: - note := ¬eEvent{} - err := json.Unmarshal(payload, note) - if err != nil { - return nil, err - } - - if note.ObjectKind != string(NoteEventTargetType) { - return nil, fmt.Errorf("unexpected object kind %s", note.ObjectKind) - } - - switch note.ObjectAttributes.NoteableType { - case noteableTypeCommit: - event = &CommitCommentEvent{} - case noteableTypeMergeRequest: - event = &MergeCommentEvent{} - case noteableTypeIssue: - event = &IssueCommentEvent{} - case noteableTypeSnippet: - event = &SnippetCommentEvent{} - default: - return nil, fmt.Errorf("unexpected noteable type %s", note.ObjectAttributes.NoteableType) - } - case EventTypePipeline: - event = &PipelineEvent{} - case EventTypePush: - event = &PushEvent{} - case EventTypeRelease: - event = &ReleaseEvent{} - case EventTypeResourceAccessToken: - data := map[string]interface{}{} - err := json.Unmarshal(payload, &data) - if err != nil { - return nil, err - } - - _, groupEvent := data["group"] - _, projectEvent := data["project"] - - switch { - case groupEvent: - event = &GroupResourceAccessTokenEvent{} - case projectEvent: - event = &ProjectResourceAccessTokenEvent{} - default: - return nil, fmt.Errorf("unexpected resource access token payload") - } - case EventTypeServiceHook: - service := &serviceEvent{} - err := json.Unmarshal(payload, service) - if err != nil { - return nil, err - } - switch service.ObjectKind { - case eventObjectKindPush: - event = &PushEvent{} - case eventObjectKindTagPush: - event = &TagEvent{} - case eventObjectKindMergeRequest: - event = &MergeEvent{} - default: - return nil, fmt.Errorf("unexpected service type %s", service.ObjectKind) - } - case EventTypeSubGroup: - event = &SubGroupEvent{} - case EventTypeTagPush: - event = &TagEvent{} - case EventTypeWikiPage: - event = &WikiPageEvent{} - default: - return nil, fmt.Errorf("unexpected event type: %s", eventType) - } - - if err := json.Unmarshal(payload, event); err != nil { - return nil, err - } - - return event, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/event_systemhook_types.go b/vendor/github.com/xanzy/go-gitlab/event_systemhook_types.go deleted file mode 100644 index a653d15b58..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/event_systemhook_types.go +++ /dev/null @@ -1,249 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
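// Editor's sketch (not part of the vendored files being removed): an HTTP
// handler tying together HookEventToken, HookEventType, and ParseWebhook
// from the deleted event_parsing.go. The secret and port are placeholders.
package main

import (
	"io"
	"log"
	"net/http"

	gitlab "github.com/xanzy/go-gitlab"
)

const webhookSecret = "change-me" // hypothetical shared secret

func hookHandler(w http.ResponseWriter, r *http.Request) {
	// Reject requests whose X-Gitlab-Token header does not match.
	if gitlab.HookEventToken(r) != webhookSecret {
		http.Error(w, "invalid token", http.StatusUnauthorized)
		return
	}
	payload, err := io.ReadAll(r.Body)
	if err != nil {
		http.Error(w, "read error", http.StatusBadRequest)
		return
	}
	event, err := gitlab.ParseWebhook(gitlab.HookEventType(r), payload)
	if err != nil {
		http.Error(w, "unsupported event", http.StatusBadRequest)
		return
	}
	switch ev := event.(type) {
	case *gitlab.PushEvent:
		log.Printf("push to %s by %s", ev.Ref, ev.UserUsername)
	case *gitlab.MergeEvent:
		log.Printf("MR !%d action=%s", ev.ObjectAttributes.IID, ev.ObjectAttributes.Action)
	default:
		// Ignore event types this handler does not care about.
	}
	w.WriteHeader(http.StatusOK)
}

func main() {
	http.HandleFunc("/webhook", hookHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}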
-// - -package gitlab - -import "time" - -// systemHookEvent is used to pre-process events to determine the -// system hook event type. -type systemHookEvent struct { - BaseSystemEvent - ObjectKind string `json:"object_kind"` -} - -// BaseSystemEvent contains system hook's common properties. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/administration/system_hooks.html -type BaseSystemEvent struct { - EventName string `json:"event_name"` - CreatedAt string `json:"created_at"` - UpdatedAt string `json:"updated_at"` -} - -// ProjectSystemEvent represents a project system event. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/administration/system_hooks.html -type ProjectSystemEvent struct { - BaseSystemEvent - Name string `json:"name"` - Path string `json:"path"` - PathWithNamespace string `json:"path_with_namespace"` - ProjectID int `json:"project_id"` - OwnerName string `json:"owner_name"` - OwnerEmail string `json:"owner_email"` - ProjectVisibility string `json:"project_visibility"` - OldPathWithNamespace string `json:"old_path_with_namespace,omitempty"` -} - -// GroupSystemEvent represents a group system event. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/administration/system_hooks.html -type GroupSystemEvent struct { - BaseSystemEvent - Name string `json:"name"` - Path string `json:"path"` - PathWithNamespace string `json:"full_path"` - GroupID int `json:"group_id"` - OwnerName string `json:"owner_name"` - OwnerEmail string `json:"owner_email"` - ProjectVisibility string `json:"project_visibility"` - OldPath string `json:"old_path,omitempty"` - OldPathWithNamespace string `json:"old_full_path,omitempty"` -} - -// KeySystemEvent represents a key system event. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/administration/system_hooks.html -type KeySystemEvent struct { - BaseSystemEvent - ID int `json:"id"` - Username string `json:"username"` - Key string `json:"key"` -} - -// UserSystemEvent represents a user system event. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/administration/system_hooks.html -type UserSystemEvent struct { - BaseSystemEvent - ID int `json:"user_id"` - Name string `json:"name"` - Username string `json:"username"` - OldUsername string `json:"old_username,omitempty"` - Email string `json:"email"` - State string `json:"state,omitempty"` -} - -// UserGroupSystemEvent represents a user group system event. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/administration/system_hooks.html -type UserGroupSystemEvent struct { - BaseSystemEvent - ID int `json:"user_id"` - Name string `json:"user_name"` - Username string `json:"user_username"` - Email string `json:"user_email"` - GroupID int `json:"group_id"` - GroupName string `json:"group_name"` - GroupPath string `json:"group_path"` - GroupAccess string `json:"group_access"` -} - -// UserTeamSystemEvent represents a user team system event. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/administration/system_hooks.html -type UserTeamSystemEvent struct { - BaseSystemEvent - ID int `json:"user_id"` - Name string `json:"user_name"` - Username string `json:"user_username"` - Email string `json:"user_email"` - ProjectID int `json:"project_id"` - ProjectName string `json:"project_name"` - ProjectPath string `json:"project_path"` - ProjectPathWithNamespace string `json:"project_path_with_namespace"` - ProjectVisibility string `json:"project_visibility"` - AccessLevel string `json:"access_level"` -} - -// PushSystemEvent represents a push system event. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/administration/system_hooks.html#push-events -type PushSystemEvent struct { - BaseSystemEvent - Before string `json:"before"` - After string `json:"after"` - Ref string `json:"ref"` - CheckoutSHA string `json:"checkout_sha"` - UserID int `json:"user_id"` - UserName string `json:"user_name"` - UserUsername string `json:"user_username"` - UserEmail string `json:"user_email"` - UserAvatar string `json:"user_avatar"` - ProjectID int `json:"project_id"` - Project struct { - Name string `json:"name"` - Description string `json:"description"` - WebURL string `json:"web_url"` - AvatarURL string `json:"avatar_url"` - GitHTTPURL string `json:"git_http_url"` - GitSSHURL string `json:"git_ssh_url"` - Namespace string `json:"namespace"` - VisibilityLevel int `json:"visibility_level"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - Homepage string `json:"homepage"` - URL string `json:"url"` - } `json:"project"` - Commits []struct { - ID string `json:"id"` - Message string `json:"message"` - Timestamp time.Time `json:"timestamp"` - URL string `json:"url"` - Author struct { - Name string `json:"name"` - Email string `json:"email"` - } `json:"author"` - } `json:"commits"` - TotalCommitsCount int `json:"total_commits_count"` -} - -// TagPushSystemEvent represents a tag push system event. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/administration/system_hooks.html#tag-events -type TagPushSystemEvent struct { - BaseSystemEvent - Before string `json:"before"` - After string `json:"after"` - Ref string `json:"ref"` - CheckoutSHA string `json:"checkout_sha"` - UserID int `json:"user_id"` - UserName string `json:"user_name"` - UserUsername string `json:"user_username"` - UserEmail string `json:"user_email"` - UserAvatar string `json:"user_avatar"` - ProjectID int `json:"project_id"` - Project struct { - Name string `json:"name"` - Description string `json:"description"` - WebURL string `json:"web_url"` - AvatarURL string `json:"avatar_url"` - GitHTTPURL string `json:"git_http_url"` - GitSSHURL string `json:"git_ssh_url"` - Namespace string `json:"namespace"` - VisibilityLevel int `json:"visibility_level"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - Homepage string `json:"homepage"` - URL string `json:"url"` - } `json:"project"` - Commits []struct { - ID string `json:"id"` - Message string `json:"message"` - Timestamp time.Time `json:"timestamp"` - URL string `json:"url"` - Author struct { - Name string `json:"name"` - Email string `json:"email"` - } `json:"author"` - } `json:"commits"` - TotalCommitsCount int `json:"total_commits_count"` -} - -// RepositoryUpdateSystemEvent represents a repository updated system event. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/administration/system_hooks.html#repository-update-events -type RepositoryUpdateSystemEvent struct { - BaseSystemEvent - UserID int `json:"user_id"` - UserName string `json:"user_name"` - UserEmail string `json:"user_email"` - UserAvatar string `json:"user_avatar"` - ProjectID int `json:"project_id"` - Project struct { - ID int `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - WebURL string `json:"web_url"` - AvatarURL string `json:"avatar_url"` - GitHTTPURL string `json:"git_http_url"` - GitSSHURL string `json:"git_ssh_url"` - Namespace string `json:"namespace"` - VisibilityLevel int `json:"visibility_level"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - CiConfigPath string `json:"ci_config_path"` - Homepage string `json:"homepage"` - URL string `json:"url"` - } `json:"project"` - Changes []struct { - Before string `json:"before"` - After string `json:"after"` - Ref string `json:"ref"` - } `json:"changes"` - Refs []string `json:"refs"` -} diff --git a/vendor/github.com/xanzy/go-gitlab/event_webhook_types.go b/vendor/github.com/xanzy/go-gitlab/event_webhook_types.go deleted file mode 100644 index c4a8e4aeb9..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/event_webhook_types.go +++ /dev/null @@ -1,1265 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "encoding/json" - "fmt" - "strconv" - "time" -) - -// StateID identifies the state of an issue or merge request. -// -// There are no GitLab API docs on the subject, but the mappings can be found in -// GitLab's codebase: -// https://gitlab.com/gitlab-org/gitlab-foss/-/blob/ba5be4989e/app/models/concerns/issuable.rb#L39-42 -type StateID int - -const ( - StateIDNone StateID = 0 - StateIDOpen StateID = 1 - StateIDClosed StateID = 2 - StateIDMerged StateID = 3 - StateIDLocked StateID = 4 -) - -// BuildEvent represents a build event. 
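// Editor's sketch (not part of the vendored file being removed): StateID
// values from the deleted event_webhook_types.go arrive as bare integers in
// webhook "changes" payloads; a small helper like this (hypothetical, not
// part of the library) renders them for logs.
package main

import (
	"fmt"

	gitlab "github.com/xanzy/go-gitlab"
)

// stateName maps a webhook StateID to its human-readable state.
func stateName(s gitlab.StateID) string {
	switch s {
	case gitlab.StateIDOpen:
		return "opened"
	case gitlab.StateIDClosed:
		return "closed"
	case gitlab.StateIDMerged:
		return "merged"
	case gitlab.StateIDLocked:
		return "locked"
	default:
		return "none"
	}
}

func main() {
	fmt.Println(stateName(gitlab.StateIDMerged)) // merged
}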
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#job-events -type BuildEvent struct { - ObjectKind string `json:"object_kind"` - Ref string `json:"ref"` - Tag bool `json:"tag"` - BeforeSHA string `json:"before_sha"` - SHA string `json:"sha"` - BuildID int `json:"build_id"` - BuildName string `json:"build_name"` - BuildStage string `json:"build_stage"` - BuildStatus string `json:"build_status"` - BuildCreatedAt string `json:"build_created_at"` - BuildStartedAt string `json:"build_started_at"` - BuildFinishedAt string `json:"build_finished_at"` - BuildDuration float64 `json:"build_duration"` - BuildAllowFailure bool `json:"build_allow_failure"` - ProjectID int `json:"project_id"` - ProjectName string `json:"project_name"` - User *EventUser `json:"user"` - Commit struct { - ID int `json:"id"` - SHA string `json:"sha"` - Message string `json:"message"` - AuthorName string `json:"author_name"` - AuthorEmail string `json:"author_email"` - Status string `json:"status"` - Duration int `json:"duration"` - StartedAt string `json:"started_at"` - FinishedAt string `json:"finished_at"` - } `json:"commit"` - Repository *Repository `json:"repository"` -} - -// CommitCommentEvent represents a comment on a commit event. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#comment-on-a-commit -type CommitCommentEvent struct { - ObjectKind string `json:"object_kind"` - EventType string `json:"event_type"` - User *User `json:"user"` - ProjectID int `json:"project_id"` - Project struct { - ID int `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - AvatarURL string `json:"avatar_url"` - GitSSHURL string `json:"git_ssh_url"` - GitHTTPURL string `json:"git_http_url"` - Namespace string `json:"namespace"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - Homepage string `json:"homepage"` - URL string `json:"url"` - SSHURL string `json:"ssh_url"` - HTTPURL string `json:"http_url"` - WebURL string `json:"web_url"` - Visibility VisibilityValue `json:"visibility"` - } `json:"project"` - Repository *Repository `json:"repository"` - ObjectAttributes struct { - ID int `json:"id"` - Note string `json:"note"` - NoteableType string `json:"noteable_type"` - AuthorID int `json:"author_id"` - CreatedAt string `json:"created_at"` - UpdatedAt string `json:"updated_at"` - ProjectID int `json:"project_id"` - Attachment string `json:"attachment"` - LineCode string `json:"line_code"` - CommitID string `json:"commit_id"` - NoteableID int `json:"noteable_id"` - System bool `json:"system"` - StDiff *Diff `json:"st_diff"` - Description string `json:"description"` - Action CommentEventAction `json:"action"` - URL string `json:"url"` - } `json:"object_attributes"` - Commit *struct { - ID string `json:"id"` - Title string `json:"title"` - Message string `json:"message"` - Timestamp *time.Time `json:"timestamp"` - URL string `json:"url"` - Author struct { - Name string `json:"name"` - Email string `json:"email"` - } `json:"author"` - } `json:"commit"` -} - -// DeploymentEvent represents a deployment event. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#deployment-events -type DeploymentEvent struct { - ObjectKind string `json:"object_kind"` - Status string `json:"status"` - StatusChangedAt string `json:"status_changed_at"` - DeploymentID int `json:"deployment_id"` - DeployableID int `json:"deployable_id"` - DeployableURL string `json:"deployable_url"` - Environment string `json:"environment"` - EnvironmentSlug string `json:"environment_slug"` - EnvironmentExternalURL string `json:"environment_external_url"` - Project struct { - ID int `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - WebURL string `json:"web_url"` - AvatarURL *string `json:"avatar_url"` - GitSSHURL string `json:"git_ssh_url"` - GitHTTPURL string `json:"git_http_url"` - Namespace string `json:"namespace"` - VisibilityLevel int `json:"visibility_level"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - CIConfigPath string `json:"ci_config_path"` - Homepage string `json:"homepage"` - URL string `json:"url"` - SSHURL string `json:"ssh_url"` - HTTPURL string `json:"http_url"` - } `json:"project"` - Ref string `json:"ref"` - ShortSHA string `json:"short_sha"` - User *EventUser `json:"user"` - UserURL string `json:"user_url"` - CommitURL string `json:"commit_url"` - CommitTitle string `json:"commit_title"` -} - -// FeatureFlagEvent represents a feature flag event. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#feature-flag-events -type FeatureFlagEvent struct { - ObjectKind string `json:"object_kind"` - Project struct { - ID int `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - WebURL string `json:"web_url"` - AvatarURL *string `json:"avatar_url"` - GitSSHURL string `json:"git_ssh_url"` - GitHTTPURL string `json:"git_http_url"` - Namespace string `json:"namespace"` - VisibilityLevel int `json:"visibility_level"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - CIConfigPath string `json:"ci_config_path"` - Homepage string `json:"homepage"` - URL string `json:"url"` - SSHURL string `json:"ssh_url"` - HTTPURL string `json:"http_url"` - } `json:"project"` - User *EventUser `json:"user"` - UserURL string `json:"user_url"` - ObjectAttributes struct { - ID int `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - Active bool `json:"active"` - } `json:"object_attributes"` -} - -// GroupResourceAccessTokenEvent represents a resource access token event for a -// group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#project-and-group-access-token-events -type GroupResourceAccessTokenEvent struct { - EventName string `json:"event_name"` - ObjectKind string `json:"object_kind"` - Group struct { - GroupID int `json:"group_id"` - GroupName string `json:"group_name"` - GroupPath string `json:"group_path"` - } `json:"group"` - ObjectAttributes struct { - ID int `json:"id"` - UserID int `json:"user_id"` - Name string `json:"name"` - CreatedAt string `json:"created_at"` - ExpiresAt *ISOTime `json:"expires_at"` - } `json:"object_attributes"` -} - -// IssueCommentEvent represents a comment on an issue event. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#comment-on-an-issue -type IssueCommentEvent struct { - ObjectKind string `json:"object_kind"` - EventType string `json:"event_type"` - User *User `json:"user"` - ProjectID int `json:"project_id"` - Project struct { - Name string `json:"name"` - Description string `json:"description"` - AvatarURL string `json:"avatar_url"` - GitSSHURL string `json:"git_ssh_url"` - GitHTTPURL string `json:"git_http_url"` - Namespace string `json:"namespace"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - Homepage string `json:"homepage"` - URL string `json:"url"` - SSHURL string `json:"ssh_url"` - HTTPURL string `json:"http_url"` - WebURL string `json:"web_url"` - Visibility VisibilityValue `json:"visibility"` - } `json:"project"` - Repository *Repository `json:"repository"` - ObjectAttributes struct { - ID int `json:"id"` - Note string `json:"note"` - NoteableType string `json:"noteable_type"` - AuthorID int `json:"author_id"` - CreatedAt string `json:"created_at"` - UpdatedAt string `json:"updated_at"` - ProjectID int `json:"project_id"` - Attachment string `json:"attachment"` - LineCode string `json:"line_code"` - CommitID string `json:"commit_id"` - DiscussionID string `json:"discussion_id"` - NoteableID int `json:"noteable_id"` - System bool `json:"system"` - StDiff []*Diff `json:"st_diff"` - Description string `json:"description"` - Action CommentEventAction `json:"action"` - URL string `json:"url"` - } `json:"object_attributes"` - Issue struct { - ID int `json:"id"` - IID int `json:"iid"` - ProjectID int `json:"project_id"` - MilestoneID int `json:"milestone_id"` - AuthorID int `json:"author_id"` - Position int `json:"position"` - BranchName string `json:"branch_name"` - Description string `json:"description"` - State string `json:"state"` - Title string `json:"title"` - Labels []*EventLabel `json:"labels"` - LastEditedAt string `json:"last_edit_at"` - LastEditedByID int `json:"last_edited_by_id"` - UpdatedAt string `json:"updated_at"` - UpdatedByID int `json:"updated_by_id"` - CreatedAt string `json:"created_at"` - ClosedAt string `json:"closed_at"` - DueDate *ISOTime `json:"due_date"` - URL string `json:"url"` - TimeEstimate int `json:"time_estimate"` - Confidential bool `json:"confidential"` - TotalTimeSpent int `json:"total_time_spent"` - HumanTotalTimeSpent string `json:"human_total_time_spent"` - HumanTimeEstimate string `json:"human_time_estimate"` - AssigneeIDs []int `json:"assignee_ids"` - AssigneeID int `json:"assignee_id"` - } `json:"issue"` -} - -// IssueEvent represents a issue event. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#issue-events -type IssueEvent struct { - ObjectKind string `json:"object_kind"` - EventType string `json:"event_type"` - User *EventUser `json:"user"` - Project struct { - ID int `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - AvatarURL string `json:"avatar_url"` - GitSSHURL string `json:"git_ssh_url"` - GitHTTPURL string `json:"git_http_url"` - Namespace string `json:"namespace"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - Homepage string `json:"homepage"` - URL string `json:"url"` - SSHURL string `json:"ssh_url"` - HTTPURL string `json:"http_url"` - WebURL string `json:"web_url"` - Visibility VisibilityValue `json:"visibility"` - } `json:"project"` - Repository *Repository `json:"repository"` - ObjectAttributes struct { - ID int `json:"id"` - Title string `json:"title"` - AssigneeIDs []int `json:"assignee_ids"` - AssigneeID int `json:"assignee_id"` - AuthorID int `json:"author_id"` - ProjectID int `json:"project_id"` - CreatedAt string `json:"created_at"` // Should be *time.Time (see Gitlab issue #21468) - UpdatedAt string `json:"updated_at"` // Should be *time.Time (see Gitlab issue #21468) - UpdatedByID int `json:"updated_by_id"` - LastEditedAt string `json:"last_edited_at"` - LastEditedByID int `json:"last_edited_by_id"` - RelativePosition int `json:"relative_position"` - BranchName string `json:"branch_name"` - Description string `json:"description"` - MilestoneID int `json:"milestone_id"` - StateID StateID `json:"state_id"` - Confidential bool `json:"confidential"` - DiscussionLocked bool `json:"discussion_locked"` - DueDate *ISOTime `json:"due_date"` - MovedToID int `json:"moved_to_id"` - DuplicatedToID int `json:"duplicated_to_id"` - TimeEstimate int `json:"time_estimate"` - TotalTimeSpent int `json:"total_time_spent"` - TimeChange int `json:"time_change"` - HumanTotalTimeSpent string `json:"human_total_time_spent"` - HumanTimeEstimate string `json:"human_time_estimate"` - HumanTimeChange string `json:"human_time_change"` - Weight int `json:"weight"` - IID int `json:"iid"` - URL string `json:"url"` - State string `json:"state"` - Action string `json:"action"` - Severity string `json:"severity"` - EscalationStatus string `json:"escalation_status"` - EscalationPolicy struct { - ID int `json:"id"` - Name string `json:"name"` - } `json:"escalation_policy"` - Labels []*EventLabel `json:"labels"` - } `json:"object_attributes"` - Assignee *EventUser `json:"assignee"` - Assignees *[]EventUser `json:"assignees"` - Labels []*EventLabel `json:"labels"` - Changes struct { - Assignees struct { - Previous []*EventUser `json:"previous"` - Current []*EventUser `json:"current"` - } `json:"assignees"` - Description struct { - Previous string `json:"previous"` - Current string `json:"current"` - } `json:"description"` - Labels struct { - Previous []*EventLabel `json:"previous"` - Current []*EventLabel `json:"current"` - } `json:"labels"` - Title struct { - Previous string `json:"previous"` - Current string `json:"current"` - } `json:"title"` - ClosedAt struct { - Previous string `json:"previous"` - Current string `json:"current"` - } `json:"closed_at"` - StateID struct { - Previous StateID `json:"previous"` - Current StateID `json:"current"` - } `json:"state_id"` - UpdatedAt struct { - Previous string `json:"previous"` - Current string `json:"current"` - } `json:"updated_at"` - UpdatedByID struct { - 
Previous int `json:"previous"` - Current int `json:"current"` - } `json:"updated_by_id"` - TotalTimeSpent struct { - Previous int `json:"previous"` - Current int `json:"current"` - } `json:"total_time_spent"` - } `json:"changes"` -} - -// JobEvent represents a job event. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#job-events -type JobEvent struct { - ObjectKind string `json:"object_kind"` - Ref string `json:"ref"` - Tag bool `json:"tag"` - BeforeSHA string `json:"before_sha"` - SHA string `json:"sha"` - BuildID int `json:"build_id"` - BuildName string `json:"build_name"` - BuildStage string `json:"build_stage"` - BuildStatus string `json:"build_status"` - BuildCreatedAt string `json:"build_created_at"` - BuildStartedAt string `json:"build_started_at"` - BuildFinishedAt string `json:"build_finished_at"` - BuildDuration float64 `json:"build_duration"` - BuildQueuedDuration float64 `json:"build_queued_duration"` - BuildAllowFailure bool `json:"build_allow_failure"` - BuildFailureReason string `json:"build_failure_reason"` - RetriesCount int `json:"retries_count"` - PipelineID int `json:"pipeline_id"` - ProjectID int `json:"project_id"` - ProjectName string `json:"project_name"` - User *EventUser `json:"user"` - Commit struct { - ID int `json:"id"` - Name string `json:"name"` - SHA string `json:"sha"` - Message string `json:"message"` - AuthorName string `json:"author_name"` - AuthorEmail string `json:"author_email"` - AuthorURL string `json:"author_url"` - Status string `json:"status"` - Duration int `json:"duration"` - StartedAt string `json:"started_at"` - FinishedAt string `json:"finished_at"` - } `json:"commit"` - Repository *Repository `json:"repository"` - Runner struct { - ID int `json:"id"` - Active bool `json:"active"` - RunnerType string `json:"runner_type"` - IsShared bool `json:"is_shared"` - Description string `json:"description"` - Tags []string `json:"tags"` - } `json:"runner"` - Environment struct { - Name string `json:"name"` - Action string `json:"action"` - DeploymentTier string `json:"deployment_tier"` - } `json:"environment"` -} - -// MemberEvent represents a member event. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#group-member-events -type MemberEvent struct { - CreatedAt *time.Time `json:"created_at"` - UpdatedAt *time.Time `json:"updated_at"` - GroupName string `json:"group_name"` - GroupPath string `json:"group_path"` - GroupID int `json:"group_id"` - UserUsername string `json:"user_username"` - UserName string `json:"user_name"` - UserEmail string `json:"user_email"` - UserID int `json:"user_id"` - GroupAccess string `json:"group_access"` - GroupPlan string `json:"group_plan"` - ExpiresAt *time.Time `json:"expires_at"` - EventName string `json:"event_name"` -} - -// MergeCommentEvent represents a comment on a merge event. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#comment-on-a-merge-request -type MergeCommentEvent struct { - ObjectKind string `json:"object_kind"` - EventType string `json:"event_type"` - User *EventUser `json:"user"` - ProjectID int `json:"project_id"` - Project struct { - ID int `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - AvatarURL string `json:"avatar_url"` - GitSSHURL string `json:"git_ssh_url"` - GitHTTPURL string `json:"git_http_url"` - Namespace string `json:"namespace"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - Homepage string `json:"homepage"` - URL string `json:"url"` - SSHURL string `json:"ssh_url"` - HTTPURL string `json:"http_url"` - WebURL string `json:"web_url"` - Visibility VisibilityValue `json:"visibility"` - } `json:"project"` - ObjectAttributes struct { - Attachment string `json:"attachment"` - AuthorID int `json:"author_id"` - ChangePosition *NotePosition `json:"change_position"` - CommitID string `json:"commit_id"` - CreatedAt string `json:"created_at"` - DiscussionID string `json:"discussion_id"` - ID int `json:"id"` - LineCode string `json:"line_code"` - Note string `json:"note"` - NoteableID int `json:"noteable_id"` - NoteableType string `json:"noteable_type"` - OriginalPosition *NotePosition `json:"original_position"` - Position *NotePosition `json:"position"` - ProjectID int `json:"project_id"` - ResolvedAt string `json:"resolved_at"` - ResolvedByID int `json:"resolved_by_id"` - ResolvedByPush bool `json:"resolved_by_push"` - StDiff *Diff `json:"st_diff"` - System bool `json:"system"` - Type string `json:"type"` - UpdatedAt string `json:"updated_at"` - UpdatedByID int `json:"updated_by_id"` - Description string `json:"description"` - Action CommentEventAction `json:"action"` - URL string `json:"url"` - } `json:"object_attributes"` - Repository *Repository `json:"repository"` - MergeRequest struct { - ID int `json:"id"` - TargetBranch string `json:"target_branch"` - SourceBranch string `json:"source_branch"` - SourceProjectID int `json:"source_project_id"` - AuthorID int `json:"author_id"` - AssigneeID int `json:"assignee_id"` - AssigneeIDs []int `json:"assignee_ids"` - Title string `json:"title"` - CreatedAt string `json:"created_at"` - UpdatedAt string `json:"updated_at"` - MilestoneID int `json:"milestone_id"` - State string `json:"state"` - MergeStatus string `json:"merge_status"` - TargetProjectID int `json:"target_project_id"` - IID int `json:"iid"` - Description string `json:"description"` - Position int `json:"position"` - Labels []*EventLabel `json:"labels"` - LockedAt string `json:"locked_at"` - UpdatedByID int `json:"updated_by_id"` - MergeError string `json:"merge_error"` - MergeParams *MergeParams `json:"merge_params"` - MergeWhenPipelineSucceeds bool `json:"merge_when_pipeline_succeeds"` - MergeUserID int `json:"merge_user_id"` - MergeCommitSHA string `json:"merge_commit_sha"` - DeletedAt string `json:"deleted_at"` - InProgressMergeCommitSHA string `json:"in_progress_merge_commit_sha"` - LockVersion int `json:"lock_version"` - ApprovalsBeforeMerge string `json:"approvals_before_merge"` - RebaseCommitSHA string `json:"rebase_commit_sha"` - TimeEstimate int `json:"time_estimate"` - Squash bool `json:"squash"` - LastEditedAt string `json:"last_edited_at"` - LastEditedByID int `json:"last_edited_by_id"` - Source *Repository `json:"source"` - Target *Repository `json:"target"` - LastCommit struct { 
- ID string `json:"id"` - Title string `json:"title"` - Message string `json:"message"` - Timestamp *time.Time `json:"timestamp"` - URL string `json:"url"` - Author struct { - Name string `json:"name"` - Email string `json:"email"` - } `json:"author"` - } `json:"last_commit"` - WorkInProgress bool `json:"work_in_progress"` - TotalTimeSpent int `json:"total_time_spent"` - HeadPipelineID int `json:"head_pipeline_id"` - Assignee *EventUser `json:"assignee"` - DetailedMergeStatus string `json:"detailed_merge_status"` - } `json:"merge_request"` -} - -// MergeEvent represents a merge event. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#merge-request-events -type MergeEvent struct { - ObjectKind string `json:"object_kind"` - EventType string `json:"event_type"` - User *EventUser `json:"user"` - Project struct { - ID int `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - AvatarURL string `json:"avatar_url"` - GitSSHURL string `json:"git_ssh_url"` - GitHTTPURL string `json:"git_http_url"` - Namespace string `json:"namespace"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - CIConfigPath string `json:"ci_config_path"` - Homepage string `json:"homepage"` - URL string `json:"url"` - SSHURL string `json:"ssh_url"` - HTTPURL string `json:"http_url"` - WebURL string `json:"web_url"` - Visibility VisibilityValue `json:"visibility"` - } `json:"project"` - ObjectAttributes struct { - ID int `json:"id"` - TargetBranch string `json:"target_branch"` - SourceBranch string `json:"source_branch"` - SourceProjectID int `json:"source_project_id"` - AuthorID int `json:"author_id"` - AssigneeID int `json:"assignee_id"` - AssigneeIDs []int `json:"assignee_ids"` - ReviewerIDs []int `json:"reviewer_ids"` - Title string `json:"title"` - CreatedAt string `json:"created_at"` // Should be *time.Time (see Gitlab issue #21468) - UpdatedAt string `json:"updated_at"` // Should be *time.Time (see Gitlab issue #21468) - StCommits []*Commit `json:"st_commits"` - StDiffs []*Diff `json:"st_diffs"` - LastEditedAt string `json:"last_edited_at"` - LastEditedByID int `json:"last_edited_by_id"` - MilestoneID int `json:"milestone_id"` - StateID StateID `json:"state_id"` - State string `json:"state"` - MergeStatus string `json:"merge_status"` - TargetProjectID int `json:"target_project_id"` - IID int `json:"iid"` - Description string `json:"description"` - Position int `json:"position"` - LockedAt string `json:"locked_at"` - UpdatedByID int `json:"updated_by_id"` - MergeError string `json:"merge_error"` - MergeParams *MergeParams `json:"merge_params"` - MergeWhenBuildSucceeds bool `json:"merge_when_build_succeeds"` - MergeUserID int `json:"merge_user_id"` - MergeCommitSHA string `json:"merge_commit_sha"` - DeletedAt string `json:"deleted_at"` - ApprovalsBeforeMerge string `json:"approvals_before_merge"` - RebaseCommitSHA string `json:"rebase_commit_sha"` - InProgressMergeCommitSHA string `json:"in_progress_merge_commit_sha"` - LockVersion int `json:"lock_version"` - TimeEstimate int `json:"time_estimate"` - Source *Repository `json:"source"` - Target *Repository `json:"target"` - HeadPipelineID *int `json:"head_pipeline_id"` - LastCommit struct { - ID string `json:"id"` - Message string `json:"message"` - Title string `json:"title"` - Timestamp *time.Time `json:"timestamp"` - URL string `json:"url"` - Author struct { - Name string `json:"name"` - Email string `json:"email"` - } `json:"author"` - } 
`json:"last_commit"` - BlockingDiscussionsResolved bool `json:"blocking_discussions_resolved"` - WorkInProgress bool `json:"work_in_progress"` - Draft bool `json:"draft"` - TotalTimeSpent int `json:"total_time_spent"` - TimeChange int `json:"time_change"` - HumanTotalTimeSpent string `json:"human_total_time_spent"` - HumanTimeChange string `json:"human_time_change"` - HumanTimeEstimate string `json:"human_time_estimate"` - FirstContribution bool `json:"first_contribution"` - URL string `json:"url"` - Labels []*EventLabel `json:"labels"` - Action string `json:"action"` - DetailedMergeStatus string `json:"detailed_merge_status"` - OldRev string `json:"oldrev"` - } `json:"object_attributes"` - Repository *Repository `json:"repository"` - Labels []*EventLabel `json:"labels"` - Changes struct { - Assignees struct { - Previous []*EventUser `json:"previous"` - Current []*EventUser `json:"current"` - } `json:"assignees"` - Reviewers struct { - Previous []*EventUser `json:"previous"` - Current []*EventUser `json:"current"` - } `json:"reviewers"` - Description struct { - Previous string `json:"previous"` - Current string `json:"current"` - } `json:"description"` - Draft struct { - Previous bool `json:"previous"` - Current bool `json:"current"` - } `json:"draft"` - Labels struct { - Previous []*EventLabel `json:"previous"` - Current []*EventLabel `json:"current"` - } `json:"labels"` - LastEditedAt struct { - Previous string `json:"previous"` - Current string `json:"current"` - } `json:"last_edited_at"` - LastEditedByID struct { - Previous int `json:"previous"` - Current int `json:"current"` - } `json:"last_edited_by_id"` - MilestoneID struct { - Previous int `json:"previous"` - Current int `json:"current"` - } `json:"milestone_id"` - SourceBranch struct { - Previous string `json:"previous"` - Current string `json:"current"` - } `json:"source_branch"` - SourceProjectID struct { - Previous int `json:"previous"` - Current int `json:"current"` - } `json:"source_project_id"` - StateID struct { - Previous StateID `json:"previous"` - Current StateID `json:"current"` - } `json:"state_id"` - TargetBranch struct { - Previous string `json:"previous"` - Current string `json:"current"` - } `json:"target_branch"` - TargetProjectID struct { - Previous int `json:"previous"` - Current int `json:"current"` - } `json:"target_project_id"` - Title struct { - Previous string `json:"previous"` - Current string `json:"current"` - } `json:"title"` - UpdatedAt struct { - Previous string `json:"previous"` - Current string `json:"current"` - } `json:"updated_at"` - UpdatedByID struct { - Previous int `json:"previous"` - Current int `json:"current"` - } `json:"updated_by_id"` - } `json:"changes"` - Assignees []*EventUser `json:"assignees"` - Reviewers []*EventUser `json:"reviewers"` -} - -// EventUser represents a user record in an event and is used as an even -// initiator or a merge assignee. -type EventUser struct { - ID int `json:"id"` - Name string `json:"name"` - Username string `json:"username"` - AvatarURL string `json:"avatar_url"` - Email string `json:"email"` -} - -// MergeParams represents the merge params. 
-type MergeParams struct { - ForceRemoveSourceBranch bool `json:"force_remove_source_branch"` -} - -// UnmarshalJSON decodes the merge parameters -// -// This allows support of ForceRemoveSourceBranch for both type -// bool (>11.9) and string (<11.9) -func (p *MergeParams) UnmarshalJSON(b []byte) error { - type Alias MergeParams - raw := struct { - *Alias - ForceRemoveSourceBranch interface{} `json:"force_remove_source_branch"` - }{ - Alias: (*Alias)(p), - } - - err := json.Unmarshal(b, &raw) - if err != nil { - return err - } - - switch v := raw.ForceRemoveSourceBranch.(type) { - case nil: - // No action needed. - case bool: - p.ForceRemoveSourceBranch = v - case string: - p.ForceRemoveSourceBranch, err = strconv.ParseBool(v) - if err != nil { - return err - } - default: - return fmt.Errorf("failed to unmarshal ForceRemoveSourceBranch of type: %T", v) - } - - return nil -} - -// PipelineEvent represents a pipeline event. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#pipeline-events -type PipelineEvent struct { - ObjectKind string `json:"object_kind"` - ObjectAttributes struct { - ID int `json:"id"` - IID int `json:"iid"` - Name string `json:"name"` - Ref string `json:"ref"` - Tag bool `json:"tag"` - SHA string `json:"sha"` - BeforeSHA string `json:"before_sha"` - Source string `json:"source"` - Status string `json:"status"` - DetailedStatus string `json:"detailed_status"` - Stages []string `json:"stages"` - CreatedAt string `json:"created_at"` - FinishedAt string `json:"finished_at"` - Duration int `json:"duration"` - QueuedDuration int `json:"queued_duration"` - URL string `json:"url"` - Variables []struct { - Key string `json:"key"` - Value string `json:"value"` - } `json:"variables"` - } `json:"object_attributes"` - MergeRequest struct { - ID int `json:"id"` - IID int `json:"iid"` - Title string `json:"title"` - SourceBranch string `json:"source_branch"` - SourceProjectID int `json:"source_project_id"` - TargetBranch string `json:"target_branch"` - TargetProjectID int `json:"target_project_id"` - State string `json:"state"` - MergeRequestStatus string `json:"merge_status"` - DetailedMergeStatus string `json:"detailed_merge_status"` - URL string `json:"url"` - } `json:"merge_request"` - User *EventUser `json:"user"` - Project struct { - ID int `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - AvatarURL string `json:"avatar_url"` - GitSSHURL string `json:"git_ssh_url"` - GitHTTPURL string `json:"git_http_url"` - Namespace string `json:"namespace"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - Homepage string `json:"homepage"` - URL string `json:"url"` - SSHURL string `json:"ssh_url"` - HTTPURL string `json:"http_url"` - WebURL string `json:"web_url"` - Visibility VisibilityValue `json:"visibility"` - } `json:"project"` - Commit struct { - ID string `json:"id"` - Message string `json:"message"` - Title string `json:"title"` - Timestamp *time.Time `json:"timestamp"` - URL string `json:"url"` - Author struct { - Name string `json:"name"` - Email string `json:"email"` - } `json:"author"` - } `json:"commit"` - SourcePipline struct { - Project struct { - ID int `json:"id"` - WebURL string `json:"web_url"` - PathWithNamespace string `json:"path_with_namespace"` - } `json:"project"` - PipelineID int `json:"pipeline_id"` - JobID int `json:"job_id"` - } `json:"source_pipeline"` - Builds []struct { - ID int `json:"id"` - Stage string `json:"stage"` - 
Name string `json:"name"` - Status string `json:"status"` - CreatedAt string `json:"created_at"` - StartedAt string `json:"started_at"` - FinishedAt string `json:"finished_at"` - Duration float64 `json:"duration"` - QueuedDuration float64 `json:"queued_duration"` - FailureReason string `json:"failure_reason"` - When string `json:"when"` - Manual bool `json:"manual"` - AllowFailure bool `json:"allow_failure"` - User *EventUser `json:"user"` - Runner struct { - ID int `json:"id"` - Description string `json:"description"` - Active bool `json:"active"` - IsShared bool `json:"is_shared"` - RunnerType string `json:"runner_type"` - Tags []string `json:"tags"` - } `json:"runner"` - ArtifactsFile struct { - Filename string `json:"filename"` - Size int `json:"size"` - } `json:"artifacts_file"` - Environment struct { - Name string `json:"name"` - Action string `json:"action"` - DeploymentTier string `json:"deployment_tier"` - } `json:"environment"` - } `json:"builds"` -} - -// ProjectResourceAccessTokenEvent represents a resource access token event for -// a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#project-and-group-access-token-events -type ProjectResourceAccessTokenEvent struct { - EventName string `json:"event_name"` - ObjectKind string `json:"object_kind"` - Project struct { - ID int `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - WebURL string `json:"web_url"` - AvatarURL string `json:"avatar_url"` - GitSSHURL string `json:"git_ssh_url"` - GitHTTPURL string `json:"git_http_url"` - Namespace string `json:"namespace"` - VisibilityLevel int `json:"visibility_level"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - CIConfigPath string `json:"ci_config_path"` - Homepage string `json:"homepage"` - URL string `json:"url"` - SSHURL string `json:"ssh_url"` - HTTPURL string `json:"http_url"` - } `json:"project"` - ObjectAttributes struct { - ID int `json:"id"` - UserID int `json:"user_id"` - Name string `json:"name"` - CreatedAt string `json:"created_at"` - ExpiresAt *ISOTime `json:"expires_at"` - } `json:"object_attributes"` -} - -// PushEvent represents a push event. 
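A note on the MergeParams.UnmarshalJSON above: GitLab releases older than 11.9 serialize force_remove_source_branch as a string while newer ones send a bool, and the custom decoder normalizes both. A minimal sketch of the behavior, assuming this vendored package is importable as github.com/xanzy/go-gitlab:

package main

import (
	"encoding/json"
	"fmt"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	// Both payload shapes decode to the same Go bool.
	payloads := []string{
		`{"force_remove_source_branch": true}`,   // GitLab >= 11.9
		`{"force_remove_source_branch": "true"}`, // GitLab < 11.9
	}
	for _, payload := range payloads {
		var p gitlab.MergeParams
		if err := json.Unmarshal([]byte(payload), &p); err != nil {
			panic(err)
		}
		fmt.Println(p.ForceRemoveSourceBranch) // true, twice
	}
}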
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#push-events -type PushEvent struct { - ObjectKind string `json:"object_kind"` - EventName string `json:"event_name"` - Before string `json:"before"` - After string `json:"after"` - Ref string `json:"ref"` - CheckoutSHA string `json:"checkout_sha"` - UserID int `json:"user_id"` - UserName string `json:"user_name"` - UserUsername string `json:"user_username"` - UserEmail string `json:"user_email"` - UserAvatar string `json:"user_avatar"` - ProjectID int `json:"project_id"` - Project struct { - ID int `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - AvatarURL string `json:"avatar_url"` - GitSSHURL string `json:"git_ssh_url"` - GitHTTPURL string `json:"git_http_url"` - Namespace string `json:"namespace"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - Homepage string `json:"homepage"` - URL string `json:"url"` - SSHURL string `json:"ssh_url"` - HTTPURL string `json:"http_url"` - WebURL string `json:"web_url"` - Visibility VisibilityValue `json:"visibility"` - } `json:"project"` - Repository *Repository `json:"repository"` - Commits []*struct { - ID string `json:"id"` - Message string `json:"message"` - Title string `json:"title"` - Timestamp *time.Time `json:"timestamp"` - URL string `json:"url"` - Author struct { - Name string `json:"name"` - Email string `json:"email"` - } `json:"author"` - Added []string `json:"added"` - Modified []string `json:"modified"` - Removed []string `json:"removed"` - } `json:"commits"` - TotalCommitsCount int `json:"total_commits_count"` -} - -// ReleaseEvent represents a release event -// -// GitLab API docs: -// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#release-events -type ReleaseEvent struct { - ID int `json:"id"` - CreatedAt string `json:"created_at"` // Should be *time.Time (see Gitlab issue #21468) - Description string `json:"description"` - Name string `json:"name"` - Tag string `json:"tag"` - ReleasedAt string `json:"released_at"` // Should be *time.Time (see Gitlab issue #21468) - ObjectKind string `json:"object_kind"` - Project struct { - ID int `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - WebURL string `json:"web_url"` - AvatarURL *string `json:"avatar_url"` - GitSSHURL string `json:"git_ssh_url"` - GitHTTPURL string `json:"git_http_url"` - Namespace string `json:"namespace"` - VisibilityLevel int `json:"visibility_level"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - CIConfigPath string `json:"ci_config_path"` - Homepage string `json:"homepage"` - URL string `json:"url"` - SSHURL string `json:"ssh_url"` - HTTPURL string `json:"http_url"` - } `json:"project"` - URL string `json:"url"` - Action string `json:"action"` - Assets struct { - Count int `json:"count"` - Links []struct { - ID int `json:"id"` - External bool `json:"external"` - LinkType string `json:"link_type"` - Name string `json:"name"` - URL string `json:"url"` - } `json:"links"` - Sources []struct { - Format string `json:"format"` - URL string `json:"url"` - } `json:"sources"` - } `json:"assets"` - Commit struct { - ID string `json:"id"` - Message string `json:"message"` - Title string `json:"title"` - Timestamp string `json:"timestamp"` // Should be *time.Time (see Gitlab issue #21468) - URL string `json:"url"` - Author struct { - Name string `json:"name"` - Email string 
`json:"email"` - } `json:"author"` - } `json:"commit"` -} - -// SnippetCommentEvent represents a comment on a snippet event. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#comment-on-a-code-snippet -type SnippetCommentEvent struct { - ObjectKind string `json:"object_kind"` - EventType string `json:"event_type"` - User *EventUser `json:"user"` - ProjectID int `json:"project_id"` - Project struct { - Name string `json:"name"` - Description string `json:"description"` - AvatarURL string `json:"avatar_url"` - GitSSHURL string `json:"git_ssh_url"` - GitHTTPURL string `json:"git_http_url"` - Namespace string `json:"namespace"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - Homepage string `json:"homepage"` - URL string `json:"url"` - SSHURL string `json:"ssh_url"` - HTTPURL string `json:"http_url"` - WebURL string `json:"web_url"` - Visibility VisibilityValue `json:"visibility"` - } `json:"project"` - Repository *Repository `json:"repository"` - ObjectAttributes struct { - ID int `json:"id"` - Note string `json:"note"` - NoteableType string `json:"noteable_type"` - AuthorID int `json:"author_id"` - CreatedAt string `json:"created_at"` - UpdatedAt string `json:"updated_at"` - ProjectID int `json:"project_id"` - Attachment string `json:"attachment"` - LineCode string `json:"line_code"` - CommitID string `json:"commit_id"` - NoteableID int `json:"noteable_id"` - System bool `json:"system"` - StDiff *Diff `json:"st_diff"` - Description string `json:"description"` - Action CommentEventAction `json:"action"` - URL string `json:"url"` - } `json:"object_attributes"` - Snippet *struct { - ID int `json:"id"` - Title string `json:"title"` - Content string `json:"content"` - AuthorID int `json:"author_id"` - ProjectID int `json:"project_id"` - CreatedAt string `json:"created_at"` - UpdatedAt string `json:"updated_at"` - Filename string `json:"file_name"` - ExpiresAt string `json:"expires_at"` - Type string `json:"type"` - VisibilityLevel int `json:"visibility_level"` - Description string `json:"description"` - Secret bool `json:"secret"` - RepositoryReadOnly bool `json:"repository_read_only"` - } `json:"snippet"` -} - -// SubGroupEvent represents a subgroup event. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#subgroup-events -type SubGroupEvent struct { - CreatedAt *time.Time `json:"created_at"` - UpdatedAt *time.Time `json:"updated_at"` - EventName string `json:"event_name"` - Name string `json:"name"` - Path string `json:"path"` - FullPath string `json:"full_path"` - GroupID int `json:"group_id"` - ParentGroupID int `json:"parent_group_id"` - ParentName string `json:"parent_name"` - ParentPath string `json:"parent_path"` - ParentFullPath string `json:"parent_full_path"` -} - -// TagEvent represents a tag event. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#tag-events -type TagEvent struct { - ObjectKind string `json:"object_kind"` - EventName string `json:"event_name"` - Before string `json:"before"` - After string `json:"after"` - Ref string `json:"ref"` - CheckoutSHA string `json:"checkout_sha"` - UserID int `json:"user_id"` - UserName string `json:"user_name"` - UserUsername string `json:"user_username"` - UserAvatar string `json:"user_avatar"` - UserEmail string `json:"user_email"` - ProjectID int `json:"project_id"` - Message string `json:"message"` - Project struct { - ID int `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - AvatarURL string `json:"avatar_url"` - GitSSHURL string `json:"git_ssh_url"` - GitHTTPURL string `json:"git_http_url"` - Namespace string `json:"namespace"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - Homepage string `json:"homepage"` - URL string `json:"url"` - SSHURL string `json:"ssh_url"` - HTTPURL string `json:"http_url"` - WebURL string `json:"web_url"` - Visibility VisibilityValue `json:"visibility"` - } `json:"project"` - Repository *Repository `json:"repository"` - Commits []*struct { - ID string `json:"id"` - Message string `json:"message"` - Title string `json:"title"` - Timestamp *time.Time `json:"timestamp"` - URL string `json:"url"` - Author struct { - Name string `json:"name"` - Email string `json:"email"` - } `json:"author"` - Added []string `json:"added"` - Modified []string `json:"modified"` - Removed []string `json:"removed"` - } `json:"commits"` - TotalCommitsCount int `json:"total_commits_count"` -} - -// WikiPageEvent represents a wiki page event. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#wiki-page-events -type WikiPageEvent struct { - ObjectKind string `json:"object_kind"` - User *EventUser `json:"user"` - Project struct { - Name string `json:"name"` - Description string `json:"description"` - AvatarURL string `json:"avatar_url"` - GitSSHURL string `json:"git_ssh_url"` - GitHTTPURL string `json:"git_http_url"` - Namespace string `json:"namespace"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - Homepage string `json:"homepage"` - URL string `json:"url"` - SSHURL string `json:"ssh_url"` - HTTPURL string `json:"http_url"` - WebURL string `json:"web_url"` - Visibility VisibilityValue `json:"visibility"` - } `json:"project"` - Wiki struct { - WebURL string `json:"web_url"` - GitSSHURL string `json:"git_ssh_url"` - GitHTTPURL string `json:"git_http_url"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - } `json:"wiki"` - ObjectAttributes struct { - Title string `json:"title"` - Content string `json:"content"` - Format string `json:"format"` - Message string `json:"message"` - Slug string `json:"slug"` - URL string `json:"url"` - Action string `json:"action"` - DiffURL string `json:"diff_url"` - } `json:"object_attributes"` -} - -// EventLabel represents a label inside a webhook event. 
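The webhook structs in this file are usually filled in by the package's event-parsing helpers rather than by hand. A hedged sketch of a handler that routes a payload to the right type, assuming gitlab.HookEventType and gitlab.ParseWebhook from the same package:

package main

import (
	"io"
	"log"
	"net/http"

	gitlab "github.com/xanzy/go-gitlab"
)

func webhookHandler(w http.ResponseWriter, r *http.Request) {
	payload, err := io.ReadAll(r.Body)
	if err != nil {
		http.Error(w, "cannot read body", http.StatusBadRequest)
		return
	}
	// ParseWebhook inspects the event type (taken from the X-Gitlab-Event
	// header) and returns one of the typed event structs defined in this file.
	event, err := gitlab.ParseWebhook(gitlab.HookEventType(r), payload)
	if err != nil {
		http.Error(w, "unsupported event", http.StatusBadRequest)
		return
	}
	switch ev := event.(type) {
	case *gitlab.PushEvent:
		log.Printf("push to %s by %s", ev.Ref, ev.UserName)
	case *gitlab.TagEvent:
		log.Printf("tag event on %s", ev.Ref)
	case *gitlab.WikiPageEvent:
		log.Printf("wiki page %q: %s", ev.ObjectAttributes.Title, ev.ObjectAttributes.Action)
	}
}

func main() {
	http.HandleFunc("/webhook", webhookHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}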
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#issue-events
-type EventLabel struct {
-	ID          int    `json:"id"`
-	Title       string `json:"title"`
-	Color       string `json:"color"`
-	ProjectID   int    `json:"project_id"`
-	CreatedAt   string `json:"created_at"`
-	UpdatedAt   string `json:"updated_at"`
-	Template    bool   `json:"template"`
-	Description string `json:"description"`
-	Type        string `json:"type"`
-	GroupID     int    `json:"group_id"`
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/events.go b/vendor/github.com/xanzy/go-gitlab/events.go
deleted file mode 100644
index 504db652f0..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/events.go
+++ /dev/null
@@ -1,231 +0,0 @@
-//
-// Copyright 2021, Sander van Harmelen
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package gitlab
-
-import (
-	"fmt"
-	"net/http"
-	"time"
-)
-
-// EventsService handles communication with the event related methods of
-// the GitLab API.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/events.html
-type EventsService struct {
-	client *Client
-}
-
-// ContributionEvent represents a user's contribution
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/events.html#get-user-contribution-events
-type ContributionEvent struct {
-	ID          int        `json:"id"`
-	Title       string     `json:"title"`
-	ProjectID   int        `json:"project_id"`
-	ActionName  string     `json:"action_name"`
-	TargetID    int        `json:"target_id"`
-	TargetIID   int        `json:"target_iid"`
-	TargetType  string     `json:"target_type"`
-	AuthorID    int        `json:"author_id"`
-	TargetTitle string     `json:"target_title"`
-	CreatedAt   *time.Time `json:"created_at"`
-	PushData    struct {
-		CommitCount int    `json:"commit_count"`
-		Action      string `json:"action"`
-		RefType     string `json:"ref_type"`
-		CommitFrom  string `json:"commit_from"`
-		CommitTo    string `json:"commit_to"`
-		Ref         string `json:"ref"`
-		CommitTitle string `json:"commit_title"`
-	} `json:"push_data"`
-	Note   *Note `json:"note"`
-	Author struct {
-		Name      string `json:"name"`
-		Username  string `json:"username"`
-		ID        int    `json:"id"`
-		State     string `json:"state"`
-		AvatarURL string `json:"avatar_url"`
-		WebURL    string `json:"web_url"`
-	} `json:"author"`
-	AuthorUsername string `json:"author_username"`
-}
-
-// ListContributionEventsOptions represents the options for GetUserContributionEvents
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/events.html#get-user-contribution-events
-type ListContributionEventsOptions struct {
-	ListOptions
-	Action     *EventTypeValue       `url:"action,omitempty" json:"action,omitempty"`
-	TargetType *EventTargetTypeValue `url:"target_type,omitempty" json:"target_type,omitempty"`
-	Before     *ISOTime              `url:"before,omitempty" json:"before,omitempty"`
-	After      *ISOTime              `url:"after,omitempty" json:"after,omitempty"`
-	Sort       *string               `url:"sort,omitempty" json:"sort,omitempty"`
-}
-
-// ListUserContributionEvents retrieves user contribution events
-// for the specified user, sorted from newest to oldest.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/events.html#get-user-contribution-events
-func (s *UsersService) ListUserContributionEvents(uid interface{}, opt *ListContributionEventsOptions, options ...RequestOptionFunc) ([]*ContributionEvent, *Response, error) {
-	user, err := parseID(uid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("users/%s/events", user)
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var cs []*ContributionEvent
-	resp, err := s.client.Do(req, &cs)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return cs, resp, nil
-}
-
-// ListCurrentUserContributionEvents gets a list of the currently authenticated user's events
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/events.html#list-currently-authenticated-users-events
-func (s *EventsService) ListCurrentUserContributionEvents(opt *ListContributionEventsOptions, options ...RequestOptionFunc) ([]*ContributionEvent, *Response, error) {
-	req, err := s.client.NewRequest(http.MethodGet, "events", opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var cs []*ContributionEvent
-	resp, err := s.client.Do(req, &cs)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return cs, resp, nil
-}
-
-// ProjectEvent represents a GitLab project event.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/events.html#list-a-projects-visible-events
-type ProjectEvent struct {
-	ID          int    `json:"id"`
-	Title       string `json:"title"`
-	ProjectID   int    `json:"project_id"`
-	ActionName  string `json:"action_name"`
-	TargetID    int    `json:"target_id"`
-	TargetIID   int    `json:"target_iid"`
-	TargetType  string `json:"target_type"`
-	AuthorID    int    `json:"author_id"`
-	TargetTitle string `json:"target_title"`
-	CreatedAt   string `json:"created_at"`
-	Author      struct {
-		Name      string `json:"name"`
-		Username  string `json:"username"`
-		ID        int    `json:"id"`
-		State     string `json:"state"`
-		AvatarURL string `json:"avatar_url"`
-		WebURL    string `json:"web_url"`
-	} `json:"author"`
-	AuthorUsername string `json:"author_username"`
-	Data           struct {
-		Before            string      `json:"before"`
-		After             string      `json:"after"`
-		Ref               string      `json:"ref"`
-		UserID            int         `json:"user_id"`
-		UserName          string      `json:"user_name"`
-		Repository        *Repository `json:"repository"`
-		Commits           []*Commit   `json:"commits"`
-		TotalCommitsCount int         `json:"total_commits_count"`
-	} `json:"data"`
-	Note struct {
-		ID         int    `json:"id"`
-		Body       string `json:"body"`
-		Attachment string `json:"attachment"`
-		Author     struct {
-			ID        int    `json:"id"`
-			Username  string `json:"username"`
-			Email     string `json:"email"`
-			Name      string `json:"name"`
-			State     string `json:"state"`
-			AvatarURL string `json:"avatar_url"`
-			WebURL    string `json:"web_url"`
-		} `json:"author"`
-		CreatedAt    *time.Time `json:"created_at"`
-		System       bool       `json:"system"`
-		NoteableID   int        `json:"noteable_id"`
-		NoteableType string     `json:"noteable_type"`
-		NoteableIID  int        `json:"noteable_iid"`
-	} `json:"note"`
-	PushData struct {
-		CommitCount int    `json:"commit_count"`
-		Action      string `json:"action"`
-		RefType     string `json:"ref_type"`
-		CommitFrom  string `json:"commit_from"`
-		CommitTo    string `json:"commit_to"`
-		Ref         string `json:"ref"`
-		CommitTitle string `json:"commit_title"`
-	} `json:"push_data"`
-}
-
-func (s ProjectEvent) String() string {
-	return Stringify(s)
-}
-
-// ListProjectVisibleEventsOptions represents the available
-// ListProjectVisibleEvents() options.
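For reference, a short usage sketch of the contribution-event listing above; the token environment variable and username are placeholders, and gitlab.Ptr is the pointer helper shipped with recent versions of this module:

package main

import (
	"fmt"
	"log"
	"os"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient(os.Getenv("GITLAB_TOKEN"))
	if err != nil {
		log.Fatal(err)
	}

	opt := &gitlab.ListContributionEventsOptions{
		ListOptions: gitlab.ListOptions{PerPage: 20},
		Sort:        gitlab.Ptr("desc"),
	}
	// "some-user" is a placeholder; an integer user ID works as well.
	events, _, err := git.Users.ListUserContributionEvents("some-user", opt)
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range events {
		fmt.Println(e.ActionName, e.TargetTitle)
	}
}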
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/events.html#list-a-projects-visible-events -type ListProjectVisibleEventsOptions struct { - ListOptions - Action *EventTypeValue `url:"action,omitempty" json:"action,omitempty"` - TargetType *EventTargetTypeValue `url:"target_type,omitempty" json:"target_type,omitempty"` - Before *ISOTime `url:"before,omitempty" json:"before,omitempty"` - After *ISOTime `url:"after,omitempty" json:"after,omitempty"` - Sort *string `url:"sort,omitempty" json:"sort,omitempty"` -} - -// ListProjectVisibleEvents gets the events for the specified project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/events.html#list-a-projects-visible-events -func (s *EventsService) ListProjectVisibleEvents(pid interface{}, opt *ListProjectVisibleEventsOptions, options ...RequestOptionFunc) ([]*ProjectEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/events", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var p []*ProjectEvent - resp, err := s.client.Do(req, &p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/external_status_checks.go b/vendor/github.com/xanzy/go-gitlab/external_status_checks.go deleted file mode 100644 index c6a3f7b285..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/external_status_checks.go +++ /dev/null @@ -1,218 +0,0 @@ -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// ExternalStatusChecksService handles communication with the external -// status check related methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/status_checks.html -type ExternalStatusChecksService struct { - client *Client -} - -type MergeStatusCheck struct { - ID int `json:"id"` - Name string `json:"name"` - ExternalURL string `json:"external_url"` - Status string `json:"status"` -} - -type ProjectStatusCheck struct { - ID int `json:"id"` - Name string `json:"name"` - ProjectID int `json:"project_id"` - ExternalURL string `json:"external_url"` - ProtectedBranches []StatusCheckProtectedBranch `json:"protected_branches"` -} - -type StatusCheckProtectedBranch struct { - ID int `json:"id"` - ProjectID int `json:"project_id"` - Name string `json:"name"` - CreatedAt *time.Time `json:"created_at"` - UpdatedAt *time.Time `json:"updated_at"` - CodeOwnerApprovalRequired bool `json:"code_owner_approval_required"` -} - -// ListMergeStatusChecks lists the external status checks that apply to it -// and their status for a single merge request. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/status_checks.html#list-status-checks-for-a-merge-request -func (s *ExternalStatusChecksService) ListMergeStatusChecks(pid interface{}, mr int, opt *ListOptions, options ...RequestOptionFunc) ([]*MergeStatusCheck, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/status_checks", PathEscape(project), mr) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var mscs []*MergeStatusCheck - resp, err := s.client.Do(req, &mscs) - if err != nil { - return nil, resp, err - } - - return mscs, resp, nil -} - -// SetExternalStatusCheckStatusOptions represents the available -// SetExternalStatusCheckStatus() options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/status_checks.html#set-status-of-an-external-status-check -type SetExternalStatusCheckStatusOptions struct { - SHA *string `url:"sha,omitempty" json:"sha,omitempty"` - ExternalStatusCheckID *int `url:"external_status_check_id,omitempty" json:"external_status_check_id,omitempty"` - Status *string `url:"status,omitempty" json:"status,omitempty"` -} - -// SetExternalStatusCheckStatus sets the status of an external status check. -// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/status_checks.html#set-status-of-an-external-status-check -func (s *ExternalStatusChecksService) SetExternalStatusCheckStatus(pid interface{}, mergeRequest int, opt *SetExternalStatusCheckStatusOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/status_check_responses", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// ListProjectStatusChecks lists the project external status checks. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/status_checks.html#get-project-external-status-checks -func (s *ExternalStatusChecksService) ListProjectStatusChecks(pid interface{}, opt *ListOptions, options ...RequestOptionFunc) ([]*ProjectStatusCheck, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/external_status_checks", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pscs []*ProjectStatusCheck - resp, err := s.client.Do(req, &pscs) - if err != nil { - return nil, resp, err - } - - return pscs, resp, nil -} - -// CreateExternalStatusCheckOptions represents the available -// CreateExternalStatusCheck() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/status_checks.html#create-external-status-check -type CreateExternalStatusCheckOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - ExternalURL *string `url:"external_url,omitempty" json:"external_url,omitempty"` - ProtectedBranchIDs *[]int `url:"protected_branch_ids,omitempty" json:"protected_branch_ids,omitempty"` -} - -// CreateExternalStatusCheck creates an external status check. -// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/status_checks.html#create-external-status-check -func (s *ExternalStatusChecksService) CreateExternalStatusCheck(pid interface{}, opt *CreateExternalStatusCheckOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/external_status_checks", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// DeleteExternalStatusCheck deletes an external status check. 
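A sketch of reporting a result back through SetExternalStatusCheckStatus, given an authenticated client; the project ID, merge request IID, check ID, and SHA are placeholders, and "passed" is one of the status values the endpoint accepts:

package main

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func reportCheckResult(git *gitlab.Client) error {
	// 1234 is a placeholder project ID, 5 a placeholder MR IID.
	_, err := git.ExternalStatusChecks.SetExternalStatusCheckStatus(1234, 5,
		&gitlab.SetExternalStatusCheckStatusOptions{
			SHA:                   gitlab.Ptr("0123abcd"), // head SHA of the MR
			ExternalStatusCheckID: gitlab.Ptr(42),
			Status:                gitlab.Ptr("passed"),
		})
	return err
}

func main() {
	git, err := gitlab.NewClient("glpat-placeholder")
	if err != nil {
		log.Fatal(err)
	}
	if err := reportCheckResult(git); err != nil {
		log.Fatal(err)
	}
}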
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/status_checks.html#delete-external-status-check
-func (s *ExternalStatusChecksService) DeleteExternalStatusCheck(pid interface{}, check int, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/external_status_checks/%d", PathEscape(project), check)
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// UpdateExternalStatusCheckOptions represents the available
-// UpdateExternalStatusCheck() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/status_checks.html#update-external-status-check
-type UpdateExternalStatusCheckOptions struct {
-	Name               *string `url:"name,omitempty" json:"name,omitempty"`
-	ExternalURL        *string `url:"external_url,omitempty" json:"external_url,omitempty"`
-	ProtectedBranchIDs *[]int  `url:"protected_branch_ids,omitempty" json:"protected_branch_ids,omitempty"`
-}
-
-// UpdateExternalStatusCheck updates an external status check.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/status_checks.html#update-external-status-check
-func (s *ExternalStatusChecksService) UpdateExternalStatusCheck(pid interface{}, check int, opt *UpdateExternalStatusCheckOptions, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/external_status_checks/%d", PathEscape(project), check)
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// RetryFailedStatusCheckForAMergeRequest retries a failed external status
-// check for a merge request.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/status_checks.html#retry-failed-status-check-for-a-merge-request
-func (s *ExternalStatusChecksService) RetryFailedStatusCheckForAMergeRequest(pid interface{}, mergeRequest int, externalStatusCheck int, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/merge_requests/%d/status_checks/%d/retry", PathEscape(project), mergeRequest, externalStatusCheck)
-
-	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/feature_flags.go b/vendor/github.com/xanzy/go-gitlab/feature_flags.go
deleted file mode 100644
index 8bb847e151..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/feature_flags.go
+++ /dev/null
@@ -1,96 +0,0 @@
-//
-// Copyright 2021, Sander van Harmelen
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package gitlab
-
-import (
-	"fmt"
-	"net/http"
-	"net/url"
-)
-
-// FeaturesService handles the communication with the application features
-// related methods of the GitLab API.
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/features.html -type FeaturesService struct { - client *Client -} - -// Feature represents a GitLab feature flag. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/features.html -type Feature struct { - Name string `json:"name"` - State string `json:"state"` - Gates []Gate -} - -// Gate represents a gate of a GitLab feature flag. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/features.html -type Gate struct { - Key string `json:"key"` - Value interface{} `json:"value"` -} - -func (f Feature) String() string { - return Stringify(f) -} - -// ListFeatures gets a list of feature flags -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/features.html#list-all-features -func (s *FeaturesService) ListFeatures(options ...RequestOptionFunc) ([]*Feature, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "features", nil, options) - if err != nil { - return nil, nil, err - } - - var f []*Feature - resp, err := s.client.Do(req, &f) - if err != nil { - return nil, resp, err - } - return f, resp, nil -} - -// SetFeatureFlag sets or creates a feature flag gate -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/features.html#set-or-create-a-feature -func (s *FeaturesService) SetFeatureFlag(name string, value interface{}, options ...RequestOptionFunc) (*Feature, *Response, error) { - u := fmt.Sprintf("features/%s", url.PathEscape(name)) - - opt := struct { - Value interface{} `url:"value" json:"value"` - }{ - value, - } - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - f := &Feature{} - resp, err := s.client.Do(req, f) - if err != nil { - return nil, resp, err - } - return f, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/freeze_periods.go b/vendor/github.com/xanzy/go-gitlab/freeze_periods.go deleted file mode 100644 index 3cbfba81c4..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/freeze_periods.go +++ /dev/null @@ -1,194 +0,0 @@ -// -// Copyright 2021 Paul Cioanca -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// FreezePeriodsService handles the communication with the freeze periods -// related methods of the GitLab API. -// -// https://docs.gitlab.com/ee/api/freeze_periods.html -type FreezePeriodsService struct { - client *Client -} - -// FreezePeriod represents a freeze period object. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/freeze_periods.html#list-freeze-periods -type FreezePeriod struct { - ID int `json:"id"` - FreezeStart string `json:"freeze_start"` - FreezeEnd string `json:"freeze_end"` - CronTimezone string `json:"cron_timezone"` - CreatedAt *time.Time `json:"created_at"` - UpdatedAt *time.Time `json:"updated_at"` -} - -// ListFreezePeriodsOptions represents the available ListFreezePeriodsOptions() -// options. 
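SetFeatureFlag above takes the gate value as interface{} because GitLab accepts booleans as well as percentage strings. A brief sketch, with a placeholder token and flag name:

package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("glpat-placeholder")
	if err != nil {
		log.Fatal(err)
	}
	// "25" enables the flag for 25% of actors; true/false toggle it outright.
	feature, _, err := git.Features.SetFeatureFlag("my_experimental_feature", "25")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(feature.Name, feature.State)
}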
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/freeze_periods.html#list-freeze-periods
-type ListFreezePeriodsOptions ListOptions
-
-// ListFreezePeriods gets a list of project freeze periods.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/freeze_periods.html#list-freeze-periods
-func (s *FreezePeriodsService) ListFreezePeriods(pid interface{}, opt *ListFreezePeriodsOptions, options ...RequestOptionFunc) ([]*FreezePeriod, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/freeze_periods", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var fp []*FreezePeriod
-	resp, err := s.client.Do(req, &fp)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return fp, resp, nil
-}
-
-// GetFreezePeriod gets a specific freeze period for a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/freeze_periods.html#get-a-freeze-period-by-a-freeze_period_id
-func (s *FreezePeriodsService) GetFreezePeriod(pid interface{}, freezePeriod int, options ...RequestOptionFunc) (*FreezePeriod, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/freeze_periods/%d", PathEscape(project), freezePeriod)
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	fp := new(FreezePeriod)
-	resp, err := s.client.Do(req, fp)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return fp, resp, nil
-}
-
-// CreateFreezePeriodOptions represents the available CreateFreezePeriodOptions()
-// options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/freeze_periods.html#create-a-freeze-period
-type CreateFreezePeriodOptions struct {
-	FreezeStart  *string `url:"freeze_start,omitempty" json:"freeze_start,omitempty"`
-	FreezeEnd    *string `url:"freeze_end,omitempty" json:"freeze_end,omitempty"`
-	CronTimezone *string `url:"cron_timezone,omitempty" json:"cron_timezone,omitempty"`
-}
-
-// CreateFreezePeriodOptions adds a freeze period to a specified project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/freeze_periods.html#create-a-freeze-period
-func (s *FreezePeriodsService) CreateFreezePeriodOptions(pid interface{}, opt *CreateFreezePeriodOptions, options ...RequestOptionFunc) (*FreezePeriod, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/freeze_periods", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	fp := new(FreezePeriod)
-	resp, err := s.client.Do(req, fp)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return fp, resp, nil
-}
-
-// UpdateFreezePeriodOptions represents the available UpdateFreezePeriodOptions()
-// options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/freeze_periods.html#update-a-freeze-period
-type UpdateFreezePeriodOptions struct {
-	FreezeStart  *string `url:"freeze_start,omitempty" json:"freeze_start,omitempty"`
-	FreezeEnd    *string `url:"freeze_end,omitempty" json:"freeze_end,omitempty"`
-	CronTimezone *string `url:"cron_timezone,omitempty" json:"cron_timezone,omitempty"`
-}
-
-// UpdateFreezePeriodOptions edits a freeze period for a specified project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/freeze_periods.html#update-a-freeze-period
-func (s *FreezePeriodsService) UpdateFreezePeriodOptions(pid interface{}, freezePeriod int, opt *UpdateFreezePeriodOptions, options ...RequestOptionFunc) (*FreezePeriod, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/freeze_periods/%d", PathEscape(project), freezePeriod)
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	fp := new(FreezePeriod)
-	resp, err := s.client.Do(req, fp)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return fp, resp, nil
-}
-
-// DeleteFreezePeriod removes a freeze period from a project. This is an
-// idempotent method and can be called multiple times. Either the freeze
-// period is available or not.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/freeze_periods.html#delete-a-freeze-period
-func (s *FreezePeriodsService) DeleteFreezePeriod(pid interface{}, freezePeriod int, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/freeze_periods/%d", PathEscape(project), freezePeriod)
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/generic_packages.go b/vendor/github.com/xanzy/go-gitlab/generic_packages.go
deleted file mode 100644
index 4c32eed44a..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/generic_packages.go
+++ /dev/null
@@ -1,158 +0,0 @@
-//
-// Copyright 2021, Sune Keller
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package gitlab
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"net/http"
-	"time"
-)
-
-// GenericPackagesService handles communication with the packages related
-// methods of the GitLab API.
-//
-// GitLab docs:
-// https://docs.gitlab.com/ee/user/packages/generic_packages/index.html
-type GenericPackagesService struct {
-	client *Client
-}
-
-// GenericPackagesFile represents a GitLab generic package file.
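The freeze period endpoints above take cron expressions for the start and end of the window. A sketch of creating a weekend deploy freeze, with a placeholder token and project ID (note the create call really is named CreateFreezePeriodOptions in this version of the module):

package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("glpat-placeholder")
	if err != nil {
		log.Fatal(err)
	}
	fp, _, err := git.FreezePeriods.CreateFreezePeriodOptions(1234, &gitlab.CreateFreezePeriodOptions{
		FreezeStart:  gitlab.Ptr("0 23 * * 5"), // Fridays at 23:00
		FreezeEnd:    gitlab.Ptr("0 7 * * 1"),  // Mondays at 07:00
		CronTimezone: gitlab.Ptr("UTC"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(fp.ID, fp.FreezeStart, fp.FreezeEnd)
}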
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/user/packages/generic_packages/index.html#publish-a-package-file -type GenericPackagesFile struct { - ID int `json:"id"` - PackageID int `json:"package_id"` - CreatedAt *time.Time `json:"created_at"` - UpdatedAt *time.Time `json:"updated_at"` - Size int `json:"size"` - FileStore int `json:"file_store"` - FileMD5 string `json:"file_md5"` - FileSHA1 string `json:"file_sha1"` - FileName string `json:"file_name"` - File struct { - URL string `json:"url"` - } `json:"file"` - FileSHA256 string `json:"file_sha256"` - VerificationRetryAt *time.Time `json:"verification_retry_at"` - VerifiedAt *time.Time `json:"verified_at"` - VerificationFailure bool `json:"verification_failure"` - VerificationRetryCount int `json:"verification_retry_count"` - VerificationChecksum string `json:"verification_checksum"` - VerificationState int `json:"verification_state"` - VerificationStartedAt *time.Time `json:"verification_started_at"` - NewFilePath string `json:"new_file_path"` -} - -// FormatPackageURL returns the GitLab Package Registry URL for the given artifact metadata, without the BaseURL. -// This does not make a GitLab API request, but rather computes it based on their documentation. -func (s *GenericPackagesService) FormatPackageURL(pid interface{}, packageName, packageVersion, fileName string) (string, error) { - project, err := parseID(pid) - if err != nil { - return "", err - } - u := fmt.Sprintf( - "projects/%s/packages/generic/%s/%s/%s", - PathEscape(project), - PathEscape(packageName), - PathEscape(packageVersion), - PathEscape(fileName), - ) - return u, nil -} - -// PublishPackageFileOptions represents the available PublishPackageFile() -// options. -// -// GitLab docs: -// https://docs.gitlab.com/ee/user/packages/generic_packages/index.html#publish-a-package-file -type PublishPackageFileOptions struct { - Status *GenericPackageStatusValue `url:"status,omitempty" json:"status,omitempty"` - Select *GenericPackageSelectValue `url:"select,omitempty" json:"select,omitempty"` -} - -// PublishPackageFile uploads a file to a project's package registry. -// -// GitLab docs: -// https://docs.gitlab.com/ee/user/packages/generic_packages/index.html#publish-a-package-file -func (s *GenericPackagesService) PublishPackageFile(pid interface{}, packageName, packageVersion, fileName string, content io.Reader, opt *PublishPackageFileOptions, options ...RequestOptionFunc) (*GenericPackagesFile, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf( - "projects/%s/packages/generic/%s/%s/%s", - PathEscape(project), - PathEscape(packageName), - PathEscape(packageVersion), - PathEscape(fileName), - ) - - // We need to create the request as a GET request to make sure the options - // are set correctly. After the request is created we will overwrite both - // the method and the body. - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - // Overwrite the method and body. - req.Method = http.MethodPut - req.SetBody(content) - - f := new(GenericPackagesFile) - resp, err := s.client.Do(req, f) - if err != nil { - return nil, resp, err - } - - return f, resp, nil -} - -// DownloadPackageFile allows you to download the package file. 
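PublishPackageFile above streams any io.Reader into the project's generic package registry (internally it builds the request as a GET so the query options encode, then rewrites the method and body to a PUT). A sketch with placeholder IDs and names:

package main

import (
	"fmt"
	"log"
	"strings"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("glpat-placeholder")
	if err != nil {
		log.Fatal(err)
	}
	content := strings.NewReader("example artifact contents")
	// 1234 is a placeholder project ID; package name, version, and file
	// name are likewise placeholders.
	pkgFile, _, err := git.GenericPackages.PublishPackageFile(
		1234, "my-package", "0.1.0", "artifact.txt", content,
		&gitlab.PublishPackageFileOptions{},
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkgFile.ID, pkgFile.FileName)
}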
-// -// GitLab docs: -// https://docs.gitlab.com/ee/user/packages/generic_packages/index.html#download-package-file -func (s *GenericPackagesService) DownloadPackageFile(pid interface{}, packageName, packageVersion, fileName string, options ...RequestOptionFunc) ([]byte, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf( - "projects/%s/packages/generic/%s/%s/%s", - PathEscape(project), - PathEscape(packageName), - PathEscape(packageVersion), - PathEscape(fileName), - ) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var f bytes.Buffer - resp, err := s.client.Do(req, &f) - if err != nil { - return nil, resp, err - } - - return f.Bytes(), resp, err -} diff --git a/vendor/github.com/xanzy/go-gitlab/geo_nodes.go b/vendor/github.com/xanzy/go-gitlab/geo_nodes.go deleted file mode 100644 index 47ac583a56..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/geo_nodes.go +++ /dev/null @@ -1,433 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" -) - -// GeoNode represents a GitLab Geo Node. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/geo_nodes.html -type GeoNode struct { - ID int `json:"id"` - Name string `json:"name"` - URL string `json:"url"` - InternalURL string `json:"internal_url"` - Primary bool `json:"primary"` - Enabled bool `json:"enabled"` - Current bool `json:"current"` - FilesMaxCapacity int `json:"files_max_capacity"` - ReposMaxCapacity int `json:"repos_max_capacity"` - VerificationMaxCapacity int `json:"verification_max_capacity"` - SelectiveSyncType string `json:"selective_sync_type"` - SelectiveSyncShards []string `json:"selective_sync_shards"` - SelectiveSyncNamespaceIds []int `json:"selective_sync_namespace_ids"` - MinimumReverificationInterval int `json:"minimum_reverification_interval"` - ContainerRepositoriesMaxCapacity int `json:"container_repositories_max_capacity"` - SyncObjectStorage bool `json:"sync_object_storage"` - CloneProtocol string `json:"clone_protocol"` - WebEditURL string `json:"web_edit_url"` - WebGeoProjectsURL string `json:"web_geo_projects_url"` - Links GeoNodeLinks `json:"_links"` -} - -// GeoNodeLinks represents links for GitLab GeoNode. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/geo_nodes.html -type GeoNodeLinks struct { - Self string `json:"self"` - Status string `json:"status"` - Repair string `json:"repair"` -} - -// GeoNodesService handles communication with Geo Nodes related methods -// of GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/geo_nodes.html -type GeoNodesService struct { - client *Client -} - -// CreateGeoNodesOptions represents the available CreateGeoNode() options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/geo_nodes.html#create-a-new-geo-node -type CreateGeoNodesOptions struct { - Primary *bool `url:"primary,omitempty" json:"primary,omitempty"` - Enabled *bool `url:"enabled,omitempty" json:"enabled,omitempty"` - Name *string `url:"name,omitempty" json:"name,omitempty"` - URL *string `url:"url,omitempty" json:"url,omitempty"` - InternalURL *string `url:"internal_url,omitempty" json:"internal_url,omitempty"` - FilesMaxCapacity *int `url:"files_max_capacity,omitempty" json:"files_max_capacity,omitempty"` - ReposMaxCapacity *int `url:"repos_max_capacity,omitempty" json:"repos_max_capacity,omitempty"` - VerificationMaxCapacity *int `url:"verification_max_capacity,omitempty" json:"verification_max_capacity,omitempty"` - ContainerRepositoriesMaxCapacity *int `url:"container_repositories_max_capacity,omitempty" json:"container_repositories_max_capacity,omitempty"` - SyncObjectStorage *bool `url:"sync_object_storage,omitempty" json:"sync_object_storage,omitempty"` - SelectiveSyncType *string `url:"selective_sync_type,omitempty" json:"selective_sync_type,omitempty"` - SelectiveSyncShards *[]string `url:"selective_sync_shards,omitempty" json:"selective_sync_shards,omitempty"` - SelectiveSyncNamespaceIds *[]int `url:"selective_sync_namespace_ids,omitempty" json:"selective_sync_namespace_ids,omitempty"` - MinimumReverificationInterval *int `url:"minimum_reverification_interval,omitempty" json:"minimum_reverification_interval,omitempty"` -} - -// CreateGeoNode creates a new Geo Node. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/geo_nodes.html#create-a-new-geo-node -func (s *GeoNodesService) CreateGeoNode(opt *CreateGeoNodesOptions, options ...RequestOptionFunc) (*GeoNode, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "geo_nodes", opt, options) - if err != nil { - return nil, nil, err - } - - g := new(GeoNode) - resp, err := s.client.Do(req, g) - if err != nil { - return nil, resp, err - } - - return g, resp, nil -} - -// ListGeoNodesOptions represents the available ListGeoNodes() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/geo_nodes.html#retrieve-configuration-about-all-geo-nodes -type ListGeoNodesOptions ListOptions - -// ListGeoNodes gets a list of geo nodes. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/geo_nodes.html#retrieve-configuration-about-all-geo-nodes -func (s *GeoNodesService) ListGeoNodes(opt *ListGeoNodesOptions, options ...RequestOptionFunc) ([]*GeoNode, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "geo_nodes", opt, options) - if err != nil { - return nil, nil, err - } - - var gs []*GeoNode - resp, err := s.client.Do(req, &gs) - if err != nil { - return nil, resp, err - } - - return gs, resp, nil -} - -// GetGeoNode gets a specific geo node. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/geo_nodes.html#retrieve-configuration-about-a-specific-geo-node -func (s *GeoNodesService) GetGeoNode(id int, options ...RequestOptionFunc) (*GeoNode, *Response, error) { - u := fmt.Sprintf("geo_nodes/%d", id) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - g := new(GeoNode) - resp, err := s.client.Do(req, g) - if err != nil { - return nil, resp, err - } - - return g, resp, nil -} - -// UpdateGeoNodesOptions represents the available EditGeoNode() options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/geo_nodes.html#edit-a-geo-node -type UpdateGeoNodesOptions struct { - ID *int `url:"primary,omitempty" json:"primary,omitempty"` - Enabled *bool `url:"enabled,omitempty" json:"enabled,omitempty"` - Name *string `url:"name,omitempty" json:"name,omitempty"` - URL *string `url:"url,omitempty" json:"url,omitempty"` - InternalURL *string `url:"internal_url,omitempty" json:"internal_url,omitempty"` - FilesMaxCapacity *int `url:"files_max_capacity,omitempty" json:"files_max_capacity,omitempty"` - ReposMaxCapacity *int `url:"repos_max_capacity,omitempty" json:"repos_max_capacity,omitempty"` - VerificationMaxCapacity *int `url:"verification_max_capacity,omitempty" json:"verification_max_capacity,omitempty"` - ContainerRepositoriesMaxCapacity *int `url:"container_repositories_max_capacity,omitempty" json:"container_repositories_max_capacity,omitempty"` - SyncObjectStorage *bool `url:"sync_object_storage,omitempty" json:"sync_object_storage,omitempty"` - SelectiveSyncType *string `url:"selective_sync_type,omitempty" json:"selective_sync_type,omitempty"` - SelectiveSyncShards *[]string `url:"selective_sync_shards,omitempty" json:"selective_sync_shards,omitempty"` - SelectiveSyncNamespaceIds *[]int `url:"selective_sync_namespace_ids,omitempty" json:"selective_sync_namespace_ids,omitempty"` - MinimumReverificationInterval *int `url:"minimum_reverification_interval,omitempty" json:"minimum_reverification_interval,omitempty"` -} - -// EditGeoNode updates settings of an existing Geo node. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/geo_nodes.html#edit-a-geo-node -func (s *GeoNodesService) EditGeoNode(id int, opt *UpdateGeoNodesOptions, options ...RequestOptionFunc) (*GeoNode, *Response, error) { - u := fmt.Sprintf("geo_nodes/%d", id) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - g := new(GeoNode) - resp, err := s.client.Do(req, g) - if err != nil { - return nil, resp, err - } - - return g, resp, nil -} - -// DeleteGeoNode removes the Geo node. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/geo_nodes.html#delete-a-geo-node -func (s *GeoNodesService) DeleteGeoNode(id int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("geo_nodes/%d", id) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// RepairGeoNode to repair the OAuth authentication of a Geo node. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/geo_nodes.html#repair-a-geo-node -func (s *GeoNodesService) RepairGeoNode(id int, options ...RequestOptionFunc) (*GeoNode, *Response, error) { - u := fmt.Sprintf("geo_nodes/%d/repair", id) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - g := new(GeoNode) - resp, err := s.client.Do(req, g) - if err != nil { - return nil, resp, err - } - - return g, resp, nil -} - -// GeoNodeStatus represents the status of Geo Node. 
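For completeness, the Geo node CRUD above composes in the obvious way given an authenticated admin client; a brief sketch listing the configured nodes (token is a placeholder):

package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("glpat-placeholder")
	if err != nil {
		log.Fatal(err)
	}
	nodes, _, err := git.GeoNodes.ListGeoNodes(&gitlab.ListGeoNodesOptions{PerPage: 100})
	if err != nil {
		log.Fatal(err)
	}
	for _, n := range nodes {
		fmt.Printf("%s primary=%v enabled=%v\n", n.Name, n.Primary, n.Enabled)
	}
}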
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/geo_nodes.html#retrieve-status-about-all-geo-nodes -type GeoNodeStatus struct { - GeoNodeID int `json:"geo_node_id"` - Healthy bool `json:"healthy"` - Health string `json:"health"` - HealthStatus string `json:"health_status"` - MissingOauthApplication bool `json:"missing_oauth_application"` - AttachmentsCount int `json:"attachments_count"` - AttachmentsSyncedCount int `json:"attachments_synced_count"` - AttachmentsFailedCount int `json:"attachments_failed_count"` - AttachmentsSyncedMissingOnPrimaryCount int `json:"attachments_synced_missing_on_primary_count"` - AttachmentsSyncedInPercentage string `json:"attachments_synced_in_percentage"` - DbReplicationLagSeconds int `json:"db_replication_lag_seconds"` - LfsObjectsCount int `json:"lfs_objects_count"` - LfsObjectsSyncedCount int `json:"lfs_objects_synced_count"` - LfsObjectsFailedCount int `json:"lfs_objects_failed_count"` - LfsObjectsSyncedMissingOnPrimaryCount int `json:"lfs_objects_synced_missing_on_primary_count"` - LfsObjectsSyncedInPercentage string `json:"lfs_objects_synced_in_percentage"` - JobArtifactsCount int `json:"job_artifacts_count"` - JobArtifactsSyncedCount int `json:"job_artifacts_synced_count"` - JobArtifactsFailedCount int `json:"job_artifacts_failed_count"` - JobArtifactsSyncedMissingOnPrimaryCount int `json:"job_artifacts_synced_missing_on_primary_count"` - JobArtifactsSyncedInPercentage string `json:"job_artifacts_synced_in_percentage"` - ContainerRepositoriesCount int `json:"container_repositories_count"` - ContainerRepositoriesSyncedCount int `json:"container_repositories_synced_count"` - ContainerRepositoriesFailedCount int `json:"container_repositories_failed_count"` - ContainerRepositoriesSyncedInPercentage string `json:"container_repositories_synced_in_percentage"` - DesignRepositoriesCount int `json:"design_repositories_count"` - DesignRepositoriesSyncedCount int `json:"design_repositories_synced_count"` - DesignRepositoriesFailedCount int `json:"design_repositories_failed_count"` - DesignRepositoriesSyncedInPercentage string `json:"design_repositories_synced_in_percentage"` - ProjectsCount int `json:"projects_count"` - RepositoriesCount int `json:"repositories_count"` - RepositoriesFailedCount int `json:"repositories_failed_count"` - RepositoriesSyncedCount int `json:"repositories_synced_count"` - RepositoriesSyncedInPercentage string `json:"repositories_synced_in_percentage"` - WikisCount int `json:"wikis_count"` - WikisFailedCount int `json:"wikis_failed_count"` - WikisSyncedCount int `json:"wikis_synced_count"` - WikisSyncedInPercentage string `json:"wikis_synced_in_percentage"` - ReplicationSlotsCount int `json:"replication_slots_count"` - ReplicationSlotsUsedCount int `json:"replication_slots_used_count"` - ReplicationSlotsUsedInPercentage string `json:"replication_slots_used_in_percentage"` - ReplicationSlotsMaxRetainedWalBytes int `json:"replication_slots_max_retained_wal_bytes"` - RepositoriesCheckedCount int `json:"repositories_checked_count"` - RepositoriesCheckedFailedCount int `json:"repositories_checked_failed_count"` - RepositoriesCheckedInPercentage string `json:"repositories_checked_in_percentage"` - RepositoriesChecksummedCount int `json:"repositories_checksummed_count"` - RepositoriesChecksumFailedCount int `json:"repositories_checksum_failed_count"` - RepositoriesChecksummedInPercentage string `json:"repositories_checksummed_in_percentage"` - WikisChecksummedCount int `json:"wikis_checksummed_count"` - WikisChecksumFailedCount int 
`json:"wikis_checksum_failed_count"` - WikisChecksummedInPercentage string `json:"wikis_checksummed_in_percentage"` - RepositoriesVerifiedCount int `json:"repositories_verified_count"` - RepositoriesVerificationFailedCount int `json:"repositories_verification_failed_count"` - RepositoriesVerifiedInPercentage string `json:"repositories_verified_in_percentage"` - RepositoriesChecksumMismatchCount int `json:"repositories_checksum_mismatch_count"` - WikisVerifiedCount int `json:"wikis_verified_count"` - WikisVerificationFailedCount int `json:"wikis_verification_failed_count"` - WikisVerifiedInPercentage string `json:"wikis_verified_in_percentage"` - WikisChecksumMismatchCount int `json:"wikis_checksum_mismatch_count"` - RepositoriesRetryingVerificationCount int `json:"repositories_retrying_verification_count"` - WikisRetryingVerificationCount int `json:"wikis_retrying_verification_count"` - LastEventID int `json:"last_event_id"` - LastEventTimestamp int `json:"last_event_timestamp"` - CursorLastEventID int `json:"cursor_last_event_id"` - CursorLastEventTimestamp int `json:"cursor_last_event_timestamp"` - LastSuccessfulStatusCheckTimestamp int `json:"last_successful_status_check_timestamp"` - Version string `json:"version"` - Revision string `json:"revision"` - MergeRequestDiffsCount int `json:"merge_request_diffs_count"` - MergeRequestDiffsChecksumTotalCount int `json:"merge_request_diffs_checksum_total_count"` - MergeRequestDiffsChecksummedCount int `json:"merge_request_diffs_checksummed_count"` - MergeRequestDiffsChecksumFailedCount int `json:"merge_request_diffs_checksum_failed_count"` - MergeRequestDiffsSyncedCount int `json:"merge_request_diffs_synced_count"` - MergeRequestDiffsFailedCount int `json:"merge_request_diffs_failed_count"` - MergeRequestDiffsRegistryCount int `json:"merge_request_diffs_registry_count"` - MergeRequestDiffsVerificationTotalCount int `json:"merge_request_diffs_verification_total_count"` - MergeRequestDiffsVerifiedCount int `json:"merge_request_diffs_verified_count"` - MergeRequestDiffsVerificationFailedCount int `json:"merge_request_diffs_verification_failed_count"` - MergeRequestDiffsSyncedInPercentage string `json:"merge_request_diffs_synced_in_percentage"` - MergeRequestDiffsVerifiedInPercentage string `json:"merge_request_diffs_verified_in_percentage"` - PackageFilesCount int `json:"package_files_count"` - PackageFilesChecksumTotalCount int `json:"package_files_checksum_total_count"` - PackageFilesChecksummedCount int `json:"package_files_checksummed_count"` - PackageFilesChecksumFailedCount int `json:"package_files_checksum_failed_count"` - PackageFilesSyncedCount int `json:"package_files_synced_count"` - PackageFilesFailedCount int `json:"package_files_failed_count"` - PackageFilesRegistryCount int `json:"package_files_registry_count"` - PackageFilesVerificationTotalCount int `json:"package_files_verification_total_count"` - PackageFilesVerifiedCount int `json:"package_files_verified_count"` - PackageFilesVerificationFailedCount int `json:"package_files_verification_failed_count"` - PackageFilesSyncedInPercentage string `json:"package_files_synced_in_percentage"` - PackageFilesVerifiedInPercentage string `json:"package_files_verified_in_percentage"` - PagesDeploymentsCount int `json:"pages_deployments_count"` - PagesDeploymentsChecksumTotalCount int `json:"pages_deployments_checksum_total_count"` - PagesDeploymentsChecksummedCount int `json:"pages_deployments_checksummed_count"` - PagesDeploymentsChecksumFailedCount int 
`json:"pages_deployments_checksum_failed_count"` - PagesDeploymentsSyncedCount int `json:"pages_deployments_synced_count"` - PagesDeploymentsFailedCount int `json:"pages_deployments_failed_count"` - PagesDeploymentsRegistryCount int `json:"pages_deployments_registry_count"` - PagesDeploymentsVerificationTotalCount int `json:"pages_deployments_verification_total_count"` - PagesDeploymentsVerifiedCount int `json:"pages_deployments_verified_count"` - PagesDeploymentsVerificationFailedCount int `json:"pages_deployments_verification_failed_count"` - PagesDeploymentsSyncedInPercentage string `json:"pages_deployments_synced_in_percentage"` - PagesDeploymentsVerifiedInPercentage string `json:"pages_deployments_verified_in_percentage"` - TerraformStateVersionsCount int `json:"terraform_state_versions_count"` - TerraformStateVersionsChecksumTotalCount int `json:"terraform_state_versions_checksum_total_count"` - TerraformStateVersionsChecksummedCount int `json:"terraform_state_versions_checksummed_count"` - TerraformStateVersionsChecksumFailedCount int `json:"terraform_state_versions_checksum_failed_count"` - TerraformStateVersionsSyncedCount int `json:"terraform_state_versions_synced_count"` - TerraformStateVersionsFailedCount int `json:"terraform_state_versions_failed_count"` - TerraformStateVersionsRegistryCount int `json:"terraform_state_versions_registry_count"` - TerraformStateVersionsVerificationTotalCount int `json:"terraform_state_versions_verification_total_count"` - TerraformStateVersionsVerifiedCount int `json:"terraform_state_versions_verified_count"` - TerraformStateVersionsVerificationFailedCount int `json:"terraform_state_versions_verification_failed_count"` - TerraformStateVersionsSyncedInPercentage string `json:"terraform_state_versions_synced_in_percentage"` - TerraformStateVersionsVerifiedInPercentage string `json:"terraform_state_versions_verified_in_percentage"` - SnippetRepositoriesCount int `json:"snippet_repositories_count"` - SnippetRepositoriesChecksumTotalCount int `json:"snippet_repositories_checksum_total_count"` - SnippetRepositoriesChecksummedCount int `json:"snippet_repositories_checksummed_count"` - SnippetRepositoriesChecksumFailedCount int `json:"snippet_repositories_checksum_failed_count"` - SnippetRepositoriesSyncedCount int `json:"snippet_repositories_synced_count"` - SnippetRepositoriesFailedCount int `json:"snippet_repositories_failed_count"` - SnippetRepositoriesRegistryCount int `json:"snippet_repositories_registry_count"` - SnippetRepositoriesVerificationTotalCount int `json:"snippet_repositories_verification_total_count"` - SnippetRepositoriesVerifiedCount int `json:"snippet_repositories_verified_count"` - SnippetRepositoriesVerificationFailedCount int `json:"snippet_repositories_verification_failed_count"` - SnippetRepositoriesSyncedInPercentage string `json:"snippet_repositories_synced_in_percentage"` - SnippetRepositoriesVerifiedInPercentage string `json:"snippet_repositories_verified_in_percentage"` - GroupWikiRepositoriesCount int `json:"group_wiki_repositories_count"` - GroupWikiRepositoriesChecksumTotalCount int `json:"group_wiki_repositories_checksum_total_count"` - GroupWikiRepositoriesChecksummedCount int `json:"group_wiki_repositories_checksummed_count"` - GroupWikiRepositoriesChecksumFailedCount int `json:"group_wiki_repositories_checksum_failed_count"` - GroupWikiRepositoriesSyncedCount int `json:"group_wiki_repositories_synced_count"` - GroupWikiRepositoriesFailedCount int `json:"group_wiki_repositories_failed_count"` - 
-	GroupWikiRepositoriesRegistryCount            int    `json:"group_wiki_repositories_registry_count"`
-	GroupWikiRepositoriesVerificationTotalCount   int    `json:"group_wiki_repositories_verification_total_count"`
-	GroupWikiRepositoriesVerifiedCount            int    `json:"group_wiki_repositories_verified_count"`
-	GroupWikiRepositoriesVerificationFailedCount  int    `json:"group_wiki_repositories_verification_failed_count"`
-	GroupWikiRepositoriesSyncedInPercentage       string `json:"group_wiki_repositories_synced_in_percentage"`
-	GroupWikiRepositoriesVerifiedInPercentage     string `json:"group_wiki_repositories_verified_in_percentage"`
-	PipelineArtifactsCount                        int    `json:"pipeline_artifacts_count"`
-	PipelineArtifactsChecksumTotalCount           int    `json:"pipeline_artifacts_checksum_total_count"`
-	PipelineArtifactsChecksummedCount             int    `json:"pipeline_artifacts_checksummed_count"`
-	PipelineArtifactsChecksumFailedCount          int    `json:"pipeline_artifacts_checksum_failed_count"`
-	PipelineArtifactsSyncedCount                  int    `json:"pipeline_artifacts_synced_count"`
-	PipelineArtifactsFailedCount                  int    `json:"pipeline_artifacts_failed_count"`
-	PipelineArtifactsRegistryCount                int    `json:"pipeline_artifacts_registry_count"`
-	PipelineArtifactsVerificationTotalCount       int    `json:"pipeline_artifacts_verification_total_count"`
-	PipelineArtifactsVerifiedCount                int    `json:"pipeline_artifacts_verified_count"`
-	PipelineArtifactsVerificationFailedCount      int    `json:"pipeline_artifacts_verification_failed_count"`
-	PipelineArtifactsSyncedInPercentage           string `json:"pipeline_artifacts_synced_in_percentage"`
-	PipelineArtifactsVerifiedInPercentage         string `json:"pipeline_artifacts_verified_in_percentage"`
-	UploadsCount                                  int    `json:"uploads_count"`
-	UploadsSyncedCount                            int    `json:"uploads_synced_count"`
-	UploadsFailedCount                            int    `json:"uploads_failed_count"`
-	UploadsRegistryCount                          int    `json:"uploads_registry_count"`
-	UploadsSyncedInPercentage                     string `json:"uploads_synced_in_percentage"`
-}
-
-// RetrieveStatusOfAllGeoNodes gets the status of all Geo Nodes.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/geo_nodes.html#retrieve-status-about-all-geo-nodes
-func (s *GeoNodesService) RetrieveStatusOfAllGeoNodes(options ...RequestOptionFunc) ([]*GeoNodeStatus, *Response, error) {
-	req, err := s.client.NewRequest(http.MethodGet, "geo_nodes/status", nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var gnss []*GeoNodeStatus
-	resp, err := s.client.Do(req, &gnss)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return gnss, resp, nil
-}
-
-// RetrieveStatusOfGeoNode gets the status of a specific Geo Node.
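
For context on the service being dropped from the vendor tree here: a minimal sketch of how a caller consumed the Geo-node status API above, assuming a placeholder token and self-managed base URL (the constructor, option, and signatures are the ones shown in this deleted file):

	package main

	import (
		"fmt"
		"log"

		gitlab "github.com/xanzy/go-gitlab"
	)

	func main() {
		// Placeholder token and URL; WithBaseURL points the client at a
		// self-managed GitLab instance.
		git, err := gitlab.NewClient("glpat-example",
			gitlab.WithBaseURL("https://gitlab.example.com"))
		if err != nil {
			log.Fatal(err)
		}

		// One GeoNodeStatus per node, matching the struct fields above.
		statuses, _, err := git.GeoNodes.RetrieveStatusOfAllGeoNodes()
		if err != nil {
			log.Fatal(err)
		}
		for _, s := range statuses {
			fmt.Printf("node %d healthy=%t repos synced=%s\n",
				s.GeoNodeID, s.Healthy, s.RepositoriesSyncedInPercentage)
		}
	}
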
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/geo_nodes.html#retrieve-status-about-a-specific-geo-node -func (s *GeoNodesService) RetrieveStatusOfGeoNode(id int, options ...RequestOptionFunc) (*GeoNodeStatus, *Response, error) { - u := fmt.Sprintf("geo_nodes/%d/status", id) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - gns := new(GeoNodeStatus) - resp, err := s.client.Do(req, gns) - if err != nil { - return nil, resp, err - } - - return gns, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/gitignore_templates.go b/vendor/github.com/xanzy/go-gitlab/gitignore_templates.go deleted file mode 100644 index 83d5872c14..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/gitignore_templates.go +++ /dev/null @@ -1,93 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "net/url" -) - -// GitIgnoreTemplatesService handles communication with the gitignore -// templates related methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/templates/gitignores.html -type GitIgnoreTemplatesService struct { - client *Client -} - -// GitIgnoreTemplate represents a GitLab gitignore template. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/templates/gitignores.html -type GitIgnoreTemplate struct { - Name string `json:"name"` - Content string `json:"content"` -} - -// GitIgnoreTemplateListItem represents a GitLab gitignore template from the list. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/templates/gitignores.html -type GitIgnoreTemplateListItem struct { - Key string `json:"key"` - Name string `json:"name"` -} - -// ListTemplatesOptions represents the available ListAllTemplates() options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/templates/gitignores.html#get-all-gitignore-templates -type ListTemplatesOptions ListOptions - -// ListTemplates get a list of available git ignore templates -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/templates/gitignores.html#get-all-gitignore-templates -func (s *GitIgnoreTemplatesService) ListTemplates(opt *ListTemplatesOptions, options ...RequestOptionFunc) ([]*GitIgnoreTemplateListItem, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "templates/gitignores", opt, options) - if err != nil { - return nil, nil, err - } - - var gs []*GitIgnoreTemplateListItem - resp, err := s.client.Do(req, &gs) - if err != nil { - return nil, resp, err - } - - return gs, resp, nil -} - -// GetTemplate get a git ignore template -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/templates/gitignores.html#get-a-single-gitignore-template -func (s *GitIgnoreTemplatesService) GetTemplate(key string, options ...RequestOptionFunc) (*GitIgnoreTemplate, *Response, error) { - u := fmt.Sprintf("templates/gitignores/%s", url.PathEscape(key)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - g := new(GitIgnoreTemplate) - resp, err := s.client.Do(req, g) - if err != nil { - return nil, resp, err - } - - return g, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/gitlab.go b/vendor/github.com/xanzy/go-gitlab/gitlab.go deleted file mode 100644 index 19ed3eadb9..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/gitlab.go +++ /dev/null @@ -1,1049 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -// Package gitlab implements a GitLab API client. -package gitlab - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "math" - "math/rand" - "mime/multipart" - "net/http" - "net/url" - "sort" - "strconv" - "strings" - "sync" - "time" - - "github.com/hashicorp/go-cleanhttp" - - "github.com/google/go-querystring/query" - retryablehttp "github.com/hashicorp/go-retryablehttp" - "golang.org/x/oauth2" - "golang.org/x/time/rate" -) - -const ( - defaultBaseURL = "https://gitlab.com/" - apiVersionPath = "api/v4/" - userAgent = "go-gitlab" - - headerRateLimit = "RateLimit-Limit" - headerRateReset = "RateLimit-Reset" -) - -// AuthType represents an authentication type within GitLab. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/ -type AuthType int - -// List of available authentication types. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/ -const ( - BasicAuth AuthType = iota - JobToken - OAuthToken - PrivateToken -) - -var ErrNotFound = errors.New("404 Not Found") - -// A Client manages communication with the GitLab API. -type Client struct { - // HTTP client used to communicate with the API. - client *retryablehttp.Client - - // Base URL for API requests. 
Defaults to the public GitLab API, but can be - // set to a domain endpoint to use with a self hosted GitLab server. baseURL - // should always be specified with a trailing slash. - baseURL *url.URL - - // disableRetries is used to disable the default retry logic. - disableRetries bool - - // configureLimiterOnce is used to make sure the limiter is configured exactly - // once and block all other calls until the initial (one) call is done. - configureLimiterOnce sync.Once - - // Limiter is used to limit API calls and prevent 429 responses. - limiter RateLimiter - - // Token type used to make authenticated API calls. - authType AuthType - - // Username and password used for basic authentication. - username, password string - - // Token used to make authenticated API calls. - token string - - // Protects the token field from concurrent read/write accesses. - tokenLock sync.RWMutex - - // Default request options applied to every request. - defaultRequestOptions []RequestOptionFunc - - // User agent used when communicating with the GitLab API. - UserAgent string - - // Services used for talking to different parts of the GitLab API. - AccessRequests *AccessRequestsService - Appearance *AppearanceService - Applications *ApplicationsService - AuditEvents *AuditEventsService - Avatar *AvatarRequestsService - AwardEmoji *AwardEmojiService - Boards *IssueBoardsService - Branches *BranchesService - BroadcastMessage *BroadcastMessagesService - CIYMLTemplate *CIYMLTemplatesService - ClusterAgents *ClusterAgentsService - Commits *CommitsService - ContainerRegistry *ContainerRegistryService - CustomAttribute *CustomAttributesService - DeployKeys *DeployKeysService - DeployTokens *DeployTokensService - DeploymentMergeRequests *DeploymentMergeRequestsService - Deployments *DeploymentsService - Discussions *DiscussionsService - DockerfileTemplate *DockerfileTemplatesService - DORAMetrics *DORAMetricsService - DraftNotes *DraftNotesService - Environments *EnvironmentsService - EpicIssues *EpicIssuesService - Epics *EpicsService - ErrorTracking *ErrorTrackingService - Events *EventsService - ExternalStatusChecks *ExternalStatusChecksService - Features *FeaturesService - FreezePeriods *FreezePeriodsService - GenericPackages *GenericPackagesService - GeoNodes *GeoNodesService - GitIgnoreTemplates *GitIgnoreTemplatesService - GroupAccessTokens *GroupAccessTokensService - GroupBadges *GroupBadgesService - GroupCluster *GroupClustersService - GroupEpicBoards *GroupEpicBoardsService - GroupImportExport *GroupImportExportService - GroupIssueBoards *GroupIssueBoardsService - GroupIterations *GroupIterationsService - GroupLabels *GroupLabelsService - GroupMembers *GroupMembersService - GroupMilestones *GroupMilestonesService - GroupProtectedEnvironments *GroupProtectedEnvironmentsService - GroupRepositoryStorageMove *GroupRepositoryStorageMoveService - GroupSSHCertificates *GroupSSHCertificatesService - GroupVariables *GroupVariablesService - GroupWikis *GroupWikisService - Groups *GroupsService - Import *ImportService - InstanceCluster *InstanceClustersService - InstanceVariables *InstanceVariablesService - Invites *InvitesService - IssueLinks *IssueLinksService - Issues *IssuesService - IssuesStatistics *IssuesStatisticsService - Jobs *JobsService - JobTokenScope *JobTokenScopeService - Keys *KeysService - Labels *LabelsService - License *LicenseService - LicenseTemplates *LicenseTemplatesService - ManagedLicenses *ManagedLicensesService - Markdown *MarkdownService - MemberRolesService *MemberRolesService - 
MergeRequestApprovals *MergeRequestApprovalsService - MergeRequests *MergeRequestsService - MergeTrains *MergeTrainsService - Metadata *MetadataService - Milestones *MilestonesService - Namespaces *NamespacesService - Notes *NotesService - NotificationSettings *NotificationSettingsService - Packages *PackagesService - Pages *PagesService - PagesDomains *PagesDomainsService - PersonalAccessTokens *PersonalAccessTokensService - PipelineSchedules *PipelineSchedulesService - PipelineTriggers *PipelineTriggersService - Pipelines *PipelinesService - PlanLimits *PlanLimitsService - ProjectAccessTokens *ProjectAccessTokensService - ProjectBadges *ProjectBadgesService - ProjectCluster *ProjectClustersService - ProjectFeatureFlags *ProjectFeatureFlagService - ProjectImportExport *ProjectImportExportService - ProjectIterations *ProjectIterationsService - ProjectMembers *ProjectMembersService - ProjectMirrors *ProjectMirrorService - ProjectRepositoryStorageMove *ProjectRepositoryStorageMoveService - ProjectSnippets *ProjectSnippetsService - ProjectTemplates *ProjectTemplatesService - ProjectVariables *ProjectVariablesService - ProjectVulnerabilities *ProjectVulnerabilitiesService - Projects *ProjectsService - ProtectedBranches *ProtectedBranchesService - ProtectedEnvironments *ProtectedEnvironmentsService - ProtectedTags *ProtectedTagsService - ReleaseLinks *ReleaseLinksService - Releases *ReleasesService - Repositories *RepositoriesService - RepositoryFiles *RepositoryFilesService - RepositorySubmodules *RepositorySubmodulesService - ResourceGroup *ResourceGroupService - ResourceIterationEvents *ResourceIterationEventsService - ResourceLabelEvents *ResourceLabelEventsService - ResourceMilestoneEvents *ResourceMilestoneEventsService - ResourceStateEvents *ResourceStateEventsService - ResourceWeightEvents *ResourceWeightEventsService - Runners *RunnersService - Search *SearchService - Services *ServicesService - Settings *SettingsService - Sidekiq *SidekiqService - SnippetRepositoryStorageMove *SnippetRepositoryStorageMoveService - Snippets *SnippetsService - SystemHooks *SystemHooksService - Tags *TagsService - Todos *TodosService - Topics *TopicsService - Users *UsersService - Validate *ValidateService - Version *VersionService - Wikis *WikisService -} - -// ListOptions specifies the optional parameters to various List methods that -// support pagination. -type ListOptions struct { - // For keyset-based paginated result sets, the value must be `"keyset"` - Pagination string `url:"pagination,omitempty" json:"pagination,omitempty"` - // For offset-based and keyset-based paginated result sets, the number of results to include per page. - PerPage int `url:"per_page,omitempty" json:"per_page,omitempty"` - // For offset-based paginated result sets, page of results to retrieve. - Page int `url:"page,omitempty" json:"page,omitempty"` - // For keyset-based paginated result sets, tree record ID at which to fetch the next page. - PageToken string `url:"page_token,omitempty" json:"page_token,omitempty"` - // For keyset-based paginated result sets, name of the column by which to order - OrderBy string `url:"order_by,omitempty" json:"order_by,omitempty"` - // For keyset-based paginated result sets, sort order (`"asc"`` or `"desc"`) - Sort string `url:"sort,omitempty" json:"sort,omitempty"` -} - -// RateLimiter describes the interface that all (custom) rate limiters must implement. -type RateLimiter interface { - Wait(context.Context) error -} - -// NewClient returns a new GitLab API client. 
To use API methods which require -// authentication, provide a valid private or personal token. -func NewClient(token string, options ...ClientOptionFunc) (*Client, error) { - client, err := newClient(options...) - if err != nil { - return nil, err - } - client.authType = PrivateToken - client.token = token - return client, nil -} - -// NewBasicAuthClient returns a new GitLab API client. To use API methods which -// require authentication, provide a valid username and password. -func NewBasicAuthClient(username, password string, options ...ClientOptionFunc) (*Client, error) { - client, err := newClient(options...) - if err != nil { - return nil, err - } - - client.authType = BasicAuth - client.username = username - client.password = password - - return client, nil -} - -// NewJobClient returns a new GitLab API client. To use API methods which require -// authentication, provide a valid job token. -func NewJobClient(token string, options ...ClientOptionFunc) (*Client, error) { - client, err := newClient(options...) - if err != nil { - return nil, err - } - client.authType = JobToken - client.token = token - return client, nil -} - -// NewOAuthClient returns a new GitLab API client. To use API methods which -// require authentication, provide a valid oauth token. -func NewOAuthClient(token string, options ...ClientOptionFunc) (*Client, error) { - client, err := newClient(options...) - if err != nil { - return nil, err - } - client.authType = OAuthToken - client.token = token - return client, nil -} - -func newClient(options ...ClientOptionFunc) (*Client, error) { - c := &Client{UserAgent: userAgent} - - // Configure the HTTP client. - c.client = &retryablehttp.Client{ - Backoff: c.retryHTTPBackoff, - CheckRetry: c.retryHTTPCheck, - ErrorHandler: retryablehttp.PassthroughErrorHandler, - HTTPClient: cleanhttp.DefaultPooledClient(), - RetryWaitMin: 100 * time.Millisecond, - RetryWaitMax: 400 * time.Millisecond, - RetryMax: 5, - } - - // Set the default base URL. - c.setBaseURL(defaultBaseURL) - - // Apply any given client options. - for _, fn := range options { - if fn == nil { - continue - } - if err := fn(c); err != nil { - return nil, err - } - } - - // If no custom limiter was set using a client option, configure - // the default rate limiter with values that implicitly disable - // rate limiting until an initial HTTP call is done and we can - // use the headers to try and properly configure the limiter. - if c.limiter == nil { - c.limiter = rate.NewLimiter(rate.Inf, 0) - } - - // Create the internal timeStats service. - timeStats := &timeStatsService{client: c} - - // Create all the public services. 
- c.AccessRequests = &AccessRequestsService{client: c} - c.Appearance = &AppearanceService{client: c} - c.Applications = &ApplicationsService{client: c} - c.AuditEvents = &AuditEventsService{client: c} - c.Avatar = &AvatarRequestsService{client: c} - c.AwardEmoji = &AwardEmojiService{client: c} - c.Boards = &IssueBoardsService{client: c} - c.Branches = &BranchesService{client: c} - c.BroadcastMessage = &BroadcastMessagesService{client: c} - c.CIYMLTemplate = &CIYMLTemplatesService{client: c} - c.ClusterAgents = &ClusterAgentsService{client: c} - c.Commits = &CommitsService{client: c} - c.ContainerRegistry = &ContainerRegistryService{client: c} - c.CustomAttribute = &CustomAttributesService{client: c} - c.DeployKeys = &DeployKeysService{client: c} - c.DeployTokens = &DeployTokensService{client: c} - c.DeploymentMergeRequests = &DeploymentMergeRequestsService{client: c} - c.Deployments = &DeploymentsService{client: c} - c.Discussions = &DiscussionsService{client: c} - c.DockerfileTemplate = &DockerfileTemplatesService{client: c} - c.DORAMetrics = &DORAMetricsService{client: c} - c.DraftNotes = &DraftNotesService{client: c} - c.Environments = &EnvironmentsService{client: c} - c.EpicIssues = &EpicIssuesService{client: c} - c.Epics = &EpicsService{client: c} - c.ErrorTracking = &ErrorTrackingService{client: c} - c.Events = &EventsService{client: c} - c.ExternalStatusChecks = &ExternalStatusChecksService{client: c} - c.Features = &FeaturesService{client: c} - c.FreezePeriods = &FreezePeriodsService{client: c} - c.GenericPackages = &GenericPackagesService{client: c} - c.GeoNodes = &GeoNodesService{client: c} - c.GitIgnoreTemplates = &GitIgnoreTemplatesService{client: c} - c.GroupAccessTokens = &GroupAccessTokensService{client: c} - c.GroupBadges = &GroupBadgesService{client: c} - c.GroupCluster = &GroupClustersService{client: c} - c.GroupEpicBoards = &GroupEpicBoardsService{client: c} - c.GroupImportExport = &GroupImportExportService{client: c} - c.GroupIssueBoards = &GroupIssueBoardsService{client: c} - c.GroupIterations = &GroupIterationsService{client: c} - c.GroupLabels = &GroupLabelsService{client: c} - c.GroupMembers = &GroupMembersService{client: c} - c.GroupMilestones = &GroupMilestonesService{client: c} - c.GroupProtectedEnvironments = &GroupProtectedEnvironmentsService{client: c} - c.GroupRepositoryStorageMove = &GroupRepositoryStorageMoveService{client: c} - c.GroupSSHCertificates = &GroupSSHCertificatesService{client: c} - c.GroupVariables = &GroupVariablesService{client: c} - c.GroupWikis = &GroupWikisService{client: c} - c.Groups = &GroupsService{client: c} - c.Import = &ImportService{client: c} - c.InstanceCluster = &InstanceClustersService{client: c} - c.InstanceVariables = &InstanceVariablesService{client: c} - c.Invites = &InvitesService{client: c} - c.IssueLinks = &IssueLinksService{client: c} - c.Issues = &IssuesService{client: c, timeStats: timeStats} - c.IssuesStatistics = &IssuesStatisticsService{client: c} - c.Jobs = &JobsService{client: c} - c.JobTokenScope = &JobTokenScopeService{client: c} - c.Keys = &KeysService{client: c} - c.Labels = &LabelsService{client: c} - c.License = &LicenseService{client: c} - c.LicenseTemplates = &LicenseTemplatesService{client: c} - c.ManagedLicenses = &ManagedLicensesService{client: c} - c.Markdown = &MarkdownService{client: c} - c.MemberRolesService = &MemberRolesService{client: c} - c.MergeRequestApprovals = &MergeRequestApprovalsService{client: c} - c.MergeRequests = &MergeRequestsService{client: c, timeStats: timeStats} - c.MergeTrains 
= &MergeTrainsService{client: c} - c.Metadata = &MetadataService{client: c} - c.Milestones = &MilestonesService{client: c} - c.Namespaces = &NamespacesService{client: c} - c.Notes = &NotesService{client: c} - c.NotificationSettings = &NotificationSettingsService{client: c} - c.Packages = &PackagesService{client: c} - c.Pages = &PagesService{client: c} - c.PagesDomains = &PagesDomainsService{client: c} - c.PersonalAccessTokens = &PersonalAccessTokensService{client: c} - c.PipelineSchedules = &PipelineSchedulesService{client: c} - c.PipelineTriggers = &PipelineTriggersService{client: c} - c.Pipelines = &PipelinesService{client: c} - c.PlanLimits = &PlanLimitsService{client: c} - c.ProjectAccessTokens = &ProjectAccessTokensService{client: c} - c.ProjectBadges = &ProjectBadgesService{client: c} - c.ProjectCluster = &ProjectClustersService{client: c} - c.ProjectFeatureFlags = &ProjectFeatureFlagService{client: c} - c.ProjectImportExport = &ProjectImportExportService{client: c} - c.ProjectIterations = &ProjectIterationsService{client: c} - c.ProjectMembers = &ProjectMembersService{client: c} - c.ProjectMirrors = &ProjectMirrorService{client: c} - c.ProjectRepositoryStorageMove = &ProjectRepositoryStorageMoveService{client: c} - c.ProjectSnippets = &ProjectSnippetsService{client: c} - c.ProjectTemplates = &ProjectTemplatesService{client: c} - c.ProjectVariables = &ProjectVariablesService{client: c} - c.ProjectVulnerabilities = &ProjectVulnerabilitiesService{client: c} - c.Projects = &ProjectsService{client: c} - c.ProtectedBranches = &ProtectedBranchesService{client: c} - c.ProtectedEnvironments = &ProtectedEnvironmentsService{client: c} - c.ProtectedTags = &ProtectedTagsService{client: c} - c.ReleaseLinks = &ReleaseLinksService{client: c} - c.Releases = &ReleasesService{client: c} - c.Repositories = &RepositoriesService{client: c} - c.RepositoryFiles = &RepositoryFilesService{client: c} - c.RepositorySubmodules = &RepositorySubmodulesService{client: c} - c.ResourceGroup = &ResourceGroupService{client: c} - c.ResourceIterationEvents = &ResourceIterationEventsService{client: c} - c.ResourceLabelEvents = &ResourceLabelEventsService{client: c} - c.ResourceMilestoneEvents = &ResourceMilestoneEventsService{client: c} - c.ResourceStateEvents = &ResourceStateEventsService{client: c} - c.ResourceWeightEvents = &ResourceWeightEventsService{client: c} - c.Runners = &RunnersService{client: c} - c.Search = &SearchService{client: c} - c.Services = &ServicesService{client: c} - c.Settings = &SettingsService{client: c} - c.Sidekiq = &SidekiqService{client: c} - c.Snippets = &SnippetsService{client: c} - c.SnippetRepositoryStorageMove = &SnippetRepositoryStorageMoveService{client: c} - c.SystemHooks = &SystemHooksService{client: c} - c.Tags = &TagsService{client: c} - c.Todos = &TodosService{client: c} - c.Topics = &TopicsService{client: c} - c.Users = &UsersService{client: c} - c.Validate = &ValidateService{client: c} - c.Version = &VersionService{client: c} - c.Wikis = &WikisService{client: c} - - return c, nil -} - -// retryHTTPCheck provides a callback for Client.CheckRetry which -// will retry both rate limit (429) and server (>= 500) errors. 
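
The default limiter above is configured lazily from the RateLimit-* response headers; a minimal sketch of overriding it with a fixed client-side limit instead, assuming the module's WithCustomLimiter client option (any value satisfying the RateLimiter interface declared earlier works, including *rate.Limiter):

	package main

	import (
		"log"

		gitlab "github.com/xanzy/go-gitlab"
		"golang.org/x/time/rate"
	)

	func main() {
		// *rate.Limiter satisfies RateLimiter (Wait(context.Context) error),
		// so it can stand in for the header-driven limiter that newClient
		// installs lazily.
		limiter := rate.NewLimiter(rate.Limit(5), 1) // ~5 requests/second, burst of 1

		// Placeholder token.
		git, err := gitlab.NewClient("glpat-example", gitlab.WithCustomLimiter(limiter))
		if err != nil {
			log.Fatal(err)
		}
		_ = git
	}
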
-func (c *Client) retryHTTPCheck(ctx context.Context, resp *http.Response, err error) (bool, error) {
-	if ctx.Err() != nil {
-		return false, ctx.Err()
-	}
-	if err != nil {
-		return false, err
-	}
-	if !c.disableRetries && (resp.StatusCode == 429 || resp.StatusCode >= 500) {
-		return true, nil
-	}
-	return false, nil
-}
-
-// retryHTTPBackoff provides a generic callback for Client.Backoff which
-// will pass through all calls based on the status code of the response.
-func (c *Client) retryHTTPBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration {
-	// Use the rate limit backoff function when we are rate limited.
-	if resp != nil && resp.StatusCode == 429 {
-		return rateLimitBackoff(min, max, attemptNum, resp)
-	}
-
-	// Set custom durations when we experience a service interruption.
-	min = 700 * time.Millisecond
-	max = 900 * time.Millisecond
-
-	return retryablehttp.LinearJitterBackoff(min, max, attemptNum, resp)
-}
-
-// rateLimitBackoff provides a callback for Client.Backoff which will use the
-// RateLimit-Reset header to determine the time to wait. We add some jitter
-// to prevent a thundering herd.
-//
-// min and max are mainly used for bounding the jitter that will be added to
-// the reset time retrieved from the headers. But if the final wait time is
-// less than min, min will be used instead.
-func rateLimitBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration {
-	// rnd is used to generate pseudo-random numbers.
-	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
-
-	// First create some jitter bounded by the min and max durations.
-	jitter := time.Duration(rnd.Float64() * float64(max-min))
-
-	if resp != nil {
-		if v := resp.Header.Get(headerRateReset); v != "" {
-			if reset, _ := strconv.ParseInt(v, 10, 64); reset > 0 {
-				// Only update min if the given time to wait is longer.
-				if wait := time.Until(time.Unix(reset, 0)); wait > min {
-					min = wait
-				}
-			}
-		} else {
-			// In case the RateLimit-Reset header is not set, back off an additional
-			// 100% exponentially. With the default milliseconds being set to 100 for
-			// `min`, this makes the 5th retry wait 3.2 seconds (3,200 ms) by default.
-			min = time.Duration(float64(min) * math.Pow(2, float64(attemptNum)))
-		}
-	}
-
-	return min + jitter
-}
-
-// configureLimiter configures the rate limiter.
-func (c *Client) configureLimiter(ctx context.Context, headers http.Header) {
-	if v := headers.Get(headerRateLimit); v != "" {
-		if rateLimit, _ := strconv.ParseFloat(v, 64); rateLimit > 0 {
-			// The rate limit is based on requests per minute, so for our limiter to
-			// work correctly we divide the limit by 60 to get the limit per second.
-			rateLimit /= 60
-
-			// Configure the limit and burst using a split of 2/3 for the limit and
-			// 1/3 for the burst. This enables clients to burst 1/3 of the allowed
-			// calls before the limiter kicks in. The remaining calls will then be
-			// spread out evenly using intervals of time.Second / limit which should
-			// prevent hitting the rate limit.
-			limit := rate.Limit(rateLimit * 0.66)
-			burst := int(rateLimit * 0.33)
-
-			// Need at least one allowed to burst or x/time will throw an error.
-			if burst == 0 {
-				burst = 1
-			}
-
-			// Create a new limiter using the calculated values.
-			c.limiter = rate.NewLimiter(limit, burst)
-
-			// Call the limiter once as we have already made a request
-			// to get the headers and the limiter is not aware of this.
-			c.limiter.Wait(ctx)
-		}
-	}
-}
-
-// BaseURL returns a copy of the baseURL.
-func (c *Client) BaseURL() *url.URL { - u := *c.baseURL - return &u -} - -// setBaseURL sets the base URL for API requests to a custom endpoint. -func (c *Client) setBaseURL(urlStr string) error { - // Make sure the given URL end with a slash - if !strings.HasSuffix(urlStr, "/") { - urlStr += "/" - } - - baseURL, err := url.Parse(urlStr) - if err != nil { - return err - } - - if !strings.HasSuffix(baseURL.Path, apiVersionPath) { - baseURL.Path += apiVersionPath - } - - // Update the base URL of the client. - c.baseURL = baseURL - - return nil -} - -// NewRequest creates a new API request. The method expects a relative URL -// path that will be resolved relative to the base URL of the Client. -// Relative URL paths should always be specified without a preceding slash. -// If specified, the value pointed to by body is JSON encoded and included -// as the request body. -func (c *Client) NewRequest(method, path string, opt interface{}, options []RequestOptionFunc) (*retryablehttp.Request, error) { - u := *c.baseURL - unescaped, err := url.PathUnescape(path) - if err != nil { - return nil, err - } - - // Set the encoded path data - u.RawPath = c.baseURL.Path + path - u.Path = c.baseURL.Path + unescaped - - // Create a request specific headers map. - reqHeaders := make(http.Header) - reqHeaders.Set("Accept", "application/json") - - if c.UserAgent != "" { - reqHeaders.Set("User-Agent", c.UserAgent) - } - - var body interface{} - switch { - case method == http.MethodPatch || method == http.MethodPost || method == http.MethodPut: - reqHeaders.Set("Content-Type", "application/json") - - if opt != nil { - body, err = json.Marshal(opt) - if err != nil { - return nil, err - } - } - case opt != nil: - q, err := query.Values(opt) - if err != nil { - return nil, err - } - u.RawQuery = q.Encode() - } - - req, err := retryablehttp.NewRequest(method, u.String(), body) - if err != nil { - return nil, err - } - - for _, fn := range append(c.defaultRequestOptions, options...) { - if fn == nil { - continue - } - if err := fn(req); err != nil { - return nil, err - } - } - - // Set the request specific headers. - for k, v := range reqHeaders { - req.Header[k] = v - } - - return req, nil -} - -// UploadRequest creates an API request for uploading a file. The method -// expects a relative URL path that will be resolved relative to the base -// URL of the Client. Relative URL paths should always be specified without -// a preceding slash. If specified, the value pointed to by body is JSON -// encoded and included as the request body. -func (c *Client) UploadRequest(method, path string, content io.Reader, filename string, uploadType UploadType, opt interface{}, options []RequestOptionFunc) (*retryablehttp.Request, error) { - u := *c.baseURL - unescaped, err := url.PathUnescape(path) - if err != nil { - return nil, err - } - - // Set the encoded path data - u.RawPath = c.baseURL.Path + path - u.Path = c.baseURL.Path + unescaped - - // Create a request specific headers map. 
- reqHeaders := make(http.Header) - reqHeaders.Set("Accept", "application/json") - - if c.UserAgent != "" { - reqHeaders.Set("User-Agent", c.UserAgent) - } - - b := new(bytes.Buffer) - w := multipart.NewWriter(b) - - fw, err := w.CreateFormFile(string(uploadType), filename) - if err != nil { - return nil, err - } - - if _, err := io.Copy(fw, content); err != nil { - return nil, err - } - - if opt != nil { - fields, err := query.Values(opt) - if err != nil { - return nil, err - } - for name := range fields { - if err = w.WriteField(name, fmt.Sprintf("%v", fields.Get(name))); err != nil { - return nil, err - } - } - } - - if err = w.Close(); err != nil { - return nil, err - } - - reqHeaders.Set("Content-Type", w.FormDataContentType()) - - req, err := retryablehttp.NewRequest(method, u.String(), b) - if err != nil { - return nil, err - } - - for _, fn := range append(c.defaultRequestOptions, options...) { - if fn == nil { - continue - } - if err := fn(req); err != nil { - return nil, err - } - } - - // Set the request specific headers. - for k, v := range reqHeaders { - req.Header[k] = v - } - - return req, nil -} - -// Response is a GitLab API response. This wraps the standard http.Response -// returned from GitLab and provides convenient access to things like -// pagination links. -type Response struct { - *http.Response - - // Fields used for offset-based pagination. - TotalItems int - TotalPages int - ItemsPerPage int - CurrentPage int - NextPage int - PreviousPage int - - // Fields used for keyset-based pagination. - PreviousLink string - NextLink string - FirstLink string - LastLink string -} - -// newResponse creates a new Response for the provided http.Response. -func newResponse(r *http.Response) *Response { - response := &Response{Response: r} - response.populatePageValues() - response.populateLinkValues() - return response -} - -const ( - // Headers used for offset-based pagination. - xTotal = "X-Total" - xTotalPages = "X-Total-Pages" - xPerPage = "X-Per-Page" - xPage = "X-Page" - xNextPage = "X-Next-Page" - xPrevPage = "X-Prev-Page" - - // Headers used for keyset-based pagination. - linkPrev = "prev" - linkNext = "next" - linkFirst = "first" - linkLast = "last" -) - -// populatePageValues parses the HTTP Link response headers and populates the -// various pagination link values in the Response. 
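
The Response fields above are what callers use to walk offset-based pages; a hedged sketch of the usual loop, using ListProjects as an arbitrary paginated endpoint and a placeholder token:

	package main

	import (
		"fmt"
		"log"

		gitlab "github.com/xanzy/go-gitlab"
	)

	func main() {
		git, err := gitlab.NewClient("glpat-example") // placeholder token
		if err != nil {
			log.Fatal(err)
		}

		opt := &gitlab.ListProjectsOptions{
			ListOptions: gitlab.ListOptions{PerPage: 50, Page: 1},
		}
		for {
			projects, resp, err := git.Projects.ListProjects(opt)
			if err != nil {
				log.Fatal(err)
			}
			fmt.Printf("page %d/%d: %d projects\n",
				resp.CurrentPage, resp.TotalPages, len(projects))

			// NextPage is zero once the X-Next-Page header comes back empty.
			if resp.NextPage == 0 {
				break
			}
			opt.Page = resp.NextPage
		}
	}
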
-func (r *Response) populatePageValues() { - if totalItems := r.Header.Get(xTotal); totalItems != "" { - r.TotalItems, _ = strconv.Atoi(totalItems) - } - if totalPages := r.Header.Get(xTotalPages); totalPages != "" { - r.TotalPages, _ = strconv.Atoi(totalPages) - } - if itemsPerPage := r.Header.Get(xPerPage); itemsPerPage != "" { - r.ItemsPerPage, _ = strconv.Atoi(itemsPerPage) - } - if currentPage := r.Header.Get(xPage); currentPage != "" { - r.CurrentPage, _ = strconv.Atoi(currentPage) - } - if nextPage := r.Header.Get(xNextPage); nextPage != "" { - r.NextPage, _ = strconv.Atoi(nextPage) - } - if previousPage := r.Header.Get(xPrevPage); previousPage != "" { - r.PreviousPage, _ = strconv.Atoi(previousPage) - } -} - -func (r *Response) populateLinkValues() { - if link := r.Header.Get("Link"); link != "" { - for _, link := range strings.Split(link, ",") { - parts := strings.Split(link, ";") - if len(parts) < 2 { - continue - } - - linkType := strings.Trim(strings.Split(parts[1], "=")[1], "\"") - linkValue := strings.Trim(parts[0], "< >") - - switch linkType { - case linkPrev: - r.PreviousLink = linkValue - case linkNext: - r.NextLink = linkValue - case linkFirst: - r.FirstLink = linkValue - case linkLast: - r.LastLink = linkValue - } - } - } -} - -// Do sends an API request and returns the API response. The API response is -// JSON decoded and stored in the value pointed to by v, or returned as an -// error if an API error has occurred. If v implements the io.Writer -// interface, the raw response body will be written to v, without attempting to -// first decode it. -func (c *Client) Do(req *retryablehttp.Request, v interface{}) (*Response, error) { - // Wait will block until the limiter can obtain a new token. - err := c.limiter.Wait(req.Context()) - if err != nil { - return nil, err - } - - // Set the correct authentication header. If using basic auth, then check - // if we already have a token and if not first authenticate and get one. - var basicAuthToken string - switch c.authType { - case BasicAuth: - c.tokenLock.RLock() - basicAuthToken = c.token - c.tokenLock.RUnlock() - if basicAuthToken == "" { - // If we don't have a token yet, we first need to request one. - basicAuthToken, err = c.requestOAuthToken(req.Context(), basicAuthToken) - if err != nil { - return nil, err - } - } - req.Header.Set("Authorization", "Bearer "+basicAuthToken) - case JobToken: - if values := req.Header.Values("JOB-TOKEN"); len(values) == 0 { - req.Header.Set("JOB-TOKEN", c.token) - } - case OAuthToken: - if values := req.Header.Values("Authorization"); len(values) == 0 { - req.Header.Set("Authorization", "Bearer "+c.token) - } - case PrivateToken: - if values := req.Header.Values("PRIVATE-TOKEN"); len(values) == 0 { - req.Header.Set("PRIVATE-TOKEN", c.token) - } - } - - resp, err := c.client.Do(req) - if err != nil { - return nil, err - } - - if resp.StatusCode == http.StatusUnauthorized && c.authType == BasicAuth { - resp.Body.Close() - // The token most likely expired, so we need to request a new one and try again. - if _, err := c.requestOAuthToken(req.Context(), basicAuthToken); err != nil { - return nil, err - } - return c.Do(req, v) - } - defer resp.Body.Close() - defer io.Copy(io.Discard, resp.Body) - - // If not yet configured, try to configure the rate limiter - // using the response headers we just received. Fail silently - // so the limiter will remain disabled in case of an error. 
- c.configureLimiterOnce.Do(func() { c.configureLimiter(req.Context(), resp.Header) }) - - response := newResponse(resp) - - err = CheckResponse(resp) - if err != nil { - // Even though there was an error, we still return the response - // in case the caller wants to inspect it further. - return response, err - } - - if v != nil { - if w, ok := v.(io.Writer); ok { - _, err = io.Copy(w, resp.Body) - } else { - err = json.NewDecoder(resp.Body).Decode(v) - } - } - - return response, err -} - -func (c *Client) requestOAuthToken(ctx context.Context, token string) (string, error) { - c.tokenLock.Lock() - defer c.tokenLock.Unlock() - - // Return early if the token was updated while waiting for the lock. - if c.token != token { - return c.token, nil - } - - config := &oauth2.Config{ - Endpoint: oauth2.Endpoint{ - AuthURL: strings.TrimSuffix(c.baseURL.String(), apiVersionPath) + "oauth/authorize", - TokenURL: strings.TrimSuffix(c.baseURL.String(), apiVersionPath) + "oauth/token", - }, - } - - ctx = context.WithValue(ctx, oauth2.HTTPClient, c.client.HTTPClient) - t, err := config.PasswordCredentialsToken(ctx, c.username, c.password) - if err != nil { - return "", err - } - c.token = t.AccessToken - - return c.token, nil -} - -// Helper function to accept and format both the project ID or name as project -// identifier for all API calls. -func parseID(id interface{}) (string, error) { - switch v := id.(type) { - case int: - return strconv.Itoa(v), nil - case string: - return v, nil - default: - return "", fmt.Errorf("invalid ID type %#v, the ID must be an int or a string", id) - } -} - -// Helper function to escape a project identifier. -func PathEscape(s string) string { - return strings.ReplaceAll(url.PathEscape(s), ".", "%2E") -} - -// An ErrorResponse reports one or more errors caused by an API request. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/index.html#data-validation-and-error-reporting -type ErrorResponse struct { - Body []byte - Response *http.Response - Message string -} - -func (e *ErrorResponse) Error() string { - path, _ := url.QueryUnescape(e.Response.Request.URL.Path) - url := fmt.Sprintf("%s://%s%s", e.Response.Request.URL.Scheme, e.Response.Request.URL.Host, path) - - if e.Message == "" { - return fmt.Sprintf("%s %s: %d", e.Response.Request.Method, url, e.Response.StatusCode) - } else { - return fmt.Sprintf("%s %s: %d %s", e.Response.Request.Method, url, e.Response.StatusCode, e.Message) - } -} - -// CheckResponse checks the API response for errors, and returns them if present. -func CheckResponse(r *http.Response) error { - switch r.StatusCode { - case 200, 201, 202, 204, 304: - return nil - case 404: - return ErrNotFound - } - - errorResponse := &ErrorResponse{Response: r} - - data, err := io.ReadAll(r.Body) - if err == nil && strings.TrimSpace(string(data)) != "" { - errorResponse.Body = data - - var raw interface{} - if err := json.Unmarshal(data, &raw); err != nil { - errorResponse.Message = fmt.Sprintf("failed to parse unknown error format: %s", data) - } else { - errorResponse.Message = parseError(raw) - } - } - - return errorResponse -} - -// Format: -// -// { -// "message": { -// "": [ -// "", -// "", -// ... -// ], -// "": { -// "": [ -// "", -// "", -// ... 
-// ], -// } -// }, -// "error": "" -// } -func parseError(raw interface{}) string { - switch raw := raw.(type) { - case string: - return raw - - case []interface{}: - var errs []string - for _, v := range raw { - errs = append(errs, parseError(v)) - } - return fmt.Sprintf("[%s]", strings.Join(errs, ", ")) - - case map[string]interface{}: - var errs []string - for k, v := range raw { - errs = append(errs, fmt.Sprintf("{%s: %s}", k, parseError(v))) - } - sort.Strings(errs) - return strings.Join(errs, ", ") - - default: - return fmt.Sprintf("failed to parse unexpected error type: %T", raw) - } -} diff --git a/vendor/github.com/xanzy/go-gitlab/group_access_tokens.go b/vendor/github.com/xanzy/go-gitlab/group_access_tokens.go deleted file mode 100644 index ccbe47b838..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/group_access_tokens.go +++ /dev/null @@ -1,199 +0,0 @@ -// -// Copyright 2022, Masahiro Yoshida -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// GroupAccessTokensService handles communication with the -// groups access tokens related methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/group_access_tokens.html -type GroupAccessTokensService struct { - client *Client -} - -// GroupAccessToken represents a GitLab group access token. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/group_access_tokens.html -type GroupAccessToken struct { - ID int `json:"id"` - UserID int `json:"user_id"` - Name string `json:"name"` - Scopes []string `json:"scopes"` - CreatedAt *time.Time `json:"created_at"` - ExpiresAt *ISOTime `json:"expires_at"` - LastUsedAt *time.Time `json:"last_used_at"` - Active bool `json:"active"` - Revoked bool `json:"revoked"` - Token string `json:"token"` - AccessLevel AccessLevelValue `json:"access_level"` -} - -func (v GroupAccessToken) String() string { - return Stringify(v) -} - -// ListGroupAccessTokensOptions represents the available options for -// listing variables in a group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_access_tokens.html#list-group-access-tokens -type ListGroupAccessTokensOptions ListOptions - -// ListGroupAccessTokens gets a list of all group access tokens in a group. 
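
Since CheckResponse above maps plain 404s to ErrNotFound and everything else to *ErrorResponse with a parsed message, a caller distinguishes the two roughly like this (placeholder token and project path):

	package main

	import (
		"errors"
		"fmt"
		"log"

		gitlab "github.com/xanzy/go-gitlab"
	)

	func main() {
		git, err := gitlab.NewClient("glpat-example") // placeholder token
		if err != nil {
			log.Fatal(err)
		}

		// "some/project" is a placeholder path; GetProject accepts an int ID
		// or a string path, which parseID (above) normalizes.
		_, _, err = git.Projects.GetProject("some/project", nil)
		switch {
		case errors.Is(err, gitlab.ErrNotFound):
			// CheckResponse turns plain 404 responses into ErrNotFound.
			fmt.Println("project does not exist")
		case err != nil:
			// Other failures surface as *ErrorResponse carrying the parsed body.
			var errResp *gitlab.ErrorResponse
			if errors.As(err, &errResp) {
				fmt.Println("API error:", errResp.Message)
			} else {
				log.Fatal(err)
			}
		}
	}
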
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/group_access_tokens.html#list-group-access-tokens
-func (s *GroupAccessTokensService) ListGroupAccessTokens(gid interface{}, opt *ListGroupAccessTokensOptions, options ...RequestOptionFunc) ([]*GroupAccessToken, *Response, error) {
-	groups, err := parseID(gid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("groups/%s/access_tokens", PathEscape(groups))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var gats []*GroupAccessToken
-	resp, err := s.client.Do(req, &gats)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return gats, resp, nil
-}
-
-// GetGroupAccessToken gets a single group access token in a group.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/group_access_tokens.html#get-a-group-access-token
-func (s *GroupAccessTokensService) GetGroupAccessToken(gid interface{}, id int, options ...RequestOptionFunc) (*GroupAccessToken, *Response, error) {
-	groups, err := parseID(gid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("groups/%s/access_tokens/%d", PathEscape(groups), id)
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	gat := new(GroupAccessToken)
-	resp, err := s.client.Do(req, &gat)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return gat, resp, nil
-}
-
-// CreateGroupAccessTokenOptions represents the available
-// CreateGroupAccessToken() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/group_access_tokens.html#create-a-group-access-token
-type CreateGroupAccessTokenOptions struct {
-	Name        *string           `url:"name,omitempty" json:"name,omitempty"`
-	Scopes      *[]string         `url:"scopes,omitempty" json:"scopes,omitempty"`
-	AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"`
-	ExpiresAt   *ISOTime          `url:"expires_at,omitempty" json:"expires_at,omitempty"`
-}
-
-// CreateGroupAccessToken creates a new group access token.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/group_access_tokens.html#create-a-group-access-token
-func (s *GroupAccessTokensService) CreateGroupAccessToken(gid interface{}, opt *CreateGroupAccessTokenOptions, options ...RequestOptionFunc) (*GroupAccessToken, *Response, error) {
-	groups, err := parseID(gid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("groups/%s/access_tokens", PathEscape(groups))
-
-	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	pat := new(GroupAccessToken)
-	resp, err := s.client.Do(req, pat)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return pat, resp, nil
-}
-
-// RotateGroupAccessTokenOptions represents the available RotateGroupAccessToken()
-// options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/group_access_tokens.html#rotate-a-group-access-token
-type RotateGroupAccessTokenOptions struct {
-	ExpiresAt *ISOTime `url:"expires_at,omitempty" json:"expires_at,omitempty"`
-}
-
-// RotateGroupAccessToken revokes a group access token and returns a new group
-// access token that expires in one week by default.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_access_tokens.html#rotate-a-group-access-token -func (s *GroupAccessTokensService) RotateGroupAccessToken(gid interface{}, id int, opt *RotateGroupAccessTokenOptions, options ...RequestOptionFunc) (*GroupAccessToken, *Response, error) { - groups, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/access_tokens/%d/rotate", PathEscape(groups), id) - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - gat := new(GroupAccessToken) - resp, err := s.client.Do(req, gat) - if err != nil { - return nil, resp, err - } - - return gat, resp, nil -} - -// RevokeGroupAccessToken revokes a group access token. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_access_tokens.html#revoke-a-group-access-token -func (s *GroupAccessTokensService) RevokeGroupAccessToken(gid interface{}, id int, options ...RequestOptionFunc) (*Response, error) { - groups, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/access_tokens/%d", PathEscape(groups), id) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/group_badges.go b/vendor/github.com/xanzy/go-gitlab/group_badges.go deleted file mode 100644 index c648a74432..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/group_badges.go +++ /dev/null @@ -1,237 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" -) - -// GroupBadgesService handles communication with the group badges -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_badges.html -type GroupBadgesService struct { - client *Client -} - -// BadgeKind represents a GitLab Badge Kind -type BadgeKind string - -// all possible values Badge Kind -const ( - ProjectBadgeKind BadgeKind = "project" - GroupBadgeKind BadgeKind = "group" -) - -// GroupBadge represents a group badge. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_badges.html -type GroupBadge struct { - ID int `json:"id"` - Name string `json:"name"` - LinkURL string `json:"link_url"` - ImageURL string `json:"image_url"` - RenderedLinkURL string `json:"rendered_link_url"` - RenderedImageURL string `json:"rendered_image_url"` - Kind BadgeKind `json:"kind"` -} - -// ListGroupBadgesOptions represents the available ListGroupBadges() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_badges.html#list-all-badges-of-a-group -type ListGroupBadgesOptions struct { - ListOptions - Name *string `url:"name,omitempty" json:"name,omitempty"` -} - -// ListGroupBadges gets a list of a group badges. 
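
Taken together, the functions above cover the full lifecycle of a group access token; a minimal sketch, assuming a placeholder group path and the module's generic Ptr helper for the pointer-typed option fields:

	package main

	import (
		"fmt"
		"log"

		gitlab "github.com/xanzy/go-gitlab"
	)

	func main() {
		git, err := gitlab.NewClient("glpat-example") // placeholder token
		if err != nil {
			log.Fatal(err)
		}

		// Create a read-only token on a placeholder group.
		token, _, err := git.GroupAccessTokens.CreateGroupAccessToken("my-group",
			&gitlab.CreateGroupAccessTokenOptions{
				Name:        gitlab.Ptr("ci-read"),
				Scopes:      gitlab.Ptr([]string{"read_api"}),
				AccessLevel: gitlab.Ptr(gitlab.ReporterPermissions),
			})
		if err != nil {
			log.Fatal(err)
		}

		// Rotating revokes the old token and returns a replacement that
		// expires in one week by default.
		rotated, _, err := git.GroupAccessTokens.RotateGroupAccessToken("my-group", token.ID, nil)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("new token expires:", rotated.ExpiresAt)

		// Revoke when no longer needed.
		if _, err := git.GroupAccessTokens.RevokeGroupAccessToken("my-group", rotated.ID); err != nil {
			log.Fatal(err)
		}
	}
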
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_badges.html#list-all-badges-of-a-group -func (s *GroupBadgesService) ListGroupBadges(gid interface{}, opt *ListGroupBadgesOptions, options ...RequestOptionFunc) ([]*GroupBadge, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/badges", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var gb []*GroupBadge - resp, err := s.client.Do(req, &gb) - if err != nil { - return nil, resp, err - } - - return gb, resp, nil -} - -// GetGroupBadge gets a group badge. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_badges.html#get-a-badge-of-a-group -func (s *GroupBadgesService) GetGroupBadge(gid interface{}, badge int, options ...RequestOptionFunc) (*GroupBadge, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/badges/%d", PathEscape(group), badge) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - gb := new(GroupBadge) - resp, err := s.client.Do(req, gb) - if err != nil { - return nil, resp, err - } - - return gb, resp, nil -} - -// AddGroupBadgeOptions represents the available AddGroupBadge() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_badges.html#add-a-badge-to-a-group -type AddGroupBadgeOptions struct { - LinkURL *string `url:"link_url,omitempty" json:"link_url,omitempty"` - ImageURL *string `url:"image_url,omitempty" json:"image_url,omitempty"` - Name *string `url:"name,omitempty" json:"name,omitempty"` -} - -// AddGroupBadge adds a badge to a group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_badges.html#add-a-badge-to-a-group -func (s *GroupBadgesService) AddGroupBadge(gid interface{}, opt *AddGroupBadgeOptions, options ...RequestOptionFunc) (*GroupBadge, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/badges", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - gb := new(GroupBadge) - resp, err := s.client.Do(req, gb) - if err != nil { - return nil, resp, err - } - - return gb, resp, nil -} - -// EditGroupBadgeOptions represents the available EditGroupBadge() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_badges.html#edit-a-badge-of-a-group -type EditGroupBadgeOptions struct { - LinkURL *string `url:"link_url,omitempty" json:"link_url,omitempty"` - ImageURL *string `url:"image_url,omitempty" json:"image_url,omitempty"` - Name *string `url:"name,omitempty" json:"name,omitempty"` -} - -// EditGroupBadge updates a badge of a group. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_badges.html#edit-a-badge-of-a-group -func (s *GroupBadgesService) EditGroupBadge(gid interface{}, badge int, opt *EditGroupBadgeOptions, options ...RequestOptionFunc) (*GroupBadge, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/badges/%d", PathEscape(group), badge) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - gb := new(GroupBadge) - resp, err := s.client.Do(req, gb) - if err != nil { - return nil, resp, err - } - - return gb, resp, nil -} - -// DeleteGroupBadge removes a badge from a group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_badges.html#remove-a-badge-from-a-group -func (s *GroupBadgesService) DeleteGroupBadge(gid interface{}, badge int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/badges/%d", PathEscape(group), badge) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// GroupBadgePreviewOptions represents the available PreviewGroupBadge() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_badges.html#preview-a-badge-from-a-group -type GroupBadgePreviewOptions struct { - LinkURL *string `url:"link_url,omitempty" json:"link_url,omitempty"` - ImageURL *string `url:"image_url,omitempty" json:"image_url,omitempty"` - Name *string `url:"name,omitempty" json:"name,omitempty"` -} - -// PreviewGroupBadge returns how the link_url and image_url final URLs would be after -// resolving the placeholder interpolation. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_badges.html#preview-a-badge-from-a-group -func (s *GroupBadgesService) PreviewGroupBadge(gid interface{}, opt *GroupBadgePreviewOptions, options ...RequestOptionFunc) (*GroupBadge, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/badges/render", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - gb := new(GroupBadge) - resp, err := s.client.Do(req, &gb) - if err != nil { - return nil, resp, err - } - - return gb, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/group_boards.go b/vendor/github.com/xanzy/go-gitlab/group_boards.go deleted file mode 100644 index ed9f8d5169..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/group_boards.go +++ /dev/null @@ -1,353 +0,0 @@ -// -// Copyright 2021, Patrick Webster -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" -) - -// GroupIssueBoardsService handles communication with the group issue board -// related methods of the GitLab API. 
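
A short sketch of how the badge API above fits together: previewing the placeholder interpolation first, then persisting the badge. The group path, URLs, and the %{project_path} placeholder are illustrative, and Ptr is assumed from the module's helpers:

	package main

	import (
		"fmt"
		"log"

		gitlab "github.com/xanzy/go-gitlab"
	)

	func main() {
		git, err := gitlab.NewClient("glpat-example") // placeholder token
		if err != nil {
			log.Fatal(err)
		}

		opt := &gitlab.GroupBadgePreviewOptions{
			LinkURL:  gitlab.Ptr("https://gitlab.example.com/%{project_path}"),
			ImageURL: gitlab.Ptr("https://img.example.com/badge.svg"),
		}

		// PreviewGroupBadge resolves the %{...} placeholders without saving.
		preview, _, err := git.GroupBadges.PreviewGroupBadge("my-group", opt)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("rendered link:", preview.RenderedLinkURL)

		// AddGroupBadge persists the badge using the same URL fields.
		badge, _, err := git.GroupBadges.AddGroupBadge("my-group", &gitlab.AddGroupBadgeOptions{
			LinkURL:  opt.LinkURL,
			ImageURL: opt.ImageURL,
			Name:     gitlab.Ptr("pipeline"),
		})
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("badge id:", badge.ID)
	}
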
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_boards.html -type GroupIssueBoardsService struct { - client *Client -} - -// GroupIssueBoard represents a GitLab group issue board. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_boards.html -type GroupIssueBoard struct { - ID int `json:"id"` - Name string `json:"name"` - Group *Group `json:"group"` - Milestone *Milestone `json:"milestone"` - Labels []*GroupLabel `json:"labels"` - Lists []*BoardList `json:"lists"` -} - -func (b GroupIssueBoard) String() string { - return Stringify(b) -} - -// ListGroupIssueBoardsOptions represents the available -// ListGroupIssueBoards() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_boards.html#list-all-group-issue-boards-in-a-group -type ListGroupIssueBoardsOptions ListOptions - -// ListGroupIssueBoards gets a list of all issue boards in a group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_boards.html#list-all-group-issue-boards-in-a-group -func (s *GroupIssueBoardsService) ListGroupIssueBoards(gid interface{}, opt *ListGroupIssueBoardsOptions, options ...RequestOptionFunc) ([]*GroupIssueBoard, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/boards", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var gs []*GroupIssueBoard - resp, err := s.client.Do(req, &gs) - if err != nil { - return nil, resp, err - } - - return gs, resp, nil -} - -// CreateGroupIssueBoardOptions represents the available -// CreateGroupIssueBoard() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_boards.html#create-a-group-issue-board -type CreateGroupIssueBoardOptions struct { - Name *string `url:"name" json:"name"` -} - -// CreateGroupIssueBoard creates a new issue board. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_boards.html#create-a-group-issue-board -func (s *GroupIssueBoardsService) CreateGroupIssueBoard(gid interface{}, opt *CreateGroupIssueBoardOptions, options ...RequestOptionFunc) (*GroupIssueBoard, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/boards", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - gib := new(GroupIssueBoard) - resp, err := s.client.Do(req, gib) - if err != nil { - return nil, resp, err - } - - return gib, resp, nil -} - -// GetGroupIssueBoard gets a single issue board of a group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_boards.html#single-group-issue-board -func (s *GroupIssueBoardsService) GetGroupIssueBoard(gid interface{}, board int, options ...RequestOptionFunc) (*GroupIssueBoard, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/boards/%d", PathEscape(group), board) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - gib := new(GroupIssueBoard) - resp, err := s.client.Do(req, gib) - if err != nil { - return nil, resp, err - } - - return gib, resp, nil -} - -// UpdateGroupIssueBoardOptions represents a group issue board. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_boards.html#update-a-group-issue-board -type UpdateGroupIssueBoardOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` - MilestoneID *int `url:"milestone_id,omitempty" json:"milestone_id,omitempty"` - Labels *LabelOptions `url:"labels,omitempty" json:"labels,omitempty"` - Weight *int `url:"weight,omitempty" json:"weight,omitempty"` -} - -// UpdateIssueBoard updates a single issue board of a group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_boards.html#update-a-group-issue-board -func (s *GroupIssueBoardsService) UpdateIssueBoard(gid interface{}, board int, opt *UpdateGroupIssueBoardOptions, options ...RequestOptionFunc) (*GroupIssueBoard, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/boards/%d", PathEscape(group), board) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - gib := new(GroupIssueBoard) - resp, err := s.client.Do(req, gib) - if err != nil { - return nil, resp, err - } - - return gib, resp, nil -} - -// DeleteIssueBoard delete a single issue board of a group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_boards.html#delete-a-group-issue-board -func (s *GroupIssueBoardsService) DeleteIssueBoard(gid interface{}, board int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/boards/%d", PathEscape(group), board) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// ListGroupIssueBoardListsOptions represents the available -// ListGroupIssueBoardLists() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_boards.html#list-group-issue-board-lists -type ListGroupIssueBoardListsOptions ListOptions - -// ListGroupIssueBoardLists gets a list of the issue board's lists. Does not include -// backlog and closed lists. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/group_boards.html#list-group-issue-board-lists -func (s *GroupIssueBoardsService) ListGroupIssueBoardLists(gid interface{}, board int, opt *ListGroupIssueBoardListsOptions, options ...RequestOptionFunc) ([]*BoardList, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/boards/%d/lists", PathEscape(group), board) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var gbl []*BoardList - resp, err := s.client.Do(req, &gbl) - if err != nil { - return nil, resp, err - } - - return gbl, resp, nil -} - -// GetGroupIssueBoardList gets a single issue board list. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_boards.html#single-group-issue-board-list -func (s *GroupIssueBoardsService) GetGroupIssueBoardList(gid interface{}, board, list int, options ...RequestOptionFunc) (*BoardList, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/boards/%d/lists/%d", - PathEscape(group), - board, - list, - ) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - gbl := new(BoardList) - resp, err := s.client.Do(req, gbl) - if err != nil { - return nil, resp, err - } - - return gbl, resp, nil -} - -// CreateGroupIssueBoardListOptions represents the available -// CreateGroupIssueBoardList() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_boards.html#new-group-issue-board-list -type CreateGroupIssueBoardListOptions struct { - LabelID *int `url:"label_id" json:"label_id"` -} - -// CreateGroupIssueBoardList creates a new issue board list. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_boards.html#new-group-issue-board-list -func (s *GroupIssueBoardsService) CreateGroupIssueBoardList(gid interface{}, board int, opt *CreateGroupIssueBoardListOptions, options ...RequestOptionFunc) (*BoardList, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/boards/%d/lists", PathEscape(group), board) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - gbl := new(BoardList) - resp, err := s.client.Do(req, gbl) - if err != nil { - return nil, resp, err - } - - return gbl, resp, nil -} - -// UpdateGroupIssueBoardListOptions represents the available -// UpdateGroupIssueBoardList() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_boards.html#edit-group-issue-board-list -type UpdateGroupIssueBoardListOptions struct { - Position *int `url:"position" json:"position"` -} - -// UpdateIssueBoardList updates the position of an existing -// group issue board list. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_boards.html#edit-group-issue-board-list -func (s *GroupIssueBoardsService) UpdateIssueBoardList(gid interface{}, board, list int, opt *UpdateGroupIssueBoardListOptions, options ...RequestOptionFunc) ([]*BoardList, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/boards/%d/lists/%d", - PathEscape(group), - board, - list, - ) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - var gbl []*BoardList - resp, err := s.client.Do(req, &gbl) - if err != nil { - return nil, resp, err - } - - return gbl, resp, nil -} - -// DeleteGroupIssueBoardList soft deletes a group issue board list. -// Only for admins and group owners. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_boards.html#delete-a-group-issue-board-list -func (s *GroupIssueBoardsService) DeleteGroupIssueBoardList(gid interface{}, board, list int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/boards/%d/lists/%d", - PathEscape(group), - board, - list, - ) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/group_clusters.go b/vendor/github.com/xanzy/go-gitlab/group_clusters.go deleted file mode 100644 index f459e1cc84..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/group_clusters.go +++ /dev/null @@ -1,217 +0,0 @@ -// -// Copyright 2021, Paul Shoemaker -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// GroupClustersService handles communication with the -// group clusters related methods of the GitLab API. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_clusters.html -type GroupClustersService struct { - client *Client -} - -// GroupCluster represents a GitLab Group Cluster. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/group_clusters.html -type GroupCluster struct { - ID int `json:"id"` - Name string `json:"name"` - Domain string `json:"domain"` - CreatedAt *time.Time `json:"created_at"` - Managed bool `json:"managed"` - Enabled bool `json:"enabled"` - ProviderType string `json:"provider_type"` - PlatformType string `json:"platform_type"` - EnvironmentScope string `json:"environment_scope"` - ClusterType string `json:"cluster_type"` - User *User `json:"user"` - PlatformKubernetes *PlatformKubernetes `json:"platform_kubernetes"` - ManagementProject *ManagementProject `json:"management_project"` - Group *Group `json:"group"` -} - -func (v GroupCluster) String() string { - return Stringify(v) -} - -// ListClusters gets a list of all clusters in a group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_clusters.html#list-group-clusters -func (s *GroupClustersService) ListClusters(pid interface{}, options ...RequestOptionFunc) ([]*GroupCluster, *Response, error) { - group, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/clusters", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var pcs []*GroupCluster - resp, err := s.client.Do(req, &pcs) - if err != nil { - return nil, resp, err - } - - return pcs, resp, nil -} - -// GetCluster gets a cluster. 
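// Illustrative sketch (editorial, outside the diff): driving the GroupIssueBoardsService
// removed above. The GroupIssueBoards client field and the label ID are assumptions;
// lists are label-backed, and the implicit backlog/closed lists are never returned
// by ListGroupIssueBoardLists.
package example

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func groupIssueBoards() {
	client, err := gitlab.NewClient("glpat-placeholder")
	if err != nil {
		log.Fatal(err)
	}

	board, _, err := client.GroupIssueBoards.CreateGroupIssueBoard("my-group", &gitlab.CreateGroupIssueBoardOptions{
		Name: gitlab.String("triage"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Each list is backed by a label; its position is managed via UpdateIssueBoardList.
	list, _, err := client.GroupIssueBoards.CreateGroupIssueBoardList("my-group", board.ID, &gitlab.CreateGroupIssueBoardListOptions{
		LabelID: gitlab.Int(123), // placeholder label ID
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created list", list.ID, "on board", board.ID)
}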
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_clusters.html#get-a-single-group-cluster -func (s *GroupClustersService) GetCluster(pid interface{}, cluster int, options ...RequestOptionFunc) (*GroupCluster, *Response, error) { - group, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/clusters/%d", PathEscape(group), cluster) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - gc := new(GroupCluster) - resp, err := s.client.Do(req, &gc) - if err != nil { - return nil, resp, err - } - - return gc, resp, nil -} - -// AddGroupClusterOptions represents the available AddCluster() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_clusters.html#add-existing-cluster-to-group -type AddGroupClusterOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - Domain *string `url:"domain,omitempty" json:"domain,omitempty"` - ManagementProjectID *string `url:"management_project_id,omitempty" json:"management_project_id,omitempty"` - Enabled *bool `url:"enabled,omitempty" json:"enabled,omitempty"` - Managed *bool `url:"managed,omitempty" json:"managed,omitempty"` - EnvironmentScope *string `url:"environment_scope,omitempty" json:"environment_scope,omitempty"` - PlatformKubernetes *AddGroupPlatformKubernetesOptions `url:"platform_kubernetes_attributes,omitempty" json:"platform_kubernetes_attributes,omitempty"` -} - -// AddGroupPlatformKubernetesOptions represents the available PlatformKubernetes options for adding. -type AddGroupPlatformKubernetesOptions struct { - APIURL *string `url:"api_url,omitempty" json:"api_url,omitempty"` - Token *string `url:"token,omitempty" json:"token,omitempty"` - CaCert *string `url:"ca_cert,omitempty" json:"ca_cert,omitempty"` - Namespace *string `url:"namespace,omitempty" json:"namespace,omitempty"` - AuthorizationType *string `url:"authorization_type,omitempty" json:"authorization_type,omitempty"` -} - -// AddCluster adds an existing cluster to the group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_clusters.html#add-existing-cluster-to-group -func (s *GroupClustersService) AddCluster(pid interface{}, opt *AddGroupClusterOptions, options ...RequestOptionFunc) (*GroupCluster, *Response, error) { - group, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/clusters/user", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - gc := new(GroupCluster) - resp, err := s.client.Do(req, gc) - if err != nil { - return nil, resp, err - } - - return gc, resp, nil -} - -// EditGroupClusterOptions represents the available EditCluster() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_clusters.html#edit-group-cluster -type EditGroupClusterOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - Domain *string `url:"domain,omitempty" json:"domain,omitempty"` - EnvironmentScope *string `url:"environment_scope,omitempty" json:"environment_scope,omitempty"` - PlatformKubernetes *EditGroupPlatformKubernetesOptions `url:"platform_kubernetes_attributes,omitempty" json:"platform_kubernetes_attributes,omitempty"` - ManagementProjectID *string `url:"management_project_id,omitempty" json:"management_project_id,omitempty"` -} - -// EditGroupPlatformKubernetesOptions represents the available PlatformKubernetes options for editing. 
-type EditGroupPlatformKubernetesOptions struct { - APIURL *string `url:"api_url,omitempty" json:"api_url,omitempty"` - Token *string `url:"token,omitempty" json:"token,omitempty"` - CaCert *string `url:"ca_cert,omitempty" json:"ca_cert,omitempty"` -} - -// EditCluster updates an existing group cluster. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_clusters.html#edit-group-cluster -func (s *GroupClustersService) EditCluster(pid interface{}, cluster int, opt *EditGroupClusterOptions, options ...RequestOptionFunc) (*GroupCluster, *Response, error) { - group, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/clusters/%d", PathEscape(group), cluster) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - gc := new(GroupCluster) - resp, err := s.client.Do(req, gc) - if err != nil { - return nil, resp, err - } - - return gc, resp, nil -} - -// DeleteCluster deletes an existing group cluster. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_clusters.html#delete-group-cluster -func (s *GroupClustersService) DeleteCluster(pid interface{}, cluster int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/clusters/%d", PathEscape(group), cluster) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/group_epic_boards.go b/vendor/github.com/xanzy/go-gitlab/group_epic_boards.go deleted file mode 100644 index fd8cfd86d2..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/group_epic_boards.go +++ /dev/null @@ -1,104 +0,0 @@ -// -// Copyright 2021, Patrick Webster -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" -) - -// GroupEpicBoardsService handles communication with the group epic board -// related methods of the GitLab API. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_epic_boards.html -type GroupEpicBoardsService struct { - client *Client -} - -// GroupEpicBoard represents a GitLab group epic board. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_epic_boards.html -type GroupEpicBoard struct { - ID int `json:"id"` - Name string `json:"name"` - Group *Group `json:"group"` - Labels []*LabelDetails `json:"labels"` - Lists []*BoardList `json:"lists"` -} - -func (b GroupEpicBoard) String() string { - return Stringify(b) -} - -// ListGroupEpicBoardsOptions represents the available -// ListGroupEpicBoards() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_epic_boards.html#list-all-epic-boards-in-a-group -type ListGroupEpicBoardsOptions ListOptions - -// ListGroupEpicBoards gets a list of all epic boards in a group. 
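// Illustrative sketch (editorial, outside the diff): registering an existing
// Kubernetes cluster through the GroupClustersService removed above. The
// GroupClusters client field name is an assumption (the wiring is not shown in
// this hunk), and every credential and URL is a placeholder.
package example

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func groupClusters() {
	client, err := gitlab.NewClient("glpat-placeholder")
	if err != nil {
		log.Fatal(err)
	}

	// AddCluster posts to groups/:id/clusters/user, i.e. user-managed clusters.
	cluster, _, err := client.GroupClusters.AddCluster("my-group", &gitlab.AddGroupClusterOptions{
		Name:             gitlab.String("prod"),
		EnvironmentScope: gitlab.String("*"),
		PlatformKubernetes: &gitlab.AddGroupPlatformKubernetesOptions{
			APIURL: gitlab.String("https://kube.example.com"),
			Token:  gitlab.String("kube-token-placeholder"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("added cluster", cluster.ID, cluster.ClusterType)
}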
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_epic_boards.html#list-all-epic-boards-in-a-group -func (s *GroupEpicBoardsService) ListGroupEpicBoards(gid interface{}, opt *ListGroupEpicBoardsOptions, options ...RequestOptionFunc) ([]*GroupEpicBoard, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epic_boards", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var gs []*GroupEpicBoard - resp, err := s.client.Do(req, &gs) - if err != nil { - return nil, resp, err - } - - return gs, resp, nil -} - -// GetGroupEpicBoard gets a single epic board of a group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_epic_boards.html#single-group-epic-board -func (s *GroupEpicBoardsService) GetGroupEpicBoard(gid interface{}, board int, options ...RequestOptionFunc) (*GroupEpicBoard, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epic_boards/%d", PathEscape(group), board) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - gib := new(GroupEpicBoard) - resp, err := s.client.Do(req, gib) - if err != nil { - return nil, resp, err - } - - return gib, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/group_hooks.go b/vendor/github.com/xanzy/go-gitlab/group_hooks.go deleted file mode 100644 index 414a8d0864..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/group_hooks.go +++ /dev/null @@ -1,268 +0,0 @@ -// -// Copyright 2021, Eric Stevens -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// GroupHook represents a GitLab group hook. 
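// Illustrative sketch (editorial, outside the diff): the read-only
// GroupEpicBoardsService removed above. The GroupEpicBoards client field is
// assumed; ListGroupEpicBoardsOptions is just ListOptions, so plain
// page/per_page pagination applies.
package example

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func groupEpicBoards() {
	client, err := gitlab.NewClient("glpat-placeholder")
	if err != nil {
		log.Fatal(err)
	}

	boards, _, err := client.GroupEpicBoards.ListGroupEpicBoards("my-group", &gitlab.ListGroupEpicBoardsOptions{
		PerPage: 20,
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range boards {
		fmt.Println(b.ID, b.Name, "lists:", len(b.Lists))
	}
}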
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#list-group-hooks -type GroupHook struct { - ID int `json:"id"` - URL string `json:"url"` - GroupID int `json:"group_id"` - PushEvents bool `json:"push_events"` - PushEventsBranchFilter string `json:"push_events_branch_filter"` - IssuesEvents bool `json:"issues_events"` - ConfidentialIssuesEvents bool `json:"confidential_issues_events"` - ConfidentialNoteEvents bool `json:"confidential_note_events"` - MergeRequestsEvents bool `json:"merge_requests_events"` - TagPushEvents bool `json:"tag_push_events"` - NoteEvents bool `json:"note_events"` - JobEvents bool `json:"job_events"` - PipelineEvents bool `json:"pipeline_events"` - WikiPageEvents bool `json:"wiki_page_events"` - DeploymentEvents bool `json:"deployment_events"` - ReleasesEvents bool `json:"releases_events"` - SubGroupEvents bool `json:"subgroup_events"` - MemberEvents bool `json:"member_events"` - EnableSSLVerification bool `json:"enable_ssl_verification"` - AlertStatus string `json:"alert_status"` - CreatedAt *time.Time `json:"created_at"` - CustomWebhookTemplate string `json:"custom_webhook_template"` - ResourceAccessTokenEvents bool `json:"resource_access_token_events"` - CustomHeaders []*HookCustomHeader `url:"custom_headers,omitempty" json:"custom_headers,omitempty"` -} - -// ListGroupHooksOptions represents the available ListGroupHooks() options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#list-group-hooks -type ListGroupHooksOptions ListOptions - -// ListGroupHooks gets a list of group hooks. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#list-group-hooks -func (s *GroupsService) ListGroupHooks(gid interface{}, opt *ListGroupHooksOptions, options ...RequestOptionFunc) ([]*GroupHook, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/hooks", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - var gh []*GroupHook - resp, err := s.client.Do(req, &gh) - if err != nil { - return nil, resp, err - } - - return gh, resp, nil -} - -// GetGroupHook gets a specific hook for a group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#get-group-hook -func (s *GroupsService) GetGroupHook(pid interface{}, hook int, options ...RequestOptionFunc) (*GroupHook, *Response, error) { - group, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/hooks/%d", PathEscape(group), hook) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - gh := new(GroupHook) - resp, err := s.client.Do(req, gh) - if err != nil { - return nil, resp, err - } - - return gh, resp, nil -} - -// AddGroupHookOptions represents the available AddGroupHook() options. 
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#add-group-hook -type AddGroupHookOptions struct { - URL *string `url:"url,omitempty" json:"url,omitempty"` - PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` - PushEventsBranchFilter *string `url:"push_events_branch_filter,omitempty" json:"push_events_branch_filter,omitempty"` - IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` - ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` - ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` - MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` - TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` - NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` - JobEvents *bool `url:"job_events,omitempty" json:"job_events,omitempty"` - PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` - WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` - DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"` - ReleasesEvents *bool `url:"releases_events,omitempty" json:"releases_events,omitempty"` - SubGroupEvents *bool `url:"subgroup_events,omitempty" json:"subgroup_events,omitempty"` - MemberEvents *bool `url:"member_events,omitempty" json:"member_events,omitempty"` - EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"` - Token *string `url:"token,omitempty" json:"token,omitempty"` - ResourceAccessTokenEvents *bool `url:"resource_access_token_events,omitempty" json:"resource_access_token_events,omitempty"` - CustomWebhookTemplate *string `url:"custom_webhook_template,omitempty" json:"custom_webhook_template,omitempty"` - CustomHeaders *[]*HookCustomHeader `url:"custom_headers,omitempty" json:"custom_headers,omitempty"` -} - -// AddGroupHook create a new group scoped webhook. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#add-group-hook -func (s *GroupsService) AddGroupHook(gid interface{}, opt *AddGroupHookOptions, options ...RequestOptionFunc) (*GroupHook, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/hooks", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - gh := new(GroupHook) - resp, err := s.client.Do(req, gh) - if err != nil { - return nil, resp, err - } - - return gh, resp, nil -} - -// EditGroupHookOptions represents the available EditGroupHook() options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#edit-group-hook -type EditGroupHookOptions struct { - URL *string `url:"url,omitempty" json:"url,omitempty"` - PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` - PushEventsBranchFilter *string `url:"push_events_branch_filter,omitempty" json:"push_events_branch_filter,omitempty"` - IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` - ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` - ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` - MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` - TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` - NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` - JobEvents *bool `url:"job_events,omitempty" json:"job_events,omitempty"` - PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` - WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` - DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"` - ReleasesEvents *bool `url:"releases_events,omitempty" json:"releases_events,omitempty"` - SubGroupEvents *bool `url:"subgroup_events,omitempty" json:"subgroup_events,omitempty"` - MemberEvents *bool `url:"member_events,omitempty" json:"member_events,omitempty"` - EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"` - Token *string `url:"token,omitempty" json:"token,omitempty"` - ResourceAccessTokenEvents *bool `url:"resource_access_token_events,omitempty" json:"resource_access_token_events,omitempty"` - CustomWebhookTemplate *string `url:"custom_webhook_template,omitempty" json:"custom_webhook_template,omitempty"` - CustomHeaders *[]*HookCustomHeader `url:"custom_headers,omitempty" json:"custom_headers,omitempty"` -} - -// EditGroupHook edits a hook for a specified group. -// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/groups.html#edit-group-hook -func (s *GroupsService) EditGroupHook(pid interface{}, hook int, opt *EditGroupHookOptions, options ...RequestOptionFunc) (*GroupHook, *Response, error) { - group, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/hooks/%d", PathEscape(group), hook) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - gh := new(GroupHook) - resp, err := s.client.Do(req, gh) - if err != nil { - return nil, resp, err - } - - return gh, resp, nil -} - -// DeleteGroupHook removes a hook from a group. This is an idempotent -// method and can be called multiple times. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#delete-group-hook -func (s *GroupsService) DeleteGroupHook(pid interface{}, hook int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/hooks/%d", PathEscape(group), hook) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// SetGroupCustomHeader creates or updates a group custom webhook header. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#set-a-custom-header -func (s *GroupsService) SetGroupCustomHeader(gid interface{}, hook int, key string, opt *SetHookCustomHeaderOptions, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/hooks/%d/custom_headers/%s", PathEscape(group), hook, key) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// DeleteGroupCustomHeader deletes a group custom webhook header. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#delete-a-custom-header -func (s *GroupsService) DeleteGroupCustomHeader(gid interface{}, hook int, key string, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/hooks/%d/custom_headers/%s", PathEscape(group), hook, key) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/group_import_export.go b/vendor/github.com/xanzy/go-gitlab/group_import_export.go deleted file mode 100644 index b35245ed49..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/group_import_export.go +++ /dev/null @@ -1,180 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "bytes" - "fmt" - "io" - "mime/multipart" - "net/http" - "os" - "path/filepath" - "strconv" -) - -// GroupImportExportService handles communication with the group import export -// related methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/group_import_export.html -type GroupImportExportService struct { - client *Client -} - -// ScheduleExport starts a new group export. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_import_export.html#schedule-new-export -func (s *GroupImportExportService) ScheduleExport(gid interface{}, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/export", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// ExportDownload downloads the finished export. 
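// Illustrative sketch (editorial, outside the diff): group webhooks. The methods
// removed above hang off GroupsService, so they are reached through client.Groups;
// the URL and secret token below are placeholders.
package example

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func groupHooks() {
	client, err := gitlab.NewClient("glpat-placeholder")
	if err != nil {
		log.Fatal(err)
	}

	hook, _, err := client.Groups.AddGroupHook("my-group", &gitlab.AddGroupHookOptions{
		URL:                   gitlab.String("https://ci.example.com/webhook"),
		PushEvents:            gitlab.Bool(true),
		MergeRequestsEvents:   gitlab.Bool(true),
		EnableSSLVerification: gitlab.Bool(true),
		Token:                 gitlab.String("shared-secret-placeholder"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("hook", hook.ID, "->", hook.URL)

	// DeleteGroupHook is documented as idempotent, so cleanup can be retried safely.
	if _, err := client.Groups.DeleteGroupHook("my-group", hook.ID); err != nil {
		log.Fatal(err)
	}
}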
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_import_export.html#export-download -func (s *GroupImportExportService) ExportDownload(gid interface{}, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/export/download", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - exportDownload := new(bytes.Buffer) - resp, err := s.client.Do(req, exportDownload) - if err != nil { - return nil, resp, err - } - - return bytes.NewReader(exportDownload.Bytes()), resp, err -} - -// GroupImportFileOptions represents the available ImportFile() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_import_export.html#import-a-file -type GroupImportFileOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - Path *string `url:"path,omitempty" json:"path,omitempty"` - File *string `url:"file,omitempty" json:"file,omitempty"` - ParentID *int `url:"parent_id,omitempty" json:"parent_id,omitempty"` -} - -// ImportFile imports a file. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_import_export.html#import-a-file -func (s *GroupImportExportService) ImportFile(opt *GroupImportFileOptions, options ...RequestOptionFunc) (*Response, error) { - // First check if we got all required options. - if opt.Name == nil || *opt.Name == "" { - return nil, fmt.Errorf("Missing required option: Name") - } - if opt.Path == nil || *opt.Path == "" { - return nil, fmt.Errorf("Missing required option: Path") - } - if opt.File == nil || *opt.File == "" { - return nil, fmt.Errorf("Missing required option: File") - } - - f, err := os.Open(*opt.File) - if err != nil { - return nil, err - } - defer f.Close() - - b := &bytes.Buffer{} - w := multipart.NewWriter(b) - - _, filename := filepath.Split(*opt.File) - fw, err := w.CreateFormFile("file", filename) - if err != nil { - return nil, err - } - - _, err = io.Copy(fw, f) - if err != nil { - return nil, err - } - - // Populate the additional fields. - fw, err = w.CreateFormField("name") - if err != nil { - return nil, err - } - - _, err = fw.Write([]byte(*opt.Name)) - if err != nil { - return nil, err - } - - fw, err = w.CreateFormField("path") - if err != nil { - return nil, err - } - - _, err = fw.Write([]byte(*opt.Path)) - if err != nil { - return nil, err - } - - if opt.ParentID != nil { - fw, err = w.CreateFormField("parent_id") - if err != nil { - return nil, err - } - - _, err = fw.Write([]byte(strconv.Itoa(*opt.ParentID))) - if err != nil { - return nil, err - } - } - - if err = w.Close(); err != nil { - return nil, err - } - - req, err := s.client.NewRequest(http.MethodPost, "groups/import", nil, options) - if err != nil { - return nil, err - } - - // Set the buffer as the request body. - if err = req.SetBody(b); err != nil { - return nil, err - } - - // Overwrite the default content type. 
- req.Header.Set("Content-Type", w.FormDataContentType()) - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/group_iterations.go b/vendor/github.com/xanzy/go-gitlab/group_iterations.go deleted file mode 100644 index c77d633f6a..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/group_iterations.go +++ /dev/null @@ -1,90 +0,0 @@ -// -// Copyright 2022, Daniel Steinke -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// IterationsAPI handles communication with the iterations related methods -// of the GitLab API -// -// GitLab API docs: https://docs.gitlab.com/ee/api/group_iterations.html -type GroupIterationsService struct { - client *Client -} - -// GroupInteration represents a GitLab iteration. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/group_iterations.html -type GroupIteration struct { - ID int `json:"id"` - IID int `json:"iid"` - Sequence int `json:"sequence"` - GroupID int `json:"group_id"` - Title string `json:"title"` - Description string `json:"description"` - State int `json:"state"` - CreatedAt *time.Time `json:"created_at"` - UpdatedAt *time.Time `json:"updated_at"` - DueDate *ISOTime `json:"due_date"` - StartDate *ISOTime `json:"start_date"` - WebURL string `json:"web_url"` -} - -func (i GroupIteration) String() string { - return Stringify(i) -} - -// ListGroupIterationsOptions contains the available ListGroupIterations() -// options -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_iterations.html#list-group-iterations -type ListGroupIterationsOptions struct { - ListOptions - State *string `url:"state,omitempty" json:"state,omitempty"` - Search *string `url:"search,omitempty" json:"search,omitempty"` - IncludeAncestors *bool `url:"include_ancestors,omitempty" json:"include_ancestors,omitempty"` -} - -// ListGroupIterations returns a list of group iterations. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_iterations.html#list-group-iterations -func (s *GroupIterationsService) ListGroupIterations(gid interface{}, opt *ListGroupIterationsOptions, options ...RequestOptionFunc) ([]*GroupIteration, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/iterations", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var gis []*GroupIteration - resp, err := s.client.Do(req, &gis) - if err != nil { - return nil, nil, err - } - - return gis, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/group_labels.go b/vendor/github.com/xanzy/go-gitlab/group_labels.go deleted file mode 100644 index 5a390269b1..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/group_labels.go +++ /dev/null @@ -1,258 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" -) - -// GroupLabelsService handles communication with the label related methods of the -// GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/group_labels.html -type GroupLabelsService struct { - client *Client -} - -// GroupLabel represents a GitLab group label. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/group_labels.html -type GroupLabel Label - -func (l GroupLabel) String() string { - return Stringify(l) -} - -// ListGroupLabelsOptions represents the available ListGroupLabels() options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/group_labels.html#list-group-labels -type ListGroupLabelsOptions struct { - ListOptions - WithCounts *bool `url:"with_counts,omitempty" json:"with_counts,omitempty"` - IncludeAncestorGroups *bool `url:"include_ancestor_groups,omitempty" json:"include_ancestor_groups,omitempty"` - IncludeDescendantGrouops *bool `url:"include_descendant_groups,omitempty" json:"include_descendant_groups,omitempty"` - OnlyGroupLabels *bool `url:"only_group_labels,omitempty" json:"only_group_labels,omitempty"` - Search *string `url:"search,omitempty" json:"search,omitempty"` -} - -// ListGroupLabels gets all labels for given group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_labels.html#list-group-labels -func (s *GroupLabelsService) ListGroupLabels(gid interface{}, opt *ListGroupLabelsOptions, options ...RequestOptionFunc) ([]*GroupLabel, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/labels", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var l []*GroupLabel - resp, err := s.client.Do(req, &l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil -} - -// GetGroupLabel get a single label for a given group. 
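// Illustrative sketch (editorial, outside the diff): listing iterations via the
// GroupIterationsService removed above. The GroupIterations client field and the
// "current" state value are assumptions based on the GitLab iterations API.
package example

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func groupIterations() {
	client, err := gitlab.NewClient("glpat-placeholder")
	if err != nil {
		log.Fatal(err)
	}

	iterations, _, err := client.GroupIterations.ListGroupIterations("my-group", &gitlab.ListGroupIterationsOptions{
		State:            gitlab.String("current"),
		IncludeAncestors: gitlab.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, it := range iterations {
		fmt.Println(it.IID, it.Title, it.DueDate)
	}
}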
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_labels.html#get-a-single-group-label -func (s *GroupLabelsService) GetGroupLabel(gid interface{}, labelID interface{}, options ...RequestOptionFunc) (*GroupLabel, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - label, err := parseID(labelID) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/labels/%s", PathEscape(group), PathEscape(label)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var l *GroupLabel - resp, err := s.client.Do(req, &l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil -} - -// CreateGroupLabelOptions represents the available CreateGroupLabel() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_labels.html#create-a-new-group-label -type CreateGroupLabelOptions CreateLabelOptions - -// CreateGroupLabel creates a new label for given group with given name and -// color. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_labels.html#create-a-new-group-label -func (s *GroupLabelsService) CreateGroupLabel(gid interface{}, opt *CreateGroupLabelOptions, options ...RequestOptionFunc) (*GroupLabel, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/labels", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - l := new(GroupLabel) - resp, err := s.client.Do(req, l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil -} - -// DeleteGroupLabelOptions represents the available DeleteGroupLabel() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_labels.html#delete-a-group-label -type DeleteGroupLabelOptions DeleteLabelOptions - -// DeleteGroupLabel deletes a group label given by its name or ID. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_labels.html#delete-a-group-label -func (s *GroupLabelsService) DeleteGroupLabel(gid interface{}, lid interface{}, opt *DeleteGroupLabelOptions, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/labels", PathEscape(group)) - - if lid != nil { - label, err := parseID(lid) - if err != nil { - return nil, err - } - u = fmt.Sprintf("groups/%s/labels/%s", PathEscape(group), PathEscape(label)) - } - - req, err := s.client.NewRequest(http.MethodDelete, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// UpdateGroupLabelOptions represents the available UpdateGroupLabel() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_labels.html#update-a-group-label -type UpdateGroupLabelOptions UpdateLabelOptions - -// UpdateGroupLabel updates an existing label with new name or now color. At least -// one parameter is required, to update the label. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_labels.html#update-a-group-label -func (s *GroupLabelsService) UpdateGroupLabel(gid interface{}, opt *UpdateGroupLabelOptions, options ...RequestOptionFunc) (*GroupLabel, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/labels", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - l := new(GroupLabel) - resp, err := s.client.Do(req, l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil -} - -// SubscribeToGroupLabel subscribes the authenticated user to a label to receive -// notifications. If the user is already subscribed to the label, the status -// code 304 is returned. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_labels.html#subscribe-to-a-group-label -func (s *GroupLabelsService) SubscribeToGroupLabel(gid interface{}, labelID interface{}, options ...RequestOptionFunc) (*GroupLabel, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - label, err := parseID(labelID) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/labels/%s/subscribe", PathEscape(group), PathEscape(label)) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - l := new(GroupLabel) - resp, err := s.client.Do(req, l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil -} - -// UnsubscribeFromGroupLabel unsubscribes the authenticated user from a label to not -// receive notifications from it. If the user is not subscribed to the label, the -// status code 304 is returned. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_labels.html#unsubscribe-from-a-group-label -func (s *GroupLabelsService) UnsubscribeFromGroupLabel(gid interface{}, labelID interface{}, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - label, err := parseID(labelID) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/labels/%s/unsubscribe", PathEscape(group), PathEscape(label)) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/group_members.go b/vendor/github.com/xanzy/go-gitlab/group_members.go deleted file mode 100644 index cdf225c3d8..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/group_members.go +++ /dev/null @@ -1,391 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// GroupMembersService handles communication with the group members -// related methods of the GitLab API. 
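// Illustrative sketch (editorial, outside the diff): creating and subscribing to
// a group label through the GroupLabelsService removed above. Name and color are
// placeholders; per the docs quoted above, subscribing twice yields HTTP 304.
package example

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func groupLabels() {
	client, err := gitlab.NewClient("glpat-placeholder")
	if err != nil {
		log.Fatal(err)
	}

	label, _, err := client.GroupLabels.CreateGroupLabel("my-group", &gitlab.CreateGroupLabelOptions{
		Name:  gitlab.String("bug"),
		Color: gitlab.String("#d9534f"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created label", label.Name)

	// Labels can be addressed by numeric ID or by name.
	if _, _, err := client.GroupLabels.SubscribeToGroupLabel("my-group", "bug"); err != nil {
		log.Fatal(err)
	}
}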
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/members.html -type GroupMembersService struct { - client *Client -} - -// GroupMemberSAMLIdentity represents the SAML Identity link for the group member. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/members.html#list-all-members-of-a-group-or-project -// Gitlab MR for API change: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/20357 -// Gitlab MR for API Doc change: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/25652 -type GroupMemberSAMLIdentity struct { - ExternUID string `json:"extern_uid"` - Provider string `json:"provider"` - SAMLProviderID int `json:"saml_provider_id"` -} - -// GroupMember represents a GitLab group member. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/members.html -type GroupMember struct { - ID int `json:"id"` - Username string `json:"username"` - Name string `json:"name"` - State string `json:"state"` - AvatarURL string `json:"avatar_url"` - WebURL string `json:"web_url"` - CreatedAt *time.Time `json:"created_at"` - ExpiresAt *ISOTime `json:"expires_at"` - AccessLevel AccessLevelValue `json:"access_level"` - Email string `json:"email,omitempty"` - GroupSAMLIdentity *GroupMemberSAMLIdentity `json:"group_saml_identity"` - MemberRole *MemberRole `json:"member_role"` -} - -// ListGroupMembersOptions represents the available ListGroupMembers() and -// ListAllGroupMembers() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/members.html#list-all-members-of-a-group-or-project -type ListGroupMembersOptions struct { - ListOptions - Query *string `url:"query,omitempty" json:"query,omitempty"` - UserIDs *[]int `url:"user_ids[],omitempty" json:"user_ids,omitempty"` -} - -// ListGroupMembers get a list of group members viewable by the authenticated -// user. Inherited members through ancestor groups are not included. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/members.html#list-all-members-of-a-group-or-project -func (s *GroupsService) ListGroupMembers(gid interface{}, opt *ListGroupMembersOptions, options ...RequestOptionFunc) ([]*GroupMember, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/members", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var gm []*GroupMember - resp, err := s.client.Do(req, &gm) - if err != nil { - return nil, resp, err - } - - return gm, resp, nil -} - -// ListAllGroupMembers get a list of group members viewable by the authenticated -// user. Returns a list including inherited members through ancestor groups. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/members.html#list-all-members-of-a-group-or-project-including-inherited-and-invited-members -func (s *GroupsService) ListAllGroupMembers(gid interface{}, opt *ListGroupMembersOptions, options ...RequestOptionFunc) ([]*GroupMember, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/members/all", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var gm []*GroupMember - resp, err := s.client.Do(req, &gm) - if err != nil { - return nil, resp, err - } - - return gm, resp, nil -} - -// AddGroupMemberOptions represents the available AddGroupMember() options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/members.html#add-a-member-to-a-group-or-project -type AddGroupMemberOptions struct { - UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` - AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` - ExpiresAt *string `url:"expires_at,omitempty" json:"expires_at"` - MemberRoleID *int `url:"member_role_id,omitempty" json:"member_role_id,omitempty"` -} - -// GetGroupMember gets a member of a group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/members.html#get-a-member-of-a-group-or-project -func (s *GroupMembersService) GetGroupMember(gid interface{}, user int, options ...RequestOptionFunc) (*GroupMember, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/members/%d", PathEscape(group), user) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - gm := new(GroupMember) - resp, err := s.client.Do(req, gm) - if err != nil { - return nil, resp, err - } - - return gm, resp, nil -} - -// GetInheritedGroupMember get a member of a group or project, including -// inherited and invited members -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/members.html#get-a-member-of-a-group-or-project-including-inherited-and-invited-members -func (s *GroupMembersService) GetInheritedGroupMember(gid interface{}, user int, options ...RequestOptionFunc) (*GroupMember, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/members/all/%d", PathEscape(group), user) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - gm := new(GroupMember) - resp, err := s.client.Do(req, gm) - if err != nil { - return nil, resp, err - } - - return gm, resp, err -} - -// BillableGroupMember represents a GitLab billable group member. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/members.html#list-all-billable-members-of-a-group -type BillableGroupMember struct { - ID int `json:"id"` - Username string `json:"username"` - Name string `json:"name"` - State string `json:"state"` - AvatarURL string `json:"avatar_url"` - WebURL string `json:"web_url"` - Email string `json:"email"` - LastActivityOn *ISOTime `json:"last_activity_on"` - MembershipType string `json:"membership_type"` - Removable bool `json:"removable"` - CreatedAt *time.Time `json:"created_at"` - IsLastOwner bool `json:"is_last_owner"` - LastLoginAt *time.Time `json:"last_login_at"` -} - -// ListBillableGroupMembersOptions represents the available ListBillableGroupMembers() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/members.html#list-all-billable-members-of-a-group -type ListBillableGroupMembersOptions struct { - ListOptions - Search *string `url:"search,omitempty" json:"search,omitempty"` - Sort *string `url:"sort,omitempty" json:"sort,omitempty"` -} - -// ListBillableGroupMembers Gets a list of group members that count as billable. -// The list includes members in the subgroup or subproject. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/members.html#list-all-billable-members-of-a-group -func (s *GroupsService) ListBillableGroupMembers(gid interface{}, opt *ListBillableGroupMembersOptions, options ...RequestOptionFunc) ([]*BillableGroupMember, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/billable_members", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var bgm []*BillableGroupMember - resp, err := s.client.Do(req, &bgm) - if err != nil { - return nil, resp, err - } - - return bgm, resp, nil -} - -// RemoveBillableGroupMember removes a given group members that count as billable. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/members.html#remove-a-billable-member-from-a-group -func (s *GroupsService) RemoveBillableGroupMember(gid interface{}, user int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/billable_members/%d", PathEscape(group), user) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// AddGroupMember adds a user to the list of group members. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/members.html#add-a-member-to-a-group-or-project -func (s *GroupMembersService) AddGroupMember(gid interface{}, opt *AddGroupMemberOptions, options ...RequestOptionFunc) (*GroupMember, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/members", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - gm := new(GroupMember) - resp, err := s.client.Do(req, gm) - if err != nil { - return nil, resp, err - } - - return gm, resp, nil -} - -// ShareWithGroup shares a group with the group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#share-groups-with-groups -func (s *GroupMembersService) ShareWithGroup(gid interface{}, opt *ShareWithGroupOptions, options ...RequestOptionFunc) (*Group, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/share", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - g := new(Group) - resp, err := s.client.Do(req, g) - if err != nil { - return nil, resp, err - } - - return g, resp, nil -} - -// DeleteShareWithGroup allows to unshare a group from a group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#delete-link-sharing-group-with-another-group -func (s *GroupMembersService) DeleteShareWithGroup(gid interface{}, groupID int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/share/%d", PathEscape(group), groupID) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// EditGroupMemberOptions represents the available EditGroupMember() -// options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/members.html#edit-a-member-of-a-group-or-project -type EditGroupMemberOptions struct { - AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` - ExpiresAt *string `url:"expires_at,omitempty" json:"expires_at,omitempty"` - MemberRoleID *int `url:"member_role_id,omitempty" json:"member_role_id,omitempty"` -} - -// EditGroupMember updates a member of a group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/members.html#edit-a-member-of-a-group-or-project -func (s *GroupMembersService) EditGroupMember(gid interface{}, user int, opt *EditGroupMemberOptions, options ...RequestOptionFunc) (*GroupMember, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/members/%d", PathEscape(group), user) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - gm := new(GroupMember) - resp, err := s.client.Do(req, gm) - if err != nil { - return nil, resp, err - } - - return gm, resp, nil -} - -// RemoveGroupMemberOptions represents the available options to remove a group member. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/members.html#remove-a-member-from-a-group-or-project -type RemoveGroupMemberOptions struct { - SkipSubresources *bool `url:"skip_subresources,omitempty" json:"skip_subresources,omitempty"` - UnassignIssuables *bool `url:"unassign_issuables,omitempty" json:"unassign_issuables,omitempty"` -} - -// RemoveGroupMember removes a member from a group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/members.html#remove-a-member-from-a-group-or-project -func (s *GroupMembersService) RemoveGroupMember(gid interface{}, user int, opt *RemoveGroupMemberOptions, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/members/%d", PathEscape(group), user) - - req, err := s.client.NewRequest(http.MethodDelete, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/group_milestones.go b/vendor/github.com/xanzy/go-gitlab/group_milestones.go deleted file mode 100644 index f3089b2152..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/group_milestones.go +++ /dev/null @@ -1,322 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// GroupMilestonesService handles communication with the milestone related -// methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/group_milestones.html -type GroupMilestonesService struct { - client *Client -} - -// GroupMilestone represents a GitLab milestone.
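[Reviewer note, not part of the applied diff: for anyone auditing call sites of the GroupMembersService helpers deleted above, a minimal usage sketch follows. The token, group path, and user ID are placeholders, and client construction is assumed to follow the library's README.]

package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	// NewClient takes a personal access token; this value is a placeholder.
	git, err := gitlab.NewClient("glpat-placeholder")
	if err != nil {
		log.Fatal(err)
	}

	// Add user 42 to the group as a developer via the removed AddGroupMember.
	member, _, err := git.GroupMembers.AddGroupMember("my-org/my-group", &gitlab.AddGroupMemberOptions{
		UserID:      gitlab.Int(42),
		AccessLevel: gitlab.AccessLevel(gitlab.DeveloperPermissions),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("added %s at access level %d\n", member.Username, member.AccessLevel)
}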
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/group_milestones.html -type GroupMilestone struct { - ID int `json:"id"` - IID int `json:"iid"` - GroupID int `json:"group_id"` - Title string `json:"title"` - Description string `json:"description"` - StartDate *ISOTime `json:"start_date"` - DueDate *ISOTime `json:"due_date"` - State string `json:"state"` - UpdatedAt *time.Time `json:"updated_at"` - CreatedAt *time.Time `json:"created_at"` - Expired *bool `json:"expired"` -} - -func (m GroupMilestone) String() string { - return Stringify(m) -} - -// ListGroupMilestonesOptions represents the available -// ListGroupMilestones() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_milestones.html#list-group-milestones -type ListGroupMilestonesOptions struct { - ListOptions - IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` - State *string `url:"state,omitempty" json:"state,omitempty"` - Title *string `url:"title,omitempty" json:"title,omitempty"` - Search *string `url:"search,omitempty" json:"search,omitempty"` - SearchTitle *string `url:"search_title,omitempty" json:"search_title,omitempty"` - IncludeParentMilestones *bool `url:"include_parent_milestones,omitempty" json:"include_parent_milestones,omitempty"` - IncludeAncestors *bool `url:"include_ancestors,omitempty" json:"include_ancestors,omitempty"` - IncludeDescendents *bool `url:"include_descendents,omitempty" json:"include_descendents,omitempty"` - UpdatedBefore *ISOTime `url:"updated_before,omitempty" json:"updated_before,omitempty"` - UpdatedAfter *ISOTime `url:"updated_after,omitempty" json:"updated_after,omitempty"` - ContainingDate *ISOTime `url:"containing_date,omitempty" json:"containing_date,omitempty"` - StartDate *ISOTime `url:"start_date,omitempty" json:"start_date,omitempty"` - EndDate *ISOTime `url:"end_date,omitempty" json:"end_date,omitempty"` -} - -// ListGroupMilestones returns a list of group milestones. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_milestones.html#list-group-milestones -func (s *GroupMilestonesService) ListGroupMilestones(gid interface{}, opt *ListGroupMilestonesOptions, options ...RequestOptionFunc) ([]*GroupMilestone, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/milestones", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var m []*GroupMilestone - resp, err := s.client.Do(req, &m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// GetGroupMilestone gets a single group milestone. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_milestones.html#get-single-milestone -func (s *GroupMilestonesService) GetGroupMilestone(gid interface{}, milestone int, options ...RequestOptionFunc) (*GroupMilestone, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/milestones/%d", PathEscape(group), milestone) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - m := new(GroupMilestone) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// CreateGroupMilestoneOptions represents the available CreateGroupMilestone() options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_milestones.html#create-new-milestone -type CreateGroupMilestoneOptions struct { - Title *string `url:"title,omitempty" json:"title,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - StartDate *ISOTime `url:"start_date,omitempty" json:"start_date,omitempty"` - DueDate *ISOTime `url:"due_date,omitempty" json:"due_date,omitempty"` -} - -// CreateGroupMilestone creates a new group milestone. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_milestones.html#create-new-milestone -func (s *GroupMilestonesService) CreateGroupMilestone(gid interface{}, opt *CreateGroupMilestoneOptions, options ...RequestOptionFunc) (*GroupMilestone, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/milestones", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - m := new(GroupMilestone) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// UpdateGroupMilestoneOptions represents the available UpdateGroupMilestone() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_milestones.html#edit-milestone -type UpdateGroupMilestoneOptions struct { - Title *string `url:"title,omitempty" json:"title,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - StartDate *ISOTime `url:"start_date,omitempty" json:"start_date,omitempty"` - DueDate *ISOTime `url:"due_date,omitempty" json:"due_date,omitempty"` - StateEvent *string `url:"state_event,omitempty" json:"state_event,omitempty"` -} - -// UpdateGroupMilestone updates an existing group milestone. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_milestones.html#edit-milestone -func (s *GroupMilestonesService) UpdateGroupMilestone(gid interface{}, milestone int, opt *UpdateGroupMilestoneOptions, options ...RequestOptionFunc) (*GroupMilestone, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/milestones/%d", PathEscape(group), milestone) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - m := new(GroupMilestone) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// DeleteGroupMilestone deletes a specified group milestone. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_milestones.html#delete-group-milestone -func (s *GroupMilestonesService) DeleteGroupMilestone(gid interface{}, milestone int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/milestones/%d", PathEscape(group), milestone) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - return s.client.Do(req, nil) -} - -// GetGroupMilestoneIssuesOptions represents the available GetGroupMilestoneIssues() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_milestones.html#get-all-issues-assigned-to-a-single-milestone -type GetGroupMilestoneIssuesOptions ListOptions - -// GetGroupMilestoneIssues gets all issues assigned to a single group milestone.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_milestones.html#get-all-issues-assigned-to-a-single-milestone -func (s *GroupMilestonesService) GetGroupMilestoneIssues(gid interface{}, milestone int, opt *GetGroupMilestoneIssuesOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/milestones/%d/issues", PathEscape(group), milestone) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var i []*Issue - resp, err := s.client.Do(req, &i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil -} - -// GetGroupMilestoneMergeRequestsOptions represents the available -// GetGroupMilestoneMergeRequests() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_milestones.html#get-all-merge-requests-assigned-to-a-single-milestone -type GetGroupMilestoneMergeRequestsOptions ListOptions - -// GetGroupMilestoneMergeRequests gets all merge requests assigned to a -// single group milestone. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_milestones.html#get-all-merge-requests-assigned-to-a-single-milestone -func (s *GroupMilestonesService) GetGroupMilestoneMergeRequests(gid interface{}, milestone int, opt *GetGroupMilestoneMergeRequestsOptions, options ...RequestOptionFunc) ([]*MergeRequest, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/milestones/%d/merge_requests", PathEscape(group), milestone) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var mr []*MergeRequest - resp, err := s.client.Do(req, &mr) - if err != nil { - return nil, resp, err - } - - return mr, resp, nil -} - -// BurndownChartEvent represents a burndown chart event. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_milestones.html#get-all-burndown-chart-events-for-a-single-milestone -type BurndownChartEvent struct { - CreatedAt *time.Time `json:"created_at"` - Weight *int `json:"weight"` - Action *string `json:"action"` -} - -// GetGroupMilestoneBurndownChartEventsOptions represents the available -// GetGroupMilestoneBurndownChartEventsOptions() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_milestones.html#get-all-burndown-chart-events-for-a-single-milestone -type GetGroupMilestoneBurndownChartEventsOptions ListOptions - -// GetGroupMilestoneBurndownChartEvents gets all burndown chart events for a -// single group milestone.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_milestones.html#get-all-burndown-chart-events-for-a-single-milestone -func (s *GroupMilestonesService) GetGroupMilestoneBurndownChartEvents(gid interface{}, milestone int, opt *GetGroupMilestoneBurndownChartEventsOptions, options ...RequestOptionFunc) ([]*BurndownChartEvent, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/milestones/%d/burndown_events", PathEscape(group), milestone) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var be []*BurndownChartEvent - resp, err := s.client.Do(req, &be) - if err != nil { - return nil, resp, err - } - - return be, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/group_protected_environments.go b/vendor/github.com/xanzy/go-gitlab/group_protected_environments.go deleted file mode 100644 index addc383fb1..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/group_protected_environments.go +++ /dev/null @@ -1,281 +0,0 @@ -// -// Copyright 2023, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" -) - -// GroupProtectedEnvironmentsService handles communication with the group-level -// protected environment methods of the GitLab API. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_protected_environments.html -type GroupProtectedEnvironmentsService struct { - client *Client -} - -// GroupProtectedEnvironment represents a group-level protected environment. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_protected_environments.html -type GroupProtectedEnvironment struct { - Name string `json:"name"` - DeployAccessLevels []*GroupEnvironmentAccessDescription `json:"deploy_access_levels"` - RequiredApprovalCount int `json:"required_approval_count"` - ApprovalRules []*GroupEnvironmentApprovalRule `json:"approval_rules"` -} - -// GroupEnvironmentAccessDescription represents the access description for a -// group-level protected environment. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_protected_environments.html -type GroupEnvironmentAccessDescription struct { - ID int `json:"id"` - AccessLevel AccessLevelValue `json:"access_level"` - AccessLevelDescription string `json:"access_level_description"` - UserID int `json:"user_id"` - GroupID int `json:"group_id"` - GroupInheritanceType int `json:"group_inheritance_type"` -} - -// GroupEnvironmentApprovalRule represents the approval rules for a group-level -// protected environment.
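[Reviewer note, not part of the applied diff: a minimal sketch of the GroupMilestonesService API whose removal completes above; token and group path are placeholders, and the client setup mirrors the library's README.]

package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("glpat-placeholder") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// List the group's active milestones, 20 per page.
	milestones, _, err := git.GroupMilestones.ListGroupMilestones("my-org/my-group", &gitlab.ListGroupMilestonesOptions{
		ListOptions: gitlab.ListOptions{PerPage: 20},
		State:       gitlab.String("active"),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range milestones {
		fmt.Println(m.IID, m.Title)
	}
}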
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_protected_environments.html#protect-a-single-environment -type GroupEnvironmentApprovalRule struct { - ID int `json:"id"` - UserID int `json:"user_id"` - GroupID int `json:"group_id"` - AccessLevel AccessLevelValue `json:"access_level"` - AccessLevelDescription string `json:"access_level_description"` - RequiredApprovalCount int `json:"required_approvals"` - GroupInheritanceType int `json:"group_inheritance_type"` -} - -// ListGroupProtectedEnvironmentsOptions represents the available -// ListGroupProtectedEnvironments() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_protected_environments.html#list-group-level-protected-environments -type ListGroupProtectedEnvironmentsOptions ListOptions - -// ListGroupProtectedEnvironments returns a list of protected environments from -// a group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_protected_environments.html#list-group-level-protected-environments -func (s *GroupProtectedEnvironmentsService) ListGroupProtectedEnvironments(gid interface{}, opt *ListGroupProtectedEnvironmentsOptions, options ...RequestOptionFunc) ([]*GroupProtectedEnvironment, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/protected_environments", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pes []*GroupProtectedEnvironment - resp, err := s.client.Do(req, &pes) - if err != nil { - return nil, resp, err - } - - return pes, resp, nil -} - -// GetGroupProtectedEnvironment returns a single group-level protected -// environment. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_protected_environments.html#get-a-single-protected-environment -func (s *GroupProtectedEnvironmentsService) GetGroupProtectedEnvironment(gid interface{}, environment string, options ...RequestOptionFunc) (*GroupProtectedEnvironment, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/protected_environments/%s", PathEscape(group), environment) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - pe := new(GroupProtectedEnvironment) - resp, err := s.client.Do(req, pe) - if err != nil { - return nil, resp, err - } - - return pe, resp, nil -} - -// ProtectGroupEnvironmentOptions represents the available -// ProtectGroupEnvironment() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_protected_environments.html#protect-a-single-environment -type ProtectGroupEnvironmentOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - DeployAccessLevels *[]*GroupEnvironmentAccessOptions `url:"deploy_access_levels,omitempty" json:"deploy_access_levels,omitempty"` - RequiredApprovalCount *int `url:"required_approval_count,omitempty" json:"required_approval_count,omitempty"` - ApprovalRules *[]*GroupEnvironmentApprovalRuleOptions `url:"approval_rules,omitempty" json:"approval_rules,omitempty"` -} - -// GroupEnvironmentAccessOptions represents the options for an access description -// for a group-level protected environment.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_protected_environments.html#protect-a-single-environment -type GroupEnvironmentAccessOptions struct { - AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` - UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` - GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` - GroupInheritanceType *int `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"` -} - -// GroupEnvironmentApprovalRuleOptions represents the approval rules for a -// group-level protected environment. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_protected_environments.html#protect-a-single-environment -type GroupEnvironmentApprovalRuleOptions struct { - UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` - GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` - AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` - AccessLevelDescription *string `url:"access_level_description,omitempty" json:"access_level_description,omitempty"` - RequiredApprovalCount *int `url:"required_approvals,omitempty" json:"required_approvals,omitempty"` - GroupInheritanceType *int `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"` -} - -// ProtectGroupEnvironment protects a single group-level environment. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_protected_environments.html#protect-a-single-environment -func (s *GroupProtectedEnvironmentsService) ProtectGroupEnvironment(gid interface{}, opt *ProtectGroupEnvironmentOptions, options ...RequestOptionFunc) (*GroupProtectedEnvironment, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/protected_environments", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - pe := new(GroupProtectedEnvironment) - resp, err := s.client.Do(req, pe) - if err != nil { - return nil, resp, err - } - - return pe, resp, nil -} - -// UpdateGroupProtectedEnvironmentOptions represents the available -// UpdateGroupProtectedEnvironment() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_protected_environments.html#update-a-protected-environment -type UpdateGroupProtectedEnvironmentOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - DeployAccessLevels *[]*UpdateGroupEnvironmentAccessOptions `url:"deploy_access_levels,omitempty" json:"deploy_access_levels,omitempty"` - RequiredApprovalCount *int `url:"required_approval_count,omitempty" json:"required_approval_count,omitempty"` - ApprovalRules *[]*UpdateGroupEnvironmentApprovalRuleOptions `url:"approval_rules,omitempty" json:"approval_rules,omitempty"` -} - -// UpdateGroupEnvironmentAccessOptions represents the options for updates to the -// access description for a group-level protected environment.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_protected_environments.html#update-a-protected-environment -type UpdateGroupEnvironmentAccessOptions struct { - AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` - ID *int `url:"id,omitempty" json:"id,omitempty"` - UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` - GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` - GroupInheritanceType *int `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"` - Destroy *bool `url:"_destroy,omitempty" json:"_destroy,omitempty"` -} - -// UpdateGroupEnvironmentApprovalRuleOptions represents the updates to the -// approval rules for a group-level protected environment. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_protected_environments.html#update-a-protected-environment -type UpdateGroupEnvironmentApprovalRuleOptions struct { - ID *int `url:"id,omitempty" json:"id,omitempty"` - UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` - GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` - AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` - AccessLevelDescription *string `url:"access_level_description,omitempty" json:"access_level_description,omitempty"` - RequiredApprovalCount *int `url:"required_approvals,omitempty" json:"required_approvals,omitempty"` - GroupInheritanceType *int `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"` - Destroy *bool `url:"_destroy,omitempty" json:"_destroy,omitempty"` -} - -// UpdateGroupProtectedEnvironment updates a single group-level protected -// environment. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_protected_environments.html#update-a-protected-environment -func (s *GroupProtectedEnvironmentsService) UpdateGroupProtectedEnvironment(gid interface{}, environment string, opt *UpdateGroupProtectedEnvironmentOptions, options ...RequestOptionFunc) (*GroupProtectedEnvironment, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/protected_environments/%s", PathEscape(group), environment) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - pe := new(GroupProtectedEnvironment) - resp, err := s.client.Do(req, pe) - if err != nil { - return nil, resp, err - } - - return pe, resp, nil -} - -// UnprotectGroupEnvironment unprotects the given protected group-level -// environment. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_protected_environments.html#unprotect-a-single-environment -func (s *GroupProtectedEnvironmentsService) UnprotectGroupEnvironment(gid interface{}, environment string, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/protected_environments/%s", PathEscape(group), environment) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/group_repository_storage_move.go b/vendor/github.com/xanzy/go-gitlab/group_repository_storage_move.go deleted file mode 100644 index 18951a1661..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/group_repository_storage_move.go +++ /dev/null @@ -1,195 +0,0 @@ -// -// Copyright 2023, Nick Westbury -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// GroupRepositoryStorageMoveService handles communication with the -// group repositories related methods of the GitLab API. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html -type GroupRepositoryStorageMoveService struct { - client *Client -} - -// GroupRepositoryStorageMove represents the status of a repository move. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html -type GroupRepositoryStorageMove struct { - ID int `json:"id"` - CreatedAt *time.Time `json:"created_at"` - State string `json:"state"` - SourceStorageName string `json:"source_storage_name"` - DestinationStorageName string `json:"destination_storage_name"` - Group *RepositoryGroup `json:"group"` -} - -type RepositoryGroup struct { - ID int `json:"id"` - Name string `json:"name"` - WebURL string `json:"web_url"` -} - -// RetrieveAllGroupStorageMovesOptions represents the available -// RetrieveAllStorageMoves() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html#retrieve-all-group-repository-storage-moves -type RetrieveAllGroupStorageMovesOptions ListOptions - -// RetrieveAllStorageMoves retrieves all group repository storage moves -// accessible by the authenticated user. 
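[Reviewer note, not part of the applied diff: a minimal sketch of the GroupProtectedEnvironmentsService API removed above. The group path and environment name are placeholders; the pointer-to-slice shape of DeployAccessLevels follows the deleted option struct.]

package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("glpat-placeholder") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// Protect "production" so only maintainers may deploy to it.
	deploy := []*gitlab.GroupEnvironmentAccessOptions{
		{AccessLevel: gitlab.AccessLevel(gitlab.MaintainerPermissions)},
	}
	pe, _, err := git.GroupProtectedEnvironments.ProtectGroupEnvironment("my-org/my-group", &gitlab.ProtectGroupEnvironmentOptions{
		Name:               gitlab.String("production"),
		DeployAccessLevels: &deploy,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("protected environment:", pe.Name)
}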
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html#retrieve-all-group-repository-storage-moves -func (g GroupRepositoryStorageMoveService) RetrieveAllStorageMoves(opts RetrieveAllGroupStorageMovesOptions, options ...RequestOptionFunc) ([]*GroupRepositoryStorageMove, *Response, error) { - req, err := g.client.NewRequest(http.MethodGet, "group_repository_storage_moves", opts, options) - if err != nil { - return nil, nil, err - } - - var gsms []*GroupRepositoryStorageMove - resp, err := g.client.Do(req, &gsms) - if err != nil { - return nil, resp, err - } - - return gsms, resp, err -} - -// RetrieveAllStorageMovesForGroup retrieves all repository storage moves for -// a single group accessible by the authenticated user. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html#retrieve-all-repository-storage-moves-for-a-single-group -func (g GroupRepositoryStorageMoveService) RetrieveAllStorageMovesForGroup(group int, opts RetrieveAllGroupStorageMovesOptions, options ...RequestOptionFunc) ([]*GroupRepositoryStorageMove, *Response, error) { - u := fmt.Sprintf("groups/%d/repository_storage_moves", group) - - req, err := g.client.NewRequest(http.MethodGet, u, opts, options) - if err != nil { - return nil, nil, err - } - - var gsms []*GroupRepositoryStorageMove - resp, err := g.client.Do(req, &gsms) - if err != nil { - return nil, resp, err - } - - return gsms, resp, err -} - -// GetStorageMove gets a single group repository storage move. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html#get-a-single-group-repository-storage-move -func (g GroupRepositoryStorageMoveService) GetStorageMove(repositoryStorage int, options ...RequestOptionFunc) (*GroupRepositoryStorageMove, *Response, error) { - u := fmt.Sprintf("group_repository_storage_moves/%d", repositoryStorage) - - req, err := g.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - gsm := new(GroupRepositoryStorageMove) - resp, err := g.client.Do(req, gsm) - if err != nil { - return nil, resp, err - } - - return gsm, resp, err -} - -// GetStorageMoveForGroup gets a single repository storage move for a group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html#get-a-single-repository-storage-move-for-a-group -func (g GroupRepositoryStorageMoveService) GetStorageMoveForGroup(group int, repositoryStorage int, options ...RequestOptionFunc) (*GroupRepositoryStorageMove, *Response, error) { - u := fmt.Sprintf("groups/%d/repository_storage_moves/%d", group, repositoryStorage) - - req, err := g.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - gsm := new(GroupRepositoryStorageMove) - resp, err := g.client.Do(req, gsm) - if err != nil { - return nil, resp, err - } - - return gsm, resp, err -} - -// ScheduleStorageMoveForGroupOptions represents the available -// ScheduleStorageMoveForGroup() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html#schedule-a-repository-storage-move-for-a-group -type ScheduleStorageMoveForGroupOptions struct { - DestinationStorageName *string `url:"destination_storage_name,omitempty" json:"destination_storage_name,omitempty"` -} - -// ScheduleStorageMoveForGroup schedules a repository to be moved for a group.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html#schedule-a-repository-storage-move-for-a-group -func (g GroupRepositoryStorageMoveService) ScheduleStorageMoveForGroup(group int, opts ScheduleStorageMoveForGroupOptions, options ...RequestOptionFunc) (*GroupRepositoryStorageMove, *Response, error) { - u := fmt.Sprintf("groups/%d/repository_storage_moves", group) - - req, err := g.client.NewRequest(http.MethodPost, u, opts, options) - if err != nil { - return nil, nil, err - } - - gsm := new(GroupRepositoryStorageMove) - resp, err := g.client.Do(req, gsm) - if err != nil { - return nil, resp, err - } - - return gsm, resp, err -} - -// ScheduleAllGroupStorageMovesOptions represents the available -// ScheduleAllStorageMoves() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html#schedule-repository-storage-moves-for-all-groups-on-a-storage-shard -type ScheduleAllGroupStorageMovesOptions struct { - SourceStorageName *string `url:"source_storage_name,omitempty" json:"source_storage_name,omitempty"` - DestinationStorageName *string `url:"destination_storage_name,omitempty" json:"destination_storage_name,omitempty"` -} - -// ScheduleAllStorageMoves schedules all group repositories to be moved. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_repository_storage_moves.html#schedule-repository-storage-moves-for-all-groups-on-a-storage-shard -func (g GroupRepositoryStorageMoveService) ScheduleAllStorageMoves(opts ScheduleAllGroupStorageMovesOptions, options ...RequestOptionFunc) (*Response, error) { - req, err := g.client.NewRequest(http.MethodPost, "group_repository_storage_moves", opts, options) - if err != nil { - return nil, err - } - - return g.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/group_serviceaccounts.go b/vendor/github.com/xanzy/go-gitlab/group_serviceaccounts.go deleted file mode 100644 index 1360057a25..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/group_serviceaccounts.go +++ /dev/null @@ -1,181 +0,0 @@ -// -// Copyright 2023, James Hong -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" -) - -// GroupServiceAccount represents a GitLab service account user. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#create-service-account-user -type GroupServiceAccount struct { - ID int `json:"id"` - Name string `json:"name"` - UserName string `json:"username"` -} - -// ListServiceAccountsOptions represents the available ListServiceAccounts() options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/group_service_accounts.html#list-service-account-users -type ListServiceAccountsOptions struct { - ListOptions - OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` - Sort *string `url:"sort,omitempty" json:"sort,omitempty"` -} - -// ListServiceAccounts gets a list of service accounts.
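[Reviewer note, not part of the applied diff: a minimal sketch of the GroupRepositoryStorageMoveService API removed above. The group ID and shard name are placeholders, and the client field name is assumed to match this vendored version; note the options are passed by value, per the deleted signatures.]

package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("glpat-placeholder") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// Schedule a move of group 123's repositories to another storage shard.
	move, _, err := git.GroupRepositoryStorageMove.ScheduleStorageMoveForGroup(123, gitlab.ScheduleStorageMoveForGroupOptions{
		DestinationStorageName: gitlab.String("storage-2"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("move %d is %s\n", move.ID, move.State)
}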
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/group_service_accounts.html#list-service-account-users -func (s *GroupsService) ListServiceAccounts(gid interface{}, opt *ListServiceAccountsOptions, options ...RequestOptionFunc) ([]*GroupServiceAccount, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/service_accounts", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var sa []*GroupServiceAccount - resp, err := s.client.Do(req, &sa) - if err != nil { - return nil, resp, err - } - - return sa, resp, nil -} - -// CreateServiceAccountOptions represents the available CreateServiceAccount() options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/group_service_accounts.html#create-a-service-account-user -type CreateServiceAccountOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - Username *string `url:"username,omitempty" json:"username,omitempty"` -} - -// CreateServiceAccount creates a service account user. -// -// This API endpoint works on top-level groups only. It does not work on subgroups. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#create-service-account-user -func (s *GroupsService) CreateServiceAccount(gid interface{}, opt *CreateServiceAccountOptions, options ...RequestOptionFunc) (*GroupServiceAccount, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/service_accounts", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - sa := new(GroupServiceAccount) - resp, err := s.client.Do(req, sa) - if err != nil { - return nil, resp, err - } - - return sa, resp, nil -} - -// CreateServiceAccountPersonalAccessTokenOptions represents the available -// CreateServiceAccountPersonalAccessToken() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_service_accounts.html#create-a-personal-access-token-for-a-service-account-user -type CreateServiceAccountPersonalAccessTokenOptions struct { - Scopes *[]string `url:"scopes,omitempty" json:"scopes,omitempty"` - Name *string `url:"name,omitempty" json:"name,omitempty"` - ExpiresAt *ISOTime `url:"expires_at,omitempty" json:"expires_at,omitempty"` -} - -// CreateServiceAccountPersonalAccessToken adds a new Personal Access Token for a -// service account user for a group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_service_accounts.html#create-a-personal-access-token-for-a-service-account-user -func (s *GroupsService) CreateServiceAccountPersonalAccessToken(gid interface{}, serviceAccount int, opt *CreateServiceAccountPersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/service_accounts/%d/personal_access_tokens", PathEscape(group), serviceAccount) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - pat := new(PersonalAccessToken) - resp, err := s.client.Do(req, pat) - if err != nil { - return nil, resp, err - } - - return pat, resp, nil -} - -// RotateServiceAccountPersonalAccessToken rotates a Personal Access Token for a -// service account user for a group.
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#create-personal-access-token-for-service-account-user -func (s *GroupsService) RotateServiceAccountPersonalAccessToken(gid interface{}, serviceAccount, token int, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/service_accounts/%d/personal_access_tokens/%d/rotate", PathEscape(group), serviceAccount, token) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - pat := new(PersonalAccessToken) - resp, err := s.client.Do(req, pat) - if err != nil { - return nil, resp, err - } - - return pat, resp, nil -} - -// DeleteServiceAccount deletes a service account user. -// -// This API endpoint works on top-level groups only. It does not work on subgroups. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/group_service_accounts.html#delete-a-service-account-user -func (s *GroupsService) DeleteServiceAccount(gid interface{}, serviceAccount int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/service_accounts/%d", PathEscape(group), serviceAccount) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/group_ssh_certificates.go b/vendor/github.com/xanzy/go-gitlab/group_ssh_certificates.go deleted file mode 100644 index c29039039d..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/group_ssh_certificates.go +++ /dev/null @@ -1,105 +0,0 @@ -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// GroupSSHCertificatesService handles communication with the group -// SSH certificate related methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/group_ssh_certificates.html -type GroupSSHCertificatesService struct { - client *Client -} - -// GroupSSHCertificate represents a GitLab Group SSH certificate. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/group_ssh_certificates.html -type GroupSSHCertificate struct { - ID int `json:"id"` - Title string `json:"title"` - Key string `json:"key"` - CreatedAt *time.Time `json:"created_at"` -} - -// ListGroupSSHCertificates gets a list of SSH certificates for a specified -// group. -// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/group_ssh_certificates.html#get-all-ssh-certificates-for-a-particular-group -func (s *GroupSSHCertificatesService) ListGroupSSHCertificates(gid interface{}, options ...RequestOptionFunc) ([]*GroupSSHCertificate, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/ssh_certificates", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var certs []*GroupSSHCertificate - resp, err := s.client.Do(req, &certs) - if err != nil { - return nil, resp, err - } - - return certs, resp, nil -} - -// CreateGroupSSHCertificateOptions represents the available -// CreateGroupSSHCertificate() options.
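[Reviewer note, not part of the applied diff: a minimal sketch of the group service-account helpers removed above, assuming a top-level group; the group path and account names are placeholders.]

package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("glpat-placeholder") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// Create a service account user in the top-level group.
	sa, _, err := git.Groups.CreateServiceAccount("my-org", &gitlab.CreateServiceAccountOptions{
		Name:     gitlab.String("release-bot"),
		Username: gitlab.String("release-bot"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Issue an API-scoped personal access token for that account.
	pat, _, err := git.Groups.CreateServiceAccountPersonalAccessToken("my-org", sa.ID, &gitlab.CreateServiceAccountPersonalAccessTokenOptions{
		Name:   gitlab.String("ci-token"),
		Scopes: &[]string{"api"},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created token:", pat.Name)
}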
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_ssh_certificates.html#create-ssh-certificate -type CreateGroupSSHCertificateOptions struct { - Key *string `url:"key,omitempty" json:"key,omitempty"` - Title *string `url:"title,omitempty" json:"title,omitempty"` -} - -// CreateGroupSSHCertificate creates a new SSH certificate for a specified group. -// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/group_ssh_certificates.html#create-ssh-certificate -func (s *GroupSSHCertificatesService) CreateGroupSSHCertificate(gid interface{}, opt *CreateGroupSSHCertificateOptions, options ...RequestOptionFunc) (*GroupSSHCertificate, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/ssh_certificates", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - cert := new(GroupSSHCertificate) - resp, err := s.client.Do(req, cert) - if err != nil { - return nil, resp, err - } - - return cert, resp, nil -} - -// DeleteGroupSSHCertificate deletes an SSH certificate from a specified group. -// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/group_ssh_certificates.html#delete-group-ssh-certificate -func (s *GroupSSHCertificatesService) DeleteGroupSSHCertificate(gid interface{}, cert int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/ssh_certificates/%d", PathEscape(group), cert) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/group_variables.go b/vendor/github.com/xanzy/go-gitlab/group_variables.go deleted file mode 100644 index 69fe44592d..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/group_variables.go +++ /dev/null @@ -1,218 +0,0 @@ -// -// Copyright 2021, Patrick Webster -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "net/url" -) - -// GroupVariablesService handles communication with the -// group variables related methods of the GitLab API. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_level_variables.html -type GroupVariablesService struct { - client *Client -} - -// GroupVariable represents a GitLab group Variable. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_level_variables.html -type GroupVariable struct { - Key string `json:"key"` - Value string `json:"value"` - VariableType VariableTypeValue `json:"variable_type"` - Protected bool `json:"protected"` - Masked bool `json:"masked"` - Raw bool `json:"raw"` - EnvironmentScope string `json:"environment_scope"` - Description string `json:"description"` -} - -func (v GroupVariable) String() string { - return Stringify(v) -} - -// ListGroupVariablesOptions represents the available options for listing variables -// for a group.
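[Reviewer note, not part of the applied diff: a minimal sketch of the GroupSSHCertificatesService API removed above. The group path, title, and key are placeholders; the truncated key material is illustrative only.]

package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("glpat-placeholder") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// Register a CA public key as the group's SSH certificate.
	cert, _, err := git.GroupSSHCertificates.CreateGroupSSHCertificate("my-org", &gitlab.CreateGroupSSHCertificateOptions{
		Title: gitlab.String("deploy CA"),
		Key:   gitlab.String("ssh-ed25519 AAAA... ca@example.com"), // placeholder key
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("certificate %d (%s) created\n", cert.ID, cert.Title)
}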
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_level_variables.html#list-group-variables -type ListGroupVariablesOptions ListOptions - -// ListVariables gets a list of all variables for a group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_level_variables.html#list-group-variables -func (s *GroupVariablesService) ListVariables(gid interface{}, opt *ListGroupVariablesOptions, options ...RequestOptionFunc) ([]*GroupVariable, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/variables", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var vs []*GroupVariable - resp, err := s.client.Do(req, &vs) - if err != nil { - return nil, resp, err - } - - return vs, resp, nil -} - -// GetGroupVariableOptions represents the available GetVariable() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_level_variables.html#show-variable-details -type GetGroupVariableOptions struct { - Filter *VariableFilter `url:"filter,omitempty" json:"filter,omitempty"` -} - -// GetVariable gets a variable. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_level_variables.html#show-variable-details -func (s *GroupVariablesService) GetVariable(gid interface{}, key string, opt *GetGroupVariableOptions, options ...RequestOptionFunc) (*GroupVariable, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/variables/%s", PathEscape(group), url.PathEscape(key)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - v := new(GroupVariable) - resp, err := s.client.Do(req, v) - if err != nil { - return nil, resp, err - } - - return v, resp, nil -} - -// CreateGroupVariableOptions represents the available CreateVariable() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_level_variables.html#create-variable -type CreateGroupVariableOptions struct { - Key *string `url:"key,omitempty" json:"key,omitempty"` - Value *string `url:"value,omitempty" json:"value,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - EnvironmentScope *string `url:"environment_scope,omitempty" json:"environment_scope,omitempty"` - Masked *bool `url:"masked,omitempty" json:"masked,omitempty"` - Protected *bool `url:"protected,omitempty" json:"protected,omitempty"` - Raw *bool `url:"raw,omitempty" json:"raw,omitempty"` - VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` -} - -// CreateVariable creates a new group variable. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_level_variables.html#create-variable -func (s *GroupVariablesService) CreateVariable(gid interface{}, opt *CreateGroupVariableOptions, options ...RequestOptionFunc) (*GroupVariable, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/variables", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - v := new(GroupVariable) - resp, err := s.client.Do(req, v) - if err != nil { - return nil, resp, err - } - - return v, resp, nil -} - -// UpdateGroupVariableOptions represents the available UpdateVariable() -// options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_level_variables.html#update-variable -type UpdateGroupVariableOptions struct { - Value *string `url:"value,omitempty" json:"value,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - EnvironmentScope *string `url:"environment_scope,omitempty" json:"environment_scope,omitempty"` - Masked *bool `url:"masked,omitempty" json:"masked,omitempty"` - Protected *bool `url:"protected,omitempty" json:"protected,omitempty"` - Raw *bool `url:"raw,omitempty" json:"raw,omitempty"` - VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` -} - -// UpdateVariable updates an existing group variable. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_level_variables.html#update-variable -func (s *GroupVariablesService) UpdateVariable(gid interface{}, key string, opt *UpdateGroupVariableOptions, options ...RequestOptionFunc) (*GroupVariable, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/variables/%s", PathEscape(group), url.PathEscape(key)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - v := new(GroupVariable) - resp, err := s.client.Do(req, v) - if err != nil { - return nil, resp, err - } - - return v, resp, nil -} - -// RemoveVariable removes a group's variable. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_level_variables.html#remove-variable -func (s *GroupVariablesService) RemoveVariable(gid interface{}, key string, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/variables/%s", PathEscape(group), url.PathEscape(key)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/group_wikis.go b/vendor/github.com/xanzy/go-gitlab/group_wikis.go deleted file mode 100644 index 4693965fe5..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/group_wikis.go +++ /dev/null @@ -1,204 +0,0 @@ -// -// Copyright 2021, Markus Lackner -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package gitlab - -import ( - "fmt" - "net/http" - "net/url" -) - -// GroupWikisService handles communication with the group wikis related methods of -// the Gitlab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/group_wikis.html -type GroupWikisService struct { - client *Client -} - -// GroupWiki represents a GitLab group's wiki.
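[Reviewer note, not part of the applied diff: a minimal sketch of the GroupVariablesService API removed above; the group path, key, and value are placeholders.]

package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("glpat-placeholder") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// Create a protected CI/CD variable on the group.
	v, _, err := git.GroupVariables.CreateVariable("my-org/my-group", &gitlab.CreateGroupVariableOptions{
		Key:       gitlab.String("DEPLOY_ENVIRONMENT"),
		Value:     gitlab.String("staging"),
		Protected: gitlab.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created variable:", v.Key)
}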
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/group_wikis.html -type GroupWiki struct { - Content string `json:"content"` - Encoding string `json:"encoding"` - Format WikiFormatValue `json:"format"` - Slug string `json:"slug"` - Title string `json:"title"` -} - -func (w GroupWiki) String() string { - return Stringify(w) -} - -// ListGroupWikisOptions represents the available ListGroupWikis options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_wikis.html#list-wiki-pages -type ListGroupWikisOptions struct { - WithContent *bool `url:"with_content,omitempty" json:"with_content,omitempty"` -} - -// ListGroupWikis lists all pages of the wiki of the given group id. -// When with_content is set, it also returns the content of the pages. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_wikis.html#list-wiki-pages -func (s *GroupWikisService) ListGroupWikis(gid interface{}, opt *ListGroupWikisOptions, options ...RequestOptionFunc) ([]*GroupWiki, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/wikis", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var gws []*GroupWiki - resp, err := s.client.Do(req, &gws) - if err != nil { - return nil, resp, err - } - - return gws, resp, nil -} - -// GetGroupWikiPageOptions represents options to GetGroupWikiPage -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_wikis.html#get-a-wiki-page -type GetGroupWikiPageOptions struct { - RenderHTML *bool `url:"render_html,omitempty" json:"render_html,omitempty"` - Version *string `url:"version,omitempty" json:"version,omitempty"` -} - -// GetGroupWikiPage gets a wiki page for a given group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_wikis.html#get-a-wiki-page -func (s *GroupWikisService) GetGroupWikiPage(gid interface{}, slug string, opt *GetGroupWikiPageOptions, options ...RequestOptionFunc) (*GroupWiki, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/wikis/%s", PathEscape(group), url.PathEscape(slug)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - gw := new(GroupWiki) - resp, err := s.client.Do(req, gw) - if err != nil { - return nil, resp, err - } - - return gw, resp, nil -} - -// CreateGroupWikiPageOptions represents options to CreateGroupWikiPage. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_wikis.html#create-a-new-wiki-page -type CreateGroupWikiPageOptions struct { - Content *string `url:"content,omitempty" json:"content,omitempty"` - Title *string `url:"title,omitempty" json:"title,omitempty"` - Format *WikiFormatValue `url:"format,omitempty" json:"format,omitempty"` -} - -// CreateGroupWikiPage creates a new wiki page for the given group with -// the given title, slug, and content. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_wikis.html#create-a-new-wiki-page -func (s *GroupWikisService) CreateGroupWikiPage(gid interface{}, opt *CreateGroupWikiPageOptions, options ...RequestOptionFunc) (*GroupWiki, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/wikis", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - w := new(GroupWiki) - resp, err := s.client.Do(req, w) - if err != nil { - return nil, resp, err - } - - return w, resp, nil -} - -// EditGroupWikiPageOptions represents options to EditGroupWikiPage. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_wikis.html#edit-an-existing-wiki-page -type EditGroupWikiPageOptions struct { - Content *string `url:"content,omitempty" json:"content,omitempty"` - Title *string `url:"title,omitempty" json:"title,omitempty"` - Format *WikiFormatValue `url:"format,omitempty" json:"format,omitempty"` -} - -// EditGroupWikiPage updates an existing wiki page. At least one parameter is -// required to update the wiki page. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_wikis.html#edit-an-existing-wiki-page -func (s *GroupWikisService) EditGroupWikiPage(gid interface{}, slug string, opt *EditGroupWikiPageOptions, options ...RequestOptionFunc) (*GroupWiki, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/wikis/%s", PathEscape(group), url.PathEscape(slug)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - w := new(GroupWiki) - resp, err := s.client.Do(req, w) - if err != nil { - return nil, resp, err - } - - return w, resp, nil -} - -// DeleteGroupWikiPage deletes a wiki page with a given slug. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/group_wikis.html#delete-a-wiki-page -func (s *GroupWikisService) DeleteGroupWikiPage(gid interface{}, slug string, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/wikis/%s", PathEscape(group), url.PathEscape(slug)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/groups.go b/vendor/github.com/xanzy/go-gitlab/groups.go deleted file mode 100644 index 34f0cab662..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/groups.go +++ /dev/null @@ -1,1177 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
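[Reviewer note, not part of the applied diff: a minimal sketch of the GroupWikisService API removed above; the group path, title, and content are placeholders.]

package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("glpat-placeholder") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// Create the group's wiki home page.
	page, _, err := git.GroupWikis.CreateGroupWikiPage("my-org/my-group", &gitlab.CreateGroupWikiPageOptions{
		Title:   gitlab.String("Home"),
		Content: gitlab.String("Welcome to the group wiki."),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created wiki page with slug:", page.Slug)
}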
-// - -package gitlab - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "time" - - retryablehttp "github.com/hashicorp/go-retryablehttp" -) - -// GroupsService handles communication with the group related methods of -// the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html -type GroupsService struct { - client *Client -} - -// Group represents a GitLab group. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html -type Group struct { - ID int `json:"id"` - Name string `json:"name"` - Path string `json:"path"` - Description string `json:"description"` - MembershipLock bool `json:"membership_lock"` - Visibility VisibilityValue `json:"visibility"` - LFSEnabled bool `json:"lfs_enabled"` - DefaultBranchProtectionDefaults struct { - AllowedToPush []*GroupAccessLevel `json:"allowed_to_push"` - AllowForcePush bool `json:"allow_force_push"` - AllowedToMerge []*GroupAccessLevel `json:"allowed_to_merge"` - DeveloperCanInitialPush bool `json:"developer_can_initial_push"` - } `json:"default_branch_protection_defaults"` - AvatarURL string `json:"avatar_url"` - WebURL string `json:"web_url"` - RequestAccessEnabled bool `json:"request_access_enabled"` - RepositoryStorage string `json:"repository_storage"` - FullName string `json:"full_name"` - FullPath string `json:"full_path"` - FileTemplateProjectID int `json:"file_template_project_id"` - ParentID int `json:"parent_id"` - Projects []*Project `json:"projects"` - Statistics *Statistics `json:"statistics"` - CustomAttributes []*CustomAttribute `json:"custom_attributes"` - ShareWithGroupLock bool `json:"share_with_group_lock"` - RequireTwoFactorAuth bool `json:"require_two_factor_authentication"` - TwoFactorGracePeriod int `json:"two_factor_grace_period"` - ProjectCreationLevel ProjectCreationLevelValue `json:"project_creation_level"` - AutoDevopsEnabled bool `json:"auto_devops_enabled"` - SubGroupCreationLevel SubGroupCreationLevelValue `json:"subgroup_creation_level"` - EmailsEnabled bool `json:"emails_enabled"` - MentionsDisabled bool `json:"mentions_disabled"` - RunnersToken string `json:"runners_token"` - SharedProjects []*Project `json:"shared_projects"` - SharedRunnersSetting SharedRunnersSettingValue `json:"shared_runners_setting"` - SharedWithGroups []struct { - GroupID int `json:"group_id"` - GroupName string `json:"group_name"` - GroupFullPath string `json:"group_full_path"` - GroupAccessLevel int `json:"group_access_level"` - ExpiresAt *ISOTime `json:"expires_at"` - } `json:"shared_with_groups"` - LDAPCN string `json:"ldap_cn"` - LDAPAccess AccessLevelValue `json:"ldap_access"` - LDAPGroupLinks []*LDAPGroupLink `json:"ldap_group_links"` - SAMLGroupLinks []*SAMLGroupLink `json:"saml_group_links"` - SharedRunnersMinutesLimit int `json:"shared_runners_minutes_limit"` - ExtraSharedRunnersMinutesLimit int `json:"extra_shared_runners_minutes_limit"` - PreventForkingOutsideGroup bool `json:"prevent_forking_outside_group"` - MarkedForDeletionOn *ISOTime `json:"marked_for_deletion_on"` - CreatedAt *time.Time `json:"created_at"` - IPRestrictionRanges string `json:"ip_restriction_ranges"` - WikiAccessLevel AccessControlValue `json:"wiki_access_level"` - - // Deprecated: Use EmailsEnabled instead - EmailsDisabled bool `json:"emails_disabled"` - - // Deprecated: Use DefaultBranchProtectionDefaults instead - DefaultBranchProtection int `json:"default_branch_protection"` -} - -// GroupAccessLevel represents default branch protection defaults access levels. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#options-for-default_branch_protection_defaults -type GroupAccessLevel struct { - AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` -} - -// GroupAvatar represents a GitLab group avatar. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html -type GroupAvatar struct { - Filename string - Image io.Reader -} - -// MarshalJSON implements the json.Marshaler interface. -func (a *GroupAvatar) MarshalJSON() ([]byte, error) { - if a.Filename == "" && a.Image == nil { - return []byte(`""`), nil - } - type alias GroupAvatar - return json.Marshal((*alias)(a)) -} - -// LDAPGroupLink represents a GitLab LDAP group link. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#ldap-group-links -type LDAPGroupLink struct { - CN string `json:"cn"` - Filter string `json:"filter"` - GroupAccess AccessLevelValue `json:"group_access"` - Provider string `json:"provider"` -} - -// SAMLGroupLink represents a GitLab SAML group link. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#saml-group-links -type SAMLGroupLink struct { - Name string `json:"name"` - AccessLevel AccessLevelValue `json:"access_level"` - MemberRoleID int `json:"member_role_id,omitempty"` -} - -// ListGroupsOptions represents the available ListGroups() options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#list-groups -type ListGroupsOptions struct { - ListOptions - SkipGroups *[]int `url:"skip_groups,omitempty" del:"," json:"skip_groups,omitempty"` - AllAvailable *bool `url:"all_available,omitempty" json:"all_available,omitempty"` - Search *string `url:"search,omitempty" json:"search,omitempty"` - OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` - Sort *string `url:"sort,omitempty" json:"sort,omitempty"` - Statistics *bool `url:"statistics,omitempty" json:"statistics,omitempty"` - WithCustomAttributes *bool `url:"with_custom_attributes,omitempty" json:"with_custom_attributes,omitempty"` - Owned *bool `url:"owned,omitempty" json:"owned,omitempty"` - MinAccessLevel *AccessLevelValue `url:"min_access_level,omitempty" json:"min_access_level,omitempty"` - TopLevelOnly *bool `url:"top_level_only,omitempty" json:"top_level_only,omitempty"` - RepositoryStorage *string `url:"repository_storage,omitempty" json:"repository_storage,omitempty"` -} - -// ListGroups gets a list of groups (as user: my groups, as admin: all groups). -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#list-groups -func (s *GroupsService) ListGroups(opt *ListGroupsOptions, options ...RequestOptionFunc) ([]*Group, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "groups", opt, options) - if err != nil { - return nil, nil, err - } - - var gs []*Group - resp, err := s.client.Do(req, &gs) - if err != nil { - return nil, resp, err - } - - return gs, resp, nil -} - -// ListSubGroupsOptions represents the available ListSubGroups() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#list-a-groups-subgroups -type ListSubGroupsOptions ListGroupsOptions - -// ListSubGroups gets a list of subgroups for a given group. 
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/groups.html#list-a-groups-subgroups
-func (s *GroupsService) ListSubGroups(gid interface{}, opt *ListSubGroupsOptions, options ...RequestOptionFunc) ([]*Group, *Response, error) {
-	group, err := parseID(gid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("groups/%s/subgroups", PathEscape(group))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var gs []*Group
-	resp, err := s.client.Do(req, &gs)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return gs, resp, nil
-}
-
-// ListDescendantGroupsOptions represents the available ListDescendantGroups()
-// options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/groups.html#list-a-groups-descendant-groups
-type ListDescendantGroupsOptions ListGroupsOptions
-
-// ListDescendantGroups gets a list of descendant groups of a given group.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/groups.html#list-a-groups-descendant-groups
-func (s *GroupsService) ListDescendantGroups(gid interface{}, opt *ListDescendantGroupsOptions, options ...RequestOptionFunc) ([]*Group, *Response, error) {
-	group, err := parseID(gid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("groups/%s/descendant_groups", PathEscape(group))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var gs []*Group
-	resp, err := s.client.Do(req, &gs)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return gs, resp, nil
-}
-
-// ListGroupProjectsOptions represents the available ListGroupProjects() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/groups.html#list-a-groups-projects
-type ListGroupProjectsOptions struct {
-	ListOptions
-	Archived                 *bool             `url:"archived,omitempty" json:"archived,omitempty"`
-	IncludeSubGroups         *bool             `url:"include_subgroups,omitempty" json:"include_subgroups,omitempty"`
-	MinAccessLevel           *AccessLevelValue `url:"min_access_level,omitempty" json:"min_access_level,omitempty"`
-	OrderBy                  *string           `url:"order_by,omitempty" json:"order_by,omitempty"`
-	Owned                    *bool             `url:"owned,omitempty" json:"owned,omitempty"`
-	Search                   *string           `url:"search,omitempty" json:"search,omitempty"`
-	Simple                   *bool             `url:"simple,omitempty" json:"simple,omitempty"`
-	Sort                     *string           `url:"sort,omitempty" json:"sort,omitempty"`
-	Starred                  *bool             `url:"starred,omitempty" json:"starred,omitempty"`
-	Topic                    *string           `url:"topic,omitempty" json:"topic,omitempty"`
-	Visibility               *VisibilityValue  `url:"visibility,omitempty" json:"visibility,omitempty"`
-	WithCustomAttributes     *bool             `url:"with_custom_attributes,omitempty" json:"with_custom_attributes,omitempty"`
-	WithIssuesEnabled        *bool             `url:"with_issues_enabled,omitempty" json:"with_issues_enabled,omitempty"`
-	WithMergeRequestsEnabled *bool             `url:"with_merge_requests_enabled,omitempty" json:"with_merge_requests_enabled,omitempty"`
-	WithSecurityReports      *bool             `url:"with_security_reports,omitempty" json:"with_security_reports,omitempty"`
-	WithShared               *bool             `url:"with_shared,omitempty" json:"with_shared,omitempty"`
-}
-
-// ListGroupProjects gets a list of group projects.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/groups.html#list-a-groups-projects
-func (s *GroupsService) ListGroupProjects(gid interface{}, opt *ListGroupProjectsOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) {
-	group, err := parseID(gid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("groups/%s/projects", PathEscape(group))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var ps []*Project
-	resp, err := s.client.Do(req, &ps)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return ps, resp, nil
-}
-
-// GetGroupOptions represents the available GetGroup() options.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#details-of-a-group
-type GetGroupOptions struct {
-	ListOptions
-	WithCustomAttributes *bool `url:"with_custom_attributes,omitempty" json:"with_custom_attributes,omitempty"`
-	WithProjects         *bool `url:"with_projects,omitempty" json:"with_projects,omitempty"`
-}
-
-// GetGroup gets all details of a group.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#details-of-a-group
-func (s *GroupsService) GetGroup(gid interface{}, opt *GetGroupOptions, options ...RequestOptionFunc) (*Group, *Response, error) {
-	group, err := parseID(gid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("groups/%s", PathEscape(group))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	g := new(Group)
-	resp, err := s.client.Do(req, g)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return g, resp, nil
-}
-
-// DownloadAvatar downloads a group avatar.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/groups.html#download-a-group-avatar
-func (s *GroupsService) DownloadAvatar(gid interface{}, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) {
-	group, err := parseID(gid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("groups/%s/avatar", PathEscape(group))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	avatar := new(bytes.Buffer)
-	resp, err := s.client.Do(req, avatar)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return bytes.NewReader(avatar.Bytes()), resp, err
-}
-
-// CreateGroupOptions represents the available CreateGroup() options.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#new-group
-type CreateGroupOptions struct {
-	Name                            *string                                 `url:"name,omitempty" json:"name,omitempty"`
-	Path                            *string                                 `url:"path,omitempty" json:"path,omitempty"`
-	Avatar                          *GroupAvatar                            `url:"-" json:"-"`
-	Description                     *string                                 `url:"description,omitempty" json:"description,omitempty"`
-	MembershipLock                  *bool                                   `url:"membership_lock,omitempty" json:"membership_lock,omitempty"`
-	Visibility                      *VisibilityValue                        `url:"visibility,omitempty" json:"visibility,omitempty"`
-	ShareWithGroupLock              *bool                                   `url:"share_with_group_lock,omitempty" json:"share_with_group_lock,omitempty"`
-	RequireTwoFactorAuth            *bool                                   `url:"require_two_factor_authentication,omitempty" json:"require_two_factor_authentication,omitempty"`
-	TwoFactorGracePeriod            *int                                    `url:"two_factor_grace_period,omitempty" json:"two_factor_grace_period,omitempty"`
-	ProjectCreationLevel            *ProjectCreationLevelValue              `url:"project_creation_level,omitempty" json:"project_creation_level,omitempty"`
-	AutoDevopsEnabled               *bool                                   `url:"auto_devops_enabled,omitempty" json:"auto_devops_enabled,omitempty"`
-	SubGroupCreationLevel           *SubGroupCreationLevelValue             `url:"subgroup_creation_level,omitempty" json:"subgroup_creation_level,omitempty"`
-	EmailsEnabled                   *bool                                   `url:"emails_enabled,omitempty" json:"emails_enabled,omitempty"`
-	MentionsDisabled                *bool                                   `url:"mentions_disabled,omitempty" json:"mentions_disabled,omitempty"`
-	LFSEnabled                      *bool                                   `url:"lfs_enabled,omitempty" json:"lfs_enabled,omitempty"`
-	DefaultBranchProtectionDefaults *DefaultBranchProtectionDefaultsOptions `url:"default_branch_protection_defaults,omitempty" json:"default_branch_protection_defaults,omitempty"`
-	RequestAccessEnabled            *bool                                   `url:"request_access_enabled,omitempty" json:"request_access_enabled,omitempty"`
-	ParentID                        *int                                    `url:"parent_id,omitempty" json:"parent_id,omitempty"`
-	SharedRunnersMinutesLimit       *int                                    `url:"shared_runners_minutes_limit,omitempty" json:"shared_runners_minutes_limit,omitempty"`
-	ExtraSharedRunnersMinutesLimit  *int                                    `url:"extra_shared_runners_minutes_limit,omitempty" json:"extra_shared_runners_minutes_limit,omitempty"`
-	IPRestrictionRanges             *string                                 `url:"ip_restriction_ranges,omitempty" json:"ip_restriction_ranges,omitempty"`
-	WikiAccessLevel                 *AccessControlValue                     `url:"wiki_access_level,omitempty" json:"wiki_access_level,omitempty"`
-
-	// Deprecated: Use EmailsEnabled instead
-	EmailsDisabled *bool `url:"emails_disabled,omitempty" json:"emails_disabled,omitempty"`
-
-	// Deprecated: Use DefaultBranchProtectionDefaults instead
-	DefaultBranchProtection *int `url:"default_branch_protection,omitempty" json:"default_branch_protection,omitempty"`
-}
-
-// DefaultBranchProtectionDefaultsOptions represents the available options for
-// using default_branch_protection_defaults in CreateGroup() or UpdateGroup()
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/groups.html#options-for-default_branch_protection_defaults
-type DefaultBranchProtectionDefaultsOptions struct {
-	AllowedToPush           *[]*GroupAccessLevel `url:"allowed_to_push,omitempty" json:"allowed_to_push,omitempty"`
-	AllowForcePush          *bool                `url:"allow_force_push,omitempty" json:"allow_force_push,omitempty"`
-	AllowedToMerge          *[]*GroupAccessLevel `url:"allowed_to_merge,omitempty" json:"allowed_to_merge,omitempty"`
-	DeveloperCanInitialPush *bool                `url:"developer_can_initial_push,omitempty" json:"developer_can_initial_push,omitempty"`
-}
-
-// CreateGroup creates a new project group. Available only for users who can
-// create groups.
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#new-group -func (s *GroupsService) CreateGroup(opt *CreateGroupOptions, options ...RequestOptionFunc) (*Group, *Response, error) { - var err error - var req *retryablehttp.Request - - if opt.Avatar == nil { - req, err = s.client.NewRequest(http.MethodPost, "groups", opt, options) - } else { - req, err = s.client.UploadRequest( - http.MethodPost, - "groups", - opt.Avatar.Image, - opt.Avatar.Filename, - UploadAvatar, - opt, - options, - ) - } - if err != nil { - return nil, nil, err - } - - g := new(Group) - resp, err := s.client.Do(req, g) - if err != nil { - return nil, resp, err - } - - return g, resp, nil -} - -// TransferGroup transfers a project to the Group namespace. Available only -// for admin. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#transfer-project-to-group -func (s *GroupsService) TransferGroup(gid interface{}, pid interface{}, options ...RequestOptionFunc) (*Group, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/projects/%s", PathEscape(group), PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - g := new(Group) - resp, err := s.client.Do(req, g) - if err != nil { - return nil, resp, err - } - - return g, resp, nil -} - -// TransferSubGroupOptions represents the available TransferSubGroup() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#transfer-a-group-to-a-new-parent-group--turn-a-subgroup-to-a-top-level-group -type TransferSubGroupOptions struct { - GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` -} - -// TransferSubGroup transfers a group to a new parent group or turn a subgroup -// to a top-level group. Available to administrators and users. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#transfer-a-group-to-a-new-parent-group--turn-a-subgroup-to-a-top-level-group -func (s *GroupsService) TransferSubGroup(gid interface{}, opt *TransferSubGroupOptions, options ...RequestOptionFunc) (*Group, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/transfer", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - g := new(Group) - resp, err := s.client.Do(req, g) - if err != nil { - return nil, resp, err - } - - return g, resp, nil -} - -// UpdateGroupOptions represents the available UpdateGroup() options. 
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#update-group -type UpdateGroupOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - Path *string `url:"path,omitempty" json:"path,omitempty"` - Avatar *GroupAvatar `url:"-" json:"avatar,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - MembershipLock *bool `url:"membership_lock,omitempty" json:"membership_lock,omitempty"` - Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"` - ShareWithGroupLock *bool `url:"share_with_group_lock,omitempty" json:"share_with_group_lock,omitempty"` - RequireTwoFactorAuth *bool `url:"require_two_factor_authentication,omitempty" json:"require_two_factor_authentication,omitempty"` - TwoFactorGracePeriod *int `url:"two_factor_grace_period,omitempty" json:"two_factor_grace_period,omitempty"` - ProjectCreationLevel *ProjectCreationLevelValue `url:"project_creation_level,omitempty" json:"project_creation_level,omitempty"` - AutoDevopsEnabled *bool `url:"auto_devops_enabled,omitempty" json:"auto_devops_enabled,omitempty"` - SubGroupCreationLevel *SubGroupCreationLevelValue `url:"subgroup_creation_level,omitempty" json:"subgroup_creation_level,omitempty"` - EmailsEnabled *bool `url:"emails_enabled,omitempty" json:"emails_enabled,omitempty"` - MentionsDisabled *bool `url:"mentions_disabled,omitempty" json:"mentions_disabled,omitempty"` - LFSEnabled *bool `url:"lfs_enabled,omitempty" json:"lfs_enabled,omitempty"` - RequestAccessEnabled *bool `url:"request_access_enabled,omitempty" json:"request_access_enabled,omitempty"` - DefaultBranchProtectionDefaults *DefaultBranchProtectionDefaultsOptions `url:"default_branch_protection_defaults,omitempty" json:"default_branch_protection_defaults,omitempty"` - FileTemplateProjectID *int `url:"file_template_project_id,omitempty" json:"file_template_project_id,omitempty"` - SharedRunnersMinutesLimit *int `url:"shared_runners_minutes_limit,omitempty" json:"shared_runners_minutes_limit,omitempty"` - ExtraSharedRunnersMinutesLimit *int `url:"extra_shared_runners_minutes_limit,omitempty" json:"extra_shared_runners_minutes_limit,omitempty"` - PreventForkingOutsideGroup *bool `url:"prevent_forking_outside_group,omitempty" json:"prevent_forking_outside_group,omitempty"` - SharedRunnersSetting *SharedRunnersSettingValue `url:"shared_runners_setting,omitempty" json:"shared_runners_setting,omitempty"` - PreventSharingGroupsOutsideHierarchy *bool `url:"prevent_sharing_groups_outside_hierarchy,omitempty" json:"prevent_sharing_groups_outside_hierarchy,omitempty"` - IPRestrictionRanges *string `url:"ip_restriction_ranges,omitempty" json:"ip_restriction_ranges,omitempty"` - WikiAccessLevel *AccessControlValue `url:"wiki_access_level,omitempty" json:"wiki_access_level,omitempty"` - - // Deprecated: Use EmailsEnabled instead - EmailsDisabled *bool `url:"emails_disabled,omitempty" json:"emails_disabled,omitempty"` - - // Deprecated: Use DefaultBranchProtectionDefaults instead - DefaultBranchProtection *int `url:"default_branch_protection,omitempty" json:"default_branch_protection,omitempty"` -} - -// UpdateGroup updates an existing group; only available to group owners and -// administrators. 
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#update-group
-func (s *GroupsService) UpdateGroup(gid interface{}, opt *UpdateGroupOptions, options ...RequestOptionFunc) (*Group, *Response, error) {
-	group, err := parseID(gid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("groups/%s", PathEscape(group))
-
-	var req *retryablehttp.Request
-
-	if opt.Avatar == nil || (opt.Avatar.Filename == "" && opt.Avatar.Image == nil) {
-		req, err = s.client.NewRequest(http.MethodPut, u, opt, options)
-	} else {
-		req, err = s.client.UploadRequest(
-			http.MethodPut,
-			u,
-			opt.Avatar.Image,
-			opt.Avatar.Filename,
-			UploadAvatar,
-			opt,
-			options,
-		)
-	}
-	if err != nil {
-		return nil, nil, err
-	}
-
-	g := new(Group)
-	resp, err := s.client.Do(req, g)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return g, resp, nil
-}
-
-// UploadAvatar uploads a group avatar.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/groups.html#upload-a-group-avatar
-func (s *GroupsService) UploadAvatar(gid interface{}, avatar io.Reader, filename string, options ...RequestOptionFunc) (*Group, *Response, error) {
-	group, err := parseID(gid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("groups/%s", PathEscape(group))
-
-	req, err := s.client.UploadRequest(
-		http.MethodPut,
-		u,
-		avatar,
-		filename,
-		UploadAvatar,
-		nil,
-		options,
-	)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	g := new(Group)
-	resp, err := s.client.Do(req, g)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return g, resp, nil
-}
-
-// DeleteGroupOptions represents the available DeleteGroup() options.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#remove-group
-type DeleteGroupOptions struct {
-	PermanentlyRemove *bool   `url:"permanently_remove,omitempty" json:"permanently_remove,omitempty"`
-	FullPath          *string `url:"full_path,omitempty" json:"full_path,omitempty"`
-}
-
-// DeleteGroup removes a group with all projects inside.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#remove-group
-func (s *GroupsService) DeleteGroup(gid interface{}, opt *DeleteGroupOptions, options ...RequestOptionFunc) (*Response, error) {
-	group, err := parseID(gid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("groups/%s", PathEscape(group))
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, opt, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// RestoreGroup restores a previously deleted group.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/groups.html#restore-group-marked-for-deletion
-func (s *GroupsService) RestoreGroup(gid interface{}, options ...RequestOptionFunc) (*Group, *Response, error) {
-	group, err := parseID(gid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("groups/%s/restore", PathEscape(group))
-
-	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	g := new(Group)
-	resp, err := s.client.Do(req, g)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return g, resp, nil
-}
-
-// SearchGroup gets all groups that match your string in their name or path.
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#search-for-group -func (s *GroupsService) SearchGroup(query string, options ...RequestOptionFunc) ([]*Group, *Response, error) { - var q struct { - Search string `url:"search,omitempty" json:"search,omitempty"` - } - q.Search = query - - req, err := s.client.NewRequest(http.MethodGet, "groups", &q, options) - if err != nil { - return nil, nil, err - } - - var gs []*Group - resp, err := s.client.Do(req, &gs) - if err != nil { - return nil, resp, err - } - - return gs, resp, nil -} - -// ListProvisionedUsersOptions represents the available ListProvisionedUsers() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#list-provisioned-users -type ListProvisionedUsersOptions struct { - ListOptions - Username *string `url:"username,omitempty" json:"username,omitempty"` - Search *string `url:"search,omitempty" json:"search,omitempty"` - Active *bool `url:"active,omitempty" json:"active,omitempty"` - Blocked *bool `url:"blocked,omitempty" json:"blocked,omitempty"` - CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"` - CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` -} - -// ListProvisionedUsers gets a list of users provisioned by the given group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#list-provisioned-users -func (s *GroupsService) ListProvisionedUsers(gid interface{}, opt *ListProvisionedUsersOptions, options ...RequestOptionFunc) ([]*User, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/provisioned_users", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var us []*User - resp, err := s.client.Do(req, &us) - if err != nil { - return nil, resp, err - } - - return us, resp, nil -} - -// ListGroupLDAPLinks lists the group's LDAP links. Available only for users who -// can edit groups. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#list-ldap-group-links -func (s *GroupsService) ListGroupLDAPLinks(gid interface{}, options ...RequestOptionFunc) ([]*LDAPGroupLink, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/ldap_group_links", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var gls []*LDAPGroupLink - resp, err := s.client.Do(req, &gls) - if err != nil { - return nil, resp, err - } - - return gls, resp, nil -} - -// AddGroupLDAPLinkOptions represents the available AddGroupLDAPLink() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#add-ldap-group-link-with-cn-or-filter -type AddGroupLDAPLinkOptions struct { - CN *string `url:"cn,omitempty" json:"cn,omitempty"` - Filter *string `url:"filter,omitempty" json:"filter,omitempty"` - GroupAccess *AccessLevelValue `url:"group_access,omitempty" json:"group_access,omitempty"` - Provider *string `url:"provider,omitempty" json:"provider,omitempty"` -} - -// DeleteGroupLDAPLinkWithCNOrFilterOptions represents the available DeleteGroupLDAPLinkWithCNOrFilter() options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#delete-ldap-group-link-with-cn-or-filter -type DeleteGroupLDAPLinkWithCNOrFilterOptions struct { - CN *string `url:"cn,omitempty" json:"cn,omitempty"` - Filter *string `url:"filter,omitempty" json:"filter,omitempty"` - Provider *string `url:"provider,omitempty" json:"provider,omitempty"` -} - -// AddGroupLDAPLink creates a new group LDAP link. Available only for users who -// can edit groups. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#add-ldap-group-link-with-cn-or-filter -func (s *GroupsService) AddGroupLDAPLink(gid interface{}, opt *AddGroupLDAPLinkOptions, options ...RequestOptionFunc) (*LDAPGroupLink, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/ldap_group_links", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - gl := new(LDAPGroupLink) - resp, err := s.client.Do(req, gl) - if err != nil { - return nil, resp, err - } - - return gl, resp, nil -} - -// DeleteGroupLDAPLink deletes a group LDAP link. Available only for users who -// can edit groups. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#delete-ldap-group-link -func (s *GroupsService) DeleteGroupLDAPLink(gid interface{}, cn string, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/ldap_group_links/%s", PathEscape(group), PathEscape(cn)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// DeleteGroupLDAPLinkWithCNOrFilter deletes a group LDAP link. Available only for users who -// can edit groups. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#delete-ldap-group-link-with-cn-or-filter -func (s *GroupsService) DeleteGroupLDAPLinkWithCNOrFilter(gid interface{}, opts *DeleteGroupLDAPLinkWithCNOrFilterOptions, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/ldap_group_links", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodDelete, u, opts, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// DeleteGroupLDAPLinkForProvider deletes a group LDAP link from a specific -// provider. Available only for users who can edit groups. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#delete-ldap-group-link -func (s *GroupsService) DeleteGroupLDAPLinkForProvider(gid interface{}, provider, cn string, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf( - "groups/%s/ldap_group_links/%s/%s", - PathEscape(group), - PathEscape(provider), - PathEscape(cn), - ) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// ListGroupSAMLLinks lists the group's SAML links. Available only for users who -// can edit groups. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#list-saml-group-links -func (s *GroupsService) ListGroupSAMLLinks(gid interface{}, options ...RequestOptionFunc) ([]*SAMLGroupLink, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/saml_group_links", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var gl []*SAMLGroupLink - resp, err := s.client.Do(req, &gl) - if err != nil { - return nil, resp, err - } - - return gl, resp, nil -} - -// GetGroupSAMLLink get a specific group SAML link. Available only for users who -// can edit groups. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#get-saml-group-link -func (s *GroupsService) GetGroupSAMLLink(gid interface{}, samlGroupName string, options ...RequestOptionFunc) (*SAMLGroupLink, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/saml_group_links/%s", PathEscape(group), PathEscape(samlGroupName)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - gl := new(SAMLGroupLink) - resp, err := s.client.Do(req, &gl) - if err != nil { - return nil, resp, err - } - - return gl, resp, nil -} - -// AddGroupSAMLLinkOptions represents the available AddGroupSAMLLink() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#add-saml-group-link -type AddGroupSAMLLinkOptions struct { - SAMLGroupName *string `url:"saml_group_name,omitempty" json:"saml_group_name,omitempty"` - AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` - MemberRoleID *int `url:"member_role_id,omitempty" json:"member_role_id,omitempty"` -} - -// AddGroupSAMLLink creates a new group SAML link. Available only for users who -// can edit groups. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#add-saml-group-link -func (s *GroupsService) AddGroupSAMLLink(gid interface{}, opt *AddGroupSAMLLinkOptions, options ...RequestOptionFunc) (*SAMLGroupLink, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/saml_group_links", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - gl := new(SAMLGroupLink) - resp, err := s.client.Do(req, &gl) - if err != nil { - return nil, resp, err - } - - return gl, resp, nil -} - -// DeleteGroupSAMLLink deletes a group SAML link. Available only for users who -// can edit groups. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#delete-saml-group-link -func (s *GroupsService) DeleteGroupSAMLLink(gid interface{}, samlGroupName string, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/saml_group_links/%s", PathEscape(group), PathEscape(samlGroupName)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// ShareGroupWithGroupOptions represents the available ShareGroupWithGroup() options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#share-groups-with-groups -type ShareGroupWithGroupOptions struct { - GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` - GroupAccess *AccessLevelValue `url:"group_access,omitempty" json:"group_access,omitempty"` - ExpiresAt *ISOTime `url:"expires_at,omitempty" json:"expires_at,omitempty"` -} - -// ShareGroupWithGroup shares a group with another group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#create-a-link-to-share-a-group-with-another-group -func (s *GroupsService) ShareGroupWithGroup(gid interface{}, opt *ShareGroupWithGroupOptions, options ...RequestOptionFunc) (*Group, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/share", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - g := new(Group) - resp, err := s.client.Do(req, g) - if err != nil { - return nil, resp, err - } - - return g, resp, nil -} - -// UnshareGroupFromGroup unshares a group from another group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#delete-link-sharing-group-with-another-group -func (s *GroupsService) UnshareGroupFromGroup(gid interface{}, groupID int, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/share/%d", PathEscape(group), groupID) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// GroupPushRules represents a group push rule. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#get-group-push-rules -type GroupPushRules struct { - ID int `json:"id"` - CreatedAt *time.Time `json:"created_at"` - CommitMessageRegex string `json:"commit_message_regex"` - CommitMessageNegativeRegex string `json:"commit_message_negative_regex"` - BranchNameRegex string `json:"branch_name_regex"` - DenyDeleteTag bool `json:"deny_delete_tag"` - MemberCheck bool `json:"member_check"` - PreventSecrets bool `json:"prevent_secrets"` - AuthorEmailRegex string `json:"author_email_regex"` - FileNameRegex string `json:"file_name_regex"` - MaxFileSize int `json:"max_file_size"` - CommitCommitterCheck bool `json:"commit_committer_check"` - CommitCommitterNameCheck bool `json:"commit_committer_name_check"` - RejectUnsignedCommits bool `json:"reject_unsigned_commits"` - RejectNonDCOCommits bool `json:"reject_non_dco_commits"` -} - -// GetGroupPushRules gets the push rules of a group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#get-group-push-rules -func (s *GroupsService) GetGroupPushRules(gid interface{}, options ...RequestOptionFunc) (*GroupPushRules, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/push_rule", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - gpr := new(GroupPushRules) - resp, err := s.client.Do(req, gpr) - if err != nil { - return nil, resp, err - } - - return gpr, resp, nil -} - -// AddGroupPushRuleOptions represents the available AddGroupPushRule() -// options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#add-group-push-rule -type AddGroupPushRuleOptions struct { - AuthorEmailRegex *string `url:"author_email_regex,omitempty" json:"author_email_regex,omitempty"` - BranchNameRegex *string `url:"branch_name_regex,omitempty" json:"branch_name_regex,omitempty"` - CommitCommitterCheck *bool `url:"commit_committer_check,omitempty" json:"commit_committer_check,omitempty"` - CommitCommitterNameCheck *bool `url:"commit_committer_name_check,omitempty" json:"commit_committer_name_check,omitempty"` - CommitMessageNegativeRegex *string `url:"commit_message_negative_regex,omitempty" json:"commit_message_negative_regex,omitempty"` - CommitMessageRegex *string `url:"commit_message_regex,omitempty" json:"commit_message_regex,omitempty"` - DenyDeleteTag *bool `url:"deny_delete_tag,omitempty" json:"deny_delete_tag,omitempty"` - FileNameRegex *string `url:"file_name_regex,omitempty" json:"file_name_regex,omitempty"` - MaxFileSize *int `url:"max_file_size,omitempty" json:"max_file_size,omitempty"` - MemberCheck *bool `url:"member_check,omitempty" json:"member_check,omitempty"` - PreventSecrets *bool `url:"prevent_secrets,omitempty" json:"prevent_secrets,omitempty"` - RejectUnsignedCommits *bool `url:"reject_unsigned_commits,omitempty" json:"reject_unsigned_commits,omitempty"` - RejectNonDCOCommits *bool `url:"reject_non_dco_commits,omitempty" json:"reject_non_dco_commits,omitempty"` -} - -// AddGroupPushRule adds push rules to the specified group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#add-group-push-rule -func (s *GroupsService) AddGroupPushRule(gid interface{}, opt *AddGroupPushRuleOptions, options ...RequestOptionFunc) (*GroupPushRules, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/push_rule", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - gpr := new(GroupPushRules) - resp, err := s.client.Do(req, gpr) - if err != nil { - return nil, resp, err - } - - return gpr, resp, nil -} - -// EditGroupPushRuleOptions represents the available EditGroupPushRule() -// options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#edit-group-push-rule -type EditGroupPushRuleOptions struct { - AuthorEmailRegex *string `url:"author_email_regex,omitempty" json:"author_email_regex,omitempty"` - BranchNameRegex *string `url:"branch_name_regex,omitempty" json:"branch_name_regex,omitempty"` - CommitCommitterCheck *bool `url:"commit_committer_check,omitempty" json:"commit_committer_check,omitempty"` - CommitCommitterNameCheck *bool `url:"commit_committer_name_check,omitempty" json:"commit_committer_name_check,omitempty"` - CommitMessageNegativeRegex *string `url:"commit_message_negative_regex,omitempty" json:"commit_message_negative_regex,omitempty"` - CommitMessageRegex *string `url:"commit_message_regex,omitempty" json:"commit_message_regex,omitempty"` - DenyDeleteTag *bool `url:"deny_delete_tag,omitempty" json:"deny_delete_tag,omitempty"` - FileNameRegex *string `url:"file_name_regex,omitempty" json:"file_name_regex,omitempty"` - MaxFileSize *int `url:"max_file_size,omitempty" json:"max_file_size,omitempty"` - MemberCheck *bool `url:"member_check,omitempty" json:"member_check,omitempty"` - PreventSecrets *bool `url:"prevent_secrets,omitempty" json:"prevent_secrets,omitempty"` - RejectUnsignedCommits *bool `url:"reject_unsigned_commits,omitempty" json:"reject_unsigned_commits,omitempty"` - RejectNonDCOCommits *bool `url:"reject_non_dco_commits,omitempty" json:"reject_non_dco_commits,omitempty"` -} - -// EditGroupPushRule edits a push rule for a specified group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#edit-group-push-rule -func (s *GroupsService) EditGroupPushRule(gid interface{}, opt *EditGroupPushRuleOptions, options ...RequestOptionFunc) (*GroupPushRules, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/push_rule", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - gpr := new(GroupPushRules) - resp, err := s.client.Do(req, gpr) - if err != nil { - return nil, resp, err - } - - return gpr, resp, nil -} - -// DeleteGroupPushRule deletes the push rules of a group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#delete-group-push-rule -func (s *GroupsService) DeleteGroupPushRule(gid interface{}, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/push_rule", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/import.go b/vendor/github.com/xanzy/go-gitlab/import.go deleted file mode 100644 index a8164a70c6..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/import.go +++ /dev/null @@ -1,266 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "net/http" -) - -// ImportService handles communication with the import -// related methods of the GitLab API. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/import.html -type ImportService struct { - client *Client -} - -// GitHubImport represents the response from an import from GitHub. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/import.html#import-repository-from-github -type GitHubImport struct { - ID int `json:"id"` - Name string `json:"name"` - FullPath string `json:"full_path"` - FullName string `json:"full_name"` - RefsUrl string `json:"refs_url"` - ImportSource string `json:"import_source"` - ImportStatus string `json:"import_status"` - HumanImportStatusName string `json:"human_import_status_name"` - ProviderLink string `json:"provider_link"` - RelationType string `json:"relation_type"` - ImportWarning string `json:"import_warning"` -} - -func (s GitHubImport) String() string { - return Stringify(s) -} - -// ImportRepositoryFromGitHubOptions represents the available -// ImportRepositoryFromGitHub() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/import.html#import-repository-from-github -type ImportRepositoryFromGitHubOptions struct { - PersonalAccessToken *string `url:"personal_access_token,omitempty" json:"personal_access_token,omitempty"` - RepoID *int `url:"repo_id,omitempty" json:"repo_id,omitempty"` - NewName *string `url:"new_name,omitempty" json:"new_name,omitempty"` - TargetNamespace *string `url:"target_namespace,omitempty" json:"target_namespace,omitempty"` - GitHubHostname *string `url:"github_hostname,omitempty" json:"github_hostname,omitempty"` - OptionalStages struct { - SingleEndpointNotesImport *bool `url:"single_endpoint_notes_import,omitempty" json:"single_endpoint_notes_import,omitempty"` - AttachmentsImport *bool `url:"attachments_import,omitempty" json:"attachments_import,omitempty"` - CollaboratorsImport *bool `url:"collaborators_import,omitempty" json:"collaborators_import,omitempty"` - } `url:"optional_stages,omitempty" json:"optional_stages,omitempty"` - TimeoutStrategy *string `url:"timeout_strategy,omitempty" json:"timeout_strategy,omitempty"` -} - -// Import a repository from GitHub. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/import.html#import-repository-from-github -func (s *ImportService) ImportRepositoryFromGitHub(opt *ImportRepositoryFromGitHubOptions, options ...RequestOptionFunc) (*GitHubImport, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "import/github", opt, options) - if err != nil { - return nil, nil, err - } - - gi := new(GitHubImport) - resp, err := s.client.Do(req, gi) - if err != nil { - return nil, resp, err - } - - return gi, resp, nil -} - -// CancelledGitHubImport represents the response when canceling -// an import from GitHub. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/import.html#cancel-github-project-import -type CancelledGitHubImport struct { - ID int `json:"id"` - Name string `json:"name"` - FullPath string `json:"full_path"` - FullName string `json:"full_name"` - ImportSource string `json:"import_source"` - ImportStatus string `json:"import_status"` - HumanImportStatusName string `json:"human_import_status_name"` - ProviderLink string `json:"provider_link"` -} - -func (s CancelledGitHubImport) String() string { - return Stringify(s) -} - -// CancelGitHubProjectImportOptions represents the available -// CancelGitHubProjectImport() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/import.html#cancel-github-project-import -type CancelGitHubProjectImportOptions struct { - ProjectID *int `url:"project_id,omitempty" json:"project_id,omitempty"` -} - -// Cancel an import of a repository from GitHub. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/import.html#cancel-github-project-import -func (s *ImportService) CancelGitHubProjectImport(opt *CancelGitHubProjectImportOptions, options ...RequestOptionFunc) (*CancelledGitHubImport, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "import/github/cancel", opt, options) - if err != nil { - return nil, nil, err - } - - cgi := new(CancelledGitHubImport) - resp, err := s.client.Do(req, cgi) - if err != nil { - return nil, resp, err - } - - return cgi, resp, nil -} - -// ImportGitHubGistsIntoGitLabSnippetsOptions represents the available -// ImportGitHubGistsIntoGitLabSnippets() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/import.html#import-github-gists-into-gitlab-snippets -type ImportGitHubGistsIntoGitLabSnippetsOptions struct { - PersonalAccessToken *string `url:"personal_access_token,omitempty" json:"personal_access_token,omitempty"` -} - -// Import personal GitHub Gists into personal GitLab Snippets. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/import.html#import-github-gists-into-gitlab-snippets -func (s *ImportService) ImportGitHubGistsIntoGitLabSnippets(opt *ImportGitHubGistsIntoGitLabSnippetsOptions, options ...RequestOptionFunc) (*Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "import/github/gists", opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// BitbucketServerImport represents the response from an import from Bitbucket -// Server. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/import.html#import-repository-from-bitbucket-server -type BitbucketServerImport struct { - ID int `json:"id"` - Name string `json:"name"` - FullPath string `json:"full_path"` - FullName string `json:"full_name"` - RefsUrl string `json:"refs_url"` -} - -func (s BitbucketServerImport) String() string { - return Stringify(s) -} - -// ImportRepositoryFromBitbucketServerOptions represents the available ImportRepositoryFromBitbucketServer() options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/import.html#import-repository-from-bitbucket-server -type ImportRepositoryFromBitbucketServerOptions struct { - BitbucketServerUrl *string `url:"bitbucket_server_url,omitempty" json:"bitbucket_server_url,omitempty"` - BitbucketServerUsername *string `url:"bitbucket_server_username,omitempty" json:"bitbucket_server_username,omitempty"` - PersonalAccessToken *string `url:"personal_access_token,omitempty" json:"personal_access_token,omitempty"` - BitbucketServerProject *string `url:"bitbucket_server_project,omitempty" json:"bitbucket_server_project,omitempty"` - BitbucketServerRepo *string `url:"bitbucket_server_repo,omitempty" json:"bitbucket_server_repo,omitempty"` - NewName *string `url:"new_name,omitempty" json:"new_name,omitempty"` - NewNamespace *string `url:"new_namespace,omitempty" json:"new_namespace,omitempty"` - TimeoutStrategy *string `url:"timeout_strategy,omitempty" json:"timeout_strategy,omitempty"` -} - -// Import a repository from Bitbucket Server. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/import.html#import-repository-from-bitbucket-server -func (s *ImportService) ImportRepositoryFromBitbucketServer(opt *ImportRepositoryFromBitbucketServerOptions, options ...RequestOptionFunc) (*BitbucketServerImport, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "import/bitbucket_server", opt, options) - if err != nil { - return nil, nil, err - } - - bsi := new(BitbucketServerImport) - resp, err := s.client.Do(req, bsi) - if err != nil { - return nil, resp, err - } - - return bsi, resp, nil -} - -// BitbucketCloudImport represents the response from an import from Bitbucket -// Cloud. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/import.html#import-repository-from-bitbucket-cloud -type BitbucketCloudImport struct { - ID int `json:"id"` - Name string `json:"name"` - FullPath string `json:"full_path"` - FullName string `json:"full_name"` - RefsUrl string `json:"refs_url"` - ImportSource string `json:"import_source"` - ImportStatus string `json:"import_status"` - HumanImportStatusName string `json:"human_import_status_name"` - ProviderLink string `json:"provider_link"` - RelationType string `json:"relation_type"` - ImportWarning string `json:"import_warning"` -} - -func (s BitbucketCloudImport) String() string { - return Stringify(s) -} - -// ImportRepositoryFromBitbucketCloudOptions represents the available -// ImportRepositoryFromBitbucketCloud() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/import.html#import-repository-from-bitbucket-cloud -type ImportRepositoryFromBitbucketCloudOptions struct { - BitbucketUsername *string `url:"bitbucket_username,omitempty" json:"bitbucket_username,omitempty"` - BitbucketAppPassword *string `url:"bitbucket_app_password,omitempty" json:"bitbucket_app_password,omitempty"` - RepoPath *string `url:"repo_path,omitempty" json:"repo_path,omitempty"` - TargetNamespace *string `url:"target_namespace,omitempty" json:"target_namespace,omitempty"` - NewName *string `url:"new_name,omitempty" json:"new_name,omitempty"` -} - -// Import a repository from Bitbucket Cloud. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/import.html#import-repository-from-bitbucket-cloud -func (s *ImportService) ImportRepositoryFromBitbucketCloud(opt *ImportRepositoryFromBitbucketCloudOptions, options ...RequestOptionFunc) (*BitbucketCloudImport, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "import/bitbucket", opt, options) - if err != nil { - return nil, nil, err - } - - bci := new(BitbucketCloudImport) - resp, err := s.client.Do(req, bci) - if err != nil { - return nil, resp, err - } - - return bci, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/instance_clusters.go b/vendor/github.com/xanzy/go-gitlab/instance_clusters.go deleted file mode 100644 index 0760d11579..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/instance_clusters.go +++ /dev/null @@ -1,153 +0,0 @@ -// -// Copyright 2021, Serena Fang -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// InstanceClustersService handles communication with the -// instance clusters related methods of the GitLab API. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/instance_clusters.html -type InstanceClustersService struct { - client *Client -} - -// InstanceCluster represents a GitLab Instance Cluster. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/instance_clusters.html -type InstanceCluster struct { - ID int `json:"id"` - Name string `json:"name"` - Domain string `json:"domain"` - Managed bool `json:"managed"` - CreatedAt *time.Time `json:"created_at"` - ProviderType string `json:"provider_type"` - PlatformType string `json:"platform_type"` - EnvironmentScope string `json:"environment_scope"` - ClusterType string `json:"cluster_type"` - User *User `json:"user"` - PlatformKubernetes *PlatformKubernetes `json:"platform_kubernetes"` - ManagementProject *ManagementProject `json:"management_project"` -} - -func (v InstanceCluster) String() string { - return Stringify(v) -} - -// ListClusters gets a list of all instance clusters. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/instance_clusters.html#list-instance-clusters -func (s *InstanceClustersService) ListClusters(options ...RequestOptionFunc) ([]*InstanceCluster, *Response, error) { - u := "admin/clusters" - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var ics []*InstanceCluster - resp, err := s.client.Do(req, &ics) - if err != nil { - return nil, resp, err - } - - return ics, resp, nil -} - -// GetCluster gets an instance cluster. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/instance_clusters.html#get-a-single-instance-cluster -func (s *InstanceClustersService) GetCluster(cluster int, options ...RequestOptionFunc) (*InstanceCluster, *Response, error) { - u := fmt.Sprintf("admin/clusters/%d", cluster) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ic := new(InstanceCluster) - resp, err := s.client.Do(req, &ic) - if err != nil { - return nil, resp, err - } - - return ic, resp, nil -} - -// AddCluster adds an existing cluster to the instance. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/instance_clusters.html#add-existing-instance-cluster -func (s *InstanceClustersService) AddCluster(opt *AddClusterOptions, options ...RequestOptionFunc) (*InstanceCluster, *Response, error) { - u := "admin/clusters/add" - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - ic := new(InstanceCluster) - resp, err := s.client.Do(req, ic) - if err != nil { - return nil, resp, err - } - - return ic, resp, nil -} - -// EditCluster updates an existing instance cluster. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/instance_clusters.html#edit-instance-cluster -func (s *InstanceClustersService) EditCluster(cluster int, opt *EditClusterOptions, options ...RequestOptionFunc) (*InstanceCluster, *Response, error) { - u := fmt.Sprintf("admin/clusters/%d", cluster) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - ic := new(InstanceCluster) - resp, err := s.client.Do(req, ic) - if err != nil { - return nil, resp, err - } - - return ic, resp, nil -} - -// DeleteCluster deletes an existing instance cluster. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/instance_clusters.html#delete-instance-cluster -func (s *InstanceClustersService) DeleteCluster(cluster int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("admin/clusters/%d", cluster) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/instance_variables.go b/vendor/github.com/xanzy/go-gitlab/instance_variables.go deleted file mode 100644 index 58eef2b272..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/instance_variables.go +++ /dev/null @@ -1,186 +0,0 @@ -// -// Copyright 2021, Patrick Webster -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "net/url" -) - -// InstanceVariablesService handles communication with the -// instance level CI variables related methods of the GitLab API. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/instance_level_ci_variables.html -type InstanceVariablesService struct { - client *Client -} - -// InstanceVariable represents a GitLab instance level CI Variable. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/instance_level_ci_variables.html -type InstanceVariable struct { - Key string `json:"key"` - Value string `json:"value"` - VariableType VariableTypeValue `json:"variable_type"` - Protected bool `json:"protected"` - Masked bool `json:"masked"` - Raw bool `json:"raw"` - Description string `json:"description"` -} - -func (v InstanceVariable) String() string { - return Stringify(v) -} - -// ListInstanceVariablesOptions represents the available options for listing variables -// for an instance. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/instance_level_ci_variables.html#list-all-instance-variables -type ListInstanceVariablesOptions ListOptions - -// ListVariables gets a list of all variables for an instance. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/instance_level_ci_variables.html#list-all-instance-variables -func (s *InstanceVariablesService) ListVariables(opt *ListInstanceVariablesOptions, options ...RequestOptionFunc) ([]*InstanceVariable, *Response, error) { - u := "admin/ci/variables" - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var vs []*InstanceVariable - resp, err := s.client.Do(req, &vs) - if err != nil { - return nil, resp, err - } - - return vs, resp, nil -} - -// GetVariable gets a variable. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/instance_level_ci_variables.html#show-instance-variable-details -func (s *InstanceVariablesService) GetVariable(key string, options ...RequestOptionFunc) (*InstanceVariable, *Response, error) { - u := fmt.Sprintf("admin/ci/variables/%s", url.PathEscape(key)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - v := new(InstanceVariable) - resp, err := s.client.Do(req, v) - if err != nil { - return nil, resp, err - } - - return v, resp, nil -} - -// CreateInstanceVariableOptions represents the available CreateVariable() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/instance_level_ci_variables.html#create-instance-variable -type CreateInstanceVariableOptions struct { - Key *string `url:"key,omitempty" json:"key,omitempty"` - Value *string `url:"value,omitempty" json:"value,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - Masked *bool `url:"masked,omitempty" json:"masked,omitempty"` - Protected *bool `url:"protected,omitempty" json:"protected,omitempty"` - Raw *bool `url:"raw,omitempty" json:"raw,omitempty"` - VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` -} - -// CreateVariable creates a new instance level CI variable. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/instance_level_ci_variables.html#create-instance-variable -func (s *InstanceVariablesService) CreateVariable(opt *CreateInstanceVariableOptions, options ...RequestOptionFunc) (*InstanceVariable, *Response, error) { - u := "admin/ci/variables" - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - v := new(InstanceVariable) - resp, err := s.client.Do(req, v) - if err != nil { - return nil, resp, err - } - - return v, resp, nil -} - -// UpdateInstanceVariableOptions represents the available UpdateVariable() -// options. 
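For context, the instance-variable endpoints removed here follow the library's uniform NewRequest/Do pattern; a minimal sketch of typical caller code against the deleted API (the token, base URL, and variable values are hypothetical placeholders, and the InstanceVariables client field plus the String/Bool pointer helpers and WithBaseURL option are assumed from the library's usual conventions):

package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	// Hypothetical admin token and self-managed instance URL.
	git, err := gitlab.NewClient("glpat-example",
		gitlab.WithBaseURL("https://gitlab.example.com/api/v4"))
	if err != nil {
		log.Fatal(err)
	}

	// Create an instance-level CI variable (administrator access required).
	v, _, err := git.InstanceVariables.CreateVariable(&gitlab.CreateInstanceVariableOptions{
		Key:       gitlab.String("DEPLOY_TARGET"),
		Value:     gitlab.String("staging"),
		Protected: gitlab.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("created %s (protected=%v)\n", v.Key, v.Protected)
}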
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/instance_level_ci_variables.html#update-instance-variable
-type UpdateInstanceVariableOptions struct {
-	Value        *string            `url:"value,omitempty" json:"value,omitempty"`
-	Description  *string            `url:"description,omitempty" json:"description,omitempty"`
-	Masked       *bool              `url:"masked,omitempty" json:"masked,omitempty"`
-	Protected    *bool              `url:"protected,omitempty" json:"protected,omitempty"`
-	Raw          *bool              `url:"raw,omitempty" json:"raw,omitempty"`
-	VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"`
-}
-
-// UpdateVariable updates an existing instance level CI variable.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/instance_level_ci_variables.html#update-instance-variable
-func (s *InstanceVariablesService) UpdateVariable(key string, opt *UpdateInstanceVariableOptions, options ...RequestOptionFunc) (*InstanceVariable, *Response, error) {
-	u := fmt.Sprintf("admin/ci/variables/%s", url.PathEscape(key))
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	v := new(InstanceVariable)
-	resp, err := s.client.Do(req, v)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return v, resp, nil
-}
-
-// RemoveVariable removes an instance level CI variable.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/instance_level_ci_variables.html#remove-instance-variable
-func (s *InstanceVariablesService) RemoveVariable(key string, options ...RequestOptionFunc) (*Response, error) {
-	u := fmt.Sprintf("admin/ci/variables/%s", url.PathEscape(key))
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/invites.go b/vendor/github.com/xanzy/go-gitlab/invites.go
deleted file mode 100644
index 62bad26d22..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/invites.go
+++ /dev/null
@@ -1,176 +0,0 @@
-//
-// Copyright 2021, Sander van Harmelen
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package gitlab
-
-import (
-	"fmt"
-	"net/http"
-	"time"
-)
-
-// InvitesService handles communication with the invitation related
-// methods of the GitLab API.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/invitations.html
-type InvitesService struct {
-	client *Client
-}
-
-// PendingInvite represents a pending invite.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/invitations.html
-type PendingInvite struct {
-	ID            int              `json:"id"`
-	InviteEmail   string           `json:"invite_email"`
-	CreatedAt     *time.Time       `json:"created_at"`
-	AccessLevel   AccessLevelValue `json:"access_level"`
-	ExpiresAt     *time.Time       `json:"expires_at"`
-	UserName      string           `json:"user_name"`
-	CreatedByName string           `json:"created_by_name"`
-}
-
-// ListPendingInvitationsOptions represents the available
-// ListPendingInvitations() options.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/invitations.html#list-all-invitations-pending-for-a-group-or-project -type ListPendingInvitationsOptions struct { - ListOptions - Query *string `url:"query,omitempty" json:"query,omitempty"` -} - -// ListPendingGroupInvitations gets a list of invited group members. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/invitations.html#list-all-invitations-pending-for-a-group-or-project -func (s *InvitesService) ListPendingGroupInvitations(gid interface{}, opt *ListPendingInvitationsOptions, options ...RequestOptionFunc) ([]*PendingInvite, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/invitations", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pis []*PendingInvite - resp, err := s.client.Do(req, &pis) - if err != nil { - return nil, resp, err - } - - return pis, resp, nil -} - -// ListPendingProjectInvitations gets a list of invited project members. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/invitations.html#list-all-invitations-pending-for-a-group-or-project -func (s *InvitesService) ListPendingProjectInvitations(pid interface{}, opt *ListPendingInvitationsOptions, options ...RequestOptionFunc) ([]*PendingInvite, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/invitations", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pis []*PendingInvite - resp, err := s.client.Do(req, &pis) - if err != nil { - return nil, resp, err - } - - return pis, resp, nil -} - -// InvitesOptions represents the available GroupInvites() and ProjectInvites() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/invitations.html#add-a-member-to-a-group-or-project -type InvitesOptions struct { - ID interface{} `url:"id,omitempty" json:"id,omitempty"` - Email *string `url:"email,omitempty" json:"email,omitempty"` - UserID interface{} `url:"user_id,omitempty" json:"user_id,omitempty"` - AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` - ExpiresAt *ISOTime `url:"expires_at,omitempty" json:"expires_at,omitempty"` -} - -// InvitesResult represents an invitations result. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/invitations.html#add-a-member-to-a-group-or-project -type InvitesResult struct { - Status string `json:"status"` - Message map[string]string `json:"message,omitempty"` -} - -// GroupInvites invites new users by email to join a group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/invitations.html#add-a-member-to-a-group-or-project -func (s *InvitesService) GroupInvites(gid interface{}, opt *InvitesOptions, options ...RequestOptionFunc) (*InvitesResult, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/invitations", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - ir := new(InvitesResult) - resp, err := s.client.Do(req, ir) - if err != nil { - return nil, resp, err - } - - return ir, resp, nil -} - -// ProjectInvites invites new users by email to join a project. 
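For context, a minimal sketch of how the removed invitations API was typically called (the token, group path, and email are hypothetical placeholders; the Invites client field and the String/AccessLevel pointer helpers are assumed from the library's usual conventions):

package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("glpat-example") // hypothetical token
	if err != nil {
		log.Fatal(err)
	}

	// Invite an email address to a group with Developer access. The
	// InvitesResult's Status field reports "success" or "error", with
	// per-email details in the Message map.
	res, _, err := git.Invites.GroupInvites("my-group", &gitlab.InvitesOptions{
		Email:       gitlab.String("new.dev@example.com"),
		AccessLevel: gitlab.AccessLevel(gitlab.DeveloperPermissions),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("invite status:", res.Status)
}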
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/invitations.html#add-a-member-to-a-group-or-project -func (s *InvitesService) ProjectInvites(pid interface{}, opt *InvitesOptions, options ...RequestOptionFunc) (*InvitesResult, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/invitations", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - ir := new(InvitesResult) - resp, err := s.client.Do(req, ir) - if err != nil { - return nil, resp, err - } - - return ir, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/issue_links.go b/vendor/github.com/xanzy/go-gitlab/issue_links.go deleted file mode 100644 index d5fcae0ddd..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/issue_links.go +++ /dev/null @@ -1,186 +0,0 @@ -// -// Copyright 2021, Arkbriar -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// IssueLinksService handles communication with the issue relations related methods -// of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/issue_links.html -type IssueLinksService struct { - client *Client -} - -// IssueLink represents a two-way relation between two issues. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/issue_links.html -type IssueLink struct { - SourceIssue *Issue `json:"source_issue"` - TargetIssue *Issue `json:"target_issue"` - LinkType string `json:"link_type"` -} - -// IssueRelation gets a relation between two issues. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/issue_links.html#list-issue-relations -type IssueRelation struct { - ID int `json:"id"` - IID int `json:"iid"` - State string `json:"state"` - Description string `json:"description"` - Confidential bool `json:"confidential"` - Author *IssueAuthor `json:"author"` - Milestone *Milestone `json:"milestone"` - ProjectID int `json:"project_id"` - Assignees []*IssueAssignee `json:"assignees"` - Assignee *IssueAssignee `json:"assignee"` - UpdatedAt *time.Time `json:"updated_at"` - Title string `json:"title"` - CreatedAt *time.Time `json:"created_at"` - Labels Labels `json:"labels"` - DueDate *ISOTime `json:"due_date"` - WebURL string `json:"web_url"` - References *IssueReferences `json:"references"` - Weight int `json:"weight"` - UserNotesCount int `json:"user_notes_count"` - IssueLinkID int `json:"issue_link_id"` - LinkType string `json:"link_type"` - LinkCreatedAt *time.Time `json:"link_created_at"` - LinkUpdatedAt *time.Time `json:"link_updated_at"` -} - -// ListIssueRelations gets a list of related issues of a given issue, -// sorted by the relationship creation datetime (ascending). -// -// Issues will be filtered according to the user authorizations. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/issue_links.html#list-issue-relations -func (s *IssueLinksService) ListIssueRelations(pid interface{}, issue int, options ...RequestOptionFunc) ([]*IssueRelation, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/links", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var is []*IssueRelation - resp, err := s.client.Do(req, &is) - if err != nil { - return nil, resp, err - } - - return is, resp, nil -} - -// GetIssueLink gets a specific issue link. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/issue_links.html#get-an-issue-link -func (s *IssueLinksService) GetIssueLink(pid interface{}, issue, issueLink int, options ...RequestOptionFunc) (*IssueLink, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/links/%d", PathEscape(project), issue, issueLink) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - il := new(IssueLink) - resp, err := s.client.Do(req, il) - if err != nil { - return nil, resp, err - } - - return il, resp, nil -} - -// CreateIssueLinkOptions represents the available CreateIssueLink() options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/issue_links.html#create-an-issue-link -type CreateIssueLinkOptions struct { - TargetProjectID *string `json:"target_project_id"` - TargetIssueIID *string `json:"target_issue_iid"` - LinkType *string `json:"link_type"` -} - -// CreateIssueLink creates a two-way relation between two issues. -// User must be allowed to update both issues in order to succeed. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/issue_links.html#create-an-issue-link -func (s *IssueLinksService) CreateIssueLink(pid interface{}, issue int, opt *CreateIssueLinkOptions, options ...RequestOptionFunc) (*IssueLink, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/links", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - i := new(IssueLink) - resp, err := s.client.Do(req, &i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil -} - -// DeleteIssueLink deletes an issue link, thus removes the two-way relationship. 
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/issue_links.html#delete-an-issue-link
-func (s *IssueLinksService) DeleteIssueLink(pid interface{}, issue, issueLink int, options ...RequestOptionFunc) (*IssueLink, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/issues/%d/links/%d",
-		PathEscape(project),
-		issue,
-		issueLink)
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	il := new(IssueLink)
-	resp, err := s.client.Do(req, &il)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return il, resp, nil
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/issues.go b/vendor/github.com/xanzy/go-gitlab/issues.go
deleted file mode 100644
index eecccc475e..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/issues.go
+++ /dev/null
@@ -1,791 +0,0 @@
-//
-// Copyright 2021, Sander van Harmelen
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package gitlab
-
-import (
-	"encoding/json"
-	"fmt"
-	"net/http"
-	"reflect"
-	"time"
-)
-
-// IssuesService handles communication with the issue related methods
-// of the GitLab API.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html
-type IssuesService struct {
-	client    *Client
-	timeStats *timeStatsService
-}
-
-// IssueAuthor represents an author of the issue.
-type IssueAuthor struct {
-	ID        int    `json:"id"`
-	State     string `json:"state"`
-	WebURL    string `json:"web_url"`
-	Name      string `json:"name"`
-	AvatarURL string `json:"avatar_url"`
-	Username  string `json:"username"`
-}
-
-// IssueAssignee represents an assignee of the issue.
-type IssueAssignee struct {
-	ID        int    `json:"id"`
-	State     string `json:"state"`
-	WebURL    string `json:"web_url"`
-	Name      string `json:"name"`
-	AvatarURL string `json:"avatar_url"`
-	Username  string `json:"username"`
-}
-
-// IssueReferences represents references of the issue.
-type IssueReferences struct {
-	Short    string `json:"short"`
-	Relative string `json:"relative"`
-	Full     string `json:"full"`
-}
-
-// IssueCloser represents a closer of the issue.
-type IssueCloser struct {
-	ID        int    `json:"id"`
-	State     string `json:"state"`
-	WebURL    string `json:"web_url"`
-	Name      string `json:"name"`
-	AvatarURL string `json:"avatar_url"`
-	Username  string `json:"username"`
-}
-
-// IssueLinks represents links of the issue.
-type IssueLinks struct {
-	Self       string `json:"self"`
-	Notes      string `json:"notes"`
-	AwardEmoji string `json:"award_emoji"`
-	Project    string `json:"project"`
-}
-
-// Issue represents a GitLab issue.
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html -type Issue struct { - ID int `json:"id"` - IID int `json:"iid"` - ExternalID string `json:"external_id"` - State string `json:"state"` - Description string `json:"description"` - HealthStatus string `json:"health_status"` - Author *IssueAuthor `json:"author"` - Milestone *Milestone `json:"milestone"` - ProjectID int `json:"project_id"` - Assignees []*IssueAssignee `json:"assignees"` - Assignee *IssueAssignee `json:"assignee"` - UpdatedAt *time.Time `json:"updated_at"` - ClosedAt *time.Time `json:"closed_at"` - ClosedBy *IssueCloser `json:"closed_by"` - Title string `json:"title"` - CreatedAt *time.Time `json:"created_at"` - MovedToID int `json:"moved_to_id"` - Labels Labels `json:"labels"` - LabelDetails []*LabelDetails `json:"label_details"` - Upvotes int `json:"upvotes"` - Downvotes int `json:"downvotes"` - DueDate *ISOTime `json:"due_date"` - WebURL string `json:"web_url"` - References *IssueReferences `json:"references"` - TimeStats *TimeStats `json:"time_stats"` - Confidential bool `json:"confidential"` - Weight int `json:"weight"` - DiscussionLocked bool `json:"discussion_locked"` - IssueType *string `json:"issue_type,omitempty"` - Subscribed bool `json:"subscribed"` - UserNotesCount int `json:"user_notes_count"` - Links *IssueLinks `json:"_links"` - IssueLinkID int `json:"issue_link_id"` - MergeRequestCount int `json:"merge_requests_count"` - EpicIssueID int `json:"epic_issue_id"` - Epic *Epic `json:"epic"` - Iteration *GroupIteration `json:"iteration"` - TaskCompletionStatus *TasksCompletionStatus `json:"task_completion_status"` -} - -func (i Issue) String() string { - return Stringify(i) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (i *Issue) UnmarshalJSON(data []byte) error { - type alias Issue - - raw := make(map[string]interface{}) - err := json.Unmarshal(data, &raw) - if err != nil { - return err - } - - if reflect.TypeOf(raw["id"]).Kind() == reflect.String { - raw["external_id"] = raw["id"] - delete(raw, "id") - } - - labelDetails, ok := raw["labels"].([]interface{}) - if ok && len(labelDetails) > 0 { - // We only want to change anything if we got label details. - if _, ok := labelDetails[0].(map[string]interface{}); ok { - labels := make([]interface{}, len(labelDetails)) - for i, details := range labelDetails { - labels[i] = details.(map[string]interface{})["name"] - } - - // Set the correct values - raw["labels"] = labels - raw["label_details"] = labelDetails - } - } - - data, err = json.Marshal(raw) - if err != nil { - return err - } - - return json.Unmarshal(data, (*alias)(i)) -} - -// LabelDetails represents detailed label information. -type LabelDetails struct { - ID int `json:"id"` - Name string `json:"name"` - Color string `json:"color"` - Description string `json:"description"` - DescriptionHTML string `json:"description_html"` - TextColor string `json:"text_color"` -} - -// ListIssuesOptions represents the available ListIssues() options. 
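The custom UnmarshalJSON above normalizes the two shapes the API can return for labels: plain strings stay in Labels, while detailed label objects are split so names land in Labels and the full objects in LabelDetails. A small sketch of the behavior it produces (the payload values are hypothetical):

package main

import (
	"encoding/json"
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	// A response where the API returned detailed label objects.
	payload := []byte(`{
		"id": 1,
		"iid": 7,
		"labels": [{"id": 42, "name": "bug", "color": "#d9534f"}]
	}`)

	var issue gitlab.Issue
	if err := json.Unmarshal(payload, &issue); err != nil {
		log.Fatal(err)
	}
	fmt.Println(issue.Labels)                // [bug]
	fmt.Println(issue.LabelDetails[0].Color) // #d9534f
}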
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#list-issues
-type ListIssuesOptions struct {
-	ListOptions
-	State               *string          `url:"state,omitempty" json:"state,omitempty"`
-	Labels              *LabelOptions    `url:"labels,comma,omitempty" json:"labels,omitempty"`
-	NotLabels           *LabelOptions    `url:"not[labels],comma,omitempty" json:"not[labels],omitempty"`
-	WithLabelDetails    *bool            `url:"with_labels_details,omitempty" json:"with_labels_details,omitempty"`
-	Milestone           *string          `url:"milestone,omitempty" json:"milestone,omitempty"`
-	NotMilestone        *string          `url:"not[milestone],omitempty" json:"not[milestone],omitempty"`
-	Scope               *string          `url:"scope,omitempty" json:"scope,omitempty"`
-	AuthorID            *int             `url:"author_id,omitempty" json:"author_id,omitempty"`
-	AuthorUsername      *string          `url:"author_username,omitempty" json:"author_username,omitempty"`
-	NotAuthorUsername   *string          `url:"not[author_username],omitempty" json:"not[author_username],omitempty"`
-	NotAuthorID         *[]int           `url:"not[author_id],omitempty" json:"not[author_id],omitempty"`
-	AssigneeID          *AssigneeIDValue `url:"assignee_id,omitempty" json:"assignee_id,omitempty"`
-	NotAssigneeID       *[]int           `url:"not[assignee_id],omitempty" json:"not[assignee_id],omitempty"`
-	AssigneeUsername    *string          `url:"assignee_username,omitempty" json:"assignee_username,omitempty"`
-	NotAssigneeUsername *string          `url:"not[assignee_username],omitempty" json:"not[assignee_username],omitempty"`
-	MyReactionEmoji     *string          `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"`
-	NotMyReactionEmoji  *[]string        `url:"not[my_reaction_emoji],omitempty" json:"not[my_reaction_emoji],omitempty"`
-	IIDs                *[]int           `url:"iids[],omitempty" json:"iids,omitempty"`
-	In                  *string          `url:"in,omitempty" json:"in,omitempty"`
-	NotIn               *string          `url:"not[in],omitempty" json:"not[in],omitempty"`
-	OrderBy             *string          `url:"order_by,omitempty" json:"order_by,omitempty"`
-	Sort                *string          `url:"sort,omitempty" json:"sort,omitempty"`
-	Search              *string          `url:"search,omitempty" json:"search,omitempty"`
-	NotSearch           *string          `url:"not[search],omitempty" json:"not[search],omitempty"`
-	CreatedAfter        *time.Time       `url:"created_after,omitempty" json:"created_after,omitempty"`
-	CreatedBefore       *time.Time       `url:"created_before,omitempty" json:"created_before,omitempty"`
-	DueDate             *string          `url:"due_date,omitempty" json:"due_date,omitempty"`
-	UpdatedAfter        *time.Time       `url:"updated_after,omitempty" json:"updated_after,omitempty"`
-	UpdatedBefore       *time.Time       `url:"updated_before,omitempty" json:"updated_before,omitempty"`
-	Confidential        *bool            `url:"confidential,omitempty" json:"confidential,omitempty"`
-	IssueType           *string          `url:"issue_type,omitempty" json:"issue_type,omitempty"`
-	IterationID         *int             `url:"iteration_id,omitempty" json:"iteration_id,omitempty"`
-}
-
-// ListIssues gets all issues created by the authenticated user. This function
-// takes pagination parameters page and per_page to restrict the list of issues.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#list-issues
-func (s *IssuesService) ListIssues(opt *ListIssuesOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) {
-	req, err := s.client.NewRequest(http.MethodGet, "issues", opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var i []*Issue
-	resp, err := s.client.Do(req, &i)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return i, resp, nil
-}
-
-// ListGroupIssuesOptions represents the available ListGroupIssues() options.
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#list-group-issues -type ListGroupIssuesOptions struct { - ListOptions - State *string `url:"state,omitempty" json:"state,omitempty"` - Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` - NotLabels *LabelOptions `url:"not[labels],comma,omitempty" json:"not[labels],omitempty"` - WithLabelDetails *bool `url:"with_labels_details,omitempty" json:"with_labels_details,omitempty"` - IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` - Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"` - NotMilestone *string `url:"not[milestone],omitempty" json:"not[milestone],omitempty"` - Scope *string `url:"scope,omitempty" json:"scope,omitempty"` - AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` - NotAuthorID *[]int `url:"not[author_id],omitempty" json:"not[author_id],omitempty"` - AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"` - NotAuthorUsername *string `url:"not[author_username],omitempty" json:"not[author_username],omitempty"` - - AssigneeID *AssigneeIDValue `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` - NotAssigneeID *[]int `url:"not[assignee_id],omitempty" json:"not[assignee_id],omitempty"` - AssigneeUsername *string `url:"assignee_username,omitempty" json:"assignee_username,omitempty"` - NotAssigneeUsername *string `url:"not[assignee_username],omitempty" json:"not[assignee_username],omitempty"` - MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"` - NotMyReactionEmoji *[]string `url:"not[my_reaction_emoji],omitempty" json:"not[my_reaction_emoji],omitempty"` - OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` - Sort *string `url:"sort,omitempty" json:"sort,omitempty"` - Search *string `url:"search,omitempty" json:"search,omitempty"` - NotSearch *string `url:"not[search],omitempty" json:"not[search],omitempty"` - In *string `url:"in,omitempty" json:"in,omitempty"` - NotIn *string `url:"not[in],omitempty" json:"not[in],omitempty"` - CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"` - CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` - DueDate *string `url:"due_date,omitempty" json:"due_date,omitempty"` - UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"` - UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"` - IssueType *string `url:"issue_type,omitempty" json:"issue_type,omitempty"` - IterationID *int `url:"iteration_id,omitempty" json:"iteration_id,omitempty"` -} - -// ListGroupIssues gets a list of group issues. This function accepts -// pagination parameters page and per_page to return the list of group issues. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#list-group-issues -func (s *IssuesService) ListGroupIssues(pid interface{}, opt *ListGroupIssuesOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { - group, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/issues", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var i []*Issue - resp, err := s.client.Do(req, &i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil -} - -// ListProjectIssuesOptions represents the available ListProjectIssues() options. 
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#list-project-issues -type ListProjectIssuesOptions struct { - ListOptions - IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` - State *string `url:"state,omitempty" json:"state,omitempty"` - Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` - NotLabels *LabelOptions `url:"not[labels],comma,omitempty" json:"not[labels],omitempty"` - WithLabelDetails *bool `url:"with_labels_details,omitempty" json:"with_labels_details,omitempty"` - Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"` - NotMilestone *string `url:"not[milestone],omitempty" json:"not[milestone],omitempty"` - Scope *string `url:"scope,omitempty" json:"scope,omitempty"` - AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` - AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"` - NotAuthorUsername *string `url:"not[author_username],omitempty" json:"not[author_username],omitempty"` - NotAuthorID *[]int `url:"not[author_id],omitempty" json:"not[author_id],omitempty"` - AssigneeID *AssigneeIDValue `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` - NotAssigneeID *[]int `url:"not[assignee_id],omitempty" json:"not[assignee_id],omitempty"` - AssigneeUsername *string `url:"assignee_username,omitempty" json:"assignee_username,omitempty"` - NotAssigneeUsername *string `url:"not[assignee_username],omitempty" json:"not[assignee_username],omitempty"` - MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"` - NotMyReactionEmoji *[]string `url:"not[my_reaction_emoji],omitempty" json:"not[my_reaction_emoji],omitempty"` - OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` - Sort *string `url:"sort,omitempty" json:"sort,omitempty"` - Search *string `url:"search,omitempty" json:"search,omitempty"` - In *string `url:"in,omitempty" json:"in,omitempty"` - NotIn *string `url:"not[in],omitempty" json:"not[in],omitempty"` - CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"` - CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` - DueDate *string `url:"due_date,omitempty" json:"due_date,omitempty"` - UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"` - UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"` - Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` - IssueType *string `url:"issue_type,omitempty" json:"issue_type,omitempty"` - IterationID *int `url:"iteration_id,omitempty" json:"iteration_id,omitempty"` -} - -// ListProjectIssues gets a list of project issues. This function accepts -// pagination parameters page and per_page to return the list of project issues. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#list-project-issues -func (s *IssuesService) ListProjectIssues(pid interface{}, opt *ListProjectIssuesOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var i []*Issue - resp, err := s.client.Do(req, &i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil -} - -// GetIssueByID gets a single issue. 
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#single-issue -func (s *IssuesService) GetIssueByID(issue int, options ...RequestOptionFunc) (*Issue, *Response, error) { - u := fmt.Sprintf("issues/%d", issue) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - i := new(Issue) - resp, err := s.client.Do(req, i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil -} - -// GetIssue gets a single project issue. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#single-project-issue -func (s *IssuesService) GetIssue(pid interface{}, issue int, options ...RequestOptionFunc) (*Issue, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - i := new(Issue) - resp, err := s.client.Do(req, i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil -} - -// CreateIssueOptions represents the available CreateIssue() options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#new-issue -type CreateIssueOptions struct { - IID *int `url:"iid,omitempty" json:"iid,omitempty"` - Title *string `url:"title,omitempty" json:"title,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` - AssigneeIDs *[]int `url:"assignee_ids,omitempty" json:"assignee_ids,omitempty"` - MilestoneID *int `url:"milestone_id,omitempty" json:"milestone_id,omitempty"` - Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` - CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"` - DueDate *ISOTime `url:"due_date,omitempty" json:"due_date,omitempty"` - EpicID *int `url:"epic_id,omitempty" json:"epic_id,omitempty"` - MergeRequestToResolveDiscussionsOf *int `url:"merge_request_to_resolve_discussions_of,omitempty" json:"merge_request_to_resolve_discussions_of,omitempty"` - DiscussionToResolve *string `url:"discussion_to_resolve,omitempty" json:"discussion_to_resolve,omitempty"` - Weight *int `url:"weight,omitempty" json:"weight,omitempty"` - IssueType *string `url:"issue_type,omitempty" json:"issue_type,omitempty"` -} - -// CreateIssue creates a new project issue. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#new-issue -func (s *IssuesService) CreateIssue(pid interface{}, opt *CreateIssueOptions, options ...RequestOptionFunc) (*Issue, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - i := new(Issue) - resp, err := s.client.Do(req, i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil -} - -// UpdateIssueOptions represents the available UpdateIssue() options. 
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#edit-issue -type UpdateIssueOptions struct { - Title *string `url:"title,omitempty" json:"title,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` - AssigneeIDs *[]int `url:"assignee_ids,omitempty" json:"assignee_ids,omitempty"` - MilestoneID *int `url:"milestone_id,omitempty" json:"milestone_id,omitempty"` - Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` - AddLabels *LabelOptions `url:"add_labels,comma,omitempty" json:"add_labels,omitempty"` - RemoveLabels *LabelOptions `url:"remove_labels,comma,omitempty" json:"remove_labels,omitempty"` - StateEvent *string `url:"state_event,omitempty" json:"state_event,omitempty"` - UpdatedAt *time.Time `url:"updated_at,omitempty" json:"updated_at,omitempty"` - DueDate *ISOTime `url:"due_date,omitempty" json:"due_date,omitempty"` - EpicID *int `url:"epic_id,omitempty" json:"epic_id,omitempty"` - Weight *int `url:"weight,omitempty" json:"weight,omitempty"` - DiscussionLocked *bool `url:"discussion_locked,omitempty" json:"discussion_locked,omitempty"` - IssueType *string `url:"issue_type,omitempty" json:"issue_type,omitempty"` -} - -// UpdateIssue updates an existing project issue. This function is also used -// to mark an issue as closed. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#edit-issues -func (s *IssuesService) UpdateIssue(pid interface{}, issue int, opt *UpdateIssueOptions, options ...RequestOptionFunc) (*Issue, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - i := new(Issue) - resp, err := s.client.Do(req, i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil -} - -// DeleteIssue deletes a single project issue. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#delete-an-issue -func (s *IssuesService) DeleteIssue(pid interface{}, issue int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// ReorderIssueOptions represents the available ReorderIssue() options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#reorder-an-issue -type ReorderIssueOptions struct { - MoveAfterID *int `url:"move_after_id,omitempty" json:"move_after_id,omitempty"` - MoveBeforeID *int `url:"move_before_id,omitempty" json:"move_before_id,omitempty"` -} - -// ReorderIssue reorders an issue. 
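For context, a minimal sketch of the create-then-close flow these removed endpoints supported (the token, project path, and issue text are hypothetical placeholders; the Issues client field and the String pointer helper are assumed from the library's usual conventions):

package main

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("glpat-example") // hypothetical token
	if err != nil {
		log.Fatal(err)
	}

	// Create an issue in a project addressed by its full path.
	issue, _, err := git.Issues.CreateIssue("my-group/my-project", &gitlab.CreateIssueOptions{
		Title:       gitlab.String("Investigate flaky integration test"),
		Description: gitlab.String("Fails intermittently on the main branch."),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Closing goes through UpdateIssue's state_event, as the doc comment notes.
	_, _, err = git.Issues.UpdateIssue("my-group/my-project", issue.IID, &gitlab.UpdateIssueOptions{
		StateEvent: gitlab.String("close"),
	})
	if err != nil {
		log.Fatal(err)
	}
}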
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#reorder-an-issue
-func (s *IssuesService) ReorderIssue(pid interface{}, issue int, opt *ReorderIssueOptions, options ...RequestOptionFunc) (*Issue, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/issues/%d/reorder", PathEscape(project), issue)
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	i := new(Issue)
-	resp, err := s.client.Do(req, i)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return i, resp, nil
-}
-
-// MoveIssueOptions represents the available MoveIssue() options.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#move-an-issue
-type MoveIssueOptions struct {
-	ToProjectID *int `url:"to_project_id,omitempty" json:"to_project_id,omitempty"`
-}
-
-// MoveIssue moves an existing project issue to a different project.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/issues.html#move-an-issue
-func (s *IssuesService) MoveIssue(pid interface{}, issue int, opt *MoveIssueOptions, options ...RequestOptionFunc) (*Issue, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/issues/%d/move", PathEscape(project), issue)
-
-	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	i := new(Issue)
-	resp, err := s.client.Do(req, i)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return i, resp, nil
-}
-
-// SubscribeToIssue subscribes the authenticated user to the given issue to
-// receive notifications. If the user is already subscribed to the issue, the
-// status code 304 is returned.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/issues.html#subscribe-to-an-issue
-func (s *IssuesService) SubscribeToIssue(pid interface{}, issue int, options ...RequestOptionFunc) (*Issue, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/issues/%d/subscribe", PathEscape(project), issue)
-
-	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	i := new(Issue)
-	resp, err := s.client.Do(req, i)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return i, resp, nil
-}
-
-// UnsubscribeFromIssue unsubscribes the authenticated user from the given
-// issue to stop receiving notifications for that issue. If the user is not
-// subscribed to the issue, status code 304 is returned.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/issues.html#unsubscribe-from-an-issue
-func (s *IssuesService) UnsubscribeFromIssue(pid interface{}, issue int, options ...RequestOptionFunc) (*Issue, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/issues/%d/unsubscribe", PathEscape(project), issue)
-
-	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	i := new(Issue)
-	resp, err := s.client.Do(req, i)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return i, resp, nil
-}
-
-// CreateTodo creates a todo for the current user for an issue.
-// If there already exists a todo for the user on that issue, status code
-// 304 is returned.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/issues.html#create-a-to-do-item
-func (s *IssuesService) CreateTodo(pid interface{}, issue int, options ...RequestOptionFunc) (*Todo, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/issues/%d/todo", PathEscape(project), issue)
-
-	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	t := new(Todo)
-	resp, err := s.client.Do(req, t)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return t, resp, nil
-}
-
-// ListMergeRequestsClosingIssueOptions represents the available
-// ListMergeRequestsClosingIssue() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/issues.html#list-merge-requests-that-close-a-particular-issue-on-merge
-type ListMergeRequestsClosingIssueOptions ListOptions
-
-// ListMergeRequestsClosingIssue gets all the merge requests that will close
-// the issue when merged.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/issues.html#list-merge-requests-that-close-a-particular-issue-on-merge
-func (s *IssuesService) ListMergeRequestsClosingIssue(pid interface{}, issue int, opt *ListMergeRequestsClosingIssueOptions, options ...RequestOptionFunc) ([]*MergeRequest, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/issues/%d/closed_by", PathEscape(project), issue)
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var m []*MergeRequest
-	resp, err := s.client.Do(req, &m)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return m, resp, nil
-}
-
-// ListMergeRequestsRelatedToIssueOptions represents the available
-// ListMergeRequestsRelatedToIssue() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/issues.html#list-merge-requests-related-to-issue
-type ListMergeRequestsRelatedToIssueOptions ListOptions
-
-// ListMergeRequestsRelatedToIssue gets all the merge requests that are
-// related to the issue.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/issues.html#list-merge-requests-related-to-issue
-func (s *IssuesService) ListMergeRequestsRelatedToIssue(pid interface{}, issue int, opt *ListMergeRequestsRelatedToIssueOptions, options ...RequestOptionFunc) ([]*MergeRequest, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/issues/%d/related_merge_requests",
-		PathEscape(project),
-		issue,
-	)
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var m []*MergeRequest
-	resp, err := s.client.Do(req, &m)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return m, resp, nil
-}
-
-// SetTimeEstimate sets the time estimate for a single project issue.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/issues.html#set-a-time-estimate-for-an-issue
-func (s *IssuesService) SetTimeEstimate(pid interface{}, issue int, opt *SetTimeEstimateOptions, options ...RequestOptionFunc) (*TimeStats, *Response, error) {
-	return s.timeStats.setTimeEstimate(pid, "issues", issue, opt, options...)
-}
-
-// ResetTimeEstimate resets the time estimate for a single project issue.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/issues.html#reset-the-time-estimate-for-an-issue -func (s *IssuesService) ResetTimeEstimate(pid interface{}, issue int, options ...RequestOptionFunc) (*TimeStats, *Response, error) { - return s.timeStats.resetTimeEstimate(pid, "issues", issue, options...) -} - -// AddSpentTime adds spent time for a single project issue. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/issues.html#add-spent-time-for-an-issue -func (s *IssuesService) AddSpentTime(pid interface{}, issue int, opt *AddSpentTimeOptions, options ...RequestOptionFunc) (*TimeStats, *Response, error) { - return s.timeStats.addSpentTime(pid, "issues", issue, opt, options...) -} - -// ResetSpentTime resets the spent time for a single project issue. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/issues.html#reset-spent-time-for-an-issue -func (s *IssuesService) ResetSpentTime(pid interface{}, issue int, options ...RequestOptionFunc) (*TimeStats, *Response, error) { - return s.timeStats.resetSpentTime(pid, "issues", issue, options...) -} - -// GetTimeSpent gets the spent time for a single project issue. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/issues.html#get-time-tracking-stats -func (s *IssuesService) GetTimeSpent(pid interface{}, issue int, options ...RequestOptionFunc) (*TimeStats, *Response, error) { - return s.timeStats.getTimeSpent(pid, "issues", issue, options...) -} - -// GetParticipants gets a list of issue participants. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/issues.html#participants-on-issues -func (s *IssuesService) GetParticipants(pid interface{}, issue int, options ...RequestOptionFunc) ([]*BasicUser, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/participants", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var bu []*BasicUser - resp, err := s.client.Do(req, &bu) - if err != nil { - return nil, resp, err - } - - return bu, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/issues_statistics.go b/vendor/github.com/xanzy/go-gitlab/issues_statistics.go deleted file mode 100644 index 53555781e9..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/issues_statistics.go +++ /dev/null @@ -1,187 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// IssuesStatisticsService handles communication with the issues statistics -// related methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/issues_statistics.html -type IssuesStatisticsService struct { - client *Client -} - -// IssuesStatistics represents a GitLab issues statistic. 
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/issues_statistics.html -type IssuesStatistics struct { - Statistics struct { - Counts struct { - All int `json:"all"` - Closed int `json:"closed"` - Opened int `json:"opened"` - } `json:"counts"` - } `json:"statistics"` -} - -func (n IssuesStatistics) String() string { - return Stringify(n) -} - -// GetIssuesStatisticsOptions represents the available GetIssuesStatistics() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/issues_statistics.html#get-issues-statistics -type GetIssuesStatisticsOptions struct { - Labels *LabelOptions `url:"labels,omitempty" json:"labels,omitempty"` - Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"` - Scope *string `url:"scope,omitempty" json:"scope,omitempty"` - AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` - AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"` - AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` - AssigneeUsername *[]string `url:"assignee_username,omitempty" json:"assignee_username,omitempty"` - MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"` - IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` - Search *string `url:"search,omitempty" json:"search,omitempty"` - In *string `url:"in,omitempty" json:"in,omitempty"` - CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"` - CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` - UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"` - UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"` - Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` -} - -// GetIssuesStatistics gets issues statistics on all issues the authenticated -// user has access to. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/issues_statistics.html#get-issues-statistics -func (s *IssuesStatisticsService) GetIssuesStatistics(opt *GetIssuesStatisticsOptions, options ...RequestOptionFunc) (*IssuesStatistics, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "issues_statistics", opt, options) - if err != nil { - return nil, nil, err - } - - is := new(IssuesStatistics) - resp, err := s.client.Do(req, is) - if err != nil { - return nil, resp, err - } - - return is, resp, nil -} - -// GetGroupIssuesStatisticsOptions represents the available GetGroupIssuesStatistics() -// options. 
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/issues_statistics.html#get-group-issues-statistics
-type GetGroupIssuesStatisticsOptions struct {
-	Labels           *LabelOptions `url:"labels,omitempty" json:"labels,omitempty"`
-	IIDs             *[]int        `url:"iids[],omitempty" json:"iids,omitempty"`
-	Milestone        *string       `url:"milestone,omitempty" json:"milestone,omitempty"`
-	Scope            *string       `url:"scope,omitempty" json:"scope,omitempty"`
-	AuthorID         *int          `url:"author_id,omitempty" json:"author_id,omitempty"`
-	AuthorUsername   *string       `url:"author_username,omitempty" json:"author_username,omitempty"`
-	AssigneeID       *int          `url:"assignee_id,omitempty" json:"assignee_id,omitempty"`
-	AssigneeUsername *[]string     `url:"assignee_username,omitempty" json:"assignee_username,omitempty"`
-	MyReactionEmoji  *string       `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"`
-	Search           *string       `url:"search,omitempty" json:"search,omitempty"`
-	CreatedAfter     *time.Time    `url:"created_after,omitempty" json:"created_after,omitempty"`
-	CreatedBefore    *time.Time    `url:"created_before,omitempty" json:"created_before,omitempty"`
-	UpdatedAfter     *time.Time    `url:"updated_after,omitempty" json:"updated_after,omitempty"`
-	UpdatedBefore    *time.Time    `url:"updated_before,omitempty" json:"updated_before,omitempty"`
-	Confidential     *bool         `url:"confidential,omitempty" json:"confidential,omitempty"`
-}
-
-// GetGroupIssuesStatistics gets issues count statistics for a given group.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/issues_statistics.html#get-group-issues-statistics
-func (s *IssuesStatisticsService) GetGroupIssuesStatistics(gid interface{}, opt *GetGroupIssuesStatisticsOptions, options ...RequestOptionFunc) (*IssuesStatistics, *Response, error) {
-	group, err := parseID(gid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("groups/%s/issues_statistics", PathEscape(group))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	is := new(IssuesStatistics)
-	resp, err := s.client.Do(req, is)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return is, resp, nil
-}
-
-// GetProjectIssuesStatisticsOptions represents the available
-// GetProjectIssuesStatistics() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/issues_statistics.html#get-project-issues-statistics
-type GetProjectIssuesStatisticsOptions struct {
-	IIDs             *[]int        `url:"iids[],omitempty" json:"iids,omitempty"`
-	Labels           *LabelOptions `url:"labels,omitempty" json:"labels,omitempty"`
-	Milestone        *string       `url:"milestone,omitempty" json:"milestone,omitempty"`
-	Scope            *string       `url:"scope,omitempty" json:"scope,omitempty"`
-	AuthorID         *int          `url:"author_id,omitempty" json:"author_id,omitempty"`
-	AuthorUsername   *string       `url:"author_username,omitempty" json:"author_username,omitempty"`
-	AssigneeID       *int          `url:"assignee_id,omitempty" json:"assignee_id,omitempty"`
-	AssigneeUsername *[]string     `url:"assignee_username,omitempty" json:"assignee_username,omitempty"`
-	MyReactionEmoji  *string       `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"`
-	Search           *string       `url:"search,omitempty" json:"search,omitempty"`
-	CreatedAfter     *time.Time    `url:"created_after,omitempty" json:"created_after,omitempty"`
-	CreatedBefore    *time.Time    `url:"created_before,omitempty" json:"created_before,omitempty"`
-	UpdatedAfter     *time.Time    `url:"updated_after,omitempty" json:"updated_after,omitempty"`
-	UpdatedBefore    *time.Time    `url:"updated_before,omitempty" json:"updated_before,omitempty"`
-	Confidential     *bool         `url:"confidential,omitempty" json:"confidential,omitempty"`
-}
-
-// GetProjectIssuesStatistics gets issues count statistics for a given project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/issues_statistics.html#get-project-issues-statistics
-func (s *IssuesStatisticsService) GetProjectIssuesStatistics(pid interface{}, opt *GetProjectIssuesStatisticsOptions, options ...RequestOptionFunc) (*IssuesStatistics, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/issues_statistics", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	is := new(IssuesStatistics)
-	resp, err := s.client.Do(req, is)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return is, resp, nil
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/job_token_scope.go b/vendor/github.com/xanzy/go-gitlab/job_token_scope.go
deleted file mode 100644
index 35525b76d8..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/job_token_scope.go
+++ /dev/null
@@ -1,284 +0,0 @@
-// Copyright 2021, Sander van Harmelen
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-package gitlab
-
-import (
-	"fmt"
-	"net/http"
-)
-
-// JobTokenScopeService handles communication with project CI settings
-// such as token permissions.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/project_job_token_scopes.html
-type JobTokenScopeService struct {
-	client *Client
-}
-
-// JobTokenAccessSettings represents job token access attributes for this project.
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/project_job_token_scopes.html -type JobTokenAccessSettings struct { - InboundEnabled bool `json:"inbound_enabled"` - OutboundEnabled bool `json:"outbound_enabled"` -} - -// GetProjectJobTokenAccessSettings fetch the CI/CD job token access settings (job token scope) of a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#get-a-projects-cicd-job-token-access-settings -func (j *JobTokenScopeService) GetProjectJobTokenAccessSettings(pid interface{}, options ...RequestOptionFunc) (*JobTokenAccessSettings, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf(`projects/%s/job_token_scope`, PathEscape(project)) - - req, err := j.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - jt := new(JobTokenAccessSettings) - resp, err := j.client.Do(req, jt) - if err != nil { - return nil, resp, err - } - - return jt, resp, err -} - -// PatchProjectJobTokenAccessSettingsOptions represents the available -// PatchProjectJobTokenAccessSettings() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#patch-a-projects-cicd-job-token-access-settings -type PatchProjectJobTokenAccessSettingsOptions struct { - Enabled bool `json:"enabled"` -} - -// PatchProjectJobTokenAccessSettings patch the Limit access to this project setting (job token scope) of a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#patch-a-projects-cicd-job-token-access-settings -func (j *JobTokenScopeService) PatchProjectJobTokenAccessSettings(pid interface{}, opt *PatchProjectJobTokenAccessSettingsOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf(`projects/%s/job_token_scope`, PathEscape(project)) - - req, err := j.client.NewRequest(http.MethodPatch, u, opt, options) - if err != nil { - return nil, err - } - - return j.client.Do(req, nil) -} - -// JobTokenInboundAllowItem represents a single job token inbound allowlist item. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/project_job_token_scopes.html -type JobTokenInboundAllowItem struct { - SourceProjectID int `json:"source_project_id"` - TargetProjectID int `json:"target_project_id"` -} - -// GetJobTokenInboundAllowListOptions represents the available -// GetJobTokenInboundAllowList() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#get-a-projects-cicd-job-token-inbound-allowlist -type GetJobTokenInboundAllowListOptions struct { - ListOptions -} - -// GetProjectJobTokenInboundAllowList fetches the CI/CD job token inbound -// allowlist (job token scope) of a project. 
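The two settings calls above pair naturally: read the job token scope configuration, then toggle it. A short sketch with a placeholder project ID and token:

package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("glpat-xxxx") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// Read the current job token scope settings for project 123.
	settings, _, err := git.JobTokenScope.GetProjectJobTokenAccessSettings(123)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("inbound enabled:", settings.InboundEnabled)

	// Enable the "limit access to this project" setting.
	if _, err := git.JobTokenScope.PatchProjectJobTokenAccessSettings(123, &gitlab.PatchProjectJobTokenAccessSettingsOptions{
		Enabled: true,
	}); err != nil {
		log.Fatal(err)
	}
}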
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#get-a-projects-cicd-job-token-inbound-allowlist -func (j *JobTokenScopeService) GetProjectJobTokenInboundAllowList(pid interface{}, opt *GetJobTokenInboundAllowListOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf(`projects/%s/job_token_scope/allowlist`, PathEscape(project)) - - req, err := j.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ps []*Project - resp, err := j.client.Do(req, &ps) - if err != nil { - return nil, resp, err - } - - return ps, resp, nil -} - -// AddProjectToJobScopeAllowListOptions represents the available -// AddProjectToJobScopeAllowList() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#create-a-new-project-to-a-projects-cicd-job-token-inbound-allowlist -type JobTokenInboundAllowOptions struct { - TargetProjectID *int `url:"target_project_id,omitempty" json:"target_project_id,omitempty"` -} - -// AddProjectToJobScopeAllowList adds a new project to a project's job token -// inbound allow list. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#create-a-new-project-to-a-projects-cicd-job-token-inbound-allowlist -func (j *JobTokenScopeService) AddProjectToJobScopeAllowList(pid interface{}, opt *JobTokenInboundAllowOptions, options ...RequestOptionFunc) (*JobTokenInboundAllowItem, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf(`projects/%s/job_token_scope/allowlist`, PathEscape(project)) - - req, err := j.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - jt := new(JobTokenInboundAllowItem) - resp, err := j.client.Do(req, jt) - if err != nil { - return nil, resp, err - } - - return jt, resp, nil -} - -// RemoveProjectFromJobScopeAllowList removes a project from a project's job -// token inbound allow list. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#remove-a-project-from-a-projects-cicd-job-token-inbound-allowlist -func (j *JobTokenScopeService) RemoveProjectFromJobScopeAllowList(pid interface{}, targetProject int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf(`projects/%s/job_token_scope/allowlist/%d`, PathEscape(project), targetProject) - - req, err := j.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return j.client.Do(req, nil) -} - -// JobTokenAllowlistItem represents a single job token allowlist item. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/project_job_token_scopes.html -type JobTokenAllowlistItem struct { - SourceProjectID int `json:"source_project_id"` - TargetGroupID int `json:"target_group_id"` -} - -// GetJobTokenAllowlistGroupsOptions represents the available -// GetJobTokenAllowlistGroups() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#get-a-projects-cicd-job-token-allowlist-of-groups -type GetJobTokenAllowlistGroupsOptions struct { - ListOptions -} - -// GetJobTokenAllowListGroups fetches the CI/CD job token allowlist groups -// (job token scopes) of a project. 
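The inbound allowlist endpoints above can be exercised as follows; a compile-only sketch that assumes an already-constructed *gitlab.Client and placeholder project IDs:

package main

import gitlab "github.com/xanzy/go-gitlab"

// allowCIAccess lets project 456 call project 123 with its CI job token,
// then returns project 123's resulting inbound allowlist.
func allowCIAccess(git *gitlab.Client) ([]*gitlab.Project, error) {
	_, _, err := git.JobTokenScope.AddProjectToJobScopeAllowList(123, &gitlab.JobTokenInboundAllowOptions{
		TargetProjectID: gitlab.Int(456),
	})
	if err != nil {
		return nil, err
	}

	projects, _, err := git.JobTokenScope.GetProjectJobTokenInboundAllowList(123, &gitlab.GetJobTokenInboundAllowListOptions{})
	return projects, err
}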
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#get-a-projects-cicd-job-token-allowlist-of-groups
-func (j *JobTokenScopeService) GetJobTokenAllowlistGroups(pid interface{}, opt *GetJobTokenAllowlistGroupsOptions, options ...RequestOptionFunc) ([]*Group, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf(`projects/%s/job_token_scope/groups_allowlist`, PathEscape(project))
-
-	req, err := j.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var ps []*Group
-	resp, err := j.client.Do(req, &ps)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return ps, resp, nil
-}
-
-// AddGroupToJobTokenAllowlistOptions represents the available
-// AddGroupToJobTokenAllowlist() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#add-a-group-to-a-cicd-job-token-allowlist
-type AddGroupToJobTokenAllowlistOptions struct {
-	TargetGroupID *int `url:"target_group_id,omitempty" json:"target_group_id,omitempty"`
-}
-
-// AddGroupToJobTokenAllowlist adds a new group to a project's job token
-// inbound groups allow list.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#add-a-group-to-a-cicd-job-token-allowlist
-func (j *JobTokenScopeService) AddGroupToJobTokenAllowlist(pid interface{}, opt *AddGroupToJobTokenAllowlistOptions, options ...RequestOptionFunc) (*JobTokenAllowlistItem, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf(`projects/%s/job_token_scope/groups_allowlist`, PathEscape(project))
-
-	req, err := j.client.NewRequest(http.MethodPost, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	jt := new(JobTokenAllowlistItem)
-	resp, err := j.client.Do(req, jt)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return jt, resp, nil
-}
-
-// RemoveGroupFromJobTokenAllowlist removes a group from a project's job
-// token inbound groups allow list.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/project_job_token_scopes.html#remove-a-group-from-a-cicd-job-token-allowlist
-func (j *JobTokenScopeService) RemoveGroupFromJobTokenAllowlist(pid interface{}, targetGroup int, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf(`projects/%s/job_token_scope/groups_allowlist/%d`, PathEscape(project), targetGroup)
-
-	req, err := j.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return j.client.Do(req, nil)
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/jobs.go b/vendor/github.com/xanzy/go-gitlab/jobs.go
deleted file mode 100644
index f25c020f12..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/jobs.go
+++ /dev/null
@@ -1,585 +0,0 @@
-//
-// Copyright 2021, Arkbriar
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
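The groups allowlist deleted above mirrors the project allowlist; a hedged add-then-remove sketch, again assuming an existing client and placeholder project/group IDs:

package main

import gitlab "github.com/xanzy/go-gitlab"

// allowGroup adds group 99 to project 123's job token groups allowlist,
// then removes it again.
func allowGroup(git *gitlab.Client) error {
	_, _, err := git.JobTokenScope.AddGroupToJobTokenAllowlist(123, &gitlab.AddGroupToJobTokenAllowlistOptions{
		TargetGroupID: gitlab.Int(99),
	})
	if err != nil {
		return err
	}

	_, err = git.JobTokenScope.RemoveGroupFromJobTokenAllowlist(123, 99)
	return err
}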
-// - -package gitlab - -import ( - "bytes" - "fmt" - "net/http" - "time" -) - -// JobsService handles communication with the ci builds related methods -// of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/jobs.html -type JobsService struct { - client *Client -} - -// Job represents a ci build. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/jobs.html -type Job struct { - Commit *Commit `json:"commit"` - Coverage float64 `json:"coverage"` - AllowFailure bool `json:"allow_failure"` - CreatedAt *time.Time `json:"created_at"` - StartedAt *time.Time `json:"started_at"` - FinishedAt *time.Time `json:"finished_at"` - ErasedAt *time.Time `json:"erased_at"` - Duration float64 `json:"duration"` - QueuedDuration float64 `json:"queued_duration"` - ArtifactsExpireAt *time.Time `json:"artifacts_expire_at"` - TagList []string `json:"tag_list"` - ID int `json:"id"` - Name string `json:"name"` - Pipeline struct { - ID int `json:"id"` - ProjectID int `json:"project_id"` - Ref string `json:"ref"` - Sha string `json:"sha"` - Status string `json:"status"` - } `json:"pipeline"` - Ref string `json:"ref"` - Artifacts []struct { - FileType string `json:"file_type"` - Filename string `json:"filename"` - Size int `json:"size"` - FileFormat string `json:"file_format"` - } `json:"artifacts"` - ArtifactsFile struct { - Filename string `json:"filename"` - Size int `json:"size"` - } `json:"artifacts_file"` - Runner struct { - ID int `json:"id"` - Description string `json:"description"` - Active bool `json:"active"` - IsShared bool `json:"is_shared"` - Name string `json:"name"` - } `json:"runner"` - Stage string `json:"stage"` - Status string `json:"status"` - FailureReason string `json:"failure_reason"` - Tag bool `json:"tag"` - WebURL string `json:"web_url"` - Project *Project `json:"project"` - User *User `json:"user"` -} - -// Bridge represents a pipeline bridge. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/jobs.html#list-pipeline-bridges -type Bridge struct { - Commit *Commit `json:"commit"` - Coverage float64 `json:"coverage"` - AllowFailure bool `json:"allow_failure"` - CreatedAt *time.Time `json:"created_at"` - StartedAt *time.Time `json:"started_at"` - FinishedAt *time.Time `json:"finished_at"` - ErasedAt *time.Time `json:"erased_at"` - Duration float64 `json:"duration"` - QueuedDuration float64 `json:"queued_duration"` - ID int `json:"id"` - Name string `json:"name"` - Pipeline PipelineInfo `json:"pipeline"` - Ref string `json:"ref"` - Stage string `json:"stage"` - Status string `json:"status"` - FailureReason string `json:"failure_reason"` - Tag bool `json:"tag"` - WebURL string `json:"web_url"` - User *User `json:"user"` - DownstreamPipeline *PipelineInfo `json:"downstream_pipeline"` -} - -// ListJobsOptions represents the available ListProjectJobs() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/jobs.html#list-project-jobs -type ListJobsOptions struct { - ListOptions - Scope *[]BuildStateValue `url:"scope[],omitempty" json:"scope,omitempty"` - IncludeRetried *bool `url:"include_retried,omitempty" json:"include_retried,omitempty"` -} - -// ListProjectJobs gets a list of jobs in a project. 
-// -// The scope of jobs to show, one or array of: created, pending, running, -// failed, success, canceled, skipped; showing all jobs if none provided -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/jobs.html#list-project-jobs -func (s *JobsService) ListProjectJobs(pid interface{}, opts *ListJobsOptions, options ...RequestOptionFunc) ([]*Job, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/jobs", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opts, options) - if err != nil { - return nil, nil, err - } - - var jobs []*Job - resp, err := s.client.Do(req, &jobs) - if err != nil { - return nil, resp, err - } - - return jobs, resp, nil -} - -// ListPipelineJobs gets a list of jobs for specific pipeline in a -// project. If the pipeline ID is not found, it will respond with 404. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/jobs.html#list-pipeline-jobs -func (s *JobsService) ListPipelineJobs(pid interface{}, pipelineID int, opts *ListJobsOptions, options ...RequestOptionFunc) ([]*Job, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pipelines/%d/jobs", PathEscape(project), pipelineID) - - req, err := s.client.NewRequest(http.MethodGet, u, opts, options) - if err != nil { - return nil, nil, err - } - - var jobs []*Job - resp, err := s.client.Do(req, &jobs) - if err != nil { - return nil, resp, err - } - - return jobs, resp, nil -} - -// ListPipelineBridges gets a list of bridges for specific pipeline in a -// project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/jobs.html#list-pipeline-jobs -func (s *JobsService) ListPipelineBridges(pid interface{}, pipelineID int, opts *ListJobsOptions, options ...RequestOptionFunc) ([]*Bridge, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pipelines/%d/bridges", PathEscape(project), pipelineID) - - req, err := s.client.NewRequest(http.MethodGet, u, opts, options) - if err != nil { - return nil, nil, err - } - - var bridges []*Bridge - resp, err := s.client.Do(req, &bridges) - if err != nil { - return nil, resp, err - } - - return bridges, resp, nil -} - -// GetJobTokensJobOptions represents the available GetJobTokensJob() options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/jobs.html#get-job-tokens-job -type GetJobTokensJobOptions struct { - JobToken *string `url:"job_token,omitempty" json:"job_token,omitempty"` -} - -// GetJobTokensJob retrieves the job that generated a job token. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/jobs.html#get-job-tokens-job -func (s *JobsService) GetJobTokensJob(opts *GetJobTokensJobOptions, options ...RequestOptionFunc) (*Job, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "job", opts, options) - if err != nil { - return nil, nil, err - } - - job := new(Job) - resp, err := s.client.Do(req, job) - if err != nil { - return nil, resp, err - } - - return job, resp, nil -} - -// GetJob gets a single job of a project. 
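A usage sketch for the job listing calls above, with placeholder project and pipeline IDs; gitlab.Failed is assumed to be one of the library's BuildStateValue constants:

package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("glpat-xxxx") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// List failed jobs in project 123, including retried ones.
	scope := []gitlab.BuildStateValue{gitlab.Failed}
	jobs, _, err := git.Jobs.ListProjectJobs(123, &gitlab.ListJobsOptions{
		Scope:          &scope,
		IncludeRetried: gitlab.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, j := range jobs {
		fmt.Println(j.ID, j.Name, j.Status)
	}

	// List the bridges of pipeline 5678 in the same project.
	bridges, _, err := git.Jobs.ListPipelineBridges(123, 5678, &gitlab.ListJobsOptions{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("bridges:", len(bridges))
}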
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/jobs.html#get-a-single-job -func (s *JobsService) GetJob(pid interface{}, jobID int, options ...RequestOptionFunc) (*Job, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/jobs/%d", PathEscape(project), jobID) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - job := new(Job) - resp, err := s.client.Do(req, job) - if err != nil { - return nil, resp, err - } - - return job, resp, nil -} - -// GetJobArtifacts get jobs artifacts of a project -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/job_artifacts.html#get-job-artifacts -func (s *JobsService) GetJobArtifacts(pid interface{}, jobID int, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/jobs/%d/artifacts", PathEscape(project), jobID) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - artifactsBuf := new(bytes.Buffer) - resp, err := s.client.Do(req, artifactsBuf) - if err != nil { - return nil, resp, err - } - - return bytes.NewReader(artifactsBuf.Bytes()), resp, err -} - -// DownloadArtifactsFileOptions represents the available DownloadArtifactsFile() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/job_artifacts.html#download-the-artifacts-archive -type DownloadArtifactsFileOptions struct { - Job *string `url:"job" json:"job"` -} - -// DownloadArtifactsFile download the artifacts file from the given -// reference name and job provided the job finished successfully. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/job_artifacts.html#download-the-artifacts-archive -func (s *JobsService) DownloadArtifactsFile(pid interface{}, refName string, opt *DownloadArtifactsFileOptions, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/jobs/artifacts/%s/download", PathEscape(project), refName) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - artifactsBuf := new(bytes.Buffer) - resp, err := s.client.Do(req, artifactsBuf) - if err != nil { - return nil, resp, err - } - - return bytes.NewReader(artifactsBuf.Bytes()), resp, err -} - -// DownloadSingleArtifactsFile download a file from the artifacts from the -// given reference name and job provided the job finished successfully. -// Only a single file is going to be extracted from the archive and streamed -// to a client. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/job_artifacts.html#download-a-single-artifact-file-by-job-id -func (s *JobsService) DownloadSingleArtifactsFile(pid interface{}, jobID int, artifactPath string, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - - u := fmt.Sprintf( - "projects/%s/jobs/%d/artifacts/%s", - PathEscape(project), - jobID, - artifactPath, - ) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - artifactBuf := new(bytes.Buffer) - resp, err := s.client.Do(req, artifactBuf) - if err != nil { - return nil, resp, err - } - - return bytes.NewReader(artifactBuf.Bytes()), resp, err -} - -// DownloadSingleArtifactsFile download a single artifact file for a specific -// job of the latest successful pipeline for the given reference name from -// inside the job’s artifacts archive. The file is extracted from the archive -// and streamed to the client. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/job_artifacts.html#download-a-single-artifact-file-from-specific-tag-or-branch -func (s *JobsService) DownloadSingleArtifactsFileByTagOrBranch(pid interface{}, refName string, artifactPath string, opt *DownloadArtifactsFileOptions, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - - u := fmt.Sprintf( - "projects/%s/jobs/artifacts/%s/raw/%s", - PathEscape(project), - PathEscape(refName), - artifactPath, - ) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - artifactBuf := new(bytes.Buffer) - resp, err := s.client.Do(req, artifactBuf) - if err != nil { - return nil, resp, err - } - - return bytes.NewReader(artifactBuf.Bytes()), resp, err -} - -// GetTraceFile gets a trace of a specific job of a project -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/jobs.html#get-a-log-file -func (s *JobsService) GetTraceFile(pid interface{}, jobID int, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/jobs/%d/trace", PathEscape(project), jobID) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - traceBuf := new(bytes.Buffer) - resp, err := s.client.Do(req, traceBuf) - if err != nil { - return nil, resp, err - } - - return bytes.NewReader(traceBuf.Bytes()), resp, err -} - -// CancelJob cancels a single job of a project. 
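Because these artifact helpers buffer the response and hand back a *bytes.Reader, saving an archive or a single file is just an io.Copy; a sketch with placeholder IDs and paths:

package main

import (
	"io"
	"log"
	"os"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("glpat-xxxx") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// Full artifacts archive of job 42 in project 123.
	archive, _, err := git.Jobs.GetJobArtifacts(123, 42)
	if err != nil {
		log.Fatal(err)
	}
	out, err := os.Create("artifacts.zip")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()
	if _, err := io.Copy(out, archive); err != nil {
		log.Fatal(err)
	}

	// A single file, streamed straight out of the same archive.
	report, _, err := git.Jobs.DownloadSingleArtifactsFile(123, 42, "coverage/report.xml")
	if err != nil {
		log.Fatal(err)
	}
	data, _ := io.ReadAll(report)
	log.Printf("report is %d bytes", len(data))
}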
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/jobs.html#cancel-a-job -func (s *JobsService) CancelJob(pid interface{}, jobID int, options ...RequestOptionFunc) (*Job, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/jobs/%d/cancel", PathEscape(project), jobID) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - job := new(Job) - resp, err := s.client.Do(req, job) - if err != nil { - return nil, resp, err - } - - return job, resp, nil -} - -// RetryJob retries a single job of a project -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/jobs.html#retry-a-job -func (s *JobsService) RetryJob(pid interface{}, jobID int, options ...RequestOptionFunc) (*Job, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/jobs/%d/retry", PathEscape(project), jobID) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - job := new(Job) - resp, err := s.client.Do(req, job) - if err != nil { - return nil, resp, err - } - - return job, resp, nil -} - -// EraseJob erases a single job of a project, removes a job -// artifacts and a job trace. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/jobs.html#erase-a-job -func (s *JobsService) EraseJob(pid interface{}, jobID int, options ...RequestOptionFunc) (*Job, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/jobs/%d/erase", PathEscape(project), jobID) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - job := new(Job) - resp, err := s.client.Do(req, job) - if err != nil { - return nil, resp, err - } - - return job, resp, nil -} - -// KeepArtifacts prevents artifacts from being deleted when -// expiration is set. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/job_artifacts.html#keep-artifacts -func (s *JobsService) KeepArtifacts(pid interface{}, jobID int, options ...RequestOptionFunc) (*Job, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/jobs/%d/artifacts/keep", PathEscape(project), jobID) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - job := new(Job) - resp, err := s.client.Do(req, job) - if err != nil { - return nil, resp, err - } - - return job, resp, nil -} - -// PlayJobOptions represents the available PlayJob() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/jobs.html#run-a-job -type PlayJobOptions struct { - JobVariablesAttributes *[]*JobVariableOptions `url:"job_variables_attributes,omitempty" json:"job_variables_attributes,omitempty"` -} - -// JobVariableOptions represents a single job variable. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/jobs.html#run-a-job -type JobVariableOptions struct { - Key *string `url:"key,omitempty" json:"key,omitempty"` - Value *string `url:"value,omitempty" json:"value,omitempty"` - VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` -} - -// PlayJob triggers a manual action to start a job. 
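A sketch chaining two of the lifecycle calls above: retry a job, then keep its artifacts past their expiry (placeholder project and job IDs):

package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("glpat-xxxx") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// Retry job 42 of project 123, then pin the new job's artifacts.
	job, _, err := git.Jobs.RetryJob(123, 42)
	if err != nil {
		log.Fatal(err)
	}
	if _, _, err := git.Jobs.KeepArtifacts(123, job.ID); err != nil {
		log.Fatal(err)
	}
	fmt.Println("retried job", job.ID, "status", job.Status)
}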
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/jobs.html#run-a-job -func (s *JobsService) PlayJob(pid interface{}, jobID int, opt *PlayJobOptions, options ...RequestOptionFunc) (*Job, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/jobs/%d/play", PathEscape(project), jobID) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - job := new(Job) - resp, err := s.client.Do(req, job) - if err != nil { - return nil, resp, err - } - - return job, resp, nil -} - -// DeleteArtifacts delete artifacts of a job -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/job_artifacts.html#delete-job-artifacts -func (s *JobsService) DeleteArtifacts(pid interface{}, jobID int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/jobs/%d/artifacts", PathEscape(project), jobID) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// DeleteProjectArtifacts delete artifacts eligible for deletion in a project -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/job_artifacts.html#delete-project-artifacts -func (s *JobsService) DeleteProjectArtifacts(pid interface{}, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/artifacts", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/keys.go b/vendor/github.com/xanzy/go-gitlab/keys.go deleted file mode 100644 index d9cf598333..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/keys.go +++ /dev/null @@ -1,97 +0,0 @@ -// -// Copyright 2021, Patrick Webster -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// KeysService handles communication with the -// keys related methods of the GitLab API. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/keys.html -type KeysService struct { - client *Client -} - -// Key represents a GitLab user's SSH key. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/keys.html -type Key struct { - ID int `json:"id"` - Title string `json:"title"` - Key string `json:"key"` - CreatedAt *time.Time `json:"created_at"` - User User `json:"user"` -} - -// GetKeyWithUser gets a single key by id along with the associated -// user information. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/keys.html#get-ssh-key-with-user-by-id-of-an-ssh-key -func (s *KeysService) GetKeyWithUser(key int, options ...RequestOptionFunc) (*Key, *Response, error) { - u := fmt.Sprintf("keys/%d", key) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - k := new(Key) - resp, err := s.client.Do(req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil -} - -// GetKeyByFingerprintOptions represents the available GetKeyByFingerprint() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/keys.html#get-user-by-fingerprint-of-ssh-key -// https://docs.gitlab.com/ee/api/keys.html#get-user-by-deploy-key-fingerprint -type GetKeyByFingerprintOptions struct { - Fingerprint string `url:"fingerprint" json:"fingerprint"` -} - -// GetKeyByFingerprint gets a specific SSH key or deploy key by fingerprint -// along with the associated user information. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/keys.html#get-user-by-fingerprint-of-ssh-key -// https://docs.gitlab.com/ee/api/keys.html#get-user-by-deploy-key-fingerprint -func (s *KeysService) GetKeyByFingerprint(opt *GetKeyByFingerprintOptions, options ...RequestOptionFunc) (*Key, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "keys", opt, options) - if err != nil { - return nil, nil, err - } - - k := new(Key) - resp, err := s.client.Do(req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/labels.go b/vendor/github.com/xanzy/go-gitlab/labels.go deleted file mode 100644 index d36e85b086..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/labels.go +++ /dev/null @@ -1,317 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "encoding/json" - "fmt" - "net/http" -) - -// LabelsService handles communication with the label related methods of the -// GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/labels.html -type LabelsService struct { - client *Client -} - -// Label represents a GitLab label. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/labels.html -type Label struct { - ID int `json:"id"` - Name string `json:"name"` - Color string `json:"color"` - TextColor string `json:"text_color"` - Description string `json:"description"` - OpenIssuesCount int `json:"open_issues_count"` - ClosedIssuesCount int `json:"closed_issues_count"` - OpenMergeRequestsCount int `json:"open_merge_requests_count"` - Subscribed bool `json:"subscribed"` - Priority int `json:"priority"` - IsProjectLabel bool `json:"is_project_label"` -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
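A sketch of the fingerprint lookup above; the fingerprint value is a made-up example, not a real key:

package main

import gitlab "github.com/xanzy/go-gitlab"

// lookupKey resolves an SSH key (and its owner) from a fingerprint.
func lookupKey(git *gitlab.Client) (*gitlab.Key, error) {
	key, _, err := git.Keys.GetKeyByFingerprint(&gitlab.GetKeyByFingerprintOptions{
		Fingerprint: "SHA256:AbCdEfGhIjKlMnOpQrStUvWxYz0123456789abcdefg", // placeholder
	})
	return key, err
}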
-func (l *Label) UnmarshalJSON(data []byte) error { - type alias Label - if err := json.Unmarshal(data, (*alias)(l)); err != nil { - return err - } - - if l.Name == "" { - var raw map[string]interface{} - if err := json.Unmarshal(data, &raw); err != nil { - return err - } - if title, ok := raw["title"].(string); ok { - l.Name = title - } - } - - return nil -} - -func (l Label) String() string { - return Stringify(l) -} - -// ListLabelsOptions represents the available ListLabels() options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/labels.html#list-labels -type ListLabelsOptions struct { - ListOptions - WithCounts *bool `url:"with_counts,omitempty" json:"with_counts,omitempty"` - IncludeAncestorGroups *bool `url:"include_ancestor_groups,omitempty" json:"include_ancestor_groups,omitempty"` - Search *string `url:"search,omitempty" json:"search,omitempty"` -} - -// ListLabels gets all labels for given project. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/labels.html#list-labels -func (s *LabelsService) ListLabels(pid interface{}, opt *ListLabelsOptions, options ...RequestOptionFunc) ([]*Label, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/labels", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var l []*Label - resp, err := s.client.Do(req, &l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil -} - -// GetLabel get a single label for a given project. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/labels.html#get-a-single-project-label -func (s *LabelsService) GetLabel(pid interface{}, labelID interface{}, options ...RequestOptionFunc) (*Label, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - label, err := parseID(labelID) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/labels/%s", PathEscape(project), PathEscape(label)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var l *Label - resp, err := s.client.Do(req, &l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil -} - -// CreateLabelOptions represents the available CreateLabel() options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/labels.html#create-a-new-label -type CreateLabelOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - Color *string `url:"color,omitempty" json:"color,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - Priority *int `url:"priority,omitempty" json:"priority,omitempty"` -} - -// CreateLabel creates a new label for given repository with given name and -// color. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/labels.html#create-a-new-label -func (s *LabelsService) CreateLabel(pid interface{}, opt *CreateLabelOptions, options ...RequestOptionFunc) (*Label, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/labels", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - l := new(Label) - resp, err := s.client.Do(req, l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil -} - -// DeleteLabelOptions represents the available DeleteLabel() options. 
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/labels.html#delete-a-label
-type DeleteLabelOptions struct {
-	Name *string `url:"name,omitempty" json:"name,omitempty"`
-}
-
-// DeleteLabel deletes a label given by its name or ID.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/labels.html#delete-a-label
-func (s *LabelsService) DeleteLabel(pid interface{}, lid interface{}, opt *DeleteLabelOptions, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/labels", PathEscape(project))
-
-	if lid != nil {
-		label, err := parseID(lid)
-		if err != nil {
-			return nil, err
-		}
-		u = fmt.Sprintf("projects/%s/labels/%s", PathEscape(project), PathEscape(label))
-	}
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, opt, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// UpdateLabelOptions represents the available UpdateLabel() options.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/labels.html#edit-an-existing-label
-type UpdateLabelOptions struct {
-	Name        *string `url:"name,omitempty" json:"name,omitempty"`
-	NewName     *string `url:"new_name,omitempty" json:"new_name,omitempty"`
-	Color       *string `url:"color,omitempty" json:"color,omitempty"`
-	Description *string `url:"description,omitempty" json:"description,omitempty"`
-	Priority    *int    `url:"priority,omitempty" json:"priority,omitempty"`
-}
-
-// UpdateLabel updates an existing label with a new name or new color. At
-// least one parameter is required to update the label.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/labels.html#edit-an-existing-label
-func (s *LabelsService) UpdateLabel(pid interface{}, opt *UpdateLabelOptions, options ...RequestOptionFunc) (*Label, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/labels", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	l := new(Label)
-	resp, err := s.client.Do(req, l)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return l, resp, nil
-}
-
-// SubscribeToLabel subscribes the authenticated user to a label to receive
-// notifications. If the user is already subscribed to the label, the status
-// code 304 is returned.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/labels.html#subscribe-to-a-label
-func (s *LabelsService) SubscribeToLabel(pid interface{}, labelID interface{}, options ...RequestOptionFunc) (*Label, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	label, err := parseID(labelID)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/labels/%s/subscribe", PathEscape(project), PathEscape(label))
-
-	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	l := new(Label)
-	resp, err := s.client.Do(req, l)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return l, resp, nil
-}
-
-// UnsubscribeFromLabel unsubscribes the authenticated user from a label so
-// that they no longer receive notifications from it. If the user is not
-// subscribed to the label, the status code 304 is returned.
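Create-then-update is the usual flow for the label calls above; a sketch with a placeholder project ID and label values:

package main

import gitlab "github.com/xanzy/go-gitlab"

// ensureBugLabel creates a "bug" label in project 123, then gives it the
// highest priority via UpdateLabel.
func ensureBugLabel(git *gitlab.Client) error {
	_, _, err := git.Labels.CreateLabel(123, &gitlab.CreateLabelOptions{
		Name:  gitlab.String("bug"),
		Color: gitlab.String("#d9534f"),
	})
	if err != nil {
		return err
	}

	_, _, err = git.Labels.UpdateLabel(123, &gitlab.UpdateLabelOptions{
		Name:     gitlab.String("bug"),
		Priority: gitlab.Int(1),
	})
	return err
}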
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/labels.html#unsubscribe-from-a-label -func (s *LabelsService) UnsubscribeFromLabel(pid interface{}, labelID interface{}, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - label, err := parseID(labelID) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/labels/%s/unsubscribe", PathEscape(project), PathEscape(label)) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// PromoteLabel Promotes a project label to a group label. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/labels.html#promote-a-project-label-to-a-group-label -func (s *LabelsService) PromoteLabel(pid interface{}, labelID interface{}, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - label, err := parseID(labelID) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/labels/%s/promote", PathEscape(project), PathEscape(label)) - - req, err := s.client.NewRequest(http.MethodPut, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/license.go b/vendor/github.com/xanzy/go-gitlab/license.go deleted file mode 100644 index 4882f90a9d..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/license.go +++ /dev/null @@ -1,128 +0,0 @@ -// -// Copyright 2021, Patrick Webster -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// LicenseService handles communication with the license -// related methods of the GitLab API. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/license.html -type LicenseService struct { - client *Client -} - -// License represents a GitLab license. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/license.html -type License struct { - ID int `json:"id"` - Plan string `json:"plan"` - CreatedAt *time.Time `json:"created_at"` - StartsAt *ISOTime `json:"starts_at"` - ExpiresAt *ISOTime `json:"expires_at"` - HistoricalMax int `json:"historical_max"` - MaximumUserCount int `json:"maximum_user_count"` - Expired bool `json:"expired"` - Overage int `json:"overage"` - UserLimit int `json:"user_limit"` - ActiveUsers int `json:"active_users"` - Licensee struct { - Name string `json:"Name"` - Company string `json:"Company"` - Email string `json:"Email"` - } `json:"licensee"` - // Add on codes that may occur in legacy licenses that don't have a plan yet. 
- // https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/models/license.rb - AddOns struct { - GitLabAuditorUser int `json:"GitLab_Auditor_User"` - GitLabDeployBoard int `json:"GitLab_DeployBoard"` - GitLabFileLocks int `json:"GitLab_FileLocks"` - GitLabGeo int `json:"GitLab_Geo"` - GitLabServiceDesk int `json:"GitLab_ServiceDesk"` - } `json:"add_ons"` -} - -func (l License) String() string { - return Stringify(l) -} - -// GetLicense retrieves information about the current license. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/license.html#retrieve-information-about-the-current-license -func (s *LicenseService) GetLicense(options ...RequestOptionFunc) (*License, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "license", nil, options) - if err != nil { - return nil, nil, err - } - - l := new(License) - resp, err := s.client.Do(req, l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil -} - -// AddLicenseOptions represents the available AddLicense() options. -// -// https://docs.gitlab.com/ee/api/license.html#add-a-new-license -type AddLicenseOptions struct { - License *string `url:"license" json:"license"` -} - -// AddLicense adds a new license. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/license.html#add-a-new-license -func (s *LicenseService) AddLicense(opt *AddLicenseOptions, options ...RequestOptionFunc) (*License, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "license", opt, options) - if err != nil { - return nil, nil, err - } - - l := new(License) - resp, err := s.client.Do(req, l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil -} - -// DeleteLicense deletes an existing license. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/license.html#delete-a-license -func (s *LicenseService) DeleteLicense(licenseID int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("license/%d", licenseID) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/license_templates.go b/vendor/github.com/xanzy/go-gitlab/license_templates.go deleted file mode 100644 index 7eea05fd79..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/license_templates.go +++ /dev/null @@ -1,109 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" -) - -// LicenseTemplate represents a license template. 
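A sketch of the license round trip above (get, add, delete); the license string is a placeholder, not a real key:

package main

import (
	"fmt"

	gitlab "github.com/xanzy/go-gitlab"
)

// rotateLicense reads the current license, uploads a new one, and deletes
// the upload again.
func rotateLicense(git *gitlab.Client) error {
	current, _, err := git.License.GetLicense()
	if err != nil {
		return err
	}
	fmt.Println("current plan:", current.Plan)

	added, _, err := git.License.AddLicense(&gitlab.AddLicenseOptions{
		License: gitlab.String("eyJkYXRhIjoi..."), // placeholder license blob
	})
	if err != nil {
		return err
	}

	_, err = git.License.DeleteLicense(added.ID)
	return err
}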
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/templates/licenses.html -type LicenseTemplate struct { - Key string `json:"key"` - Name string `json:"name"` - Nickname string `json:"nickname"` - Featured bool `json:"featured"` - HTMLURL string `json:"html_url"` - SourceURL string `json:"source_url"` - Description string `json:"description"` - Conditions []string `json:"conditions"` - Permissions []string `json:"permissions"` - Limitations []string `json:"limitations"` - Content string `json:"content"` -} - -// LicenseTemplatesService handles communication with the license templates -// related methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/templates/licenses.html -type LicenseTemplatesService struct { - client *Client -} - -// ListLicenseTemplatesOptions represents the available -// ListLicenseTemplates() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/templates/licenses.html#list-license-templates -type ListLicenseTemplatesOptions struct { - ListOptions - Popular *bool `url:"popular,omitempty" json:"popular,omitempty"` -} - -// ListLicenseTemplates get all license templates. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/templates/licenses.html#list-license-templates -func (s *LicenseTemplatesService) ListLicenseTemplates(opt *ListLicenseTemplatesOptions, options ...RequestOptionFunc) ([]*LicenseTemplate, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "templates/licenses", opt, options) - if err != nil { - return nil, nil, err - } - - var lts []*LicenseTemplate - resp, err := s.client.Do(req, <s) - if err != nil { - return nil, resp, err - } - - return lts, resp, nil -} - -// GetLicenseTemplateOptions represents the available -// GetLicenseTemplate() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/templates/licenses.html#single-license-template -type GetLicenseTemplateOptions struct { - Project *string `url:"project,omitempty" json:"project,omitempty"` - Fullname *string `url:"fullname,omitempty" json:"fullname,omitempty"` -} - -// GetLicenseTemplate get a single license template. You can pass parameters -// to replace the license placeholder. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/templates/licenses.html#single-license-template -func (s *LicenseTemplatesService) GetLicenseTemplate(template string, opt *GetLicenseTemplateOptions, options ...RequestOptionFunc) (*LicenseTemplate, *Response, error) { - u := fmt.Sprintf("templates/licenses/%s", template) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - lt := new(LicenseTemplate) - resp, err := s.client.Do(req, lt) - if err != nil { - return nil, resp, err - } - - return lt, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/markdown.go b/vendor/github.com/xanzy/go-gitlab/markdown.go deleted file mode 100644 index 8c20749fe8..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/markdown.go +++ /dev/null @@ -1,47 +0,0 @@ -package gitlab - -import "net/http" - -// MarkdownService handles communication with the markdown related methods of -// the GitLab API. -// -// Gitlab API docs: https://docs.gitlab.com/ee/api/markdown.html -type MarkdownService struct { - client *Client -} - -// Markdown represents a markdown document. -// -// Gitlab API docs: https://docs.gitlab.com/ee/api/markdown.html -type Markdown struct { - HTML string `json:"html"` -} - -// RenderOptions represents the available Render() options. 
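A sketch of fetching a single license template with its placeholders resolved; the template key "mit" and the names are illustrative:

package main

import gitlab "github.com/xanzy/go-gitlab"

// fetchMIT returns the MIT license text with project and copyright-holder
// placeholders filled in.
func fetchMIT(git *gitlab.Client) (string, error) {
	lt, _, err := git.LicenseTemplates.GetLicenseTemplate("mit", &gitlab.GetLicenseTemplateOptions{
		Project:  gitlab.String("my-project"),
		Fullname: gitlab.String("Jane Doe"),
	})
	if err != nil {
		return "", err
	}
	return lt.Content, nil
}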
-// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/markdown.html#render-an-arbitrary-markdown-document -type RenderOptions struct { - Text *string `url:"text,omitempty" json:"text,omitempty"` - GitlabFlavouredMarkdown *bool `url:"gfm,omitempty" json:"gfm,omitempty"` - Project *string `url:"project,omitempty" json:"project,omitempty"` -} - -// Render an arbitrary markdown document. -// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/markdown.html#render-an-arbitrary-markdown-document -func (s *MarkdownService) Render(opt *RenderOptions, options ...RequestOptionFunc) (*Markdown, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "markdown", opt, options) - if err != nil { - return nil, nil, err - } - - md := new(Markdown) - response, err := s.client.Do(req, md) - if err != nil { - return nil, response, err - } - - return md, response, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/member_roles.go b/vendor/github.com/xanzy/go-gitlab/member_roles.go deleted file mode 100644 index 4d791a9137..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/member_roles.go +++ /dev/null @@ -1,144 +0,0 @@ -package gitlab - -import ( - "fmt" - "net/http" -) - -// MemberRolesService handles communication with the member roles related -// methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/member_roles.html -type MemberRolesService struct { - client *Client -} - -// MemberRole represents a GitLab member role. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/member_roles.html -type MemberRole struct { - ID int `json:"id"` - Name string `json:"name"` - Description string `json:"description,omitempty"` - GroupID int `json:"group_id"` - BaseAccessLevel AccessLevelValue `json:"base_access_level"` - AdminCICDVariables bool `json:"admin_cicd_variables,omitempty"` - AdminComplianceFramework bool `json:"admin_compliance_framework,omitempty"` - AdminGroupMembers bool `json:"admin_group_member,omitempty"` - AdminMergeRequests bool `json:"admin_merge_request,omitempty"` - AdminPushRules bool `json:"admin_push_rules,omitempty"` - AdminTerraformState bool `json:"admin_terraform_state,omitempty"` - AdminVulnerability bool `json:"admin_vulnerability,omitempty"` - AdminWebHook bool `json:"admin_web_hook,omitempty"` - ArchiveProject bool `json:"archive_project,omitempty"` - ManageDeployTokens bool `json:"manage_deploy_tokens,omitempty"` - ManageGroupAccesToken bool `json:"manage_group_access_tokens,omitempty"` - ManageMergeRequestSettings bool `json:"manage_merge_request_settings,omitempty"` - ManageProjectAccessToken bool `json:"manage_project_access_tokens,omitempty"` - ManageSecurityPolicyLink bool `json:"manage_security_policy_link,omitempty"` - ReadCode bool `json:"read_code,omitempty"` - ReadRunners bool `json:"read_runners,omitempty"` - ReadDependency bool `json:"read_dependency,omitempty"` - ReadVulnerability bool `json:"read_vulnerability,omitempty"` - RemoveGroup bool `json:"remove_group,omitempty"` - RemoveProject bool `json:"remove_project,omitempty"` -} - -// ListMemberRoles gets a list of member roles for a specified group. 
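A sketch of rendering GitLab-flavoured markdown in a project context via the Render call above; the text and project path are placeholders:

package main

import gitlab "github.com/xanzy/go-gitlab"

// renderNote converts a markdown snippet to HTML, resolving references such
// as #42 against the given project.
func renderNote(git *gitlab.Client) (string, error) {
	md, _, err := git.Markdown.Render(&gitlab.RenderOptions{
		Text:                    gitlab.String("Closes #42 :tada:"),
		GitlabFlavouredMarkdown: gitlab.Bool(true),
		Project:                 gitlab.String("group/project"),
	})
	if err != nil {
		return "", err
	}
	return md.HTML, nil
}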
-// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/member_roles.html#list-all-member-roles-of-a-group -func (s *MemberRolesService) ListMemberRoles(gid interface{}, options ...RequestOptionFunc) ([]*MemberRole, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/member_roles", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var mrs []*MemberRole - resp, err := s.client.Do(req, &mrs) - if err != nil { - return nil, resp, err - } - - return mrs, resp, nil -} - -// CreateMemberRoleOptions represents the available CreateMemberRole() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/member_roles.html#add-a-member-role-to-a-group -type CreateMemberRoleOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - BaseAccessLevel *AccessLevelValue `url:"base_access_level,omitempty" json:"base_access_level,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - AdminCICDVariables *bool `url:"admin_cicd_variables" json:"admin_cicd_variables,omitempty"` - AdminComplianceFramework *bool `url:"admin_compliance_framework" json:"admin_compliance_framework,omitempty"` - AdminGroupMembers *bool `url:"admin_group_member" json:"admin_group_member,omitempty"` - AdminMergeRequest *bool `url:"admin_merge_request,omitempty" json:"admin_merge_request,omitempty"` - AdminPushRules *bool `url:"admin_push_rules" json:"admin_push_rules,omitempty"` - AdminTerraformState *bool `url:"admin_terraform_state" json:"admin_terraform_state,omitempty"` - AdminVulnerability *bool `url:"admin_vulnerability,omitempty" json:"admin_vulnerability,omitempty"` - AdminWebHook *bool `url:"admin_web_hook" json:"admin_web_hook,omitempty"` - ArchiveProject *bool `url:"archive_project" json:"archive_project,omitempty"` - ManageDeployTokens *bool `url:"manage_deploy_tokens" json:"manage_deploy_tokens,omitempty"` - ManageGroupAccesToken *bool `url:"manage_group_access_tokens" json:"manage_group_access_tokens,omitempty"` - ManageMergeRequestSettings *bool `url:"manage_merge_request_settings" json:"manage_merge_request_settings,omitempty"` - ManageProjectAccessToken *bool `url:"manage_project_access_tokens" json:"manage_project_access_tokens,omitempty"` - ManageSecurityPolicyLink *bool `url:"manage_security_policy_link" json:"manage_security_policy_link,omitempty"` - ReadCode *bool `url:"read_code,omitempty" json:"read_code,omitempty"` - ReadRunners *bool `url:"read_runners" json:"read_runners,omitempty"` - ReadDependency *bool `url:"read_dependency,omitempty" json:"read_dependency,omitempty"` - ReadVulnerability *bool `url:"read_vulnerability,omitempty" json:"read_vulnerability,omitempty"` - RemoveGroup *bool `url:"remove_group" json:"remove_group,omitempty"` - RemoveProject *bool `url:"remove_project" json:"remove_project,omitempty"` -} - -// CreateMemberRole creates a new member role for a specified group. 
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/member_roles.html#add-a-member-role-to-a-group
-func (s *MemberRolesService) CreateMemberRole(gid interface{}, opt *CreateMemberRoleOptions, options ...RequestOptionFunc) (*MemberRole, *Response, error) {
-	group, err := parseID(gid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("groups/%s/member_roles", PathEscape(group))
-
-	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	mr := new(MemberRole)
-	resp, err := s.client.Do(req, mr)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return mr, resp, nil
-}
-
-// DeleteMemberRole deletes a member role from a specified group.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/member_roles.html#remove-member-role-of-a-group
-func (s *MemberRolesService) DeleteMemberRole(gid interface{}, memberRole int, options ...RequestOptionFunc) (*Response, error) {
-	group, err := parseID(gid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("groups/%s/member_roles/%d", PathEscape(group), memberRole)
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/merge_request_approvals.go b/vendor/github.com/xanzy/go-gitlab/merge_request_approvals.go
deleted file mode 100644
index d2f1f81ff3..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/merge_request_approvals.go
+++ /dev/null
@@ -1,440 +0,0 @@
-//
-// Copyright 2021, Sander van Harmelen
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package gitlab
-
-import (
-	"fmt"
-	"net/http"
-	"time"
-)
-
-// MergeRequestApprovalsService handles communication with the merge request
-// approvals related methods of the GitLab API. This includes reading/updating
-// approval settings and approving/unapproving merge requests.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/merge_request_approvals.html
-type MergeRequestApprovalsService struct {
-	client *Client
-}
-
-// MergeRequestApprovals represents GitLab merge request approvals.
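A sketch of the member role calls deleted above; the client field name MemberRolesService is an assumption worth verifying, and AccessLevel/GuestPermissions are the library's access-level helpers:

package main

import gitlab "github.com/xanzy/go-gitlab"

// createCodeReaderRole creates a guest-based custom role in group 42 that
// can additionally read code.
func createCodeReaderRole(git *gitlab.Client) (*gitlab.MemberRole, error) {
	role, _, err := git.MemberRolesService.CreateMemberRole(42, &gitlab.CreateMemberRoleOptions{
		Name:            gitlab.String("Guest+ code reader"),
		BaseAccessLevel: gitlab.AccessLevel(gitlab.GuestPermissions),
		ReadCode:        gitlab.Bool(true),
	})
	return role, err
}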
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_request_approvals.html#merge-request-level-mr-approvals -type MergeRequestApprovals struct { - ID int `json:"id"` - IID int `json:"iid"` - ProjectID int `json:"project_id"` - Title string `json:"title"` - Description string `json:"description"` - State string `json:"state"` - CreatedAt *time.Time `json:"created_at"` - UpdatedAt *time.Time `json:"updated_at"` - MergeStatus string `json:"merge_status"` - Approved bool `json:"approved"` - ApprovalsBeforeMerge int `json:"approvals_before_merge"` - ApprovalsRequired int `json:"approvals_required"` - ApprovalsLeft int `json:"approvals_left"` - RequirePasswordToApprove bool `json:"require_password_to_approve"` - ApprovedBy []*MergeRequestApproverUser `json:"approved_by"` - SuggestedApprovers []*BasicUser `json:"suggested_approvers"` - Approvers []*MergeRequestApproverUser `json:"approvers"` - ApproverGroups []*MergeRequestApproverGroup `json:"approver_groups"` - UserHasApproved bool `json:"user_has_approved"` - UserCanApprove bool `json:"user_can_approve"` - ApprovalRulesLeft []*MergeRequestApprovalRule `json:"approval_rules_left"` - HasApprovalRules bool `json:"has_approval_rules"` - MergeRequestApproversAvailable bool `json:"merge_request_approvers_available"` - MultipleApprovalRulesAvailable bool `json:"multiple_approval_rules_available"` -} - -func (m MergeRequestApprovals) String() string { - return Stringify(m) -} - -// MergeRequestApproverGroup represents GitLab project level merge request approver group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_request_approvals.html#project-level-mr-approvals -type MergeRequestApproverGroup struct { - Group struct { - ID int `json:"id"` - Name string `json:"name"` - Path string `json:"path"` - Description string `json:"description"` - Visibility string `json:"visibility"` - AvatarURL string `json:"avatar_url"` - WebURL string `json:"web_url"` - FullName string `json:"full_name"` - FullPath string `json:"full_path"` - LFSEnabled bool `json:"lfs_enabled"` - RequestAccessEnabled bool `json:"request_access_enabled"` - } -} - -// MergeRequestApprovalRule represents a GitLab merge request approval rule. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_request_approvals.html#get-merge-request-level-rules -type MergeRequestApprovalRule struct { - ID int `json:"id"` - Name string `json:"name"` - RuleType string `json:"rule_type"` - ReportType string `json:"report_type"` - EligibleApprovers []*BasicUser `json:"eligible_approvers"` - ApprovalsRequired int `json:"approvals_required"` - SourceRule *ProjectApprovalRule `json:"source_rule"` - Users []*BasicUser `json:"users"` - Groups []*Group `json:"groups"` - ContainsHiddenGroups bool `json:"contains_hidden_groups"` - Section string `json:"section"` - ApprovedBy []*BasicUser `json:"approved_by"` - Approved bool `json:"approved"` -} - -// MergeRequestApprovalState represents a GitLab merge request approval state. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_request_approvals.html#get-the-approval-state-of-merge-requests -type MergeRequestApprovalState struct { - ApprovalRulesOverwritten bool `json:"approval_rules_overwritten"` - Rules []*MergeRequestApprovalRule `json:"rules"` -} - -// String is a stringify for MergeRequestApprovalRule -func (s MergeRequestApprovalRule) String() string { - return Stringify(s) -} - -// MergeRequestApproverUser represents GitLab project level merge request approver user. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_request_approvals.html#project-level-mr-approvals -type MergeRequestApproverUser struct { - User *BasicUser -} - -// ApproveMergeRequestOptions represents the available ApproveMergeRequest() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_request_approvals.html#approve-merge-request -type ApproveMergeRequestOptions struct { - SHA *string `url:"sha,omitempty" json:"sha,omitempty"` -} - -// ApproveMergeRequest approves a merge request on GitLab. If a non-empty sha -// is provided then it must match the sha at the HEAD of the MR. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_request_approvals.html#approve-merge-request -func (s *MergeRequestApprovalsService) ApproveMergeRequest(pid interface{}, mr int, opt *ApproveMergeRequestOptions, options ...RequestOptionFunc) (*MergeRequestApprovals, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/approve", PathEscape(project), mr) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - m := new(MergeRequestApprovals) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// UnapproveMergeRequest unapproves a previously approved merge request on GitLab. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_request_approvals.html#unapprove-merge-request -func (s *MergeRequestApprovalsService) UnapproveMergeRequest(pid interface{}, mr int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/unapprove", PathEscape(project), mr) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// ResetApprovalsOfMergeRequest clear all approvals of merge request on GitLab. -// Available only for bot users based on project or group tokens. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_request_approvals.html#reset-approvals-of-a-merge-request -func (s *MergeRequestApprovalsService) ResetApprovalsOfMergeRequest(pid interface{}, mr int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/reset_approvals", PathEscape(project), mr) - - req, err := s.client.NewRequest(http.MethodPut, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// ChangeMergeRequestApprovalConfigurationOptions represents the available -// ChangeMergeRequestApprovalConfiguration() options. 
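// ---------------------------------------------------------------------------
// Editor's sketch (not part of the patch): approving and un-approving a merge
// request with the service deleted above. Pinning the approval to a SHA (per
// the ApproveMergeRequest doc) guards against approving newer, unreviewed
// commits. The token, IDs, and SHA are hypothetical; signatures come from the
// removed source.
// ---------------------------------------------------------------------------
package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func ptr[T any](v T) *T { return &v }

func main() {
	git, err := gitlab.NewClient("glpat-example") // hypothetical token
	if err != nil {
		log.Fatal(err)
	}

	const (
		projectID = 1234 // hypothetical project ID
		mrIID     = 56   // hypothetical merge request IID
	)

	// Approve, but only if the MR head is still the commit we reviewed.
	approvals, _, err := git.MergeRequestApprovals.ApproveMergeRequest(projectID, mrIID,
		&gitlab.ApproveMergeRequestOptions{
			SHA: ptr("0b4bc9a49b562e85de7cc9e834518ea6828729b9"), // hypothetical head SHA
		})
	if err != nil {
		log.Fatal(err) // a SHA mismatch surfaces here as an API error
	}
	fmt.Printf("approvals still required: %d\n", approvals.ApprovalsLeft)

	// Withdraw the approval again.
	if _, err := git.MergeRequestApprovals.UnapproveMergeRequest(projectID, mrIID); err != nil {
		log.Fatal(err)
	}
}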
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_request_approvals.html#change-approval-configuration-deprecated -type ChangeMergeRequestApprovalConfigurationOptions struct { - ApprovalsRequired *int `url:"approvals_required,omitempty" json:"approvals_required,omitempty"` -} - -// GetConfiguration shows information about single merge request approvals -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_request_approvals.html#get-configuration-1 -func (s *MergeRequestApprovalsService) GetConfiguration(pid interface{}, mr int, options ...RequestOptionFunc) (*MergeRequestApprovals, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/approvals", PathEscape(project), mr) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - m := new(MergeRequestApprovals) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// ChangeApprovalConfiguration updates the approval configuration of a merge request. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_request_approvals.html#change-approval-configuration-deprecated -func (s *MergeRequestApprovalsService) ChangeApprovalConfiguration(pid interface{}, mergeRequest int, opt *ChangeMergeRequestApprovalConfigurationOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/approvals", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - m := new(MergeRequest) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// ChangeMergeRequestAllowedApproversOptions represents the available -// ChangeMergeRequestAllowedApprovers() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_request_approvals.html#change-allowed-approvers-for-merge-request -type ChangeMergeRequestAllowedApproversOptions struct { - ApproverIDs []int `url:"approver_ids" json:"approver_ids"` - ApproverGroupIDs []int `url:"approver_group_ids" json:"approver_group_ids"` -} - -// ChangeAllowedApprovers updates the approvers for a merge request. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_request_approvals.html#change-allowed-approvers-for-merge-request -func (s *MergeRequestApprovalsService) ChangeAllowedApprovers(pid interface{}, mergeRequest int, opt *ChangeMergeRequestAllowedApproversOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/approvers", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - m := new(MergeRequest) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// GetApprovalRules requests information about a merge request’s approval rules -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_request_approvals.html#get-merge-request-level-rules -func (s *MergeRequestApprovalsService) GetApprovalRules(pid interface{}, mergeRequest int, options ...RequestOptionFunc) ([]*MergeRequestApprovalRule, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/approval_rules", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var par []*MergeRequestApprovalRule - resp, err := s.client.Do(req, &par) - if err != nil { - return nil, resp, err - } - - return par, resp, nil -} - -// GetApprovalState requests information about a merge request’s approval state -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_request_approvals.html#get-the-approval-state-of-merge-requests -func (s *MergeRequestApprovalsService) GetApprovalState(pid interface{}, mergeRequest int, options ...RequestOptionFunc) (*MergeRequestApprovalState, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/approval_state", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var pas *MergeRequestApprovalState - resp, err := s.client.Do(req, &pas) - if err != nil { - return nil, resp, err - } - - return pas, resp, nil -} - -// CreateMergeRequestApprovalRuleOptions represents the available CreateApprovalRule() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_request_approvals.html#create-merge-request-level-rule -type CreateMergeRequestApprovalRuleOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - ApprovalsRequired *int `url:"approvals_required,omitempty" json:"approvals_required,omitempty"` - ApprovalProjectRuleID *int `url:"approval_project_rule_id,omitempty" json:"approval_project_rule_id,omitempty"` - UserIDs *[]int `url:"user_ids,omitempty" json:"user_ids,omitempty"` - GroupIDs *[]int `url:"group_ids,omitempty" json:"group_ids,omitempty"` -} - -// CreateApprovalRule creates a new MR level approval rule. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_request_approvals.html#create-merge-request-level-rule -func (s *MergeRequestApprovalsService) CreateApprovalRule(pid interface{}, mergeRequest int, opt *CreateMergeRequestApprovalRuleOptions, options ...RequestOptionFunc) (*MergeRequestApprovalRule, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/approval_rules", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - par := new(MergeRequestApprovalRule) - resp, err := s.client.Do(req, &par) - if err != nil { - return nil, resp, err - } - - return par, resp, nil -} - -// UpdateMergeRequestApprovalRuleOptions represents the available UpdateApprovalRule() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_request_approvals.html#update-merge-request-level-rule -type UpdateMergeRequestApprovalRuleOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - ApprovalsRequired *int `url:"approvals_required,omitempty" json:"approvals_required,omitempty"` - UserIDs *[]int `url:"user_ids,omitempty" json:"user_ids,omitempty"` - GroupIDs *[]int `url:"group_ids,omitempty" json:"group_ids,omitempty"` -} - -// UpdateApprovalRule updates an existing approval rule with new options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_request_approvals.html#update-merge-request-level-rule -func (s *MergeRequestApprovalsService) UpdateApprovalRule(pid interface{}, mergeRequest int, approvalRule int, opt *UpdateMergeRequestApprovalRuleOptions, options ...RequestOptionFunc) (*MergeRequestApprovalRule, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/approval_rules/%d", PathEscape(project), mergeRequest, approvalRule) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - par := new(MergeRequestApprovalRule) - resp, err := s.client.Do(req, &par) - if err != nil { - return nil, resp, err - } - - return par, resp, nil -} - -// DeleteApprovalRule deletes a mr level approval rule. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_request_approvals.html#delete-merge-request-level-rule -func (s *MergeRequestApprovalsService) DeleteApprovalRule(pid interface{}, mergeRequest int, approvalRule int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/approval_rules/%d", PathEscape(project), mergeRequest, approvalRule) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/merge_requests.go b/vendor/github.com/xanzy/go-gitlab/merge_requests.go deleted file mode 100644 index a9e8d2e5c6..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/merge_requests.go +++ /dev/null @@ -1,1080 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
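// ---------------------------------------------------------------------------
// Editor's sketch (not part of the patch): full lifecycle of an MR-level
// approval rule via the Create/Update/DeleteApprovalRule methods deleted
// above. Token, project, MR, and user IDs are hypothetical; signatures and
// option fields come from the removed source.
// ---------------------------------------------------------------------------
package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func ptr[T any](v T) *T { return &v }

func main() {
	git, err := gitlab.NewClient("glpat-example") // hypothetical token
	if err != nil {
		log.Fatal(err)
	}

	const projectID, mrIID = 1234, 56 // hypothetical IDs

	// Require two approvals from a named set of reviewers.
	rule, _, err := git.MergeRequestApprovals.CreateApprovalRule(projectID, mrIID,
		&gitlab.CreateMergeRequestApprovalRuleOptions{
			Name:              ptr("security review"),
			ApprovalsRequired: ptr(2),
			UserIDs:           ptr([]int{101, 102}), // hypothetical reviewer user IDs
		})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created rule:", rule.ID)

	// Relax the rule to a single approval...
	if _, _, err := git.MergeRequestApprovals.UpdateApprovalRule(projectID, mrIID, rule.ID,
		&gitlab.UpdateMergeRequestApprovalRuleOptions{ApprovalsRequired: ptr(1)}); err != nil {
		log.Fatal(err)
	}

	// ...and finally drop it.
	if _, err := git.MergeRequestApprovals.DeleteApprovalRule(projectID, mrIID, rule.ID); err != nil {
		log.Fatal(err)
	}
}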
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "encoding/json" - "fmt" - "net/http" - "time" -) - -// MergeRequestsService handles communication with the merge requests related -// methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/merge_requests.html -type MergeRequestsService struct { - client *Client - timeStats *timeStatsService -} - -// MergeRequest represents a GitLab merge request. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/merge_requests.html -type MergeRequest struct { - ID int `json:"id"` - IID int `json:"iid"` - TargetBranch string `json:"target_branch"` - SourceBranch string `json:"source_branch"` - ProjectID int `json:"project_id"` - Title string `json:"title"` - State string `json:"state"` - CreatedAt *time.Time `json:"created_at"` - UpdatedAt *time.Time `json:"updated_at"` - Upvotes int `json:"upvotes"` - Downvotes int `json:"downvotes"` - Author *BasicUser `json:"author"` - Assignee *BasicUser `json:"assignee"` - Assignees []*BasicUser `json:"assignees"` - Reviewers []*BasicUser `json:"reviewers"` - SourceProjectID int `json:"source_project_id"` - TargetProjectID int `json:"target_project_id"` - Labels Labels `json:"labels"` - LabelDetails []*LabelDetails `json:"label_details"` - Description string `json:"description"` - Draft bool `json:"draft"` - WorkInProgress bool `json:"work_in_progress"` - Milestone *Milestone `json:"milestone"` - MergeWhenPipelineSucceeds bool `json:"merge_when_pipeline_succeeds"` - DetailedMergeStatus string `json:"detailed_merge_status"` - MergeError string `json:"merge_error"` - MergedBy *BasicUser `json:"merged_by"` - MergedAt *time.Time `json:"merged_at"` - ClosedBy *BasicUser `json:"closed_by"` - ClosedAt *time.Time `json:"closed_at"` - Subscribed bool `json:"subscribed"` - SHA string `json:"sha"` - MergeCommitSHA string `json:"merge_commit_sha"` - SquashCommitSHA string `json:"squash_commit_sha"` - UserNotesCount int `json:"user_notes_count"` - ChangesCount string `json:"changes_count"` - ShouldRemoveSourceBranch bool `json:"should_remove_source_branch"` - ForceRemoveSourceBranch bool `json:"force_remove_source_branch"` - AllowCollaboration bool `json:"allow_collaboration"` - WebURL string `json:"web_url"` - References *IssueReferences `json:"references"` - DiscussionLocked bool `json:"discussion_locked"` - Changes []*MergeRequestDiff `json:"changes"` - User struct { - CanMerge bool `json:"can_merge"` - } `json:"user"` - TimeStats *TimeStats `json:"time_stats"` - Squash bool `json:"squash"` - Pipeline *PipelineInfo `json:"pipeline"` - HeadPipeline *Pipeline `json:"head_pipeline"` - DiffRefs struct { - BaseSha string `json:"base_sha"` - HeadSha string `json:"head_sha"` - StartSha string `json:"start_sha"` - } `json:"diff_refs"` - DivergedCommitsCount int `json:"diverged_commits_count"` - RebaseInProgress bool `json:"rebase_in_progress"` - ApprovalsBeforeMerge int `json:"approvals_before_merge"` - Reference string `json:"reference"` - FirstContribution bool `json:"first_contribution"` - TaskCompletionStatus *TasksCompletionStatus `json:"task_completion_status"` - HasConflicts bool 
`json:"has_conflicts"` - BlockingDiscussionsResolved bool `json:"blocking_discussions_resolved"` - Overflow bool `json:"overflow"` - - // Deprecated: This parameter is replaced by DetailedMergeStatus in GitLab 15.6. - MergeStatus string `json:"merge_status"` -} - -func (m MergeRequest) String() string { - return Stringify(m) -} - -func (m *MergeRequest) UnmarshalJSON(data []byte) error { - type alias MergeRequest - - raw := make(map[string]interface{}) - err := json.Unmarshal(data, &raw) - if err != nil { - return err - } - - labelDetails, ok := raw["labels"].([]interface{}) - if ok && len(labelDetails) > 0 { - // We only want to change anything if we got label details. - if _, ok := labelDetails[0].(map[string]interface{}); !ok { - return json.Unmarshal(data, (*alias)(m)) - } - - labels := make([]interface{}, len(labelDetails)) - for i, details := range labelDetails { - labels[i] = details.(map[string]interface{})["name"] - } - - // Set the correct values - raw["labels"] = labels - raw["label_details"] = labelDetails - - data, err = json.Marshal(raw) - if err != nil { - return err - } - } - - return json.Unmarshal(data, (*alias)(m)) -} - -// MergeRequestDiff represents Gitlab merge request diff. -// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#list-merge-request-diffs -type MergeRequestDiff struct { - OldPath string `json:"old_path"` - NewPath string `json:"new_path"` - AMode string `json:"a_mode"` - BMode string `json:"b_mode"` - Diff string `json:"diff"` - NewFile bool `json:"new_file"` - RenamedFile bool `json:"renamed_file"` - DeletedFile bool `json:"deleted_file"` -} - -// MergeRequestDiffVersion represents Gitlab merge request version. -// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#get-merge-request-diff-versions -type MergeRequestDiffVersion struct { - ID int `json:"id"` - HeadCommitSHA string `json:"head_commit_sha,omitempty"` - BaseCommitSHA string `json:"base_commit_sha,omitempty"` - StartCommitSHA string `json:"start_commit_sha,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - MergeRequestID int `json:"merge_request_id,omitempty"` - State string `json:"state,omitempty"` - RealSize string `json:"real_size,omitempty"` - Commits []*Commit `json:"commits,omitempty"` - Diffs []*Diff `json:"diffs,omitempty"` -} - -func (m MergeRequestDiffVersion) String() string { - return Stringify(m) -} - -// ListMergeRequestsOptions represents the available ListMergeRequests() -// options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#list-merge-requests -type ListMergeRequestsOptions struct { - ListOptions - Approved *string `url:"approved,omitempty" json:"approved,omitempty"` - State *string `url:"state,omitempty" json:"state,omitempty"` - OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` - Sort *string `url:"sort,omitempty" json:"sort,omitempty"` - Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"` - View *string `url:"view,omitempty" json:"view,omitempty"` - Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` - NotLabels *LabelOptions `url:"not[labels],comma,omitempty" json:"not[labels],omitempty"` - WithLabelsDetails *bool `url:"with_labels_details,omitempty" json:"with_labels_details,omitempty"` - WithMergeStatusRecheck *bool `url:"with_merge_status_recheck,omitempty" json:"with_merge_status_recheck,omitempty"` - CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"` - CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` - UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"` - UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"` - Scope *string `url:"scope,omitempty" json:"scope,omitempty"` - AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` - AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"` - NotAuthorUsername *string `url:"not[author_username],omitempty" json:"not[author_username],omitempty"` - AssigneeID *AssigneeIDValue `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` - ApproverIDs *ApproverIDsValue `url:"approver_ids,omitempty" json:"approver_ids,omitempty"` - ApprovedByIDs *ApproverIDsValue `url:"approved_by_ids,omitempty" json:"approved_by_ids,omitempty"` - ReviewerID *ReviewerIDValue `url:"reviewer_id,omitempty" json:"reviewer_id,omitempty"` - ReviewerUsername *string `url:"reviewer_username,omitempty" json:"reviewer_username,omitempty"` - MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"` - SourceBranch *string `url:"source_branch,omitempty" json:"source_branch,omitempty"` - TargetBranch *string `url:"target_branch,omitempty" json:"target_branch,omitempty"` - Search *string `url:"search,omitempty" json:"search,omitempty"` - In *string `url:"in,omitempty" json:"in,omitempty"` - Draft *bool `url:"draft,omitempty" json:"draft,omitempty"` - WIP *string `url:"wip,omitempty" json:"wip,omitempty"` -} - -// ListMergeRequests gets all merge requests. The state parameter can be used -// to get only merge requests with a given state (opened, closed, or merged) -// or all of them (all). The pagination parameters page and per_page can be -// used to restrict the list of merge requests. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#list-merge-requests -func (s *MergeRequestsService) ListMergeRequests(opt *ListMergeRequestsOptions, options ...RequestOptionFunc) ([]*MergeRequest, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "merge_requests", opt, options) - if err != nil { - return nil, nil, err - } - - var m []*MergeRequest - resp, err := s.client.Do(req, &m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// ListProjectMergeRequestsOptions represents the available ListMergeRequests() -// options. 
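// ---------------------------------------------------------------------------
// Editor's sketch (not part of the patch): paging through all opened merge
// requests visible to the token with ListMergeRequests, deleted above. The
// NextPage bookkeeping relies on this package's usual Response fields, which
// is an assumption here; the token and filters are hypothetical.
// ---------------------------------------------------------------------------
package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func ptr[T any](v T) *T { return &v }

func main() {
	git, err := gitlab.NewClient("glpat-example") // hypothetical token
	if err != nil {
		log.Fatal(err)
	}

	opt := &gitlab.ListMergeRequestsOptions{
		ListOptions: gitlab.ListOptions{PerPage: 50},
		State:       ptr("opened"),
		Scope:       ptr("all"), // the API defaults to created_by_me
	}

	for {
		mrs, resp, err := git.MergeRequests.ListMergeRequests(opt)
		if err != nil {
			log.Fatal(err)
		}
		for _, mr := range mrs {
			fmt.Printf("!%d %s (%s)\n", mr.IID, mr.Title, mr.State)
		}
		if resp.NextPage == 0 {
			break // no more pages
		}
		opt.Page = resp.NextPage
	}
}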
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#list-project-merge-requests -type ListProjectMergeRequestsOptions struct { - ListOptions - IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` - State *string `url:"state,omitempty" json:"state,omitempty"` - OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` - Sort *string `url:"sort,omitempty" json:"sort,omitempty"` - Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"` - View *string `url:"view,omitempty" json:"view,omitempty"` - Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` - NotLabels *LabelOptions `url:"not[labels],comma,omitempty" json:"not[labels],omitempty"` - WithLabelsDetails *bool `url:"with_labels_details,omitempty" json:"with_labels_details,omitempty"` - WithMergeStatusRecheck *bool `url:"with_merge_status_recheck,omitempty" json:"with_merge_status_recheck,omitempty"` - CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"` - CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` - UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"` - UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"` - Scope *string `url:"scope,omitempty" json:"scope,omitempty"` - AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` - AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"` - NotAuthorUsername *string `url:"not[author_username],omitempty" json:"not[author_username],omitempty"` - AssigneeID *AssigneeIDValue `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` - ApproverIDs *ApproverIDsValue `url:"approver_ids,omitempty" json:"approver_ids,omitempty"` - ApprovedByIDs *ApproverIDsValue `url:"approved_by_ids,omitempty" json:"approved_by_ids,omitempty"` - ReviewerID *ReviewerIDValue `url:"reviewer_id,omitempty" json:"reviewer_id,omitempty"` - ReviewerUsername *string `url:"reviewer_username,omitempty" json:"reviewer_username,omitempty"` - MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"` - SourceBranch *string `url:"source_branch,omitempty" json:"source_branch,omitempty"` - TargetBranch *string `url:"target_branch,omitempty" json:"target_branch,omitempty"` - Search *string `url:"search,omitempty" json:"search,omitempty"` - Draft *bool `url:"draft,omitempty" json:"draft,omitempty"` - WIP *string `url:"wip,omitempty" json:"wip,omitempty"` -} - -// ListProjectMergeRequests gets all merge requests for this project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#list-project-merge-requests -func (s *MergeRequestsService) ListProjectMergeRequests(pid interface{}, opt *ListProjectMergeRequestsOptions, options ...RequestOptionFunc) ([]*MergeRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var m []*MergeRequest - resp, err := s.client.Do(req, &m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// ListGroupMergeRequestsOptions represents the available ListGroupMergeRequests() -// options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#list-group-merge-requests -type ListGroupMergeRequestsOptions struct { - ListOptions - State *string `url:"state,omitempty" json:"state,omitempty"` - OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` - Sort *string `url:"sort,omitempty" json:"sort,omitempty"` - Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"` - View *string `url:"view,omitempty" json:"view,omitempty"` - Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` - NotLabels *LabelOptions `url:"not[labels],comma,omitempty" json:"not[labels],omitempty"` - WithLabelsDetails *bool `url:"with_labels_details,omitempty" json:"with_labels_details,omitempty"` - WithMergeStatusRecheck *bool `url:"with_merge_status_recheck,omitempty" json:"with_merge_status_recheck,omitempty"` - CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"` - CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` - UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"` - UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"` - Scope *string `url:"scope,omitempty" json:"scope,omitempty"` - AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` - AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"` - NotAuthorUsername *string `url:"not[author_username],omitempty" json:"not[author_username],omitempty"` - AssigneeID *AssigneeIDValue `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` - ApproverIDs *ApproverIDsValue `url:"approver_ids,omitempty" json:"approver_ids,omitempty"` - ApprovedByIDs *ApproverIDsValue `url:"approved_by_ids,omitempty" json:"approved_by_ids,omitempty"` - ReviewerID *ReviewerIDValue `url:"reviewer_id,omitempty" json:"reviewer_id,omitempty"` - ReviewerUsername *string `url:"reviewer_username,omitempty" json:"reviewer_username,omitempty"` - MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"` - SourceBranch *string `url:"source_branch,omitempty" json:"source_branch,omitempty"` - TargetBranch *string `url:"target_branch,omitempty" json:"target_branch,omitempty"` - Search *string `url:"search,omitempty" json:"search,omitempty"` - In *string `url:"in,omitempty" json:"in,omitempty"` - Draft *bool `url:"draft,omitempty" json:"draft,omitempty"` - WIP *string `url:"wip,omitempty" json:"wip,omitempty"` -} - -// ListGroupMergeRequests gets all merge requests for this group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#list-group-merge-requests -func (s *MergeRequestsService) ListGroupMergeRequests(gid interface{}, opt *ListGroupMergeRequestsOptions, options ...RequestOptionFunc) ([]*MergeRequest, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/merge_requests", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var m []*MergeRequest - resp, err := s.client.Do(req, &m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// GetMergeRequestsOptions represents the available GetMergeRequests() -// options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#get-single-mr -type GetMergeRequestsOptions struct { - RenderHTML *bool `url:"render_html,omitempty" json:"render_html,omitempty"` - IncludeDivergedCommitsCount *bool `url:"include_diverged_commits_count,omitempty" json:"include_diverged_commits_count,omitempty"` - IncludeRebaseInProgress *bool `url:"include_rebase_in_progress,omitempty" json:"include_rebase_in_progress,omitempty"` -} - -// GetMergeRequest shows information about a single merge request. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#get-single-mr -func (s *MergeRequestsService) GetMergeRequest(pid interface{}, mergeRequest int, opt *GetMergeRequestsOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - m := new(MergeRequest) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// GetMergeRequestApprovals gets information about a merge requests approvals -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_request_approvals.html#merge-request-level-mr-approvals -func (s *MergeRequestsService) GetMergeRequestApprovals(pid interface{}, mergeRequest int, options ...RequestOptionFunc) (*MergeRequestApprovals, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/approvals", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - a := new(MergeRequestApprovals) - resp, err := s.client.Do(req, a) - if err != nil { - return nil, resp, err - } - - return a, resp, nil -} - -// GetMergeRequestCommitsOptions represents the available GetMergeRequestCommits() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#get-single-merge-request-commits -type GetMergeRequestCommitsOptions ListOptions - -// GetMergeRequestCommits gets a list of merge request commits. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#get-single-merge-request-commits -func (s *MergeRequestsService) GetMergeRequestCommits(pid interface{}, mergeRequest int, opt *GetMergeRequestCommitsOptions, options ...RequestOptionFunc) ([]*Commit, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/commits", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var c []*Commit - resp, err := s.client.Do(req, &c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} - -// GetMergeRequestChangesOptions represents the available GetMergeRequestChanges() -// options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#get-single-merge-request-changes -type GetMergeRequestChangesOptions struct { - AccessRawDiffs *bool `url:"access_raw_diffs,omitempty" json:"access_raw_diffs,omitempty"` - Unidiff *bool `url:"unidiff,omitempty" json:"unidiff,omitempty"` -} - -// GetMergeRequestChanges shows information about the merge request including -// its files and changes. -// -// Deprecated: This endpoint has been replaced by -// MergeRequestsService.ListMergeRequestDiffs() -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#get-single-merge-request-changes -func (s *MergeRequestsService) GetMergeRequestChanges(pid interface{}, mergeRequest int, opt *GetMergeRequestChangesOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/changes", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - m := new(MergeRequest) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// ListMergeRequestDiffsOptions represents the available ListMergeRequestDiffs() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#list-merge-request-diffs -type ListMergeRequestDiffsOptions struct { - ListOptions - Unidiff *bool `url:"unidiff,omitempty" json:"unidiff,omitempty"` -} - -// ListMergeRequestDiffs List diffs of the files changed in a merge request -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#list-merge-request-diffs -func (s *MergeRequestsService) ListMergeRequestDiffs(pid interface{}, mergeRequest int, opt *ListMergeRequestDiffsOptions, options ...RequestOptionFunc) ([]*MergeRequestDiff, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/diffs", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var m []*MergeRequestDiff - resp, err := s.client.Do(req, &m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// GetMergeRequestParticipants gets a list of merge request participants. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#get-single-merge-request-participants -func (s *MergeRequestsService) GetMergeRequestParticipants(pid interface{}, mergeRequest int, options ...RequestOptionFunc) ([]*BasicUser, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/participants", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var bu []*BasicUser - resp, err := s.client.Do(req, &bu) - if err != nil { - return nil, resp, err - } - - return bu, resp, nil -} - -// MergeRequestReviewer represents a GitLab merge request reviewer. 
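// ---------------------------------------------------------------------------
// Editor's sketch (not part of the patch): summarising the files touched by a
// merge request with ListMergeRequestDiffs, the replacement for the
// deprecated GetMergeRequestChanges deleted above. Token and IDs are
// hypothetical; field names come from the removed MergeRequestDiff type.
// ---------------------------------------------------------------------------
package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("glpat-example") // hypothetical token
	if err != nil {
		log.Fatal(err)
	}

	const projectID, mrIID = 1234, 56 // hypothetical IDs

	diffs, _, err := git.MergeRequests.ListMergeRequestDiffs(projectID, mrIID,
		&gitlab.ListMergeRequestDiffsOptions{
			ListOptions: gitlab.ListOptions{PerPage: 100},
		})
	if err != nil {
		log.Fatal(err)
	}

	for _, d := range diffs {
		switch {
		case d.NewFile:
			fmt.Println("added:  ", d.NewPath)
		case d.DeletedFile:
			fmt.Println("deleted:", d.OldPath)
		case d.RenamedFile:
			fmt.Printf("renamed: %s -> %s\n", d.OldPath, d.NewPath)
		default:
			fmt.Println("changed:", d.NewPath)
		}
	}
}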
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#get-single-merge-request-reviewers -type MergeRequestReviewer struct { - User *BasicUser `json:"user"` - State string `json:"state"` - CreatedAt *time.Time `json:"created_at"` -} - -// GetMergeRequestReviewers gets a list of merge request reviewers. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#get-single-merge-request-reviewers -func (s *MergeRequestsService) GetMergeRequestReviewers(pid interface{}, mergeRequest int, options ...RequestOptionFunc) ([]*MergeRequestReviewer, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/reviewers", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var mrr []*MergeRequestReviewer - resp, err := s.client.Do(req, &mrr) - if err != nil { - return nil, resp, err - } - - return mrr, resp, nil -} - -// ListMergeRequestPipelines gets all pipelines for the provided merge request. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#list-merge-request-pipelines -func (s *MergeRequestsService) ListMergeRequestPipelines(pid interface{}, mergeRequest int, options ...RequestOptionFunc) ([]*PipelineInfo, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/pipelines", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var p []*PipelineInfo - resp, err := s.client.Do(req, &p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -// CreateMergeRequestPipeline creates a new pipeline for a merge request. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#create-merge-request-pipeline -func (s *MergeRequestsService) CreateMergeRequestPipeline(pid interface{}, mergeRequest int, options ...RequestOptionFunc) (*PipelineInfo, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/pipelines", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - p := new(PipelineInfo) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -// GetIssuesClosedOnMergeOptions represents the available GetIssuesClosedOnMerge() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#list-issues-that-close-on-merge -type GetIssuesClosedOnMergeOptions ListOptions - -// GetIssuesClosedOnMerge gets all the issues that would be closed by merging the -// provided merge request. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#list-issues-that-close-on-merge -func (s *MergeRequestsService) GetIssuesClosedOnMerge(pid interface{}, mergeRequest int, opt *GetIssuesClosedOnMergeOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/closes_issues", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var i []*Issue - resp, err := s.client.Do(req, &i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil -} - -// CreateMergeRequestOptions represents the available CreateMergeRequest() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#create-mr -type CreateMergeRequestOptions struct { - Title *string `url:"title,omitempty" json:"title,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - SourceBranch *string `url:"source_branch,omitempty" json:"source_branch,omitempty"` - TargetBranch *string `url:"target_branch,omitempty" json:"target_branch,omitempty"` - Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` - AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` - AssigneeIDs *[]int `url:"assignee_ids,omitempty" json:"assignee_ids,omitempty"` - ReviewerIDs *[]int `url:"reviewer_ids,omitempty" json:"reviewer_ids,omitempty"` - TargetProjectID *int `url:"target_project_id,omitempty" json:"target_project_id,omitempty"` - MilestoneID *int `url:"milestone_id,omitempty" json:"milestone_id,omitempty"` - RemoveSourceBranch *bool `url:"remove_source_branch,omitempty" json:"remove_source_branch,omitempty"` - Squash *bool `url:"squash,omitempty" json:"squash,omitempty"` - AllowCollaboration *bool `url:"allow_collaboration,omitempty" json:"allow_collaboration,omitempty"` - ApprovalsBeforeMerge *int `url:"approvals_before_merge,omitempty" json:"approvals_before_merge,omitempty"` -} - -// CreateMergeRequest creates a new merge request. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#create-mr -func (s *MergeRequestsService) CreateMergeRequest(pid interface{}, opt *CreateMergeRequestOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - m := new(MergeRequest) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// UpdateMergeRequestOptions represents the available UpdateMergeRequest() -// options. 
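// ---------------------------------------------------------------------------
// Editor's sketch (not part of the patch): opening a merge request with
// CreateMergeRequest, deleted above. Token, IDs, and branch names are
// hypothetical; option fields come from the removed CreateMergeRequestOptions.
// ---------------------------------------------------------------------------
package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func ptr[T any](v T) *T { return &v }

func main() {
	git, err := gitlab.NewClient("glpat-example") // hypothetical token
	if err != nil {
		log.Fatal(err)
	}

	mr, _, err := git.MergeRequests.CreateMergeRequest(1234, &gitlab.CreateMergeRequestOptions{
		Title:              ptr("Add Archivista storage backend"),
		Description:        ptr("Stores attestations in Archivista."),
		SourceBranch:       ptr("feature/archivista"),
		TargetBranch:       ptr("main"),
		ReviewerIDs:        ptr([]int{101}), // hypothetical reviewer user ID
		RemoveSourceBranch: ptr(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("opened:", mr.WebURL)
}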
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#update-mr -type UpdateMergeRequestOptions struct { - Title *string `url:"title,omitempty" json:"title,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - TargetBranch *string `url:"target_branch,omitempty" json:"target_branch,omitempty"` - AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` - AssigneeIDs *[]int `url:"assignee_ids,omitempty" json:"assignee_ids,omitempty"` - ReviewerIDs *[]int `url:"reviewer_ids,omitempty" json:"reviewer_ids,omitempty"` - Labels *LabelOptions `url:"labels,comma,omitempty" json:"labels,omitempty"` - AddLabels *LabelOptions `url:"add_labels,comma,omitempty" json:"add_labels,omitempty"` - RemoveLabels *LabelOptions `url:"remove_labels,comma,omitempty" json:"remove_labels,omitempty"` - MilestoneID *int `url:"milestone_id,omitempty" json:"milestone_id,omitempty"` - StateEvent *string `url:"state_event,omitempty" json:"state_event,omitempty"` - RemoveSourceBranch *bool `url:"remove_source_branch,omitempty" json:"remove_source_branch,omitempty"` - Squash *bool `url:"squash,omitempty" json:"squash,omitempty"` - DiscussionLocked *bool `url:"discussion_locked,omitempty" json:"discussion_locked,omitempty"` - AllowCollaboration *bool `url:"allow_collaboration,omitempty" json:"allow_collaboration,omitempty"` -} - -// UpdateMergeRequest updates an existing project milestone. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#update-mr -func (s *MergeRequestsService) UpdateMergeRequest(pid interface{}, mergeRequest int, opt *UpdateMergeRequestOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - m := new(MergeRequest) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// DeleteMergeRequest deletes a merge request. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#delete-a-merge-request -func (s *MergeRequestsService) DeleteMergeRequest(pid interface{}, mergeRequest int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// AcceptMergeRequestOptions represents the available AcceptMergeRequest() -// options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#merge-a-merge-request -type AcceptMergeRequestOptions struct { - MergeCommitMessage *string `url:"merge_commit_message,omitempty" json:"merge_commit_message,omitempty"` - SquashCommitMessage *string `url:"squash_commit_message,omitempty" json:"squash_commit_message,omitempty"` - Squash *bool `url:"squash,omitempty" json:"squash,omitempty"` - ShouldRemoveSourceBranch *bool `url:"should_remove_source_branch,omitempty" json:"should_remove_source_branch,omitempty"` - MergeWhenPipelineSucceeds *bool `url:"merge_when_pipeline_succeeds,omitempty" json:"merge_when_pipeline_succeeds,omitempty"` - SHA *string `url:"sha,omitempty" json:"sha,omitempty"` -} - -// AcceptMergeRequest merges changes submitted with MR using this API. If merge -// success you get 200 OK. If it has some conflicts and can not be merged - you -// get 405 and error message 'Branch cannot be merged'. If merge request is -// already merged or closed - you get 405 and error message 'Method Not Allowed' -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#merge-a-merge-request -func (s *MergeRequestsService) AcceptMergeRequest(pid interface{}, mergeRequest int, opt *AcceptMergeRequestOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/merge", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - m := new(MergeRequest) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// CancelMergeWhenPipelineSucceeds cancels a merge when pipeline succeeds. If -// you don't have permissions to accept this merge request - you'll get a 401. -// If the merge request is already merged or closed - you get 405 and error -// message 'Method Not Allowed'. In case the merge request is not set to be -// merged when the pipeline succeeds, you'll also get a 406 error. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#cancel-merge-when-pipeline-succeeds -func (s *MergeRequestsService) CancelMergeWhenPipelineSucceeds(pid interface{}, mergeRequest int, options ...RequestOptionFunc) (*MergeRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/cancel_merge_when_pipeline_succeeds", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - m := new(MergeRequest) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// RebaseMergeRequestOptions represents the available RebaseMergeRequest() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#rebase-a-merge-request -type RebaseMergeRequestOptions struct { - SkipCI *bool `url:"skip_ci,omitempty" json:"skip_ci,omitempty"` -} - -// RebaseMergeRequest automatically rebases the source_branch of the merge -// request against its target_branch. If you don’t have permissions to push -// to the merge request’s source branch, you’ll get a 403 Forbidden response. 
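// ---------------------------------------------------------------------------
// Editor's sketch (not part of the patch): merging an MR with
// AcceptMergeRequest, deleted above, and handling the documented 405 case
// (unmergeable, or already merged/closed). The StatusCode check assumes the
// package's Response embeds *http.Response; token and IDs are hypothetical.
// ---------------------------------------------------------------------------
package main

import (
	"fmt"
	"log"
	"net/http"

	gitlab "github.com/xanzy/go-gitlab"
)

func ptr[T any](v T) *T { return &v }

func main() {
	git, err := gitlab.NewClient("glpat-example") // hypothetical token
	if err != nil {
		log.Fatal(err)
	}

	const projectID, mrIID = 1234, 56 // hypothetical IDs

	mr, resp, err := git.MergeRequests.AcceptMergeRequest(projectID, mrIID,
		&gitlab.AcceptMergeRequestOptions{
			Squash:                    ptr(true),
			SquashCommitMessage:       ptr("Add Archivista storage backend"),
			MergeWhenPipelineSucceeds: ptr(true),
		})
	if err != nil {
		if resp != nil && resp.StatusCode == http.StatusMethodNotAllowed {
			log.Fatal("merge refused: conflicts, or MR already merged/closed")
		}
		log.Fatal(err)
	}
	fmt.Println("merge request state:", mr.State)
}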
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#rebase-a-merge-request -func (s *MergeRequestsService) RebaseMergeRequest(pid interface{}, mergeRequest int, opt *RebaseMergeRequestOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/rebase", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// GetMergeRequestDiffVersionsOptions represents the available -// GetMergeRequestDiffVersions() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#get-merge-request-diff-versions -type GetMergeRequestDiffVersionsOptions ListOptions - -// GetMergeRequestDiffVersions get a list of merge request diff versions. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#get-merge-request-diff-versions -func (s *MergeRequestsService) GetMergeRequestDiffVersions(pid interface{}, mergeRequest int, opt *GetMergeRequestDiffVersionsOptions, options ...RequestOptionFunc) ([]*MergeRequestDiffVersion, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/versions", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var v []*MergeRequestDiffVersion - resp, err := s.client.Do(req, &v) - if err != nil { - return nil, resp, err - } - - return v, resp, nil -} - -// GetSingleMergeRequestDiffVersionOptions represents the available -// GetSingleMergeRequestDiffVersion() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#get-a-single-merge-request-diff-version -type GetSingleMergeRequestDiffVersionOptions struct { - Unidiff *bool `url:"unidiff,omitempty" json:"unidiff,omitempty"` -} - -// GetSingleMergeRequestDiffVersion get a single MR diff version -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#get-a-single-merge-request-diff-version -func (s *MergeRequestsService) GetSingleMergeRequestDiffVersion(pid interface{}, mergeRequest, version int, opt *GetSingleMergeRequestDiffVersionOptions, options ...RequestOptionFunc) (*MergeRequestDiffVersion, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/versions/%d", PathEscape(project), mergeRequest, version) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - v := new(MergeRequestDiffVersion) - resp, err := s.client.Do(req, v) - if err != nil { - return nil, resp, err - } - - return v, resp, nil -} - -// SubscribeToMergeRequest subscribes the authenticated user to the given merge -// request to receive notifications. If the user is already subscribed to the -// merge request, the status code 304 is returned. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#subscribe-to-a-merge-request -func (s *MergeRequestsService) SubscribeToMergeRequest(pid interface{}, mergeRequest int, options ...RequestOptionFunc) (*MergeRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/subscribe", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - m := new(MergeRequest) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// UnsubscribeFromMergeRequest unsubscribes the authenticated user from the -// given merge request to not receive notifications from that merge request. -// If the user is not subscribed to the merge request, status code 304 is -// returned. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#unsubscribe-from-a-merge-request -func (s *MergeRequestsService) UnsubscribeFromMergeRequest(pid interface{}, mergeRequest int, options ...RequestOptionFunc) (*MergeRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/unsubscribe", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - m := new(MergeRequest) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// CreateTodo manually creates a todo for the current user on a merge request. -// If there already exists a todo for the user on that merge request, -// status code 304 is returned. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#create-a-to-do-item -func (s *MergeRequestsService) CreateTodo(pid interface{}, mergeRequest int, options ...RequestOptionFunc) (*Todo, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/todo", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - t := new(Todo) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// SetTimeEstimate sets the time estimate for a single project merge request. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#set-a-time-estimate-for-a-merge-request -func (s *MergeRequestsService) SetTimeEstimate(pid interface{}, mergeRequest int, opt *SetTimeEstimateOptions, options ...RequestOptionFunc) (*TimeStats, *Response, error) { - return s.timeStats.setTimeEstimate(pid, "merge_requests", mergeRequest, opt, options...) -} - -// ResetTimeEstimate resets the time estimate for a single project merge request. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#reset-the-time-estimate-for-a-merge-request -func (s *MergeRequestsService) ResetTimeEstimate(pid interface{}, mergeRequest int, options ...RequestOptionFunc) (*TimeStats, *Response, error) { - return s.timeStats.resetTimeEstimate(pid, "merge_requests", mergeRequest, options...) -} - -// AddSpentTime adds spent time for a single project merge request. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#add-spent-time-for-a-merge-request -func (s *MergeRequestsService) AddSpentTime(pid interface{}, mergeRequest int, opt *AddSpentTimeOptions, options ...RequestOptionFunc) (*TimeStats, *Response, error) { - return s.timeStats.addSpentTime(pid, "merge_requests", mergeRequest, opt, options...) -} - -// ResetSpentTime resets the spent time for a single project merge request. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#reset-spent-time-for-a-merge-request -func (s *MergeRequestsService) ResetSpentTime(pid interface{}, mergeRequest int, options ...RequestOptionFunc) (*TimeStats, *Response, error) { - return s.timeStats.resetSpentTime(pid, "merge_requests", mergeRequest, options...) -} - -// GetTimeSpent gets the spent time for a single project merge request. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_requests.html#get-time-tracking-stats -func (s *MergeRequestsService) GetTimeSpent(pid interface{}, mergeRequest int, options ...RequestOptionFunc) (*TimeStats, *Response, error) { - return s.timeStats.getTimeSpent(pid, "merge_requests", mergeRequest, options...) -} diff --git a/vendor/github.com/xanzy/go-gitlab/merge_trains.go b/vendor/github.com/xanzy/go-gitlab/merge_trains.go deleted file mode 100644 index e55917fa0c..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/merge_trains.go +++ /dev/null @@ -1,170 +0,0 @@ -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// MergeTrainsService handles communication with the merge trains related -// methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/merge_trains.html -type MergeTrainsService struct { - client *Client -} - -// MergeTrain represents a Gitlab merge train. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/merge_trains.html -type MergeTrain struct { - ID int `json:"id"` - MergeRequest *MergeTrainMergeRequest `json:"merge_request"` - User *BasicUser `json:"user"` - Pipeline *Pipeline `json:"pipeline"` - CreatedAt *time.Time `json:"created_at"` - UpdatedAt *time.Time `json:"updated_at"` - TargetBranch string `json:"target_branch"` - Status string `json:"status"` - MergedAt *time.Time `json:"merged_at"` - Duration int `json:"duration"` -} - -// MergeTrainMergeRequest represents a Gitlab merge request inside merge train. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/merge_trains.html -type MergeTrainMergeRequest struct { - ID int `json:"id"` - IID int `json:"iid"` - ProjectID int `json:"project_id"` - Title string `json:"title"` - Description string `json:"description"` - State string `json:"state"` - CreatedAt *time.Time `json:"created_at"` - UpdatedAt *time.Time `json:"updated_at"` - WebURL string `json:"web_url"` -} - -// ListMergeTrainsOptions represents the available ListMergeTrain() options. -// -// Gitab API docs: -// https://docs.gitlab.com/ee/api/merge_trains.html#list-merge-trains-for-a-project -type ListMergeTrainsOptions struct { - ListOptions - Scope *string `url:"scope,omitempty" json:"scope,omitempty"` - Sort *string `url:"sort,omitempty" json:"sort,omitempty"` -} - -// ListProjectMergeTrains get a list of merge trains in a project. 
-// ListProjectMergeTrains gets a list of merge trains in a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/merge_trains.html#list-merge-trains-for-a-project
-func (s *MergeTrainsService) ListProjectMergeTrains(pid interface{}, opt *ListMergeTrainsOptions, options ...RequestOptionFunc) ([]*MergeTrain, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/merge_trains", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var mts []*MergeTrain
-	resp, err := s.client.Do(req, &mts)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return mts, resp, nil
-}
-
-// ListMergeRequestInMergeTrain gets a list of merge requests added to a merge
-// train for the requested target branch.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/merge_trains.html#list-merge-requests-in-a-merge-train
-func (s *MergeTrainsService) ListMergeRequestInMergeTrain(pid interface{}, targetBranch string, opts *ListMergeTrainsOptions, options ...RequestOptionFunc) ([]*MergeTrain, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/merge_trains/%s", PathEscape(project), targetBranch)
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opts, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var mts []*MergeTrain
-	resp, err := s.client.Do(req, &mts)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return mts, resp, nil
-}
-
-// GetMergeRequestOnAMergeTrain gets merge train information for the requested
-// merge request.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/merge_trains.html#get-the-status-of-a-merge-request-on-a-merge-train
-func (s *MergeTrainsService) GetMergeRequestOnAMergeTrain(pid interface{}, mergeRequest int, options ...RequestOptionFunc) (*MergeTrain, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/merge_trains/merge_requests/%d", PathEscape(project), mergeRequest)
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	mt := new(MergeTrain)
-	resp, err := s.client.Do(req, mt)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return mt, resp, nil
-}
-
-// AddMergeRequestToMergeTrainOptions represents the available
-// AddMergeRequestToMergeTrain() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/merge_trains.html#add-a-merge-request-to-a-merge-train
-type AddMergeRequestToMergeTrainOptions struct {
-	WhenPipelineSucceeds *bool   `url:"when_pipeline_succeeds,omitempty" json:"when_pipeline_succeeds,omitempty"`
-	SHA                  *string `url:"sha,omitempty" json:"sha,omitempty"`
-	Squash               *bool   `url:"squash,omitempty" json:"squash,omitempty"`
-}
-
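A hypothetical usage sketch for the AddMergeRequestToMergeTrain call that follows, again assuming a client from gitlab.NewClient; the token, project path, and merge request IID are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/xanzy/go-gitlab"
)

// addToTrain queues the merge request with the given IID on the merge train
// for its target branch, asking GitLab to wait for a passing pipeline.
func addToTrain(client *gitlab.Client, projectPath string, mrIID int) error {
	opt := &gitlab.AddMergeRequestToMergeTrainOptions{
		WhenPipelineSucceeds: gitlab.Bool(true), // do not merge on a red pipeline
	}
	trains, _, err := client.MergeTrains.AddMergeRequestToMergeTrain(projectPath, mrIID, opt)
	if err != nil {
		return err
	}
	fmt.Printf("merge train now holds %d entries\n", len(trains))
	return nil
}

func main() {
	client, err := gitlab.NewClient("glpat-example-token") // placeholder token
	if err != nil {
		log.Fatal(err)
	}
	if err := addToTrain(client, "my-group/my-project", 42); err != nil {
		log.Fatal(err)
	}
}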
-// AddMergeRequestToMergeTrain adds a merge request to the merge train targeting
-// the merge request’s target branch.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/merge_trains.html#add-a-merge-request-to-a-merge-train
-func (s *MergeTrainsService) AddMergeRequestToMergeTrain(pid interface{}, mergeRequest int, opts *AddMergeRequestToMergeTrainOptions, options ...RequestOptionFunc) ([]*MergeTrain, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/merge_trains/merge_requests/%d", PathEscape(project), mergeRequest)
-
-	req, err := s.client.NewRequest(http.MethodPost, u, opts, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var mts []*MergeTrain
-	resp, err := s.client.Do(req, &mts)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return mts, resp, nil
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/metadata.go b/vendor/github.com/xanzy/go-gitlab/metadata.go
deleted file mode 100644
index db23a81e46..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/metadata.go
+++ /dev/null
@@ -1,63 +0,0 @@
-//
-// Copyright 2022, Timo Furrer
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package gitlab
-
-import "net/http"
-
-// MetadataService handles communication with the GitLab server instance to
-// retrieve its metadata information via the GitLab API.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/metadata.html
-type MetadataService struct {
-	client *Client
-}
-
-// Metadata represents a GitLab instance version.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/metadata.html
-type Metadata struct {
-	Version  string `json:"version"`
-	Revision string `json:"revision"`
-	KAS      struct {
-		Enabled     bool   `json:"enabled"`
-		ExternalURL string `json:"externalUrl"`
-		Version     string `json:"version"`
-	} `json:"kas"`
-	Enterprise bool `json:"enterprise"`
-}
-
-func (s Metadata) String() string {
-	return Stringify(s)
-}
-
-// GetMetadata gets a GitLab server instance's metadata.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/metadata.html
-func (s *MetadataService) GetMetadata(options ...RequestOptionFunc) (*Metadata, *Response, error) {
-	req, err := s.client.NewRequest(http.MethodGet, "metadata", nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	v := new(Metadata)
-	resp, err := s.client.Do(req, v)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return v, resp, nil
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/milestones.go b/vendor/github.com/xanzy/go-gitlab/milestones.go
deleted file mode 100644
index 17c97e031a..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/milestones.go
+++ /dev/null
@@ -1,272 +0,0 @@
-//
-// Copyright 2021, Sander van Harmelen
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// MilestonesService handles communication with the milestone related methods -// of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/milestones.html -type MilestonesService struct { - client *Client -} - -// Milestone represents a GitLab milestone. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/milestones.html -type Milestone struct { - ID int `json:"id"` - IID int `json:"iid"` - GroupID int `json:"group_id"` - ProjectID int `json:"project_id"` - Title string `json:"title"` - Description string `json:"description"` - StartDate *ISOTime `json:"start_date"` - DueDate *ISOTime `json:"due_date"` - State string `json:"state"` - WebURL string `json:"web_url"` - UpdatedAt *time.Time `json:"updated_at"` - CreatedAt *time.Time `json:"created_at"` - Expired *bool `json:"expired"` -} - -func (m Milestone) String() string { - return Stringify(m) -} - -// ListMilestonesOptions represents the available ListMilestones() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/milestones.html#list-project-milestones -type ListMilestonesOptions struct { - ListOptions - IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` - Title *string `url:"title,omitempty" json:"title,omitempty"` - State *string `url:"state,omitempty" json:"state,omitempty"` - Search *string `url:"search,omitempty" json:"search,omitempty"` - IncludeParentMilestones *bool `url:"include_parent_milestones,omitempty" json:"include_parent_milestones,omitempty"` -} - -// ListMilestones returns a list of project milestones. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/milestones.html#list-project-milestones -func (s *MilestonesService) ListMilestones(pid interface{}, opt *ListMilestonesOptions, options ...RequestOptionFunc) ([]*Milestone, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/milestones", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var m []*Milestone - resp, err := s.client.Do(req, &m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// GetMilestone gets a single project milestone. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/milestones.html#get-single-milestone -func (s *MilestonesService) GetMilestone(pid interface{}, milestone int, options ...RequestOptionFunc) (*Milestone, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/milestones/%d", PathEscape(project), milestone) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - m := new(Milestone) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// CreateMilestoneOptions represents the available CreateMilestone() options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/milestones.html#create-new-milestone -type CreateMilestoneOptions struct { - Title *string `url:"title,omitempty" json:"title,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - StartDate *ISOTime `url:"start_date,omitempty" json:"start_date,omitempty"` - DueDate *ISOTime `url:"due_date,omitempty" json:"due_date,omitempty"` -} - -// CreateMilestone creates a new project milestone. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/milestones.html#create-new-milestone -func (s *MilestonesService) CreateMilestone(pid interface{}, opt *CreateMilestoneOptions, options ...RequestOptionFunc) (*Milestone, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/milestones", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - m := new(Milestone) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// UpdateMilestoneOptions represents the available UpdateMilestone() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/milestones.html#edit-milestone -type UpdateMilestoneOptions struct { - Title *string `url:"title,omitempty" json:"title,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - StartDate *ISOTime `url:"start_date,omitempty" json:"start_date,omitempty"` - DueDate *ISOTime `url:"due_date,omitempty" json:"due_date,omitempty"` - StateEvent *string `url:"state_event,omitempty" json:"state_event,omitempty"` -} - -// UpdateMilestone updates an existing project milestone. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/milestones.html#edit-milestone -func (s *MilestonesService) UpdateMilestone(pid interface{}, milestone int, opt *UpdateMilestoneOptions, options ...RequestOptionFunc) (*Milestone, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/milestones/%d", PathEscape(project), milestone) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - m := new(Milestone) - resp, err := s.client.Do(req, m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// DeleteMilestone deletes a specified project milestone. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/milestones.html#delete-project-milestone -func (s *MilestonesService) DeleteMilestone(pid interface{}, milestone int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/milestones/%d", PathEscape(project), milestone) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - return s.client.Do(req, nil) -} - -// GetMilestoneIssuesOptions represents the available GetMilestoneIssues() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/milestones.html#get-all-issues-assigned-to-a-single-milestone -type GetMilestoneIssuesOptions ListOptions - -// GetMilestoneIssues gets all issues assigned to a single project milestone. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/milestones.html#get-all-issues-assigned-to-a-single-milestone -func (s *MilestonesService) GetMilestoneIssues(pid interface{}, milestone int, opt *GetMilestoneIssuesOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/milestones/%d/issues", PathEscape(project), milestone) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var i []*Issue - resp, err := s.client.Do(req, &i) - if err != nil { - return nil, resp, err - } - - return i, resp, nil -} - -// GetMilestoneMergeRequestsOptions represents the available -// GetMilestoneMergeRequests() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/milestones.html#get-all-merge-requests-assigned-to-a-single-milestone -type GetMilestoneMergeRequestsOptions ListOptions - -// GetMilestoneMergeRequests gets all merge requests assigned to a single -// project milestone. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/milestones.html#get-all-merge-requests-assigned-to-a-single-milestone -func (s *MilestonesService) GetMilestoneMergeRequests(pid interface{}, milestone int, opt *GetMilestoneMergeRequestsOptions, options ...RequestOptionFunc) ([]*MergeRequest, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/milestones/%d/merge_requests", PathEscape(project), milestone) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var mr []*MergeRequest - resp, err := s.client.Do(req, &mr) - if err != nil { - return nil, resp, err - } - - return mr, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/namespaces.go b/vendor/github.com/xanzy/go-gitlab/namespaces.go deleted file mode 100644 index da82a0c588..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/namespaces.go +++ /dev/null @@ -1,174 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" -) - -// NamespacesService handles communication with the namespace related methods -// of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/namespaces.html -type NamespacesService struct { - client *Client -} - -// Namespace represents a GitLab namespace. 
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/namespaces.html
-type Namespace struct {
-	ID                          int      `json:"id"`
-	Name                        string   `json:"name"`
-	Path                        string   `json:"path"`
-	Kind                        string   `json:"kind"`
-	FullPath                    string   `json:"full_path"`
-	ParentID                    int      `json:"parent_id"`
-	AvatarURL                   *string  `json:"avatar_url"`
-	WebURL                      string   `json:"web_url"`
-	MembersCountWithDescendants int      `json:"members_count_with_descendants"`
-	BillableMembersCount        int      `json:"billable_members_count"`
-	Plan                        string   `json:"plan"`
-	TrialEndsOn                 *ISOTime `json:"trial_ends_on"`
-	Trial                       bool     `json:"trial"`
-	MaxSeatsUsed                *int     `json:"max_seats_used"`
-	SeatsInUse                  *int     `json:"seats_in_use"`
-}
-
-func (n Namespace) String() string {
-	return Stringify(n)
-}
-
-// ListNamespacesOptions represents the available ListNamespaces() options.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/namespaces.html#list-namespaces
-type ListNamespacesOptions struct {
-	ListOptions
-	Search    *string `url:"search,omitempty" json:"search,omitempty"`
-	OwnedOnly *bool   `url:"owned_only,omitempty" json:"owned_only,omitempty"`
-}
-
-// ListNamespaces gets a list of namespaces accessible by the authenticated user.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/namespaces.html#list-namespaces
-func (s *NamespacesService) ListNamespaces(opt *ListNamespacesOptions, options ...RequestOptionFunc) ([]*Namespace, *Response, error) {
-	req, err := s.client.NewRequest(http.MethodGet, "namespaces", opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var n []*Namespace
-	resp, err := s.client.Do(req, &n)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return n, resp, nil
-}
-
-// SearchNamespace gets all namespaces that match your string in their name
-// or path.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/namespaces.html#list-namespaces
-func (s *NamespacesService) SearchNamespace(query string, options ...RequestOptionFunc) ([]*Namespace, *Response, error) {
-	var q struct {
-		Search string `url:"search,omitempty" json:"search,omitempty"`
-	}
-	q.Search = query
-
-	req, err := s.client.NewRequest(http.MethodGet, "namespaces", &q, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var n []*Namespace
-	resp, err := s.client.Do(req, &n)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return n, resp, nil
-}
-
-// GetNamespace gets a namespace by ID.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/namespaces.html#get-namespace-by-id
-func (s *NamespacesService) GetNamespace(id interface{}, options ...RequestOptionFunc) (*Namespace, *Response, error) {
-	namespace, err := parseID(id)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("namespaces/%s", PathEscape(namespace))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	n := new(Namespace)
-	resp, err := s.client.Do(req, n)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return n, resp, nil
-}
-
-// NamespaceExistance represents the result of a namespace existence check.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/namespaces.html#get-existence-of-a-namespace
-type NamespaceExistance struct {
-	Exists   bool     `json:"exists"`
-	Suggests []string `json:"suggests"`
-}
-
-// NamespaceExistsOptions represents the available NamespaceExists() options.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/namespaces.html#get-existence-of-a-namespace -type NamespaceExistsOptions struct { - ParentID *int `url:"parent_id,omitempty" json:"parent_id,omitempty"` -} - -// NamespaceExists checks the existence of a namespace. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/namespaces.html#get-existence-of-a-namespace -func (s *NamespacesService) NamespaceExists(id interface{}, opt *NamespaceExistsOptions, options ...RequestOptionFunc) (*NamespaceExistance, *Response, error) { - namespace, err := parseID(id) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("namespaces/%s/exists", namespace) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - n := new(NamespaceExistance) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/notes.go b/vendor/github.com/xanzy/go-gitlab/notes.go deleted file mode 100644 index 0c57ae2294..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/notes.go +++ /dev/null @@ -1,696 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// NotesService handles communication with the notes related methods -// of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/notes.html -type NotesService struct { - client *Client -} - -// Note represents a GitLab note. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/notes.html -type Note struct { - ID int `json:"id"` - Type NoteTypeValue `json:"type"` - Body string `json:"body"` - Attachment string `json:"attachment"` - Title string `json:"title"` - FileName string `json:"file_name"` - Author struct { - ID int `json:"id"` - Username string `json:"username"` - Email string `json:"email"` - Name string `json:"name"` - State string `json:"state"` - AvatarURL string `json:"avatar_url"` - WebURL string `json:"web_url"` - } `json:"author"` - System bool `json:"system"` - CreatedAt *time.Time `json:"created_at"` - UpdatedAt *time.Time `json:"updated_at"` - ExpiresAt *time.Time `json:"expires_at"` - CommitID string `json:"commit_id"` - Position *NotePosition `json:"position"` - NoteableID int `json:"noteable_id"` - NoteableType string `json:"noteable_type"` - ProjectID int `json:"project_id"` - NoteableIID int `json:"noteable_iid"` - Resolvable bool `json:"resolvable"` - Resolved bool `json:"resolved"` - ResolvedAt *time.Time `json:"resolved_at"` - ResolvedBy struct { - ID int `json:"id"` - Username string `json:"username"` - Email string `json:"email"` - Name string `json:"name"` - State string `json:"state"` - AvatarURL string `json:"avatar_url"` - WebURL string `json:"web_url"` - } `json:"resolved_by"` - Confidential bool `json:"confidential"` - Internal bool `json:"internal"` -} - -// NotePosition represents the position attributes of a note. 
-type NotePosition struct {
-	BaseSHA      string     `json:"base_sha"`
-	StartSHA     string     `json:"start_sha"`
-	HeadSHA      string     `json:"head_sha"`
-	PositionType string     `json:"position_type"`
-	NewPath      string     `json:"new_path,omitempty"`
-	NewLine      int        `json:"new_line,omitempty"`
-	OldPath      string     `json:"old_path,omitempty"`
-	OldLine      int        `json:"old_line,omitempty"`
-	LineRange    *LineRange `json:"line_range,omitempty"`
-}
-
-// LineRange represents the range of a note.
-type LineRange struct {
-	StartRange *LinePosition `json:"start"`
-	EndRange   *LinePosition `json:"end"`
-}
-
-// LinePosition represents a position in a line range.
-type LinePosition struct {
-	LineCode string `json:"line_code"`
-	Type     string `json:"type"`
-	OldLine  int    `json:"old_line"`
-	NewLine  int    `json:"new_line"`
-}
-
-func (n Note) String() string {
-	return Stringify(n)
-}
-
-// ListIssueNotesOptions represents the available ListIssueNotes() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/notes.html#list-project-issue-notes
-type ListIssueNotesOptions struct {
-	ListOptions
-	OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"`
-	Sort    *string `url:"sort,omitempty" json:"sort,omitempty"`
-}
-
-// ListIssueNotes gets a list of all notes for a single issue.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/notes.html#list-project-issue-notes
-func (s *NotesService) ListIssueNotes(pid interface{}, issue int, opt *ListIssueNotesOptions, options ...RequestOptionFunc) ([]*Note, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/issues/%d/notes", PathEscape(project), issue)
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var n []*Note
-	resp, err := s.client.Do(req, &n)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return n, resp, nil
-}
-
-// GetIssueNote returns a single note for a specific project issue.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/notes.html#get-single-issue-note
-func (s *NotesService) GetIssueNote(pid interface{}, issue, note int, options ...RequestOptionFunc) (*Note, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/issues/%d/notes/%d", PathEscape(project), issue, note)
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	n := new(Note)
-	resp, err := s.client.Do(req, n)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return n, resp, nil
-}
-
-// CreateIssueNoteOptions represents the available CreateIssueNote()
-// options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/notes.html#create-new-issue-note
-type CreateIssueNoteOptions struct {
-	Body      *string    `url:"body,omitempty" json:"body,omitempty"`
-	CreatedAt *time.Time `url:"created_at,omitempty" json:"created_at,omitempty"`
-}
-
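As a hedged sketch of the CreateIssueNote call below: the token, project path, issue IID, and note body are all placeholder assumptions, not values from this patch.

package main

import (
	"fmt"
	"log"

	"github.com/xanzy/go-gitlab"
)

func main() {
	client, err := gitlab.NewClient("glpat-example-token") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// Comment on issue 7 of a project; both identifiers are made up.
	note, _, err := client.Notes.CreateIssueNote("my-group/my-project", 7, &gitlab.CreateIssueNoteOptions{
		Body: gitlab.String("Triaged: this reproduces on v1.2.3."),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("created note %d by %s\n", note.ID, note.Author.Username)
}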
-// CreateIssueNote creates a new note for a single project issue.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/notes.html#create-new-issue-note
-func (s *NotesService) CreateIssueNote(pid interface{}, issue int, opt *CreateIssueNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/issues/%d/notes", PathEscape(project), issue)
-
-	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	n := new(Note)
-	resp, err := s.client.Do(req, n)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return n, resp, nil
-}
-
-// UpdateIssueNoteOptions represents the available UpdateIssueNote()
-// options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/notes.html#modify-existing-issue-note
-type UpdateIssueNoteOptions struct {
-	Body *string `url:"body,omitempty" json:"body,omitempty"`
-}
-
-// UpdateIssueNote modifies an existing note of an issue.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/notes.html#modify-existing-issue-note
-func (s *NotesService) UpdateIssueNote(pid interface{}, issue, note int, opt *UpdateIssueNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/issues/%d/notes/%d", PathEscape(project), issue, note)
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	n := new(Note)
-	resp, err := s.client.Do(req, n)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return n, resp, nil
-}
-
-// DeleteIssueNote deletes an existing note of an issue.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/notes.html#delete-an-issue-note
-func (s *NotesService) DeleteIssueNote(pid interface{}, issue, note int, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/issues/%d/notes/%d", PathEscape(project), issue, note)
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// ListSnippetNotesOptions represents the available ListSnippetNotes() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/notes.html#list-all-snippet-notes
-type ListSnippetNotesOptions struct {
-	ListOptions
-	OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"`
-	Sort    *string `url:"sort,omitempty" json:"sort,omitempty"`
-}
-
-// ListSnippetNotes gets a list of all notes for a single snippet. Snippet
-// notes are comments users can post to a snippet.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/notes.html#list-all-snippet-notes
-func (s *NotesService) ListSnippetNotes(pid interface{}, snippet int, opt *ListSnippetNotesOptions, options ...RequestOptionFunc) ([]*Note, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/snippets/%d/notes", PathEscape(project), snippet)
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var n []*Note
-	resp, err := s.client.Do(req, &n)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return n, resp, nil
-}
-
-// GetSnippetNote returns a single note for a given snippet.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/notes.html#get-single-snippet-note -func (s *NotesService) GetSnippetNote(pid interface{}, snippet, note int, options ...RequestOptionFunc) (*Note, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/snippets/%d/notes/%d", PathEscape(project), snippet, note) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - n := new(Note) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil -} - -// CreateSnippetNoteOptions represents the available CreateSnippetNote() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/notes.html#create-new-snippet-note -type CreateSnippetNoteOptions struct { - Body *string `url:"body,omitempty" json:"body,omitempty"` -} - -// CreateSnippetNote creates a new note for a single snippet. Snippet notes are -// comments users can post to a snippet. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/notes.html#create-new-snippet-note -func (s *NotesService) CreateSnippetNote(pid interface{}, snippet int, opt *CreateSnippetNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/snippets/%d/notes", PathEscape(project), snippet) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - n := new(Note) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil -} - -// UpdateSnippetNoteOptions represents the available UpdateSnippetNote() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/notes.html#modify-existing-snippet-note -type UpdateSnippetNoteOptions struct { - Body *string `url:"body,omitempty" json:"body,omitempty"` -} - -// UpdateSnippetNote modifies existing note of a snippet. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/notes.html#modify-existing-snippet-note -func (s *NotesService) UpdateSnippetNote(pid interface{}, snippet, note int, opt *UpdateSnippetNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/snippets/%d/notes/%d", PathEscape(project), snippet, note) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - n := new(Note) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil -} - -// DeleteSnippetNote deletes an existing note of a snippet. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/notes.html#delete-a-snippet-note -func (s *NotesService) DeleteSnippetNote(pid interface{}, snippet, note int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/snippets/%d/notes/%d", PathEscape(project), snippet, note) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// ListMergeRequestNotesOptions represents the available ListMergeRequestNotes() -// options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/notes.html#list-all-merge-request-notes -type ListMergeRequestNotesOptions struct { - ListOptions - OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` - Sort *string `url:"sort,omitempty" json:"sort,omitempty"` -} - -// ListMergeRequestNotes gets a list of all notes for a single merge request. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/notes.html#list-all-merge-request-notes -func (s *NotesService) ListMergeRequestNotes(pid interface{}, mergeRequest int, opt *ListMergeRequestNotesOptions, options ...RequestOptionFunc) ([]*Note, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/notes", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var n []*Note - resp, err := s.client.Do(req, &n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil -} - -// GetMergeRequestNote returns a single note for a given merge request. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/notes.html#get-single-merge-request-note -func (s *NotesService) GetMergeRequestNote(pid interface{}, mergeRequest, note int, options ...RequestOptionFunc) (*Note, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/notes/%d", PathEscape(project), mergeRequest, note) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - n := new(Note) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil -} - -// CreateMergeRequestNoteOptions represents the available -// CreateMergeRequestNote() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/notes.html#create-new-merge-request-note -type CreateMergeRequestNoteOptions struct { - Body *string `url:"body,omitempty" json:"body,omitempty"` -} - -// CreateMergeRequestNote creates a new note for a single merge request. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/notes.html#create-new-merge-request-note -func (s *NotesService) CreateMergeRequestNote(pid interface{}, mergeRequest int, opt *CreateMergeRequestNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/notes", PathEscape(project), mergeRequest) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - n := new(Note) - resp, err := s.client.Do(req, n) - if err != nil { - return nil, resp, err - } - - return n, resp, nil -} - -// UpdateMergeRequestNoteOptions represents the available -// UpdateMergeRequestNote() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/notes.html#modify-existing-merge-request-note -type UpdateMergeRequestNoteOptions struct { - Body *string `url:"body,omitempty" json:"body,omitempty"` -} - -// UpdateMergeRequestNote modifies existing note of a merge request. 
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/notes.html#modify-existing-merge-request-note
-func (s *NotesService) UpdateMergeRequestNote(pid interface{}, mergeRequest, note int, opt *UpdateMergeRequestNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf(
-		"projects/%s/merge_requests/%d/notes/%d", PathEscape(project), mergeRequest, note)
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	n := new(Note)
-	resp, err := s.client.Do(req, n)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return n, resp, nil
-}
-
-// DeleteMergeRequestNote deletes an existing note of a merge request.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/notes.html#delete-a-merge-request-note
-func (s *NotesService) DeleteMergeRequestNote(pid interface{}, mergeRequest, note int, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf(
-		"projects/%s/merge_requests/%d/notes/%d", PathEscape(project), mergeRequest, note)
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// ListEpicNotesOptions represents the available ListEpicNotes() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/notes.html#list-all-epic-notes
-type ListEpicNotesOptions struct {
-	ListOptions
-	OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"`
-	Sort    *string `url:"sort,omitempty" json:"sort,omitempty"`
-}
-
-// ListEpicNotes gets a list of all notes for a single epic.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/notes.html#list-all-epic-notes
-func (s *NotesService) ListEpicNotes(gid interface{}, epic int, opt *ListEpicNotesOptions, options ...RequestOptionFunc) ([]*Note, *Response, error) {
-	group, err := parseID(gid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("groups/%s/epics/%d/notes", PathEscape(group), epic)
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var n []*Note
-	resp, err := s.client.Do(req, &n)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return n, resp, nil
-}
-
-// GetEpicNote returns a single note for an epic.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/notes.html#get-single-epic-note
-func (s *NotesService) GetEpicNote(gid interface{}, epic, note int, options ...RequestOptionFunc) (*Note, *Response, error) {
-	group, err := parseID(gid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("groups/%s/epics/%d/notes/%d", PathEscape(group), epic, note)
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	n := new(Note)
-	resp, err := s.client.Do(req, n)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return n, resp, nil
-}
-
-// CreateEpicNoteOptions represents the available CreateEpicNote() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/notes.html#create-new-epic-note
-type CreateEpicNoteOptions struct {
-	Body *string `url:"body,omitempty" json:"body,omitempty"`
-}
-
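A small, assumption-laden sketch of the CreateEpicNote call that follows. Epics hang off a group, so the first argument is a group ID or full group path rather than a project; the exact epic identifier semantics (ID versus IID) should be checked against the GitLab docs, and all values below are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/xanzy/go-gitlab"
)

// createEpicNote posts a comment on an epic. The epic parameter is whatever
// identifier the notes endpoint expects; see the GitLab API docs.
func createEpicNote(client *gitlab.Client, group string, epic int, body string) (*gitlab.Note, error) {
	note, _, err := client.Notes.CreateEpicNote(group, epic, &gitlab.CreateEpicNoteOptions{
		Body: gitlab.String(body),
	})
	return note, err
}

func main() {
	client, err := gitlab.NewClient("glpat-example-token") // placeholder token
	if err != nil {
		log.Fatal(err)
	}
	note, err := createEpicNote(client, "my-group", 12, "Scheduled for next quarter.")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("created epic note %d\n", note.ID)
}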
-// CreateEpicNote creates a new note for a single epic.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/notes.html#create-new-epic-note
-func (s *NotesService) CreateEpicNote(gid interface{}, epic int, opt *CreateEpicNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) {
-	group, err := parseID(gid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("groups/%s/epics/%d/notes", PathEscape(group), epic)
-
-	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	n := new(Note)
-	resp, err := s.client.Do(req, n)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return n, resp, nil
-}
-
-// UpdateEpicNoteOptions represents the available UpdateEpicNote() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/notes.html#modify-existing-epic-note
-type UpdateEpicNoteOptions struct {
-	Body *string `url:"body,omitempty" json:"body,omitempty"`
-}
-
-// UpdateEpicNote modifies an existing note of an epic.
-//
-// https://docs.gitlab.com/ee/api/notes.html#modify-existing-epic-note
-func (s *NotesService) UpdateEpicNote(gid interface{}, epic, note int, opt *UpdateEpicNoteOptions, options ...RequestOptionFunc) (*Note, *Response, error) {
-	group, err := parseID(gid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("groups/%s/epics/%d/notes/%d", PathEscape(group), epic, note)
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	n := new(Note)
-	resp, err := s.client.Do(req, n)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return n, resp, nil
-}
-
-// DeleteEpicNote deletes an existing note of an epic.
-//
-// https://docs.gitlab.com/ee/api/notes.html#delete-an-epic-note
-func (s *NotesService) DeleteEpicNote(gid interface{}, epic, note int, options ...RequestOptionFunc) (*Response, error) {
-	group, err := parseID(gid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("groups/%s/epics/%d/notes/%d", PathEscape(group), epic, note)
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/notifications.go b/vendor/github.com/xanzy/go-gitlab/notifications.go
deleted file mode 100644
index 92c2cb189a..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/notifications.go
+++ /dev/null
@@ -1,242 +0,0 @@
-//
-// Copyright 2021, Sander van Harmelen
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package gitlab
-
-import (
-	"errors"
-	"fmt"
-	"net/http"
-)
-
-// NotificationSettingsService handles communication with the notification settings
-// related methods of the GitLab API.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/notification_settings.html
-type NotificationSettingsService struct {
-	client *Client
-}
-
-// NotificationSettings represents the GitLab notification setting.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/notification_settings.html#valid-notification-levels -type NotificationSettings struct { - Level NotificationLevelValue `json:"level"` - NotificationEmail string `json:"notification_email"` - Events *NotificationEvents `json:"events"` -} - -// NotificationEvents represents the available notification setting events. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/notification_settings.html#valid-notification-levels -type NotificationEvents struct { - CloseIssue bool `json:"close_issue"` - CloseMergeRequest bool `json:"close_merge_request"` - FailedPipeline bool `json:"failed_pipeline"` - FixedPipeline bool `json:"fixed_pipeline"` - IssueDue bool `json:"issue_due"` - MergeWhenPipelineSucceeds bool `json:"merge_when_pipeline_succeeds"` - MergeMergeRequest bool `json:"merge_merge_request"` - MovedProject bool `json:"moved_project"` - NewIssue bool `json:"new_issue"` - NewMergeRequest bool `json:"new_merge_request"` - NewEpic bool `json:"new_epic"` - NewNote bool `json:"new_note"` - PushToMergeRequest bool `json:"push_to_merge_request"` - ReassignIssue bool `json:"reassign_issue"` - ReassignMergeRequest bool `json:"reassign_merge_request"` - ReopenIssue bool `json:"reopen_issue"` - ReopenMergeRequest bool `json:"reopen_merge_request"` - SuccessPipeline bool `json:"success_pipeline"` -} - -func (ns NotificationSettings) String() string { - return Stringify(ns) -} - -// GetGlobalSettings returns current notification settings and email address. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/notification_settings.html#global-notification-settings -func (s *NotificationSettingsService) GetGlobalSettings(options ...RequestOptionFunc) (*NotificationSettings, *Response, error) { - u := "notification_settings" - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ns := new(NotificationSettings) - resp, err := s.client.Do(req, ns) - if err != nil { - return nil, resp, err - } - - return ns, resp, nil -} - -// NotificationSettingsOptions represents the available options that can be passed -// to the API when updating the notification settings. 
-type NotificationSettingsOptions struct { - Level *NotificationLevelValue `url:"level,omitempty" json:"level,omitempty"` - NotificationEmail *string `url:"notification_email,omitempty" json:"notification_email,omitempty"` - CloseIssue *bool `url:"close_issue,omitempty" json:"close_issue,omitempty"` - CloseMergeRequest *bool `url:"close_merge_request,omitempty" json:"close_merge_request,omitempty"` - FailedPipeline *bool `url:"failed_pipeline,omitempty" json:"failed_pipeline,omitempty"` - FixedPipeline *bool `url:"fixed_pipeline,omitempty" json:"fixed_pipeline,omitempty"` - IssueDue *bool `url:"issue_due,omitempty" json:"issue_due,omitempty"` - MergeMergeRequest *bool `url:"merge_merge_request,omitempty" json:"merge_merge_request,omitempty"` - MergeWhenPipelineSucceeds *bool `url:"merge_when_pipeline_succeeds,omitempty" json:"merge_when_pipeline_succeeds,omitempty"` - MovedProject *bool `url:"moved_project,omitempty" json:"moved_project,omitempty"` - NewEpic *bool `url:"new_epic,omitempty" json:"new_epic,omitempty"` - NewIssue *bool `url:"new_issue,omitempty" json:"new_issue,omitempty"` - NewMergeRequest *bool `url:"new_merge_request,omitempty" json:"new_merge_request,omitempty"` - NewNote *bool `url:"new_note,omitempty" json:"new_note,omitempty"` - PushToMergeRequest *bool `url:"push_to_merge_request,omitempty" json:"push_to_merge_request,omitempty"` - ReassignIssue *bool `url:"reassign_issue,omitempty" json:"reassign_issue,omitempty"` - ReassignMergeRequest *bool `url:"reassign_merge_request,omitempty" json:"reassign_merge_request,omitempty"` - ReopenIssue *bool `url:"reopen_issue,omitempty" json:"reopen_issue,omitempty"` - ReopenMergeRequest *bool `url:"reopen_merge_request,omitempty" json:"reopen_merge_request,omitempty"` - SuccessPipeline *bool `url:"success_pipeline,omitempty" json:"success_pipeline,omitempty"` -} - -// UpdateGlobalSettings updates current notification settings and email address. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/notification_settings.html#update-global-notification-settings -func (s *NotificationSettingsService) UpdateGlobalSettings(opt *NotificationSettingsOptions, options ...RequestOptionFunc) (*NotificationSettings, *Response, error) { - if opt.Level != nil && *opt.Level == GlobalNotificationLevel { - return nil, nil, errors.New( - "notification level 'global' is not valid for global notification settings") - } - - u := "notification_settings" - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - ns := new(NotificationSettings) - resp, err := s.client.Do(req, ns) - if err != nil { - return nil, resp, err - } - - return ns, resp, nil -} - -// GetSettingsForGroup returns current group notification settings. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/notification_settings.html#group--project-level-notification-settings -func (s *NotificationSettingsService) GetSettingsForGroup(gid interface{}, options ...RequestOptionFunc) (*NotificationSettings, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/notification_settings", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ns := new(NotificationSettings) - resp, err := s.client.Do(req, ns) - if err != nil { - return nil, resp, err - } - - return ns, resp, nil -} - -// GetSettingsForProject returns current project notification settings. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/notification_settings.html#group--project-level-notification-settings -func (s *NotificationSettingsService) GetSettingsForProject(pid interface{}, options ...RequestOptionFunc) (*NotificationSettings, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/notification_settings", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ns := new(NotificationSettings) - resp, err := s.client.Do(req, ns) - if err != nil { - return nil, resp, err - } - - return ns, resp, nil -} - -// UpdateSettingsForGroup updates current group notification settings. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/notification_settings.html#update-groupproject-level-notification-settings -func (s *NotificationSettingsService) UpdateSettingsForGroup(gid interface{}, opt *NotificationSettingsOptions, options ...RequestOptionFunc) (*NotificationSettings, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/notification_settings", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - ns := new(NotificationSettings) - resp, err := s.client.Do(req, ns) - if err != nil { - return nil, resp, err - } - - return ns, resp, nil -} - -// UpdateSettingsForProject updates current project notification settings. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/notification_settings.html#update-groupproject-level-notification-settings -func (s *NotificationSettingsService) UpdateSettingsForProject(pid interface{}, opt *NotificationSettingsOptions, options ...RequestOptionFunc) (*NotificationSettings, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/notification_settings", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - ns := new(NotificationSettings) - resp, err := s.client.Do(req, ns) - if err != nil { - return nil, resp, err - } - - return ns, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/packages.go b/vendor/github.com/xanzy/go-gitlab/packages.go deleted file mode 100644 index a6b252ed2e..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/packages.go +++ /dev/null @@ -1,261 +0,0 @@ -// -// Copyright 2021, Kordian Bruck -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// PackagesService handles communication with the packages related methods -// of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/packages.html -type PackagesService struct { - client *Client -} - -// Package represents a GitLab package. 
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/packages.html -type Package struct { - ID int `json:"id"` - Name string `json:"name"` - Version string `json:"version"` - PackageType string `json:"package_type"` - Status string `json:"status"` - Links *PackageLinks `json:"_links"` - CreatedAt *time.Time `json:"created_at"` - LastDownloadedAt *time.Time `json:"last_downloaded_at"` - Tags []PackageTag `json:"tags"` -} - -func (s Package) String() string { - return Stringify(s) -} - -// GroupPackage represents a GitLab group package. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/packages.html -type GroupPackage struct { - Package - ProjectID int `json:"project_id"` - ProjectPath string `json:"project_path"` -} - -func (s GroupPackage) String() string { - return Stringify(s) -} - -// PackageLinks holds links for itself and deleting. -type PackageLinks struct { - WebPath string `json:"web_path"` - DeleteAPIPath string `json:"delete_api_path"` -} - -func (s PackageLinks) String() string { - return Stringify(s) -} - -// PackageTag holds label information about the package -type PackageTag struct { - ID int `json:"id"` - PackageID int `json:"package_id"` - Name string `json:"name"` - CreatedAt *time.Time `json:"created_at"` - UpdatedAt *time.Time `json:"updated_at"` -} - -func (s PackageTag) String() string { - return Stringify(s) -} - -// PackageFile represents one file contained within a package. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/packages.html -type PackageFile struct { - ID int `json:"id"` - PackageID int `json:"package_id"` - CreatedAt *time.Time `json:"created_at"` - FileName string `json:"file_name"` - Size int `json:"size"` - FileMD5 string `json:"file_md5"` - FileSHA1 string `json:"file_sha1"` - FileSHA256 string `json:"file_sha256"` - Pipeline *[]Pipeline `json:"pipelines"` -} - -func (s PackageFile) String() string { - return Stringify(s) -} - -// ListProjectPackagesOptions represents the available ListProjectPackages() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/packages.html#within-a-project -type ListProjectPackagesOptions struct { - ListOptions - OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` - Sort *string `url:"sort,omitempty" json:"sort,omitempty"` - PackageType *string `url:"package_type,omitempty" json:"package_type,omitempty"` - PackageName *string `url:"package_name,omitempty" json:"package_name,omitempty"` - PackageVersion *string `url:"package_version,omitempty" json:"package_version,omitempty"` - IncludeVersionless *bool `url:"include_versionless,omitempty" json:"include_versionless,omitempty"` - Status *string `url:"status,omitempty" json:"status,omitempty"` -} - -// ListProjectPackages gets a list of packages in a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/packages.html#within-a-project -func (s *PackagesService) ListProjectPackages(pid interface{}, opt *ListProjectPackagesOptions, options ...RequestOptionFunc) ([]*Package, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/packages", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ps []*Package - resp, err := s.client.Do(req, &ps) - if err != nil { - return nil, resp, err - } - - return ps, resp, nil -} - -// ListGroupPackagesOptions represents the available ListGroupPackages() -// options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/packages.html#within-a-group -type ListGroupPackagesOptions struct { - ListOptions - ExcludeSubGroups *bool `url:"exclude_subgroups,omitempty" json:"exclude_subgroups,omitempty"` - OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` - Sort *string `url:"sort,omitempty" json:"sort,omitempty"` - PackageType *string `url:"package_type,omitempty" json:"package_type,omitempty"` - PackageName *string `url:"package_name,omitempty" json:"package_name,omitempty"` - IncludeVersionless *bool `url:"include_versionless,omitempty" json:"include_versionless,omitempty"` - Status *string `url:"status,omitempty" json:"status,omitempty"` -} - -// ListGroupPackages gets a list of packages in a group. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/packages.html#within-a-group -func (s *PackagesService) ListGroupPackages(gid interface{}, opt *ListGroupPackagesOptions, options ...RequestOptionFunc) ([]*GroupPackage, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/packages", PathEscape(group)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ps []*GroupPackage - resp, err := s.client.Do(req, &ps) - if err != nil { - return nil, resp, err - } - - return ps, resp, nil -} - -// ListPackageFilesOptions represents the available ListPackageFiles() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/packages.html#list-package-files -type ListPackageFilesOptions ListOptions - -// ListPackageFiles gets a list of files that are within a package -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/packages.html#list-package-files -func (s *PackagesService) ListPackageFiles(pid interface{}, pkg int, opt *ListPackageFilesOptions, options ...RequestOptionFunc) ([]*PackageFile, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf( - "projects/%s/packages/%d/package_files", - PathEscape(project), - pkg, - ) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pfs []*PackageFile - resp, err := s.client.Do(req, &pfs) - if err != nil { - return nil, resp, err - } - - return pfs, resp, nil -} - -// DeleteProjectPackage deletes a package in a project. 
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/packages.html#delete-a-project-package
-func (s *PackagesService) DeleteProjectPackage(pid interface{}, pkg int, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/packages/%d", PathEscape(project), pkg)
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// DeletePackageFile deletes a file in a project package.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/packages.html#delete-a-package-file
-func (s *PackagesService) DeletePackageFile(pid interface{}, pkg, file int, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/packages/%d/package_files/%d", PathEscape(project), pkg, file)
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/pages.go b/vendor/github.com/xanzy/go-gitlab/pages.go
deleted file mode 100644
index 617b0ba4b9..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/pages.go
+++ /dev/null
@@ -1,45 +0,0 @@
-//
-// Copyright 2021, Sander van Harmelen
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package gitlab
-
-import (
-	"fmt"
-	"net/http"
-)
-
-type PagesService struct {
-	client *Client
-}
-
-// UnpublishPages unpublishes pages. The user must have admin privileges.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/pages.html#unpublish-pages
-func (s *PagesService) UnpublishPages(gid interface{}, options ...RequestOptionFunc) (*Response, error) {
-	page, err := parseID(gid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/pages", PathEscape(page))
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/pages_domains.go b/vendor/github.com/xanzy/go-gitlab/pages_domains.go
deleted file mode 100644
index 07d985da76..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/pages_domains.go
+++ /dev/null
@@ -1,216 +0,0 @@
-//
-// Copyright 2021, Sander van Harmelen
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// PagesDomainsService handles communication with the pages domains -// related methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/pages_domains.html -type PagesDomainsService struct { - client *Client -} - -// PagesDomain represents a pages domain. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/pages_domains.html -type PagesDomain struct { - Domain string `json:"domain"` - AutoSslEnabled bool `json:"auto_ssl_enabled"` - URL string `json:"url"` - ProjectID int `json:"project_id"` - Verified bool `json:"verified"` - VerificationCode string `json:"verification_code"` - EnabledUntil *time.Time `json:"enabled_until"` - Certificate struct { - Subject string `json:"subject"` - Expired bool `json:"expired"` - Expiration *time.Time `json:"expiration"` - Certificate string `json:"certificate"` - CertificateText string `json:"certificate_text"` - } `json:"certificate"` -} - -// ListPagesDomainsOptions represents the available ListPagesDomains() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/pages_domains.html#list-pages-domains -type ListPagesDomainsOptions ListOptions - -// ListPagesDomains gets a list of project pages domains. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/pages_domains.html#list-pages-domains -func (s *PagesDomainsService) ListPagesDomains(pid interface{}, opt *ListPagesDomainsOptions, options ...RequestOptionFunc) ([]*PagesDomain, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pages/domains", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pd []*PagesDomain - resp, err := s.client.Do(req, &pd) - if err != nil { - return nil, resp, err - } - - return pd, resp, nil -} - -// ListAllPagesDomains gets a list of all pages domains. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/pages_domains.html#list-all-pages-domains -func (s *PagesDomainsService) ListAllPagesDomains(options ...RequestOptionFunc) ([]*PagesDomain, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "pages/domains", nil, options) - if err != nil { - return nil, nil, err - } - - var pd []*PagesDomain - resp, err := s.client.Do(req, &pd) - if err != nil { - return nil, resp, err - } - - return pd, resp, nil -} - -// GetPagesDomain get a specific pages domain for a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/pages_domains.html#single-pages-domain -func (s *PagesDomainsService) GetPagesDomain(pid interface{}, domain string, options ...RequestOptionFunc) (*PagesDomain, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pages/domains/%s", PathEscape(project), domain) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - pd := new(PagesDomain) - resp, err := s.client.Do(req, pd) - if err != nil { - return nil, resp, err - } - - return pd, resp, nil -} - -// CreatePagesDomainOptions represents the available CreatePagesDomain() options. 
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/pages_domains.html#create-new-pages-domain
-type CreatePagesDomainOptions struct {
-	Domain         *string `url:"domain,omitempty" json:"domain,omitempty"`
-	AutoSslEnabled *bool   `url:"auto_ssl_enabled,omitempty" json:"auto_ssl_enabled,omitempty"`
-	Certificate    *string `url:"certificate,omitempty" json:"certificate,omitempty"`
-	Key            *string `url:"key,omitempty" json:"key,omitempty"`
-}
-
-// CreatePagesDomain creates a new project pages domain.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/pages_domains.html#create-new-pages-domain
-func (s *PagesDomainsService) CreatePagesDomain(pid interface{}, opt *CreatePagesDomainOptions, options ...RequestOptionFunc) (*PagesDomain, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/pages/domains", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	pd := new(PagesDomain)
-	resp, err := s.client.Do(req, pd)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return pd, resp, nil
-}
-
-// UpdatePagesDomainOptions represents the available UpdatePagesDomain() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/pages_domains.html#update-pages-domain
-type UpdatePagesDomainOptions struct {
-	AutoSslEnabled *bool   `url:"auto_ssl_enabled,omitempty" json:"auto_ssl_enabled,omitempty"`
-	Certificate    *string `url:"certificate,omitempty" json:"certificate,omitempty"`
-	Key            *string `url:"key,omitempty" json:"key,omitempty"`
-}
-
-// UpdatePagesDomain updates an existing project pages domain.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/pages_domains.html#update-pages-domain
-func (s *PagesDomainsService) UpdatePagesDomain(pid interface{}, domain string, opt *UpdatePagesDomainOptions, options ...RequestOptionFunc) (*PagesDomain, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/pages/domains/%s", PathEscape(project), domain)
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	pd := new(PagesDomain)
-	resp, err := s.client.Do(req, pd)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return pd, resp, nil
-}
-
-// DeletePagesDomain deletes an existing project pages domain.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/pages_domains.html#delete-pages-domain
-func (s *PagesDomainsService) DeletePagesDomain(pid interface{}, domain string, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/pages/domains/%s", PathEscape(project), domain)
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/personal_access_tokens.go b/vendor/github.com/xanzy/go-gitlab/personal_access_tokens.go
deleted file mode 100644
index 14aee9ee05..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/personal_access_tokens.go
+++ /dev/null
@@ -1,222 +0,0 @@
-//
-// Copyright 2022, Ryan Glab
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// PersonalAccessTokensService handles communication with the personal access -// tokens related methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/personal_access_tokens.html -type PersonalAccessTokensService struct { - client *Client -} - -// PersonalAccessToken represents a personal access token. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/personal_access_tokens.html -type PersonalAccessToken struct { - ID int `json:"id"` - Name string `json:"name"` - Revoked bool `json:"revoked"` - CreatedAt *time.Time `json:"created_at"` - Scopes []string `json:"scopes"` - UserID int `json:"user_id"` - LastUsedAt *time.Time `json:"last_used_at,omitempty"` - Active bool `json:"active"` - ExpiresAt *ISOTime `json:"expires_at"` - Token string `json:"token,omitempty"` -} - -func (p PersonalAccessToken) String() string { - return Stringify(p) -} - -// ListPersonalAccessTokensOptions represents the available -// ListPersonalAccessTokens() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/personal_access_tokens.html#list-personal-access-tokens -type ListPersonalAccessTokensOptions struct { - ListOptions - CreatedAfter *ISOTime `url:"created_after,omitempty" json:"created_after,omitempty"` - CreatedBefore *ISOTime `url:"created_before,omitempty" json:"created_before,omitempty"` - LastUsedAfter *ISOTime `url:"last_used_after,omitempty" json:"last_used_after,omitempty"` - LastUsedBefore *ISOTime `url:"last_used_before,omitempty" json:"last_used_before,omitempty"` - Revoked *bool `url:"revoked,omitempty" json:"revoked,omitempty"` - Search *string `url:"search,omitempty" json:"search,omitempty"` - State *string `url:"state,omitempty" json:"state,omitempty"` - UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` -} - -// ListPersonalAccessTokens gets a list of all personal access tokens. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/personal_access_tokens.html#list-personal-access-tokens -func (s *PersonalAccessTokensService) ListPersonalAccessTokens(opt *ListPersonalAccessTokensOptions, options ...RequestOptionFunc) ([]*PersonalAccessToken, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "personal_access_tokens", opt, options) - if err != nil { - return nil, nil, err - } - - var pats []*PersonalAccessToken - resp, err := s.client.Do(req, &pats) - if err != nil { - return nil, resp, err - } - - return pats, resp, nil -} - -// GetSinglePersonalAccessTokenByID get a single personal access token by its ID. 
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/personal_access_tokens.html#using-a-personal-access-token-id
-func (s *PersonalAccessTokensService) GetSinglePersonalAccessTokenByID(token int, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) {
-	u := fmt.Sprintf("personal_access_tokens/%d", token)
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	pat := new(PersonalAccessToken)
-	resp, err := s.client.Do(req, pat)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return pat, resp, nil
-}
-
-// GetSinglePersonalAccessToken gets a single personal access token by
-// passing the token in a request header.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/personal_access_tokens.html#using-a-request-header
-func (s *PersonalAccessTokensService) GetSinglePersonalAccessToken(options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) {
-	u := "personal_access_tokens/self"
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	pat := new(PersonalAccessToken)
-	resp, err := s.client.Do(req, pat)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return pat, resp, nil
-}
-
-// RotatePersonalAccessTokenOptions represents the available RotatePersonalAccessToken()
-// options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/personal_access_tokens.html#rotate-a-personal-access-token
-type RotatePersonalAccessTokenOptions struct {
-	ExpiresAt *ISOTime `url:"expires_at,omitempty" json:"expires_at,omitempty"`
-}
-
-// RotatePersonalAccessToken is a backwards-compat shim for RotatePersonalAccessTokenByID.
-func (s *PersonalAccessTokensService) RotatePersonalAccessToken(token int, opt *RotatePersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) {
-	return s.RotatePersonalAccessTokenByID(token, opt, options...)
-}
-
-// RotatePersonalAccessTokenByID revokes a token and returns a new token that
-// expires in one week by default.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/personal_access_tokens.html#use-a-personal-access-token-id
-func (s *PersonalAccessTokensService) RotatePersonalAccessTokenByID(token int, opt *RotatePersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) {
-	u := fmt.Sprintf("personal_access_tokens/%d/rotate", token)
-
-	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	pat := new(PersonalAccessToken)
-	resp, err := s.client.Do(req, pat)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return pat, resp, nil
-}
-
-// RotatePersonalAccessTokenSelf revokes the currently authenticated token
-// and returns a new token that expires in one week by default.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/personal_access_tokens.html#use-a-request-header
-func (s *PersonalAccessTokensService) RotatePersonalAccessTokenSelf(opt *RotatePersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) {
-	u := "personal_access_tokens/self/rotate"
-
-	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	pat := new(PersonalAccessToken)
-	resp, err := s.client.Do(req, pat)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return pat, resp, nil
-}
-
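A short sketch of the rotation flow above, for reference only; the client construction, token ID, and expiry below are placeholders and not part of the vendored file:

package main

import (
	"fmt"
	"log"
	"time"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	client, err := gitlab.NewClient("glpat-example-token") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// Rotate token 42 and request a 30-day expiry instead of the
	// one-week default; the new secret is only returned once.
	expires := gitlab.ISOTime(time.Now().AddDate(0, 0, 30))
	pat, _, err := client.PersonalAccessTokens.RotatePersonalAccessTokenByID(
		42, &gitlab.RotatePersonalAccessTokenOptions{ExpiresAt: &expires})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("new token:", pat.Token)
}
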
-// RevokePersonalAccessToken is a backwards-compat shim for RevokePersonalAccessTokenByID.
-func (s *PersonalAccessTokensService) RevokePersonalAccessToken(token int, options ...RequestOptionFunc) (*Response, error) {
-	return s.RevokePersonalAccessTokenByID(token, options...)
-}
-
-// RevokePersonalAccessTokenByID revokes a personal access token by its ID.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/personal_access_tokens.html#using-a-personal-access-token-id-1
-func (s *PersonalAccessTokensService) RevokePersonalAccessTokenByID(token int, options ...RequestOptionFunc) (*Response, error) {
-	u := fmt.Sprintf("personal_access_tokens/%d", token)
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// RevokePersonalAccessTokenSelf revokes the currently authenticated
-// personal access token.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/personal_access_tokens.html#using-a-request-header-1
-func (s *PersonalAccessTokensService) RevokePersonalAccessTokenSelf(options ...RequestOptionFunc) (*Response, error) {
-	u := "personal_access_tokens/self"
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/pipeline_schedules.go b/vendor/github.com/xanzy/go-gitlab/pipeline_schedules.go
deleted file mode 100644
index 51477f21bd..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/pipeline_schedules.go
+++ /dev/null
@@ -1,385 +0,0 @@
-//
-// Copyright 2021, Sander van Harmelen
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package gitlab
-
-import (
-	"fmt"
-	"net/http"
-	"time"
-)
-
-// PipelineSchedulesService handles communication with the pipeline
-// schedules related methods of the GitLab API.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/pipeline_schedules.html
-type PipelineSchedulesService struct {
-	client *Client
-}
-
-// PipelineSchedule represents a pipeline schedule.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/pipeline_schedules.html
-type PipelineSchedule struct {
-	ID           int                 `json:"id"`
-	Description  string              `json:"description"`
-	Ref          string              `json:"ref"`
-	Cron         string              `json:"cron"`
-	CronTimezone string              `json:"cron_timezone"`
-	NextRunAt    *time.Time          `json:"next_run_at"`
-	Active       bool                `json:"active"`
-	CreatedAt    *time.Time          `json:"created_at"`
-	UpdatedAt    *time.Time          `json:"updated_at"`
-	Owner        *User               `json:"owner"`
-	LastPipeline *LastPipeline       `json:"last_pipeline"`
-	Variables    []*PipelineVariable `json:"variables"`
-}
-
-// LastPipeline represents the last pipeline run by a schedule. It is
-// returned only when getting a single pipeline schedule.
-type LastPipeline struct {
-	ID     int    `json:"id"`
-	SHA    string `json:"sha"`
-	Ref    string `json:"ref"`
-	Status string `json:"status"`
-	WebURL string `json:"web_url"`
-}
-
-// ListPipelineSchedulesOptions represents the available ListPipelineSchedules() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/pipeline_schedules.html#get-all-pipeline-schedules
-type ListPipelineSchedulesOptions ListOptions
-
-// ListPipelineSchedules gets a list of a project's pipeline schedules.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/pipeline_schedules.html#get-all-pipeline-schedules
-func (s *PipelineSchedulesService) ListPipelineSchedules(pid interface{}, opt *ListPipelineSchedulesOptions, options ...RequestOptionFunc) ([]*PipelineSchedule, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/pipeline_schedules", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var ps []*PipelineSchedule
-	resp, err := s.client.Do(req, &ps)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return ps, resp, nil
-}
-
-// GetPipelineSchedule gets a pipeline schedule.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/pipeline_schedules.html#get-a-single-pipeline-schedule
-func (s *PipelineSchedulesService) GetPipelineSchedule(pid interface{}, schedule int, options ...RequestOptionFunc) (*PipelineSchedule, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/pipeline_schedules/%d", PathEscape(project), schedule)
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	p := new(PipelineSchedule)
-	resp, err := s.client.Do(req, p)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return p, resp, nil
-}
-
-// ListPipelinesTriggeredByScheduleOptions represents the available
-// ListPipelinesTriggeredBySchedule() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/pipeline_schedules.html#get-all-pipelines-triggered-by-a-pipeline-schedule
-type ListPipelinesTriggeredByScheduleOptions ListOptions
-
-// ListPipelinesTriggeredBySchedule gets all pipelines triggered by a pipeline
-// schedule.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/pipeline_schedules.html#get-all-pipelines-triggered-by-a-pipeline-schedule
-func (s *PipelineSchedulesService) ListPipelinesTriggeredBySchedule(pid interface{}, schedule int, opt *ListPipelinesTriggeredByScheduleOptions, options ...RequestOptionFunc) ([]*Pipeline, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/pipeline_schedules/%d/pipelines", PathEscape(project), schedule)
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var p []*Pipeline
-	resp, err := s.client.Do(req, &p)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return p, resp, nil
-}
-
-// CreatePipelineScheduleOptions represents the available
-// CreatePipelineSchedule() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/pipeline_schedules.html#create-a-new-pipeline-schedule
-type CreatePipelineScheduleOptions struct {
-	Description  *string `url:"description" json:"description"`
-	Ref          *string `url:"ref" json:"ref"`
-	Cron         *string `url:"cron" json:"cron"`
-	CronTimezone *string `url:"cron_timezone,omitempty" json:"cron_timezone,omitempty"`
-	Active       *bool   `url:"active,omitempty" json:"active,omitempty"`
-}
-
-// CreatePipelineSchedule creates a pipeline schedule.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/pipeline_schedules.html#create-a-new-pipeline-schedule -func (s *PipelineSchedulesService) CreatePipelineSchedule(pid interface{}, opt *CreatePipelineScheduleOptions, options ...RequestOptionFunc) (*PipelineSchedule, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pipeline_schedules", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - p := new(PipelineSchedule) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -// EditPipelineScheduleOptions represents the available -// EditPipelineSchedule() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/pipeline_schedules.html#edit-a-pipeline-schedule -type EditPipelineScheduleOptions struct { - Description *string `url:"description,omitempty" json:"description,omitempty"` - Ref *string `url:"ref,omitempty" json:"ref,omitempty"` - Cron *string `url:"cron,omitempty" json:"cron,omitempty"` - CronTimezone *string `url:"cron_timezone,omitempty" json:"cron_timezone,omitempty"` - Active *bool `url:"active,omitempty" json:"active,omitempty"` -} - -// EditPipelineSchedule edits a pipeline schedule. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/pipeline_schedules.html#edit-a-pipeline-schedule -func (s *PipelineSchedulesService) EditPipelineSchedule(pid interface{}, schedule int, opt *EditPipelineScheduleOptions, options ...RequestOptionFunc) (*PipelineSchedule, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pipeline_schedules/%d", PathEscape(project), schedule) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - p := new(PipelineSchedule) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -// TakeOwnershipOfPipelineSchedule sets the owner of the specified -// pipeline schedule to the user issuing the request. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/pipeline_schedules.html#take-ownership-of-a-pipeline-schedule -func (s *PipelineSchedulesService) TakeOwnershipOfPipelineSchedule(pid interface{}, schedule int, options ...RequestOptionFunc) (*PipelineSchedule, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/pipeline_schedules/%d/take_ownership", PathEscape(project), schedule) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - p := new(PipelineSchedule) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -// DeletePipelineSchedule deletes a pipeline schedule. 
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/pipeline_schedules.html#delete-a-pipeline-schedule
-func (s *PipelineSchedulesService) DeletePipelineSchedule(pid interface{}, schedule int, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/pipeline_schedules/%d", PathEscape(project), schedule)
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// RunPipelineSchedule triggers a new scheduled pipeline to run immediately.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/pipeline_schedules.html#run-a-scheduled-pipeline-immediately
-func (s *PipelineSchedulesService) RunPipelineSchedule(pid interface{}, schedule int, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/pipeline_schedules/%d/play", PathEscape(project), schedule)
-
-	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// CreatePipelineScheduleVariableOptions represents the available
-// CreatePipelineScheduleVariable() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/pipeline_schedules.html#create-a-new-pipeline-schedule
-type CreatePipelineScheduleVariableOptions struct {
-	Key          *string            `url:"key" json:"key"`
-	Value        *string            `url:"value" json:"value"`
-	VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"`
-}
-
-// CreatePipelineScheduleVariable creates a pipeline schedule variable.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/pipeline_schedules.html#create-a-new-pipeline-schedule
-func (s *PipelineSchedulesService) CreatePipelineScheduleVariable(pid interface{}, schedule int, opt *CreatePipelineScheduleVariableOptions, options ...RequestOptionFunc) (*PipelineVariable, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/pipeline_schedules/%d/variables", PathEscape(project), schedule)
-
-	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	p := new(PipelineVariable)
-	resp, err := s.client.Do(req, p)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return p, resp, nil
-}
-
-// EditPipelineScheduleVariableOptions represents the available
-// EditPipelineScheduleVariable() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/pipeline_schedules.html#edit-a-pipeline-schedule-variable
-type EditPipelineScheduleVariableOptions struct {
-	Value        *string            `url:"value" json:"value"`
-	VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"`
-}
-
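For reference, a minimal sketch of attaching a variable to an existing schedule via the API above; the project ID, schedule ID, key, and value are illustrative placeholders:

package main

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	client, err := gitlab.NewClient("glpat-example-token") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// Attach a variable to pipeline schedule 7 of project 1234.
	v, _, err := client.PipelineSchedules.CreatePipelineScheduleVariable(1234, 7,
		&gitlab.CreatePipelineScheduleVariableOptions{
			Key:   gitlab.String("DEPLOY_ENV"),
			Value: gitlab.String("staging"),
		})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created schedule variable %s", v.Key)
}
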
-// EditPipelineScheduleVariable edits a pipeline schedule variable.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/pipeline_schedules.html#edit-a-pipeline-schedule-variable
-func (s *PipelineSchedulesService) EditPipelineScheduleVariable(pid interface{}, schedule int, key string, opt *EditPipelineScheduleVariableOptions, options ...RequestOptionFunc) (*PipelineVariable, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/pipeline_schedules/%d/variables/%s", PathEscape(project), schedule, key)
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	p := new(PipelineVariable)
-	resp, err := s.client.Do(req, p)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return p, resp, nil
-}
-
-// DeletePipelineScheduleVariable deletes a pipeline schedule variable.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/pipeline_schedules.html#delete-a-pipeline-schedule-variable
-func (s *PipelineSchedulesService) DeletePipelineScheduleVariable(pid interface{}, schedule int, key string, options ...RequestOptionFunc) (*PipelineVariable, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/pipeline_schedules/%d/variables/%s", PathEscape(project), schedule, key)
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	p := new(PipelineVariable)
-	resp, err := s.client.Do(req, p)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return p, resp, nil
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/pipeline_triggers.go b/vendor/github.com/xanzy/go-gitlab/pipeline_triggers.go
deleted file mode 100644
index 39269a3dbc..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/pipeline_triggers.go
+++ /dev/null
@@ -1,248 +0,0 @@
-//
-// Copyright 2021, Sander van Harmelen
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package gitlab
-
-import (
-	"fmt"
-	"net/http"
-	"time"
-)
-
-// PipelineTriggersService handles Project pipeline triggers.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/pipeline_triggers.html
-type PipelineTriggersService struct {
-	client *Client
-}
-
-// PipelineTrigger represents a project pipeline trigger.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/pipeline_triggers.html
-type PipelineTrigger struct {
-	ID          int        `json:"id"`
-	Description string     `json:"description"`
-	CreatedAt   *time.Time `json:"created_at"`
-	DeletedAt   *time.Time `json:"deleted_at"`
-	LastUsed    *time.Time `json:"last_used"`
-	Token       string     `json:"token"`
-	UpdatedAt   *time.Time `json:"updated_at"`
-	Owner       *User      `json:"owner"`
-}
-
-// ListPipelineTriggersOptions represents the available ListPipelineTriggers() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/pipeline_triggers.html#list-project-trigger-tokens
-type ListPipelineTriggersOptions ListOptions
-
-// ListPipelineTriggers gets a list of project triggers.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/pipeline_triggers.html#list-project-trigger-tokens -func (s *PipelineTriggersService) ListPipelineTriggers(pid interface{}, opt *ListPipelineTriggersOptions, options ...RequestOptionFunc) ([]*PipelineTrigger, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/triggers", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pt []*PipelineTrigger - resp, err := s.client.Do(req, &pt) - if err != nil { - return nil, resp, err - } - - return pt, resp, nil -} - -// GetPipelineTrigger gets a specific pipeline trigger for a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/pipeline_triggers.html#get-trigger-token-details -func (s *PipelineTriggersService) GetPipelineTrigger(pid interface{}, trigger int, options ...RequestOptionFunc) (*PipelineTrigger, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/triggers/%d", PathEscape(project), trigger) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - pt := new(PipelineTrigger) - resp, err := s.client.Do(req, pt) - if err != nil { - return nil, resp, err - } - - return pt, resp, nil -} - -// AddPipelineTriggerOptions represents the available AddPipelineTrigger() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/pipeline_triggers.html#create-a-trigger-token -type AddPipelineTriggerOptions struct { - Description *string `url:"description,omitempty" json:"description,omitempty"` -} - -// AddPipelineTrigger adds a pipeline trigger to a specified project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/pipeline_triggers.html#create-a-trigger-token -func (s *PipelineTriggersService) AddPipelineTrigger(pid interface{}, opt *AddPipelineTriggerOptions, options ...RequestOptionFunc) (*PipelineTrigger, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/triggers", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - pt := new(PipelineTrigger) - resp, err := s.client.Do(req, pt) - if err != nil { - return nil, resp, err - } - - return pt, resp, nil -} - -// EditPipelineTriggerOptions represents the available EditPipelineTrigger() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/pipeline_triggers.html#update-a-project-trigger-token -type EditPipelineTriggerOptions struct { - Description *string `url:"description,omitempty" json:"description,omitempty"` -} - -// EditPipelineTrigger edits a trigger for a specified project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/pipeline_triggers.html#update-a-project-trigger-token -func (s *PipelineTriggersService) EditPipelineTrigger(pid interface{}, trigger int, opt *EditPipelineTriggerOptions, options ...RequestOptionFunc) (*PipelineTrigger, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/triggers/%d", PathEscape(project), trigger) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - pt := new(PipelineTrigger) - resp, err := s.client.Do(req, pt) - if err != nil { - return nil, resp, err - } - - return pt, resp, nil -} - -// TakeOwnershipOfPipelineTrigger sets the owner of the specified -// pipeline trigger to the user issuing the request. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/pipeline_triggers.html#take-ownership-of-a-project-trigger -func (s *PipelineTriggersService) TakeOwnershipOfPipelineTrigger(pid interface{}, trigger int, options ...RequestOptionFunc) (*PipelineTrigger, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/triggers/%d/take_ownership", PathEscape(project), trigger) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - pt := new(PipelineTrigger) - resp, err := s.client.Do(req, pt) - if err != nil { - return nil, resp, err - } - - return pt, resp, nil -} - -// DeletePipelineTrigger removes a trigger from a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/pipeline_triggers.html#remove-a-project-trigger-token -func (s *PipelineTriggersService) DeletePipelineTrigger(pid interface{}, trigger int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/triggers/%d", PathEscape(project), trigger) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// RunPipelineTriggerOptions represents the available RunPipelineTrigger() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/pipeline_triggers.html#trigger-a-pipeline-with-a-token -type RunPipelineTriggerOptions struct { - Ref *string `url:"ref" json:"ref"` - Token *string `url:"token" json:"token"` - Variables map[string]string `url:"variables,omitempty" json:"variables,omitempty"` -} - -// RunPipelineTrigger starts a trigger from a project. 
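These options feed the single POST implemented just below; as a caller-side sketch (the ref, trigger token, and variables are placeholders, not values from this patch):

package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	client, err := gitlab.NewClient("glpat-example-token") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// Trigger a pipeline on main with one variable, using a trigger token.
	pipe, _, err := client.PipelineTriggers.RunPipelineTrigger(1234,
		&gitlab.RunPipelineTriggerOptions{
			Ref:       gitlab.String("main"),
			Token:     gitlab.String("glptt-example-trigger-token"),
			Variables: map[string]string{"DEPLOY_ENV": "staging"},
		})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("started pipeline", pipe.ID)
}
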
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/pipeline_triggers.html#trigger-a-pipeline-with-a-token
-func (s *PipelineTriggersService) RunPipelineTrigger(pid interface{}, opt *RunPipelineTriggerOptions, options ...RequestOptionFunc) (*Pipeline, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/trigger/pipeline", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	pt := new(Pipeline)
-	resp, err := s.client.Do(req, pt)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return pt, resp, nil
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/pipelines.go b/vendor/github.com/xanzy/go-gitlab/pipelines.go
deleted file mode 100644
index 3f2448447e..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/pipelines.go
+++ /dev/null
@@ -1,408 +0,0 @@
-//
-// Copyright 2021, Igor Varavko
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package gitlab
-
-import (
-	"fmt"
-	"net/http"
-	"time"
-)
-
-// PipelinesService handles communication with the pipelines related
-// methods of the GitLab API.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html
-type PipelinesService struct {
-	client *Client
-}
-
-// PipelineVariable represents a pipeline variable.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html
-type PipelineVariable struct {
-	Key          string            `json:"key"`
-	Value        string            `json:"value"`
-	VariableType VariableTypeValue `json:"variable_type"`
-}
-
-// Pipeline represents a GitLab pipeline.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html
-type Pipeline struct {
-	ID             int             `json:"id"`
-	IID            int             `json:"iid"`
-	ProjectID      int             `json:"project_id"`
-	Status         string          `json:"status"`
-	Source         string          `json:"source"`
-	Ref            string          `json:"ref"`
-	Name           string          `json:"name"`
-	SHA            string          `json:"sha"`
-	BeforeSHA      string          `json:"before_sha"`
-	Tag            bool            `json:"tag"`
-	YamlErrors     string          `json:"yaml_errors"`
-	User           *BasicUser      `json:"user"`
-	UpdatedAt      *time.Time      `json:"updated_at"`
-	CreatedAt      *time.Time      `json:"created_at"`
-	StartedAt      *time.Time      `json:"started_at"`
-	FinishedAt     *time.Time      `json:"finished_at"`
-	CommittedAt    *time.Time      `json:"committed_at"`
-	Duration       int             `json:"duration"`
-	QueuedDuration int             `json:"queued_duration"`
-	Coverage       string          `json:"coverage"`
-	WebURL         string          `json:"web_url"`
-	DetailedStatus *DetailedStatus `json:"detailed_status"`
-}
-
-// DetailedStatus contains detailed information about the status of a pipeline.
-type DetailedStatus struct { - Icon string `json:"icon"` - Text string `json:"text"` - Label string `json:"label"` - Group string `json:"group"` - Tooltip string `json:"tooltip"` - HasDetails bool `json:"has_details"` - DetailsPath string `json:"details_path"` - Illustration struct { - Image string `json:"image"` - } `json:"illustration"` - Favicon string `json:"favicon"` -} - -func (p Pipeline) String() string { - return Stringify(p) -} - -// PipelineTestReport contains a detailed report of a test run. -type PipelineTestReport struct { - TotalTime float64 `json:"total_time"` - TotalCount int `json:"total_count"` - SuccessCount int `json:"success_count"` - FailedCount int `json:"failed_count"` - SkippedCount int `json:"skipped_count"` - ErrorCount int `json:"error_count"` - TestSuites []*PipelineTestSuites `json:"test_suites"` -} - -// PipelineTestSuites contains test suites results. -type PipelineTestSuites struct { - Name string `json:"name"` - TotalTime float64 `json:"total_time"` - TotalCount int `json:"total_count"` - SuccessCount int `json:"success_count"` - FailedCount int `json:"failed_count"` - SkippedCount int `json:"skipped_count"` - ErrorCount int `json:"error_count"` - TestCases []*PipelineTestCases `json:"test_cases"` -} - -// PipelineTestCases contains test cases details. -type PipelineTestCases struct { - Status string `json:"status"` - Name string `json:"name"` - Classname string `json:"classname"` - File string `json:"file"` - ExecutionTime float64 `json:"execution_time"` - SystemOutput interface{} `json:"system_output"` - StackTrace string `json:"stack_trace"` - AttachmentURL string `json:"attachment_url"` - RecentFailures *RecentFailures `json:"recent_failures"` -} - -// RecentFailures contains failures count for the project's default branch. -type RecentFailures struct { - Count int `json:"count"` - BaseBranch string `json:"base_branch"` -} - -func (p PipelineTestReport) String() string { - return Stringify(p) -} - -// PipelineInfo shows the basic entities of a pipeline, mostly used as fields -// on other assets, like Commit. -type PipelineInfo struct { - ID int `json:"id"` - IID int `json:"iid"` - ProjectID int `json:"project_id"` - Status string `json:"status"` - Source string `json:"source"` - Ref string `json:"ref"` - SHA string `json:"sha"` - WebURL string `json:"web_url"` - UpdatedAt *time.Time `json:"updated_at"` - CreatedAt *time.Time `json:"created_at"` -} - -func (p PipelineInfo) String() string { - return Stringify(p) -} - -// ListProjectPipelinesOptions represents the available ListProjectPipelines() options. 
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html#list-project-pipelines
-type ListProjectPipelinesOptions struct {
-	ListOptions
-	Scope         *string          `url:"scope,omitempty" json:"scope,omitempty"`
-	Status        *BuildStateValue `url:"status,omitempty" json:"status,omitempty"`
-	Source        *string          `url:"source,omitempty" json:"source,omitempty"`
-	Ref           *string          `url:"ref,omitempty" json:"ref,omitempty"`
-	SHA           *string          `url:"sha,omitempty" json:"sha,omitempty"`
-	YamlErrors    *bool            `url:"yaml_errors,omitempty" json:"yaml_errors,omitempty"`
-	Name          *string          `url:"name,omitempty" json:"name,omitempty"`
-	Username      *string          `url:"username,omitempty" json:"username,omitempty"`
-	UpdatedAfter  *time.Time       `url:"updated_after,omitempty" json:"updated_after,omitempty"`
-	UpdatedBefore *time.Time       `url:"updated_before,omitempty" json:"updated_before,omitempty"`
-	OrderBy       *string          `url:"order_by,omitempty" json:"order_by,omitempty"`
-	Sort          *string          `url:"sort,omitempty" json:"sort,omitempty"`
-}
-
-// ListProjectPipelines gets a list of project pipelines.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html#list-project-pipelines
-func (s *PipelinesService) ListProjectPipelines(pid interface{}, opt *ListProjectPipelinesOptions, options ...RequestOptionFunc) ([]*PipelineInfo, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/pipelines", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var p []*PipelineInfo
-	resp, err := s.client.Do(req, &p)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return p, resp, nil
-}
-
-// GetPipeline gets a single project pipeline.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html#get-a-single-pipeline
-func (s *PipelinesService) GetPipeline(pid interface{}, pipeline int, options ...RequestOptionFunc) (*Pipeline, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/pipelines/%d", PathEscape(project), pipeline)
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	p := new(Pipeline)
-	resp, err := s.client.Do(req, p)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return p, resp, nil
-}
-
-// GetPipelineVariables gets the variables of a single project pipeline.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html#get-variables-of-a-pipeline
-func (s *PipelinesService) GetPipelineVariables(pid interface{}, pipeline int, options ...RequestOptionFunc) ([]*PipelineVariable, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/pipelines/%d/variables", PathEscape(project), pipeline)
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var p []*PipelineVariable
-	resp, err := s.client.Do(req, &p)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return p, resp, nil
-}
-
-// GetPipelineTestReport gets the test report of a single project pipeline.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html#get-a-pipelines-test-report
-func (s *PipelinesService) GetPipelineTestReport(pid interface{}, pipeline int, options ...RequestOptionFunc) (*PipelineTestReport, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/pipelines/%d/test_report", PathEscape(project), pipeline)
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	p := new(PipelineTestReport)
-	resp, err := s.client.Do(req, p)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return p, resp, nil
-}
-
-// GetLatestPipelineOptions represents the available GetLatestPipeline() options.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html#get-the-latest-pipeline
-type GetLatestPipelineOptions struct {
-	Ref *string `url:"ref,omitempty" json:"ref,omitempty"`
-}
-
-// GetLatestPipeline gets the latest pipeline for a specific ref in a project.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html#get-the-latest-pipeline
-func (s *PipelinesService) GetLatestPipeline(pid interface{}, opt *GetLatestPipelineOptions, options ...RequestOptionFunc) (*Pipeline, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/pipelines/latest", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	p := new(Pipeline)
-	resp, err := s.client.Do(req, p)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return p, resp, nil
-}
-
-// CreatePipelineOptions represents the available CreatePipeline() options.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html#create-a-new-pipeline
-type CreatePipelineOptions struct {
-	Ref       *string                     `url:"ref" json:"ref"`
-	Variables *[]*PipelineVariableOptions `url:"variables,omitempty" json:"variables,omitempty"`
-}
-
-// PipelineVariableOptions represents a pipeline variable.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html#create-a-new-pipeline
-type PipelineVariableOptions struct {
-	Key          *string            `url:"key,omitempty" json:"key,omitempty"`
-	Value        *string            `url:"value,omitempty" json:"value,omitempty"`
-	VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"`
-}
-
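A minimal sketch of driving these creation options from a consumer; the project ID, ref, and variable values are placeholders only:

package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	client, err := gitlab.NewClient("glpat-example-token") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// Create a pipeline on main with a single pipeline variable.
	vars := []*gitlab.PipelineVariableOptions{{
		Key:   gitlab.String("DEPLOY_ENV"),
		Value: gitlab.String("staging"),
	}}
	pipe, _, err := client.Pipelines.CreatePipeline(1234, &gitlab.CreatePipelineOptions{
		Ref:       gitlab.String("main"),
		Variables: &vars,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created pipeline", pipe.WebURL)
}
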
-// CreatePipeline creates a new project pipeline.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/pipelines.html#create-a-new-pipeline
-func (s *PipelinesService) CreatePipeline(pid interface{}, opt *CreatePipelineOptions, options ...RequestOptionFunc) (*Pipeline, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/pipeline", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	p := new(Pipeline)
-	resp, err := s.client.Do(req, p)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return p, resp, nil
-}
-
-// RetryPipelineBuild retries failed builds in a pipeline.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/pipelines.html#retry-jobs-in-a-pipeline
-func (s *PipelinesService) RetryPipelineBuild(pid interface{}, pipeline int, options ...RequestOptionFunc) (*Pipeline, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/pipelines/%d/retry", PathEscape(project), pipeline)
-
-	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	p := new(Pipeline)
-	resp, err := s.client.Do(req, p)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return p, resp, nil
-}
-
-// CancelPipelineBuild cancels a pipeline's builds.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/pipelines.html#cancel-a-pipelines-jobs
-func (s *PipelinesService) CancelPipelineBuild(pid interface{}, pipeline int, options ...RequestOptionFunc) (*Pipeline, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/pipelines/%d/cancel", PathEscape(project), pipeline)
-
-	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	p := new(Pipeline)
-	resp, err := s.client.Do(req, p)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return p, resp, nil
-}
-
-// DeletePipeline deletes an existing pipeline.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/pipelines.html#delete-a-pipeline
-func (s *PipelinesService) DeletePipeline(pid interface{}, pipeline int, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/pipelines/%d", PathEscape(project), pipeline)
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/plan_limits.go b/vendor/github.com/xanzy/go-gitlab/plan_limits.go
deleted file mode 100644
index 893ae756eb..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/plan_limits.go
+++ /dev/null
@@ -1,104 +0,0 @@
-//
-// Copyright 2021, Igor Varavko
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package gitlab
-
-import "net/http"
-
-// PlanLimitsService handles communication with the plan limits related
-// methods of the GitLab API.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/plan_limits.html
-type PlanLimitsService struct {
-	client *Client
-}
-
-// PlanLimit represents the limits of a GitLab plan.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/plan_limits.html
-type PlanLimit struct {
-	ConanMaxFileSize           int `json:"conan_max_file_size,omitempty"`
-	GenericPackagesMaxFileSize int `json:"generic_packages_max_file_size,omitempty"`
-	HelmMaxFileSize            int `json:"helm_max_file_size,omitempty"`
-	MavenMaxFileSize           int `json:"maven_max_file_size,omitempty"`
-	NPMMaxFileSize             int `json:"npm_max_file_size,omitempty"`
-	NugetMaxFileSize           int `json:"nuget_max_file_size,omitempty"`
-	PyPiMaxFileSize            int `json:"pypi_max_file_size,omitempty"`
-	TerraformModuleMaxFileSize int `json:"terraform_module_max_file_size,omitempty"`
-}
-
-// GetCurrentPlanLimitsOptions represents the available GetCurrentPlanLimits()
-// options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/plan_limits.html#get-current-plan-limits
-type GetCurrentPlanLimitsOptions struct {
-	PlanName *string `url:"plan_name,omitempty" json:"plan_name,omitempty"`
-}
-
-// GetCurrentPlanLimits lists the current limits of a plan on the GitLab instance.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/plan_limits.html#get-current-plan-limits
-func (s *PlanLimitsService) GetCurrentPlanLimits(opt *GetCurrentPlanLimitsOptions, options ...RequestOptionFunc) (*PlanLimit, *Response, error) {
-	req, err := s.client.NewRequest(http.MethodGet, "application/plan_limits", opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	pl := new(PlanLimit)
-	resp, err := s.client.Do(req, pl)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return pl, resp, nil
-}
-
-// ChangePlanLimitOptions represents the available ChangePlanLimits() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/plan_limits.html#change-plan-limits
-type ChangePlanLimitOptions struct {
-	PlanName                   *string `url:"plan_name,omitempty" json:"plan_name,omitempty"`
-	ConanMaxFileSize           *int    `url:"conan_max_file_size,omitempty" json:"conan_max_file_size,omitempty"`
-	GenericPackagesMaxFileSize *int    `url:"generic_packages_max_file_size,omitempty" json:"generic_packages_max_file_size,omitempty"`
-	HelmMaxFileSize            *int    `url:"helm_max_file_size,omitempty" json:"helm_max_file_size,omitempty"`
-	MavenMaxFileSize           *int    `url:"maven_max_file_size,omitempty" json:"maven_max_file_size,omitempty"`
-	NPMMaxFileSize             *int    `url:"npm_max_file_size,omitempty" json:"npm_max_file_size,omitempty"`
-	NugetMaxFileSize           *int    `url:"nuget_max_file_size,omitempty" json:"nuget_max_file_size,omitempty"`
-	PyPiMaxFileSize            *int    `url:"pypi_max_file_size,omitempty" json:"pypi_max_file_size,omitempty"`
-	TerraformModuleMaxFileSize *int    `url:"terraform_module_max_file_size,omitempty" json:"terraform_module_max_file_size,omitempty"`
-}
-
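Plan limits are instance-wide, so reads and writes go against the application endpoint rather than a project. A reading sketch; the admin token and base URL are placeholders:

package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	// Requires an admin token; self-managed instances also need a base URL.
	client, err := gitlab.NewClient("glpat-admin-token", // placeholder token
		gitlab.WithBaseURL("https://gitlab.example.com/api/v4"))
	if err != nil {
		log.Fatal(err)
	}

	limits, _, err := client.PlanLimits.GetCurrentPlanLimits(
		&gitlab.GetCurrentPlanLimitsOptions{PlanName: gitlab.String("default")})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("maven max file size:", limits.MavenMaxFileSize)
}
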
-// ChangePlanLimits modifies the limits of a plan on the GitLab instance.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/plan_limits.html#change-plan-limits
-func (s *PlanLimitsService) ChangePlanLimits(opt *ChangePlanLimitOptions, options ...RequestOptionFunc) (*PlanLimit, *Response, error) {
-	req, err := s.client.NewRequest(http.MethodPut, "application/plan_limits", opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	pl := new(PlanLimit)
-	resp, err := s.client.Do(req, pl)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return pl, resp, nil
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/project_access_tokens.go b/vendor/github.com/xanzy/go-gitlab/project_access_tokens.go
deleted file mode 100644
index 2d6057e053..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/project_access_tokens.go
+++ /dev/null
@@ -1,200 +0,0 @@
-//
-// Copyright 2021, Patrick Webster
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package gitlab
-
-import (
-	"fmt"
-	"net/http"
-	"time"
-)
-
-// ProjectAccessTokensService handles communication with the
-// project access tokens related methods of the GitLab API.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/project_access_tokens.html
-type ProjectAccessTokensService struct {
-	client *Client
-}
-
-// ProjectAccessToken represents a GitLab project access token.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/project_access_tokens.html
-type ProjectAccessToken struct {
-	ID          int              `json:"id"`
-	UserID      int              `json:"user_id"`
-	Name        string           `json:"name"`
-	Scopes      []string         `json:"scopes"`
-	CreatedAt   *time.Time       `json:"created_at"`
-	LastUsedAt  *time.Time       `json:"last_used_at"`
-	ExpiresAt   *ISOTime         `json:"expires_at"`
-	Active      bool             `json:"active"`
-	Revoked     bool             `json:"revoked"`
-	Token       string           `json:"token"`
-	AccessLevel AccessLevelValue `json:"access_level"`
-}
-
-func (v ProjectAccessToken) String() string {
-	return Stringify(v)
-}
-
-// ListProjectAccessTokensOptions represents the available
-// ListProjectAccessTokens() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/project_access_tokens.html#list-project-access-tokens
-type ListProjectAccessTokensOptions ListOptions
-
-// ListProjectAccessTokens gets a list of all project access tokens in a
-// project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/project_access_tokens.html#list-project-access-tokens
-func (s *ProjectAccessTokensService) ListProjectAccessTokens(pid interface{}, opt *ListProjectAccessTokensOptions, options ...RequestOptionFunc) ([]*ProjectAccessToken, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/access_tokens", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var pats []*ProjectAccessToken
-	resp, err := s.client.Do(req, &pats)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return pats, resp, nil
-}
-
-// GetProjectAccessToken gets a single project access token in a project.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_access_tokens.html#get-a-project-access-token -func (s *ProjectAccessTokensService) GetProjectAccessToken(pid interface{}, id int, options ...RequestOptionFunc) (*ProjectAccessToken, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/access_tokens/%d", PathEscape(project), id) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - pat := new(ProjectAccessToken) - resp, err := s.client.Do(req, &pat) - if err != nil { - return nil, resp, err - } - - return pat, resp, nil -} - -// CreateProjectAccessTokenOptions represents the available -// CreateProjectAccessToken() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_access_tokens.html#create-a-project-access-token -type CreateProjectAccessTokenOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - Scopes *[]string `url:"scopes,omitempty" json:"scopes,omitempty"` - AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` - ExpiresAt *ISOTime `url:"expires_at,omitempty" json:"expires_at,omitempty"` -} - -// CreateProjectAccessToken creates a new project access token. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_access_tokens.html#create-a-project-access-token -func (s *ProjectAccessTokensService) CreateProjectAccessToken(pid interface{}, opt *CreateProjectAccessTokenOptions, options ...RequestOptionFunc) (*ProjectAccessToken, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/access_tokens", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - pat := new(ProjectAccessToken) - resp, err := s.client.Do(req, pat) - if err != nil { - return nil, resp, err - } - - return pat, resp, nil -} - -// RotateProjectAccessTokenOptions represents the available RotateProjectAccessToken() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_access_tokens.html#rotate-a-project-access-token -type RotateProjectAccessTokenOptions struct { - ExpiresAt *ISOTime `url:"expires_at,omitempty" json:"expires_at,omitempty"` -} - -// RotateProjectAccessToken revokes a project access token and returns a new -// project access token that expires in one week by default. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_access_tokens.html#rotate-a-project-access-token -func (s *ProjectAccessTokensService) RotateProjectAccessToken(pid interface{}, id int, opt *RotateProjectAccessTokenOptions, options ...RequestOptionFunc) (*ProjectAccessToken, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/access_tokens/%d/rotate", PathEscape(project), id) - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - pat := new(ProjectAccessToken) - resp, err := s.client.Do(req, pat) - if err != nil { - return nil, resp, err - } - - return pat, resp, nil -} - -// RevokeProjectAccessToken revokes a project access token.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_access_tokens.html#revoke-a-project-access-token -func (s *ProjectAccessTokensService) RevokeProjectAccessToken(pid interface{}, id int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/access_tokens/%d", PathEscape(project), id) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/project_badges.go b/vendor/github.com/xanzy/go-gitlab/project_badges.go deleted file mode 100644 index d5afe1e039..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/project_badges.go +++ /dev/null @@ -1,230 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" -) - -// ProjectBadge represents a project badge. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_badges.html#list-all-badges-of-a-project -type ProjectBadge struct { - ID int `json:"id"` - Name string `json:"name"` - LinkURL string `json:"link_url"` - ImageURL string `json:"image_url"` - RenderedLinkURL string `json:"rendered_link_url"` - RenderedImageURL string `json:"rendered_image_url"` - // Kind represents a project badge kind. It can be empty when the badge -  // was returned by PreviewProjectBadge(). - Kind string `json:"kind"` -} - -// ProjectBadgesService handles communication with the project badges -// related methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/project_badges.html -type ProjectBadgesService struct { - client *Client -} - -// ListProjectBadgesOptions represents the available ListProjectBadges() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_badges.html#list-all-badges-of-a-project -type ListProjectBadgesOptions struct { - ListOptions - Name *string `url:"name,omitempty" json:"name,omitempty"` -} - -// ListProjectBadges gets a list of a project's badges and its group badges. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_badges.html#list-all-badges-of-a-project -func (s *ProjectBadgesService) ListProjectBadges(pid interface{}, opt *ListProjectBadgesOptions, options ...RequestOptionFunc) ([]*ProjectBadge, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/badges", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pb []*ProjectBadge - resp, err := s.client.Do(req, &pb) - if err != nil { - return nil, resp, err - } - - return pb, resp, nil -} - -// GetProjectBadge gets a project badge.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_badges.html#get-a-badge-of-a-project -func (s *ProjectBadgesService) GetProjectBadge(pid interface{}, badge int, options ...RequestOptionFunc) (*ProjectBadge, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/badges/%d", PathEscape(project), badge) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - pb := new(ProjectBadge) - resp, err := s.client.Do(req, pb) - if err != nil { - return nil, resp, err - } - - return pb, resp, nil -} - -// AddProjectBadgeOptions represents the available AddProjectBadge() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_badges.html#add-a-badge-to-a-project -type AddProjectBadgeOptions struct { - LinkURL *string `url:"link_url,omitempty" json:"link_url,omitempty"` - ImageURL *string `url:"image_url,omitempty" json:"image_url,omitempty"` - Name *string `url:"name,omitempty" json:"name,omitempty"` -} - -// AddProjectBadge adds a badge to a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_badges.html#add-a-badge-to-a-project -func (s *ProjectBadgesService) AddProjectBadge(pid interface{}, opt *AddProjectBadgeOptions, options ...RequestOptionFunc) (*ProjectBadge, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/badges", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - pb := new(ProjectBadge) - resp, err := s.client.Do(req, pb) - if err != nil { - return nil, resp, err - } - - return pb, resp, nil -} - -// EditProjectBadgeOptions represents the available EditProjectBadge() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_badges.html#edit-a-badge-of-a-project -type EditProjectBadgeOptions struct { - LinkURL *string `url:"link_url,omitempty" json:"link_url,omitempty"` - ImageURL *string `url:"image_url,omitempty" json:"image_url,omitempty"` - Name *string `url:"name,omitempty" json:"name,omitempty"` -} - -// EditProjectBadge updates a badge of a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_badges.html#edit-a-badge-of-a-project -func (s *ProjectBadgesService) EditProjectBadge(pid interface{}, badge int, opt *EditProjectBadgeOptions, options ...RequestOptionFunc) (*ProjectBadge, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/badges/%d", PathEscape(project), badge) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - pb := new(ProjectBadge) - resp, err := s.client.Do(req, pb) - if err != nil { - return nil, resp, err - } - - return pb, resp, nil -} - -// DeleteProjectBadge removes a badge from a project. Only the project's own -// badges will be removed by this endpoint; group badges are not affected.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_badges.html#remove-a-badge-from-a-project -func (s *ProjectBadgesService) DeleteProjectBadge(pid interface{}, badge int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/badges/%d", PathEscape(project), badge) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// ProjectBadgePreviewOptions represents the available PreviewProjectBadge() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_badges.html#preview-a-badge-from-a-project -type ProjectBadgePreviewOptions struct { - LinkURL *string `url:"link_url,omitempty" json:"link_url,omitempty"` - ImageURL *string `url:"image_url,omitempty" json:"image_url,omitempty"` -} - -// PreviewProjectBadge returns the final link_url and image_url URLs that would -// result after resolving the placeholder interpolation. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_badges.html#preview-a-badge-from-a-project -func (s *ProjectBadgesService) PreviewProjectBadge(pid interface{}, opt *ProjectBadgePreviewOptions, options ...RequestOptionFunc) (*ProjectBadge, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/badges/render", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - pb := new(ProjectBadge) - resp, err := s.client.Do(req, &pb) - if err != nil { - return nil, resp, err - } - - return pb, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/project_clusters.go b/vendor/github.com/xanzy/go-gitlab/project_clusters.go deleted file mode 100644 index 792a69b613..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/project_clusters.go +++ /dev/null @@ -1,236 +0,0 @@ -// -// Copyright 2021, Matej Velikonja -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// ProjectClustersService handles communication with the -// project clusters related methods of the GitLab API. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_clusters.html -type ProjectClustersService struct { - client *Client -} - -// ProjectCluster represents a GitLab Project Cluster.
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/project_clusters.html -type ProjectCluster struct { - ID int `json:"id"` - Name string `json:"name"` - Domain string `json:"domain"` - CreatedAt *time.Time `json:"created_at"` - ProviderType string `json:"provider_type"` - PlatformType string `json:"platform_type"` - EnvironmentScope string `json:"environment_scope"` - ClusterType string `json:"cluster_type"` - User *User `json:"user"` - PlatformKubernetes *PlatformKubernetes `json:"platform_kubernetes"` - ManagementProject *ManagementProject `json:"management_project"` - Project *Project `json:"project"` -} - -func (v ProjectCluster) String() string { - return Stringify(v) -} - -// PlatformKubernetes represents a GitLab Project Cluster PlatformKubernetes. -type PlatformKubernetes struct { - APIURL string `json:"api_url"` - Token string `json:"token"` - CaCert string `json:"ca_cert"` - Namespace string `json:"namespace"` - AuthorizationType string `json:"authorization_type"` -} - -// ManagementProject represents a GitLab Project Cluster management_project. -type ManagementProject struct { - ID int `json:"id"` - Description string `json:"description"` - Name string `json:"name"` - NameWithNamespace string `json:"name_with_namespace"` - Path string `json:"path"` - PathWithNamespace string `json:"path_with_namespace"` - CreatedAt *time.Time `json:"created_at"` -} - -// ListClusters gets a list of all clusters in a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_clusters.html#list-project-clusters -func (s *ProjectClustersService) ListClusters(pid interface{}, options ...RequestOptionFunc) ([]*ProjectCluster, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/clusters", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var pcs []*ProjectCluster - resp, err := s.client.Do(req, &pcs) - if err != nil { - return nil, resp, err - } - - return pcs, resp, nil -} - -// GetCluster gets a cluster. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_clusters.html#get-a-single-project-cluster -func (s *ProjectClustersService) GetCluster(pid interface{}, cluster int, options ...RequestOptionFunc) (*ProjectCluster, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/clusters/%d", PathEscape(project), cluster) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - pc := new(ProjectCluster) - resp, err := s.client.Do(req, &pc) - if err != nil { - return nil, resp, err - } - - return pc, resp, nil -} - -// AddClusterOptions represents the available AddCluster() options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_clusters.html#add-existing-cluster-to-project -type AddClusterOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - Domain *string `url:"domain,omitempty" json:"domain,omitempty"` - Enabled *bool `url:"enabled,omitempty" json:"enabled,omitempty"` - Managed *bool `url:"managed,omitempty" json:"managed,omitempty"` - EnvironmentScope *string `url:"environment_scope,omitempty" json:"environment_scope,omitempty"` - PlatformKubernetes *AddPlatformKubernetesOptions `url:"platform_kubernetes_attributes,omitempty" json:"platform_kubernetes_attributes,omitempty"` - ManagementProjectID *string `url:"management_project_id,omitempty" json:"management_project_id,omitempty"` -} - -// AddPlatformKubernetesOptions represents the available PlatformKubernetes options for adding. -type AddPlatformKubernetesOptions struct { - APIURL *string `url:"api_url,omitempty" json:"api_url,omitempty"` - Token *string `url:"token,omitempty" json:"token,omitempty"` - CaCert *string `url:"ca_cert,omitempty" json:"ca_cert,omitempty"` - Namespace *string `url:"namespace,omitempty" json:"namespace,omitempty"` - AuthorizationType *string `url:"authorization_type,omitempty" json:"authorization_type,omitempty"` -} - -// AddCluster adds an existing cluster to the project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_clusters.html#add-existing-cluster-to-project -func (s *ProjectClustersService) AddCluster(pid interface{}, opt *AddClusterOptions, options ...RequestOptionFunc) (*ProjectCluster, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/clusters/user", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - pc := new(ProjectCluster) - resp, err := s.client.Do(req, pc) - if err != nil { - return nil, resp, err - } - - return pc, resp, nil -} - -// EditClusterOptions represents the available EditCluster() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_clusters.html#edit-project-cluster -type EditClusterOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - Domain *string `url:"domain,omitempty" json:"domain,omitempty"` - EnvironmentScope *string `url:"environment_scope,omitempty" json:"environment_scope,omitempty"` - ManagementProjectID *string `url:"management_project_id,omitempty" json:"management_project_id,omitempty"` - PlatformKubernetes *EditPlatformKubernetesOptions `url:"platform_kubernetes_attributes,omitempty" json:"platform_kubernetes_attributes,omitempty"` -} - -// EditPlatformKubernetesOptions represents the available PlatformKubernetes options for editing. -type EditPlatformKubernetesOptions struct { - APIURL *string `url:"api_url,omitempty" json:"api_url,omitempty"` - Token *string `url:"token,omitempty" json:"token,omitempty"` - CaCert *string `url:"ca_cert,omitempty" json:"ca_cert,omitempty"` - Namespace *string `url:"namespace,omitempty" json:"namespace,omitempty"` -} - -// EditCluster updates an existing project cluster. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_clusters.html#edit-project-cluster -func (s *ProjectClustersService) EditCluster(pid interface{}, cluster int, opt *EditClusterOptions, options ...RequestOptionFunc) (*ProjectCluster, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/clusters/%d", PathEscape(project), cluster) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - pc := new(ProjectCluster) - resp, err := s.client.Do(req, pc) - if err != nil { - return nil, resp, err - } - - return pc, resp, nil -} - -// DeleteCluster deletes an existing project cluster. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_clusters.html#delete-project-cluster -func (s *ProjectClustersService) DeleteCluster(pid interface{}, cluster int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/clusters/%d", PathEscape(project), cluster) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/project_feature_flags.go b/vendor/github.com/xanzy/go-gitlab/project_feature_flags.go deleted file mode 100644 index 98c358868c..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/project_feature_flags.go +++ /dev/null @@ -1,246 +0,0 @@ -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// ProjectFeatureFlagService handles operations on GitLab project feature -// flags. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/feature_flags.html -type ProjectFeatureFlagService struct { - client *Client -} - -// ProjectFeatureFlag represents a GitLab project feature flag. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/feature_flags.html -type ProjectFeatureFlag struct { - Name string `json:"name"` - Description string `json:"description"` - Active bool `json:"active"` - Version string `json:"version"` - CreatedAt *time.Time `json:"created_at"` - UpdatedAt *time.Time `json:"updated_at"` - Scopes []*ProjectFeatureFlagScope `json:"scopes"` - Strategies []*ProjectFeatureFlagStrategy `json:"strategies"` -} - -// ProjectFeatureFlagScope defines the scopes of a feature flag -// -// GitLab API docs: https://docs.gitlab.com/ee/api/feature_flags.html -type ProjectFeatureFlagScope struct { - ID int `json:"id"` - EnvironmentScope string `json:"environment_scope"` -} - -// ProjectFeatureFlagStrategy defines the strategy used for a feature flag -// -// GitLab API docs: https://docs.gitlab.com/ee/api/feature_flags.html -type ProjectFeatureFlagStrategy struct { - ID int `json:"id"` - Name string `json:"name"` - Parameters *ProjectFeatureFlagStrategyParameter `json:"parameters"` - Scopes []*ProjectFeatureFlagScope `json:"scopes"` -} - -// ProjectFeatureFlagStrategyParameter is used in updating and creating feature flags -// -// GitLab API docs: https://docs.gitlab.com/ee/api/feature_flags.html -type ProjectFeatureFlagStrategyParameter struct { - GroupID string `json:"groupId,omitempty"` - UserIDs string `json:"userIds,omitempty"` - Percentage string `json:"percentage,omitempty"` - - // The following fields aren't documented in the GitLab API docs, - // but are present in the GitLab API since 13.5.
- // Docs: https://docs.getunleash.io/reference/activation-strategies#gradual-rollout - Rollout string `json:"rollout,omitempty"` - Stickiness string `json:"stickiness,omitempty"` -} - -func (i ProjectFeatureFlag) String() string { - return Stringify(i) -} - -// ListProjectFeatureFlagOptions contains the options for ListProjectFeatureFlags -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/feature_flags.html#list-feature-flags-for-a-project -type ListProjectFeatureFlagOptions struct { - ListOptions - Scope *string `url:"scope,omitempty" json:"scope,omitempty"` -} - -// ListProjectFeatureFlags returns a list with the feature flags of a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/feature_flags.html#list-feature-flags-for-a-project -func (s *ProjectFeatureFlagService) ListProjectFeatureFlags(pid interface{}, opt *ListProjectFeatureFlagOptions, options ...RequestOptionFunc) ([]*ProjectFeatureFlag, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/feature_flags", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pffs []*ProjectFeatureFlag - resp, err := s.client.Do(req, &pffs) - if err != nil { - return nil, resp, err - } - - return pffs, resp, nil -} - -// GetProjectFeatureFlag gets a single feature flag for the specified project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/feature_flags.html#get-a-single-feature-flag -func (s *ProjectFeatureFlagService) GetProjectFeatureFlag(pid interface{}, name string, options ...RequestOptionFunc) (*ProjectFeatureFlag, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/feature_flags/%s", PathEscape(project), name) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - flag := new(ProjectFeatureFlag) - resp, err := s.client.Do(req, flag) - if err != nil { - return nil, resp, err - } - - return flag, resp, nil -} - -// CreateProjectFeatureFlagOptions represents the available -// CreateProjectFeatureFlag() options. -// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/feature_flags.html#create-a-feature-flag -type CreateProjectFeatureFlagOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - Version *string `url:"version,omitempty" json:"version,omitempty"` - Active *bool `url:"active,omitempty" json:"active,omitempty"` - Strategies *[]*FeatureFlagStrategyOptions `url:"strategies,omitempty" json:"strategies,omitempty"` -} - -// FeatureFlagStrategyOptions represents the available feature flag strategy -// options. -// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/feature_flags.html#create-a-feature-flag -type FeatureFlagStrategyOptions struct { - ID *int `url:"id,omitempty" json:"id,omitempty"` - Name *string `url:"name,omitempty" json:"name,omitempty"` - Parameters *ProjectFeatureFlagStrategyParameter `url:"parameters,omitempty" json:"parameters,omitempty"` - Scopes *[]*ProjectFeatureFlagScope `url:"scopes,omitempty" json:"scopes,omitempty"` -} - -// ProjectFeatureFlagScopeOptions represents the available feature flag scope -// options. 
-// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/feature_flags.html#create-a-feature-flag -type ProjectFeatureFlagScopeOptions struct { - ID *int `url:"id,omitempty" json:"id,omitempty"` - EnvironmentScope *string `url:"environment_scope,omitempty" json:"environment_scope,omitempty"` -} - -// CreateProjectFeatureFlag creates a feature flag. -// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/feature_flags.html#create-a-feature-flag -func (s *ProjectFeatureFlagService) CreateProjectFeatureFlag(pid interface{}, opt *CreateProjectFeatureFlagOptions, options ...RequestOptionFunc) (*ProjectFeatureFlag, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/feature_flags", - PathEscape(project), - ) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - flag := new(ProjectFeatureFlag) - resp, err := s.client.Do(req, flag) - if err != nil { - return flag, resp, err - } - - return flag, resp, nil -} - -// UpdateProjectFeatureFlagOptions represents the available -// UpdateProjectFeatureFlag() options. -// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/feature_flags.html#update-a-feature-flag -type UpdateProjectFeatureFlagOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - Active *bool `url:"active,omitempty" json:"active,omitempty"` - Strategies *[]*FeatureFlagStrategyOptions `url:"strategies,omitempty" json:"strategies,omitempty"` -} - -// UpdateProjectFeatureFlag updates a feature flag. -// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/feature_flags.html#update-a-feature-flag -func (s *ProjectFeatureFlagService) UpdateProjectFeatureFlag(pid interface{}, name string, opt *UpdateProjectFeatureFlagOptions, options ...RequestOptionFunc) (*ProjectFeatureFlag, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/feature_flags/%s", - PathEscape(project), - name, - ) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - flag := new(ProjectFeatureFlag) - resp, err := s.client.Do(req, flag) - if err != nil { - return flag, resp, err - } - - return flag, resp, nil -} - -// DeleteProjectFeatureFlag deletes a feature flag. -// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/feature_flags.html#delete-a-feature-flag -func (s *ProjectFeatureFlagService) DeleteProjectFeatureFlag(pid interface{}, name string, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/feature_flags/%s", PathEscape(project), name) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/project_import_export.go b/vendor/github.com/xanzy/go-gitlab/project_import_export.go deleted file mode 100644 index 266be839a6..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/project_import_export.go +++ /dev/null @@ -1,225 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "bytes" - "fmt" - "io" - "net/http" - "time" -) - -// ProjectImportExportService handles communication with the project -// import/export related methods of the GitLab API. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_import_export.html -type ProjectImportExportService struct { - client *Client -} - -// ImportStatus represents a project import status. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_import_export.html#import-status -type ImportStatus struct { - ID int `json:"id"` - Description string `json:"description"` - Name string `json:"name"` - NameWithNamespace string `json:"name_with_namespace"` - Path string `json:"path"` - PathWithNamespace string `json:"path_with_namespace"` - CreatedAt *time.Time `json:"created_at"` - ImportStatus string `json:"import_status"` - ImportType string `json:"import_type"` - CorrelationID string `json:"correlation_id"` - ImportError string `json:"import_error"` -} - -func (s ImportStatus) String() string { - return Stringify(s) -} - -// ExportStatus represents a project export status. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_import_export.html#export-status -type ExportStatus struct { - ID int `json:"id"` - Description string `json:"description"` - Name string `json:"name"` - NameWithNamespace string `json:"name_with_namespace"` - Path string `json:"path"` - PathWithNamespace string `json:"path_with_namespace"` - CreatedAt *time.Time `json:"created_at"` - ExportStatus string `json:"export_status"` - Message string `json:"message"` - Links struct { - APIURL string `json:"api_url"` - WebURL string `json:"web_url"` - } `json:"_links"` -} - -func (s ExportStatus) String() string { - return Stringify(s) -} - -// ScheduleExportOptions represents the available ScheduleExport() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_import_export.html#schedule-an-export -type ScheduleExportOptions struct { - Description *string `url:"description,omitempty" json:"description,omitempty"` - Upload struct { - URL *string `url:"url,omitempty" json:"url,omitempty"` - HTTPMethod *string `url:"http_method,omitempty" json:"http_method,omitempty"` - } `url:"upload,omitempty" json:"upload,omitempty"` -} - -// ScheduleExport schedules a project export. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_import_export.html#schedule-an-export -func (s *ProjectImportExportService) ScheduleExport(pid interface{}, opt *ScheduleExportOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/export", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// ExportStatus gets the status of an export.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_import_export.html#export-status -func (s *ProjectImportExportService) ExportStatus(pid interface{}, options ...RequestOptionFunc) (*ExportStatus, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/export", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - es := new(ExportStatus) - resp, err := s.client.Do(req, es) - if err != nil { - return nil, resp, err - } - - return es, resp, nil -} - -// ExportDownload downloads the finished export. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_import_export.html#export-download -func (s *ProjectImportExportService) ExportDownload(pid interface{}, options ...RequestOptionFunc) ([]byte, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/export/download", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var b bytes.Buffer - resp, err := s.client.Do(req, &b) - if err != nil { - return nil, resp, err - } - - return b.Bytes(), resp, err -} - -// ImportFileOptions represents the available ImportFile() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_import_export.html#import-a-file -type ImportFileOptions struct { - Namespace *string `url:"namespace,omitempty" json:"namespace,omitempty"` - Name *string `url:"name,omitempty" json:"name,omitempty"` - Path *string `url:"path,omitempty" json:"path,omitempty"` - Overwrite *bool `url:"overwrite,omitempty" json:"overwrite,omitempty"` - OverrideParams *CreateProjectOptions `url:"override_params,omitempty" json:"override_params,omitempty"` -} - -// ImportFromFile imports a project from an archive file. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_import_export.html#import-a-file -func (s *ProjectImportExportService) ImportFromFile(archive io.Reader, opt *ImportFileOptions, options ...RequestOptionFunc) (*ImportStatus, *Response, error) { - req, err := s.client.UploadRequest( - http.MethodPost, - "projects/import", - archive, - "archive.tar.gz", - UploadFile, - opt, - options, - ) - if err != nil { - return nil, nil, err - } - - is := new(ImportStatus) - resp, err := s.client.Do(req, is) - if err != nil { - return nil, resp, err - } - - return is, resp, nil -} - -// ImportStatus gets the status of an import.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_import_export.html#import-status -func (s *ProjectImportExportService) ImportStatus(pid interface{}, options ...RequestOptionFunc) (*ImportStatus, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/import", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - is := new(ImportStatus) - resp, err := s.client.Do(req, is) - if err != nil { - return nil, resp, err - } - - return is, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/project_iterations.go b/vendor/github.com/xanzy/go-gitlab/project_iterations.go deleted file mode 100644 index 1fe0ddf811..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/project_iterations.go +++ /dev/null @@ -1,90 +0,0 @@ -// -// Copyright 2022, Daniel Steinke -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// ProjectIterationsService handles communication with the project iterations -// related methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/iterations.html -type ProjectIterationsService struct { - client *Client -} - -// ProjectIteration represents a GitLab project iteration. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/iterations.html -type ProjectIteration struct { - ID int `json:"id"` - IID int `json:"iid"` - Sequence int `json:"sequence"` - GroupID int `json:"group_id"` - Title string `json:"title"` - Description string `json:"description"` - State int `json:"state"` - CreatedAt *time.Time `json:"created_at"` - UpdatedAt *time.Time `json:"updated_at"` - DueDate *ISOTime `json:"due_date"` - StartDate *ISOTime `json:"start_date"` - WebURL string `json:"web_url"` -} - -func (i ProjectIteration) String() string { - return Stringify(i) -} - -// ListProjectIterationsOptions contains the available ListProjectIterations() -// options -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/iterations.html#list-project-iterations -type ListProjectIterationsOptions struct { - ListOptions - State *string `url:"state,omitempty" json:"state,omitempty"` - Search *string `url:"search,omitempty" json:"search,omitempty"` - IncludeAncestors *bool `url:"include_ancestors,omitempty" json:"include_ancestors,omitempty"` -} - -// ListProjectIterations returns a list of project iterations.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/iterations.html#list-project-iterations -func (i *ProjectIterationsService) ListProjectIterations(pid interface{}, opt *ListProjectIterationsOptions, options ...RequestOptionFunc) ([]*ProjectIteration, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/iterations", PathEscape(project)) - - req, err := i.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pis []*ProjectIteration - resp, err := i.client.Do(req, &pis) - if err != nil { - return nil, resp, err - } - - return pis, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/project_managed_licenses.go b/vendor/github.com/xanzy/go-gitlab/project_managed_licenses.go deleted file mode 100644 index d6f23f2d9a..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/project_managed_licenses.go +++ /dev/null @@ -1,188 +0,0 @@ -// -// Copyright 2021, Andrea Perizzato -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" -) - -// ManagedLicensesService handles communication with the managed licenses -// methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/managed_licenses.html -type ManagedLicensesService struct { - client *Client -} - -// ManagedLicense represents a managed license. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/managed_licenses.html -type ManagedLicense struct { - ID int `json:"id"` - Name string `json:"name"` - ApprovalStatus LicenseApprovalStatusValue `json:"approval_status"` -} - -// ListManagedLicenses returns a list of managed licenses from a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/managed_licenses.html#list-managed-licenses -func (s *ManagedLicensesService) ListManagedLicenses(pid interface{}, options ...RequestOptionFunc) ([]*ManagedLicense, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/managed_licenses", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var mls []*ManagedLicense - resp, err := s.client.Do(req, &mls) - if err != nil { - return nil, resp, err - } - - return mls, resp, nil -} - -// GetManagedLicense returns an existing managed license. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/managed_licenses.html#show-an-existing-managed-license -func (s *ManagedLicensesService) GetManagedLicense(pid, mlid interface{}, options ...RequestOptionFunc) (*ManagedLicense, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - license, err := parseID(mlid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/managed_licenses/%s", PathEscape(project), PathEscape(license)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ml := new(ManagedLicense) - resp, err := s.client.Do(req, ml) - if err != nil { - return nil, resp, err - } - - return ml, resp, nil -} - -// AddManagedLicenseOptions represents the available AddManagedLicense() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/managed_licenses.html#create-a-new-managed-license -type AddManagedLicenseOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - ApprovalStatus *LicenseApprovalStatusValue `url:"approval_status,omitempty" json:"approval_status,omitempty"` -} - -// AddManagedLicense adds a managed license to a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/managed_licenses.html#create-a-new-managed-license -func (s *ManagedLicensesService) AddManagedLicense(pid interface{}, opt *AddManagedLicenseOptions, options ...RequestOptionFunc) (*ManagedLicense, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/managed_licenses", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - ml := new(ManagedLicense) - resp, err := s.client.Do(req, ml) - if err != nil { - return nil, resp, err - } - - return ml, resp, nil -} - -// DeleteManagedLicense deletes a managed license with a given ID. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/managed_licenses.html#delete-a-managed-license -func (s *ManagedLicensesService) DeleteManagedLicense(pid, mlid interface{}, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - license, err := parseID(mlid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/managed_licenses/%s", PathEscape(project), PathEscape(license)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// EditManagedLicenceOptions represents the available EditManagedLicense() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/managed_licenses.html#edit-an-existing-managed-license -type EditManagedLicenceOptions struct { - ApprovalStatus *LicenseApprovalStatusValue `url:"approval_status,omitempty" json:"approval_status,omitempty"` -} - -// EditManagedLicense updates an existing managed license with a new approval -// status. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/managed_licenses.html#edit-an-existing-managed-license -func (s *ManagedLicensesService) EditManagedLicense(pid, mlid interface{}, opt *EditManagedLicenceOptions, options ...RequestOptionFunc) (*ManagedLicense, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - license, err := parseID(mlid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/managed_licenses/%s", PathEscape(project), PathEscape(license)) - - req, err := s.client.NewRequest(http.MethodPatch, u, opt, options) - if err != nil { - return nil, nil, err - } - - ml := new(ManagedLicense) - resp, err := s.client.Do(req, ml) - if err != nil { - return nil, resp, err - } - - return ml, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/project_members.go b/vendor/github.com/xanzy/go-gitlab/project_members.go deleted file mode 100644 index 37d4b8a2e6..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/project_members.go +++ /dev/null @@ -1,238 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" -) - -// ProjectMembersService handles communication with the project members -// related methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/members.html -type ProjectMembersService struct { - client *Client -} - -// ListProjectMembersOptions represents the available ListProjectMembers() and -// ListAllProjectMembers() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/members.html#list-all-members-of-a-group-or-project -type ListProjectMembersOptions struct { - ListOptions - Query *string `url:"query,omitempty" json:"query,omitempty"` - UserIDs *[]int `url:"user_ids[],omitempty" json:"user_ids,omitempty"` -} - -// ListProjectMembers gets a list of a project's team members viewable by the -// authenticated user. Returns only direct members and not inherited members -// through ancestors groups. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/members.html#list-all-members-of-a-group-or-project -func (s *ProjectMembersService) ListProjectMembers(pid interface{}, opt *ListProjectMembersOptions, options ...RequestOptionFunc) ([]*ProjectMember, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/members", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pm []*ProjectMember - resp, err := s.client.Do(req, &pm) - if err != nil { - return nil, resp, err - } - - return pm, resp, nil -} - -// ListAllProjectMembers gets a list of a project's team members viewable by the -// authenticated user. Returns a list including inherited members through -// ancestor groups. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/members.html#list-all-members-of-a-group-or-project-including-inherited-and-invited-members -func (s *ProjectMembersService) ListAllProjectMembers(pid interface{}, opt *ListProjectMembersOptions, options ...RequestOptionFunc) ([]*ProjectMember, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/members/all", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pm []*ProjectMember - resp, err := s.client.Do(req, &pm) - if err != nil { - return nil, resp, err - } - - return pm, resp, nil -} - -// GetProjectMember gets a project team member. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/members.html#get-a-member-of-a-group-or-project -func (s *ProjectMembersService) GetProjectMember(pid interface{}, user int, options ...RequestOptionFunc) (*ProjectMember, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/members/%d", PathEscape(project), user) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - pm := new(ProjectMember) - resp, err := s.client.Do(req, pm) - if err != nil { - return nil, resp, err - } - - return pm, resp, nil -} - -// GetInheritedProjectMember gets a project team member, including inherited -// and invited members. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/members.html#get-a-member-of-a-group-or-project-including-inherited-and-invited-members -func (s *ProjectMembersService) GetInheritedProjectMember(pid interface{}, user int, options ...RequestOptionFunc) (*ProjectMember, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/members/all/%d", PathEscape(project), user) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - pm := new(ProjectMember) - resp, err := s.client.Do(req, pm) - if err != nil { - return nil, resp, err - } - - return pm, resp, nil -} - -// AddProjectMemberOptions represents the available AddProjectMember() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/members.html#add-a-member-to-a-group-or-project -type AddProjectMemberOptions struct { - UserID interface{} `url:"user_id,omitempty" json:"user_id,omitempty"` - AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` - ExpiresAt *string `url:"expires_at,omitempty" json:"expires_at"` - MemberRoleID *int `url:"member_role_id,omitempty" json:"member_role_id,omitempty"` -} - -// AddProjectMember adds a user to a project team. This is an idempotent -// method and can be called multiple times with the same parameters. Adding -// team membership to a user that is already a member does not affect the -// existing membership.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/members.html#add-a-member-to-a-group-or-project -func (s *ProjectMembersService) AddProjectMember(pid interface{}, opt *AddProjectMemberOptions, options ...RequestOptionFunc) (*ProjectMember, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/members", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - pm := new(ProjectMember) - resp, err := s.client.Do(req, pm) - if err != nil { - return nil, resp, err - } - - return pm, resp, nil -} - -// EditProjectMemberOptions represents the available EditProjectMember() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/members.html#edit-a-member-of-a-group-or-project -type EditProjectMemberOptions struct { - AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` - ExpiresAt *string `url:"expires_at,omitempty" json:"expires_at,omitempty"` - MemberRoleID *int `url:"member_role_id,omitempty" json:"member_role_id,omitempty"` -} - -// EditProjectMember updates a project team member to a specified access level. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/members.html#edit-a-member-of-a-group-or-project -func (s *ProjectMembersService) EditProjectMember(pid interface{}, user int, opt *EditProjectMemberOptions, options ...RequestOptionFunc) (*ProjectMember, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/members/%d", PathEscape(project), user) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - pm := new(ProjectMember) - resp, err := s.client.Do(req, pm) - if err != nil { - return nil, resp, err - } - - return pm, resp, nil -} - -// DeleteProjectMember removes a user from a project team. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/members.html#remove-a-member-from-a-group-or-project -func (s *ProjectMembersService) DeleteProjectMember(pid interface{}, user int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/members/%d", PathEscape(project), user) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/project_mirror.go b/vendor/github.com/xanzy/go-gitlab/project_mirror.go deleted file mode 100644 index 16f030d39c..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/project_mirror.go +++ /dev/null @@ -1,195 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
-// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// ProjectMirrorService handles communication with the project mirror -// related methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/remote_mirrors.html -type ProjectMirrorService struct { - client *Client -} - -// ProjectMirror represents a project mirror configuration. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/remote_mirrors.html -type ProjectMirror struct { - Enabled bool `json:"enabled"` - ID int `json:"id"` - LastError string `json:"last_error"` - LastSuccessfulUpdateAt *time.Time `json:"last_successful_update_at"` - LastUpdateAt *time.Time `json:"last_update_at"` - LastUpdateStartedAt *time.Time `json:"last_update_started_at"` - MirrorBranchRegex string `json:"mirror_branch_regex"` - OnlyProtectedBranches bool `json:"only_protected_branches"` - KeepDivergentRefs bool `json:"keep_divergent_refs"` - UpdateStatus string `json:"update_status"` - URL string `json:"url"` -} - -// ListProjectMirrorOptions represents the available ListProjectMirror() options. -type ListProjectMirrorOptions ListOptions - -// ListProjectMirror gets a list of mirrors configured on the project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/remote_mirrors.html#list-a-projects-remote-mirrors -func (s *ProjectMirrorService) ListProjectMirror(pid interface{}, opt *ListProjectMirrorOptions, options ...RequestOptionFunc) ([]*ProjectMirror, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/remote_mirrors", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pm []*ProjectMirror - resp, err := s.client.Do(req, &pm) - if err != nil { - return nil, resp, err - } - - return pm, resp, nil -} - -// GetProjectMirror gets a single mirror configured on the project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/remote_mirrors.html#get-a-single-projects-remote-mirror -func (s *ProjectMirrorService) GetProjectMirror(pid interface{}, mirror int, options ...RequestOptionFunc) (*ProjectMirror, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/remote_mirrors/%d", PathEscape(project), mirror) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - pm := new(ProjectMirror) - resp, err := s.client.Do(req, &pm) - if err != nil { - return nil, resp, err - } - - return pm, resp, nil -} - -// AddProjectMirrorOptions contains the properties required to create -// a new project mirror. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/remote_mirrors.html#create-a-push-mirror -type AddProjectMirrorOptions struct { - URL *string `url:"url,omitempty" json:"url,omitempty"` - Enabled *bool `url:"enabled,omitempty" json:"enabled,omitempty"` - KeepDivergentRefs *bool `url:"keep_divergent_refs,omitempty" json:"keep_divergent_refs,omitempty"` - OnlyProtectedBranches *bool `url:"only_protected_branches,omitempty" json:"only_protected_branches,omitempty"` - MirrorBranchRegex *string `url:"mirror_branch_regex,omitempty" json:"mirror_branch_regex,omitempty"` -} - -// AddProjectMirror creates a new mirror on the project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/remote_mirrors.html#create-a-push-mirror
-func (s *ProjectMirrorService) AddProjectMirror(pid interface{}, opt *AddProjectMirrorOptions, options ...RequestOptionFunc) (*ProjectMirror, *Response, error) {
-    project, err := parseID(pid)
-    if err != nil {
-        return nil, nil, err
-    }
-    u := fmt.Sprintf("projects/%s/remote_mirrors", PathEscape(project))
-
-    req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    pm := new(ProjectMirror)
-    resp, err := s.client.Do(req, pm)
-    if err != nil {
-        return nil, resp, err
-    }
-
-    return pm, resp, nil
-}
-
-// EditProjectMirrorOptions contains the properties required to edit
-// an existing project mirror.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/remote_mirrors.html#update-a-remote-mirrors-attributes
-type EditProjectMirrorOptions struct {
-    Enabled               *bool   `url:"enabled,omitempty" json:"enabled,omitempty"`
-    KeepDivergentRefs     *bool   `url:"keep_divergent_refs,omitempty" json:"keep_divergent_refs,omitempty"`
-    OnlyProtectedBranches *bool   `url:"only_protected_branches,omitempty" json:"only_protected_branches,omitempty"`
-    MirrorBranchRegex     *string `url:"mirror_branch_regex,omitempty" json:"mirror_branch_regex,omitempty"`
-}
-
-// EditProjectMirror updates an existing project mirror.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/remote_mirrors.html#update-a-remote-mirrors-attributes
-func (s *ProjectMirrorService) EditProjectMirror(pid interface{}, mirror int, opt *EditProjectMirrorOptions, options ...RequestOptionFunc) (*ProjectMirror, *Response, error) {
-    project, err := parseID(pid)
-    if err != nil {
-        return nil, nil, err
-    }
-    u := fmt.Sprintf("projects/%s/remote_mirrors/%d", PathEscape(project), mirror)
-
-    req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    pm := new(ProjectMirror)
-    resp, err := s.client.Do(req, pm)
-    if err != nil {
-        return nil, resp, err
-    }
-
-    return pm, resp, nil
-}
-
-// DeleteProjectMirror deletes a project mirror.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/remote_mirrors.html#delete-a-remote-mirror
-func (s *ProjectMirrorService) DeleteProjectMirror(pid interface{}, mirror int, options ...RequestOptionFunc) (*Response, error) {
-    project, err := parseID(pid)
-    if err != nil {
-        return nil, err
-    }
-    u := fmt.Sprintf("projects/%s/remote_mirrors/%d", PathEscape(project), mirror)
-
-    req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-    if err != nil {
-        return nil, err
-    }
-
-    return s.client.Do(req, nil)
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/project_repository_storage_move.go b/vendor/github.com/xanzy/go-gitlab/project_repository_storage_move.go
deleted file mode 100644
index 3beecb1f75..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/project_repository_storage_move.go
+++ /dev/null
@@ -1,199 +0,0 @@
-//
-// Copyright 2023, Nick Westbury
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// ProjectRepositoryStorageMoveService handles communication with the -// repositories related methods of the GitLab API. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html -type ProjectRepositoryStorageMoveService struct { - client *Client -} - -// ProjectRepositoryStorageMove represents the status of a repository move. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html -type ProjectRepositoryStorageMove struct { - ID int `json:"id"` - CreatedAt *time.Time `json:"created_at"` - State string `json:"state"` - SourceStorageName string `json:"source_storage_name"` - DestinationStorageName string `json:"destination_storage_name"` - Project *RepositoryProject `json:"project"` -} - -type RepositoryProject struct { - ID int `json:"id"` - Description string `json:"description"` - Name string `json:"name"` - NameWithNamespace string `json:"name_with_namespace"` - Path string `json:"path"` - PathWithNamespace string `json:"path_with_namespace"` - CreatedAt *time.Time `json:"created_at"` -} - -// RetrieveAllProjectStorageMovesOptions represents the available -// RetrieveAllStorageMoves() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html#retrieve-all-project-repository-storage-moves -type RetrieveAllProjectStorageMovesOptions ListOptions - -// RetrieveAllStorageMoves retrieves all project repository storage moves -// accessible by the authenticated user. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html#retrieve-all-project-repository-storage-moves -func (p ProjectRepositoryStorageMoveService) RetrieveAllStorageMoves(opts RetrieveAllProjectStorageMovesOptions, options ...RequestOptionFunc) ([]*ProjectRepositoryStorageMove, *Response, error) { - req, err := p.client.NewRequest(http.MethodGet, "project_repository_storage_moves", opts, options) - if err != nil { - return nil, nil, err - } - - var psms []*ProjectRepositoryStorageMove - resp, err := p.client.Do(req, &psms) - if err != nil { - return nil, resp, err - } - - return psms, resp, err -} - -// RetrieveAllStorageMovesForProject retrieves all repository storage moves for -// a single project accessible by the authenticated user. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html#retrieve-all-repository-storage-moves-for-a-project -func (p ProjectRepositoryStorageMoveService) RetrieveAllStorageMovesForProject(project int, opts RetrieveAllProjectStorageMovesOptions, options ...RequestOptionFunc) ([]*ProjectRepositoryStorageMove, *Response, error) { - u := fmt.Sprintf("projects/%d/repository_storage_moves", project) - - req, err := p.client.NewRequest(http.MethodGet, u, opts, options) - if err != nil { - return nil, nil, err - } - - var psms []*ProjectRepositoryStorageMove - resp, err := p.client.Do(req, &psms) - if err != nil { - return nil, resp, err - } - - return psms, resp, err -} - -// GetStorageMove gets a single project repository storage move. 
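A minimal sketch of paging through the storage-move listing shown above. The ProjectRepositoryStorageMove client field name is assumed to match upstream go-gitlab, and the token and page sizes are illustrative:

    package main

    import (
        "fmt"
        "log"

        gitlab "github.com/xanzy/go-gitlab"
    )

    func main() {
        git, err := gitlab.NewClient("glpat-example-token") // placeholder token
        if err != nil {
            log.Fatal(err)
        }

        // Page through every storage move visible to the authenticated user.
        opts := gitlab.RetrieveAllProjectStorageMovesOptions{PerPage: 20, Page: 1}
        for {
            moves, resp, err := git.ProjectRepositoryStorageMove.RetrieveAllStorageMoves(opts)
            if err != nil {
                log.Fatal(err)
            }
            for _, m := range moves {
                fmt.Printf("move %d: %s -> %s (%s)\n", m.ID, m.SourceStorageName, m.DestinationStorageName, m.State)
            }
            if resp.NextPage == 0 {
                break
            }
            opts.Page = resp.NextPage
        }
    }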
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html#get-a-single-project-repository-storage-move
-func (p ProjectRepositoryStorageMoveService) GetStorageMove(repositoryStorage int, options ...RequestOptionFunc) (*ProjectRepositoryStorageMove, *Response, error) {
-    u := fmt.Sprintf("project_repository_storage_moves/%d", repositoryStorage)
-
-    req, err := p.client.NewRequest(http.MethodGet, u, nil, options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    psm := new(ProjectRepositoryStorageMove)
-    resp, err := p.client.Do(req, psm)
-    if err != nil {
-        return nil, resp, err
-    }
-
-    return psm, resp, err
-}
-
-// GetStorageMoveForProject gets a single repository storage move for a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html#get-a-single-repository-storage-move-for-a-project
-func (p ProjectRepositoryStorageMoveService) GetStorageMoveForProject(project int, repositoryStorage int, options ...RequestOptionFunc) (*ProjectRepositoryStorageMove, *Response, error) {
-    u := fmt.Sprintf("projects/%d/repository_storage_moves/%d", project, repositoryStorage)
-
-    req, err := p.client.NewRequest(http.MethodGet, u, nil, options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    psm := new(ProjectRepositoryStorageMove)
-    resp, err := p.client.Do(req, psm)
-    if err != nil {
-        return nil, resp, err
-    }
-
-    return psm, resp, err
-}
-
-// ScheduleStorageMoveForProjectOptions represents the available
-// ScheduleStorageMoveForProject() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html#schedule-a-repository-storage-move-for-a-project
-type ScheduleStorageMoveForProjectOptions struct {
-    DestinationStorageName *string `url:"destination_storage_name,omitempty" json:"destination_storage_name,omitempty"`
-}
-
-// ScheduleStorageMoveForProject schedules a repository to be moved for a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html#schedule-a-repository-storage-move-for-a-project
-func (p ProjectRepositoryStorageMoveService) ScheduleStorageMoveForProject(project int, opts ScheduleStorageMoveForProjectOptions, options ...RequestOptionFunc) (*ProjectRepositoryStorageMove, *Response, error) {
-    u := fmt.Sprintf("projects/%d/repository_storage_moves", project)
-
-    req, err := p.client.NewRequest(http.MethodPost, u, opts, options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    psm := new(ProjectRepositoryStorageMove)
-    resp, err := p.client.Do(req, psm)
-    if err != nil {
-        return nil, resp, err
-    }
-
-    return psm, resp, err
-}
-
-// ScheduleAllProjectStorageMovesOptions represents the available
-// ScheduleAllStorageMoves() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html#schedule-repository-storage-moves-for-all-projects-on-a-storage-shard
-type ScheduleAllProjectStorageMovesOptions struct {
-    SourceStorageName      *string `url:"source_storage_name,omitempty" json:"source_storage_name,omitempty"`
-    DestinationStorageName *string `url:"destination_storage_name,omitempty" json:"destination_storage_name,omitempty"`
-}
-
-// ScheduleAllStorageMoves schedules all repositories to be moved.
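And a companion sketch for scheduling a single move via ScheduleStorageMoveForProject, under the same assumptions (client field name, placeholder token, and an illustrative project ID and shard name):

    package main

    import (
        "fmt"
        "log"

        gitlab "github.com/xanzy/go-gitlab"
    )

    func main() {
        git, err := gitlab.NewClient("glpat-example-token") // placeholder token
        if err != nil {
            log.Fatal(err)
        }

        // Schedule project 42 to move to the hypothetical "storage2" shard.
        move, _, err := git.ProjectRepositoryStorageMove.ScheduleStorageMoveForProject(
            42,
            gitlab.ScheduleStorageMoveForProjectOptions{
                DestinationStorageName: gitlab.String("storage2"),
            },
        )
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("scheduled move %d, state %s\n", move.ID, move.State)
    }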
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_repository_storage_moves.html#schedule-repository-storage-moves-for-all-projects-on-a-storage-shard -func (p ProjectRepositoryStorageMoveService) ScheduleAllStorageMoves(opts ScheduleAllProjectStorageMovesOptions, options ...RequestOptionFunc) (*Response, error) { - req, err := p.client.NewRequest(http.MethodPost, "project_repository_storage_moves", opts, options) - if err != nil { - return nil, err - } - - return p.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/project_snippets.go b/vendor/github.com/xanzy/go-gitlab/project_snippets.go deleted file mode 100644 index fe8e46aaf2..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/project_snippets.go +++ /dev/null @@ -1,209 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "bytes" - "fmt" - "net/http" -) - -// ProjectSnippetsService handles communication with the project snippets -// related methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/project_snippets.html -type ProjectSnippetsService struct { - client *Client -} - -// ListProjectSnippetsOptions represents the available ListSnippets() options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/project_snippets.html#list-snippets -type ListProjectSnippetsOptions ListOptions - -// ListSnippets gets a list of project snippets. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/project_snippets.html#list-snippets -func (s *ProjectSnippetsService) ListSnippets(pid interface{}, opt *ListProjectSnippetsOptions, options ...RequestOptionFunc) ([]*Snippet, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/snippets", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ps []*Snippet - resp, err := s.client.Do(req, &ps) - if err != nil { - return nil, resp, err - } - - return ps, resp, nil -} - -// GetSnippet gets a single project snippet -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_snippets.html#single-snippet -func (s *ProjectSnippetsService) GetSnippet(pid interface{}, snippet int, options ...RequestOptionFunc) (*Snippet, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/snippets/%d", PathEscape(project), snippet) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ps := new(Snippet) - resp, err := s.client.Do(req, ps) - if err != nil { - return nil, resp, err - } - - return ps, resp, nil -} - -// CreateProjectSnippetOptions represents the available CreateSnippet() options. 
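A minimal sketch of the snippet listing and raw-content calls defined above; the token and project path are placeholders:

    package main

    import (
        "fmt"
        "log"

        gitlab "github.com/xanzy/go-gitlab"
    )

    func main() {
        git, err := gitlab.NewClient("glpat-example-token") // placeholder token
        if err != nil {
            log.Fatal(err)
        }

        // List the first page of snippets in the project.
        snippets, _, err := git.ProjectSnippets.ListSnippets(
            "group/project",
            &gitlab.ListProjectSnippetsOptions{PerPage: 10, Page: 1},
        )
        if err != nil {
            log.Fatal(err)
        }

        // Fetch the raw content of each snippet.
        for _, s := range snippets {
            raw, _, err := git.ProjectSnippets.SnippetContent("group/project", s.ID)
            if err != nil {
                log.Fatal(err)
            }
            fmt.Printf("%s: %d bytes\n", s.Title, len(raw))
        }
    }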
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_snippets.html#create-new-snippet -type CreateProjectSnippetOptions struct { - Title *string `url:"title,omitempty" json:"title,omitempty"` - FileName *string `url:"file_name,omitempty" json:"file_name,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - Content *string `url:"content,omitempty" json:"content,omitempty"` - Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"` - Files *[]*CreateSnippetFileOptions `url:"files,omitempty" json:"files,omitempty"` -} - -// CreateSnippet creates a new project snippet. The user must have permission -// to create new snippets. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_snippets.html#create-new-snippet -func (s *ProjectSnippetsService) CreateSnippet(pid interface{}, opt *CreateProjectSnippetOptions, options ...RequestOptionFunc) (*Snippet, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/snippets", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - ps := new(Snippet) - resp, err := s.client.Do(req, ps) - if err != nil { - return nil, resp, err - } - - return ps, resp, nil -} - -// UpdateProjectSnippetOptions represents the available UpdateSnippet() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_snippets.html#update-snippet -type UpdateProjectSnippetOptions struct { - Title *string `url:"title,omitempty" json:"title,omitempty"` - FileName *string `url:"file_name,omitempty" json:"file_name,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - Content *string `url:"content,omitempty" json:"content,omitempty"` - Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"` - Files *[]*UpdateSnippetFileOptions `url:"files,omitempty" json:"files,omitempty"` -} - -// UpdateSnippet updates an existing project snippet. The user must have -// permission to change an existing snippet. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_snippets.html#update-snippet -func (s *ProjectSnippetsService) UpdateSnippet(pid interface{}, snippet int, opt *UpdateProjectSnippetOptions, options ...RequestOptionFunc) (*Snippet, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/snippets/%d", PathEscape(project), snippet) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - ps := new(Snippet) - resp, err := s.client.Do(req, ps) - if err != nil { - return nil, resp, err - } - - return ps, resp, nil -} - -// DeleteSnippet deletes an existing project snippet. This is an idempotent -// function and deleting a non-existent snippet still returns a 200 OK status -// code. 
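A sketch of creating a snippet with CreateSnippet using the single-file option fields shown above; all values are illustrative:

    package main

    import (
        "fmt"
        "log"

        gitlab "github.com/xanzy/go-gitlab"
    )

    func main() {
        git, err := gitlab.NewClient("glpat-example-token") // placeholder token
        if err != nil {
            log.Fatal(err)
        }

        // Create a private single-file snippet in the project.
        snippet, _, err := git.ProjectSnippets.CreateSnippet("group/project", &gitlab.CreateProjectSnippetOptions{
            Title:      gitlab.String("hello"),
            FileName:   gitlab.String("hello.go"),
            Content:    gitlab.String("package main"),
            Visibility: gitlab.Visibility(gitlab.PrivateVisibility),
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("created snippet %d: %s\n", snippet.ID, snippet.WebURL)
    }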
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/project_snippets.html#delete-snippet
-func (s *ProjectSnippetsService) DeleteSnippet(pid interface{}, snippet int, options ...RequestOptionFunc) (*Response, error) {
-    project, err := parseID(pid)
-    if err != nil {
-        return nil, err
-    }
-    u := fmt.Sprintf("projects/%s/snippets/%d", PathEscape(project), snippet)
-
-    req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-    if err != nil {
-        return nil, err
-    }
-
-    return s.client.Do(req, nil)
-}
-
-// SnippetContent returns the raw project snippet as plain text.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/project_snippets.html#snippet-content
-func (s *ProjectSnippetsService) SnippetContent(pid interface{}, snippet int, options ...RequestOptionFunc) ([]byte, *Response, error) {
-    project, err := parseID(pid)
-    if err != nil {
-        return nil, nil, err
-    }
-    u := fmt.Sprintf("projects/%s/snippets/%d/raw", PathEscape(project), snippet)
-
-    req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    var b bytes.Buffer
-    resp, err := s.client.Do(req, &b)
-    if err != nil {
-        return nil, resp, err
-    }
-
-    return b.Bytes(), resp, err
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/project_templates.go b/vendor/github.com/xanzy/go-gitlab/project_templates.go
deleted file mode 100644
index 86010bd1b3..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/project_templates.go
+++ /dev/null
@@ -1,110 +0,0 @@
-//
-// Copyright 2021, Sander van Harmelen
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package gitlab
-
-import (
-    "fmt"
-    "net/http"
-)
-
-// ProjectTemplatesService handles communication with the project templates
-// related methods of the GitLab API.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/project_templates.html
type ProjectTemplatesService struct {
-    client *Client
-}
-
-// ProjectTemplate represents a GitLab ProjectTemplate.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/project_templates.html
-type ProjectTemplate struct {
-    Key         string   `json:"key"`
-    Name        string   `json:"name"`
-    Nickname    string   `json:"nickname"`
-    Popular     bool     `json:"popular"`
-    HTMLURL     string   `json:"html_url"`
-    SourceURL   string   `json:"source_url"`
-    Description string   `json:"description"`
-    Conditions  []string `json:"conditions"`
-    Permissions []string `json:"permissions"`
-    Limitations []string `json:"limitations"`
-    Content     string   `json:"content"`
-}
-
-func (s ProjectTemplate) String() string {
-    return Stringify(s)
-}
-
-// ListProjectTemplatesOptions represents the available ListTemplates() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/project_templates.html#get-all-templates-of-a-particular-type
-type ListProjectTemplatesOptions struct {
-    ListOptions
-    ID   *int    `url:"id,omitempty" json:"id,omitempty"`
-    Type *string `url:"type,omitempty" json:"type,omitempty"`
-}
-
-// ListTemplates gets a list of project templates.
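A sketch of listing templates of one type via ListTemplates (whose body, together with GetProjectTemplate, follows just below); the "dockerfiles" type is a real template category, while the token and project path are placeholders:

    package main

    import (
        "fmt"
        "log"

        gitlab "github.com/xanzy/go-gitlab"
    )

    func main() {
        git, err := gitlab.NewClient("glpat-example-token") // placeholder token
        if err != nil {
            log.Fatal(err)
        }

        // List the Dockerfile templates available to the project.
        tmpls, _, err := git.ProjectTemplates.ListTemplates(
            "group/project",
            "dockerfiles",
            &gitlab.ListProjectTemplatesOptions{},
        )
        if err != nil {
            log.Fatal(err)
        }
        for _, t := range tmpls {
            fmt.Printf("%s (%s)\n", t.Name, t.Key)
        }
    }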
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/project_templates.html#get-all-templates-of-a-particular-type -func (s *ProjectTemplatesService) ListTemplates(pid interface{}, templateType string, opt *ListProjectTemplatesOptions, options ...RequestOptionFunc) ([]*ProjectTemplate, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/templates/%s", PathEscape(project), templateType) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pt []*ProjectTemplate - resp, err := s.client.Do(req, &pt) - if err != nil { - return nil, resp, err - } - - return pt, resp, nil -} - -// GetProjectTemplate gets a single project template. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_templates.html#get-one-template-of-a-particular-type -func (s *ProjectTemplatesService) GetProjectTemplate(pid interface{}, templateType string, templateName string, options ...RequestOptionFunc) (*ProjectTemplate, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/templates/%s/%s", PathEscape(project), templateType, templateName) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ptd := new(ProjectTemplate) - resp, err := s.client.Do(req, ptd) - if err != nil { - return nil, resp, err - } - - return ptd, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/project_variables.go b/vendor/github.com/xanzy/go-gitlab/project_variables.go deleted file mode 100644 index e75c74634a..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/project_variables.go +++ /dev/null @@ -1,232 +0,0 @@ -// -// Copyright 2021, Patrick Webster -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "net/url" -) - -// ProjectVariablesService handles communication with the -// project variables related methods of the GitLab API. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_level_variables.html -type ProjectVariablesService struct { - client *Client -} - -// ProjectVariable represents a GitLab Project Variable. 
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/project_level_variables.html
-type ProjectVariable struct {
-    Key              string            `json:"key"`
-    Value            string            `json:"value"`
-    VariableType     VariableTypeValue `json:"variable_type"`
-    Protected        bool              `json:"protected"`
-    Masked           bool              `json:"masked"`
-    Raw              bool              `json:"raw"`
-    EnvironmentScope string            `json:"environment_scope"`
-    Description      string            `json:"description"`
-}
-
-func (v ProjectVariable) String() string {
-    return Stringify(v)
-}
-
-// VariableFilter represents the available filters for project variable
-// related functions.
-type VariableFilter struct {
-    EnvironmentScope string `url:"environment_scope,omitempty" json:"environment_scope,omitempty"`
-}
-
-// ListProjectVariablesOptions represents the available options for listing variables
-// in a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/project_level_variables.html#list-project-variables
-type ListProjectVariablesOptions ListOptions
-
-// ListVariables gets a list of all variables in a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/project_level_variables.html#list-project-variables
-func (s *ProjectVariablesService) ListVariables(pid interface{}, opt *ListProjectVariablesOptions, options ...RequestOptionFunc) ([]*ProjectVariable, *Response, error) {
-    project, err := parseID(pid)
-    if err != nil {
-        return nil, nil, err
-    }
-    u := fmt.Sprintf("projects/%s/variables", PathEscape(project))
-
-    req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    var vs []*ProjectVariable
-    resp, err := s.client.Do(req, &vs)
-    if err != nil {
-        return nil, resp, err
-    }
-
-    return vs, resp, nil
-}
-
-// GetProjectVariableOptions represents the available GetVariable()
-// options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/project_level_variables.html#get-a-single-variable
-type GetProjectVariableOptions struct {
-    Filter *VariableFilter `url:"filter,omitempty" json:"filter,omitempty"`
-}
-
-// GetVariable gets a variable.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/project_level_variables.html#get-a-single-variable
-func (s *ProjectVariablesService) GetVariable(pid interface{}, key string, opt *GetProjectVariableOptions, options ...RequestOptionFunc) (*ProjectVariable, *Response, error) {
-    project, err := parseID(pid)
-    if err != nil {
-        return nil, nil, err
-    }
-    u := fmt.Sprintf("projects/%s/variables/%s", PathEscape(project), url.PathEscape(key))
-
-    req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    v := new(ProjectVariable)
-    resp, err := s.client.Do(req, v)
-    if err != nil {
-        return nil, resp, err
-    }
-
-    return v, resp, nil
-}
-
-// CreateProjectVariableOptions represents the available CreateVariable()
-// options.
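A sketch of fetching a scoped variable with GetVariable and the VariableFilter defined above; the key name and environment scope are illustrative:

    package main

    import (
        "fmt"
        "log"

        gitlab "github.com/xanzy/go-gitlab"
    )

    func main() {
        git, err := gitlab.NewClient("glpat-example-token") // placeholder token
        if err != nil {
            log.Fatal(err)
        }

        // Read the production-scoped value of a variable.
        v, _, err := git.ProjectVariables.GetVariable(
            "group/project",
            "DATABASE_URL",
            &gitlab.GetProjectVariableOptions{
                Filter: &gitlab.VariableFilter{EnvironmentScope: "production"},
            },
        )
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%s (scope %s, protected=%t)\n", v.Key, v.EnvironmentScope, v.Protected)
    }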
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_level_variables.html#create-a-variable -type CreateProjectVariableOptions struct { - Key *string `url:"key,omitempty" json:"key,omitempty"` - Value *string `url:"value,omitempty" json:"value,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - EnvironmentScope *string `url:"environment_scope,omitempty" json:"environment_scope,omitempty"` - Masked *bool `url:"masked,omitempty" json:"masked,omitempty"` - Protected *bool `url:"protected,omitempty" json:"protected,omitempty"` - Raw *bool `url:"raw,omitempty" json:"raw,omitempty"` - VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` -} - -// CreateVariable creates a new project variable. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_level_variables.html#create-a-variable -func (s *ProjectVariablesService) CreateVariable(pid interface{}, opt *CreateProjectVariableOptions, options ...RequestOptionFunc) (*ProjectVariable, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/variables", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - v := new(ProjectVariable) - resp, err := s.client.Do(req, v) - if err != nil { - return nil, resp, err - } - - return v, resp, nil -} - -// UpdateProjectVariableOptions represents the available UpdateVariable() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_level_variables.html#update-a-variable -type UpdateProjectVariableOptions struct { - Value *string `url:"value,omitempty" json:"value,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - EnvironmentScope *string `url:"environment_scope,omitempty" json:"environment_scope,omitempty"` - Filter *VariableFilter `url:"filter,omitempty" json:"filter,omitempty"` - Masked *bool `url:"masked,omitempty" json:"masked,omitempty"` - Protected *bool `url:"protected,omitempty" json:"protected,omitempty"` - Raw *bool `url:"raw,omitempty" json:"raw,omitempty"` - VariableType *VariableTypeValue `url:"variable_type,omitempty" json:"variable_type,omitempty"` -} - -// UpdateVariable updates a project's variable. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_level_variables.html#update-a-variable -func (s *ProjectVariablesService) UpdateVariable(pid interface{}, key string, opt *UpdateProjectVariableOptions, options ...RequestOptionFunc) (*ProjectVariable, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/variables/%s", PathEscape(project), url.PathEscape(key)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - v := new(ProjectVariable) - resp, err := s.client.Do(req, v) - if err != nil { - return nil, resp, err - } - - return v, resp, nil -} - -// RemoveProjectVariableOptions represents the available RemoveVariable() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_level_variables.html#delete-a-variable -type RemoveProjectVariableOptions struct { - Filter *VariableFilter `url:"filter,omitempty" json:"filter,omitempty"` -} - -// RemoveVariable removes a project's variable. 
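And a sketch of creating a masked, protected variable with CreateVariable; the key, value, and scope are placeholders:

    package main

    import (
        "fmt"
        "log"

        gitlab "github.com/xanzy/go-gitlab"
    )

    func main() {
        git, err := gitlab.NewClient("glpat-example-token") // placeholder token
        if err != nil {
            log.Fatal(err)
        }

        // Create a masked, protected variable scoped to production.
        v, _, err := git.ProjectVariables.CreateVariable("group/project", &gitlab.CreateProjectVariableOptions{
            Key:              gitlab.String("DEPLOY_TOKEN"),
            Value:            gitlab.String("s3cr3t-value"),
            Protected:        gitlab.Bool(true),
            Masked:           gitlab.Bool(true),
            EnvironmentScope: gitlab.String("production"),
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("created variable %s\n", v.Key)
    }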
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/project_level_variables.html#delete-a-variable
-func (s *ProjectVariablesService) RemoveVariable(pid interface{}, key string, opt *RemoveProjectVariableOptions, options ...RequestOptionFunc) (*Response, error) {
-    project, err := parseID(pid)
-    if err != nil {
-        return nil, err
-    }
-    u := fmt.Sprintf("projects/%s/variables/%s", PathEscape(project), url.PathEscape(key))
-
-    req, err := s.client.NewRequest(http.MethodDelete, u, opt, options)
-    if err != nil {
-        return nil, err
-    }
-
-    return s.client.Do(req, nil)
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/project_vulnerabilities.go b/vendor/github.com/xanzy/go-gitlab/project_vulnerabilities.go
deleted file mode 100644
index f46f36fa7e..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/project_vulnerabilities.go
+++ /dev/null
@@ -1,150 +0,0 @@
-//
-// Copyright 2021, Sander van Harmelen
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package gitlab
-
-import (
-    "fmt"
-    "net/http"
-    "time"
-)
-
-// ProjectVulnerabilitiesService handles communication with the project
-// vulnerabilities related methods of the GitLab API.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/project_vulnerabilities.html
-type ProjectVulnerabilitiesService struct {
-    client *Client
-}
-
-// ProjectVulnerability represents a GitLab project vulnerability.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/project_vulnerabilities.html
-type ProjectVulnerability struct {
-    AuthorID                int        `json:"author_id"`
-    Confidence              string     `json:"confidence"`
-    CreatedAt               *time.Time `json:"created_at"`
-    Description             string     `json:"description"`
-    DismissedAt             *time.Time `json:"dismissed_at"`
-    DismissedByID           int        `json:"dismissed_by_id"`
-    DueDate                 *time.Time `json:"due_date"`
-    Finding                 *Finding   `json:"finding"`
-    ID                      int        `json:"id"`
-    LastEditedAt            *time.Time `json:"last_edited_at"`
-    LastEditedByID          int        `json:"last_edited_by_id"`
-    Project                 *Project   `json:"project"`
-    ProjectDefaultBranch    string     `json:"project_default_branch"`
-    ReportType              string     `json:"report_type"`
-    ResolvedAt              *time.Time `json:"resolved_at"`
-    ResolvedByID            int        `json:"resolved_by_id"`
-    ResolvedOnDefaultBranch bool       `json:"resolved_on_default_branch"`
-    Severity                string     `json:"severity"`
-    StartDate               *time.Time `json:"start_date"`
-    State                   string     `json:"state"`
-    Title                   string     `json:"title"`
-    UpdatedAt               *time.Time `json:"updated_at"`
-    UpdatedByID             int        `json:"updated_by_id"`
-}
-
-// Finding represents a GitLab project vulnerability finding.
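A sketch of paging through project vulnerabilities with the list function defined just below; the token and project path are placeholders:

    package main

    import (
        "fmt"
        "log"

        gitlab "github.com/xanzy/go-gitlab"
    )

    func main() {
        git, err := gitlab.NewClient("glpat-example-token") // placeholder token
        if err != nil {
            log.Fatal(err)
        }

        opts := &gitlab.ListProjectVulnerabilitiesOptions{
            ListOptions: gitlab.ListOptions{PerPage: 50, Page: 1},
        }
        for {
            vulns, resp, err := git.ProjectVulnerabilities.ListProjectVulnerabilities("group/project", opts)
            if err != nil {
                log.Fatal(err)
            }
            for _, v := range vulns {
                fmt.Printf("[%s] %s (%s)\n", v.Severity, v.Title, v.State)
            }
            if resp.NextPage == 0 {
                break
            }
            opts.Page = resp.NextPage
        }
    }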
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/project_vulnerabilities.html -type Finding struct { - Confidence string `json:"confidence"` - CreatedAt *time.Time `json:"created_at"` - ID int `json:"id"` - LocationFingerprint string `json:"location_fingerprint"` - MetadataVersion string `json:"metadata_version"` - Name string `json:"name"` - PrimaryIdentifierID int `json:"primary_identifier_id"` - ProjectFingerprint string `json:"project_fingerprint"` - ProjectID int `json:"project_id"` - RawMetadata string `json:"raw_metadata"` - ReportType string `json:"report_type"` - ScannerID int `json:"scanner_id"` - Severity string `json:"severity"` - UpdatedAt *time.Time `json:"updated_at"` - UUID string `json:"uuid"` - VulnerabilityID int `json:"vulnerability_id"` -} - -// ListProjectVulnerabilitiesOptions represents the available -// ListProjectVulnerabilities() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_vulnerabilities.html#list-project-vulnerabilities -type ListProjectVulnerabilitiesOptions struct { - ListOptions -} - -// ListProjectVulnerabilities gets a list of all project vulnerabilities. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_vulnerabilities.html#list-project-vulnerabilities -func (s *ProjectVulnerabilitiesService) ListProjectVulnerabilities(pid interface{}, opt *ListProjectVulnerabilitiesOptions, options ...RequestOptionFunc) ([]*ProjectVulnerability, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/vulnerabilities", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var p []*ProjectVulnerability - resp, err := s.client.Do(req, &p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -// CreateVulnerabilityOptions represents the available CreateVulnerability() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_vulnerabilities.html#new-vulnerability -type CreateVulnerabilityOptions struct { - FindingID *int `url:"finding_id,omitempty" json:"finding_id,omitempty"` -} - -// CreateVulnerability creates a new vulnerability on the selected project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/project_vulnerabilities.html#new-vulnerability -func (s *ProjectVulnerabilitiesService) CreateVulnerability(pid interface{}, opt *CreateVulnerabilityOptions, options ...RequestOptionFunc) (*ProjectVulnerability, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/vulnerabilities", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - p := new(ProjectVulnerability) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/projects.go b/vendor/github.com/xanzy/go-gitlab/projects.go deleted file mode 100644 index cc23f265d0..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/projects.go +++ /dev/null @@ -1,2263 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "time" - - "github.com/hashicorp/go-retryablehttp" -) - -// ProjectsService handles communication with the repositories related methods -// of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html -type ProjectsService struct { - client *Client -} - -// Project represents a GitLab project. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html -type Project struct { - ID int `json:"id"` - Description string `json:"description"` - DefaultBranch string `json:"default_branch"` - Visibility VisibilityValue `json:"visibility"` - SSHURLToRepo string `json:"ssh_url_to_repo"` - HTTPURLToRepo string `json:"http_url_to_repo"` - WebURL string `json:"web_url"` - ReadmeURL string `json:"readme_url"` - TagList []string `json:"tag_list"` - Topics []string `json:"topics"` - Owner *User `json:"owner"` - Name string `json:"name"` - NameWithNamespace string `json:"name_with_namespace"` - Path string `json:"path"` - PathWithNamespace string `json:"path_with_namespace"` - IssuesEnabled bool `json:"issues_enabled"` - OpenIssuesCount int `json:"open_issues_count"` - MergeRequestsEnabled bool `json:"merge_requests_enabled"` - ApprovalsBeforeMerge int `json:"approvals_before_merge"` - JobsEnabled bool `json:"jobs_enabled"` - WikiEnabled bool `json:"wiki_enabled"` - SnippetsEnabled bool `json:"snippets_enabled"` - ResolveOutdatedDiffDiscussions bool `json:"resolve_outdated_diff_discussions"` - ContainerExpirationPolicy *ContainerExpirationPolicy `json:"container_expiration_policy,omitempty"` - ContainerRegistryEnabled bool `json:"container_registry_enabled"` - ContainerRegistryAccessLevel AccessControlValue `json:"container_registry_access_level"` - ContainerRegistryImagePrefix string `json:"container_registry_image_prefix,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - LastActivityAt *time.Time `json:"last_activity_at,omitempty"` - CreatorID int `json:"creator_id"` - Namespace *ProjectNamespace `json:"namespace"` - Permissions *Permissions `json:"permissions"` - MarkedForDeletionAt *ISOTime `json:"marked_for_deletion_at"` - EmptyRepo bool `json:"empty_repo"` - Archived bool `json:"archived"` - AvatarURL string `json:"avatar_url"` - LicenseURL string `json:"license_url"` - License *ProjectLicense `json:"license"` - SharedRunnersEnabled bool `json:"shared_runners_enabled"` - GroupRunnersEnabled bool `json:"group_runners_enabled"` - RunnerTokenExpirationInterval int `json:"runner_token_expiration_interval"` - ForksCount int `json:"forks_count"` - StarCount int `json:"star_count"` - RunnersToken string `json:"runners_token"` - AllowMergeOnSkippedPipeline bool `json:"allow_merge_on_skipped_pipeline"` - OnlyAllowMergeIfPipelineSucceeds bool `json:"only_allow_merge_if_pipeline_succeeds"` - OnlyAllowMergeIfAllDiscussionsAreResolved bool `json:"only_allow_merge_if_all_discussions_are_resolved"` - RemoveSourceBranchAfterMerge bool `json:"remove_source_branch_after_merge"` - PreventMergeWithoutJiraIssue bool `json:"prevent_merge_without_jira_issue"` - 
PrintingMergeRequestLinkEnabled bool `json:"printing_merge_request_link_enabled"` - LFSEnabled bool `json:"lfs_enabled"` - RepositoryStorage string `json:"repository_storage"` - RequestAccessEnabled bool `json:"request_access_enabled"` - MergeMethod MergeMethodValue `json:"merge_method"` - CanCreateMergeRequestIn bool `json:"can_create_merge_request_in"` - ForkedFromProject *ForkParent `json:"forked_from_project"` - Mirror bool `json:"mirror"` - MirrorUserID int `json:"mirror_user_id"` - MirrorTriggerBuilds bool `json:"mirror_trigger_builds"` - OnlyMirrorProtectedBranches bool `json:"only_mirror_protected_branches"` - MirrorOverwritesDivergedBranches bool `json:"mirror_overwrites_diverged_branches"` - PackagesEnabled bool `json:"packages_enabled"` - ServiceDeskEnabled bool `json:"service_desk_enabled"` - ServiceDeskAddress string `json:"service_desk_address"` - IssuesAccessLevel AccessControlValue `json:"issues_access_level"` - ReleasesAccessLevel AccessControlValue `json:"releases_access_level,omitempty"` - RepositoryAccessLevel AccessControlValue `json:"repository_access_level"` - MergeRequestsAccessLevel AccessControlValue `json:"merge_requests_access_level"` - ForkingAccessLevel AccessControlValue `json:"forking_access_level"` - WikiAccessLevel AccessControlValue `json:"wiki_access_level"` - BuildsAccessLevel AccessControlValue `json:"builds_access_level"` - SnippetsAccessLevel AccessControlValue `json:"snippets_access_level"` - PagesAccessLevel AccessControlValue `json:"pages_access_level"` - OperationsAccessLevel AccessControlValue `json:"operations_access_level"` - AnalyticsAccessLevel AccessControlValue `json:"analytics_access_level"` - EnvironmentsAccessLevel AccessControlValue `json:"environments_access_level"` - FeatureFlagsAccessLevel AccessControlValue `json:"feature_flags_access_level"` - InfrastructureAccessLevel AccessControlValue `json:"infrastructure_access_level"` - MonitorAccessLevel AccessControlValue `json:"monitor_access_level"` - AutocloseReferencedIssues bool `json:"autoclose_referenced_issues"` - SuggestionCommitMessage string `json:"suggestion_commit_message"` - SquashOption SquashOptionValue `json:"squash_option"` - EnforceAuthChecksOnUploads bool `json:"enforce_auth_checks_on_uploads,omitempty"` - SharedWithGroups []struct { - GroupID int `json:"group_id"` - GroupName string `json:"group_name"` - GroupFullPath string `json:"group_full_path"` - GroupAccessLevel int `json:"group_access_level"` - } `json:"shared_with_groups"` - Statistics *Statistics `json:"statistics"` - Links *Links `json:"_links,omitempty"` - ImportURL string `json:"import_url"` - ImportType string `json:"import_type"` - ImportStatus string `json:"import_status"` - ImportError string `json:"import_error"` - CIDefaultGitDepth int `json:"ci_default_git_depth"` - CIForwardDeploymentEnabled bool `json:"ci_forward_deployment_enabled"` - CIForwardDeploymentRollbackAllowed bool `json:"ci_forward_deployment_rollback_allowed"` - CISeperateCache bool `json:"ci_separated_caches"` - CIJobTokenScopeEnabled bool `json:"ci_job_token_scope_enabled"` - CIOptInJWT bool `json:"ci_opt_in_jwt"` - CIAllowForkPipelinesToRunInParentProject bool `json:"ci_allow_fork_pipelines_to_run_in_parent_project"` - CIRestrictPipelineCancellationRole AccessControlValue `json:"ci_restrict_pipeline_cancellation_role"` - PublicJobs bool `json:"public_jobs"` - BuildTimeout int `json:"build_timeout"` - AutoCancelPendingPipelines string `json:"auto_cancel_pending_pipelines"` - CIConfigPath string `json:"ci_config_path"` - 
CustomAttributes []*CustomAttribute `json:"custom_attributes"` - ComplianceFrameworks []string `json:"compliance_frameworks"` - BuildCoverageRegex string `json:"build_coverage_regex"` - IssuesTemplate string `json:"issues_template"` - MergeRequestsTemplate string `json:"merge_requests_template"` - IssueBranchTemplate string `json:"issue_branch_template"` - KeepLatestArtifact bool `json:"keep_latest_artifact"` - MergePipelinesEnabled bool `json:"merge_pipelines_enabled"` - MergeTrainsEnabled bool `json:"merge_trains_enabled"` - RestrictUserDefinedVariables bool `json:"restrict_user_defined_variables"` - MergeCommitTemplate string `json:"merge_commit_template"` - SquashCommitTemplate string `json:"squash_commit_template"` - AutoDevopsDeployStrategy string `json:"auto_devops_deploy_strategy"` - AutoDevopsEnabled bool `json:"auto_devops_enabled"` - BuildGitStrategy string `json:"build_git_strategy"` - EmailsEnabled bool `json:"emails_enabled"` - ExternalAuthorizationClassificationLabel string `json:"external_authorization_classification_label"` - RequirementsEnabled bool `json:"requirements_enabled"` - RequirementsAccessLevel AccessControlValue `json:"requirements_access_level"` - SecurityAndComplianceEnabled bool `json:"security_and_compliance_enabled"` - SecurityAndComplianceAccessLevel AccessControlValue `json:"security_and_compliance_access_level"` - MergeRequestDefaultTargetSelf bool `json:"mr_default_target_self"` - ModelExperimentsAccessLevel AccessControlValue `json:"model_experiments_access_level"` - ModelRegistryAccessLevel AccessControlValue `json:"model_registry_access_level"` - PreReceiveSecretDetectionEnabled bool `json:"pre_receive_secret_detection_enabled"` - - // Deprecated: Use EmailsEnabled instead - EmailsDisabled bool `json:"emails_disabled"` - // Deprecated: This parameter has been renamed to PublicJobs in GitLab 9.0. - PublicBuilds bool `json:"public_builds"` -} - -// BasicProject included in other service responses (such as todos). -type BasicProject struct { - ID int `json:"id"` - Description string `json:"description"` - Name string `json:"name"` - NameWithNamespace string `json:"name_with_namespace"` - Path string `json:"path"` - PathWithNamespace string `json:"path_with_namespace"` - CreatedAt *time.Time `json:"created_at"` -} - -// ContainerExpirationPolicy represents the container expiration policy. -type ContainerExpirationPolicy struct { - Cadence string `json:"cadence"` - KeepN int `json:"keep_n"` - OlderThan string `json:"older_than"` - NameRegex string `json:"name_regex"` - NameRegexDelete string `json:"name_regex_delete"` - NameRegexKeep string `json:"name_regex_keep"` - Enabled bool `json:"enabled"` - NextRunAt *time.Time `json:"next_run_at"` -} - -// ForkParent represents the parent project when this is a fork. -type ForkParent struct { - ID int `json:"id"` - Name string `json:"name"` - NameWithNamespace string `json:"name_with_namespace"` - Path string `json:"path"` - PathWithNamespace string `json:"path_with_namespace"` - HTTPURLToRepo string `json:"http_url_to_repo"` - WebURL string `json:"web_url"` - RepositoryStorage string `json:"repository_storage"` -} - -// GroupAccess represents group access. -type GroupAccess struct { - AccessLevel AccessLevelValue `json:"access_level"` - NotificationLevel NotificationLevelValue `json:"notification_level"` -} - -// Links represents a project web links for self, issues, merge_requests, -// repo_branches, labels, events, members. 
-type Links struct { - Self string `json:"self"` - Issues string `json:"issues"` - MergeRequests string `json:"merge_requests"` - RepoBranches string `json:"repo_branches"` - Labels string `json:"labels"` - Events string `json:"events"` - Members string `json:"members"` - ClusterAgents string `json:"cluster_agents"` -} - -// Permissions represents permissions. -type Permissions struct { - ProjectAccess *ProjectAccess `json:"project_access"` - GroupAccess *GroupAccess `json:"group_access"` -} - -// ProjectAccess represents project access. -type ProjectAccess struct { - AccessLevel AccessLevelValue `json:"access_level"` - NotificationLevel NotificationLevelValue `json:"notification_level"` -} - -// ProjectLicense represent the license for a project. -type ProjectLicense struct { - Key string `json:"key"` - Name string `json:"name"` - Nickname string `json:"nickname"` - HTMLURL string `json:"html_url"` - SourceURL string `json:"source_url"` -} - -// ProjectNamespace represents a project namespace. -type ProjectNamespace struct { - ID int `json:"id"` - Name string `json:"name"` - Path string `json:"path"` - Kind string `json:"kind"` - FullPath string `json:"full_path"` - ParentID int `json:"parent_id"` - AvatarURL string `json:"avatar_url"` - WebURL string `json:"web_url"` -} - -// Repository represents a repository. -type Repository struct { - Name string `json:"name"` - Description string `json:"description"` - WebURL string `json:"web_url"` - AvatarURL string `json:"avatar_url"` - GitSSHURL string `json:"git_ssh_url"` - GitHTTPURL string `json:"git_http_url"` - Namespace string `json:"namespace"` - Visibility VisibilityValue `json:"visibility"` - PathWithNamespace string `json:"path_with_namespace"` - DefaultBranch string `json:"default_branch"` - Homepage string `json:"homepage"` - URL string `json:"url"` - SSHURL string `json:"ssh_url"` - HTTPURL string `json:"http_url"` -} - -// Statistics represents a statistics record for a group or project. -type Statistics struct { - CommitCount int64 `json:"commit_count"` - StorageSize int64 `json:"storage_size"` - RepositorySize int64 `json:"repository_size"` - WikiSize int64 `json:"wiki_size"` - LFSObjectsSize int64 `json:"lfs_objects_size"` - JobArtifactsSize int64 `json:"job_artifacts_size"` - PipelineArtifactsSize int64 `json:"pipeline_artifacts_size"` - PackagesSize int64 `json:"packages_size"` - SnippetsSize int64 `json:"snippets_size"` - UploadsSize int64 `json:"uploads_size"` - ContainerRegistrySize int64 `json:"container_registry_size"` -} - -func (s Project) String() string { - return Stringify(s) -} - -// ProjectApprovalRule represents a GitLab project approval rule. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_request_approvals.html#get-project-level-rules -type ProjectApprovalRule struct { - ID int `json:"id"` - Name string `json:"name"` - RuleType string `json:"rule_type"` - ReportType string `json:"report_type"` - EligibleApprovers []*BasicUser `json:"eligible_approvers"` - ApprovalsRequired int `json:"approvals_required"` - Users []*BasicUser `json:"users"` - Groups []*Group `json:"groups"` - ContainsHiddenGroups bool `json:"contains_hidden_groups"` - ProtectedBranches []*ProtectedBranch `json:"protected_branches"` - AppliesToAllProtectedBranches bool `json:"applies_to_all_protected_branches"` -} - -func (s ProjectApprovalRule) String() string { - return Stringify(s) -} - -// ListProjectsOptions represents the available ListProjects() options. 
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#list-all-projects -type ListProjectsOptions struct { - ListOptions - Archived *bool `url:"archived,omitempty" json:"archived,omitempty"` - IDAfter *int `url:"id_after,omitempty" json:"id_after,omitempty"` - IDBefore *int `url:"id_before,omitempty" json:"id_before,omitempty"` - Imported *bool `url:"imported,omitempty" json:"imported,omitempty"` - IncludeHidden *bool `url:"include_hidden,omitempty" json:"include_hidden,omitempty"` - IncludePendingDelete *bool `url:"include_pending_delete,omitempty" json:"include_pending_delete,omitempty"` - LastActivityAfter *time.Time `url:"last_activity_after,omitempty" json:"last_activity_after,omitempty"` - LastActivityBefore *time.Time `url:"last_activity_before,omitempty" json:"last_activity_before,omitempty"` - Membership *bool `url:"membership,omitempty" json:"membership,omitempty"` - MinAccessLevel *AccessLevelValue `url:"min_access_level,omitempty" json:"min_access_level,omitempty"` - OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` - Owned *bool `url:"owned,omitempty" json:"owned,omitempty"` - RepositoryChecksumFailed *bool `url:"repository_checksum_failed,omitempty" json:"repository_checksum_failed,omitempty"` - RepositoryStorage *string `url:"repository_storage,omitempty" json:"repository_storage,omitempty"` - Search *string `url:"search,omitempty" json:"search,omitempty"` - SearchNamespaces *bool `url:"search_namespaces,omitempty" json:"search_namespaces,omitempty"` - Simple *bool `url:"simple,omitempty" json:"simple,omitempty"` - Sort *string `url:"sort,omitempty" json:"sort,omitempty"` - Starred *bool `url:"starred,omitempty" json:"starred,omitempty"` - Statistics *bool `url:"statistics,omitempty" json:"statistics,omitempty"` - Topic *string `url:"topic,omitempty" json:"topic,omitempty"` - Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"` - WikiChecksumFailed *bool `url:"wiki_checksum_failed,omitempty" json:"wiki_checksum_failed,omitempty"` - WithCustomAttributes *bool `url:"with_custom_attributes,omitempty" json:"with_custom_attributes,omitempty"` - WithIssuesEnabled *bool `url:"with_issues_enabled,omitempty" json:"with_issues_enabled,omitempty"` - WithMergeRequestsEnabled *bool `url:"with_merge_requests_enabled,omitempty" json:"with_merge_requests_enabled,omitempty"` - WithProgrammingLanguage *string `url:"with_programming_language,omitempty" json:"with_programming_language,omitempty"` -} - -// ListProjects gets a list of projects accessible by the authenticated user. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#list-all-projects -func (s *ProjectsService) ListProjects(opt *ListProjectsOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "projects", opt, options) - if err != nil { - return nil, nil, err - } - - var p []*Project - resp, err := s.client.Do(req, &p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -// ListUserProjects gets a list of projects for the given user. 
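A sketch of keyless pagination over ListProjects using the options struct above and the NextPage hint returned in each *Response; the token and filter values are illustrative:

    package main

    import (
        "fmt"
        "log"

        gitlab "github.com/xanzy/go-gitlab"
    )

    func main() {
        git, err := gitlab.NewClient("glpat-example-token") // placeholder token
        if err != nil {
            log.Fatal(err)
        }

        // List projects the user is a member of, most recently active first.
        opts := &gitlab.ListProjectsOptions{
            ListOptions: gitlab.ListOptions{PerPage: 100, Page: 1},
            Membership:  gitlab.Bool(true),
            OrderBy:     gitlab.String("last_activity_at"),
        }
        for {
            projects, resp, err := git.Projects.ListProjects(opts)
            if err != nil {
                log.Fatal(err)
            }
            for _, p := range projects {
                fmt.Println(p.PathWithNamespace)
            }
            if resp.NextPage == 0 {
                break
            }
            opts.Page = resp.NextPage
        }
    }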
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/projects.html#list-user-projects -func (s *ProjectsService) ListUserProjects(uid interface{}, opt *ListProjectsOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) { - user, err := parseID(uid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("users/%s/projects", user) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var p []*Project - resp, err := s.client.Do(req, &p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -// ListUserContributedProjects gets a list of visible projects a given user has contributed to. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/projects.html#list-projects-a-user-has-contributed-to -func (s *ProjectsService) ListUserContributedProjects(uid interface{}, opt *ListProjectsOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) { - user, err := parseID(uid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("users/%s/contributed_projects", user) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var p []*Project - resp, err := s.client.Do(req, &p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -// ListUserStarredProjects gets a list of projects starred by the given user. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/projects.html#list-projects-starred-by-a-user -func (s *ProjectsService) ListUserStarredProjects(uid interface{}, opt *ListProjectsOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) { - user, err := parseID(uid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("users/%s/starred_projects", user) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var p []*Project - resp, err := s.client.Do(req, &p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -// ProjectUser represents a GitLab project user. -type ProjectUser struct { - ID int `json:"id"` - Name string `json:"name"` - Username string `json:"username"` - State string `json:"state"` - AvatarURL string `json:"avatar_url"` - WebURL string `json:"web_url"` -} - -// ListProjectUserOptions represents the available ListProjectsUsers() options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#get-project-users -type ListProjectUserOptions struct { - ListOptions - Search *string `url:"search,omitempty" json:"search,omitempty"` -} - -// ListProjectsUsers gets a list of users for the given project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/projects.html#get-project-users -func (s *ProjectsService) ListProjectsUsers(pid interface{}, opt *ListProjectUserOptions, options ...RequestOptionFunc) ([]*ProjectUser, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/users", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var p []*ProjectUser - resp, err := s.client.Do(req, &p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -// ProjectGroup represents a GitLab project group. 
-type ProjectGroup struct {
-    ID        int    `json:"id"`
-    Name      string `json:"name"`
-    AvatarURL string `json:"avatar_url"`
-    WebURL    string `json:"web_url"`
-    FullName  string `json:"full_name"`
-    FullPath  string `json:"full_path"`
-}
-
-// ListProjectGroupOptions represents the available ListProjectsGroups() options.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#list-a-projects-groups
-type ListProjectGroupOptions struct {
-    ListOptions
-    Search               *string           `url:"search,omitempty" json:"search,omitempty"`
-    SharedMinAccessLevel *AccessLevelValue `url:"shared_min_access_level,omitempty" json:"shared_min_access_level,omitempty"`
-    SharedVisiableOnly   *bool             `url:"shared_visible_only,omitempty" json:"shared_visible_only,omitempty"`
-    SkipGroups           *[]int            `url:"skip_groups,omitempty" json:"skip_groups,omitempty"`
-    WithShared           *bool             `url:"with_shared,omitempty" json:"with_shared,omitempty"`
-}
-
-// ListProjectsGroups gets a list of groups for the given project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/projects.html#list-a-projects-groups
-func (s *ProjectsService) ListProjectsGroups(pid interface{}, opt *ListProjectGroupOptions, options ...RequestOptionFunc) ([]*ProjectGroup, *Response, error) {
-    project, err := parseID(pid)
-    if err != nil {
-        return nil, nil, err
-    }
-    u := fmt.Sprintf("projects/%s/groups", PathEscape(project))
-
-    req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    var p []*ProjectGroup
-    resp, err := s.client.Do(req, &p)
-    if err != nil {
-        return nil, resp, err
-    }
-
-    return p, resp, nil
-}
-
-// ProjectLanguages is a map of strings because the response is arbitrary.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#languages
-type ProjectLanguages map[string]float32
-
-// GetProjectLanguages gets a list of languages used by the project.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#languages
-func (s *ProjectsService) GetProjectLanguages(pid interface{}, options ...RequestOptionFunc) (*ProjectLanguages, *Response, error) {
-    project, err := parseID(pid)
-    if err != nil {
-        return nil, nil, err
-    }
-    u := fmt.Sprintf("projects/%s/languages", PathEscape(project))
-
-    req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    p := new(ProjectLanguages)
-    resp, err := s.client.Do(req, p)
-    if err != nil {
-        return nil, resp, err
-    }
-
-    return p, resp, nil
-}
-
-// GetProjectOptions represents the available GetProject() options.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#get-single-project
-type GetProjectOptions struct {
-    License              *bool `url:"license,omitempty" json:"license,omitempty"`
-    Statistics           *bool `url:"statistics,omitempty" json:"statistics,omitempty"`
-    WithCustomAttributes *bool `url:"with_custom_attributes,omitempty" json:"with_custom_attributes,omitempty"`
-}
-
-// GetProject gets a specific project, identified by project ID or
-// NAMESPACE/PROJECT_NAME, which is owned by the authenticated user.
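A sketch combining GetProject (whose body follows) with GetProjectLanguages from above; the project path and token are placeholders:

    package main

    import (
        "fmt"
        "log"

        gitlab "github.com/xanzy/go-gitlab"
    )

    func main() {
        git, err := gitlab.NewClient("glpat-example-token") // placeholder token
        if err != nil {
            log.Fatal(err)
        }

        // Fetch the project, including repository statistics.
        p, _, err := git.Projects.GetProject("group/project", &gitlab.GetProjectOptions{
            Statistics: gitlab.Bool(true),
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%s (default branch %s)\n", p.PathWithNamespace, p.DefaultBranch)

        // Fetch the language breakdown as percentages.
        langs, _, err := git.Projects.GetProjectLanguages("group/project")
        if err != nil {
            log.Fatal(err)
        }
        for lang, pct := range *langs {
            fmt.Printf("%s: %.1f%%\n", lang, pct)
        }
    }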
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/projects.html#get-single-project -func (s *ProjectsService) GetProject(pid interface{}, opt *GetProjectOptions, options ...RequestOptionFunc) (*Project, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - p := new(Project) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -// CreateProjectOptions represents the available CreateProject() options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#create-project -type CreateProjectOptions struct { - AllowMergeOnSkippedPipeline *bool `url:"allow_merge_on_skipped_pipeline,omitempty" json:"allow_merge_on_skipped_pipeline,omitempty"` - OnlyAllowMergeIfAllStatusChecksPassed *bool `url:"only_allow_merge_if_all_status_checks_passed,omitempty" json:"only_allow_merge_if_all_status_checks_passed,omitempty"` - AnalyticsAccessLevel *AccessControlValue `url:"analytics_access_level,omitempty" json:"analytics_access_level,omitempty"` - ApprovalsBeforeMerge *int `url:"approvals_before_merge,omitempty" json:"approvals_before_merge,omitempty"` - AutoCancelPendingPipelines *string `url:"auto_cancel_pending_pipelines,omitempty" json:"auto_cancel_pending_pipelines,omitempty"` - AutoDevopsDeployStrategy *string `url:"auto_devops_deploy_strategy,omitempty" json:"auto_devops_deploy_strategy,omitempty"` - AutoDevopsEnabled *bool `url:"auto_devops_enabled,omitempty" json:"auto_devops_enabled,omitempty"` - AutocloseReferencedIssues *bool `url:"autoclose_referenced_issues,omitempty" json:"autoclose_referenced_issues,omitempty"` - Avatar *ProjectAvatar `url:"-" json:"-"` - BuildCoverageRegex *string `url:"build_coverage_regex,omitempty" json:"build_coverage_regex,omitempty"` - BuildGitStrategy *string `url:"build_git_strategy,omitempty" json:"build_git_strategy,omitempty"` - BuildTimeout *int `url:"build_timeout,omitempty" json:"build_timeout,omitempty"` - BuildsAccessLevel *AccessControlValue `url:"builds_access_level,omitempty" json:"builds_access_level,omitempty"` - CIConfigPath *string `url:"ci_config_path,omitempty" json:"ci_config_path,omitempty"` - ContainerExpirationPolicyAttributes *ContainerExpirationPolicyAttributes `url:"container_expiration_policy_attributes,omitempty" json:"container_expiration_policy_attributes,omitempty"` - ContainerRegistryAccessLevel *AccessControlValue `url:"container_registry_access_level,omitempty" json:"container_registry_access_level,omitempty"` - DefaultBranch *string `url:"default_branch,omitempty" json:"default_branch,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - EmailsEnabled *bool `url:"emails_enabled,omitempty" json:"emails_enabled,omitempty"` - EnforceAuthChecksOnUploads *bool `url:"enforce_auth_checks_on_uploads,omitempty" json:"enforce_auth_checks_on_uploads,omitempty"` - ExternalAuthorizationClassificationLabel *string `url:"external_authorization_classification_label,omitempty" json:"external_authorization_classification_label,omitempty"` - ForkingAccessLevel *AccessControlValue `url:"forking_access_level,omitempty" json:"forking_access_level,omitempty"` - GroupWithProjectTemplatesID *int `url:"group_with_project_templates_id,omitempty" json:"group_with_project_templates_id,omitempty"` - ImportURL *string 
`url:"import_url,omitempty" json:"import_url,omitempty"` - InitializeWithReadme *bool `url:"initialize_with_readme,omitempty" json:"initialize_with_readme,omitempty"` - IssuesAccessLevel *AccessControlValue `url:"issues_access_level,omitempty" json:"issues_access_level,omitempty"` - IssueBranchTemplate *string `url:"issue_branch_template,omitempty" json:"issue_branch_template,omitempty"` - LFSEnabled *bool `url:"lfs_enabled,omitempty" json:"lfs_enabled,omitempty"` - MergeCommitTemplate *string `url:"merge_commit_template,omitempty" json:"merge_commit_template,omitempty"` - MergeMethod *MergeMethodValue `url:"merge_method,omitempty" json:"merge_method,omitempty"` - MergePipelinesEnabled *bool `url:"merge_pipelines_enabled,omitempty" json:"merge_pipelines_enabled,omitempty"` - MergeRequestsAccessLevel *AccessControlValue `url:"merge_requests_access_level,omitempty" json:"merge_requests_access_level,omitempty"` - MergeTrainsEnabled *bool `url:"merge_trains_enabled,omitempty" json:"merge_trains_enabled,omitempty"` - Mirror *bool `url:"mirror,omitempty" json:"mirror,omitempty"` - MirrorTriggerBuilds *bool `url:"mirror_trigger_builds,omitempty" json:"mirror_trigger_builds,omitempty"` - ModelExperimentsAccessLevel *AccessControlValue `url:"model_experiments_access_level,omitempty" json:"model_experiments_access_level,omitempty"` - ModelRegistryAccessLevel *AccessControlValue `url:"model_registry_access_level,omitempty" json:"model_registry_access_level,omitempty"` - Name *string `url:"name,omitempty" json:"name,omitempty"` - NamespaceID *int `url:"namespace_id,omitempty" json:"namespace_id,omitempty"` - OnlyAllowMergeIfAllDiscussionsAreResolved *bool `url:"only_allow_merge_if_all_discussions_are_resolved,omitempty" json:"only_allow_merge_if_all_discussions_are_resolved,omitempty"` - OnlyAllowMergeIfPipelineSucceeds *bool `url:"only_allow_merge_if_pipeline_succeeds,omitempty" json:"only_allow_merge_if_pipeline_succeeds,omitempty"` - OperationsAccessLevel *AccessControlValue `url:"operations_access_level,omitempty" json:"operations_access_level,omitempty"` - PackagesEnabled *bool `url:"packages_enabled,omitempty" json:"packages_enabled,omitempty"` - PagesAccessLevel *AccessControlValue `url:"pages_access_level,omitempty" json:"pages_access_level,omitempty"` - Path *string `url:"path,omitempty" json:"path,omitempty"` - PublicBuilds *bool `url:"public_builds,omitempty" json:"public_builds,omitempty"` - ReleasesAccessLevel *AccessControlValue `url:"releases_access_level,omitempty" json:"releases_access_level,omitempty"` - EnvironmentsAccessLevel *AccessControlValue `url:"environments_access_level,omitempty" json:"environments_access_level,omitempty"` - FeatureFlagsAccessLevel *AccessControlValue `url:"feature_flags_access_level,omitempty" json:"feature_flags_access_level,omitempty"` - InfrastructureAccessLevel *AccessControlValue `url:"infrastructure_access_level,omitempty" json:"infrastructure_access_level,omitempty"` - MonitorAccessLevel *AccessControlValue `url:"monitor_access_level,omitempty" json:"monitor_access_level,omitempty"` - RemoveSourceBranchAfterMerge *bool `url:"remove_source_branch_after_merge,omitempty" json:"remove_source_branch_after_merge,omitempty"` - PrintingMergeRequestLinkEnabled *bool `url:"printing_merge_request_link_enabled,omitempty" json:"printing_merge_request_link_enabled,omitempty"` - RepositoryAccessLevel *AccessControlValue `url:"repository_access_level,omitempty" json:"repository_access_level,omitempty"` - RepositoryStorage *string `url:"repository_storage,omitempty" 
json:"repository_storage,omitempty"` - RequestAccessEnabled *bool `url:"request_access_enabled,omitempty" json:"request_access_enabled,omitempty"` - RequirementsAccessLevel *AccessControlValue `url:"requirements_access_level,omitempty" json:"requirements_access_level,omitempty"` - ResolveOutdatedDiffDiscussions *bool `url:"resolve_outdated_diff_discussions,omitempty" json:"resolve_outdated_diff_discussions,omitempty"` - SecurityAndComplianceAccessLevel *AccessControlValue `url:"security_and_compliance_access_level,omitempty" json:"security_and_compliance_access_level,omitempty"` - SharedRunnersEnabled *bool `url:"shared_runners_enabled,omitempty" json:"shared_runners_enabled,omitempty"` - GroupRunnersEnabled *bool `url:"group_runners_enabled,omitempty" json:"group_runners_enabled,omitempty"` - ShowDefaultAwardEmojis *bool `url:"show_default_award_emojis,omitempty" json:"show_default_award_emojis,omitempty"` - SnippetsAccessLevel *AccessControlValue `url:"snippets_access_level,omitempty" json:"snippets_access_level,omitempty"` - SquashCommitTemplate *string `url:"squash_commit_template,omitempty" json:"squash_commit_template,omitempty"` - SquashOption *SquashOptionValue `url:"squash_option,omitempty" json:"squash_option,omitempty"` - SuggestionCommitMessage *string `url:"suggestion_commit_message,omitempty" json:"suggestion_commit_message,omitempty"` - TemplateName *string `url:"template_name,omitempty" json:"template_name,omitempty"` - TemplateProjectID *int `url:"template_project_id,omitempty" json:"template_project_id,omitempty"` - Topics *[]string `url:"topics,omitempty" json:"topics,omitempty"` - UseCustomTemplate *bool `url:"use_custom_template,omitempty" json:"use_custom_template,omitempty"` - Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"` - WikiAccessLevel *AccessControlValue `url:"wiki_access_level,omitempty" json:"wiki_access_level,omitempty"` - - // Deprecated: No longer supported in recent versions. - CIForwardDeploymentEnabled *bool `url:"ci_forward_deployment_enabled,omitempty" json:"ci_forward_deployment_enabled,omitempty"` - // Deprecated: Use ContainerRegistryAccessLevel instead. - ContainerRegistryEnabled *bool `url:"container_registry_enabled,omitempty" json:"container_registry_enabled,omitempty"` - // Deprecated: Use EmailsEnabled instead - EmailsDisabled *bool `url:"emails_disabled,omitempty" json:"emails_disabled,omitempty"` - // Deprecated: Use IssuesAccessLevel instead. - IssuesEnabled *bool `url:"issues_enabled,omitempty" json:"issues_enabled,omitempty"` - // Deprecated: No longer supported in recent versions. - IssuesTemplate *string `url:"issues_template,omitempty" json:"issues_template,omitempty"` - // Deprecated: Use BuildsAccessLevel instead. - JobsEnabled *bool `url:"jobs_enabled,omitempty" json:"jobs_enabled,omitempty"` - // Deprecated: Use MergeRequestsAccessLevel instead. - MergeRequestsEnabled *bool `url:"merge_requests_enabled,omitempty" json:"merge_requests_enabled,omitempty"` - // Deprecated: No longer supported in recent versions. - MergeRequestsTemplate *string `url:"merge_requests_template,omitempty" json:"merge_requests_template,omitempty"` - // Deprecated: No longer supported in recent versions. - ServiceDeskEnabled *bool `url:"service_desk_enabled,omitempty" json:"service_desk_enabled,omitempty"` - // Deprecated: Use SnippetsAccessLevel instead. - SnippetsEnabled *bool `url:"snippets_enabled,omitempty" json:"snippets_enabled,omitempty"` - // Deprecated: Use Topics instead. 
(Deprecated in GitLab 14.0) - TagList *[]string `url:"tag_list,omitempty" json:"tag_list,omitempty"` - // Deprecated: Use WikiAccessLevel instead. - WikiEnabled *bool `url:"wiki_enabled,omitempty" json:"wiki_enabled,omitempty"` -} - -// ContainerExpirationPolicyAttributes represents the available container -// expiration policy attributes. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#create-project -type ContainerExpirationPolicyAttributes struct { - Cadence *string `url:"cadence,omitempty" json:"cadence,omitempty"` - KeepN *int `url:"keep_n,omitempty" json:"keep_n,omitempty"` - OlderThan *string `url:"older_than,omitempty" json:"older_than,omitempty"` - NameRegexDelete *string `url:"name_regex_delete,omitempty" json:"name_regex_delete,omitempty"` - NameRegexKeep *string `url:"name_regex_keep,omitempty" json:"name_regex_keep,omitempty"` - Enabled *bool `url:"enabled,omitempty" json:"enabled,omitempty"` - - // Deprecated: Is replaced by NameRegexDelete and is internally hardwired to its value. - NameRegex *string `url:"name_regex,omitempty" json:"name_regex,omitempty"` -} - -// ProjectAvatar represents a GitLab project avatar. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#create-project -type ProjectAvatar struct { - Filename string - Image io.Reader -} - -// MarshalJSON implements the json.Marshaler interface. -func (a *ProjectAvatar) MarshalJSON() ([]byte, error) { - if a.Filename == "" && a.Image == nil { - return []byte(`""`), nil - } - type alias ProjectAvatar - return json.Marshal((*alias)(a)) -} - -// CreateProject creates a new project owned by the authenticated user. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#create-project -func (s *ProjectsService) CreateProject(opt *CreateProjectOptions, options ...RequestOptionFunc) (*Project, *Response, error) { - if opt.ContainerExpirationPolicyAttributes != nil { - // This is needed to satisfy the API. Should be deleted - // when NameRegex is removed (it's now deprecated). - opt.ContainerExpirationPolicyAttributes.NameRegex = opt.ContainerExpirationPolicyAttributes.NameRegexDelete - } - - var err error - var req *retryablehttp.Request - - if opt.Avatar == nil { - req, err = s.client.NewRequest(http.MethodPost, "projects", opt, options) - } else { - req, err = s.client.UploadRequest( - http.MethodPost, - "projects", - opt.Avatar.Image, - opt.Avatar.Filename, - UploadAvatar, - opt, - options, - ) - } - if err != nil { - return nil, nil, err - } - - p := new(Project) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -// CreateProjectForUserOptions represents the available CreateProjectForUser() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/projects.html#create-project-for-user -type CreateProjectForUserOptions CreateProjectOptions - -// CreateProjectForUser creates a new project owned by the specified user. -// Available only for admins. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/projects.html#create-project-for-user -func (s *ProjectsService) CreateProjectForUser(user int, opt *CreateProjectForUserOptions, options ...RequestOptionFunc) (*Project, *Response, error) { - if opt.ContainerExpirationPolicyAttributes != nil { - // This is needed to satisfy the API. Should be deleted - // when NameRegex is removed (it's now deprecated). 
- opt.ContainerExpirationPolicyAttributes.NameRegex = opt.ContainerExpirationPolicyAttributes.NameRegexDelete - } - - var err error - var req *retryablehttp.Request - u := fmt.Sprintf("projects/user/%d", user) - - if opt.Avatar == nil { - req, err = s.client.NewRequest(http.MethodPost, u, opt, options) - } else { - req, err = s.client.UploadRequest( - http.MethodPost, - u, - opt.Avatar.Image, - opt.Avatar.Filename, - UploadAvatar, - opt, - options, - ) - } - if err != nil { - return nil, nil, err - } - - p := new(Project) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -// EditProjectOptions represents the available EditProject() options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#edit-project -type EditProjectOptions struct { - AllowMergeOnSkippedPipeline *bool `url:"allow_merge_on_skipped_pipeline,omitempty" json:"allow_merge_on_skipped_pipeline,omitempty"` - AllowPipelineTriggerApproveDeployment *bool `url:"allow_pipeline_trigger_approve_deployment,omitempty" json:"allow_pipeline_trigger_approve_deployment,omitempty"` - OnlyAllowMergeIfAllStatusChecksPassed *bool `url:"only_allow_merge_if_all_status_checks_passed,omitempty" json:"only_allow_merge_if_all_status_checks_passed,omitempty"` - AnalyticsAccessLevel *AccessControlValue `url:"analytics_access_level,omitempty" json:"analytics_access_level,omitempty"` - ApprovalsBeforeMerge *int `url:"approvals_before_merge,omitempty" json:"approvals_before_merge,omitempty"` - AutoCancelPendingPipelines *string `url:"auto_cancel_pending_pipelines,omitempty" json:"auto_cancel_pending_pipelines,omitempty"` - AutoDevopsDeployStrategy *string `url:"auto_devops_deploy_strategy,omitempty" json:"auto_devops_deploy_strategy,omitempty"` - AutoDevopsEnabled *bool `url:"auto_devops_enabled,omitempty" json:"auto_devops_enabled,omitempty"` - AutocloseReferencedIssues *bool `url:"autoclose_referenced_issues,omitempty" json:"autoclose_referenced_issues,omitempty"` - Avatar *ProjectAvatar `url:"-" json:"avatar,omitempty"` - BuildCoverageRegex *string `url:"build_coverage_regex,omitempty" json:"build_coverage_regex,omitempty"` - BuildGitStrategy *string `url:"build_git_strategy,omitempty" json:"build_git_strategy,omitempty"` - BuildTimeout *int `url:"build_timeout,omitempty" json:"build_timeout,omitempty"` - BuildsAccessLevel *AccessControlValue `url:"builds_access_level,omitempty" json:"builds_access_level,omitempty"` - CIConfigPath *string `url:"ci_config_path,omitempty" json:"ci_config_path,omitempty"` - CIDefaultGitDepth *int `url:"ci_default_git_depth,omitempty" json:"ci_default_git_depth,omitempty"` - CIForwardDeploymentEnabled *bool `url:"ci_forward_deployment_enabled,omitempty" json:"ci_forward_deployment_enabled,omitempty"` - CIForwardDeploymentRollbackAllowed *bool `url:"ci_forward_deployment_rollback_allowed,omitempty" json:"ci_forward_deployment_rollback_allowed,omitempty"` - CISeperateCache *bool `url:"ci_separated_caches,omitempty" json:"ci_separated_caches,omitempty"` - CIRestrictPipelineCancellationRole *AccessControlValue `url:"ci_restrict_pipeline_cancellation_role,omitempty" json:"ci_restrict_pipeline_cancellation_role,omitempty"` - ContainerExpirationPolicyAttributes *ContainerExpirationPolicyAttributes `url:"container_expiration_policy_attributes,omitempty" json:"container_expiration_policy_attributes,omitempty"` - ContainerRegistryAccessLevel *AccessControlValue `url:"container_registry_access_level,omitempty" 
json:"container_registry_access_level,omitempty"` - DefaultBranch *string `url:"default_branch,omitempty" json:"default_branch,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - EmailsEnabled *bool `url:"emails_enabled,omitempty" json:"emails_enabled,omitempty"` - EnforceAuthChecksOnUploads *bool `url:"enforce_auth_checks_on_uploads,omitempty" json:"enforce_auth_checks_on_uploads,omitempty"` - ExternalAuthorizationClassificationLabel *string `url:"external_authorization_classification_label,omitempty" json:"external_authorization_classification_label,omitempty"` - ForkingAccessLevel *AccessControlValue `url:"forking_access_level,omitempty" json:"forking_access_level,omitempty"` - ImportURL *string `url:"import_url,omitempty" json:"import_url,omitempty"` - IssuesAccessLevel *AccessControlValue `url:"issues_access_level,omitempty" json:"issues_access_level,omitempty"` - IssueBranchTemplate *string `url:"issue_branch_template,omitempty" json:"issue_branch_template,omitempty"` - IssuesTemplate *string `url:"issues_template,omitempty" json:"issues_template,omitempty"` - KeepLatestArtifact *bool `url:"keep_latest_artifact,omitempty" json:"keep_latest_artifact,omitempty"` - LFSEnabled *bool `url:"lfs_enabled,omitempty" json:"lfs_enabled,omitempty"` - MergeCommitTemplate *string `url:"merge_commit_template,omitempty" json:"merge_commit_template,omitempty"` - MergeRequestDefaultTargetSelf *bool `url:"mr_default_target_self,omitempty" json:"mr_default_target_self,omitempty"` - MergeMethod *MergeMethodValue `url:"merge_method,omitempty" json:"merge_method,omitempty"` - MergePipelinesEnabled *bool `url:"merge_pipelines_enabled,omitempty" json:"merge_pipelines_enabled,omitempty"` - MergeRequestsAccessLevel *AccessControlValue `url:"merge_requests_access_level,omitempty" json:"merge_requests_access_level,omitempty"` - MergeRequestsTemplate *string `url:"merge_requests_template,omitempty" json:"merge_requests_template,omitempty"` - MergeTrainsEnabled *bool `url:"merge_trains_enabled,omitempty" json:"merge_trains_enabled,omitempty"` - Mirror *bool `url:"mirror,omitempty" json:"mirror,omitempty"` - MirrorBranchRegex *string `url:"mirror_branch_regex,omitempty" json:"mirror_branch_regex,omitempty"` - MirrorOverwritesDivergedBranches *bool `url:"mirror_overwrites_diverged_branches,omitempty" json:"mirror_overwrites_diverged_branches,omitempty"` - MirrorTriggerBuilds *bool `url:"mirror_trigger_builds,omitempty" json:"mirror_trigger_builds,omitempty"` - MirrorUserID *int `url:"mirror_user_id,omitempty" json:"mirror_user_id,omitempty"` - ModelExperimentsAccessLevel *AccessControlValue `url:"model_experiments_access_level,omitempty" json:"model_experiments_access_level,omitempty"` - ModelRegistryAccessLevel *AccessControlValue `url:"model_registry_access_level,omitempty" json:"model_registry_access_level,omitempty"` - Name *string `url:"name,omitempty" json:"name,omitempty"` - OnlyAllowMergeIfAllDiscussionsAreResolved *bool `url:"only_allow_merge_if_all_discussions_are_resolved,omitempty" json:"only_allow_merge_if_all_discussions_are_resolved,omitempty"` - OnlyAllowMergeIfPipelineSucceeds *bool `url:"only_allow_merge_if_pipeline_succeeds,omitempty" json:"only_allow_merge_if_pipeline_succeeds,omitempty"` - OnlyMirrorProtectedBranches *bool `url:"only_mirror_protected_branches,omitempty" json:"only_mirror_protected_branches,omitempty"` - OperationsAccessLevel *AccessControlValue `url:"operations_access_level,omitempty" json:"operations_access_level,omitempty"` - 
PackagesEnabled *bool `url:"packages_enabled,omitempty" json:"packages_enabled,omitempty"` - PagesAccessLevel *AccessControlValue `url:"pages_access_level,omitempty" json:"pages_access_level,omitempty"` - Path *string `url:"path,omitempty" json:"path,omitempty"` - PublicBuilds *bool `url:"public_builds,omitempty" json:"public_builds,omitempty"` - ReleasesAccessLevel *AccessControlValue `url:"releases_access_level,omitempty" json:"releases_access_level,omitempty"` - EnvironmentsAccessLevel *AccessControlValue `url:"environments_access_level,omitempty" json:"environments_access_level,omitempty"` - FeatureFlagsAccessLevel *AccessControlValue `url:"feature_flags_access_level,omitempty" json:"feature_flags_access_level,omitempty"` - InfrastructureAccessLevel *AccessControlValue `url:"infrastructure_access_level,omitempty" json:"infrastructure_access_level,omitempty"` - MonitorAccessLevel *AccessControlValue `url:"monitor_access_level,omitempty" json:"monitor_access_level,omitempty"` - RemoveSourceBranchAfterMerge *bool `url:"remove_source_branch_after_merge,omitempty" json:"remove_source_branch_after_merge,omitempty"` - PreventMergeWithoutJiraIssue *bool `url:"prevent_merge_without_jira_issue,omitempty" json:"prevent_merge_without_jira_issue,omitempty"` - PrintingMergeRequestLinkEnabled *bool `url:"printing_merge_request_link_enabled,omitempty" json:"printing_merge_request_link_enabled,omitempty"` - RepositoryAccessLevel *AccessControlValue `url:"repository_access_level,omitempty" json:"repository_access_level,omitempty"` - RepositoryStorage *string `url:"repository_storage,omitempty" json:"repository_storage,omitempty"` - RequestAccessEnabled *bool `url:"request_access_enabled,omitempty" json:"request_access_enabled,omitempty"` - RequirementsAccessLevel *AccessControlValue `url:"requirements_access_level,omitempty" json:"requirements_access_level,omitempty"` - ResolveOutdatedDiffDiscussions *bool `url:"resolve_outdated_diff_discussions,omitempty" json:"resolve_outdated_diff_discussions,omitempty"` - RestrictUserDefinedVariables *bool `url:"restrict_user_defined_variables,omitempty" json:"restrict_user_defined_variables,omitempty"` - SecurityAndComplianceAccessLevel *AccessControlValue `url:"security_and_compliance_access_level,omitempty" json:"security_and_compliance_access_level,omitempty"` - ServiceDeskEnabled *bool `url:"service_desk_enabled,omitempty" json:"service_desk_enabled,omitempty"` - SharedRunnersEnabled *bool `url:"shared_runners_enabled,omitempty" json:"shared_runners_enabled,omitempty"` - GroupRunnersEnabled *bool `url:"group_runners_enabled,omitempty" json:"group_runners_enabled,omitempty"` - ShowDefaultAwardEmojis *bool `url:"show_default_award_emojis,omitempty" json:"show_default_award_emojis,omitempty"` - SnippetsAccessLevel *AccessControlValue `url:"snippets_access_level,omitempty" json:"snippets_access_level,omitempty"` - SquashCommitTemplate *string `url:"squash_commit_template,omitempty" json:"squash_commit_template,omitempty"` - SquashOption *SquashOptionValue `url:"squash_option,omitempty" json:"squash_option,omitempty"` - SuggestionCommitMessage *string `url:"suggestion_commit_message,omitempty" json:"suggestion_commit_message,omitempty"` - Topics *[]string `url:"topics,omitempty" json:"topics,omitempty"` - Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"` - WikiAccessLevel *AccessControlValue `url:"wiki_access_level,omitempty" json:"wiki_access_level,omitempty"` - - // Deprecated: Use ContainerRegistryAccessLevel instead. 
- ContainerRegistryEnabled *bool `url:"container_registry_enabled,omitempty" json:"container_registry_enabled,omitempty"` - // Deprecated: Use EmailsEnabled instead - EmailsDisabled *bool `url:"emails_disabled,omitempty" json:"emails_disabled,omitempty"` - // Deprecated: Use IssuesAccessLevel instead. - IssuesEnabled *bool `url:"issues_enabled,omitempty" json:"issues_enabled,omitempty"` - // Deprecated: Use BuildsAccessLevel instead. - JobsEnabled *bool `url:"jobs_enabled,omitempty" json:"jobs_enabled,omitempty"` - // Deprecated: Use MergeRequestsAccessLevel instead. - MergeRequestsEnabled *bool `url:"merge_requests_enabled,omitempty" json:"merge_requests_enabled,omitempty"` - // Deprecated: Use SnippetsAccessLevel instead. - SnippetsEnabled *bool `url:"snippets_enabled,omitempty" json:"snippets_enabled,omitempty"` - // Deprecated: Use Topics instead. (Deprecated in GitLab 14.0) - TagList *[]string `url:"tag_list,omitempty" json:"tag_list,omitempty"` - // Deprecated: Use WikiAccessLevel instead. - WikiEnabled *bool `url:"wiki_enabled,omitempty" json:"wiki_enabled,omitempty"` -} - -// EditProject updates an existing project. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#edit-project -func (s *ProjectsService) EditProject(pid interface{}, opt *EditProjectOptions, options ...RequestOptionFunc) (*Project, *Response, error) { - if opt.ContainerExpirationPolicyAttributes != nil { - // This is needed to satisfy the API. Should be deleted - // when NameRegex is removed (it's now deprecated). - opt.ContainerExpirationPolicyAttributes.NameRegex = opt.ContainerExpirationPolicyAttributes.NameRegexDelete - } - - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s", PathEscape(project)) - - var req *retryablehttp.Request - - if opt.Avatar == nil || (opt.Avatar.Filename == "" && opt.Avatar.Image == nil) { - req, err = s.client.NewRequest(http.MethodPut, u, opt, options) - } else { - req, err = s.client.UploadRequest( - http.MethodPut, - u, - opt.Avatar.Image, - opt.Avatar.Filename, - UploadAvatar, - opt, - options, - ) - } - if err != nil { - return nil, nil, err - } - - p := new(Project) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -// ForkProjectOptions represents the available ForkProject() options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#fork-project -type ForkProjectOptions struct { - Description *string `url:"description,omitempty" json:"description,omitempty"` - MergeRequestDefaultTargetSelf *bool `url:"mr_default_target_self,omitempty" json:"mr_default_target_self,omitempty"` - Name *string `url:"name,omitempty" json:"name,omitempty"` - NamespaceID *int `url:"namespace_id,omitempty" json:"namespace_id,omitempty"` - NamespacePath *string `url:"namespace_path,omitempty" json:"namespace_path,omitempty"` - Path *string `url:"path,omitempty" json:"path,omitempty"` - Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"` - - // Deprecated: This parameter has been split into NamespaceID and NamespacePath. - Namespace *string `url:"namespace,omitempty" json:"namespace,omitempty"` -} - -// ForkProject forks a project into the user namespace of the authenticated -// user. 
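CreateProjectOptions and EditProjectOptions share most of their fields, so a common flow is to create a project with a few options and tighten the rest afterwards. A sketch with placeholder values; the local ptr helper merely stands in for the library's pointer helpers:

```go
package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

// ptr is a local stand-in for the library's pointer helpers.
func ptr[T any](v T) *T { return &v }

func main() {
	git, err := gitlab.NewClient("glpat-example-token") // hypothetical token
	if err != nil {
		log.Fatal(err)
	}

	// Create a private project, then tighten its merge settings.
	p, _, err := git.Projects.CreateProject(&gitlab.CreateProjectOptions{
		Name:                 ptr("demo-project"),
		Visibility:           ptr(gitlab.PrivateVisibility),
		InitializeWithReadme: ptr(true),
	})
	if err != nil {
		log.Fatal(err)
	}

	p, _, err = git.Projects.EditProject(p.ID, &gitlab.EditProjectOptions{
		OnlyAllowMergeIfPipelineSucceeds: ptr(true),
		RemoveSourceBranchAfterMerge:     ptr(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("configured", p.PathWithNamespace)
}
```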
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#fork-project
-func (s *ProjectsService) ForkProject(pid interface{}, opt *ForkProjectOptions, options ...RequestOptionFunc) (*Project, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/fork", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	p := new(Project)
-	resp, err := s.client.Do(req, p)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return p, resp, nil
-}
-
-// StarProject stars a given project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/projects.html#star-a-project
-func (s *ProjectsService) StarProject(pid interface{}, options ...RequestOptionFunc) (*Project, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/star", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	p := new(Project)
-	resp, err := s.client.Do(req, p)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return p, resp, nil
-}
-
-// ListProjectInvidedGroupOptions represents the available
-// ListProjectsInvitedGroups() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/projects.html#list-a-projects-invited-groups
-type ListProjectInvidedGroupOptions struct {
-	ListOptions
-	Search               *string           `url:"search,omitempty" json:"search,omitempty"`
-	MinAccessLevel       *AccessLevelValue `url:"min_access_level,omitempty" json:"min_access_level,omitempty"`
-	Relation             *[]string         `url:"relation,omitempty" json:"relation,omitempty"`
-	WithCustomAttributes *bool             `url:"with_custom_attributes,omitempty" json:"with_custom_attributes,omitempty"`
-}
-
-// ListProjectsInvitedGroups lists the invited groups of a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/projects.html#list-a-projects-invited-groups
-func (s *ProjectsService) ListProjectsInvitedGroups(pid interface{}, opt *ListProjectInvidedGroupOptions, options ...RequestOptionFunc) ([]*ProjectGroup, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/invited_groups", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var pg []*ProjectGroup
-	resp, err := s.client.Do(req, &pg)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return pg, resp, nil
-}
-
-// UnstarProject unstars a given project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/projects.html#unstar-a-project
-func (s *ProjectsService) UnstarProject(pid interface{}, options ...RequestOptionFunc) (*Project, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/unstar", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	p := new(Project)
-	resp, err := s.client.Do(req, p)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return p, resp, nil
-}
-
-// ArchiveProject archives the project if the user is either admin or the
-// project owner of this project.
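ForkProject followed by StarProject is a typical pairing when mirroring an upstream repository. A sketch under the same assumptions as above; project ID 123 and namespace ID 99 are placeholders:

```go
package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func ptr[T any](v T) *T { return &v }

func main() {
	git, err := gitlab.NewClient("glpat-example-token") // hypothetical token
	if err != nil {
		log.Fatal(err)
	}

	// Fork project 123 into namespace 99, then star the fork.
	fork, _, err := git.Projects.ForkProject(123, &gitlab.ForkProjectOptions{
		NamespaceID: ptr(99),
		Path:        ptr("my-fork"),
	})
	if err != nil {
		log.Fatal(err)
	}

	starred, _, err := git.Projects.StarProject(fork.ID)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("star count:", starred.StarCount)
}
```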
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/projects.html#archive-a-project -func (s *ProjectsService) ArchiveProject(pid interface{}, options ...RequestOptionFunc) (*Project, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/archive", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - p := new(Project) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -// UnarchiveProject unarchives the project if the user is either admin or -// the project owner of this project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/projects.html#unarchive-a-project -func (s *ProjectsService) UnarchiveProject(pid interface{}, options ...RequestOptionFunc) (*Project, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/unarchive", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - p := new(Project) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -// DeleteProject removes a project including all associated resources -// (issues, merge requests etc.) -// -// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#delete-project -func (s *ProjectsService) DeleteProject(pid interface{}, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// ShareWithGroupOptions represents options to share project with groups -// -// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#share-project-with-group -type ShareWithGroupOptions struct { - ExpiresAt *string `url:"expires_at" json:"expires_at"` - GroupAccess *AccessLevelValue `url:"group_access" json:"group_access"` - GroupID *int `url:"group_id" json:"group_id"` -} - -// ShareProjectWithGroup allows to share a project with a group. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#share-project-with-group -func (s *ProjectsService) ShareProjectWithGroup(pid interface{}, opt *ShareWithGroupOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/share", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// DeleteSharedProjectFromGroup allows to unshare a project from a group. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#delete-a-shared-project-link-within-a-group -func (s *ProjectsService) DeleteSharedProjectFromGroup(pid interface{}, groupID int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/share/%d", PathEscape(project), groupID) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// ProjectMember represents a project member. 
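ShareProjectWithGroup and DeleteSharedProjectFromGroup form a grant/revoke pair. A sketch with placeholder IDs; gitlab.DeveloperPermissions is one of the client's AccessLevelValue constants, which are defined elsewhere in the vendored package:

```go
package main

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func ptr[T any](v T) *T { return &v }

func main() {
	git, err := gitlab.NewClient("glpat-example-token") // hypothetical token
	if err != nil {
		log.Fatal(err)
	}

	// Grant group 7 developer access to project 123 ...
	_, err = git.Projects.ShareProjectWithGroup(123, &gitlab.ShareWithGroupOptions{
		GroupID:     ptr(7),
		GroupAccess: ptr(gitlab.DeveloperPermissions),
	})
	if err != nil {
		log.Fatal(err)
	}

	// ... and later revoke the share again.
	if _, err := git.Projects.DeleteSharedProjectFromGroup(123, 7); err != nil {
		log.Fatal(err)
	}
}
```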
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/members.html#list-all-members-of-a-group-or-project
-type ProjectMember struct {
-	ID          int              `json:"id"`
-	Username    string           `json:"username"`
-	Email       string           `json:"email"`
-	Name        string           `json:"name"`
-	State       string           `json:"state"`
-	CreatedAt   *time.Time       `json:"created_at"`
-	ExpiresAt   *ISOTime         `json:"expires_at"`
-	AccessLevel AccessLevelValue `json:"access_level"`
-	WebURL      string           `json:"web_url"`
-	AvatarURL   string           `json:"avatar_url"`
-}
-
-// HookCustomHeader represents a project or group hook custom header.
-// Note: "Key" is returned from the Get operation, but "Value" is not.
-// The List operation doesn't return any headers at all for Projects,
-// but does return headers for Groups.
-type HookCustomHeader struct {
-	Key   string `json:"key"`
-	Value string `json:"value"`
-}
-
-// ProjectHook represents a project hook.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/projects.html#list-project-hooks
-type ProjectHook struct {
-	ID                        int                 `json:"id"`
-	URL                       string              `json:"url"`
-	ConfidentialNoteEvents    bool                `json:"confidential_note_events"`
-	ProjectID                 int                 `json:"project_id"`
-	PushEvents                bool                `json:"push_events"`
-	PushEventsBranchFilter    string              `json:"push_events_branch_filter"`
-	IssuesEvents              bool                `json:"issues_events"`
-	ConfidentialIssuesEvents  bool                `json:"confidential_issues_events"`
-	MergeRequestsEvents       bool                `json:"merge_requests_events"`
-	TagPushEvents             bool                `json:"tag_push_events"`
-	NoteEvents                bool                `json:"note_events"`
-	JobEvents                 bool                `json:"job_events"`
-	PipelineEvents            bool                `json:"pipeline_events"`
-	WikiPageEvents            bool                `json:"wiki_page_events"`
-	DeploymentEvents          bool                `json:"deployment_events"`
-	ReleasesEvents            bool                `json:"releases_events"`
-	EnableSSLVerification     bool                `json:"enable_ssl_verification"`
-	CreatedAt                 *time.Time          `json:"created_at"`
-	ResourceAccessTokenEvents bool                `json:"resource_access_token_events"`
-	CustomWebhookTemplate     string              `json:"custom_webhook_template"`
-	CustomHeaders             []*HookCustomHeader `json:"custom_headers"`
-}
-
-// ListProjectHooksOptions represents the available ListProjectHooks() options.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#list-project-hooks
-type ListProjectHooksOptions ListOptions
-
-// ListProjectHooks gets a list of project hooks.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/projects.html#list-project-hooks
-func (s *ProjectsService) ListProjectHooks(pid interface{}, opt *ListProjectHooksOptions, options ...RequestOptionFunc) ([]*ProjectHook, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/hooks", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var ph []*ProjectHook
-	resp, err := s.client.Do(req, &ph)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return ph, resp, nil
-}
-
-// GetProjectHook gets a specific hook for a project.
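Since ListProjectHooksOptions is just a ListOptions alias, pagination fields can be set directly on the literal. A short sketch under the usual placeholder assumptions:

```go
package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("glpat-example-token") // hypothetical token
	if err != nil {
		log.Fatal(err)
	}

	// ListProjectHooksOptions is ListOptions, so PerPage/Page apply directly.
	hooks, _, err := git.Projects.ListProjectHooks(123, &gitlab.ListProjectHooksOptions{PerPage: 50})
	if err != nil {
		log.Fatal(err)
	}
	for _, h := range hooks {
		fmt.Println(h.ID, h.URL, h.PushEvents)
	}
}
```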
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/projects.html#get-project-hook -func (s *ProjectsService) GetProjectHook(pid interface{}, hook int, options ...RequestOptionFunc) (*ProjectHook, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/hooks/%d", PathEscape(project), hook) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ph := new(ProjectHook) - resp, err := s.client.Do(req, ph) - if err != nil { - return nil, resp, err - } - - return ph, resp, nil -} - -// AddProjectHookOptions represents the available AddProjectHook() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/projects.html#add-project-hook -type AddProjectHookOptions struct { - ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` - ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` - DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"` - EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"` - IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` - JobEvents *bool `url:"job_events,omitempty" json:"job_events,omitempty"` - MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` - NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` - PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` - PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` - PushEventsBranchFilter *string `url:"push_events_branch_filter,omitempty" json:"push_events_branch_filter,omitempty"` - ReleasesEvents *bool `url:"releases_events,omitempty" json:"releases_events,omitempty"` - TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` - Token *string `url:"token,omitempty" json:"token,omitempty"` - URL *string `url:"url,omitempty" json:"url,omitempty"` - WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` - ResourceAccessTokenEvents *bool `url:"resource_access_token_events,omitempty" json:"resource_access_token_events,omitempty"` - CustomWebhookTemplate *string `url:"custom_webhook_template,omitempty" json:"custom_webhook_template,omitempty"` - CustomHeaders *[]*HookCustomHeader `url:"custom_headers,omitempty" json:"custom_headers,omitempty"` -} - -// AddProjectHook adds a hook to a specified project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/projects.html#add-project-hook -func (s *ProjectsService) AddProjectHook(pid interface{}, opt *AddProjectHookOptions, options ...RequestOptionFunc) (*ProjectHook, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/hooks", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - ph := new(ProjectHook) - resp, err := s.client.Do(req, ph) - if err != nil { - return nil, resp, err - } - - return ph, resp, nil -} - -// EditProjectHookOptions represents the available EditProjectHook() options. 
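AddProjectHook registers a webhook receiver; the Token field becomes the X-Gitlab-Token header GitLab sends with each delivery. A sketch with a hypothetical receiver URL and secret:

```go
package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func ptr[T any](v T) *T { return &v }

func main() {
	git, err := gitlab.NewClient("glpat-example-token") // hypothetical token
	if err != nil {
		log.Fatal(err)
	}

	hook, _, err := git.Projects.AddProjectHook(123, &gitlab.AddProjectHookOptions{
		URL:                   ptr("https://ci.example.com/webhook"), // hypothetical receiver
		Token:                 ptr("shared-secret"),
		PushEvents:            ptr(true),
		MergeRequestsEvents:   ptr(true),
		EnableSSLVerification: ptr(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created hook", hook.ID)
}
```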
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/projects.html#edit-project-hook
-type EditProjectHookOptions struct {
-	ConfidentialIssuesEvents  *bool                 `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"`
-	ConfidentialNoteEvents    *bool                 `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"`
-	DeploymentEvents          *bool                 `url:"deployment_events,omitempty" json:"deployment_events,omitempty"`
-	EnableSSLVerification     *bool                 `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"`
-	IssuesEvents              *bool                 `url:"issues_events,omitempty" json:"issues_events,omitempty"`
-	JobEvents                 *bool                 `url:"job_events,omitempty" json:"job_events,omitempty"`
-	MergeRequestsEvents       *bool                 `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"`
-	NoteEvents                *bool                 `url:"note_events,omitempty" json:"note_events,omitempty"`
-	PipelineEvents            *bool                 `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"`
-	PushEvents                *bool                 `url:"push_events,omitempty" json:"push_events,omitempty"`
-	PushEventsBranchFilter    *string               `url:"push_events_branch_filter,omitempty" json:"push_events_branch_filter,omitempty"`
-	ReleasesEvents            *bool                 `url:"releases_events,omitempty" json:"releases_events,omitempty"`
-	TagPushEvents             *bool                 `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"`
-	Token                     *string               `url:"token,omitempty" json:"token,omitempty"`
-	URL                       *string               `url:"url,omitempty" json:"url,omitempty"`
-	WikiPageEvents            *bool                 `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"`
-	ResourceAccessTokenEvents *bool                 `url:"resource_access_token_events,omitempty" json:"resource_access_token_events,omitempty"`
-	CustomWebhookTemplate     *string               `url:"custom_webhook_template,omitempty" json:"custom_webhook_template,omitempty"`
-	CustomHeaders             *[]*HookCustomHeader  `url:"custom_headers,omitempty" json:"custom_headers,omitempty"`
-}
-
-// EditProjectHook edits a hook for a specified project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/projects.html#edit-project-hook
-func (s *ProjectsService) EditProjectHook(pid interface{}, hook int, opt *EditProjectHookOptions, options ...RequestOptionFunc) (*ProjectHook, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/hooks/%d", PathEscape(project), hook)
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	ph := new(ProjectHook)
-	resp, err := s.client.Do(req, ph)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return ph, resp, nil
-}
-
-// DeleteProjectHook removes a hook from a project. This is an idempotent
-// method and can be called multiple times, whether or not the hook exists.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/projects.html#delete-project-hook
-func (s *ProjectsService) DeleteProjectHook(pid interface{}, hook int, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/hooks/%d", PathEscape(project), hook)
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// TriggerTestProjectHook triggers a test hook for a specified project.
-//
-// In GitLab 17.0 and later, this endpoint has a special rate limit.
-// In GitLab 17.0 the rate was three requests per minute for each project hook.
-// In GitLab 17.1 this was changed to five requests per minute for each project -// and authenticated user. -// -// To disable this limit on self-managed GitLab and GitLab Dedicated, -// an administrator can disable the feature flag named web_hook_test_api_endpoint_rate_limit. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/projects.html#trigger-a-test-project-hook -func (s *ProjectsService) TriggerTestProjectHook(pid interface{}, hook int, event ProjectHookEvent, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/hooks/%d/test/%s", PathEscape(project), hook, string(event)) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// SetHookCustomHeaderOptions represents a project or group hook custom header. -// If the header isn't present, it will be created. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/projects.html#set-a-custom-header -type SetHookCustomHeaderOptions struct { - Value *string `json:"value,omitempty"` -} - -// SetProjectCustomHeader creates or updates a project custom webhook header. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/projects.html#set-a-custom-header -func (s *ProjectsService) SetProjectCustomHeader(pid interface{}, hook int, key string, opt *SetHookCustomHeaderOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/hooks/%d/custom_headers/%s", PathEscape(project), hook, key) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// DeleteProjectCustomHeader deletes a project custom webhook header. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/projects.html#delete-a-custom-header -func (s *ProjectsService) DeleteProjectCustomHeader(pid interface{}, hook int, key string, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/hooks/%d/custom_headers/%s", PathEscape(project), hook, key) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// ProjectForkRelation represents a project fork relationship. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/projects.html#admin-fork-relation -type ProjectForkRelation struct { - ID int `json:"id"` - ForkedToProjectID int `json:"forked_to_project_id"` - ForkedFromProjectID int `json:"forked_from_project_id"` - CreatedAt *time.Time `json:"created_at"` - UpdatedAt *time.Time `json:"updated_at"` -} - -// CreateProjectForkRelation creates a forked from/to relation between -// existing projects. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/projects.html#create-a-forked-fromto-relation-between-existing-projects. 
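Hook maintenance typically combines EditProjectHook, the custom-header setters, and DeleteProjectHook. A sketch with hypothetical project and hook IDs; note that, per the HookCustomHeader comment above, a header's value is never returned on reads:

```go
package main

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func ptr[T any](v T) *T { return &v }

func main() {
	git, err := gitlab.NewClient("glpat-example-token") // hypothetical token
	if err != nil {
		log.Fatal(err)
	}

	const projectID, hookID = 123, 456 // hypothetical IDs

	// Narrow the hook to pushes on main only.
	_, _, err = git.Projects.EditProjectHook(projectID, hookID, &gitlab.EditProjectHookOptions{
		PushEventsBranchFilter: ptr("main"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Attach a custom header; the value is write-only from the API's view.
	_, err = git.Projects.SetProjectCustomHeader(projectID, hookID, "X-Example-Env",
		&gitlab.SetHookCustomHeaderOptions{Value: ptr("staging")})
	if err != nil {
		log.Fatal(err)
	}

	// Remove the hook again when it is no longer needed.
	if _, err := git.Projects.DeleteProjectHook(projectID, hookID); err != nil {
		log.Fatal(err)
	}
}
```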
-func (s *ProjectsService) CreateProjectForkRelation(pid interface{}, fork int, options ...RequestOptionFunc) (*ProjectForkRelation, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/fork/%d", PathEscape(project), fork) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - pfr := new(ProjectForkRelation) - resp, err := s.client.Do(req, pfr) - if err != nil { - return nil, resp, err - } - - return pfr, resp, nil -} - -// DeleteProjectForkRelation deletes an existing forked from relationship. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/projects.html#delete-an-existing-forked-from-relationship -func (s *ProjectsService) DeleteProjectForkRelation(pid interface{}, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/fork", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// ProjectFile represents an uploaded project file. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#upload-a-file -type ProjectFile struct { - Alt string `json:"alt"` - URL string `json:"url"` - FullPath string `json:"full_path"` - Markdown string `json:"markdown"` -} - -// UploadFile uploads a file. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#upload-a-file -func (s *ProjectsService) UploadFile(pid interface{}, content io.Reader, filename string, options ...RequestOptionFunc) (*ProjectFile, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/uploads", PathEscape(project)) - - req, err := s.client.UploadRequest( - http.MethodPost, - u, - content, - filename, - UploadFile, - nil, - options, - ) - if err != nil { - return nil, nil, err - } - - pf := new(ProjectFile) - resp, err := s.client.Do(req, pf) - if err != nil { - return nil, resp, err - } - - return pf, resp, nil -} - -// UploadAvatar uploads an avatar. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/projects.html#upload-a-project-avatar -func (s *ProjectsService) UploadAvatar(pid interface{}, avatar io.Reader, filename string, options ...RequestOptionFunc) (*Project, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s", PathEscape(project)) - - req, err := s.client.UploadRequest( - http.MethodPut, - u, - avatar, - filename, - UploadAvatar, - nil, - options, - ) - if err != nil { - return nil, nil, err - } - - p := new(Project) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -// ListProjectForks gets a list of project forks. 
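UploadFile streams any io.Reader to the project's uploads endpoint and returns a ProjectFile whose Markdown field can be pasted straight into issues or merge requests. A sketch with a hypothetical local file:

```go
package main

import (
	"fmt"
	"log"
	"os"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("glpat-example-token") // hypothetical token
	if err != nil {
		log.Fatal(err)
	}

	f, err := os.Open("diagram.png") // hypothetical local file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	pf, _, err := git.Projects.UploadFile(123, f, "diagram.png")
	if err != nil {
		log.Fatal(err)
	}
	// The returned Markdown snippet references the uploaded file.
	fmt.Println(pf.Markdown)
}
```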
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/projects.html#list-forks-of-a-project -func (s *ProjectsService) ListProjectForks(pid interface{}, opt *ListProjectsOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/forks", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var forks []*Project - resp, err := s.client.Do(req, &forks) - if err != nil { - return nil, resp, err - } - - return forks, resp, nil -} - -// ProjectPushRules represents a project push rule. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/projects.html#push-rules -type ProjectPushRules struct { - ID int `json:"id"` - ProjectID int `json:"project_id"` - CommitMessageRegex string `json:"commit_message_regex"` - CommitMessageNegativeRegex string `json:"commit_message_negative_regex"` - BranchNameRegex string `json:"branch_name_regex"` - DenyDeleteTag bool `json:"deny_delete_tag"` - CreatedAt *time.Time `json:"created_at"` - MemberCheck bool `json:"member_check"` - PreventSecrets bool `json:"prevent_secrets"` - AuthorEmailRegex string `json:"author_email_regex"` - FileNameRegex string `json:"file_name_regex"` - MaxFileSize int `json:"max_file_size"` - CommitCommitterCheck bool `json:"commit_committer_check"` - CommitCommitterNameCheck bool `json:"commit_committer_name_check"` - RejectUnsignedCommits bool `json:"reject_unsigned_commits"` - RejectNonDCOCommits bool `json:"reject_non_dco_commits"` -} - -// GetProjectPushRules gets the push rules of a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/projects.html#get-project-push-rules -func (s *ProjectsService) GetProjectPushRules(pid interface{}, options ...RequestOptionFunc) (*ProjectPushRules, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/push_rule", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ppr := new(ProjectPushRules) - resp, err := s.client.Do(req, ppr) - if err != nil { - return nil, resp, err - } - - return ppr, resp, nil -} - -// AddProjectPushRuleOptions represents the available AddProjectPushRule() -// options. 
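Push rules are a singleton per project, so GetProjectPushRules takes no rule ID. A short sketch reading the current rule set:

```go
package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("glpat-example-token") // hypothetical token
	if err != nil {
		log.Fatal(err)
	}

	rules, _, err := git.Projects.GetProjectPushRules(123)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("commit message regex: %q, deny tag deletion: %v\n",
		rules.CommitMessageRegex, rules.DenyDeleteTag)
}
```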
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/projects.html#add-project-push-rule -type AddProjectPushRuleOptions struct { - AuthorEmailRegex *string `url:"author_email_regex,omitempty" json:"author_email_regex,omitempty"` - BranchNameRegex *string `url:"branch_name_regex,omitempty" json:"branch_name_regex,omitempty"` - CommitCommitterCheck *bool `url:"commit_committer_check,omitempty" json:"commit_committer_check,omitempty"` - CommitCommitterNameCheck *bool `url:"commit_committer_name_check,omitempty" json:"commit_committer_name_check,omitempty"` - CommitMessageNegativeRegex *string `url:"commit_message_negative_regex,omitempty" json:"commit_message_negative_regex,omitempty"` - CommitMessageRegex *string `url:"commit_message_regex,omitempty" json:"commit_message_regex,omitempty"` - DenyDeleteTag *bool `url:"deny_delete_tag,omitempty" json:"deny_delete_tag,omitempty"` - FileNameRegex *string `url:"file_name_regex,omitempty" json:"file_name_regex,omitempty"` - MaxFileSize *int `url:"max_file_size,omitempty" json:"max_file_size,omitempty"` - MemberCheck *bool `url:"member_check,omitempty" json:"member_check,omitempty"` - PreventSecrets *bool `url:"prevent_secrets,omitempty" json:"prevent_secrets,omitempty"` - RejectUnsignedCommits *bool `url:"reject_unsigned_commits,omitempty" json:"reject_unsigned_commits,omitempty"` - RejectNonDCOCommits *bool `url:"reject_non_dco_commits,omitempty" json:"reject_non_dco_commits,omitempty"` -} - -// AddProjectPushRule adds a push rule to a specified project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/projects.html#add-project-push-rule -func (s *ProjectsService) AddProjectPushRule(pid interface{}, opt *AddProjectPushRuleOptions, options ...RequestOptionFunc) (*ProjectPushRules, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/push_rule", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - ppr := new(ProjectPushRules) - resp, err := s.client.Do(req, ppr) - if err != nil { - return nil, resp, err - } - - return ppr, resp, nil -} - -// EditProjectPushRuleOptions represents the available EditProjectPushRule() -// options. 
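AddProjectPushRule creates that singleton rule. A sketch enforcing an issue reference in commit messages; the regex is only an illustration:

```go
package main

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func ptr[T any](v T) *T { return &v }

func main() {
	git, err := gitlab.NewClient("glpat-example-token") // hypothetical token
	if err != nil {
		log.Fatal(err)
	}

	// Require an issue reference in every commit message and block tag deletion.
	_, _, err = git.Projects.AddProjectPushRule(123, &gitlab.AddProjectPushRuleOptions{
		CommitMessageRegex: ptr(`^(JIRA|GL)-\d+: `),
		DenyDeleteTag:      ptr(true),
		MemberCheck:        ptr(true),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```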
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/projects.html#edit-project-push-rule -type EditProjectPushRuleOptions struct { - AuthorEmailRegex *string `url:"author_email_regex,omitempty" json:"author_email_regex,omitempty"` - BranchNameRegex *string `url:"branch_name_regex,omitempty" json:"branch_name_regex,omitempty"` - CommitCommitterCheck *bool `url:"commit_committer_check,omitempty" json:"commit_committer_check,omitempty"` - CommitCommitterNameCheck *bool `url:"commit_committer_name_check,omitempty" json:"commit_committer_name_check,omitempty"` - CommitMessageNegativeRegex *string `url:"commit_message_negative_regex,omitempty" json:"commit_message_negative_regex,omitempty"` - CommitMessageRegex *string `url:"commit_message_regex,omitempty" json:"commit_message_regex,omitempty"` - DenyDeleteTag *bool `url:"deny_delete_tag,omitempty" json:"deny_delete_tag,omitempty"` - FileNameRegex *string `url:"file_name_regex,omitempty" json:"file_name_regex,omitempty"` - MaxFileSize *int `url:"max_file_size,omitempty" json:"max_file_size,omitempty"` - MemberCheck *bool `url:"member_check,omitempty" json:"member_check,omitempty"` - PreventSecrets *bool `url:"prevent_secrets,omitempty" json:"prevent_secrets,omitempty"` - RejectUnsignedCommits *bool `url:"reject_unsigned_commits,omitempty" json:"reject_unsigned_commits,omitempty"` - RejectNonDCOCommits *bool `url:"reject_non_dco_commits,omitempty" json:"reject_non_dco_commits,omitempty"` -} - -// EditProjectPushRule edits a push rule for a specified project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/projects.html#edit-project-push-rule -func (s *ProjectsService) EditProjectPushRule(pid interface{}, opt *EditProjectPushRuleOptions, options ...RequestOptionFunc) (*ProjectPushRules, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/push_rule", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - ppr := new(ProjectPushRules) - resp, err := s.client.Do(req, ppr) - if err != nil { - return nil, resp, err - } - - return ppr, resp, nil -} - -// DeleteProjectPushRule removes a push rule from a project. This is an -// idempotent method and can be called multiple times. Either the push rule is -// available or not. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/projects.html#delete-project-push-rule -func (s *ProjectsService) DeleteProjectPushRule(pid interface{}, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/push_rule", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// ProjectApprovals represents GitLab project level merge request approvals. 
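Editing and removing the rule mirror the create call; again no rule ID is passed because each project has at most one push rule. A sketch under the usual assumptions; the GitLab API documents max_file_size in megabytes:

```go
package main

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func ptr[T any](v T) *T { return &v }

func main() {
	git, err := gitlab.NewClient("glpat-example-token") // hypothetical token
	if err != nil {
		log.Fatal(err)
	}

	// Cap pushed files at 50 MB.
	_, _, err = git.Projects.EditProjectPushRule(123, &gitlab.EditProjectPushRuleOptions{
		MaxFileSize: ptr(50),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Remove the rule entirely; the call is documented above as idempotent.
	if _, err := git.Projects.DeleteProjectPushRule(123); err != nil {
		log.Fatal(err)
	}
}
```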
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/merge_request_approvals.html#project-level-mr-approvals
-type ProjectApprovals struct {
-	Approvers []*MergeRequestApproverUser `json:"approvers"`
-	ApproverGroups []*MergeRequestApproverGroup `json:"approver_groups"`
-	ApprovalsBeforeMerge int `json:"approvals_before_merge"`
-	ResetApprovalsOnPush bool `json:"reset_approvals_on_push"`
-	DisableOverridingApproversPerMergeRequest bool `json:"disable_overriding_approvers_per_merge_request"`
-	MergeRequestsAuthorApproval bool `json:"merge_requests_author_approval"`
-	MergeRequestsDisableCommittersApproval bool `json:"merge_requests_disable_committers_approval"`
-	RequirePasswordToApprove bool `json:"require_password_to_approve"`
-	SelectiveCodeOwnerRemovals bool `json:"selective_code_owner_removals,omitempty"`
-}
-
-// GetApprovalConfiguration gets the approval configuration for a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/merge_request_approvals.html#get-configuration
-func (s *ProjectsService) GetApprovalConfiguration(pid interface{}, options ...RequestOptionFunc) (*ProjectApprovals, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/approvals", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	pa := new(ProjectApprovals)
-	resp, err := s.client.Do(req, pa)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return pa, resp, nil
-}
-
-// ChangeApprovalConfigurationOptions represents the available
-// ChangeApprovalConfiguration() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/merge_request_approvals.html#change-configuration
-type ChangeApprovalConfigurationOptions struct {
-	ApprovalsBeforeMerge *int `url:"approvals_before_merge,omitempty" json:"approvals_before_merge,omitempty"`
-	DisableOverridingApproversPerMergeRequest *bool `url:"disable_overriding_approvers_per_merge_request,omitempty" json:"disable_overriding_approvers_per_merge_request,omitempty"`
-	MergeRequestsAuthorApproval *bool `url:"merge_requests_author_approval,omitempty" json:"merge_requests_author_approval,omitempty"`
-	MergeRequestsDisableCommittersApproval *bool `url:"merge_requests_disable_committers_approval,omitempty" json:"merge_requests_disable_committers_approval,omitempty"`
-	RequirePasswordToApprove *bool `url:"require_password_to_approve,omitempty" json:"require_password_to_approve,omitempty"`
-	ResetApprovalsOnPush *bool `url:"reset_approvals_on_push,omitempty" json:"reset_approvals_on_push,omitempty"`
-	SelectiveCodeOwnerRemovals *bool `url:"selective_code_owner_removals,omitempty" json:"selective_code_owner_removals,omitempty"`
-}
-
-// ChangeApprovalConfiguration updates the approval configuration for a project.
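A short sketch of the read-then-update pattern these two calls support; pid may be a numeric project ID or a "group/project" path, and the threshold of two approvals is an illustrative policy, not anything mandated by the API:

package example

import (
	gitlab "github.com/xanzy/go-gitlab"
)

// tightenApprovals reads the current configuration and raises the approval
// count when it is below two.
func tightenApprovals(git *gitlab.Client, pid interface{}) error {
	cur, _, err := git.Projects.GetApprovalConfiguration(pid)
	if err != nil {
		return err
	}
	if cur.ApprovalsBeforeMerge >= 2 {
		return nil // already strict enough
	}
	_, _, err = git.Projects.ChangeApprovalConfiguration(pid, &gitlab.ChangeApprovalConfigurationOptions{
		ApprovalsBeforeMerge: gitlab.Ptr(2),
		ResetApprovalsOnPush: gitlab.Ptr(true),
	})
	return err
}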
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/merge_request_approvals.html#change-configuration
-func (s *ProjectsService) ChangeApprovalConfiguration(pid interface{}, opt *ChangeApprovalConfigurationOptions, options ...RequestOptionFunc) (*ProjectApprovals, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/approvals", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	pa := new(ProjectApprovals)
-	resp, err := s.client.Do(req, pa)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return pa, resp, nil
-}
-
-// GetProjectApprovalRulesListsOptions represents the available GetProjectApprovalRules() options.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/merge_request_approvals.html#get-project-level-rules
-type GetProjectApprovalRulesListsOptions ListOptions
-
-// GetProjectApprovalRules looks up the list of project-level approval rules.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/merge_request_approvals.html#get-project-level-rules
-func (s *ProjectsService) GetProjectApprovalRules(pid interface{}, opt *GetProjectApprovalRulesListsOptions, options ...RequestOptionFunc) ([]*ProjectApprovalRule, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/approval_rules", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var par []*ProjectApprovalRule
-	resp, err := s.client.Do(req, &par)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return par, resp, nil
-}
-
-// GetProjectApprovalRule gets a single project-level approval rule.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/merge_request_approvals.html#get-a-single-project-level-rule
-func (s *ProjectsService) GetProjectApprovalRule(pid interface{}, ruleID int, options ...RequestOptionFunc) (*ProjectApprovalRule, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/approval_rules/%d", PathEscape(project), ruleID)
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	par := new(ProjectApprovalRule)
-	resp, err := s.client.Do(req, &par)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return par, resp, nil
-}
-
-// CreateProjectLevelRuleOptions represents the available CreateProjectApprovalRule()
-// options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/merge_request_approvals.html#create-project-level-rule
-type CreateProjectLevelRuleOptions struct {
-	Name *string `url:"name,omitempty" json:"name,omitempty"`
-	ApprovalsRequired *int `url:"approvals_required,omitempty" json:"approvals_required,omitempty"`
-	ReportType *string `url:"report_type,omitempty" json:"report_type,omitempty"`
-	RuleType *string `url:"rule_type,omitempty" json:"rule_type,omitempty"`
-	UserIDs *[]int `url:"user_ids,omitempty" json:"user_ids,omitempty"`
-	GroupIDs *[]int `url:"group_ids,omitempty" json:"group_ids,omitempty"`
-	ProtectedBranchIDs *[]int `url:"protected_branch_ids,omitempty" json:"protected_branch_ids,omitempty"`
-	AppliesToAllProtectedBranches *bool `url:"applies_to_all_protected_branches,omitempty" json:"applies_to_all_protected_branches,omitempty"`
-}
-
-// CreateProjectApprovalRule creates a new project-level approval rule.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_request_approvals.html#create-project-level-rule -func (s *ProjectsService) CreateProjectApprovalRule(pid interface{}, opt *CreateProjectLevelRuleOptions, options ...RequestOptionFunc) (*ProjectApprovalRule, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/approval_rules", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - par := new(ProjectApprovalRule) - resp, err := s.client.Do(req, &par) - if err != nil { - return nil, resp, err - } - - return par, resp, nil -} - -// UpdateProjectLevelRuleOptions represents the available UpdateProjectApprovalRule() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_request_approvals.html#update-project-level-rule -type UpdateProjectLevelRuleOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - ApprovalsRequired *int `url:"approvals_required,omitempty" json:"approvals_required,omitempty"` - UserIDs *[]int `url:"user_ids,omitempty" json:"user_ids,omitempty"` - GroupIDs *[]int `url:"group_ids,omitempty" json:"group_ids,omitempty"` - ProtectedBranchIDs *[]int `url:"protected_branch_ids,omitempty" json:"protected_branch_ids,omitempty"` - AppliesToAllProtectedBranches *bool `url:"applies_to_all_protected_branches,omitempty" json:"applies_to_all_protected_branches,omitempty"` -} - -// UpdateProjectApprovalRule updates an existing approval rule with new options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_request_approvals.html#update-project-level-rule -func (s *ProjectsService) UpdateProjectApprovalRule(pid interface{}, approvalRule int, opt *UpdateProjectLevelRuleOptions, options ...RequestOptionFunc) (*ProjectApprovalRule, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/approval_rules/%d", PathEscape(project), approvalRule) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - par := new(ProjectApprovalRule) - resp, err := s.client.Do(req, &par) - if err != nil { - return nil, resp, err - } - - return par, resp, nil -} - -// DeleteProjectApprovalRule deletes a project-level approval rule. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_request_approvals.html#delete-project-level-rule -func (s *ProjectsService) DeleteProjectApprovalRule(pid interface{}, approvalRule int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/approval_rules/%d", PathEscape(project), approvalRule) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// ChangeAllowedApproversOptions represents the available ChangeAllowedApprovers() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/merge_request_approvals.html#change-allowed-approvers -type ChangeAllowedApproversOptions struct { - ApproverGroupIDs *[]int `url:"approver_group_ids,omitempty" json:"approver_group_ids,omitempty"` - ApproverIDs *[]int `url:"approver_ids,omitempty" json:"approver_ids,omitempty"` -} - -// ChangeAllowedApprovers updates the list of approvers and approver groups. 
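A sketch of the create-then-update flow for approval rules. The rule name and user IDs are hypothetical placeholders; note that UserIDs is a *[]int, so a slice literal must be passed by address:

package example

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

// addSecurityRule creates a rule requiring one approval from a set of
// reviewers, then raises the requirement to two via the update endpoint.
func addSecurityRule(git *gitlab.Client, pid interface{}) error {
	rule, _, err := git.Projects.CreateProjectApprovalRule(pid, &gitlab.CreateProjectLevelRuleOptions{
		Name:              gitlab.Ptr("security-review"),
		ApprovalsRequired: gitlab.Ptr(1),
		UserIDs:           &[]int{42, 43}, // placeholder reviewer IDs
	})
	if err != nil {
		return err
	}
	log.Printf("created rule %d (%s)", rule.ID, rule.Name)

	_, _, err = git.Projects.UpdateProjectApprovalRule(pid, rule.ID, &gitlab.UpdateProjectLevelRuleOptions{
		ApprovalsRequired: gitlab.Ptr(2),
	})
	return err
}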
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/merge_request_approvals.html#change-allowed-approvers
-func (s *ProjectsService) ChangeAllowedApprovers(pid interface{}, opt *ChangeAllowedApproversOptions, options ...RequestOptionFunc) (*ProjectApprovals, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/approvers", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	pa := new(ProjectApprovals)
-	resp, err := s.client.Do(req, pa)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return pa, resp, nil
-}
-
-// ProjectPullMirrorDetails represents the details of the configured pull
-// mirror and its update status.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/projects.html#get-a-projects-pull-mirror-details
-type ProjectPullMirrorDetails struct {
-	ID int `json:"id"`
-	LastError string `json:"last_error"`
-	LastSuccessfulUpdateAt *time.Time `json:"last_successful_update_at"`
-	LastUpdateAt *time.Time `json:"last_update_at"`
-	LastUpdateStartedAt *time.Time `json:"last_update_started_at"`
-	UpdateStatus string `json:"update_status"`
-	URL string `json:"url"`
-}
-
-// GetProjectPullMirrorDetails returns the pull mirror details.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/projects.html#get-a-projects-pull-mirror-details
-func (s *ProjectsService) GetProjectPullMirrorDetails(pid interface{}, options ...RequestOptionFunc) (*ProjectPullMirrorDetails, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/mirror/pull", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	pmd := new(ProjectPullMirrorDetails)
-	resp, err := s.client.Do(req, pmd)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return pmd, resp, nil
-}
-
-// StartMirroringProject starts the pull mirroring process for a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/projects.html#start-the-pull-mirroring-process-for-a-project
-func (s *ProjectsService) StartMirroringProject(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/mirror/pull", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	resp, err := s.client.Do(req, nil)
-	if err != nil {
-		return resp, err
-	}
-
-	return resp, nil
-}
-
-// TransferProjectOptions represents the available TransferProject() options.
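A sketch pairing the two mirror calls above: inspect the last run, then trigger a new pull. GitLab enqueues the update asynchronously, so a nil error only means the request was accepted:

package example

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

// resyncMirror logs the last mirror failure, if any, and requests a new pull.
func resyncMirror(git *gitlab.Client, pid interface{}) error {
	details, _, err := git.Projects.GetProjectPullMirrorDetails(pid)
	if err != nil {
		return err
	}
	if details.LastError != "" {
		log.Printf("last mirror update failed: %s", details.LastError)
	}
	_, err = git.Projects.StartMirroringProject(pid)
	return err
}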
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#transfer-a-project-to-a-new-namespace
-type TransferProjectOptions struct {
-	Namespace interface{} `url:"namespace,omitempty" json:"namespace,omitempty"`
-}
-
-// TransferProject transfers a project into the specified namespace.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#transfer-a-project-to-a-new-namespace
-func (s *ProjectsService) TransferProject(pid interface{}, opt *TransferProjectOptions, options ...RequestOptionFunc) (*Project, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/transfer", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	p := new(Project)
-	resp, err := s.client.Do(req, p)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return p, resp, nil
-}
-
-// StartHousekeepingProject starts the housekeeping task for a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/projects.html#start-the-housekeeping-task-for-a-project
-func (s *ProjectsService) StartHousekeepingProject(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/housekeeping", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// GetRepositoryStorage gets the path to repository storage.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/projects.html#get-the-path-to-repository-storage
-type ProjectReposityStorage struct {
-	ProjectID int `json:"project_id"`
-	DiskPath string `json:"disk_path"`
-	CreatedAt *time.Time `json:"created_at"`
-	RepositoryStorage string `json:"repository_storage"`
-}
-
-func (s *ProjectsService) GetRepositoryStorage(pid interface{}, options ...RequestOptionFunc) (*ProjectReposityStorage, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/storage", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	prs := new(ProjectReposityStorage)
-	resp, err := s.client.Do(req, prs)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return prs, resp, nil
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/protected_branches.go b/vendor/github.com/xanzy/go-gitlab/protected_branches.go
deleted file mode 100644
index d13f57a608..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/protected_branches.go
+++ /dev/null
@@ -1,257 +0,0 @@
-//
-// Copyright 2021, Sander van Harmelen, Michael Lihs
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package gitlab
-
-import (
-	"fmt"
-	"net/http"
-	"net/url"
-)
-
-// ProtectedBranchesService handles communication with the protected branch
-// related methods of the GitLab API.
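A sketch of the project transfer call removed above. The target namespace is a hypothetical path; Namespace is typed interface{} because the endpoint accepts either a numeric namespace ID or a path string:

package example

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

// moveProject transfers a project into the (placeholder) "new-group" namespace.
func moveProject(git *gitlab.Client, pid interface{}) error {
	p, _, err := git.Projects.TransferProject(pid, &gitlab.TransferProjectOptions{
		Namespace: "new-group",
	})
	if err != nil {
		return err
	}
	log.Printf("project now lives at %s", p.PathWithNamespace)
	return nil
}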
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/protected_branches.html -type ProtectedBranchesService struct { - client *Client -} - -// ProtectedBranch represents a protected branch. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/protected_branches.html#list-protected-branches -type ProtectedBranch struct { - ID int `json:"id"` - Name string `json:"name"` - PushAccessLevels []*BranchAccessDescription `json:"push_access_levels"` - MergeAccessLevels []*BranchAccessDescription `json:"merge_access_levels"` - UnprotectAccessLevels []*BranchAccessDescription `json:"unprotect_access_levels"` - AllowForcePush bool `json:"allow_force_push"` - CodeOwnerApprovalRequired bool `json:"code_owner_approval_required"` -} - -// BranchAccessDescription represents the access description for a protected -// branch. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/protected_branches.html#list-protected-branches -type BranchAccessDescription struct { - ID int `json:"id"` - AccessLevel AccessLevelValue `json:"access_level"` - AccessLevelDescription string `json:"access_level_description"` - UserID int `json:"user_id"` - GroupID int `json:"group_id"` -} - -// ListProtectedBranchesOptions represents the available ListProtectedBranches() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/protected_branches.html#list-protected-branches -type ListProtectedBranchesOptions struct { - ListOptions - Search *string `url:"search,omitempty" json:"search,omitempty"` -} - -// ListProtectedBranches gets a list of protected branches from a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/protected_branches.html#list-protected-branches -func (s *ProtectedBranchesService) ListProtectedBranches(pid interface{}, opt *ListProtectedBranchesOptions, options ...RequestOptionFunc) ([]*ProtectedBranch, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/protected_branches", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var p []*ProtectedBranch - resp, err := s.client.Do(req, &p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -// GetProtectedBranch gets a single protected branch or wildcard protected branch. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/protected_branches.html#get-a-single-protected-branch-or-wildcard-protected-branch -func (s *ProtectedBranchesService) GetProtectedBranch(pid interface{}, branch string, options ...RequestOptionFunc) (*ProtectedBranch, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/protected_branches/%s", PathEscape(project), url.PathEscape(branch)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - p := new(ProtectedBranch) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -// ProtectRepositoryBranchesOptions represents the available -// ProtectRepositoryBranches() options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/protected_branches.html#protect-repository-branches -type ProtectRepositoryBranchesOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - PushAccessLevel *AccessLevelValue `url:"push_access_level,omitempty" json:"push_access_level,omitempty"` - MergeAccessLevel *AccessLevelValue `url:"merge_access_level,omitempty" json:"merge_access_level,omitempty"` - UnprotectAccessLevel *AccessLevelValue `url:"unprotect_access_level,omitempty" json:"unprotect_access_level,omitempty"` - AllowForcePush *bool `url:"allow_force_push,omitempty" json:"allow_force_push,omitempty"` - AllowedToPush *[]*BranchPermissionOptions `url:"allowed_to_push,omitempty" json:"allowed_to_push,omitempty"` - AllowedToMerge *[]*BranchPermissionOptions `url:"allowed_to_merge,omitempty" json:"allowed_to_merge,omitempty"` - AllowedToUnprotect *[]*BranchPermissionOptions `url:"allowed_to_unprotect,omitempty" json:"allowed_to_unprotect,omitempty"` - CodeOwnerApprovalRequired *bool `url:"code_owner_approval_required,omitempty" json:"code_owner_approval_required,omitempty"` -} - -// BranchPermissionOptions represents a branch permission option. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/protected_branches.html#protect-repository-branches -type BranchPermissionOptions struct { - ID *int `url:"id,omitempty" json:"id,omitempty"` - UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` - GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` - DeployKeyID *int `url:"deploy_key_id,omitempty" json:"deploy_key_id,omitempty"` - AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` - Destroy *bool `url:"_destroy,omitempty" json:"_destroy,omitempty"` -} - -// ProtectRepositoryBranches protects a single repository branch or several -// project repository branches using a wildcard protected branch. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/protected_branches.html#protect-repository-branches -func (s *ProtectedBranchesService) ProtectRepositoryBranches(pid interface{}, opt *ProtectRepositoryBranchesOptions, options ...RequestOptionFunc) (*ProtectedBranch, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/protected_branches", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - p := new(ProtectedBranch) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -// UnprotectRepositoryBranches unprotects the given protected branch or wildcard -// protected branch. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/protected_branches.html#unprotect-repository-branches -func (s *ProtectedBranchesService) UnprotectRepositoryBranches(pid interface{}, branch string, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/protected_branches/%s", PathEscape(project), url.PathEscape(branch)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// UpdateProtectedBranchOptions represents the available -// UpdateProtectedBranch() options. 
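A minimal sketch of ProtectRepositoryBranches using the wildcard form, so one call covers every matching branch. The pattern and access levels are illustrative; MaintainerPermissions and DeveloperPermissions are the library's AccessLevelValue constants:

package example

import (
	gitlab "github.com/xanzy/go-gitlab"
)

// protectReleaseBranches protects every branch matching "release-*":
// maintainers may push, developers may merge, force-push stays disabled.
func protectReleaseBranches(git *gitlab.Client, pid interface{}) (*gitlab.ProtectedBranch, error) {
	pb, _, err := git.ProtectedBranches.ProtectRepositoryBranches(pid, &gitlab.ProtectRepositoryBranchesOptions{
		Name:             gitlab.Ptr("release-*"),
		PushAccessLevel:  gitlab.Ptr(gitlab.MaintainerPermissions),
		MergeAccessLevel: gitlab.Ptr(gitlab.DeveloperPermissions),
		AllowForcePush:   gitlab.Ptr(false),
	})
	return pb, err
}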
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/protected_branches.html#update-a-protected-branch
-type UpdateProtectedBranchOptions struct {
-	Name *string `url:"name,omitempty" json:"name,omitempty"`
-	AllowForcePush *bool `url:"allow_force_push,omitempty" json:"allow_force_push,omitempty"`
-	CodeOwnerApprovalRequired *bool `url:"code_owner_approval_required,omitempty" json:"code_owner_approval_required,omitempty"`
-	AllowedToPush *[]*BranchPermissionOptions `url:"allowed_to_push,omitempty" json:"allowed_to_push,omitempty"`
-	AllowedToMerge *[]*BranchPermissionOptions `url:"allowed_to_merge,omitempty" json:"allowed_to_merge,omitempty"`
-	AllowedToUnprotect *[]*BranchPermissionOptions `url:"allowed_to_unprotect,omitempty" json:"allowed_to_unprotect,omitempty"`
-}
-
-// UpdateProtectedBranch updates a protected branch.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/protected_branches.html#update-a-protected-branch
-func (s *ProtectedBranchesService) UpdateProtectedBranch(pid interface{}, branch string, opt *UpdateProtectedBranchOptions, options ...RequestOptionFunc) (*ProtectedBranch, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/protected_branches/%s", PathEscape(project), url.PathEscape(branch))
-
-	req, err := s.client.NewRequest(http.MethodPatch, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	p := new(ProtectedBranch)
-	resp, err := s.client.Do(req, p)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return p, resp, nil
-}
-
-// RequireCodeOwnerApprovalsOptions represents the available
-// RequireCodeOwnerApprovals() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/protected_branches.html#update-a-protected-branch
-type RequireCodeOwnerApprovalsOptions struct {
-	CodeOwnerApprovalRequired *bool `url:"code_owner_approval_required,omitempty" json:"code_owner_approval_required,omitempty"`
-}
-
-// RequireCodeOwnerApprovals updates the code owner approval option.
-//
-// Deprecated: Use UpdateProtectedBranch() instead.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/protected_branches.html#update-a-protected-branch
-func (s *ProtectedBranchesService) RequireCodeOwnerApprovals(pid interface{}, branch string, opt *RequireCodeOwnerApprovalsOptions, options ...RequestOptionFunc) (*Response, error) {
-	updateOptions := &UpdateProtectedBranchOptions{
-		CodeOwnerApprovalRequired: opt.CodeOwnerApprovalRequired,
-	}
-	_, resp, err := s.UpdateProtectedBranch(pid, branch, updateOptions, options...)
-	return resp, err
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/protected_environments.go b/vendor/github.com/xanzy/go-gitlab/protected_environments.go
deleted file mode 100644
index 50ee31173a..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/protected_environments.go
+++ /dev/null
@@ -1,282 +0,0 @@
-//
-// Copyright 2021, Sander van Harmelen
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package gitlab
-
-import (
-	"fmt"
-	"net/http"
-)
-
-// ProtectedEnvironmentsService handles communication with the protected
-// environment methods of the GitLab API.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/protected_environments.html
-type ProtectedEnvironmentsService struct {
-	client *Client
-}
-
-// ProtectedEnvironment represents a protected environment.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/protected_environments.html
-type ProtectedEnvironment struct {
-	Name string `json:"name"`
-	DeployAccessLevels []*EnvironmentAccessDescription `json:"deploy_access_levels"`
-	RequiredApprovalCount int `json:"required_approval_count"`
-	ApprovalRules []*EnvironmentApprovalRule `json:"approval_rules"`
-}
-
-// EnvironmentAccessDescription represents the access description for a protected
-// environment.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/protected_environments.html
-type EnvironmentAccessDescription struct {
-	ID int `json:"id"`
-	AccessLevel AccessLevelValue `json:"access_level"`
-	AccessLevelDescription string `json:"access_level_description"`
-	UserID int `json:"user_id"`
-	GroupID int `json:"group_id"`
-	GroupInheritanceType int `json:"group_inheritance_type"`
-}
-
-// EnvironmentApprovalRule represents the approval rules for a protected
-// environment.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/protected_environments.html#protect-a-single-environment
-type EnvironmentApprovalRule struct {
-	ID int `json:"id"`
-	UserID int `json:"user_id"`
-	GroupID int `json:"group_id"`
-	AccessLevel AccessLevelValue `json:"access_level"`
-	AccessLevelDescription string `json:"access_level_description"`
-	RequiredApprovalCount int `json:"required_approvals"`
-	GroupInheritanceType int `json:"group_inheritance_type"`
-}
-
-// ListProtectedEnvironmentsOptions represents the available
-// ListProtectedEnvironments() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/protected_environments.html#list-protected-environments
-type ListProtectedEnvironmentsOptions ListOptions
-
-// ListProtectedEnvironments returns a list of protected environments from a
-// project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/protected_environments.html#list-protected-environments
-func (s *ProtectedEnvironmentsService) ListProtectedEnvironments(pid interface{}, opt *ListProtectedEnvironmentsOptions, options ...RequestOptionFunc) ([]*ProtectedEnvironment, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/protected_environments", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var pes []*ProtectedEnvironment
-	resp, err := s.client.Do(req, &pes)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return pes, resp, nil
-}
-
-// GetProtectedEnvironment returns a single protected environment or wildcard
-// protected environment.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/protected_environments.html#get-a-single-protected-environment
-func (s *ProtectedEnvironmentsService) GetProtectedEnvironment(pid interface{}, environment string, options ...RequestOptionFunc) (*ProtectedEnvironment, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/protected_environments/%s", PathEscape(project), PathEscape(environment))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	pe := new(ProtectedEnvironment)
-	resp, err := s.client.Do(req, pe)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return pe, resp, nil
-}
-
-// ProtectRepositoryEnvironmentsOptions represents the available
-// ProtectRepositoryEnvironments() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/protected_environments.html#protect-a-single-environment
-type ProtectRepositoryEnvironmentsOptions struct {
-	Name *string `url:"name,omitempty" json:"name,omitempty"`
-	DeployAccessLevels *[]*EnvironmentAccessOptions `url:"deploy_access_levels,omitempty" json:"deploy_access_levels,omitempty"`
-	RequiredApprovalCount *int `url:"required_approval_count,omitempty" json:"required_approval_count,omitempty"`
-	ApprovalRules *[]*EnvironmentApprovalRuleOptions `url:"approval_rules,omitempty" json:"approval_rules,omitempty"`
-}
-
-// EnvironmentAccessOptions represents the options for an access description for
-// a protected environment.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/protected_environments.html#protect-a-single-environment
-type EnvironmentAccessOptions struct {
-	AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"`
-	UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"`
-	GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"`
-	GroupInheritanceType *int `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"`
-}
-
-// EnvironmentApprovalRuleOptions represents the approval rules for a protected
-// environment.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/protected_environments.html#protect-a-single-environment
-type EnvironmentApprovalRuleOptions struct {
-	UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"`
-	GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"`
-	AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"`
-	AccessLevelDescription *string `url:"access_level_description,omitempty" json:"access_level_description,omitempty"`
-	RequiredApprovalCount *int `url:"required_approvals,omitempty" json:"required_approvals,omitempty"`
-	GroupInheritanceType *int `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"`
-}
-
-// ProtectRepositoryEnvironments protects a single repository environment or
-// several project repository environments using a wildcard protected environment.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/protected_environments.html#protect-a-single-environment
-func (s *ProtectedEnvironmentsService) ProtectRepositoryEnvironments(pid interface{}, opt *ProtectRepositoryEnvironmentsOptions, options ...RequestOptionFunc) (*ProtectedEnvironment, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/protected_environments", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	pe := new(ProtectedEnvironment)
-	resp, err := s.client.Do(req, pe)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return pe, resp, nil
-}
-
-// UpdateProtectedEnvironmentsOptions represents the available
-// UpdateProtectedEnvironments() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/protected_environments.html#update-a-protected-environment
-type UpdateProtectedEnvironmentsOptions struct {
-	Name *string `url:"name,omitempty" json:"name,omitempty"`
-	DeployAccessLevels *[]*UpdateEnvironmentAccessOptions `url:"deploy_access_levels,omitempty" json:"deploy_access_levels,omitempty"`
-	RequiredApprovalCount *int `url:"required_approval_count,omitempty" json:"required_approval_count,omitempty"`
-	ApprovalRules *[]*UpdateEnvironmentApprovalRuleOptions `url:"approval_rules,omitempty" json:"approval_rules,omitempty"`
-}
-
-// UpdateEnvironmentAccessOptions represents the options for updates to an
-// access description for a protected environment.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/protected_environments.html#update-a-protected-environment
-type UpdateEnvironmentAccessOptions struct {
-	AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"`
-	ID *int `url:"id,omitempty" json:"id,omitempty"`
-	UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"`
-	GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"`
-	GroupInheritanceType *int `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"`
-	Destroy *bool `url:"_destroy,omitempty" json:"_destroy,omitempty"`
-}
-
-// UpdateEnvironmentApprovalRuleOptions represents the updates to the approval
-// rules for a protected environment.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/protected_environments.html#update-a-protected-environment
-type UpdateEnvironmentApprovalRuleOptions struct {
-	ID *int `url:"id,omitempty" json:"id,omitempty"`
-	UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"`
-	GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"`
-	AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"`
-	AccessLevelDescription *string `url:"access_level_description,omitempty" json:"access_level_description,omitempty"`
-	RequiredApprovalCount *int `url:"required_approvals,omitempty" json:"required_approvals,omitempty"`
-	GroupInheritanceType *int `url:"group_inheritance_type,omitempty" json:"group_inheritance_type,omitempty"`
-	Destroy *bool `url:"_destroy,omitempty" json:"_destroy,omitempty"`
-}
-
-// UpdateProtectedEnvironments updates a single repository environment or
-// several project repository environments using a wildcard protected environment.
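A sketch of ProtectRepositoryEnvironments combining a deploy access level with a manual approval gate. The environment name is a placeholder; DeployAccessLevels is a *[]*EnvironmentAccessOptions, hence the slice literal passed by address:

package example

import (
	gitlab "github.com/xanzy/go-gitlab"
)

// protectProduction restricts deploys to "production" to maintainers and
// requires one approval before each deployment.
func protectProduction(git *gitlab.Client, pid interface{}) (*gitlab.ProtectedEnvironment, error) {
	pe, _, err := git.ProtectedEnvironments.ProtectRepositoryEnvironments(pid, &gitlab.ProtectRepositoryEnvironmentsOptions{
		Name: gitlab.Ptr("production"),
		DeployAccessLevels: &[]*gitlab.EnvironmentAccessOptions{
			{AccessLevel: gitlab.Ptr(gitlab.MaintainerPermissions)},
		},
		RequiredApprovalCount: gitlab.Ptr(1),
	})
	return pe, err
}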
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/protected_environments.html#update-a-protected-environment
-func (s *ProtectedEnvironmentsService) UpdateProtectedEnvironments(pid interface{}, environment string, opt *UpdateProtectedEnvironmentsOptions, options ...RequestOptionFunc) (*ProtectedEnvironment, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/protected_environments/%s", PathEscape(project), PathEscape(environment))
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	pe := new(ProtectedEnvironment)
-	resp, err := s.client.Do(req, pe)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return pe, resp, nil
-}
-
-// UnprotectEnvironment unprotects the given protected environment or wildcard
-// protected environment.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/protected_environments.html#unprotect-a-single-environment
-func (s *ProtectedEnvironmentsService) UnprotectEnvironment(pid interface{}, environment string, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/protected_environments/%s", PathEscape(project), PathEscape(environment))
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/protected_tags.go b/vendor/github.com/xanzy/go-gitlab/protected_tags.go
deleted file mode 100644
index 76e8ff4d2a..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/protected_tags.go
+++ /dev/null
@@ -1,176 +0,0 @@
-//
-// Copyright 2021, Sander van Harmelen
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package gitlab
-
-import (
-	"fmt"
-	"net/http"
-)
-
-// ProtectedTagsService handles communication with the protected tag methods
-// of the GitLab API.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/protected_tags.html
-type ProtectedTagsService struct {
-	client *Client
-}
-
-// ProtectedTag represents a protected tag.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/protected_tags.html
-type ProtectedTag struct {
-	Name string `json:"name"`
-	CreateAccessLevels []*TagAccessDescription `json:"create_access_levels"`
-}
-
-// TagAccessDescription represents the access description for a protected tag.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/protected_tags.html
-type TagAccessDescription struct {
-	ID int `json:"id"`
-	UserID int `json:"user_id"`
-	GroupID int `json:"group_id"`
-	AccessLevel AccessLevelValue `json:"access_level"`
-	AccessLevelDescription string `json:"access_level_description"`
-}
-
-// ListProtectedTagsOptions represents the available ListProtectedTags()
-// options.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/protected_tags.html#list-protected-tags -type ListProtectedTagsOptions ListOptions - -// ListProtectedTags returns a list of protected tags from a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/protected_tags.html#list-protected-tags -func (s *ProtectedTagsService) ListProtectedTags(pid interface{}, opt *ListProtectedTagsOptions, options ...RequestOptionFunc) ([]*ProtectedTag, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/protected_tags", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var pts []*ProtectedTag - resp, err := s.client.Do(req, &pts) - if err != nil { - return nil, resp, err - } - - return pts, resp, nil -} - -// GetProtectedTag returns a single protected tag or wildcard protected tag. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/protected_tags.html#get-a-single-protected-tag-or-wildcard-protected-tag -func (s *ProtectedTagsService) GetProtectedTag(pid interface{}, tag string, options ...RequestOptionFunc) (*ProtectedTag, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/protected_tags/%s", PathEscape(project), PathEscape(tag)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - pt := new(ProtectedTag) - resp, err := s.client.Do(req, pt) - if err != nil { - return nil, resp, err - } - - return pt, resp, nil -} - -// ProtectRepositoryTagsOptions represents the available ProtectRepositoryTags() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/protected_tags.html#protect-repository-tags -type ProtectRepositoryTagsOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - CreateAccessLevel *AccessLevelValue `url:"create_access_level,omitempty" json:"create_access_level,omitempty"` - AllowedToCreate *[]*TagsPermissionOptions `url:"allowed_to_create,omitempty" json:"allowed_to_create,omitempty"` -} - -// TagsPermissionOptions represents a protected tag permission option. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/protected_tags.html#protect-repository-tags -type TagsPermissionOptions struct { - UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"` - GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` - AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"` -} - -// ProtectRepositoryTags protects a single repository tag or several project -// repository tags using a wildcard protected tag. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/protected_tags.html#protect-repository-tags -func (s *ProtectedTagsService) ProtectRepositoryTags(pid interface{}, opt *ProtectRepositoryTagsOptions, options ...RequestOptionFunc) (*ProtectedTag, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/protected_tags", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - pt := new(ProtectedTag) - resp, err := s.client.Do(req, pt) - if err != nil { - return nil, resp, err - } - - return pt, resp, nil -} - -// UnprotectRepositoryTags unprotects the given protected tag or wildcard -// protected tag. 
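A minimal sketch of ProtectRepositoryTags; the pattern is illustrative, and the wildcard form means a single rule covers every tag beginning with "v":

package example

import (
	gitlab "github.com/xanzy/go-gitlab"
)

// protectVersionTags limits creation of v* tags to maintainers.
func protectVersionTags(git *gitlab.Client, pid interface{}) (*gitlab.ProtectedTag, error) {
	pt, _, err := git.ProtectedTags.ProtectRepositoryTags(pid, &gitlab.ProtectRepositoryTagsOptions{
		Name:              gitlab.Ptr("v*"),
		CreateAccessLevel: gitlab.Ptr(gitlab.MaintainerPermissions),
	})
	return pt, err
}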
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/protected_tags.html#unprotect-repository-tags -func (s *ProtectedTagsService) UnprotectRepositoryTags(pid interface{}, tag string, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/protected_tags/%s", PathEscape(project), PathEscape(tag)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/releaselinks.go b/vendor/github.com/xanzy/go-gitlab/releaselinks.go deleted file mode 100644 index 8cde15f512..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/releaselinks.go +++ /dev/null @@ -1,201 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" -) - -// ReleaseLinksService handles communication with the release link methods -// of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/releases/links.html -type ReleaseLinksService struct { - client *Client -} - -// ReleaseLink represents a release link. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/releases/links.html -type ReleaseLink struct { - ID int `json:"id"` - Name string `json:"name"` - URL string `json:"url"` - DirectAssetURL string `json:"direct_asset_url"` - External bool `json:"external"` - LinkType LinkTypeValue `json:"link_type"` -} - -// ListReleaseLinksOptions represents ListReleaseLinks() options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/releases/links.html#list-links-of-a-release -type ListReleaseLinksOptions ListOptions - -// ListReleaseLinks gets assets as links from a Release. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/releases/links.html#list-links-of-a-release -func (s *ReleaseLinksService) ListReleaseLinks(pid interface{}, tagName string, opt *ListReleaseLinksOptions, options ...RequestOptionFunc) ([]*ReleaseLink, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/releases/%s/assets/links", PathEscape(project), PathEscape(tagName)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var rls []*ReleaseLink - resp, err := s.client.Do(req, &rls) - if err != nil { - return nil, resp, err - } - - return rls, resp, nil -} - -// GetReleaseLink returns a link from release assets. 
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/releases/links.html#get-a-release-link
-func (s *ReleaseLinksService) GetReleaseLink(pid interface{}, tagName string, link int, options ...RequestOptionFunc) (*ReleaseLink, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/releases/%s/assets/links/%d",
-		PathEscape(project),
-		PathEscape(tagName),
-		link)
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	rl := new(ReleaseLink)
-	resp, err := s.client.Do(req, rl)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return rl, resp, nil
-}
-
-// CreateReleaseLinkOptions represents CreateReleaseLink() options.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/releases/links.html#create-a-release-link
-type CreateReleaseLinkOptions struct {
-	Name *string `url:"name,omitempty" json:"name,omitempty"`
-	URL *string `url:"url,omitempty" json:"url,omitempty"`
-	FilePath *string `url:"filepath,omitempty" json:"filepath,omitempty"`
-	DirectAssetPath *string `url:"direct_asset_path,omitempty" json:"direct_asset_path,omitempty"`
-	LinkType *LinkTypeValue `url:"link_type,omitempty" json:"link_type,omitempty"`
-}
-
-// CreateReleaseLink creates a link.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/releases/links.html#create-a-release-link
-func (s *ReleaseLinksService) CreateReleaseLink(pid interface{}, tagName string, opt *CreateReleaseLinkOptions, options ...RequestOptionFunc) (*ReleaseLink, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/releases/%s/assets/links", PathEscape(project), PathEscape(tagName))
-
-	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	rl := new(ReleaseLink)
-	resp, err := s.client.Do(req, rl)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return rl, resp, nil
-}
-
-// UpdateReleaseLinkOptions represents UpdateReleaseLink() options.
-//
-// You have to specify at least one of Name or URL.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/releases/links.html#update-a-release-link
-type UpdateReleaseLinkOptions struct {
-	Name *string `url:"name,omitempty" json:"name,omitempty"`
-	URL *string `url:"url,omitempty" json:"url,omitempty"`
-	FilePath *string `url:"filepath,omitempty" json:"filepath,omitempty"`
-	DirectAssetPath *string `url:"direct_asset_path,omitempty" json:"direct_asset_path,omitempty"`
-	LinkType *LinkTypeValue `url:"link_type,omitempty" json:"link_type,omitempty"`
-}
-
-// UpdateReleaseLink updates an asset link.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/releases/links.html#update-a-release-link
-func (s *ReleaseLinksService) UpdateReleaseLink(pid interface{}, tagName string, link int, opt *UpdateReleaseLinkOptions, options ...RequestOptionFunc) (*ReleaseLink, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/releases/%s/assets/links/%d",
-		PathEscape(project),
-		PathEscape(tagName),
-		link)
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	rl := new(ReleaseLink)
-	resp, err := s.client.Do(req, rl)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return rl, resp, nil
-}
-
-// DeleteReleaseLink deletes a link from a release.
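A sketch of CreateReleaseLink attaching an external asset to an existing release. The URL is a placeholder; setting FilePath additionally exposes the asset under the project's permanent /releases/:tag/downloads/... path:

package example

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

// attachChecksums adds a checksums file as an asset link on release "tag".
func attachChecksums(git *gitlab.Client, pid interface{}, tag string) error {
	link, _, err := git.ReleaseLinks.CreateReleaseLink(pid, tag, &gitlab.CreateReleaseLinkOptions{
		Name:     gitlab.Ptr("checksums.txt"),
		URL:      gitlab.Ptr("https://example.com/artifacts/checksums.txt"),
		FilePath: gitlab.Ptr("/checksums.txt"),
		LinkType: gitlab.Ptr(gitlab.OtherLinkType),
	})
	if err != nil {
		return err
	}
	log.Printf("link %d available at %s", link.ID, link.DirectAssetURL)
	return nil
}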
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/releases/links.html#delete-a-release-link -func (s *ReleaseLinksService) DeleteReleaseLink(pid interface{}, tagName string, link int, options ...RequestOptionFunc) (*ReleaseLink, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/releases/%s/assets/links/%d", - PathEscape(project), - PathEscape(tagName), - link, - ) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, nil, err - } - - rl := new(ReleaseLink) - resp, err := s.client.Do(req, rl) - if err != nil { - return nil, resp, err - } - - return rl, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/releases.go b/vendor/github.com/xanzy/go-gitlab/releases.go deleted file mode 100644 index 97cbff7bb9..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/releases.go +++ /dev/null @@ -1,281 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// ReleasesService handles communication with the releases methods -// of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/releases/index.html -type ReleasesService struct { - client *Client -} - -// Release represents a project release. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/releases/index.html#list-releases -type Release struct { - TagName string `json:"tag_name"` - Name string `json:"name"` - Description string `json:"description"` - DescriptionHTML string `json:"description_html"` - CreatedAt *time.Time `json:"created_at"` - ReleasedAt *time.Time `json:"released_at"` - Author struct { - ID int `json:"id"` - Name string `json:"name"` - Username string `json:"username"` - State string `json:"state"` - AvatarURL string `json:"avatar_url"` - WebURL string `json:"web_url"` - } `json:"author"` - Commit Commit `json:"commit"` - UpcomingRelease bool `json:"upcoming_release"` - CommitPath string `json:"commit_path"` - TagPath string `json:"tag_path"` - Assets struct { - Count int `json:"count"` - Sources []struct { - Format string `json:"format"` - URL string `json:"url"` - } `json:"sources"` - Links []*ReleaseLink `json:"links"` - } `json:"assets"` - Links struct { - ClosedIssueURL string `json:"closed_issues_url"` - ClosedMergeRequest string `json:"closed_merge_requests_url"` - EditURL string `json:"edit_url"` - MergedMergeRequest string `json:"merged_merge_requests_url"` - OpenedIssues string `json:"opened_issues_url"` - OpenedMergeRequest string `json:"opened_merge_requests_url"` - Self string `json:"self"` - } `json:"_links"` -} - -// ListReleasesOptions represents ListReleases() options. 
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/releases/index.html#list-releases
-type ListReleasesOptions struct {
-	ListOptions
-	OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"`
-	Sort *string `url:"sort,omitempty" json:"sort,omitempty"`
-	IncludeHTMLDescription *bool `url:"include_html_description,omitempty" json:"include_html_description,omitempty"`
-}
-
-// ListReleases gets a paginated list of releases accessible by the
-// authenticated user.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/releases/index.html#list-releases
-func (s *ReleasesService) ListReleases(pid interface{}, opt *ListReleasesOptions, options ...RequestOptionFunc) ([]*Release, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/releases", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var rs []*Release
-	resp, err := s.client.Do(req, &rs)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return rs, resp, nil
-}
-
-// GetRelease returns a single release, identified by a tag name.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/releases/index.html#get-a-release-by-a-tag-name
-func (s *ReleasesService) GetRelease(pid interface{}, tagName string, options ...RequestOptionFunc) (*Release, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/releases/%s", PathEscape(project), PathEscape(tagName))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	r := new(Release)
-	resp, err := s.client.Do(req, r)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return r, resp, nil
-}
-
-// GetLatestRelease returns the latest release for the project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/releases/#get-the-latest-release
-func (s *ReleasesService) GetLatestRelease(pid interface{}, options ...RequestOptionFunc) (*Release, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/releases/permalink/latest", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	r := new(Release)
-	resp, err := s.client.Do(req, r)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return r, resp, err
-}
-
-// CreateReleaseOptions represents CreateRelease() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/releases/index.html#create-a-release
-type CreateReleaseOptions struct {
-	Name *string `url:"name,omitempty" json:"name,omitempty"`
-	TagName *string `url:"tag_name,omitempty" json:"tag_name,omitempty"`
-	TagMessage *string `url:"tag_message,omitempty" json:"tag_message,omitempty"`
-	Description *string `url:"description,omitempty" json:"description,omitempty"`
-	Ref *string `url:"ref,omitempty" json:"ref,omitempty"`
-	Milestones *[]string `url:"milestones,omitempty" json:"milestones,omitempty"`
-	Assets *ReleaseAssetsOptions `url:"assets,omitempty" json:"assets,omitempty"`
-	ReleasedAt *time.Time `url:"released_at,omitempty" json:"released_at,omitempty"`
-}
-
-// ReleaseAssetsOptions represents release assets in CreateRelease() options.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/releases/index.html#create-a-release -type ReleaseAssetsOptions struct { - Links []*ReleaseAssetLinkOptions `url:"links,omitempty" json:"links,omitempty"` -} - -// ReleaseAssetLinkOptions represents release asset link in CreateRelease() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/releases/index.html#create-a-release -type ReleaseAssetLinkOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - URL *string `url:"url,omitempty" json:"url,omitempty"` - FilePath *string `url:"filepath,omitempty" json:"filepath,omitempty"` - DirectAssetPath *string `url:"direct_asset_path,omitempty" json:"direct_asset_path,omitempty"` - LinkType *LinkTypeValue `url:"link_type,omitempty" json:"link_type,omitempty"` -} - -// CreateRelease creates a release. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/releases/index.html#create-a-release -func (s *ReleasesService) CreateRelease(pid interface{}, opts *CreateReleaseOptions, options ...RequestOptionFunc) (*Release, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/releases", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opts, options) - if err != nil { - return nil, nil, err - } - - r := new(Release) - resp, err := s.client.Do(req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// UpdateReleaseOptions represents UpdateRelease() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/releases/index.html#update-a-release -type UpdateReleaseOptions struct { - Name *string `url:"name" json:"name"` - Description *string `url:"description" json:"description"` - Milestones *[]string `url:"milestones,omitempty" json:"milestones,omitempty"` - ReleasedAt *time.Time `url:"released_at,omitempty" json:"released_at,omitempty"` -} - -// UpdateRelease updates a release. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/releases/index.html#update-a-release -func (s *ReleasesService) UpdateRelease(pid interface{}, tagName string, opts *UpdateReleaseOptions, options ...RequestOptionFunc) (*Release, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/releases/%s", PathEscape(project), PathEscape(tagName)) - - req, err := s.client.NewRequest(http.MethodPut, u, opts, options) - if err != nil { - return nil, nil, err - } - - r := new(Release) - resp, err := s.client.Do(req, &r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// DeleteRelease deletes a release. 
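A sketch of CreateRelease. Tag, ref, and description are placeholders; per the endpoint's documented behavior, supplying Ref lets GitLab create the tag first when it does not exist yet:

package example

import (
	gitlab "github.com/xanzy/go-gitlab"
)

// cutRelease tags main as v1.2.0 (if needed) and publishes a release for it.
func cutRelease(git *gitlab.Client, pid interface{}) (*gitlab.Release, error) {
	rel, _, err := git.Releases.CreateRelease(pid, &gitlab.CreateReleaseOptions{
		Name:        gitlab.Ptr("v1.2.0"),
		TagName:     gitlab.Ptr("v1.2.0"),
		Ref:         gitlab.Ptr("main"),
		Description: gitlab.Ptr("Bug fixes and performance improvements."),
	})
	return rel, err
}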
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/releases/index.html#delete-a-release
-func (s *ReleasesService) DeleteRelease(pid interface{}, tagName string, options ...RequestOptionFunc) (*Release, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/releases/%s", PathEscape(project), PathEscape(tagName))
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	r := new(Release)
-	resp, err := s.client.Do(req, r)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return r, resp, nil
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/repositories.go b/vendor/github.com/xanzy/go-gitlab/repositories.go
deleted file mode 100644
index dde8761749..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/repositories.go
+++ /dev/null
@@ -1,421 +0,0 @@
-//
-// Copyright 2021, Sander van Harmelen
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package gitlab
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"net/http"
-	"net/url"
-)
-
-// RepositoriesService handles communication with the repositories related
-// methods of the GitLab API.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/repositories.html
-type RepositoriesService struct {
-	client *Client
-}
-
-// TreeNode represents a GitLab repository file or directory.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/repositories.html
-type TreeNode struct {
-	ID   string `json:"id"`
-	Name string `json:"name"`
-	Type string `json:"type"`
-	Path string `json:"path"`
-	Mode string `json:"mode"`
-}
-
-func (t TreeNode) String() string {
-	return Stringify(t)
-}
-
-// ListTreeOptions represents the available ListTree() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/repositories.html#list-repository-tree
-type ListTreeOptions struct {
-	ListOptions
-	Path      *string `url:"path,omitempty" json:"path,omitempty"`
-	Ref       *string `url:"ref,omitempty" json:"ref,omitempty"`
-	Recursive *bool   `url:"recursive,omitempty" json:"recursive,omitempty"`
-}
-
-// ListTree gets a list of repository files and directories in a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/repositories.html#list-repository-tree
-func (s *RepositoriesService) ListTree(pid interface{}, opt *ListTreeOptions, options ...RequestOptionFunc) ([]*TreeNode, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/repository/tree", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var t []*TreeNode
-	resp, err := s.client.Do(req, &t)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return t, resp, nil
-}
-
-// Blob gets information about blob in repository like size and content. Note
-// that blob content is Base64 encoded.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/repositories.html#get-a-blob-from-repository -func (s *RepositoriesService) Blob(pid interface{}, sha string, options ...RequestOptionFunc) ([]byte, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/blobs/%s", PathEscape(project), url.PathEscape(sha)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var b bytes.Buffer - resp, err := s.client.Do(req, &b) - if err != nil { - return nil, resp, err - } - - return b.Bytes(), resp, err -} - -// RawBlobContent gets the raw file contents for a blob by blob SHA. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/repositories.html#raw-blob-content -func (s *RepositoriesService) RawBlobContent(pid interface{}, sha string, options ...RequestOptionFunc) ([]byte, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/blobs/%s/raw", PathEscape(project), url.PathEscape(sha)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var b bytes.Buffer - resp, err := s.client.Do(req, &b) - if err != nil { - return nil, resp, err - } - - return b.Bytes(), resp, err -} - -// ArchiveOptions represents the available Archive() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/repositories.html#get-file-archive -type ArchiveOptions struct { - Format *string `url:"-" json:"-"` - Path *string `url:"path,omitempty" json:"path,omitempty"` - SHA *string `url:"sha,omitempty" json:"sha,omitempty"` -} - -// Archive gets an archive of the repository. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/repositories.html#get-file-archive -func (s *RepositoriesService) Archive(pid interface{}, opt *ArchiveOptions, options ...RequestOptionFunc) ([]byte, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/archive", PathEscape(project)) - - // Set an optional format for the archive. - if opt != nil && opt.Format != nil { - u = fmt.Sprintf("%s.%s", u, *opt.Format) - } - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var b bytes.Buffer - resp, err := s.client.Do(req, &b) - if err != nil { - return nil, resp, err - } - - return b.Bytes(), resp, err -} - -// StreamArchive streams an archive of the repository to the provided -// io.Writer. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/repositories.html#get-file-archive -func (s *RepositoriesService) StreamArchive(pid interface{}, w io.Writer, opt *ArchiveOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/repository/archive", PathEscape(project)) - - // Set an optional format for the archive. - if opt != nil && opt.Format != nil { - u = fmt.Sprintf("%s.%s", u, *opt.Format) - } - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, w) -} - -// Compare represents the result of a comparison of branches, tags or commits. 
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/repositories.html#compare-branches-tags-or-commits
-type Compare struct {
-	Commit         *Commit   `json:"commit"`
-	Commits        []*Commit `json:"commits"`
-	Diffs          []*Diff   `json:"diffs"`
-	CompareTimeout bool      `json:"compare_timeout"`
-	CompareSameRef bool      `json:"compare_same_ref"`
-}
-
-func (c Compare) String() string {
-	return Stringify(c)
-}
-
-// CompareOptions represents the available Compare() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/repositories.html#compare-branches-tags-or-commits
-type CompareOptions struct {
-	From     *string `url:"from,omitempty" json:"from,omitempty"`
-	To       *string `url:"to,omitempty" json:"to,omitempty"`
-	Straight *bool   `url:"straight,omitempty" json:"straight,omitempty"`
-	Unidiff  *bool   `url:"unidiff,omitempty" json:"unidiff,omitempty"`
-}
-
-// Compare compares branches, tags or commits.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/repositories.html#compare-branches-tags-or-commits
-func (s *RepositoriesService) Compare(pid interface{}, opt *CompareOptions, options ...RequestOptionFunc) (*Compare, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/repository/compare", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	c := new(Compare)
-	resp, err := s.client.Do(req, c)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return c, resp, nil
-}
-
-// Contributor represents a GitLab contributor.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/repositories.html#contributors
-type Contributor struct {
-	Name      string `json:"name"`
-	Email     string `json:"email"`
-	Commits   int    `json:"commits"`
-	Additions int    `json:"additions"`
-	Deletions int    `json:"deletions"`
-}
-
-func (c Contributor) String() string {
-	return Stringify(c)
-}
-
-// ListContributorsOptions represents the available ListContributors() options.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/repositories.html#contributors
-type ListContributorsOptions struct {
-	ListOptions
-	OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"`
-	Sort    *string `url:"sort,omitempty" json:"sort,omitempty"`
-}
-
-// Contributors gets the repository contributors list.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/repositories.html#contributors
-func (s *RepositoriesService) Contributors(pid interface{}, opt *ListContributorsOptions, options ...RequestOptionFunc) ([]*Contributor, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/repository/contributors", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var c []*Contributor
-	resp, err := s.client.Do(req, &c)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return c, resp, nil
-}
-
-// MergeBaseOptions represents the available MergeBase() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/repositories.html#merge-base
-type MergeBaseOptions struct {
-	Ref *[]string `url:"refs[],omitempty" json:"refs,omitempty"`
-}
-
-// MergeBase gets the common ancestor for 2 refs (commit SHAs, branch
-// names or tags).
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/repositories.html#merge-base
-func (s *RepositoriesService) MergeBase(pid interface{}, opt *MergeBaseOptions, options ...RequestOptionFunc) (*Commit, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/repository/merge_base", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	c := new(Commit)
-	resp, err := s.client.Do(req, c)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return c, resp, nil
-}
-
-// AddChangelogOptions represents the available AddChangelog() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/repositories.html#add-changelog-data-to-a-changelog-file
-type AddChangelogOptions struct {
-	Version    *string  `url:"version,omitempty" json:"version,omitempty"`
-	Branch     *string  `url:"branch,omitempty" json:"branch,omitempty"`
-	ConfigFile *string  `url:"config_file,omitempty" json:"config_file,omitempty"`
-	Date       *ISOTime `url:"date,omitempty" json:"date,omitempty"`
-	File       *string  `url:"file,omitempty" json:"file,omitempty"`
-	From       *string  `url:"from,omitempty" json:"from,omitempty"`
-	Message    *string  `url:"message,omitempty" json:"message,omitempty"`
-	To         *string  `url:"to,omitempty" json:"to,omitempty"`
-	Trailer    *string  `url:"trailer,omitempty" json:"trailer,omitempty"`
-}
-
-// AddChangelog generates changelog data based on commits in a repository.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/repositories.html#add-changelog-data-to-a-changelog-file
-func (s *RepositoriesService) AddChangelog(pid interface{}, opt *AddChangelogOptions, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/repository/changelog", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// ChangelogData represents the generated changelog data.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/repositories.html#generate-changelog-data
-type ChangelogData struct {
-	Notes string `json:"notes"`
-}
-
-func (c ChangelogData) String() string {
-	return Stringify(c)
-}
-
-// GenerateChangelogDataOptions represents the available GenerateChangelogData()
-// options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/repositories.html#generate-changelog-data
-type GenerateChangelogDataOptions struct {
-	Version    *string  `url:"version,omitempty" json:"version,omitempty"`
-	ConfigFile *string  `url:"config_file,omitempty" json:"config_file,omitempty"`
-	Date       *ISOTime `url:"date,omitempty" json:"date,omitempty"`
-	From       *string  `url:"from,omitempty" json:"from,omitempty"`
-	To         *string  `url:"to,omitempty" json:"to,omitempty"`
-	Trailer    *string  `url:"trailer,omitempty" json:"trailer,omitempty"`
-}
-
-// GenerateChangelogData generates changelog data based on commits in a
-// repository, without committing them to a changelog file.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/repositories.html#generate-changelog-data
-func (s *RepositoriesService) GenerateChangelogData(pid interface{}, opt GenerateChangelogDataOptions, options ...RequestOptionFunc) (*ChangelogData, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/repository/changelog", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	cd := new(ChangelogData)
-	resp, err := s.client.Do(req, cd)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return cd, resp, nil
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/repository_files.go b/vendor/github.com/xanzy/go-gitlab/repository_files.go
deleted file mode 100644
index 7ffaa93b56..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/repository_files.go
+++ /dev/null
@@ -1,385 +0,0 @@
-//
-// Copyright 2021, Sander van Harmelen
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package gitlab
-
-import (
-	"bytes"
-	"fmt"
-	"net/http"
-	"strconv"
-	"time"
-)
-
-// RepositoryFilesService handles communication with the repository files
-// related methods of the GitLab API.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/repository_files.html
-type RepositoryFilesService struct {
-	client *Client
-}
-
-// File represents a GitLab repository file.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/repository_files.html
-type File struct {
-	FileName        string `json:"file_name"`
-	FilePath        string `json:"file_path"`
-	Size            int    `json:"size"`
-	Encoding        string `json:"encoding"`
-	Content         string `json:"content"`
-	ExecuteFilemode bool   `json:"execute_filemode"`
-	Ref             string `json:"ref"`
-	BlobID          string `json:"blob_id"`
-	CommitID        string `json:"commit_id"`
-	SHA256          string `json:"content_sha256"`
-	LastCommitID    string `json:"last_commit_id"`
-}
-
-func (r File) String() string {
-	return Stringify(r)
-}
-
-// GetFileOptions represents the available GetFile() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/repository_files.html#get-file-from-repository
-type GetFileOptions struct {
-	Ref *string `url:"ref,omitempty" json:"ref,omitempty"`
-}
-
-// GetFile allows you to receive information about a file in repository like
-// name, size, content. Note that file content is Base64 encoded.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/repository_files.html#get-file-from-repository -func (s *RepositoryFilesService) GetFile(pid interface{}, fileName string, opt *GetFileOptions, options ...RequestOptionFunc) (*File, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf( - "projects/%s/repository/files/%s", - PathEscape(project), - PathEscape(fileName), - ) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - f := new(File) - resp, err := s.client.Do(req, f) - if err != nil { - return nil, resp, err - } - - return f, resp, nil -} - -// GetFileMetaDataOptions represents the available GetFileMetaData() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/repository_files.html#get-file-from-repository -type GetFileMetaDataOptions struct { - Ref *string `url:"ref,omitempty" json:"ref,omitempty"` -} - -// GetFileMetaData allows you to receive meta information about a file in -// repository like name, size. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/repository_files.html#get-file-from-repository -func (s *RepositoryFilesService) GetFileMetaData(pid interface{}, fileName string, opt *GetFileMetaDataOptions, options ...RequestOptionFunc) (*File, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf( - "projects/%s/repository/files/%s", - PathEscape(project), - PathEscape(fileName), - ) - - req, err := s.client.NewRequest(http.MethodHead, u, opt, options) - if err != nil { - return nil, nil, err - } - - resp, err := s.client.Do(req, nil) - if err != nil { - return nil, resp, err - } - - f := &File{ - BlobID: resp.Header.Get("X-Gitlab-Blob-Id"), - CommitID: resp.Header.Get("X-Gitlab-Commit-Id"), - Encoding: resp.Header.Get("X-Gitlab-Encoding"), - FileName: resp.Header.Get("X-Gitlab-File-Name"), - FilePath: resp.Header.Get("X-Gitlab-File-Path"), - ExecuteFilemode: resp.Header.Get("X-Gitlab-Execute-Filemode") == "true", - Ref: resp.Header.Get("X-Gitlab-Ref"), - SHA256: resp.Header.Get("X-Gitlab-Content-Sha256"), - LastCommitID: resp.Header.Get("X-Gitlab-Last-Commit-Id"), - } - - if sizeString := resp.Header.Get("X-Gitlab-Size"); sizeString != "" { - f.Size, err = strconv.Atoi(sizeString) - if err != nil { - return nil, resp, err - } - } - - return f, resp, nil -} - -// FileBlameRange represents one item of blame information. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/repository_files.html -type FileBlameRange struct { - Commit struct { - ID string `json:"id"` - ParentIDs []string `json:"parent_ids"` - Message string `json:"message"` - AuthoredDate *time.Time `json:"authored_date"` - AuthorName string `json:"author_name"` - AuthorEmail string `json:"author_email"` - CommittedDate *time.Time `json:"committed_date"` - CommitterName string `json:"committer_name"` - CommitterEmail string `json:"committer_email"` - } `json:"commit"` - Lines []string `json:"lines"` -} - -func (b FileBlameRange) String() string { - return Stringify(b) -} - -// GetFileBlameOptions represents the available GetFileBlame() options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/repository_files.html#get-file-blame-from-repository -type GetFileBlameOptions struct { - Ref *string `url:"ref,omitempty" json:"ref,omitempty"` - RangeStart *int `url:"range[start],omitempty" json:"range[start],omitempty"` - RangeEnd *int `url:"range[end],omitempty" json:"range[end],omitempty"` -} - -// GetFileBlame allows you to receive blame information. Each blame range -// contains lines and corresponding commit info. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/repository_files.html#get-file-blame-from-repository -func (s *RepositoryFilesService) GetFileBlame(pid interface{}, file string, opt *GetFileBlameOptions, options ...RequestOptionFunc) ([]*FileBlameRange, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf( - "projects/%s/repository/files/%s/blame", - PathEscape(project), - PathEscape(file), - ) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var br []*FileBlameRange - resp, err := s.client.Do(req, &br) - if err != nil { - return nil, resp, err - } - - return br, resp, nil -} - -// GetRawFileOptions represents the available GetRawFile() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/repository_files.html#get-raw-file-from-repository -type GetRawFileOptions struct { - Ref *string `url:"ref,omitempty" json:"ref,omitempty"` - LFS *bool `url:"lfs,omitempty" json:"lfs,omitempty"` -} - -// GetRawFile allows you to receive the raw file in repository. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/repository_files.html#get-raw-file-from-repository -func (s *RepositoryFilesService) GetRawFile(pid interface{}, fileName string, opt *GetRawFileOptions, options ...RequestOptionFunc) ([]byte, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf( - "projects/%s/repository/files/%s/raw", - PathEscape(project), - PathEscape(fileName), - ) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var f bytes.Buffer - resp, err := s.client.Do(req, &f) - if err != nil { - return nil, resp, err - } - - return f.Bytes(), resp, err -} - -// FileInfo represents file details of a GitLab repository file. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/repository_files.html -type FileInfo struct { - FilePath string `json:"file_path"` - Branch string `json:"branch"` -} - -func (r FileInfo) String() string { - return Stringify(r) -} - -// CreateFileOptions represents the available CreateFile() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/repository_files.html#create-new-file-in-repository -type CreateFileOptions struct { - Branch *string `url:"branch,omitempty" json:"branch,omitempty"` - StartBranch *string `url:"start_branch,omitempty" json:"start_branch,omitempty"` - Encoding *string `url:"encoding,omitempty" json:"encoding,omitempty"` - AuthorEmail *string `url:"author_email,omitempty" json:"author_email,omitempty"` - AuthorName *string `url:"author_name,omitempty" json:"author_name,omitempty"` - Content *string `url:"content,omitempty" json:"content,omitempty"` - CommitMessage *string `url:"commit_message,omitempty" json:"commit_message,omitempty"` - ExecuteFilemode *bool `url:"execute_filemode,omitempty" json:"execute_filemode,omitempty"` -} - -// CreateFile creates a new file in a repository. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/repository_files.html#create-new-file-in-repository -func (s *RepositoryFilesService) CreateFile(pid interface{}, fileName string, opt *CreateFileOptions, options ...RequestOptionFunc) (*FileInfo, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf( - "projects/%s/repository/files/%s", - PathEscape(project), - PathEscape(fileName), - ) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - f := new(FileInfo) - resp, err := s.client.Do(req, f) - if err != nil { - return nil, resp, err - } - - return f, resp, nil -} - -// UpdateFileOptions represents the available UpdateFile() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/repository_files.html#update-existing-file-in-repository -type UpdateFileOptions struct { - Branch *string `url:"branch,omitempty" json:"branch,omitempty"` - StartBranch *string `url:"start_branch,omitempty" json:"start_branch,omitempty"` - Encoding *string `url:"encoding,omitempty" json:"encoding,omitempty"` - AuthorEmail *string `url:"author_email,omitempty" json:"author_email,omitempty"` - AuthorName *string `url:"author_name,omitempty" json:"author_name,omitempty"` - Content *string `url:"content,omitempty" json:"content,omitempty"` - CommitMessage *string `url:"commit_message,omitempty" json:"commit_message,omitempty"` - LastCommitID *string `url:"last_commit_id,omitempty" json:"last_commit_id,omitempty"` - ExecuteFilemode *bool `url:"execute_filemode,omitempty" json:"execute_filemode,omitempty"` -} - -// UpdateFile updates an existing file in a repository -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/repository_files.html#update-existing-file-in-repository -func (s *RepositoryFilesService) UpdateFile(pid interface{}, fileName string, opt *UpdateFileOptions, options ...RequestOptionFunc) (*FileInfo, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf( - "projects/%s/repository/files/%s", - PathEscape(project), - PathEscape(fileName), - ) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - f := new(FileInfo) - resp, err := s.client.Do(req, f) - if err != nil { - return nil, resp, err - } - - return f, resp, nil -} - -// DeleteFileOptions represents the available DeleteFile() options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/repository_files.html#delete-existing-file-in-repository -type DeleteFileOptions struct { - Branch *string `url:"branch,omitempty" json:"branch,omitempty"` - StartBranch *string `url:"start_branch,omitempty" json:"start_branch,omitempty"` - AuthorEmail *string `url:"author_email,omitempty" json:"author_email,omitempty"` - AuthorName *string `url:"author_name,omitempty" json:"author_name,omitempty"` - CommitMessage *string `url:"commit_message,omitempty" json:"commit_message,omitempty"` - LastCommitID *string `url:"last_commit_id,omitempty" json:"last_commit_id,omitempty"` -} - -// DeleteFile deletes an existing file in a repository -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/repository_files.html#delete-existing-file-in-repository -func (s *RepositoryFilesService) DeleteFile(pid interface{}, fileName string, opt *DeleteFileOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf( - "projects/%s/repository/files/%s", - PathEscape(project), - PathEscape(fileName), - ) - - req, err := s.client.NewRequest(http.MethodDelete, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/repository_submodules.go b/vendor/github.com/xanzy/go-gitlab/repository_submodules.go deleted file mode 100644 index 7022a45521..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/repository_submodules.go +++ /dev/null @@ -1,93 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// RepositorySubmodulesService handles communication with the repository -// submodules related methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/repository_submodules.html -type RepositorySubmodulesService struct { - client *Client -} - -// SubmoduleCommit represents a GitLab submodule commit. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/repository_submodules.html -type SubmoduleCommit struct { - ID string `json:"id"` - ShortID string `json:"short_id"` - Title string `json:"title"` - AuthorName string `json:"author_name"` - AuthorEmail string `json:"author_email"` - CommitterName string `json:"committer_name"` - CommitterEmail string `json:"committer_email"` - CreatedAt *time.Time `json:"created_at"` - Message string `json:"message"` - ParentIDs []string `json:"parent_ids"` - CommittedDate *time.Time `json:"committed_date"` - AuthoredDate *time.Time `json:"authored_date"` - Status *BuildStateValue `json:"status"` -} - -func (r SubmoduleCommit) String() string { - return Stringify(r) -} - -// UpdateSubmoduleOptions represents the available UpdateSubmodule() options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/repository_submodules.html#update-existing-submodule-reference-in-repository -type UpdateSubmoduleOptions struct { - Branch *string `url:"branch,omitempty" json:"branch,omitempty"` - CommitSHA *string `url:"commit_sha,omitempty" json:"commit_sha,omitempty"` - CommitMessage *string `url:"commit_message,omitempty" json:"commit_message,omitempty"` -} - -// UpdateSubmodule updates an existing submodule reference. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/repository_submodules.html#update-existing-submodule-reference-in-repository -func (s *RepositorySubmodulesService) UpdateSubmodule(pid interface{}, submodule string, opt *UpdateSubmoduleOptions, options ...RequestOptionFunc) (*SubmoduleCommit, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf( - "projects/%s/repository/submodules/%s", - PathEscape(project), - PathEscape(submodule), - ) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - sc := new(SubmoduleCommit) - resp, err := s.client.Do(req, sc) - if err != nil { - return nil, resp, err - } - - return sc, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/request_options.go b/vendor/github.com/xanzy/go-gitlab/request_options.go deleted file mode 100644 index d158047f69..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/request_options.go +++ /dev/null @@ -1,102 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "context" - "net/url" - - retryablehttp "github.com/hashicorp/go-retryablehttp" -) - -// RequestOptionFunc can be passed to all API requests to customize the API request. -type RequestOptionFunc func(*retryablehttp.Request) error - -// WithContext runs the request with the provided context -func WithContext(ctx context.Context) RequestOptionFunc { - return func(req *retryablehttp.Request) error { - *req = *req.WithContext(ctx) - return nil - } -} - -// WithHeader takes a header name and value and appends it to the request headers. -func WithHeader(name, value string) RequestOptionFunc { - return func(req *retryablehttp.Request) error { - req.Header.Set(name, value) - return nil - } -} - -// WithHeaders takes a map of header name/value pairs and appends them to the -// request headers. -func WithHeaders(headers map[string]string) RequestOptionFunc { - return func(req *retryablehttp.Request) error { - for k, v := range headers { - req.Header.Set(k, v) - } - return nil - } -} - -// WithKeysetPaginationParameters takes a "next" link from the Link header of a -// response to a keyset-based paginated request and modifies the values of each -// query parameter in the request with its corresponding response parameter. 
-func WithKeysetPaginationParameters(nextLink string) RequestOptionFunc {
-	return func(req *retryablehttp.Request) error {
-		nextUrl, err := url.Parse(nextLink)
-		if err != nil {
-			return err
-		}
-		q := req.URL.Query()
-		for k, values := range nextUrl.Query() {
-			q.Del(k)
-			for _, v := range values {
-				q.Add(k, v)
-			}
-		}
-		req.URL.RawQuery = q.Encode()
-		return nil
-	}
-}
-
-// WithSudo takes either a username or user ID and sets the SUDO request header.
-func WithSudo(uid interface{}) RequestOptionFunc {
-	return func(req *retryablehttp.Request) error {
-		user, err := parseID(uid)
-		if err != nil {
-			return err
-		}
-		req.Header.Set("SUDO", user)
-		return nil
-	}
-}
-
-// WithToken takes a token which is then used when making this one request.
-func WithToken(authType AuthType, token string) RequestOptionFunc {
-	return func(req *retryablehttp.Request) error {
-		switch authType {
-		case JobToken:
-			req.Header.Set("JOB-TOKEN", token)
-		case OAuthToken:
-			req.Header.Set("Authorization", "Bearer "+token)
-		case PrivateToken:
-			req.Header.Set("PRIVATE-TOKEN", token)
-		}
-		return nil
-	}
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/resource_group.go b/vendor/github.com/xanzy/go-gitlab/resource_group.go
deleted file mode 100644
index b11cd8be7a..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/resource_group.go
+++ /dev/null
@@ -1,165 +0,0 @@
-//
-// Copyright 2021, Sander van Harmelen
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package gitlab
-
-import (
-	"fmt"
-	"net/http"
-	"time"
-)
-
-// ResourceGroupService handles communication with the resource
-// group related methods of the GitLab API.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/resource_groups.html
-type ResourceGroupService struct {
-	client *Client
-}
-
-// ResourceGroup represents a GitLab Project Resource Group.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/resource_groups.html
-type ResourceGroup struct {
-	ID          int        `json:"id"`
-	Key         string     `json:"key"`
-	ProcessMode string     `json:"process_mode"`
-	CreatedAt   *time.Time `json:"created_at"`
-	UpdatedAt   *time.Time `json:"updated_at"`
-}
-
-// Gets a string representation of a ResourceGroup.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/resource_groups.html
-func (rg ResourceGroup) String() string {
-	return Stringify(rg)
-}
-
-// GetAllResourceGroupsForAProject allows you to get all resource
-// groups associated with a given project.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/resource_groups.html#get-all-resource-groups-for-a-project -func (s *ResourceGroupService) GetAllResourceGroupsForAProject(pid interface{}, options ...RequestOptionFunc) ([]*ResourceGroup, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/resource_groups", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var rgs []*ResourceGroup - resp, err := s.client.Do(req, &rgs) - if err != nil { - return nil, resp, err - } - - return rgs, resp, nil -} - -// GetASpecificResourceGroup allows you to get a specific -// resource group for a given project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/resource_groups.html#get-a-specific-resource-group -func (s *ResourceGroupService) GetASpecificResourceGroup(pid interface{}, key string, options ...RequestOptionFunc) (*ResourceGroup, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/resource_groups/%s", PathEscape(project), key) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - rg := new(ResourceGroup) - resp, err := s.client.Do(req, rg) - if err != nil { - return nil, resp, err - } - - return rg, resp, nil -} - -// ListUpcomingJobsForASpecificResourceGroup allows you to get all -// upcoming jobs for a specific resource group for a given project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/resource_groups.html#list-upcoming-jobs-for-a-specific-resource-group -func (s *ResourceGroupService) ListUpcomingJobsForASpecificResourceGroup(pid interface{}, key string, options ...RequestOptionFunc) ([]*Job, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/resource_groups/%s/upcoming_jobs", PathEscape(project), key) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var js []*Job - resp, err := s.client.Do(req, &js) - if err != nil { - return nil, resp, err - } - - return js, resp, nil -} - -// EditAnExistingResourceGroupOptions represents the available -// EditAnExistingResourceGroup options. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/resource_groups.html#edit-an-existing-resource-group -type EditAnExistingResourceGroupOptions struct { - ProcessMode *ResourceGroupProcessMode `url:"process_mode,omitempty" json:"process_mode,omitempty"` -} - -// EditAnExistingResourceGroup allows you to edit a specific -// resource group for a given project -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/resource_groups.html#edit-an-existing-resource-group -func (s *ResourceGroupService) EditAnExistingResourceGroup(pid interface{}, key string, opts *EditAnExistingResourceGroupOptions, options ...RequestOptionFunc) (*ResourceGroup, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/resource_groups/%s", PathEscape(project), key) - - req, err := s.client.NewRequest(http.MethodPut, u, opts, options) - if err != nil { - return nil, nil, err - } - - rg := new(ResourceGroup) - resp, err := s.client.Do(req, rg) - if err != nil { - return nil, resp, err - } - - return rg, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/resource_iteration_events.go b/vendor/github.com/xanzy/go-gitlab/resource_iteration_events.go deleted file mode 100644 index 142cb9e6ee..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/resource_iteration_events.go +++ /dev/null @@ -1,122 +0,0 @@ -// -// Copyright 2023, Hakki Ceylan, Yavuz Turk -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// ResourceIterationEventsService handles communication with the event related -// methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/resource_iteration_events.html -type ResourceIterationEventsService struct { - client *Client -} - -// IterationEvent represents a resource iteration event. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/resource_iteration_events.html -type IterationEvent struct { - ID int `json:"id"` - User *BasicUser `json:"user"` - CreatedAt *time.Time `json:"created_at"` - ResourceType string `json:"resource_type"` - ResourceID int `json:"resource_id"` - Iteration *Iteration `json:"iteration"` - Action string `json:"action"` -} - -// Iteration represents a project issue iteration. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/resource_iteration_events.html -type Iteration struct { - ID int `json:"id"` - IID int `json:"iid"` - Sequence int `json:"sequence"` - GroupID int `json:"group_id"` - Title string `json:"title"` - Description string `json:"description"` - State int `json:"state"` - CreatedAt *time.Time `json:"created_at"` - UpdatedAt *time.Time `json:"updated_at"` - DueDate *ISOTime `json:"due_date"` - StartDate *ISOTime `json:"start_date"` - WebURL string `json:"web_url"` -} - -// ListIterationEventsOptions represents the options for all resource state -// events list methods. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/resource_iteration_events.html#list-project-issue-iteration-events -type ListIterationEventsOptions struct { - ListOptions -} - -// ListIssueIterationEvents retrieves resource iteration events for the -// specified project and issue. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/resource_iteration_events.html#list-project-issue-iteration-events -func (s *ResourceIterationEventsService) ListIssueIterationEvents(pid interface{}, issue int, opt *ListIterationEventsOptions, options ...RequestOptionFunc) ([]*IterationEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/resource_iteration_events", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ies []*IterationEvent - resp, err := s.client.Do(req, &ies) - if err != nil { - return nil, resp, err - } - - return ies, resp, nil -} - -// GetIssueIterationEvent gets a single issue iteration event. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/resource_iteration_events.html#get-single-issue-iteration-event -func (s *ResourceIterationEventsService) GetIssueIterationEvent(pid interface{}, issue int, event int, options ...RequestOptionFunc) (*IterationEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/resource_iteration_events/%d", PathEscape(project), issue, event) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ie := new(IterationEvent) - resp, err := s.client.Do(req, ie) - if err != nil { - return nil, resp, err - } - - return ie, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/resource_label_events.go b/vendor/github.com/xanzy/go-gitlab/resource_label_events.go deleted file mode 100644 index 46c96fccad..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/resource_label_events.go +++ /dev/null @@ -1,220 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// ResourceLabelEventsService handles communication with the event related -// methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/resource_label_events.html -type ResourceLabelEventsService struct { - client *Client -} - -// LabelEvent represents a resource label event. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/resource_label_events.html#get-single-issue-label-event -type LabelEvent struct { - ID int `json:"id"` - Action string `json:"action"` - CreatedAt *time.Time `json:"created_at"` - ResourceType string `json:"resource_type"` - ResourceID int `json:"resource_id"` - User struct { - ID int `json:"id"` - Name string `json:"name"` - Username string `json:"username"` - State string `json:"state"` - AvatarURL string `json:"avatar_url"` - WebURL string `json:"web_url"` - } `json:"user"` - Label struct { - ID int `json:"id"` - Name string `json:"name"` - Color string `json:"color"` - TextColor string `json:"text_color"` - Description string `json:"description"` - } `json:"label"` -} - -// ListLabelEventsOptions represents the options for all resource label events -// list methods. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/resource_label_events.html#list-project-issue-label-events -type ListLabelEventsOptions struct { - ListOptions -} - -// ListIssueLabelEvents retrieves resource label events for the -// specified project and issue. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/resource_label_events.html#list-project-issue-label-events -func (s *ResourceLabelEventsService) ListIssueLabelEvents(pid interface{}, issue int, opt *ListLabelEventsOptions, options ...RequestOptionFunc) ([]*LabelEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/resource_label_events", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ls []*LabelEvent - resp, err := s.client.Do(req, &ls) - if err != nil { - return nil, resp, err - } - - return ls, resp, nil -} - -// GetIssueLabelEvent gets a single issue-label-event. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/resource_label_events.html#get-single-issue-label-event -func (s *ResourceLabelEventsService) GetIssueLabelEvent(pid interface{}, issue int, event int, options ...RequestOptionFunc) (*LabelEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/resource_label_events/%d", PathEscape(project), issue, event) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - l := new(LabelEvent) - resp, err := s.client.Do(req, l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil -} - -// ListGroupEpicLabelEvents retrieves resource label events for the specified -// group and epic. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/resource_label_events.html#list-group-epic-label-events -func (s *ResourceLabelEventsService) ListGroupEpicLabelEvents(gid interface{}, epic int, opt *ListLabelEventsOptions, options ...RequestOptionFunc) ([]*LabelEvent, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epics/%d/resource_label_events", PathEscape(group), epic) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ls []*LabelEvent - resp, err := s.client.Do(req, &ls) - if err != nil { - return nil, resp, err - } - - return ls, resp, nil -} - -// GetGroupEpicLabelEvent gets a single group epic label event. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/resource_label_events.html#get-single-epic-label-event -func (s *ResourceLabelEventsService) GetGroupEpicLabelEvent(gid interface{}, epic int, event int, options ...RequestOptionFunc) (*LabelEvent, *Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("groups/%s/epics/%d/resource_label_events/%d", PathEscape(group), epic, event) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - l := new(LabelEvent) - resp, err := s.client.Do(req, l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil -} - -// ListMergeRequestsLabelEvents retrieves resource label events for the specified -// project and merge request. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/resource_label_events.html#list-project-merge-request-label-events -func (s *ResourceLabelEventsService) ListMergeRequestsLabelEvents(pid interface{}, request int, opt *ListLabelEventsOptions, options ...RequestOptionFunc) ([]*LabelEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/resource_label_events", PathEscape(project), request) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ls []*LabelEvent - resp, err := s.client.Do(req, &ls) - if err != nil { - return nil, resp, err - } - - return ls, resp, nil -} - -// GetMergeRequestLabelEvent gets a single merge request label event. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/resource_label_events.html#get-single-merge-request-label-event -func (s *ResourceLabelEventsService) GetMergeRequestLabelEvent(pid interface{}, request int, event int, options ...RequestOptionFunc) (*LabelEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/resource_label_events/%d", PathEscape(project), request, event) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - l := new(LabelEvent) - resp, err := s.client.Do(req, l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/resource_milestone_events.go b/vendor/github.com/xanzy/go-gitlab/resource_milestone_events.go deleted file mode 100644 index 22925bb0a0..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/resource_milestone_events.go +++ /dev/null @@ -1,155 +0,0 @@ -// -// Copyright 2022, Mai Lapyst -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// ResourceMilestoneEventsService handles communication with the event related -// methods of the GitLab API. 
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/resource_milestone_events.html -type ResourceMilestoneEventsService struct { - client *Client -} - -// MilestoneEvent represents a resource milestone event. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/resource_milestone_events.html -type MilestoneEvent struct { - ID int `json:"id"` - User *BasicUser `json:"user"` - CreatedAt *time.Time `json:"created_at"` - ResourceType string `json:"resource_type"` - ResourceID int `json:"resource_id"` - Milestone *Milestone `json:"milestone"` - Action string `json:"action"` -} - -// ListMilestoneEventsOptions represents the options for all resource state events -// list methods. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/resource_milestone_events.html#list-project-issue-milestone-events -type ListMilestoneEventsOptions struct { - ListOptions -} - -// ListIssueMilestoneEvents retrieves resource milestone events for the specified -// project and issue. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/resource_milestone_events.html#list-project-issue-milestone-events -func (s *ResourceMilestoneEventsService) ListIssueMilestoneEvents(pid interface{}, issue int, opt *ListMilestoneEventsOptions, options ...RequestOptionFunc) ([]*MilestoneEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/resource_milestone_events", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var mes []*MilestoneEvent - resp, err := s.client.Do(req, &mes) - if err != nil { - return nil, resp, err - } - - return mes, resp, nil -} - -// GetIssueMilestoneEvent gets a single issue milestone event. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/resource_milestone_events.html#get-single-issue-milestone-event -func (s *ResourceMilestoneEventsService) GetIssueMilestoneEvent(pid interface{}, issue int, event int, options ...RequestOptionFunc) (*MilestoneEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/resource_milestone_events/%d", PathEscape(project), issue, event) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - me := new(MilestoneEvent) - resp, err := s.client.Do(req, me) - if err != nil { - return nil, resp, err - } - - return me, resp, nil -} - -// ListMergeMilestoneEvents retrieves resource milestone events for the specified -// project and merge request. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/resource_milestone_events.html#list-project-merge-request-milestone-events -func (s *ResourceMilestoneEventsService) ListMergeMilestoneEvents(pid interface{}, request int, opt *ListMilestoneEventsOptions, options ...RequestOptionFunc) ([]*MilestoneEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/resource_milestone_events", PathEscape(project), request) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var mes []*MilestoneEvent - resp, err := s.client.Do(req, &mes) - if err != nil { - return nil, resp, err - } - - return mes, resp, nil -} - -// GetMergeRequestMilestoneEvent gets a single merge request milestone event. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/resource_milestone_events.html#get-single-merge-request-milestone-event -func (s *ResourceMilestoneEventsService) GetMergeRequestMilestoneEvent(pid interface{}, request int, event int, options ...RequestOptionFunc) (*MilestoneEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/resource_milestone_events/%d", PathEscape(project), request, event) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - me := new(MilestoneEvent) - resp, err := s.client.Do(req, me) - if err != nil { - return nil, resp, err - } - - return me, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/resource_state_events.go b/vendor/github.com/xanzy/go-gitlab/resource_state_events.go deleted file mode 100644 index 867fd6d7e6..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/resource_state_events.go +++ /dev/null @@ -1,154 +0,0 @@ -// -// Copyright 2021, Matthias Simon -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// ResourceStateEventsService handles communication with the event related -// methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/resource_state_events.html -type ResourceStateEventsService struct { - client *Client -} - -// StateEvent represents a resource state event. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/resource_state_events.html -type StateEvent struct { - ID int `json:"id"` - User *BasicUser `json:"user"` - CreatedAt *time.Time `json:"created_at"` - ResourceType string `json:"resource_type"` - ResourceID int `json:"resource_id"` - State EventTypeValue `json:"state"` -} - -// ListStateEventsOptions represents the options for all resource state events -// list methods. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/resource_state_events.html#list-project-issue-state-events -type ListStateEventsOptions struct { - ListOptions -} - -// ListIssueStateEvents retrieves resource state events for the specified -// project and issue. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/resource_state_events.html#list-project-issue-state-events -func (s *ResourceStateEventsService) ListIssueStateEvents(pid interface{}, issue int, opt *ListStateEventsOptions, options ...RequestOptionFunc) ([]*StateEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/resource_state_events", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ses []*StateEvent - resp, err := s.client.Do(req, &ses) - if err != nil { - return nil, resp, err - } - - return ses, resp, nil -} - -// GetIssueStateEvent gets a single issue-state-event. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/resource_state_events.html#get-single-issue-state-event -func (s *ResourceStateEventsService) GetIssueStateEvent(pid interface{}, issue int, event int, options ...RequestOptionFunc) (*StateEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/resource_state_events/%d", PathEscape(project), issue, event) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - se := new(StateEvent) - resp, err := s.client.Do(req, se) - if err != nil { - return nil, resp, err - } - - return se, resp, nil -} - -// ListMergeStateEvents retrieves resource state events for the specified -// project and merge request. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/resource_state_events.html#list-project-merge-request-state-events -func (s *ResourceStateEventsService) ListMergeStateEvents(pid interface{}, request int, opt *ListStateEventsOptions, options ...RequestOptionFunc) ([]*StateEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/resource_state_events", PathEscape(project), request) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ses []*StateEvent - resp, err := s.client.Do(req, &ses) - if err != nil { - return nil, resp, err - } - - return ses, resp, nil -} - -// GetMergeRequestStateEvent gets a single merge request state event. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/resource_state_events.html#get-single-merge-request-state-event -func (s *ResourceStateEventsService) GetMergeRequestStateEvent(pid interface{}, request int, event int, options ...RequestOptionFunc) (*StateEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/merge_requests/%d/resource_state_events/%d", PathEscape(project), request, event) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - se := new(StateEvent) - resp, err := s.client.Do(req, se) - if err != nil { - return nil, resp, err - } - - return se, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/resource_weight_events.go b/vendor/github.com/xanzy/go-gitlab/resource_weight_events.go deleted file mode 100644 index 1251836fff..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/resource_weight_events.go +++ /dev/null @@ -1,80 +0,0 @@ -// -// Copyright 2021, Matthias Simon -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// ResourceWeightEventsService handles communication with the event related -// methods of the GitLab API. 
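The state events service above follows the same list/get shape. A sketch of listing the close/reopen history of an issue, assuming an authenticated *gitlab.Client; the project path and issue IID are placeholders:

import (
	"fmt"

	gitlab "github.com/xanzy/go-gitlab"
)

// listIssueStateEvents prints when the issue was closed or reopened.
func listIssueStateEvents(client *gitlab.Client) error {
	events, _, err := client.ResourceStateEvents.ListIssueStateEvents(
		"group/project", 42,
		&gitlab.ListStateEventsOptions{ListOptions: gitlab.ListOptions{PerPage: 20}},
	)
	if err != nil {
		return err
	}
	for _, e := range events {
		fmt.Printf("issue state %q at %v\n", e.State, e.CreatedAt)
	}
	return nil
}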
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/resource_weight_events.html -type ResourceWeightEventsService struct { - client *Client -} - -// WeightEvent represents a resource weight event. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/resource_weight_events.html -type WeightEvent struct { - ID int `json:"id"` - User *BasicUser `json:"user"` - CreatedAt *time.Time `json:"created_at"` - ResourceType string `json:"resource_type"` - ResourceID int `json:"resource_id"` - State EventTypeValue `json:"state"` - IssueID int `json:"issue_id"` - Weight int `json:"weight"` -} - -// ListWeightEventsOptions represents the options for all resource weight events -// list methods. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/resource_weight_events.html#list-project-issue-weight-events -type ListWeightEventsOptions struct { - ListOptions -} - -// ListIssueWeightEvents retrieves resource weight events for the specified -// project and issue. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/resource_weight_events.html#list-project-issue-weight-events -func (s *ResourceWeightEventsService) ListIssueWeightEvents(pid interface{}, issue int, opt *ListWeightEventsOptions, options ...RequestOptionFunc) ([]*WeightEvent, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/issues/%d/resource_weight_events", PathEscape(project), issue) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var wes []*WeightEvent - resp, err := s.client.Do(req, &wes) - if err != nil { - return nil, resp, err - } - - return wes, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/runners.go b/vendor/github.com/xanzy/go-gitlab/runners.go deleted file mode 100644 index 5224cf91b8..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/runners.go +++ /dev/null @@ -1,597 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// RunnersService handles communication with the runner related methods of the -// GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/runners.html -type RunnersService struct { - client *Client -} - -// Runner represents a GitLab CI Runner. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/runners.html -type Runner struct { - ID int `json:"id"` - Description string `json:"description"` - Active bool `json:"active"` - Paused bool `json:"paused"` - IsShared bool `json:"is_shared"` - IPAddress string `json:"ip_address"` - RunnerType string `json:"runner_type"` - Name string `json:"name"` - Online bool `json:"online"` - Status string `json:"status"` - Token string `json:"token"` - TokenExpiresAt *time.Time `json:"token_expires_at"` -} - -// RunnerDetails represents the GitLab CI runner details. 
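The weight events service exposes only the single list method shown above. A corresponding sketch, again assuming an authenticated client and placeholder IDs:

import (
	"fmt"

	gitlab "github.com/xanzy/go-gitlab"
)

// listIssueWeightEvents prints each weight change recorded on the issue.
func listIssueWeightEvents(client *gitlab.Client) error {
	events, _, err := client.ResourceWeightEvents.ListIssueWeightEvents(
		"group/project", 42,
		&gitlab.ListWeightEventsOptions{ListOptions: gitlab.ListOptions{PerPage: 20}},
	)
	if err != nil {
		return err
	}
	for _, e := range events {
		fmt.Printf("weight set to %d at %v\n", e.Weight, e.CreatedAt)
	}
	return nil
}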
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/runners.html -type RunnerDetails struct { - Paused bool `json:"paused"` - Architecture string `json:"architecture"` - Description string `json:"description"` - ID int `json:"id"` - IPAddress string `json:"ip_address"` - IsShared bool `json:"is_shared"` - RunnerType string `json:"runner_type"` - ContactedAt *time.Time `json:"contacted_at"` - Name string `json:"name"` - Online bool `json:"online"` - Status string `json:"status"` - Platform string `json:"platform"` - Projects []struct { - ID int `json:"id"` - Name string `json:"name"` - NameWithNamespace string `json:"name_with_namespace"` - Path string `json:"path"` - PathWithNamespace string `json:"path_with_namespace"` - } `json:"projects"` - Token string `json:"token"` - Revision string `json:"revision"` - TagList []string `json:"tag_list"` - RunUntagged bool `json:"run_untagged"` - Version string `json:"version"` - Locked bool `json:"locked"` - AccessLevel string `json:"access_level"` - MaximumTimeout int `json:"maximum_timeout"` - Groups []struct { - ID int `json:"id"` - Name string `json:"name"` - WebURL string `json:"web_url"` - } `json:"groups"` - - // Deprecated: Use Paused instead. (Deprecated in GitLab 14.8) - Active bool `json:"active"` -} - -// ListRunnersOptions represents the available ListRunners() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/runners.html#list-owned-runners -type ListRunnersOptions struct { - ListOptions - Type *string `url:"type,omitempty" json:"type,omitempty"` - Status *string `url:"status,omitempty" json:"status,omitempty"` - Paused *bool `url:"paused,omitempty" json:"paused,omitempty"` - TagList *[]string `url:"tag_list,comma,omitempty" json:"tag_list,omitempty"` - - // Deprecated: Use Type or Status instead. - Scope *string `url:"scope,omitempty" json:"scope,omitempty"` -} - -// ListRunners gets a list of runners accessible by the authenticated user. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/runners.html#list-owned-runners -func (s *RunnersService) ListRunners(opt *ListRunnersOptions, options ...RequestOptionFunc) ([]*Runner, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "runners", opt, options) - if err != nil { - return nil, nil, err - } - - var rs []*Runner - resp, err := s.client.Do(req, &rs) - if err != nil { - return nil, resp, err - } - - return rs, resp, nil -} - -// ListAllRunners gets a list of all runners in the GitLab instance. Access is -// restricted to users with admin privileges. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/runners.html#list-all-runners -func (s *RunnersService) ListAllRunners(opt *ListRunnersOptions, options ...RequestOptionFunc) ([]*Runner, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "runners/all", opt, options) - if err != nil { - return nil, nil, err - } - - var rs []*Runner - resp, err := s.client.Do(req, &rs) - if err != nil { - return nil, resp, err - } - - return rs, resp, nil -} - -// GetRunnerDetails returns details for given runner. 
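ListRunners above accepts the Type/Status/Paused filters in favor of the deprecated Scope field. A sketch of filtering for online, unpaused runners, assuming an authenticated client:

import (
	"fmt"

	gitlab "github.com/xanzy/go-gitlab"
)

// listOnlineRunners lists the caller's online, unpaused runners.
func listOnlineRunners(client *gitlab.Client) error {
	opt := &gitlab.ListRunnersOptions{
		Status: gitlab.String("online"),
		Paused: gitlab.Bool(false),
	}
	runners, _, err := client.Runners.ListRunners(opt)
	if err != nil {
		return err
	}
	for _, r := range runners {
		fmt.Printf("runner %d (%s): %s\n", r.ID, r.Description, r.Status)
	}
	return nil
}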
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/runners.html#get-runners-details -func (s *RunnersService) GetRunnerDetails(rid interface{}, options ...RequestOptionFunc) (*RunnerDetails, *Response, error) { - runner, err := parseID(rid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("runners/%s", runner) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - rs := new(RunnerDetails) - resp, err := s.client.Do(req, &rs) - if err != nil { - return nil, resp, err - } - - return rs, resp, nil -} - -// UpdateRunnerDetailsOptions represents the available UpdateRunnerDetails() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/runners.html#update-runners-details -type UpdateRunnerDetailsOptions struct { - Description *string `url:"description,omitempty" json:"description,omitempty"` - Paused *bool `url:"paused,omitempty" json:"paused,omitempty"` - TagList *[]string `url:"tag_list[],omitempty" json:"tag_list,omitempty"` - RunUntagged *bool `url:"run_untagged,omitempty" json:"run_untagged,omitempty"` - Locked *bool `url:"locked,omitempty" json:"locked,omitempty"` - AccessLevel *string `url:"access_level,omitempty" json:"access_level,omitempty"` - MaximumTimeout *int `url:"maximum_timeout,omitempty" json:"maximum_timeout,omitempty"` - - // Deprecated: Use Paused instead. (Deprecated in GitLab 14.8) - Active *bool `url:"active,omitempty" json:"active,omitempty"` -} - -// UpdateRunnerDetails updates details for a given runner. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/runners.html#update-runners-details -func (s *RunnersService) UpdateRunnerDetails(rid interface{}, opt *UpdateRunnerDetailsOptions, options ...RequestOptionFunc) (*RunnerDetails, *Response, error) { - runner, err := parseID(rid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("runners/%s", runner) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - rs := new(RunnerDetails) - resp, err := s.client.Do(req, &rs) - if err != nil { - return nil, resp, err - } - - return rs, resp, nil -} - -// RemoveRunner removes a runner. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/runners.html#delete-a-runner -func (s *RunnersService) RemoveRunner(rid interface{}, options ...RequestOptionFunc) (*Response, error) { - runner, err := parseID(rid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("runners/%s", runner) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// ListRunnerJobsOptions represents the available ListRunnerJobs() -// options. Status can be one of: running, success, failed, canceled. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/runners.html#list-runners-jobs -type ListRunnerJobsOptions struct { - ListOptions - Status *string `url:"status,omitempty" json:"status,omitempty"` - OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` - Sort *string `url:"sort,omitempty" json:"sort,omitempty"` -} - -// ListRunnerJobs gets a list of jobs that are being processed or were processed by specified Runner. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/runners.html#list-runners-jobs -func (s *RunnersService) ListRunnerJobs(rid interface{}, opt *ListRunnerJobsOptions, options ...RequestOptionFunc) ([]*Job, *Response, error) { - runner, err := parseID(rid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("runners/%s/jobs", runner) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var rs []*Job - resp, err := s.client.Do(req, &rs) - if err != nil { - return nil, resp, err - } - - return rs, resp, nil -} - -// ListProjectRunnersOptions represents the available ListProjectRunners() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/runners.html#list-projects-runners -type ListProjectRunnersOptions ListRunnersOptions - -// ListProjectRunners gets a list of runners accessible by the authenticated user. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/runners.html#list-projects-runners -func (s *RunnersService) ListProjectRunners(pid interface{}, opt *ListProjectRunnersOptions, options ...RequestOptionFunc) ([]*Runner, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/runners", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var rs []*Runner - resp, err := s.client.Do(req, &rs) - if err != nil { - return nil, resp, err - } - - return rs, resp, nil -} - -// EnableProjectRunnerOptions represents the available EnableProjectRunner() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/runners.html#enable-a-runner-in-project -type EnableProjectRunnerOptions struct { - RunnerID int `json:"runner_id"` -} - -// EnableProjectRunner enables an available specific runner in the project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/runners.html#enable-a-runner-in-project -func (s *RunnersService) EnableProjectRunner(pid interface{}, opt *EnableProjectRunnerOptions, options ...RequestOptionFunc) (*Runner, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/runners", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - r := new(Runner) - resp, err := s.client.Do(req, &r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// DisableProjectRunner disables a specific runner from project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/runners.html#disable-a-runner-from-project -func (s *RunnersService) DisableProjectRunner(pid interface{}, runner int, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/runners/%d", PathEscape(project), runner) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// ListGroupsRunnersOptions represents the available ListGroupsRunners() options. 
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/runners.html#list-groups-runners
-type ListGroupsRunnersOptions struct {
-	ListOptions
-	Type    *string   `url:"type,omitempty" json:"type,omitempty"`
-	Status  *string   `url:"status,omitempty" json:"status,omitempty"`
-	TagList *[]string `url:"tag_list,comma,omitempty" json:"tag_list,omitempty"`
-}
-
-// ListGroupsRunners lists all runners (specific and shared) available in the
-// group as well as its ancestor groups. Shared runners are listed if at least one
-// shared runner is defined.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/runners.html#list-groups-runners
-func (s *RunnersService) ListGroupsRunners(gid interface{}, opt *ListGroupsRunnersOptions, options ...RequestOptionFunc) ([]*Runner, *Response, error) {
-	group, err := parseID(gid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("groups/%s/runners", PathEscape(group))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var rs []*Runner
-	resp, err := s.client.Do(req, &rs)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return rs, resp, nil
-}
-
-// RegisterNewRunnerOptions represents the available RegisterNewRunner()
-// options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/runners.html#register-a-new-runner
-type RegisterNewRunnerOptions struct {
-	Token           *string                       `url:"token" json:"token"`
-	Description     *string                       `url:"description,omitempty" json:"description,omitempty"`
-	Info            *RegisterNewRunnerInfoOptions `url:"info,omitempty" json:"info,omitempty"`
-	Active          *bool                         `url:"active,omitempty" json:"active,omitempty"`
-	Paused          *bool                         `url:"paused,omitempty" json:"paused,omitempty"`
-	Locked          *bool                         `url:"locked,omitempty" json:"locked,omitempty"`
-	RunUntagged     *bool                         `url:"run_untagged,omitempty" json:"run_untagged,omitempty"`
-	TagList         *[]string                     `url:"tag_list[],omitempty" json:"tag_list,omitempty"`
-	AccessLevel     *string                       `url:"access_level,omitempty" json:"access_level,omitempty"`
-	MaximumTimeout  *int                          `url:"maximum_timeout,omitempty" json:"maximum_timeout,omitempty"`
-	MaintenanceNote *string                       `url:"maintenance_note,omitempty" json:"maintenance_note,omitempty"`
-}
-
-// RegisterNewRunnerInfoOptions represents the info hashmap parameter in
-// RegisterNewRunnerOptions.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/runners.html#register-a-new-runner
-type RegisterNewRunnerInfoOptions struct {
-	Name         *string `url:"name,omitempty" json:"name,omitempty"`
-	Version      *string `url:"version,omitempty" json:"version,omitempty"`
-	Revision     *string `url:"revision,omitempty" json:"revision,omitempty"`
-	Platform     *string `url:"platform,omitempty" json:"platform,omitempty"`
-	Architecture *string `url:"architecture,omitempty" json:"architecture,omitempty"`
-}
-
-// RegisterNewRunner registers a new Runner for the instance.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/runners.html#register-a-new-runner
-func (s *RunnersService) RegisterNewRunner(opt *RegisterNewRunnerOptions, options ...RequestOptionFunc) (*Runner, *Response, error) {
-	req, err := s.client.NewRequest(http.MethodPost, "runners", opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	r := new(Runner)
-	resp, err := s.client.Do(req, &r)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return r, resp, nil
-}
-
-// DeleteRegisteredRunnerOptions represents the available
-// DeleteRegisteredRunner() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/runners.html#delete-a-runner-by-authentication-token
-type DeleteRegisteredRunnerOptions struct {
-	Token *string `url:"token" json:"token"`
-}
-
-// DeleteRegisteredRunner deletes a runner by its authentication token.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/runners.html#delete-a-runner-by-authentication-token
-func (s *RunnersService) DeleteRegisteredRunner(opt *DeleteRegisteredRunnerOptions, options ...RequestOptionFunc) (*Response, error) {
-	req, err := s.client.NewRequest(http.MethodDelete, "runners", opt, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// DeleteRegisteredRunnerByID deletes a runner by ID.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/runners.html#delete-a-runner-by-id
-func (s *RunnersService) DeleteRegisteredRunnerByID(rid int, options ...RequestOptionFunc) (*Response, error) {
-	req, err := s.client.NewRequest(http.MethodDelete, fmt.Sprintf("runners/%d", rid), nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// VerifyRegisteredRunnerOptions represents the available
-// VerifyRegisteredRunner() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/runners.html#verify-authentication-for-a-registered-runner
-type VerifyRegisteredRunnerOptions struct {
-	Token *string `url:"token" json:"token"`
-}
-
-// VerifyRegisteredRunner verifies authentication for a registered runner.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/runners.html#verify-authentication-for-a-registered-runner
-func (s *RunnersService) VerifyRegisteredRunner(opt *VerifyRegisteredRunnerOptions, options ...RequestOptionFunc) (*Response, error) {
-	req, err := s.client.NewRequest(http.MethodPost, "runners/verify", opt, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-type RunnerRegistrationToken struct {
-	Token          *string    `url:"token" json:"token"`
-	TokenExpiresAt *time.Time `url:"token_expires_at" json:"token_expires_at"`
-}
-
-// ResetInstanceRunnerRegistrationToken resets the instance runner registration
-// token.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/runners.html#reset-instances-runner-registration-token
-func (s *RunnersService) ResetInstanceRunnerRegistrationToken(options ...RequestOptionFunc) (*RunnerRegistrationToken, *Response, error) {
-	req, err := s.client.NewRequest(http.MethodPost, "runners/reset_registration_token", nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	r := new(RunnerRegistrationToken)
-	resp, err := s.client.Do(req, &r)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return r, resp, nil
-}
-
-// ResetGroupRunnerRegistrationToken resets a group's runner registration token.
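The token endpoints above pair naturally: verify a runner token, then rotate the registration token. A sketch assuming an authenticated client; both token values are placeholders:

import (
	"fmt"

	gitlab "github.com/xanzy/go-gitlab"
)

// rotateTokens verifies a runner token, then resets the instance-wide
// registration token.
func rotateTokens(client *gitlab.Client) error {
	_, err := client.Runners.VerifyRegisteredRunner(&gitlab.VerifyRegisteredRunnerOptions{
		Token: gitlab.String("runner-token"),
	})
	if err != nil {
		return err
	}
	tok, _, err := client.Runners.ResetInstanceRunnerRegistrationToken()
	if err != nil {
		return err
	}
	fmt.Println("new registration token expires:", tok.TokenExpiresAt)
	return nil
}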
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/runners.html#reset-groups-runner-registration-token
-func (s *RunnersService) ResetGroupRunnerRegistrationToken(gid interface{}, options ...RequestOptionFunc) (*RunnerRegistrationToken, *Response, error) {
-	group, err := parseID(gid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("groups/%s/runners/reset_registration_token", PathEscape(group))
-
-	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	r := new(RunnerRegistrationToken)
-	resp, err := s.client.Do(req, &r)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return r, resp, nil
-}
-
-// ResetProjectRunnerRegistrationToken resets a project's runner registration token.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/runners.html#reset-projects-runner-registration-token
-func (s *RunnersService) ResetProjectRunnerRegistrationToken(pid interface{}, options ...RequestOptionFunc) (*RunnerRegistrationToken, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/runners/reset_registration_token", PathEscape(project))
-	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	r := new(RunnerRegistrationToken)
-	resp, err := s.client.Do(req, &r)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return r, resp, nil
-}
-
-type RunnerAuthenticationToken struct {
-	Token          *string    `url:"token" json:"token"`
-	TokenExpiresAt *time.Time `url:"token_expires_at" json:"token_expires_at"`
-}
-
-// ResetRunnerAuthenticationToken resets a runner's authentication token.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/runners.html#reset-runners-authentication-token-by-using-the-runner-id
-func (s *RunnersService) ResetRunnerAuthenticationToken(rid int, options ...RequestOptionFunc) (*RunnerAuthenticationToken, *Response, error) {
-	u := fmt.Sprintf("runners/%d/reset_authentication_token", rid)
-	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	r := new(RunnerAuthenticationToken)
-	resp, err := s.client.Do(req, &r)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return r, resp, nil
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/search.go b/vendor/github.com/xanzy/go-gitlab/search.go
deleted file mode 100644
index 133592bd12..0000000000
--- a/vendor/github.com/xanzy/go-gitlab/search.go
+++ /dev/null
@@ -1,359 +0,0 @@
-//
-// Copyright 2021, Sander van Harmelen
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package gitlab
-
-import (
-	"fmt"
-	"net/http"
-)
-
-// SearchService handles communication with the search related methods of the
-// GitLab API.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/search.html
-type SearchService struct {
-	client *Client
-}
-
-// SearchOptions represents the available options for all search methods.
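SearchService exposes one method per search scope; the per-scope methods that follow all delegate to the three unexported helpers at the end of the file. A hypothetical sketch of two scopes, assuming an authenticated client and a placeholder project path and query:

import (
	"fmt"

	gitlab "github.com/xanzy/go-gitlab"
)

// searchExamples runs an instance-wide project search and a project-scoped
// blob search.
func searchExamples(client *gitlab.Client) error {
	opt := &gitlab.SearchOptions{ListOptions: gitlab.ListOptions{PerPage: 10}}

	projects, _, err := client.Search.Projects("chains", opt)
	if err != nil {
		return err
	}
	fmt.Println("projects found:", len(projects))

	blobs, _, err := client.Search.BlobsByProject("group/project", "func main", opt)
	if err != nil {
		return err
	}
	fmt.Println("blobs found:", len(blobs))
	return nil
}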
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/search.html -type SearchOptions struct { - ListOptions - Ref *string `url:"ref,omitempty" json:"ref,omitempty"` -} - -type searchOptions struct { - SearchOptions - Scope string `url:"scope" json:"scope"` - Search string `url:"search" json:"search"` -} - -// Projects searches the expression within projects -// -// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-projects -func (s *SearchService) Projects(query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) { - var ps []*Project - resp, err := s.search("projects", query, &ps, opt, options...) - return ps, resp, err -} - -// ProjectsByGroup searches the expression within projects for -// the specified group -// -// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#group-search-api -func (s *SearchService) ProjectsByGroup(gid interface{}, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) { - var ps []*Project - resp, err := s.searchByGroup(gid, "projects", query, &ps, opt, options...) - return ps, resp, err -} - -// Issues searches the expression within issues -// -// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-issues -func (s *SearchService) Issues(query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { - var is []*Issue - resp, err := s.search("issues", query, &is, opt, options...) - return is, resp, err -} - -// IssuesByGroup searches the expression within issues for -// the specified group -// -// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-issues-1 -func (s *SearchService) IssuesByGroup(gid interface{}, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { - var is []*Issue - resp, err := s.searchByGroup(gid, "issues", query, &is, opt, options...) - return is, resp, err -} - -// IssuesByProject searches the expression within issues for -// the specified project -// -// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-issues-2 -func (s *SearchService) IssuesByProject(pid interface{}, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Issue, *Response, error) { - var is []*Issue - resp, err := s.searchByProject(pid, "issues", query, &is, opt, options...) - return is, resp, err -} - -// MergeRequests searches the expression within merge requests -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/search.html#scope-merge_requests -func (s *SearchService) MergeRequests(query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*MergeRequest, *Response, error) { - var ms []*MergeRequest - resp, err := s.search("merge_requests", query, &ms, opt, options...) - return ms, resp, err -} - -// MergeRequestsByGroup searches the expression within merge requests for -// the specified group -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/search.html#scope-merge_requests-1 -func (s *SearchService) MergeRequestsByGroup(gid interface{}, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*MergeRequest, *Response, error) { - var ms []*MergeRequest - resp, err := s.searchByGroup(gid, "merge_requests", query, &ms, opt, options...) 
-	return ms, resp, err
-}
-
-// MergeRequestsByProject searches the expression within merge requests for
-// the specified project
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/search.html#scope-merge_requests-2
-func (s *SearchService) MergeRequestsByProject(pid interface{}, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*MergeRequest, *Response, error) {
-	var ms []*MergeRequest
-	resp, err := s.searchByProject(pid, "merge_requests", query, &ms, opt, options...)
-	return ms, resp, err
-}
-
-// Milestones searches the expression within milestones
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-milestones
-func (s *SearchService) Milestones(query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Milestone, *Response, error) {
-	var ms []*Milestone
-	resp, err := s.search("milestones", query, &ms, opt, options...)
-	return ms, resp, err
-}
-
-// MilestonesByGroup searches the expression within milestones for
-// the specified group
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-milestones-1
-func (s *SearchService) MilestonesByGroup(gid interface{}, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Milestone, *Response, error) {
-	var ms []*Milestone
-	resp, err := s.searchByGroup(gid, "milestones", query, &ms, opt, options...)
-	return ms, resp, err
-}
-
-// MilestonesByProject searches the expression within milestones for
-// the specified project
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-milestones-2
-func (s *SearchService) MilestonesByProject(pid interface{}, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Milestone, *Response, error) {
-	var ms []*Milestone
-	resp, err := s.searchByProject(pid, "milestones", query, &ms, opt, options...)
-	return ms, resp, err
-}
-
-// SnippetTitles searches the expression within snippet titles
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/search.html#scope-snippet_titles
-func (s *SearchService) SnippetTitles(query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Snippet, *Response, error) {
-	var ss []*Snippet
-	resp, err := s.search("snippet_titles", query, &ss, opt, options...)
-	return ss, resp, err
-}
-
-// SnippetBlobs searches the expression within snippet blobs
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/search.html#scope-snippet_blobs
-func (s *SearchService) SnippetBlobs(query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Snippet, *Response, error) {
-	var ss []*Snippet
-	resp, err := s.search("snippet_blobs", query, &ss, opt, options...)
-	return ss, resp, err
-}
-
-// NotesByProject searches the expression within notes for the specified
-// project
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-notes
-func (s *SearchService) NotesByProject(pid interface{}, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Note, *Response, error) {
-	var ns []*Note
-	resp, err := s.searchByProject(pid, "notes", query, &ns, opt, options...)
-	return ns, resp, err
-}
-
-// WikiBlobs searches the expression within all wiki blobs
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/search.html#scope-wiki_blobs
-func (s *SearchService) WikiBlobs(query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Wiki, *Response, error) {
-	var ws []*Wiki
-	resp, err := s.search("wiki_blobs", query, &ws, opt, options...)
- return ws, resp, err -} - -// WikiBlobsByGroup searches the expression within wiki blobs for -// specified group -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/search.html#scope-wiki_blobs-premium-1 -func (s *SearchService) WikiBlobsByGroup(gid interface{}, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Wiki, *Response, error) { - var ws []*Wiki - resp, err := s.searchByGroup(gid, "wiki_blobs", query, &ws, opt, options...) - return ws, resp, err -} - -// WikiBlobsByProject searches the expression within wiki blobs for -// the specified project -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/search.html#scope-wiki_blobs-premium-2 -func (s *SearchService) WikiBlobsByProject(pid interface{}, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Wiki, *Response, error) { - var ws []*Wiki - resp, err := s.searchByProject(pid, "wiki_blobs", query, &ws, opt, options...) - return ws, resp, err -} - -// Commits searches the expression within all commits -// -// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-commits -func (s *SearchService) Commits(query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Commit, *Response, error) { - var cs []*Commit - resp, err := s.search("commits", query, &cs, opt, options...) - return cs, resp, err -} - -// CommitsByGroup searches the expression within commits for the specified -// group -// -// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-commits-premium-1 -func (s *SearchService) CommitsByGroup(gid interface{}, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Commit, *Response, error) { - var cs []*Commit - resp, err := s.searchByGroup(gid, "commits", query, &cs, opt, options...) - return cs, resp, err -} - -// CommitsByProject searches the expression within commits for the -// specified project -// -// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-commits-premium-2 -func (s *SearchService) CommitsByProject(pid interface{}, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Commit, *Response, error) { - var cs []*Commit - resp, err := s.searchByProject(pid, "commits", query, &cs, opt, options...) - return cs, resp, err -} - -// Blob represents a single blob. -type Blob struct { - Basename string `json:"basename"` - Data string `json:"data"` - Path string `json:"path"` - Filename string `json:"filename"` - ID string `json:"id"` - Ref string `json:"ref"` - Startline int `json:"startline"` - ProjectID int `json:"project_id"` -} - -// Blobs searches the expression within all blobs -// -// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-blobs -func (s *SearchService) Blobs(query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Blob, *Response, error) { - var bs []*Blob - resp, err := s.search("blobs", query, &bs, opt, options...) - return bs, resp, err -} - -// BlobsByGroup searches the expression within blobs for the specified -// group -// -// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-blobs-premium-1 -func (s *SearchService) BlobsByGroup(gid interface{}, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Blob, *Response, error) { - var bs []*Blob - resp, err := s.searchByGroup(gid, "blobs", query, &bs, opt, options...) 
- return bs, resp, err -} - -// BlobsByProject searches the expression within blobs for the specified -// project -// -// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-blobs-premium-2 -func (s *SearchService) BlobsByProject(pid interface{}, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*Blob, *Response, error) { - var bs []*Blob - resp, err := s.searchByProject(pid, "blobs", query, &bs, opt, options...) - return bs, resp, err -} - -// Users searches the expression within all users -// -// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-users -func (s *SearchService) Users(query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*User, *Response, error) { - var ret []*User - resp, err := s.search("users", query, &ret, opt, options...) - return ret, resp, err -} - -// UsersByGroup searches the expression within users for the specified -// group -// -// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-users-1 -func (s *SearchService) UsersByGroup(gid interface{}, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*User, *Response, error) { - var ret []*User - resp, err := s.searchByGroup(gid, "users", query, &ret, opt, options...) - return ret, resp, err -} - -// UsersByProject searches the expression within users for the -// specified project -// -// GitLab API docs: https://docs.gitlab.com/ee/api/search.html#scope-users-2 -func (s *SearchService) UsersByProject(pid interface{}, query string, opt *SearchOptions, options ...RequestOptionFunc) ([]*User, *Response, error) { - var ret []*User - resp, err := s.searchByProject(pid, "users", query, &ret, opt, options...) - return ret, resp, err -} - -func (s *SearchService) search(scope, query string, result interface{}, opt *SearchOptions, options ...RequestOptionFunc) (*Response, error) { - opts := &searchOptions{SearchOptions: *opt, Scope: scope, Search: query} - - req, err := s.client.NewRequest(http.MethodGet, "search", opts, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, result) -} - -func (s *SearchService) searchByGroup(gid interface{}, scope, query string, result interface{}, opt *SearchOptions, options ...RequestOptionFunc) (*Response, error) { - group, err := parseID(gid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("groups/%s/-/search", PathEscape(group)) - - opts := &searchOptions{SearchOptions: *opt, Scope: scope, Search: query} - - req, err := s.client.NewRequest(http.MethodGet, u, opts, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, result) -} - -func (s *SearchService) searchByProject(pid interface{}, scope, query string, result interface{}, opt *SearchOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/-/search", PathEscape(project)) - - opts := &searchOptions{SearchOptions: *opt, Scope: scope, Search: query} - - req, err := s.client.NewRequest(http.MethodGet, u, opts, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, result) -} diff --git a/vendor/github.com/xanzy/go-gitlab/services.go b/vendor/github.com/xanzy/go-gitlab/services.go deleted file mode 100644 index fcaa71ecc4..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/services.go +++ /dev/null @@ -1,2179 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in 
compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "encoding/json" - "fmt" - "net/http" - "strconv" - "time" -) - -// ServicesService handles communication with the services related methods of -// the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/integrations.html -type ServicesService struct { - client *Client -} - -// Service represents a GitLab service. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/integrations.html -type Service struct { - ID int `json:"id"` - Title string `json:"title"` - Slug string `json:"slug"` - CreatedAt *time.Time `json:"created_at"` - UpdatedAt *time.Time `json:"updated_at"` - Active bool `json:"active"` - PushEvents bool `json:"push_events"` - IssuesEvents bool `json:"issues_events"` - AlertEvents bool `json:"alert_events"` - ConfidentialIssuesEvents bool `json:"confidential_issues_events"` - CommitEvents bool `json:"commit_events"` - MergeRequestsEvents bool `json:"merge_requests_events"` - CommentOnEventEnabled bool `json:"comment_on_event_enabled"` - TagPushEvents bool `json:"tag_push_events"` - NoteEvents bool `json:"note_events"` - ConfidentialNoteEvents bool `json:"confidential_note_events"` - PipelineEvents bool `json:"pipeline_events"` - JobEvents bool `json:"job_events"` - WikiPageEvents bool `json:"wiki_page_events"` - VulnerabilityEvents bool `json:"vulnerability_events"` - DeploymentEvents bool `json:"deployment_events"` -} - -// ListServices gets a list of all active services. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/integrations.html#list-all-active-integrations -func (s *ServicesService) ListServices(pid interface{}, options ...RequestOptionFunc) ([]*Service, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var svcs []*Service - resp, err := s.client.Do(req, &svcs) - if err != nil { - return nil, resp, err - } - - return svcs, resp, nil -} - -// CustomIssueTrackerService represents Custom Issue Tracker service settings. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#custom-issue-tracker -type CustomIssueTrackerService struct { - Service - Properties *CustomIssueTrackerServiceProperties `json:"properties"` -} - -// CustomIssueTrackerServiceProperties represents Custom Issue Tracker specific properties. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#custom-issue-tracker -type CustomIssueTrackerServiceProperties struct { - ProjectURL string `json:"project_url,omitempty"` - IssuesURL string `json:"issues_url,omitempty"` - NewIssueURL string `json:"new_issue_url,omitempty"` -} - -// GetCustomIssueTrackerService gets Custom Issue Tracker service settings for a project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#get-custom-issue-tracker-settings -func (s *ServicesService) GetCustomIssueTrackerService(pid interface{}, options ...RequestOptionFunc) (*CustomIssueTrackerService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/custom-issue-tracker", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - svc := new(CustomIssueTrackerService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, resp, err - } - - return svc, resp, nil -} - -// SetCustomIssueTrackerServiceOptions represents the available SetCustomIssueTrackerService() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#set-up-a-custom-issue-tracker -type SetCustomIssueTrackerServiceOptions struct { - NewIssueURL *string `url:"new_issue_url,omitempty" json:"new_issue_url,omitempty"` - IssuesURL *string `url:"issues_url,omitempty" json:"issues_url,omitempty"` - ProjectURL *string `url:"project_url,omitempty" json:"project_url,omitempty"` -} - -// SetCustomIssueTrackerService sets Custom Issue Tracker service for a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#set-up-a-custom-issue-tracker -func (s *ServicesService) SetCustomIssueTrackerService(pid interface{}, opt *SetCustomIssueTrackerServiceOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/services/custom-issue-tracker", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// DeleteCustomIssueTrackerService deletes Custom Issue Tracker service settings for a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#disable-a-custom-issue-tracker -func (s *ServicesService) DeleteCustomIssueTrackerService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/services/custom-issue-tracker", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// DataDogService represents DataDog service settings. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#datadog -type DataDogService struct { - Service - Properties *DataDogServiceProperties `json:"properties"` -} - -// DataDogServiceProperties represents DataDog specific properties. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#datadog -type DataDogServiceProperties struct { - APIURL string `url:"api_url,omitempty" json:"api_url,omitempty"` - DataDogEnv string `url:"datadog_env,omitempty" json:"datadog_env,omitempty"` - DataDogService string `url:"datadog_service,omitempty" json:"datadog_service,omitempty"` - DataDogSite string `url:"datadog_site,omitempty" json:"datadog_site,omitempty"` - DataDogTags string `url:"datadog_tags,omitempty" json:"datadog_tags,omitempty"` - ArchiveTraceEvents bool `url:"archive_trace_events,omitempty" json:"archive_trace_events,omitempty"` -} - -// GetDataDogService gets DataDog service settings for a project. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#get-datadog-settings -func (s *ServicesService) GetDataDogService(pid interface{}, options ...RequestOptionFunc) (*DataDogService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/datadog", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - svc := new(DataDogService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, resp, err - } - - return svc, resp, nil -} - -// SetDataDogServiceOptions represents the available SetDataDogService() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#set-up-datadog -type SetDataDogServiceOptions struct { - APIKey *string `url:"api_key,omitempty" json:"api_key,omitempty"` - APIURL *string `url:"api_url,omitempty" json:"api_url,omitempty"` - DataDogEnv *string `url:"datadog_env,omitempty" json:"datadog_env,omitempty"` - DataDogService *string `url:"datadog_service,omitempty" json:"datadog_service,omitempty"` - DataDogSite *string `url:"datadog_site,omitempty" json:"datadog_site,omitempty"` - DataDogTags *string `url:"datadog_tags,omitempty" json:"datadog_tags,omitempty"` - ArchiveTraceEvents *bool `url:"archive_trace_events,omitempty" json:"archive_trace_events,omitempty"` -} - -// SetDataDogService sets DataDog service settings for a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#set-up-datadog -func (s *ServicesService) SetDataDogService(pid interface{}, opt *SetDataDogServiceOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/services/datadog", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// DeleteDataDogService deletes the DataDog service settings for a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#disable-datadog -func (s *ServicesService) DeleteDataDogService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/services/datadog", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// DiscordService represents Discord service settings. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#discord-notifications -type DiscordService struct { - Service - Properties *DiscordServiceProperties `json:"properties"` -} - -// DiscordServiceProperties represents Discord specific properties. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#discord-notifications -type DiscordServiceProperties struct { - BranchesToBeNotified string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"` - NotifyOnlyBrokenPipelines bool `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"` -} - -// GetDiscordService gets Discord service settings for a project. 
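Each integration above follows the same get/set/delete pattern. A sketch for the Datadog integration, assuming an authenticated client; the API key, site, and project path are placeholders:

import (
	"fmt"

	gitlab "github.com/xanzy/go-gitlab"
)

// configureDataDog enables the Datadog integration and reads it back.
func configureDataDog(client *gitlab.Client) error {
	_, err := client.Services.SetDataDogService("group/project", &gitlab.SetDataDogServiceOptions{
		APIKey:      gitlab.String("dd-api-key"),
		DataDogSite: gitlab.String("datadoghq.com"),
	})
	if err != nil {
		return err
	}
	svc, _, err := client.Services.GetDataDogService("group/project")
	if err != nil {
		return err
	}
	fmt.Println("datadog active:", svc.Active)
	return nil
}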
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#get-discord-notifications-settings -func (s *ServicesService) GetDiscordService(pid interface{}, options ...RequestOptionFunc) (*DiscordService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/discord", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - svc := new(DiscordService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, resp, err - } - - return svc, resp, nil -} - -// SetDiscordServiceOptions represents the available SetDiscordService() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#set-up-discord-notifications -type SetDiscordServiceOptions struct { - WebHook *string `url:"webhook,omitempty" json:"webhook,omitempty"` - BranchesToBeNotified *string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"` - ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` - ConfidentialIssuesChannel *string `url:"confidential_issue_channel,omitempty" json:"confidential_issue_channel,omitempty"` - ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` - ConfidentialNoteChannel *string `url:"confidential_note_channel,omitempty" json:"confidential_note_channel,omitempty"` - DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"` - DeploymentChannel *string `url:"deployment_channel,omitempty" json:"deployment_channel,omitempty"` - GroupConfidentialMentionsEvents *bool `url:"group_confidential_mentions_events,omitempty" json:"group_confidential_mentions_events,omitempty"` - GroupConfidentialMentionsChannel *string `url:"group_confidential_mentions_channel,omitempty" json:"group_confidential_mentions_channel,omitempty"` - GroupMentionsEvents *bool `url:"group_mentions_events,omitempty" json:"group_mentions_events,omitempty"` - GroupMentionsChannel *string `url:"group_mentions_channel,omitempty" json:"group_mentions_channel,omitempty"` - IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` - IssueChannel *string `url:"issue_channel,omitempty" json:"issue_channel,omitempty"` - MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` - MergeRequestChannel *string `url:"merge_request_channel,omitempty" json:"merge_request_channel,omitempty"` - NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` - NoteChannel *string `url:"note_channel,omitempty" json:"note_channel,omitempty"` - NotifyOnlyBrokenPipelines *bool `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"` - PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` - PipelineChannel *string `url:"pipeline_channel,omitempty" json:"pipeline_channel,omitempty"` - PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` - PushChannel *string `url:"push_channel,omitempty" json:"push_channel,omitempty"` - TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` - TagPushChannel *string `url:"tag_push_channel,omitempty" json:"tag_push_channel,omitempty"` - WikiPageEvents *bool `url:"wiki_page_events,omitempty" 
json:"wiki_page_events,omitempty"` - WikiPageChannel *string `url:"wiki_page_channel,omitempty" json:"wiki_page_channel,omitempty"` -} - -// SetDiscordService sets Discord service for a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#set-up-discord-notifications -func (s *ServicesService) SetDiscordService(pid interface{}, opt *SetDiscordServiceOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/services/discord", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// DeleteDiscordService deletes Discord service settings for a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#disable-discord-notifications -func (s *ServicesService) DeleteDiscordService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/services/discord", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// DroneCIService represents Drone CI service settings. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#drone -type DroneCIService struct { - Service - Properties *DroneCIServiceProperties `json:"properties"` -} - -// DroneCIServiceProperties represents Drone CI specific properties. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#drone -type DroneCIServiceProperties struct { - DroneURL string `json:"drone_url"` - EnableSSLVerification bool `json:"enable_ssl_verification"` -} - -// GetDroneCIService gets Drone CI service settings for a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#get-drone-settings -func (s *ServicesService) GetDroneCIService(pid interface{}, options ...RequestOptionFunc) (*DroneCIService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/drone-ci", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - svc := new(DroneCIService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, resp, err - } - - return svc, resp, nil -} - -// SetDroneCIServiceOptions represents the available SetDroneCIService() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#set-up-drone -type SetDroneCIServiceOptions struct { - Token *string `url:"token,omitempty" json:"token,omitempty"` - DroneURL *string `url:"drone_url,omitempty" json:"drone_url,omitempty"` - EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"` - PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` - MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` - TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` -} - -// SetDroneCIService sets Drone CI service for a project. 
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#set-up-drone
-func (s *ServicesService) SetDroneCIService(pid interface{}, opt *SetDroneCIServiceOptions, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/drone-ci", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// DeleteDroneCIService deletes Drone CI service settings for a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#disable-drone
-func (s *ServicesService) DeleteDroneCIService(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/drone-ci", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// EmailsOnPushService represents Emails on Push service settings.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#emails-on-push
-type EmailsOnPushService struct {
-	Service
-	Properties *EmailsOnPushServiceProperties `json:"properties"`
-}
-
-// EmailsOnPushServiceProperties represents Emails on Push specific properties.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#emails-on-push
-type EmailsOnPushServiceProperties struct {
-	Recipients string `json:"recipients"`
-	DisableDiffs bool `json:"disable_diffs"`
-	SendFromCommitterEmail bool `json:"send_from_committer_email"`
-	PushEvents bool `json:"push_events"`
-	TagPushEvents bool `json:"tag_push_events"`
-	BranchesToBeNotified string `json:"branches_to_be_notified"`
-}
-
-// GetEmailsOnPushService gets Emails on Push service settings for a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#get-emails-on-push-integration-settings
-func (s *ServicesService) GetEmailsOnPushService(pid interface{}, options ...RequestOptionFunc) (*EmailsOnPushService, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/integrations/emails-on-push", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	svc := new(EmailsOnPushService)
-	resp, err := s.client.Do(req, svc)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return svc, resp, nil
-}
-
-// SetEmailsOnPushServiceOptions represents the available SetEmailsOnPushService()
-// options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#set-up-emails-on-push
-type SetEmailsOnPushServiceOptions struct {
-	Recipients *string `url:"recipients,omitempty" json:"recipients,omitempty"`
-	DisableDiffs *bool `url:"disable_diffs,omitempty" json:"disable_diffs,omitempty"`
-	SendFromCommitterEmail *bool `url:"send_from_committer_email,omitempty" json:"send_from_committer_email,omitempty"`
-	PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"`
-	TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"`
-	BranchesToBeNotified *string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"`
-}
-
-// SetEmailsOnPushService sets Emails on Push service for a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#set-up-emails-on-push
-func (s *ServicesService) SetEmailsOnPushService(pid interface{}, opt *SetEmailsOnPushServiceOptions, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/integrations/emails-on-push", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// DeleteEmailsOnPushService deletes Emails on Push service settings for a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#disable-emails-on-push
-func (s *ServicesService) DeleteEmailsOnPushService(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/integrations/emails-on-push", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// ExternalWikiService represents External Wiki service settings.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#external-wiki
-type ExternalWikiService struct {
-	Service
-	Properties *ExternalWikiServiceProperties `json:"properties"`
-}
-
-// ExternalWikiServiceProperties represents External Wiki specific properties.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#external-wiki
-type ExternalWikiServiceProperties struct {
-	ExternalWikiURL string `json:"external_wiki_url"`
-}
-
-// GetExternalWikiService gets External Wiki service settings for a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#get-external-wiki-settings
-func (s *ServicesService) GetExternalWikiService(pid interface{}, options ...RequestOptionFunc) (*ExternalWikiService, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/external-wiki", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	svc := new(ExternalWikiService)
-	resp, err := s.client.Do(req, svc)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return svc, resp, nil
-}
-
-// SetExternalWikiServiceOptions represents the available SetExternalWikiService()
-// options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#set-up-an-external-wiki
-type SetExternalWikiServiceOptions struct {
-	ExternalWikiURL *string `url:"external_wiki_url,omitempty" json:"external_wiki_url,omitempty"`
-}
-
-// SetExternalWikiService sets External Wiki service for a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#set-up-an-external-wiki
-func (s *ServicesService) SetExternalWikiService(pid interface{}, opt *SetExternalWikiServiceOptions, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/external-wiki", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// DeleteExternalWikiService deletes External Wiki service for project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#disable-an-external-wiki
-func (s *ServicesService) DeleteExternalWikiService(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/external-wiki", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// GithubService represents Github service settings.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#github
-type GithubService struct {
-	Service
-	Properties *GithubServiceProperties `json:"properties"`
-}
-
-// GithubServiceProperties represents Github specific properties.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#github
-type GithubServiceProperties struct {
-	RepositoryURL string `json:"repository_url"`
-	StaticContext bool `json:"static_context"`
-}
-
-// GetGithubService gets Github service settings for a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#get-github-settings
-func (s *ServicesService) GetGithubService(pid interface{}, options ...RequestOptionFunc) (*GithubService, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/github", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	svc := new(GithubService)
-	resp, err := s.client.Do(req, svc)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return svc, resp, nil
-}
-
-// SetGithubServiceOptions represents the available SetGithubService()
-// options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#set-up-github
-type SetGithubServiceOptions struct {
-	Token *string `url:"token,omitempty" json:"token,omitempty"`
-	RepositoryURL *string `url:"repository_url,omitempty" json:"repository_url,omitempty"`
-	StaticContext *bool `url:"static_context,omitempty" json:"static_context,omitempty"`
-}
-
-// SetGithubService sets Github service for a project
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#set-up-github
-func (s *ServicesService) SetGithubService(pid interface{}, opt *SetGithubServiceOptions, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/github", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// DeleteGithubService deletes Github service for a project
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#disable-github
-func (s *ServicesService) DeleteGithubService(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/github", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// SlackApplication represents GitLab for slack application settings.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#gitlab-for-slack-app
-type SlackApplication struct {
-	Service
-	Properties *SlackApplicationProperties `json:"properties"`
-}
-
-// SlackApplicationProperties represents GitLab for slack application specific properties.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#gitlab-for-slack-app
-type SlackApplicationProperties struct {
-	Channel string `json:"channel"`
-	NotifyOnlyBrokenPipelines bool `json:"notify_only_broken_pipelines"`
-	BranchesToBeNotified string `json:"branches_to_be_notified"`
-	AlertEvents bool `json:"alert_events"`
-	IssuesEvents bool `json:"issues_events"`
-	ConfidentialIssuesEvents bool `json:"confidential_issues_events"`
-	MergeRequestsEvents bool `json:"merge_requests_events"`
-	NoteEvents bool `json:"note_events"`
-	ConfidentialNoteEvents bool `json:"confidential_note_events"`
-	DeploymentEvents bool `json:"deployment_events"`
-	IncidentsEvents bool `json:"incidents_events"`
-	PipelineEvents bool `json:"pipeline_events"`
-	PushEvents bool `json:"push_events"`
-	TagPushEvents bool `json:"tag_push_events"`
-	VulnerabilityEvents bool `json:"vulnerability_events"`
-	WikiPageEvents bool `json:"wiki_page_events"`
-
-	// Deprecated: This parameter has been replaced with BranchesToBeNotified.
-	NotifyOnlyDefaultBranch bool `json:"notify_only_default_branch"`
-}
-
-// GetSlackApplication gets the GitLab for Slack app integration settings for a
-// project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#get-gitlab-for-slack-app-settings
-func (s *ServicesService) GetSlackApplication(pid interface{}, options ...RequestOptionFunc) (*SlackApplication, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/integrations/gitlab-slack-application", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	svc := new(SlackApplication)
-	resp, err := s.client.Do(req, svc)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return svc, resp, nil
-}
-
-// SetSlackApplicationOptions represents the available SetSlackApplication()
-// options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#set-up-gitlab-for-slack-app
-type SetSlackApplicationOptions struct {
-	Channel *string `url:"channel,omitempty" json:"channel,omitempty"`
-	NotifyOnlyBrokenPipelines *bool `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"`
-	BranchesToBeNotified *string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"`
-	AlertEvents *bool `url:"alert_events,omitempty" json:"alert_events,omitempty"`
-	IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"`
-	ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"`
-	MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"`
-	NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"`
-	ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"`
-	DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"`
-	IncidentsEvents *bool `url:"incidents_events,omitempty" json:"incidents_events,omitempty"`
-	PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"`
-	PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"`
-	TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"`
-	VulnerabilityEvents *bool `url:"vulnerability_events,omitempty" json:"vulnerability_events,omitempty"`
-	WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"`
-
-	// Deprecated: This parameter has been replaced with BranchesToBeNotified.
-	NotifyOnlyDefaultBranch *bool `url:"notify_only_default_branch,omitempty" json:"notify_only_default_branch,omitempty"`
-}
-
-// SetSlackApplication update the GitLab for Slack app integration for a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#set-up-gitlab-for-slack-app
-func (s *ServicesService) SetSlackApplication(pid interface{}, opt *SetSlackApplicationOptions, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/integrations/gitlab-slack-application", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// DisableSlackApplication disable the GitLab for Slack app integration for a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#disable-gitlab-for-slack-app
-func (s *ServicesService) DisableSlackApplication(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/integrations/gitlab-slack-application", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// SetGitLabCIServiceOptions represents the available SetGitLabCIService()
-// options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#edit-gitlab-ci-service
-type SetGitLabCIServiceOptions struct {
-	Token *string `url:"token,omitempty" json:"token,omitempty"`
-	ProjectURL *string `url:"project_url,omitempty" json:"project_url,omitempty"`
-}
-
-// SetGitLabCIService sets GitLab CI service for a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#edit-gitlab-ci-service
-func (s *ServicesService) SetGitLabCIService(pid interface{}, opt *SetGitLabCIServiceOptions, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/gitlab-ci", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// DeleteGitLabCIService deletes GitLab CI service settings for a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#delete-gitlab-ci-service
-func (s *ServicesService) DeleteGitLabCIService(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/gitlab-ci", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// SetHipChatServiceOptions represents the available SetHipChatService()
-// options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#edit-hipchat-service
-type SetHipChatServiceOptions struct {
-	Token *string `url:"token,omitempty" json:"token,omitempty" `
-	Room *string `url:"room,omitempty" json:"room,omitempty"`
-}
-
-// SetHipChatService sets HipChat service for a project
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#edit-hipchat-service
-func (s *ServicesService) SetHipChatService(pid interface{}, opt *SetHipChatServiceOptions, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/hipchat", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// DeleteHipChatService deletes HipChat service for project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#delete-hipchat-service
-func (s *ServicesService) DeleteHipChatService(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/hipchat", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// JenkinsCIService represents Jenkins CI service settings.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#jenkins
-type JenkinsCIService struct {
-	Service
-	Properties *JenkinsCIServiceProperties `json:"properties"`
-}
-
-// JenkinsCIServiceProperties represents Jenkins CI specific properties.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#jenkins
-type JenkinsCIServiceProperties struct {
-	URL string `json:"jenkins_url"`
-	EnableSSLVerification bool `json:"enable_ssl_verification"`
-	ProjectName string `json:"project_name"`
-	Username string `json:"username"`
-}
-
-// GetJenkinsCIService gets Jenkins CI service settings for a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#get-jenkins-settings
-func (s *ServicesService) GetJenkinsCIService(pid interface{}, options ...RequestOptionFunc) (*JenkinsCIService, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/jenkins", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	svc := new(JenkinsCIService)
-	resp, err := s.client.Do(req, svc)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return svc, resp, nil
-}
-
-// SetJenkinsCIServiceOptions represents the available SetJenkinsCIService()
-// options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#jenkins
-type SetJenkinsCIServiceOptions struct {
-	URL *string `url:"jenkins_url,omitempty" json:"jenkins_url,omitempty"`
-	EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"`
-	ProjectName *string `url:"project_name,omitempty" json:"project_name,omitempty"`
-	Username *string `url:"username,omitempty" json:"username,omitempty"`
-	Password *string `url:"password,omitempty" json:"password,omitempty"`
-	PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"`
-	MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"`
-	TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"`
-}
-
-// SetJenkinsCIService sets Jenkins service for a project
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#set-up-jenkins
-func (s *ServicesService) SetJenkinsCIService(pid interface{}, opt *SetJenkinsCIServiceOptions, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/jenkins", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// DeleteJenkinsCIService deletes Jenkins CI service for project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#disable-jenkins
-func (s *ServicesService) DeleteJenkinsCIService(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/jenkins", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// JiraService represents Jira service settings.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#jira
-type JiraService struct {
-	Service
-	Properties *JiraServiceProperties `json:"properties"`
-}
-
-// JiraServiceProperties represents Jira specific properties.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#jira
-type JiraServiceProperties struct {
-	URL string `json:"url"`
-	APIURL string `json:"api_url"`
-	Username string `json:"username" `
-	Password string `json:"password" `
-	Active bool `json:"active"`
-	JiraAuthType int `json:"jira_auth_type"`
-	JiraIssuePrefix string `json:"jira_issue_prefix"`
-	JiraIssueRegex string `json:"jira_issue_regex"`
-	JiraIssueTransitionAutomatic bool `json:"jira_issue_transition_automatic"`
-	JiraIssueTransitionID string `json:"jira_issue_transition_id"`
-	CommitEvents bool `json:"commit_events"`
-	MergeRequestsEvents bool `json:"merge_requests_events"`
-	CommentOnEventEnabled bool `json:"comment_on_event_enabled"`
-	IssuesEnabled bool `json:"issues_enabled"`
-	ProjectKeys []string `json:"project_keys" `
-	UseInheritedSettings bool `json:"use_inherited_settings"`
-
-	// Deprecated: This parameter was removed in GitLab 17.0
-	ProjectKey string `json:"project_key" `
-}
-
-// UnmarshalJSON decodes the Jira Service Properties.
-//
-// This allows support of JiraIssueTransitionID for both type string (>11.9) and float64 (<11.9)
-func (p *JiraServiceProperties) UnmarshalJSON(b []byte) error {
-	type Alias JiraServiceProperties
-	raw := struct {
-		*Alias
-		JiraIssueTransitionID interface{} `json:"jira_issue_transition_id"`
-	}{
-		Alias: (*Alias)(p),
-	}
-
-	if err := json.Unmarshal(b, &raw); err != nil {
-		return err
-	}
-
-	switch id := raw.JiraIssueTransitionID.(type) {
-	case nil:
-		// No action needed.
-	case string:
-		p.JiraIssueTransitionID = id
-	case float64:
-		p.JiraIssueTransitionID = strconv.Itoa(int(id))
-	default:
-		return fmt.Errorf("failed to unmarshal JiraTransitionID of type: %T", id)
-	}
-
-	return nil
-}
-
-// GetJiraService gets Jira service settings for a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#get-jira-service-settings
-func (s *ServicesService) GetJiraService(pid interface{}, options ...RequestOptionFunc) (*JiraService, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/integrations/jira", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	svc := new(JiraService)
-	resp, err := s.client.Do(req, svc)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return svc, resp, nil
-}
-
-// SetJiraServiceOptions represents the available SetJiraService()
-// options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#edit-jira-service
-type SetJiraServiceOptions struct {
-	URL *string `url:"url,omitempty" json:"url,omitempty"`
-	APIURL *string `url:"api_url,omitempty" json:"api_url,omitempty"`
-	Username *string `url:"username,omitempty" json:"username,omitempty" `
-	Password *string `url:"password,omitempty" json:"password,omitempty" `
-	Active *bool `url:"active,omitempty" json:"active,omitempty"`
-	JiraAuthType *int `url:"jira_auth_type,omitempty" json:"jira_auth_type,omitempty"`
-	JiraIssuePrefix *string `url:"jira_issue_prefix,omitempty" json:"jira_issue_prefix,omitempty"`
-	JiraIssueRegex *string `url:"jira_issue_regex,omitempty" json:"jira_issue_regex,omitempty"`
-	JiraIssueTransitionAutomatic *bool `url:"jira_issue_transition_automatic,omitempty" json:"jira_issue_transition_automatic,omitempty"`
-	JiraIssueTransitionID *string `url:"jira_issue_transition_id,omitempty" json:"jira_issue_transition_id,omitempty"`
-	CommitEvents *bool `url:"commit_events,omitempty" json:"commit_events,omitempty"`
-	MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"`
-	CommentOnEventEnabled *bool `url:"comment_on_event_enabled,omitempty" json:"comment_on_event_enabled,omitempty"`
-	IssuesEnabled *bool `url:"issues_enabled,omitempty" json:"issues_enabled,omitempty"`
-	ProjectKeys *[]string `url:"project_keys,comma,omitempty" json:"project_keys,omitempty" `
-	UseInheritedSettings *bool `url:"use_inherited_settings,omitempty" json:"use_inherited_settings,omitempty"`
-
-	// Deprecated: This parameter was removed in GitLab 17.0
-	ProjectKey *string `url:"project_key,omitempty" json:"project_key,omitempty" `
-}
-
-// SetJiraService sets Jira service for a project
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#edit-jira-service
-func (s *ServicesService) SetJiraService(pid interface{}, opt *SetJiraServiceOptions, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/integrations/jira", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// DeleteJiraService deletes Jira service for project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#delete-jira-service
-func (s *ServicesService) DeleteJiraService(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/integrations/jira", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// MattermostService represents Mattermost service settings.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#mattermost-notifications
-type MattermostService struct {
-	Service
-	Properties *MattermostServiceProperties `json:"properties"`
-}
-
-// MattermostServiceProperties represents Mattermost specific properties.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#mattermost-notifications
-type MattermostServiceProperties struct {
-	WebHook string `json:"webhook"`
-	Username string `json:"username"`
-	Channel string `json:"channel"`
-	NotifyOnlyBrokenPipelines BoolValue `json:"notify_only_broken_pipelines"`
-	BranchesToBeNotified string `json:"branches_to_be_notified"`
-	ConfidentialIssueChannel string `json:"confidential_issue_channel"`
-	ConfidentialNoteChannel string `json:"confidential_note_channel"`
-	IssueChannel string `json:"issue_channel"`
-	MergeRequestChannel string `json:"merge_request_channel"`
-	NoteChannel string `json:"note_channel"`
-	TagPushChannel string `json:"tag_push_channel"`
-	PipelineChannel string `json:"pipeline_channel"`
-	PushChannel string `json:"push_channel"`
-	VulnerabilityChannel string `json:"vulnerability_channel"`
-	WikiPageChannel string `json:"wiki_page_channel"`
-}
-
-// GetMattermostService gets Mattermost service settings for a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#get-slack-service-settings
-func (s *ServicesService) GetMattermostService(pid interface{}, options ...RequestOptionFunc) (*MattermostService, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/mattermost", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	svc := new(MattermostService)
-	resp, err := s.client.Do(req, svc)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return svc, resp, nil
-}
-
-// SetMattermostServiceOptions represents the available SetMattermostService()
-// options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#createedit-mattermost-notifications-service
-type SetMattermostServiceOptions struct {
-	WebHook *string `url:"webhook,omitempty" json:"webhook,omitempty"`
-	Username *string `url:"username,omitempty" json:"username,omitempty"`
-	Channel *string `url:"channel,omitempty" json:"channel,omitempty"`
-	NotifyOnlyBrokenPipelines *bool `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"`
-	BranchesToBeNotified *string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"`
-	PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"`
-	IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"`
-	ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"`
-	MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"`
-	TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"`
-	NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"`
-	ConfidentialNoteChannel *string `url:"confidential_note_channel,omitempty" json:"confidential_note_channel,omitempty"`
-	PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"`
-	WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"`
-	PushChannel *string `url:"push_channel,omitempty" json:"push_channel,omitempty"`
-	IssueChannel *string `url:"issue_channel,omitempty" json:"issue_channel,omitempty"`
-	ConfidentialIssueChannel *string `url:"confidential_issue_channel,omitempty" json:"confidential_issue_channel,omitempty"`
-	MergeRequestChannel *string `url:"merge_request_channel,omitempty" json:"merge_request_channel,omitempty"`
-	NoteChannel *string `url:"note_channel,omitempty" json:"note_channel,omitempty"`
-	ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"`
-	TagPushChannel *string `url:"tag_push_channel,omitempty" json:"tag_push_channel,omitempty"`
-	PipelineChannel *string `url:"pipeline_channel,omitempty" json:"pipeline_channel,omitempty"`
-	WikiPageChannel *string `url:"wiki_page_channel,omitempty" json:"wiki_page_channel,omitempty"`
-}
-
-// MattermostSlashCommandsService represents Mattermost slash commands settings.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#mattermost-slash-commands
-type MattermostSlashCommandsService struct {
-	Service
-	Properties *MattermostSlashCommandsProperties `json:"properties"`
-}
-
-// MattermostSlashCommandsProperties represents Mattermost slash commands specific properties.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#mattermost-slash-commands
-type MattermostSlashCommandsProperties struct {
-	Token string `json:"token"`
-	Username string `json:"username,omitempty"`
-}
-
-// GetMattermostSlashCommandsService gets Slack Mattermost commands service settings for a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#get-mattermost-slash-command-integration-settings
-func (s *ServicesService) GetMattermostSlashCommandsService(pid interface{}, options ...RequestOptionFunc) (*MattermostSlashCommandsService, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/mattermost-slash-commands", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	svc := new(MattermostSlashCommandsService)
-	resp, err := s.client.Do(req, svc)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return svc, resp, nil
-}
-
-// SetMattermostSlashCommandsServiceOptions represents the available SetSlackSlashCommandsService()
-// options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#get-mattermost-slash-command-integration-settings
-type SetMattermostSlashCommandsServiceOptions struct {
-	Token *string `url:"token,omitempty" json:"token,omitempty"`
-	Username *string `url:"username,omitempty" json:"username,omitempty"`
-}
-
-// SetMattermostSlashCommandsService sets Mattermost slash commands service for a project
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#createedit-mattermost-slash-command-integration
-func (s *ServicesService) SetMattermostSlashCommandsService(pid interface{}, opt *SetMattermostSlashCommandsServiceOptions, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/mattermost-slash-commands", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// DeleteMattermostSlashCommandsService deletes Mattermost slash commands service for project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#disable-mattermost-slash-command-integration
-func (s *ServicesService) DeleteMattermostSlashCommandsService(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/mattermost-slash-commands", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// SetMattermostService sets Mattermost service for a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#createedit-mattermost-notifications-service
-func (s *ServicesService) SetMattermostService(pid interface{}, opt *SetMattermostServiceOptions, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/mattermost", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// DeleteMattermostService deletes Mattermost service for project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#delete-mattermost-notifications-service
-func (s *ServicesService) DeleteMattermostService(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/mattermost", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// MicrosoftTeamsService represents Microsoft Teams service settings.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#microsoft-teams
-type MicrosoftTeamsService struct {
-	Service
-	Properties *MicrosoftTeamsServiceProperties `json:"properties"`
-}
-
-// MicrosoftTeamsServiceProperties represents Microsoft Teams specific properties.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#microsoft-teams
-type MicrosoftTeamsServiceProperties struct {
-	WebHook string `json:"webhook"`
-	NotifyOnlyBrokenPipelines BoolValue `json:"notify_only_broken_pipelines"`
-	BranchesToBeNotified string `json:"branches_to_be_notified"`
-	IssuesEvents BoolValue `json:"issues_events"`
-	ConfidentialIssuesEvents BoolValue `json:"confidential_issues_events"`
-	MergeRequestsEvents BoolValue `json:"merge_requests_events"`
-	TagPushEvents BoolValue `json:"tag_push_events"`
-	NoteEvents BoolValue `json:"note_events"`
-	ConfidentialNoteEvents BoolValue `json:"confidential_note_events"`
-	PipelineEvents BoolValue `json:"pipeline_events"`
-	WikiPageEvents BoolValue `json:"wiki_page_events"`
-}
-
-// GetMicrosoftTeamsService gets MicrosoftTeams service settings for a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#get-microsoft-teams-service-settings
-func (s *ServicesService) GetMicrosoftTeamsService(pid interface{}, options ...RequestOptionFunc) (*MicrosoftTeamsService, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/microsoft-teams", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	svc := new(MicrosoftTeamsService)
-	resp, err := s.client.Do(req, svc)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return svc, resp, nil
-}
-
-// SetMicrosoftTeamsServiceOptions represents the available SetMicrosoftTeamsService()
-// options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#create-edit-microsoft-teams-service
-type SetMicrosoftTeamsServiceOptions struct {
-	WebHook *string `url:"webhook,omitempty" json:"webhook,omitempty"`
-	NotifyOnlyBrokenPipelines *bool `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"`
-	BranchesToBeNotified *string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"`
-	PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"`
-	IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"`
-	ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"`
-	MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"`
-	TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"`
-	NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"`
-	ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"`
-	PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"`
-	WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"`
-}
-
-// SetMicrosoftTeamsService sets Microsoft Teams service for a project
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#create-edit-microsoft-teams-service
-func (s *ServicesService) SetMicrosoftTeamsService(pid interface{}, opt *SetMicrosoftTeamsServiceOptions, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/microsoft-teams", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, err
-	}
-	return s.client.Do(req, nil)
-}
-
-// DeleteMicrosoftTeamsService deletes Microsoft Teams service for project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#delete-microsoft-teams-service
-func (s *ServicesService) DeleteMicrosoftTeamsService(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/microsoft-teams", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// PipelinesEmailService represents Pipelines Email service settings.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#pipeline-emails
-type PipelinesEmailService struct {
-	Service
-	Properties *PipelinesEmailProperties `json:"properties"`
-}
-
-// PipelinesEmailProperties represents PipelinesEmail specific properties.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#pipeline-emails
-type PipelinesEmailProperties struct {
-	Recipients string `json:"recipients"`
-	NotifyOnlyBrokenPipelines BoolValue `json:"notify_only_broken_pipelines"`
-	NotifyOnlyDefaultBranch BoolValue `json:"notify_only_default_branch"`
-	BranchesToBeNotified string `json:"branches_to_be_notified"`
-}
-
-// GetPipelinesEmailService gets Pipelines Email service settings for a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#get-pipeline-emails-service-settings
-func (s *ServicesService) GetPipelinesEmailService(pid interface{}, options ...RequestOptionFunc) (*PipelinesEmailService, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/pipelines-email", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	svc := new(PipelinesEmailService)
-	resp, err := s.client.Do(req, svc)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return svc, resp, nil
-}
-
-// SetPipelinesEmailServiceOptions represents the available
-// SetPipelinesEmailService() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#pipeline-emails
-type SetPipelinesEmailServiceOptions struct {
-	Recipients *string `url:"recipients,omitempty" json:"recipients,omitempty"`
-	NotifyOnlyBrokenPipelines *bool `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"`
-	NotifyOnlyDefaultBranch *bool `url:"notify_only_default_branch,omitempty" json:"notify_only_default_branch,omitempty"`
-	AddPusher *bool `url:"add_pusher,omitempty" json:"add_pusher,omitempty"`
-	BranchesToBeNotified *string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"`
-	PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"`
-}
-
-// SetPipelinesEmailService sets Pipelines Email service for a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#pipeline-emails
-func (s *ServicesService) SetPipelinesEmailService(pid interface{}, opt *SetPipelinesEmailServiceOptions, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/pipelines-email", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// DeletePipelinesEmailService deletes Pipelines Email service settings for a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#delete-pipeline-emails-service
-func (s *ServicesService) DeletePipelinesEmailService(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/pipelines-email", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// PrometheusService represents Prometheus service settings.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#prometheus
-type PrometheusService struct {
-	Service
-	Properties *PrometheusServiceProperties `json:"properties"`
-}
-
-// PrometheusServiceProperties represents Prometheus specific properties.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#prometheus
-type PrometheusServiceProperties struct {
-	APIURL string `json:"api_url"`
-	GoogleIAPAudienceClientID string `json:"google_iap_audience_client_id"`
-	GoogleIAPServiceAccountJSON string `json:"google_iap_service_account_json"`
-}
-
-// GetPrometheusService gets Prometheus service settings for a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#get-prometheus-service-settings
-func (s *ServicesService) GetPrometheusService(pid interface{}, options ...RequestOptionFunc) (*PrometheusService, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/prometheus", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	svc := new(PrometheusService)
-	resp, err := s.client.Do(req, svc)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return svc, resp, nil
-}
-
-// SetPrometheusServiceOptions represents the available SetPrometheusService()
-// options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#createedit-prometheus-service
-type SetPrometheusServiceOptions struct {
-	APIURL *string `url:"api_url,omitempty" json:"api_url,omitempty"`
-	GoogleIAPAudienceClientID *string `url:"google_iap_audience_client_id,omitempty" json:"google_iap_audience_client_id,omitempty"`
-	GoogleIAPServiceAccountJSON *string `url:"google_iap_service_account_json,omitempty" json:"google_iap_service_account_json,omitempty"`
-}
-
-// SetPrometheusService sets Prometheus service for a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#createedit-prometheus-service
-func (s *ServicesService) SetPrometheusService(pid interface{}, opt *SetPrometheusServiceOptions, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/prometheus", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// DeletePrometheusService deletes Prometheus service settings for a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#delete-prometheus-service
-func (s *ServicesService) DeletePrometheusService(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/prometheus", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// SlackService represents Slack service settings.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#slack
-type SlackService struct {
-	Service
-	Properties *SlackServiceProperties `json:"properties"`
-}
-
-// SlackServiceProperties represents Slack specific properties.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#slack
-type SlackServiceProperties struct {
-	WebHook string `json:"webhook"`
-	Username string `json:"username"`
-	Channel string `json:"channel"`
-	NotifyOnlyBrokenPipelines BoolValue `json:"notify_only_broken_pipelines"`
-	NotifyOnlyDefaultBranch BoolValue `json:"notify_only_default_branch"`
-	BranchesToBeNotified string `json:"branches_to_be_notified"`
-	AlertChannel string `json:"alert_channel"`
-	ConfidentialIssueChannel string `json:"confidential_issue_channel"`
-	ConfidentialNoteChannel string `json:"confidential_note_channel"`
-	DeploymentChannel string `json:"deployment_channel"`
-	IssueChannel string `json:"issue_channel"`
-	MergeRequestChannel string `json:"merge_request_channel"`
-	NoteChannel string `json:"note_channel"`
-	TagPushChannel string `json:"tag_push_channel"`
-	PipelineChannel string `json:"pipeline_channel"`
-	PushChannel string `json:"push_channel"`
-	VulnerabilityChannel string `json:"vulnerability_channel"`
-	WikiPageChannel string `json:"wiki_page_channel"`
-}
-
-// GetSlackService gets Slack service settings for a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#get-slack-service-settings
-func (s *ServicesService) GetSlackService(pid interface{}, options ...RequestOptionFunc) (*SlackService, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/slack", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	svc := new(SlackService)
-	resp, err := s.client.Do(req, svc)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return svc, resp, nil
-}
-
-// SetSlackServiceOptions represents the available SetSlackService()
-// options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#edit-slack-service
-type SetSlackServiceOptions struct {
-	WebHook *string `url:"webhook,omitempty" json:"webhook,omitempty"`
-	Username *string `url:"username,omitempty" json:"username,omitempty"`
-	Channel *string `url:"channel,omitempty" json:"channel,omitempty"`
-	NotifyOnlyBrokenPipelines *bool `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"`
-	NotifyOnlyDefaultBranch *bool `url:"notify_only_default_branch,omitempty" json:"notify_only_default_branch,omitempty"`
-	BranchesToBeNotified *string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"`
-	AlertChannel *string `url:"alert_channel,omitempty" json:"alert_channel,omitempty"`
-	AlertEvents *bool `url:"alert_events,omitempty" json:"alert_events,omitempty"`
-	ConfidentialIssueChannel *string `url:"confidential_issue_channel,omitempty" json:"confidential_issue_channel,omitempty"`
-	ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"`
-	ConfidentialNoteChannel *string `url:"confidential_note_channel,omitempty" json:"confidential_note_channel,omitempty"`
-	ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"`
-	DeploymentChannel *string `url:"deployment_channel,omitempty" json:"deployment_channel,omitempty"`
-	DeploymentEvents *bool `url:"deployment_events,omitempty" json:"deployment_events,omitempty"`
-	IssueChannel *string `url:"issue_channel,omitempty" json:"issue_channel,omitempty"`
-	IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"`
-	MergeRequestChannel *string `url:"merge_request_channel,omitempty" json:"merge_request_channel,omitempty"`
-	MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"`
-	NoteChannel *string `url:"note_channel,omitempty" json:"note_channel,omitempty"`
-	NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"`
-	PipelineChannel *string `url:"pipeline_channel,omitempty" json:"pipeline_channel,omitempty"`
-	PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"`
-	PushChannel *string `url:"push_channel,omitempty" json:"push_channel,omitempty"`
-	PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"`
-	TagPushChannel *string `url:"tag_push_channel,omitempty" json:"tag_push_channel,omitempty"`
-	TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"`
-	WikiPageChannel *string `url:"wiki_page_channel,omitempty" json:"wiki_page_channel,omitempty"`
-	WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"`
-}
-
-// SetSlackService sets Slack service for a project
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#edit-slack-service
-func (s *ServicesService) SetSlackService(pid interface{}, opt *SetSlackServiceOptions, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/slack", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// DeleteSlackService deletes Slack service for project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#delete-slack-service
-func (s *ServicesService) DeleteSlackService(pid interface{}, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/slack", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// SlackSlashCommandsService represents Slack slash commands settings.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#slack-slash-commands
-type SlackSlashCommandsService struct {
-	Service
-	Properties *SlackSlashCommandsProperties `json:"properties"`
-}
-
-// SlackSlashCommandsProperties represents Slack slash commands specific properties.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#slack-slash-commands
-type SlackSlashCommandsProperties struct {
-	Token string `json:"token"`
-}
-
-// GetSlackSlashCommandsService gets Slack slash commands service settings for a project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#get-slack-slash-command-integration-settings
-func (s *ServicesService) GetSlackSlashCommandsService(pid interface{}, options ...RequestOptionFunc) (*SlackSlashCommandsService, *Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/slack-slash-commands", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	svc := new(SlackSlashCommandsService)
-	resp, err := s.client.Do(req, svc)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return svc, resp, nil
-}
-
-// SetSlackSlashCommandsServiceOptions represents the available SetSlackSlashCommandsService()
-// options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/integrations.html#createedit-slack-slash-command-service
-type SetSlackSlashCommandsServiceOptions struct {
-	Token *string `url:"token,omitempty" json:"token,omitempty"`
-}
-
-// SetSlackSlashCommandsService sets Slack slash commands service for a project
-//
-// GitLab API docs:
-// https://docs.gitlab.com/13.12/ee/api/integrations.html#createedit-slack-slash-command-service
-func (s *ServicesService) SetSlackSlashCommandsService(pid interface{}, opt *SetSlackSlashCommandsServiceOptions, options ...RequestOptionFunc) (*Response, error) {
-	project, err := parseID(pid)
-	if err != nil {
-		return nil, err
-	}
-	u := fmt.Sprintf("projects/%s/services/slack-slash-commands", PathEscape(project))
-
-	req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// DeleteSlackSlashCommandsService deletes Slack slash commands service for project.
-// -// GitLab API docs: -// https://docs.gitlab.com/13.12/ee/api/integrations.html#delete-slack-slash-command-service -func (s *ServicesService) DeleteSlackSlashCommandsService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/services/slack-slash-commands", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// TelegramService represents Telegram service settings. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#telegram -type TelegramService struct { - Service - Properties *TelegramServiceProperties `json:"properties"` -} - -// TelegramServiceProperties represents Telegram specific properties. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#set-up-telegram -type TelegramServiceProperties struct { - Room string `json:"room"` - NotifyOnlyBrokenPipelines bool `json:"notify_only_broken_pipelines"` - BranchesToBeNotified string `json:"branches_to_be_notified"` -} - -// GetTelegramService gets Telegram service settings for a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#get-telegram-settings -func (s *ServicesService) GetTelegramService(pid interface{}, options ...RequestOptionFunc) (*TelegramService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/telegram", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - svc := new(TelegramService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, resp, err - } - - return svc, resp, nil -} -
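A similar usage sketch for the Telegram integration defined above (same assumptions: an authenticated *gitlab.Client named git and an fmt import; the helper is illustrative, not part of this patch):

	// showTelegramIntegration is a hypothetical helper (not from this patch):
	// it reads back the Telegram integration settings for a project.
	func showTelegramIntegration(git *gitlab.Client, projectID int) error {
		svc, _, err := git.Services.GetTelegramService(projectID)
		if err != nil {
			return err
		}
		if svc.Properties != nil {
			fmt.Println("room:", svc.Properties.Room)
			fmt.Println("branches notified:", svc.Properties.BranchesToBeNotified)
		}
		return nil
	}

-// SetTelegramServiceOptions represents the available SetTelegramService() -// options.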
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#set-up-telegram -type SetTelegramServiceOptions struct { - Token *string `url:"token,omitempty" json:"token,omitempty"` - Room *string `url:"room,omitempty" json:"room,omitempty"` - NotifyOnlyBrokenPipelines *bool `url:"notify_only_broken_pipelines,omitempty" json:"notify_only_broken_pipelines,omitempty"` - BranchesToBeNotified *string `url:"branches_to_be_notified,omitempty" json:"branches_to_be_notified,omitempty"` - PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` - IssuesEvents *bool `url:"issues_events,omitempty" json:"issues_events,omitempty"` - ConfidentialIssuesEvents *bool `url:"confidential_issues_events,omitempty" json:"confidential_issues_events,omitempty"` - MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` - TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` - NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"` - ConfidentialNoteEvents *bool `url:"confidential_note_events,omitempty" json:"confidential_note_events,omitempty"` - PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"` - WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"` -} - -// SetTelegramService sets Telegram service for a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#set-up-telegram -func (s *ServicesService) SetTelegramService(pid interface{}, opt *SetTelegramServiceOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/services/telegram", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// DeleteTelegramService deletes Telegram service for a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#disable-telegram -func (s *ServicesService) DeleteTelegramService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/services/telegram", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// YouTrackService represents YouTrack service settings. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#youtrack -type YouTrackService struct { - Service - Properties *YouTrackServiceProperties `json:"properties"` -} - -// YouTrackServiceProperties represents YouTrack specific properties. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#youtrack -type YouTrackServiceProperties struct { - IssuesURL string `json:"issues_url"` - ProjectURL string `json:"project_url"` - Description string `json:"description"` - PushEvents bool `json:"push_events"` -} -
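One more sketch, for the YouTrack integration whose structs are defined above; SetYouTrackServiceOptions is declared a little further down in the same deleted file (assumptions as before; the URLs are placeholders):

	// enableYouTrack is a hypothetical helper (not from this patch): it turns
	// on the YouTrack integration with placeholder issue/project URLs.
	func enableYouTrack(git *gitlab.Client, projectID int) error {
		opt := &gitlab.SetYouTrackServiceOptions{
			IssuesURL:  gitlab.String("https://example.youtrack.cloud/issue/:id"),
			ProjectURL: gitlab.String("https://example.youtrack.cloud/projects/1"),
			PushEvents: gitlab.Bool(true),
		}
		_, err := git.Services.SetYouTrackService(projectID, opt)
		return err
	}

-// GetYouTrackService gets YouTrack service settings for a project.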
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#get-youtrack-service-settings -func (s *ServicesService) GetYouTrackService(pid interface{}, options ...RequestOptionFunc) (*YouTrackService, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/services/youtrack", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - svc := new(YouTrackService) - resp, err := s.client.Do(req, svc) - if err != nil { - return nil, resp, err - } - - return svc, resp, nil -} - -// SetYouTrackServiceOptions represents the available SetYouTrackService() -// options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#createedit-youtrack-service -type SetYouTrackServiceOptions struct { - IssuesURL *string `url:"issues_url,omitempty" json:"issues_url,omitempty"` - ProjectURL *string `url:"project_url,omitempty" json:"project_url,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` -} - -// SetYouTrackService sets YouTrack service for a project -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#createedit-youtrack-service -func (s *ServicesService) SetYouTrackService(pid interface{}, opt *SetYouTrackServiceOptions, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/services/youtrack", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// DeleteYouTrackService deletes YouTrack service settings for a project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/integrations.html#delete-youtrack-service -func (s *ServicesService) DeleteYouTrackService(pid interface{}, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/services/youtrack", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/settings.go b/vendor/github.com/xanzy/go-gitlab/settings.go deleted file mode 100644 index f4d67a4f04..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/settings.go +++ /dev/null @@ -1,965 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "encoding/json" - "net/http" - "time" -) - -// SettingsService handles communication with the application SettingsService -// related methods of the GitLab API. 
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/settings.html -type SettingsService struct { - client *Client -} - -// Settings represents the GitLab application settings. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/settings.html -// -// The available parameters have been modeled directly after the code, as the -// documentation seems to be inaccurate. -// -// https://gitlab.com/gitlab-org/gitlab/-/blob/v14.9.3-ee/lib/api/settings.rb -// https://gitlab.com/gitlab-org/gitlab/-/blob/v14.9.3-ee/lib/api/entities/application_setting.rb#L5 -// https://gitlab.com/gitlab-org/gitlab/-/blob/v14.9.3-ee/app/helpers/application_settings_helper.rb#L192 -// https://gitlab.com/gitlab-org/gitlab/-/blob/v14.9.3-ee/ee/lib/ee/api/helpers/settings_helpers.rb#L10 -// https://gitlab.com/gitlab-org/gitlab/-/blob/v14.9.3-ee/ee/app/helpers/ee/application_settings_helper.rb#L20 -type Settings struct { - ID int `json:"id"` - AbuseNotificationEmail string `json:"abuse_notification_email"` - AdminMode bool `json:"admin_mode"` - AfterSignOutPath string `json:"after_sign_out_path"` - AfterSignUpText string `json:"after_sign_up_text"` - AkismetAPIKey string `json:"akismet_api_key"` - AkismetEnabled bool `json:"akismet_enabled"` - AllowAccountDeletion bool `json:"allow_account_deletion"` - AllowGroupOwnersToManageLDAP bool `json:"allow_group_owners_to_manage_ldap"` - AllowLocalRequestsFromSystemHooks bool `json:"allow_local_requests_from_system_hooks"` - AllowLocalRequestsFromWebHooksAndServices bool `json:"allow_local_requests_from_web_hooks_and_services"` - AllowProjectCreationForGuestAndBelow bool `json:"allow_project_creation_for_guest_and_below"` - AllowRunnerRegistrationToken bool `json:"allow_runner_registration_token"` - ArchiveBuildsInHumanReadable string `json:"archive_builds_in_human_readable"` - ASCIIDocMaxIncludes int `json:"asciidoc_max_includes"` - AssetProxyAllowlist []string `json:"asset_proxy_allowlist"` - AssetProxyEnabled bool `json:"asset_proxy_enabled"` - AssetProxyURL string `json:"asset_proxy_url"` - AssetProxySecretKey string `json:"asset_proxy_secret_key"` - AuthorizedKeysEnabled bool `json:"authorized_keys_enabled"` - AutoBanUserOnExcessiveProjectsDownload bool `json:"auto_ban_user_on_excessive_projects_download"` - AutoDevOpsDomain string `json:"auto_devops_domain"` - AutoDevOpsEnabled bool `json:"auto_devops_enabled"` - AutomaticPurchasedStorageAllocation bool `json:"automatic_purchased_storage_allocation"` - BulkImportConcurrentPipelineBatchLimit int `json:"bulk_import_concurrent_pipeline_batch_limit"` - BulkImportEnabled bool `json:"bulk_import_enabled"` - BulkImportMaxDownloadFileSize int `json:"bulk_import_max_download_file_size"` - CanCreateGroup bool `json:"can_create_group"` - CheckNamespacePlan bool `json:"check_namespace_plan"` - CIMaxIncludes int `json:"ci_max_includes"` - CIMaxTotalYAMLSizeBytes int `json:"ci_max_total_yaml_size_bytes"` - CommitEmailHostname string `json:"commit_email_hostname"` - ConcurrentBitbucketImportJobsLimit int `json:"concurrent_bitbucket_import_jobs_limit"` - ConcurrentBitbucketServerImportJobsLimit int `json:"concurrent_bitbucket_server_import_jobs_limit"` - ConcurrentGitHubImportJobsLimit int `json:"concurrent_github_import_jobs_limit"` - ContainerExpirationPoliciesEnableHistoricEntries bool `json:"container_expiration_policies_enable_historic_entries"` - ContainerRegistryCleanupTagsServiceMaxListSize int `json:"container_registry_cleanup_tags_service_max_list_size"` - ContainerRegistryDeleteTagsServiceTimeout int 
`json:"container_registry_delete_tags_service_timeout"` - ContainerRegistryExpirationPoliciesCaching bool `json:"container_registry_expiration_policies_caching"` - ContainerRegistryExpirationPoliciesWorkerCapacity int `json:"container_registry_expiration_policies_worker_capacity"` - ContainerRegistryImportCreatedBefore *time.Time `json:"container_registry_import_created_before"` - ContainerRegistryImportMaxRetries int `json:"container_registry_import_max_retries"` - ContainerRegistryImportMaxStepDuration int `json:"container_registry_import_max_step_duration"` - ContainerRegistryImportMaxTagsCount int `json:"container_registry_import_max_tags_count"` - ContainerRegistryImportStartMaxRetries int `json:"container_registry_import_start_max_retries"` - ContainerRegistryImportTargetPlan string `json:"container_registry_import_target_plan"` - ContainerRegistryTokenExpireDelay int `json:"container_registry_token_expire_delay"` - CreatedAt *time.Time `json:"created_at"` - CustomHTTPCloneURLRoot string `json:"custom_http_clone_url_root"` - DNSRebindingProtectionEnabled bool `json:"dns_rebinding_protection_enabled"` - DSAKeyRestriction int `json:"dsa_key_restriction"` - DeactivateDormantUsers bool `json:"deactivate_dormant_users"` - DeactivateDormantUsersPeriod int `json:"deactivate_dormant_users_period"` - DecompressArchiveFileTimeout int `json:"decompress_archive_file_timeout"` - DefaultArtifactsExpireIn string `json:"default_artifacts_expire_in"` - DefaultBranchName string `json:"default_branch_name"` - DefaultBranchProtection int `json:"default_branch_protection"` - DefaultBranchProtectionDefaults BranchProtectionDefaults `json:"default_branch_protection_defaults,omitempty"` - DefaultCiConfigPath string `json:"default_ci_config_path"` - DefaultGroupVisibility VisibilityValue `json:"default_group_visibility"` - DefaultPreferredLanguage string `json:"default_preferred_language"` - DefaultProjectCreation int `json:"default_project_creation"` - DefaultProjectDeletionProtection bool `json:"default_project_deletion_protection"` - DefaultProjectVisibility VisibilityValue `json:"default_project_visibility"` - DefaultProjectsLimit int `json:"default_projects_limit"` - DefaultSnippetVisibility VisibilityValue `json:"default_snippet_visibility"` - DefaultSyntaxHighlightingTheme int `json:"default_syntax_highlighting_theme"` - DelayedGroupDeletion bool `json:"delayed_group_deletion"` - DelayedProjectDeletion bool `json:"delayed_project_deletion"` - DeleteInactiveProjects bool `json:"delete_inactive_projects"` - DeleteUnconfirmedUsers bool `json:"delete_unconfirmed_users"` - DeletionAdjournedPeriod int `json:"deletion_adjourned_period"` - DiagramsnetEnabled bool `json:"diagramsnet_enabled"` - DiagramsnetURL string `json:"diagramsnet_url"` - DiffMaxFiles int `json:"diff_max_files"` - DiffMaxLines int `json:"diff_max_lines"` - DiffMaxPatchBytes int `json:"diff_max_patch_bytes"` - DisableAdminOAuthScopes bool `json:"disable_admin_oauth_scopes"` - DisableFeedToken bool `json:"disable_feed_token"` - DisableOverridingApproversPerMergeRequest bool `json:"disable_overriding_approvers_per_merge_request"` - DisablePersonalAccessTokens bool `json:"disable_personal_access_tokens"` - DisabledOauthSignInSources []string `json:"disabled_oauth_sign_in_sources"` - DomainAllowlist []string `json:"domain_allowlist"` - DomainDenylist []string `json:"domain_denylist"` - DomainDenylistEnabled bool `json:"domain_denylist_enabled"` - DownstreamPipelineTriggerLimitPerProjectUserSHA int 
`json:"downstream_pipeline_trigger_limit_per_project_user_sha"` - DuoFeaturesEnabled bool `json:"duo_features_enabled"` - ECDSAKeyRestriction int `json:"ecdsa_key_restriction"` - ECDSASKKeyRestriction int `json:"ecdsa_sk_key_restriction"` - EKSAccessKeyID string `json:"eks_access_key_id"` - EKSAccountID string `json:"eks_account_id"` - EKSIntegrationEnabled bool `json:"eks_integration_enabled"` - EKSSecretAccessKey string `json:"eks_secret_access_key"` - Ed25519KeyRestriction int `json:"ed25519_key_restriction"` - Ed25519SKKeyRestriction int `json:"ed25519_sk_key_restriction"` - ElasticsearchAWS bool `json:"elasticsearch_aws"` - ElasticsearchAWSAccessKey string `json:"elasticsearch_aws_access_key"` - ElasticsearchAWSRegion string `json:"elasticsearch_aws_region"` - ElasticsearchAWSSecretAccessKey string `json:"elasticsearch_aws_secret_access_key"` - ElasticsearchAnalyzersKuromojiEnabled bool `json:"elasticsearch_analyzers_kuromoji_enabled"` - ElasticsearchAnalyzersKuromojiSearch bool `json:"elasticsearch_analyzers_kuromoji_search"` - ElasticsearchAnalyzersSmartCNEnabled bool `json:"elasticsearch_analyzers_smartcn_enabled"` - ElasticsearchAnalyzersSmartCNSearch bool `json:"elasticsearch_analyzers_smartcn_search"` - ElasticsearchClientRequestTimeout int `json:"elasticsearch_client_request_timeout"` - ElasticsearchIndexedFieldLengthLimit int `json:"elasticsearch_indexed_field_length_limit"` - ElasticsearchIndexedFileSizeLimitKB int `json:"elasticsearch_indexed_file_size_limit_kb"` - ElasticsearchIndexing bool `json:"elasticsearch_indexing"` - ElasticsearchLimitIndexing bool `json:"elasticsearch_limit_indexing"` - ElasticsearchMaxBulkConcurrency int `json:"elasticsearch_max_bulk_concurrency"` - ElasticsearchMaxBulkSizeMB int `json:"elasticsearch_max_bulk_size_mb"` - ElasticsearchMaxCodeIndexingConcurrency int `json:"elasticsearch_max_code_indexing_concurrency"` - ElasticsearchNamespaceIDs []int `json:"elasticsearch_namespace_ids"` - ElasticsearchPassword string `json:"elasticsearch_password"` - ElasticsearchPauseIndexing bool `json:"elasticsearch_pause_indexing"` - ElasticsearchProjectIDs []int `json:"elasticsearch_project_ids"` - ElasticsearchReplicas int `json:"elasticsearch_replicas"` - ElasticsearchRequeueWorkers bool `json:"elasticsearch_requeue_workers"` - ElasticsearchSearch bool `json:"elasticsearch_search"` - ElasticsearchShards int `json:"elasticsearch_shards"` - ElasticsearchURL []string `json:"elasticsearch_url"` - ElasticsearchUsername string `json:"elasticsearch_username"` - ElasticsearchWorkerNumberOfShards int `json:"elasticsearch_worker_number_of_shards"` - EmailAdditionalText string `json:"email_additional_text"` - EmailAuthorInBody bool `json:"email_author_in_body"` - EmailConfirmationSetting string `json:"email_confirmation_setting"` - EmailRestrictions string `json:"email_restrictions"` - EmailRestrictionsEnabled bool `json:"email_restrictions_enabled"` - EnableArtifactExternalRedirectWarningPage bool `json:"enable_artifact_external_redirect_warning_page"` - EnabledGitAccessProtocol string `json:"enabled_git_access_protocol"` - EnforceNamespaceStorageLimit bool `json:"enforce_namespace_storage_limit"` - EnforcePATExpiration bool `json:"enforce_pat_expiration"` - EnforceSSHKeyExpiration bool `json:"enforce_ssh_key_expiration"` - EnforceTerms bool `json:"enforce_terms"` - ExternalAuthClientCert string `json:"external_auth_client_cert"` - ExternalAuthClientKey string `json:"external_auth_client_key"` - ExternalAuthClientKeyPass string `json:"external_auth_client_key_pass"` - 
ExternalAuthorizationServiceDefaultLabel string `json:"external_authorization_service_default_label"` - ExternalAuthorizationServiceEnabled bool `json:"external_authorization_service_enabled"` - ExternalAuthorizationServiceTimeout float64 `json:"external_authorization_service_timeout"` - ExternalAuthorizationServiceURL string `json:"external_authorization_service_url"` - ExternalPipelineValidationServiceTimeout int `json:"external_pipeline_validation_service_timeout"` - ExternalPipelineValidationServiceToken string `json:"external_pipeline_validation_service_token"` - ExternalPipelineValidationServiceURL string `json:"external_pipeline_validation_service_url"` - FailedLoginAttemptsUnlockPeriodInMinutes int `json:"failed_login_attempts_unlock_period_in_minutes"` - FileTemplateProjectID int `json:"file_template_project_id"` - FirstDayOfWeek int `json:"first_day_of_week"` - FlocEnabled bool `json:"floc_enabled"` - GeoNodeAllowedIPs string `json:"geo_node_allowed_ips"` - GeoStatusTimeout int `json:"geo_status_timeout"` - GitRateLimitUsersAlertlist []string `json:"git_rate_limit_users_alertlist"` - GitTwoFactorSessionExpiry int `json:"git_two_factor_session_expiry"` - GitalyTimeoutDefault int `json:"gitaly_timeout_default"` - GitalyTimeoutFast int `json:"gitaly_timeout_fast"` - GitalyTimeoutMedium int `json:"gitaly_timeout_medium"` - GitlabDedicatedInstance bool `json:"gitlab_dedicated_instance"` - GitlabEnvironmentToolkitInstance bool `json:"gitlab_environment_toolkit_instance"` - GitlabShellOperationLimit int `json:"gitlab_shell_operation_limit"` - GitpodEnabled bool `json:"gitpod_enabled"` - GitpodURL string `json:"gitpod_url"` - GitRateLimitUsersAllowlist []string `json:"git_rate_limit_users_allowlist"` - GloballyAllowedIPs string `json:"globally_allowed_ips"` - GrafanaEnabled bool `json:"grafana_enabled"` - GrafanaURL string `json:"grafana_url"` - GravatarEnabled bool `json:"gravatar_enabled"` - GroupDownloadExportLimit int `json:"group_download_export_limit"` - GroupExportLimit int `json:"group_export_limit"` - GroupImportLimit int `json:"group_import_limit"` - GroupOwnersCanManageDefaultBranchProtection bool `json:"group_owners_can_manage_default_branch_protection"` - GroupRunnerTokenExpirationInterval int `json:"group_runner_token_expiration_interval"` - HTMLEmailsEnabled bool `json:"html_emails_enabled"` - HashedStorageEnabled bool `json:"hashed_storage_enabled"` - HelpPageDocumentationBaseURL string `json:"help_page_documentation_base_url"` - HelpPageHideCommercialContent bool `json:"help_page_hide_commercial_content"` - HelpPageSupportURL string `json:"help_page_support_url"` - HelpPageText string `json:"help_page_text"` - HelpText string `json:"help_text"` - HideThirdPartyOffers bool `json:"hide_third_party_offers"` - HomePageURL string `json:"home_page_url"` - HousekeepingBitmapsEnabled bool `json:"housekeeping_bitmaps_enabled"` - HousekeepingEnabled bool `json:"housekeeping_enabled"` - HousekeepingFullRepackPeriod int `json:"housekeeping_full_repack_period"` - HousekeepingGcPeriod int `json:"housekeeping_gc_period"` - HousekeepingIncrementalRepackPeriod int `json:"housekeeping_incremental_repack_period"` - HousekeepingOptimizeRepositoryPeriod int `json:"housekeeping_optimize_repository_period"` - ImportSources []string `json:"import_sources"` - InactiveProjectsDeleteAfterMonths int `json:"inactive_projects_delete_after_months"` - InactiveProjectsMinSizeMB int `json:"inactive_projects_min_size_mb"` - InactiveProjectsSendWarningEmailAfterMonths int 
`json:"inactive_projects_send_warning_email_after_months"` - IncludeOptionalMetricsInServicePing bool `json:"include_optional_metrics_in_service_ping"` - InProductMarketingEmailsEnabled bool `json:"in_product_marketing_emails_enabled"` - InvisibleCaptchaEnabled bool `json:"invisible_captcha_enabled"` - IssuesCreateLimit int `json:"issues_create_limit"` - JiraConnectApplicationKey string `json:"jira_connect_application_key"` - JiraConnectPublicKeyStorageEnabled bool `json:"jira_connect_public_key_storage_enabled"` - JiraConnectProxyURL string `json:"jira_connect_proxy_url"` - KeepLatestArtifact bool `json:"keep_latest_artifact"` - KrokiEnabled bool `json:"kroki_enabled"` - KrokiFormats map[string]bool `json:"kroki_formats"` - KrokiURL string `json:"kroki_url"` - LocalMarkdownVersion int `json:"local_markdown_version"` - LockDuoFeaturesEnabled bool `json:"lock_duo_features_enabled"` - LockMembershipsToLDAP bool `json:"lock_memberships_to_ldap"` - LoginRecaptchaProtectionEnabled bool `json:"login_recaptcha_protection_enabled"` - MailgunEventsEnabled bool `json:"mailgun_events_enabled"` - MailgunSigningKey string `json:"mailgun_signing_key"` - MaintenanceMode bool `json:"maintenance_mode"` - MaintenanceModeMessage string `json:"maintenance_mode_message"` - MavenPackageRequestsForwarding bool `json:"maven_package_requests_forwarding"` - MaxArtifactsSize int `json:"max_artifacts_size"` - MaxAttachmentSize int `json:"max_attachment_size"` - MaxDecompressedArchiveSize int `json:"max_decompressed_archive_size"` - MaxExportSize int `json:"max_export_size"` - MaxImportRemoteFileSize int `json:"max_import_remote_file_size"` - MaxImportSize int `json:"max_import_size"` - MaxLoginAttempts int `json:"max_login_attempts"` - MaxNumberOfRepositoryDownloads int `json:"max_number_of_repository_downloads"` - MaxNumberOfRepositoryDownloadsWithinTimePeriod int `json:"max_number_of_repository_downloads_within_time_period"` - MaxPagesSize int `json:"max_pages_size"` - MaxPersonalAccessTokenLifetime int `json:"max_personal_access_token_lifetime"` - MaxSSHKeyLifetime int `json:"max_ssh_key_lifetime"` - MaxTerraformStateSizeBytes int `json:"max_terraform_state_size_bytes"` - MaxYAMLDepth int `json:"max_yaml_depth"` - MaxYAMLSizeBytes int `json:"max_yaml_size_bytes"` - MetricsMethodCallThreshold int `json:"metrics_method_call_threshold"` - MinimumPasswordLength int `json:"minimum_password_length"` - MirrorAvailable bool `json:"mirror_available"` - MirrorCapacityThreshold int `json:"mirror_capacity_threshold"` - MirrorMaxCapacity int `json:"mirror_max_capacity"` - MirrorMaxDelay int `json:"mirror_max_delay"` - NPMPackageRequestsForwarding bool `json:"npm_package_requests_forwarding"` - NotesCreateLimit int `json:"notes_create_limit"` - NotifyOnUnknownSignIn bool `json:"notify_on_unknown_sign_in"` - NugetSkipMetadataURLValidation bool `json:"nuget_skip_metadata_url_validation"` - OutboundLocalRequestsAllowlistRaw string `json:"outbound_local_requests_allowlist_raw"` - OutboundLocalRequestsWhitelist []string `json:"outbound_local_requests_whitelist"` - PackageMetadataPURLTypes []int `json:"package_metadata_purl_types"` - PackageRegistryAllowAnyoneToPullOption bool `json:"package_registry_allow_anyone_to_pull_option"` - PackageRegistryCleanupPoliciesWorkerCapacity int `json:"package_registry_cleanup_policies_worker_capacity"` - PagesDomainVerificationEnabled bool `json:"pages_domain_verification_enabled"` - PasswordAuthenticationEnabledForGit bool `json:"password_authentication_enabled_for_git"` - 
PasswordAuthenticationEnabledForWeb bool `json:"password_authentication_enabled_for_web"` - PasswordNumberRequired bool `json:"password_number_required"` - PasswordSymbolRequired bool `json:"password_symbol_required"` - PasswordUppercaseRequired bool `json:"password_uppercase_required"` - PasswordLowercaseRequired bool `json:"password_lowercase_required"` - PerformanceBarAllowedGroupID int `json:"performance_bar_allowed_group_id"` - PerformanceBarAllowedGroupPath string `json:"performance_bar_allowed_group_path"` - PerformanceBarEnabled bool `json:"performance_bar_enabled"` - PersonalAccessTokenPrefix string `json:"personal_access_token_prefix"` - PipelineLimitPerProjectUserSha int `json:"pipeline_limit_per_project_user_sha"` - PlantumlEnabled bool `json:"plantuml_enabled"` - PlantumlURL string `json:"plantuml_url"` - PollingIntervalMultiplier float64 `json:"polling_interval_multiplier,string"` - PreventMergeRequestsAuthorApproval bool `json:"prevent_merge_request_author_approval"` - PreventMergeRequestsCommittersApproval bool `json:"prevent_merge_request_committers_approval"` - ProjectDownloadExportLimit int `json:"project_download_export_limit"` - ProjectExportEnabled bool `json:"project_export_enabled"` - ProjectExportLimit int `json:"project_export_limit"` - ProjectImportLimit int `json:"project_import_limit"` - ProjectJobsAPIRateLimit int `json:"project_jobs_api_rate_limit"` - ProjectRunnerTokenExpirationInterval int `json:"project_runner_token_expiration_interval"` - ProjectsAPIRateLimitUnauthenticated int `json:"projects_api_rate_limit_unauthenticated"` - PrometheusMetricsEnabled bool `json:"prometheus_metrics_enabled"` - ProtectedCIVariables bool `json:"protected_ci_variables"` - PseudonymizerEnabled bool `json:"pseudonymizer_enabled"` - PushEventActivitiesLimit int `json:"push_event_activities_limit"` - PushEventHooksLimit int `json:"push_event_hooks_limit"` - PyPIPackageRequestsForwarding bool `json:"pypi_package_requests_forwarding"` - RSAKeyRestriction int `json:"rsa_key_restriction"` - RateLimitingResponseText string `json:"rate_limiting_response_text"` - RawBlobRequestLimit int `json:"raw_blob_request_limit"` - RecaptchaEnabled bool `json:"recaptcha_enabled"` - RecaptchaPrivateKey string `json:"recaptcha_private_key"` - RecaptchaSiteKey string `json:"recaptcha_site_key"` - ReceiveMaxInputSize int `json:"receive_max_input_size"` - ReceptiveClusterAgentsEnabled bool `json:"receptive_cluster_agents_enabled"` - RememberMeEnabled bool `json:"remember_me_enabled"` - RepositoryChecksEnabled bool `json:"repository_checks_enabled"` - RepositorySizeLimit int `json:"repository_size_limit"` - RepositoryStorages []string `json:"repository_storages"` - RepositoryStoragesWeighted map[string]int `json:"repository_storages_weighted"` - RequireAdminApprovalAfterUserSignup bool `json:"require_admin_approval_after_user_signup"` - RequireAdminTwoFactorAuthentication bool `json:"require_admin_two_factor_authentication"` - RequirePersonalAccessTokenExpiry bool `json:"require_personal_access_token_expiry"` - RequireTwoFactorAuthentication bool `json:"require_two_factor_authentication"` - RestrictedVisibilityLevels []VisibilityValue `json:"restricted_visibility_levels"` - RunnerTokenExpirationInterval int `json:"runner_token_expiration_interval"` - SearchRateLimit int `json:"search_rate_limit"` - SearchRateLimitUnauthenticated int `json:"search_rate_limit_unauthenticated"` - SecretDetectionRevocationTokenTypesURL string `json:"secret_detection_revocation_token_types_url"` - 
SecretDetectionTokenRevocationEnabled bool `json:"secret_detection_token_revocation_enabled"` - SecretDetectionTokenRevocationToken string `json:"secret_detection_token_revocation_token"` - SecretDetectionTokenRevocationURL string `json:"secret_detection_token_revocation_url"` - SecurityApprovalPoliciesLimit int `json:"security_approval_policies_limit"` - SecurityPolicyGlobalGroupApproversEnabled bool `json:"security_policy_global_group_approvers_enabled"` - SecurityTXTContent string `json:"security_txt_content"` - SendUserConfirmationEmail bool `json:"send_user_confirmation_email"` - SentryClientsideDSN string `json:"sentry_clientside_dsn"` - SentryDSN string `json:"sentry_dsn"` - SentryEnabled bool `json:"sentry_enabled"` - SentryEnvironment string `json:"sentry_environment"` - ServiceAccessTokensExpirationEnforced bool `json:"service_access_tokens_expiration_enforced"` - SessionExpireDelay int `json:"session_expire_delay"` - SharedRunnersEnabled bool `json:"shared_runners_enabled"` - SharedRunnersMinutes int `json:"shared_runners_minutes"` - SharedRunnersText string `json:"shared_runners_text"` - SidekiqJobLimiterCompressionThresholdBytes int `json:"sidekiq_job_limiter_compression_threshold_bytes"` - SidekiqJobLimiterLimitBytes int `json:"sidekiq_job_limiter_limit_bytes"` - SidekiqJobLimiterMode string `json:"sidekiq_job_limiter_mode"` - SignInText string `json:"sign_in_text"` - SignupEnabled bool `json:"signup_enabled"` - SilentAdminExportsEnabled bool `json:"silent_admin_exports_enabled"` - SilentModeEnabled bool `json:"silent_mode_enabled"` - SlackAppEnabled bool `json:"slack_app_enabled"` - SlackAppID string `json:"slack_app_id"` - SlackAppSecret string `json:"slack_app_secret"` - SlackAppSigningSecret string `json:"slack_app_signing_secret"` - SlackAppVerificationToken string `json:"slack_app_verification_token"` - SnippetSizeLimit int `json:"snippet_size_limit"` - SnowplowAppID string `json:"snowplow_app_id"` - SnowplowCollectorHostname string `json:"snowplow_collector_hostname"` - SnowplowCookieDomain string `json:"snowplow_cookie_domain"` - SnowplowDatabaseCollectorHostname string `json:"snowplow_database_collector_hostname"` - SnowplowEnabled bool `json:"snowplow_enabled"` - SourcegraphEnabled bool `json:"sourcegraph_enabled"` - SourcegraphPublicOnly bool `json:"sourcegraph_public_only"` - SourcegraphURL string `json:"sourcegraph_url"` - SpamCheckAPIKey string `json:"spam_check_api_key"` - SpamCheckEndpointEnabled bool `json:"spam_check_endpoint_enabled"` - SpamCheckEndpointURL string `json:"spam_check_endpoint_url"` - StaticObjectsExternalStorageAuthToken string `json:"static_objects_external_storage_auth_token"` - StaticObjectsExternalStorageURL string `json:"static_objects_external_storage_url"` - SuggestPipelineEnabled bool `json:"suggest_pipeline_enabled"` - TerminalMaxSessionTime int `json:"terminal_max_session_time"` - Terms string `json:"terms"` - ThrottleAuthenticatedAPIEnabled bool `json:"throttle_authenticated_api_enabled"` - ThrottleAuthenticatedAPIPeriodInSeconds int `json:"throttle_authenticated_api_period_in_seconds"` - ThrottleAuthenticatedAPIRequestsPerPeriod int `json:"throttle_authenticated_api_requests_per_period"` - ThrottleAuthenticatedDeprecatedAPIEnabled bool `json:"throttle_authenticated_deprecated_api_enabled"` - ThrottleAuthenticatedDeprecatedAPIPeriodInSeconds int `json:"throttle_authenticated_deprecated_api_period_in_seconds"` - ThrottleAuthenticatedDeprecatedAPIRequestsPerPeriod int `json:"throttle_authenticated_deprecated_api_requests_per_period"` 
- ThrottleAuthenticatedFilesAPIEnabled bool `json:"throttle_authenticated_files_api_enabled"` - ThrottleAuthenticatedFilesAPIPeriodInSeconds int `json:"throttle_authenticated_files_api_period_in_seconds"` - ThrottleAuthenticatedFilesAPIRequestsPerPeriod int `json:"throttle_authenticated_files_api_requests_per_period"` - ThrottleAuthenticatedGitLFSEnabled bool `json:"throttle_authenticated_git_lfs_enabled"` - ThrottleAuthenticatedGitLFSPeriodInSeconds int `json:"throttle_authenticated_git_lfs_period_in_seconds"` - ThrottleAuthenticatedGitLFSRequestsPerPeriod int `json:"throttle_authenticated_git_lfs_requests_per_period"` - ThrottleAuthenticatedPackagesAPIEnabled bool `json:"throttle_authenticated_packages_api_enabled"` - ThrottleAuthenticatedPackagesAPIPeriodInSeconds int `json:"throttle_authenticated_packages_api_period_in_seconds"` - ThrottleAuthenticatedPackagesAPIRequestsPerPeriod int `json:"throttle_authenticated_packages_api_requests_per_period"` - ThrottleAuthenticatedWebEnabled bool `json:"throttle_authenticated_web_enabled"` - ThrottleAuthenticatedWebPeriodInSeconds int `json:"throttle_authenticated_web_period_in_seconds"` - ThrottleAuthenticatedWebRequestsPerPeriod int `json:"throttle_authenticated_web_requests_per_period"` - ThrottleIncidentManagementNotificationEnabled bool `json:"throttle_incident_management_notification_enabled"` - ThrottleIncidentManagementNotificationPerPeriod int `json:"throttle_incident_management_notification_per_period"` - ThrottleIncidentManagementNotificationPeriodInSeconds int `json:"throttle_incident_management_notification_period_in_seconds"` - ThrottleProtectedPathsEnabled bool `json:"throttle_protected_paths_enabled"` - ThrottleProtectedPathsPeriodInSeconds int `json:"throttle_protected_paths_period_in_seconds"` - ThrottleProtectedPathsRequestsPerPeriod int `json:"throttle_protected_paths_requests_per_period"` - ThrottleUnauthenticatedAPIEnabled bool `json:"throttle_unauthenticated_api_enabled"` - ThrottleUnauthenticatedAPIPeriodInSeconds int `json:"throttle_unauthenticated_api_period_in_seconds"` - ThrottleUnauthenticatedAPIRequestsPerPeriod int `json:"throttle_unauthenticated_api_requests_per_period"` - ThrottleUnauthenticatedDeprecatedAPIEnabled bool `json:"throttle_unauthenticated_deprecated_api_enabled"` - ThrottleUnauthenticatedDeprecatedAPIPeriodInSeconds int `json:"throttle_unauthenticated_deprecated_api_period_in_seconds"` - ThrottleUnauthenticatedDeprecatedAPIRequestsPerPeriod int `json:"throttle_unauthenticated_deprecated_api_requests_per_period"` - ThrottleUnauthenticatedFilesAPIEnabled bool `json:"throttle_unauthenticated_files_api_enabled"` - ThrottleUnauthenticatedFilesAPIPeriodInSeconds int `json:"throttle_unauthenticated_files_api_period_in_seconds"` - ThrottleUnauthenticatedFilesAPIRequestsPerPeriod int `json:"throttle_unauthenticated_files_api_requests_per_period"` - ThrottleUnauthenticatedGitLFSEnabled bool `json:"throttle_unauthenticated_git_lfs_enabled"` - ThrottleUnauthenticatedGitLFSPeriodInSeconds int `json:"throttle_unauthenticated_git_lfs_period_in_seconds"` - ThrottleUnauthenticatedGitLFSRequestsPerPeriod int `json:"throttle_unauthenticated_git_lfs_requests_per_period"` - ThrottleUnauthenticatedPackagesAPIEnabled bool `json:"throttle_unauthenticated_packages_api_enabled"` - ThrottleUnauthenticatedPackagesAPIPeriodInSeconds int `json:"throttle_unauthenticated_packages_api_period_in_seconds"` - ThrottleUnauthenticatedPackagesAPIRequestsPerPeriod int `json:"throttle_unauthenticated_packages_api_requests_per_period"` - 
ThrottleUnauthenticatedWebEnabled bool `json:"throttle_unauthenticated_web_enabled"` - ThrottleUnauthenticatedWebPeriodInSeconds int `json:"throttle_unauthenticated_web_period_in_seconds"` - ThrottleUnauthenticatedWebRequestsPerPeriod int `json:"throttle_unauthenticated_web_requests_per_period"` - TimeTrackingLimitToHours bool `json:"time_tracking_limit_to_hours"` - TwoFactorGracePeriod int `json:"two_factor_grace_period"` - UnconfirmedUsersDeleteAfterDays int `json:"unconfirmed_users_delete_after_days"` - UniqueIPsLimitEnabled bool `json:"unique_ips_limit_enabled"` - UniqueIPsLimitPerUser int `json:"unique_ips_limit_per_user"` - UniqueIPsLimitTimeWindow int `json:"unique_ips_limit_time_window"` - UpdateRunnerVersionsEnabled bool `json:"update_runner_versions_enabled"` - UpdatedAt *time.Time `json:"updated_at"` - UpdatingNameDisabledForUsers bool `json:"updating_name_disabled_for_users"` - UsagePingEnabled bool `json:"usage_ping_enabled"` - UsagePingFeaturesEnabled bool `json:"usage_ping_features_enabled"` - UseClickhouseForAnalytics bool `json:"use_clickhouse_for_analytics"` - UserDeactivationEmailsEnabled bool `json:"user_deactivation_emails_enabled"` - UserDefaultExternal bool `json:"user_default_external"` - UserDefaultInternalRegex string `json:"user_default_internal_regex"` - UserDefaultsToPrivateProfile bool `json:"user_defaults_to_private_profile"` - UserOauthApplications bool `json:"user_oauth_applications"` - UserShowAddSSHKeyMessage bool `json:"user_show_add_ssh_key_message"` - UsersGetByIDLimit int `json:"users_get_by_id_limit"` - UsersGetByIDLimitAllowlistRaw string `json:"users_get_by_id_limit_allowlist_raw"` - ValidRunnerRegistrars []string `json:"valid_runner_registrars"` - VersionCheckEnabled bool `json:"version_check_enabled"` - WebIDEClientsidePreviewEnabled bool `json:"web_ide_clientside_preview_enabled"` - WhatsNewVariant string `json:"whats_new_variant"` - WikiPageMaxContentBytes int `json:"wiki_page_max_content_bytes"` - - // Deprecated: Use AbuseNotificationEmail instead. - AdminNotificationEmail string `json:"admin_notification_email"` - // Deprecated: Use AllowLocalRequestsFromWebHooksAndServices instead. - AllowLocalRequestsFromHooksAndServices bool `json:"allow_local_requests_from_hooks_and_services"` - // Deprecated: Use AssetProxyAllowlist instead. - AssetProxyWhitelist []string `json:"asset_proxy_whitelist"` - // Deprecated: Use ThrottleUnauthenticatedWebEnabled or ThrottleUnauthenticatedAPIEnabled instead. (Deprecated in GitLab 14.3) - ThrottleUnauthenticatedEnabled bool `json:"throttle_unauthenticated_enabled"` - // Deprecated: Use ThrottleUnauthenticatedWebPeriodInSeconds or ThrottleUnauthenticatedAPIPeriodInSeconds instead. (Deprecated in GitLab 14.3) - ThrottleUnauthenticatedPeriodInSeconds int `json:"throttle_unauthenticated_period_in_seconds"` - // Deprecated: Use ThrottleUnauthenticatedWebRequestsPerPeriod or ThrottleUnauthenticatedAPIRequestsPerPeriod instead. (Deprecated in GitLab 14.3) - ThrottleUnauthenticatedRequestsPerPeriod int `json:"throttle_unauthenticated_requests_per_period"` - // Deprecated: Replaced by SearchRateLimit in GitLab 14.9 (removed in 15.0). - UserEmailLookupLimit int `json:"user_email_lookup_limit"` -} - -// BranchProtectionDefaults represents default Git protected branch permissions. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#options-for-default_branch_protection_defaults -type BranchProtectionDefaults struct { - AllowedToPush []int `json:"allowed_to_push,omitempty"` - AllowForcePush bool `json:"allow_force_push,omitempty"` - AllowedToMerge []int `json:"allowed_to_merge,omitempty"` - DeveloperCanInitialPush bool `json:"developer_can_initial_push,omitempty"` -} - -// Settings requires a custom unmarshaller in order to properly unmarshal -// `container_registry_import_created_before` which is either a time.Time or -// an empty string if no value is set. -func (s *Settings) UnmarshalJSON(data []byte) error { - type Alias Settings - - raw := make(map[string]interface{}) - err := json.Unmarshal(data, &raw) - if err != nil { - return err - } - - // If empty string, remove the value to leave it nil in the response. - if v, ok := raw["container_registry_import_created_before"]; ok && v == "" { - delete(raw, "container_registry_import_created_before") - - data, err = json.Marshal(raw) - if err != nil { - return err - } - } - - return json.Unmarshal(data, (*Alias)(s)) -} - -func (s Settings) String() string { - return Stringify(s) -} - -// GetSettings gets the current application settings. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/settings.html#get-current-application-settings -func (s *SettingsService) GetSettings(options ...RequestOptionFunc) (*Settings, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "application/settings", nil, options) - if err != nil { - return nil, nil, err - } - - as := new(Settings) - resp, err := s.client.Do(req, as) - if err != nil { - return nil, resp, err - } - - return as, resp, nil -} -
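For the settings API above: GetSettings returns the full Settings struct, and the custom UnmarshalJSON leaves ContainerRegistryImportCreatedBefore nil whenever the server sends an empty string instead of a timestamp. A usage sketch (assuming an authenticated *gitlab.Client named git and an fmt import; the helper is illustrative, not part of this patch):

	// printInstanceSettings is a hypothetical helper (not from this patch):
	// it fetches the instance-wide application settings and prints two fields.
	func printInstanceSettings(git *gitlab.Client) error {
		settings, _, err := git.Settings.GetSettings()
		if err != nil {
			return err
		}
		fmt.Println("default branch name:", settings.DefaultBranchName)
		// nil here means the API returned "" for the import cutoff.
		fmt.Println("registry import cutoff:", settings.ContainerRegistryImportCreatedBefore)
		return nil
	}

-// UpdateSettingsOptions represents the available UpdateSettings() options.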
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/settings.html#change-application-settings -type UpdateSettingsOptions struct { - AbuseNotificationEmail *string `url:"abuse_notification_email,omitempty" json:"abuse_notification_email,omitempty"` - AdminMode *bool `url:"admin_mode,omitempty" json:"admin_mode,omitempty"` - AdminNotificationEmail *string `url:"admin_notification_email,omitempty" json:"admin_notification_email,omitempty"` - AfterSignOutPath *string `url:"after_sign_out_path,omitempty" json:"after_sign_out_path,omitempty"` - AfterSignUpText *string `url:"after_sign_up_text,omitempty" json:"after_sign_up_text,omitempty"` - AkismetAPIKey *string `url:"akismet_api_key,omitempty" json:"akismet_api_key,omitempty"` - AkismetEnabled *bool `url:"akismet_enabled,omitempty" json:"akismet_enabled,omitempty"` - AllowAccountDeletion *bool `url:"allow_account_deletion,omitempty" json:"allow_account_deletion,omitempty"` - AllowGroupOwnersToManageLDAP *bool `url:"allow_group_owners_to_manage_ldap,omitempty" json:"allow_group_owners_to_manage_ldap,omitempty"` - AllowLocalRequestsFromHooksAndServices *bool `url:"allow_local_requests_from_hooks_and_services,omitempty" json:"allow_local_requests_from_hooks_and_services,omitempty"` - AllowLocalRequestsFromSystemHooks *bool `url:"allow_local_requests_from_system_hooks,omitempty" json:"allow_local_requests_from_system_hooks,omitempty"` - AllowLocalRequestsFromWebHooksAndServices *bool `url:"allow_local_requests_from_web_hooks_and_services,omitempty" json:"allow_local_requests_from_web_hooks_and_services,omitempty"` - AllowProjectCreationForGuestAndBelow *bool `url:"allow_project_creation_for_guest_and_below,omitempty" json:"allow_project_creation_for_guest_and_below,omitempty"` - AllowRunnerRegistrationToken *bool `url:"allow_runner_registration_token,omitempty" json:"allow_runner_registration_token,omitempty"` - ArchiveBuildsInHumanReadable *string `url:"archive_builds_in_human_readable,omitempty" json:"archive_builds_in_human_readable,omitempty"` - ASCIIDocMaxIncludes *int `url:"asciidoc_max_includes,omitempty" json:"asciidoc_max_includes,omitempty"` - AssetProxyAllowlist *[]string `url:"asset_proxy_allowlist,omitempty" json:"asset_proxy_allowlist,omitempty"` - AssetProxyEnabled *bool `url:"asset_proxy_enabled,omitempty" json:"asset_proxy_enabled,omitempty"` - AssetProxySecretKey *string `url:"asset_proxy_secret_key,omitempty" json:"asset_proxy_secret_key,omitempty"` - AssetProxyURL *string `url:"asset_proxy_url,omitempty" json:"asset_proxy_url,omitempty"` - AssetProxyWhitelist *[]string `url:"asset_proxy_whitelist,omitempty" json:"asset_proxy_whitelist,omitempty"` - AuthorizedKeysEnabled *bool `url:"authorized_keys_enabled,omitempty" json:"authorized_keys_enabled,omitempty"` - AutoBanUserOnExcessiveProjectsDownload *bool `url:"auto_ban_user_on_excessive_projects_download,omitempty" json:"auto_ban_user_on_excessive_projects_download,omitempty"` - AutoDevOpsDomain *string `url:"auto_devops_domain,omitempty" json:"auto_devops_domain,omitempty"` - AutoDevOpsEnabled *bool `url:"auto_devops_enabled,omitempty" json:"auto_devops_enabled,omitempty"` - AutomaticPurchasedStorageAllocation *bool `url:"automatic_purchased_storage_allocation,omitempty" json:"automatic_purchased_storage_allocation,omitempty"` - BulkImportConcurrentPipelineBatchLimit *int `url:"bulk_import_concurrent_pipeline_batch_limit,omitempty" json:"bulk_import_concurrent_pipeline_batch_limit,omitempty"` - BulkImportEnabled *bool `url:"bulk_import_enabled,omitempty" 
json:"bulk_import_enabled,omitempty"` - BulkImportMaxDownloadFileSize *int `url:"bulk_import_max_download_file_size,omitempty" json:"bulk_import_max_download_file_size,omitempty"` - CanCreateGroup *bool `url:"can_create_group,omitempty" json:"can_create_group,omitempty"` - CheckNamespacePlan *bool `url:"check_namespace_plan,omitempty" json:"check_namespace_plan,omitempty"` - CIMaxIncludes *int `url:"ci_max_includes,omitempty" json:"ci_max_includes,omitempty"` - CIMaxTotalYAMLSizeBytes *int `url:"ci_max_total_yaml_size_bytes,omitempty" json:"ci_max_total_yaml_size_bytes,omitempty"` - CommitEmailHostname *string `url:"commit_email_hostname,omitempty" json:"commit_email_hostname,omitempty"` - ConcurrentBitbucketImportJobsLimit *int `url:"concurrent_bitbucket_import_jobs_limit,omitempty" json:"concurrent_bitbucket_import_jobs_limit,omitempty"` - ConcurrentBitbucketServerImportJobsLimit *int `url:"concurrent_bitbucket_server_import_jobs_limit,omitempty" json:"concurrent_bitbucket_server_import_jobs_limit,omitempty"` - ConcurrentGitHubImportJobsLimit *int `url:"concurrent_github_import_jobs_limit,omitempty" json:"concurrent_github_import_jobs_limit,omitempty"` - ContainerExpirationPoliciesEnableHistoricEntries *bool `url:"container_expiration_policies_enable_historic_entries,omitempty" json:"container_expiration_policies_enable_historic_entries,omitempty"` - ContainerRegistryCleanupTagsServiceMaxListSize *int `url:"container_registry_cleanup_tags_service_max_list_size,omitempty" json:"container_registry_cleanup_tags_service_max_list_size,omitempty"` - ContainerRegistryDeleteTagsServiceTimeout *int `url:"container_registry_delete_tags_service_timeout,omitempty" json:"container_registry_delete_tags_service_timeout,omitempty"` - ContainerRegistryExpirationPoliciesCaching *bool `url:"container_registry_expiration_policies_caching,omitempty" json:"container_registry_expiration_policies_caching,omitempty"` - ContainerRegistryExpirationPoliciesWorkerCapacity *int `url:"container_registry_expiration_policies_worker_capacity,omitempty" json:"container_registry_expiration_policies_worker_capacity,omitempty"` - ContainerRegistryImportCreatedBefore *time.Time `url:"container_registry_import_created_before,omitempty" json:"container_registry_import_created_before,omitempty"` - ContainerRegistryImportMaxRetries *int `url:"container_registry_import_max_retries,omitempty" json:"container_registry_import_max_retries,omitempty"` - ContainerRegistryImportMaxStepDuration *int `url:"container_registry_import_max_step_duration,omitempty" json:"container_registry_import_max_step_duration,omitempty"` - ContainerRegistryImportMaxTagsCount *int `url:"container_registry_import_max_tags_count,omitempty" json:"container_registry_import_max_tags_count,omitempty"` - ContainerRegistryImportStartMaxRetries *int `url:"container_registry_import_start_max_retries,omitempty" json:"container_registry_import_start_max_retries,omitempty"` - ContainerRegistryImportTargetPlan *string `url:"container_registry_import_target_plan,omitempty" json:"container_registry_import_target_plan,omitempty"` - ContainerRegistryTokenExpireDelay *int `url:"container_registry_token_expire_delay,omitempty" json:"container_registry_token_expire_delay,omitempty"` - CustomHTTPCloneURLRoot *string `url:"custom_http_clone_url_root,omitempty" json:"custom_http_clone_url_root,omitempty"` - DNSRebindingProtectionEnabled *bool `url:"dns_rebinding_protection_enabled,omitempty" json:"dns_rebinding_protection_enabled,omitempty"` - DSAKeyRestriction *int 
`url:"dsa_key_restriction,omitempty" json:"dsa_key_restriction,omitempty"` - DeactivateDormantUsers *bool `url:"deactivate_dormant_users,omitempty" json:"deactivate_dormant_users,omitempty"` - DeactivateDormantUsersPeriod *int `url:"deactivate_dormant_users_period,omitempty" json:"deactivate_dormant_users_period,omitempty"` - DecompressArchiveFileTimeout *int `url:"decompress_archive_file_timeout,omitempty" json:"decompress_archive_file_timeout,omitempty"` - DefaultArtifactsExpireIn *string `url:"default_artifacts_expire_in,omitempty" json:"default_artifacts_expire_in,omitempty"` - DefaultBranchName *string `url:"default_branch_name,omitempty" json:"default_branch_name,omitempty"` - DefaultBranchProtection *int `url:"default_branch_protection,omitempty" json:"default_branch_protection,omitempty"` - DefaultBranchProtectionDefaults *BranchProtectionDefaultsOptions `url:"default_branch_protection_defaults,omitempty" json:"default_branch_protection_defaults,omitempty"` - DefaultCiConfigPath *string `url:"default_ci_config_path,omitempty" json:"default_ci_config_path,omitempty"` - DefaultGroupVisibility *VisibilityValue `url:"default_group_visibility,omitempty" json:"default_group_visibility,omitempty"` - DefaultPreferredLanguage *string `url:"default_preferred_language,omitempty" json:"default_preferred_language,omitempty"` - DefaultProjectCreation *int `url:"default_project_creation,omitempty" json:"default_project_creation,omitempty"` - DefaultProjectDeletionProtection *bool `url:"default_project_deletion_protection,omitempty" json:"default_project_deletion_protection,omitempty"` - DefaultProjectVisibility *VisibilityValue `url:"default_project_visibility,omitempty" json:"default_project_visibility,omitempty"` - DefaultProjectsLimit *int `url:"default_projects_limit,omitempty" json:"default_projects_limit,omitempty"` - DefaultSnippetVisibility *VisibilityValue `url:"default_snippet_visibility,omitempty" json:"default_snippet_visibility,omitempty"` - DefaultSyntaxHighlightingTheme *int `url:"default_syntax_highlighting_theme,omitempty" json:"default_syntax_highlighting_theme,omitempty"` - DelayedGroupDeletion *bool `url:"delayed_group_deletion,omitempty" json:"delayed_group_deletion,omitempty"` - DelayedProjectDeletion *bool `url:"delayed_project_deletion,omitempty" json:"delayed_project_deletion,omitempty"` - DeleteInactiveProjects *bool `url:"delete_inactive_projects,omitempty" json:"delete_inactive_projects,omitempty"` - DeleteUnconfirmedUsers *bool `url:"delete_unconfirmed_users,omitempty" json:"delete_unconfirmed_users,omitempty"` - DeletionAdjournedPeriod *int `url:"deletion_adjourned_period,omitempty" json:"deletion_adjourned_period,omitempty"` - DiagramsnetEnabled *bool `url:"diagramsnet_enabled,omitempty" json:"diagramsnet_enabled,omitempty"` - DiagramsnetURL *string `url:"diagramsnet_url,omitempty" json:"diagramsnet_url,omitempty"` - DiffMaxFiles *int `url:"diff_max_files,omitempty" json:"diff_max_files,omitempty"` - DiffMaxLines *int `url:"diff_max_lines,omitempty" json:"diff_max_lines,omitempty"` - DiffMaxPatchBytes *int `url:"diff_max_patch_bytes,omitempty" json:"diff_max_patch_bytes,omitempty"` - DisableFeedToken *bool `url:"disable_feed_token,omitempty" json:"disable_feed_token,omitempty"` - DisableAdminOAuthScopes *bool `url:"disable_admin_oauth_scopes,omitempty" json:"disable_admin_oauth_scopes,omitempty"` - DisableOverridingApproversPerMergeRequest *bool `url:"disable_overriding_approvers_per_merge_request,omitempty" 
json:"disable_overriding_approvers_per_merge_request,omitempty"` - DisablePersonalAccessTokens *bool `url:"disable_personal_access_tokens,omitempty" json:"disable_personal_access_tokens,omitempty"` - DisabledOauthSignInSources *[]string `url:"disabled_oauth_sign_in_sources,omitempty" json:"disabled_oauth_sign_in_sources,omitempty"` - DomainAllowlist *[]string `url:"domain_allowlist,omitempty" json:"domain_allowlist,omitempty"` - DomainDenylist *[]string `url:"domain_denylist,omitempty" json:"domain_denylist,omitempty"` - DomainDenylistEnabled *bool `url:"domain_denylist_enabled,omitempty" json:"domain_denylist_enabled,omitempty"` - DownstreamPipelineTriggerLimitPerProjectUserSHA *int `url:"downstream_pipeline_trigger_limit_per_project_user_sha,omitempty" json:"downstream_pipeline_trigger_limit_per_project_user_sha,omitempty"` - DuoFeaturesEnabled *bool `url:"duo_features_enabled,omitempty" json:"duo_features_enabled,omitempty"` - ECDSAKeyRestriction *int `url:"ecdsa_key_restriction,omitempty" json:"ecdsa_key_restriction,omitempty"` - ECDSASKKeyRestriction *int `url:"ecdsa_sk_key_restriction,omitempty" json:"ecdsa_sk_key_restriction,omitempty"` - EKSAccessKeyID *string `url:"eks_access_key_id,omitempty" json:"eks_access_key_id,omitempty"` - EKSAccountID *string `url:"eks_account_id,omitempty" json:"eks_account_id,omitempty"` - EKSIntegrationEnabled *bool `url:"eks_integration_enabled,omitempty" json:"eks_integration_enabled,omitempty"` - EKSSecretAccessKey *string `url:"eks_secret_access_key,omitempty" json:"eks_secret_access_key,omitempty"` - Ed25519KeyRestriction *int `url:"ed25519_key_restriction,omitempty" json:"ed25519_key_restriction,omitempty"` - Ed25519SKKeyRestriction *int `url:"ed25519_sk_key_restriction,omitempty" json:"ed25519_sk_key_restriction,omitempty"` - ElasticsearchAWS *bool `url:"elasticsearch_aws,omitempty" json:"elasticsearch_aws,omitempty"` - ElasticsearchAWSAccessKey *string `url:"elasticsearch_aws_access_key,omitempty" json:"elasticsearch_aws_access_key,omitempty"` - ElasticsearchAWSRegion *string `url:"elasticsearch_aws_region,omitempty" json:"elasticsearch_aws_region,omitempty"` - ElasticsearchAWSSecretAccessKey *string `url:"elasticsearch_aws_secret_access_key,omitempty" json:"elasticsearch_aws_secret_access_key,omitempty"` - ElasticsearchAnalyzersKuromojiEnabled *bool `url:"elasticsearch_analyzers_kuromoji_enabled,omitempty" json:"elasticsearch_analyzers_kuromoji_enabled,omitempty"` - ElasticsearchAnalyzersKuromojiSearch *int `url:"elasticsearch_analyzers_kuromoji_search,omitempty" json:"elasticsearch_analyzers_kuromoji_search,omitempty"` - ElasticsearchAnalyzersSmartCNEnabled *bool `url:"elasticsearch_analyzers_smartcn_enabled,omitempty" json:"elasticsearch_analyzers_smartcn_enabled,omitempty"` - ElasticsearchAnalyzersSmartCNSearch *int `url:"elasticsearch_analyzers_smartcn_search,omitempty" json:"elasticsearch_analyzers_smartcn_search,omitempty"` - ElasticsearchClientRequestTimeout *int `url:"elasticsearch_client_request_timeout,omitempty" json:"elasticsearch_client_request_timeout,omitempty"` - ElasticsearchIndexedFieldLengthLimit *int `url:"elasticsearch_indexed_field_length_limit,omitempty" json:"elasticsearch_indexed_field_length_limit,omitempty"` - ElasticsearchIndexedFileSizeLimitKB *int `url:"elasticsearch_indexed_file_size_limit_kb,omitempty" json:"elasticsearch_indexed_file_size_limit_kb,omitempty"` - ElasticsearchIndexing *bool `url:"elasticsearch_indexing,omitempty" json:"elasticsearch_indexing,omitempty"` - ElasticsearchLimitIndexing *bool 
`url:"elasticsearch_limit_indexing,omitempty" json:"elasticsearch_limit_indexing,omitempty"` - ElasticsearchMaxBulkConcurrency *int `url:"elasticsearch_max_bulk_concurrency,omitempty" json:"elasticsearch_max_bulk_concurrency,omitempty"` - ElasticsearchMaxBulkSizeMB *int `url:"elasticsearch_max_bulk_size_mb,omitempty" json:"elasticsearch_max_bulk_size_mb,omitempty"` - ElasticsearchMaxCodeIndexingConcurrency *int `url:"elasticsearch_max_code_indexing_concurrency,omitempty" json:"elasticsearch_max_code_indexing_concurrency,omitempty"` - ElasticsearchNamespaceIDs *[]int `url:"elasticsearch_namespace_ids,omitempty" json:"elasticsearch_namespace_ids,omitempty"` - ElasticsearchPassword *string `url:"elasticsearch_password,omitempty" json:"elasticsearch_password,omitempty"` - ElasticsearchPauseIndexing *bool `url:"elasticsearch_pause_indexing,omitempty" json:"elasticsearch_pause_indexing,omitempty"` - ElasticsearchProjectIDs *[]int `url:"elasticsearch_project_ids,omitempty" json:"elasticsearch_project_ids,omitempty"` - ElasticsearchReplicas *int `url:"elasticsearch_replicas,omitempty" json:"elasticsearch_replicas,omitempty"` - ElasticsearchRequeueWorkers *bool `url:"elasticsearch_requeue_workers,omitempty" json:"elasticsearch_requeue_workers,omitempty"` - ElasticsearchSearch *bool `url:"elasticsearch_search,omitempty" json:"elasticsearch_search,omitempty"` - ElasticsearchShards *int `url:"elasticsearch_shards,omitempty" json:"elasticsearch_shards,omitempty"` - ElasticsearchURL *string `url:"elasticsearch_url,omitempty" json:"elasticsearch_url,omitempty"` - ElasticsearchUsername *string `url:"elasticsearch_username,omitempty" json:"elasticsearch_username,omitempty"` - ElasticsearchWorkerNumberOfShards *int `url:"elasticsearch_worker_number_of_shards,omitempty" json:"elasticsearch_worker_number_of_shards,omitempty"` - EmailAdditionalText *string `url:"email_additional_text,omitempty" json:"email_additional_text,omitempty"` - EmailAuthorInBody *bool `url:"email_author_in_body,omitempty" json:"email_author_in_body,omitempty"` - EmailConfirmationSetting *string `url:"email_confirmation_setting,omitempty" json:"email_confirmation_setting,omitempty"` - EmailRestrictions *string `url:"email_restrictions,omitempty" json:"email_restrictions,omitempty"` - EmailRestrictionsEnabled *bool `url:"email_restrictions_enabled,omitempty" json:"email_restrictions_enabled,omitempty"` - EnableArtifactExternalRedirectWarningPage *bool `url:"enable_artifact_external_redirect_warning_page,omitempty" json:"enable_artifact_external_redirect_warning_page,omitempty"` - EnabledGitAccessProtocol *string `url:"enabled_git_access_protocol,omitempty" json:"enabled_git_access_protocol,omitempty"` - EnforceNamespaceStorageLimit *bool `url:"enforce_namespace_storage_limit,omitempty" json:"enforce_namespace_storage_limit,omitempty"` - EnforcePATExpiration *bool `url:"enforce_pat_expiration,omitempty" json:"enforce_pat_expiration,omitempty"` - EnforceSSHKeyExpiration *bool `url:"enforce_ssh_key_expiration,omitempty" json:"enforce_ssh_key_expiration,omitempty"` - EnforceTerms *bool `url:"enforce_terms,omitempty" json:"enforce_terms,omitempty"` - ExternalAuthClientCert *string `url:"external_auth_client_cert,omitempty" json:"external_auth_client_cert,omitempty"` - ExternalAuthClientKey *string `url:"external_auth_client_key,omitempty" json:"external_auth_client_key,omitempty"` - ExternalAuthClientKeyPass *string `url:"external_auth_client_key_pass,omitempty" json:"external_auth_client_key_pass,omitempty"` - 
ExternalAuthorizationServiceDefaultLabel *string `url:"external_authorization_service_default_label,omitempty" json:"external_authorization_service_default_label,omitempty"` - ExternalAuthorizationServiceEnabled *bool `url:"external_authorization_service_enabled,omitempty" json:"external_authorization_service_enabled,omitempty"` - ExternalAuthorizationServiceTimeout *float64 `url:"external_authorization_service_timeout,omitempty" json:"external_authorization_service_timeout,omitempty"` - ExternalAuthorizationServiceURL *string `url:"external_authorization_service_url,omitempty" json:"external_authorization_service_url,omitempty"` - ExternalPipelineValidationServiceTimeout *int `url:"external_pipeline_validation_service_timeout,omitempty" json:"external_pipeline_validation_service_timeout,omitempty"` - ExternalPipelineValidationServiceToken *string `url:"external_pipeline_validation_service_token,omitempty" json:"external_pipeline_validation_service_token,omitempty"` - ExternalPipelineValidationServiceURL *string `url:"external_pipeline_validation_service_url,omitempty" json:"external_pipeline_validation_service_url,omitempty"` - FailedLoginAttemptsUnlockPeriodInMinutes *int `url:"failed_login_attempts_unlock_period_in_minutes,omitempty" json:"failed_login_attempts_unlock_period_in_minutes,omitempty"` - FileTemplateProjectID *int `url:"file_template_project_id,omitempty" json:"file_template_project_id,omitempty"` - FirstDayOfWeek *int `url:"first_day_of_week,omitempty" json:"first_day_of_week,omitempty"` - FlocEnabled *bool `url:"floc_enabled,omitempty" json:"floc_enabled,omitempty"` - GeoNodeAllowedIPs *string `url:"geo_node_allowed_ips,omitempty" json:"geo_node_allowed_ips,omitempty"` - GeoStatusTimeout *int `url:"geo_status_timeout,omitempty" json:"geo_status_timeout,omitempty"` - GitRateLimitUsersAlertlist *[]string `url:"git_rate_limit_users_alertlist,omitempty" json:"git_rate_limit_users_alertlist,omitempty"` - GitTwoFactorSessionExpiry *int `url:"git_two_factor_session_expiry,omitempty" json:"git_two_factor_session_expiry,omitempty"` - GitalyTimeoutDefault *int `url:"gitaly_timeout_default,omitempty" json:"gitaly_timeout_default,omitempty"` - GitalyTimeoutFast *int `url:"gitaly_timeout_fast,omitempty" json:"gitaly_timeout_fast,omitempty"` - GitalyTimeoutMedium *int `url:"gitaly_timeout_medium,omitempty" json:"gitaly_timeout_medium,omitempty"` - GitlabDedicatedInstance *bool `url:"gitlab_dedicated_instance,omitempty" json:"gitlab_dedicated_instance,omitempty"` - GitlabEnvironmentToolkitInstance *bool `url:"gitlab_environment_toolkit_instance,omitempty" json:"gitlab_environment_toolkit_instance,omitempty"` - GitlabShellOperationLimit *int `url:"gitlab_shell_operation_limit,omitempty" json:"gitlab_shell_operation_limit,omitempty"` - GitpodEnabled *bool `url:"gitpod_enabled,omitempty" json:"gitpod_enabled,omitempty"` - GitpodURL *string `url:"gitpod_url,omitempty" json:"gitpod_url,omitempty"` - GitRateLimitUsersAllowlist *[]string `url:"git_rate_limit_users_allowlist,omitempty" json:"git_rate_limit_users_allowlist,omitempty"` - GloballyAllowedIPs *string `url:"globally_allowed_ips,omitempty" json:"globally_allowed_ips,omitempty"` - GrafanaEnabled *bool `url:"grafana_enabled,omitempty" json:"grafana_enabled,omitempty"` - GrafanaURL *string `url:"grafana_url,omitempty" json:"grafana_url,omitempty"` - GravatarEnabled *bool `url:"gravatar_enabled,omitempty" json:"gravatar_enabled,omitempty"` - GroupDownloadExportLimit *int `url:"group_download_export_limit,omitempty" 
json:"group_download_export_limit,omitempty"` - GroupExportLimit *int `url:"group_export_limit,omitempty" json:"group_export_limit,omitempty"` - GroupImportLimit *int `url:"group_import_limit,omitempty" json:"group_import_limit,omitempty"` - GroupOwnersCanManageDefaultBranchProtection *bool `url:"group_owners_can_manage_default_branch_protection,omitempty" json:"group_owners_can_manage_default_branch_protection,omitempty"` - GroupRunnerTokenExpirationInterval *int `url:"group_runner_token_expiration_interval,omitempty" json:"group_runner_token_expiration_interval,omitempty"` - HTMLEmailsEnabled *bool `url:"html_emails_enabled,omitempty" json:"html_emails_enabled,omitempty"` - HashedStorageEnabled *bool `url:"hashed_storage_enabled,omitempty" json:"hashed_storage_enabled,omitempty"` - HelpPageDocumentationBaseURL *string `url:"help_page_documentation_base_url,omitempty" json:"help_page_documentation_base_url,omitempty"` - HelpPageHideCommercialContent *bool `url:"help_page_hide_commercial_content,omitempty" json:"help_page_hide_commercial_content,omitempty"` - HelpPageSupportURL *string `url:"help_page_support_url,omitempty" json:"help_page_support_url,omitempty"` - HelpPageText *string `url:"help_page_text,omitempty" json:"help_page_text,omitempty"` - HelpText *string `url:"help_text,omitempty" json:"help_text,omitempty"` - HideThirdPartyOffers *bool `url:"hide_third_party_offers,omitempty" json:"hide_third_party_offers,omitempty"` - HomePageURL *string `url:"home_page_url,omitempty" json:"home_page_url,omitempty"` - HousekeepingBitmapsEnabled *bool `url:"housekeeping_bitmaps_enabled,omitempty" json:"housekeeping_bitmaps_enabled,omitempty"` - HousekeepingEnabled *bool `url:"housekeeping_enabled,omitempty" json:"housekeeping_enabled,omitempty"` - HousekeepingFullRepackPeriod *int `url:"housekeeping_full_repack_period,omitempty" json:"housekeeping_full_repack_period,omitempty"` - HousekeepingGcPeriod *int `url:"housekeeping_gc_period,omitempty" json:"housekeeping_gc_period,omitempty"` - HousekeepingIncrementalRepackPeriod *int `url:"housekeeping_incremental_repack_period,omitempty" json:"housekeeping_incremental_repack_period,omitempty"` - HousekeepingOptimizeRepositoryPeriod *int `url:"housekeeping_optimize_repository_period,omitempty" json:"housekeeping_optimize_repository_period,omitempty"` - ImportSources *[]string `url:"import_sources,omitempty" json:"import_sources,omitempty"` - InactiveProjectsDeleteAfterMonths *int `url:"inactive_projects_delete_after_months,omitempty" json:"inactive_projects_delete_after_months,omitempty"` - InactiveProjectsMinSizeMB *int `url:"inactive_projects_min_size_mb,omitempty" json:"inactive_projects_min_size_mb,omitempty"` - InactiveProjectsSendWarningEmailAfterMonths *int `url:"inactive_projects_send_warning_email_after_months,omitempty" json:"inactive_projects_send_warning_email_after_months,omitempty"` - IncludeOptionalMetricsInServicePing *bool `url:"include_optional_metrics_in_service_ping,omitempty" json:"include_optional_metrics_in_service_ping,omitempty"` - InProductMarketingEmailsEnabled *bool `url:"in_product_marketing_emails_enabled,omitempty" json:"in_product_marketing_emails_enabled,omitempty"` - InvisibleCaptchaEnabled *bool `url:"invisible_captcha_enabled,omitempty" json:"invisible_captcha_enabled,omitempty"` - IssuesCreateLimit *int `url:"issues_create_limit,omitempty" json:"issues_create_limit,omitempty"` - JiraConnectApplicationKey *string `url:"jira_connect_application_key,omitempty" json:"jira_connect_application_key,omitempty"` - 
JiraConnectPublicKeyStorageEnabled *bool `url:"jira_connect_public_key_storage_enabled,omitempty" json:"jira_connect_public_key_storage_enabled,omitempty"` - JiraConnectProxyURL *string `url:"jira_connect_proxy_url,omitempty" json:"jira_connect_proxy_url,omitempty"` - KeepLatestArtifact *bool `url:"keep_latest_artifact,omitempty" json:"keep_latest_artifact,omitempty"` - KrokiEnabled *bool `url:"kroki_enabled,omitempty" json:"kroki_enabled,omitempty"` - KrokiFormats *map[string]bool `url:"kroki_formats,omitempty" json:"kroki_formats,omitempty"` - KrokiURL *string `url:"kroki_url,omitempty" json:"kroki_url,omitempty"` - LocalMarkdownVersion *int `url:"local_markdown_version,omitempty" json:"local_markdown_version,omitempty"` - LockDuoFeaturesEnabled *bool `url:"lock_duo_features_enabled,omitempty" json:"lock_duo_features_enabled,omitempty"` - LockMembershipsToLDAP *bool `url:"lock_memberships_to_ldap,omitempty" json:"lock_memberships_to_ldap,omitempty"` - LoginRecaptchaProtectionEnabled *bool `url:"login_recaptcha_protection_enabled,omitempty" json:"login_recaptcha_protection_enabled,omitempty"` - MailgunEventsEnabled *bool `url:"mailgun_events_enabled,omitempty" json:"mailgun_events_enabled,omitempty"` - MailgunSigningKey *string `url:"mailgun_signing_key,omitempty" json:"mailgun_signing_key,omitempty"` - MaintenanceMode *bool `url:"maintenance_mode,omitempty" json:"maintenance_mode,omitempty"` - MaintenanceModeMessage *string `url:"maintenance_mode_message,omitempty" json:"maintenance_mode_message,omitempty"` - MavenPackageRequestsForwarding *bool `url:"maven_package_requests_forwarding,omitempty" json:"maven_package_requests_forwarding,omitempty"` - MaxArtifactsSize *int `url:"max_artifacts_size,omitempty" json:"max_artifacts_size,omitempty"` - MaxAttachmentSize *int `url:"max_attachment_size,omitempty" json:"max_attachment_size,omitempty"` - MaxDecompressedArchiveSize *int `url:"max_decompressed_archive_size,omitempty" json:"max_decompressed_archive_size,omitempty"` - MaxExportSize *int `url:"max_export_size,omitempty" json:"max_export_size,omitempty"` - MaxImportRemoteFileSize *int `url:"max_import_remote_file_size,omitempty" json:"max_import_remote_file_size,omitempty"` - MaxImportSize *int `url:"max_import_size,omitempty" json:"max_import_size,omitempty"` - MaxLoginAttempts *int `url:"max_login_attempts,omitempty" json:"max_login_attempts,omitempty"` - MaxNumberOfRepositoryDownloads *int `url:"max_number_of_repository_downloads,omitempty" json:"max_number_of_repository_downloads,omitempty"` - MaxNumberOfRepositoryDownloadsWithinTimePeriod *int `url:"max_number_of_repository_downloads_within_time_period,omitempty" json:"max_number_of_repository_downloads_within_time_period,omitempty"` - MaxPagesSize *int `url:"max_pages_size,omitempty" json:"max_pages_size,omitempty"` - MaxPersonalAccessTokenLifetime *int `url:"max_personal_access_token_lifetime,omitempty" json:"max_personal_access_token_lifetime,omitempty"` - MaxSSHKeyLifetime *int `url:"max_ssh_key_lifetime,omitempty" json:"max_ssh_key_lifetime,omitempty"` - MaxTerraformStateSizeBytes *int `url:"max_terraform_state_size_bytes,omitempty" json:"max_terraform_state_size_bytes,omitempty"` - MaxYAMLDepth *int `url:"max_yaml_depth,omitempty" json:"max_yaml_depth,omitempty"` - MaxYAMLSizeBytes *int `url:"max_yaml_size_bytes,omitempty" json:"max_yaml_size_bytes,omitempty"` - MetricsMethodCallThreshold *int `url:"metrics_method_call_threshold,omitempty" json:"metrics_method_call_threshold,omitempty"` - MinimumPasswordLength *int 
`url:"minimum_password_length,omitempty" json:"minimum_password_length,omitempty"` - MirrorAvailable *bool `url:"mirror_available,omitempty" json:"mirror_available,omitempty"` - MirrorCapacityThreshold *int `url:"mirror_capacity_threshold,omitempty" json:"mirror_capacity_threshold,omitempty"` - MirrorMaxCapacity *int `url:"mirror_max_capacity,omitempty" json:"mirror_max_capacity,omitempty"` - MirrorMaxDelay *int `url:"mirror_max_delay,omitempty" json:"mirror_max_delay,omitempty"` - NPMPackageRequestsForwarding *bool `url:"npm_package_requests_forwarding,omitempty" json:"npm_package_requests_forwarding,omitempty"` - NotesCreateLimit *int `url:"notes_create_limit,omitempty" json:"notes_create_limit,omitempty"` - NotifyOnUnknownSignIn *bool `url:"notify_on_unknown_sign_in,omitempty" json:"notify_on_unknown_sign_in,omitempty"` - NugetSkipMetadataURLValidation *bool `url:"nuget_skip_metadata_url_validation,omitempty" json:"nuget_skip_metadata_url_validation,omitempty"` - OutboundLocalRequestsAllowlistRaw *string `url:"outbound_local_requests_allowlist_raw,omitempty" json:"outbound_local_requests_allowlist_raw,omitempty"` - OutboundLocalRequestsWhitelist *[]string `url:"outbound_local_requests_whitelist,omitempty" json:"outbound_local_requests_whitelist,omitempty"` - PackageMetadataPURLTypes *[]int `url:"package_metadata_purl_types,omitempty" json:"package_metadata_purl_types,omitempty"` - PackageRegistryAllowAnyoneToPullOption *bool `url:"package_registry_allow_anyone_to_pull_option,omitempty" json:"package_registry_allow_anyone_to_pull_option,omitempty"` - PackageRegistryCleanupPoliciesWorkerCapacity *int `url:"package_registry_cleanup_policies_worker_capacity,omitempty" json:"package_registry_cleanup_policies_worker_capacity,omitempty"` - PagesDomainVerificationEnabled *bool `url:"pages_domain_verification_enabled,omitempty" json:"pages_domain_verification_enabled,omitempty"` - PasswordAuthenticationEnabledForGit *bool `url:"password_authentication_enabled_for_git,omitempty" json:"password_authentication_enabled_for_git,omitempty"` - PasswordAuthenticationEnabledForWeb *bool `url:"password_authentication_enabled_for_web,omitempty" json:"password_authentication_enabled_for_web,omitempty"` - PasswordNumberRequired *bool `url:"password_number_required,omitempty" json:"password_number_required,omitempty"` - PasswordSymbolRequired *bool `url:"password_symbol_required,omitempty" json:"password_symbol_required,omitempty"` - PasswordUppercaseRequired *bool `url:"password_uppercase_required,omitempty" json:"password_uppercase_required,omitempty"` - PasswordLowercaseRequired *bool `url:"password_lowercase_required,omitempty" json:"password_lowercase_required,omitempty"` - PerformanceBarAllowedGroupID *int `url:"performance_bar_allowed_group_id,omitempty" json:"performance_bar_allowed_group_id,omitempty"` - PerformanceBarAllowedGroupPath *string `url:"performance_bar_allowed_group_path,omitempty" json:"performance_bar_allowed_group_path,omitempty"` - PerformanceBarEnabled *bool `url:"performance_bar_enabled,omitempty" json:"performance_bar_enabled,omitempty"` - PersonalAccessTokenPrefix *string `url:"personal_access_token_prefix,omitempty" json:"personal_access_token_prefix,omitempty"` - PlantumlEnabled *bool `url:"plantuml_enabled,omitempty" json:"plantuml_enabled,omitempty"` - PlantumlURL *string `url:"plantuml_url,omitempty" json:"plantuml_url,omitempty"` - PipelineLimitPerProjectUserSha *int `url:"pipeline_limit_per_project_user_sha,omitempty" json:"pipeline_limit_per_project_user_sha,omitempty"` - 
PollingIntervalMultiplier *float64 `url:"polling_interval_multiplier,omitempty" json:"polling_interval_multiplier,omitempty"` - PreventMergeRequestsAuthorApproval *bool `url:"prevent_merge_requests_author_approval,omitempty" json:"prevent_merge_requests_author_approval,omitempty"` - PreventMergeRequestsCommittersApproval *bool `url:"prevent_merge_requests_committers_approval,omitempty" json:"prevent_merge_requests_committers_approval,omitempty"` - ProjectDownloadExportLimit *int `url:"project_download_export_limit,omitempty" json:"project_download_export_limit,omitempty"` - ProjectExportEnabled *bool `url:"project_export_enabled,omitempty" json:"project_export_enabled,omitempty"` - ProjectExportLimit *int `url:"project_export_limit,omitempty" json:"project_export_limit,omitempty"` - ProjectImportLimit *int `url:"project_import_limit,omitempty" json:"project_import_limit,omitempty"` - ProjectJobsAPIRateLimit *int `url:"project_jobs_api_rate_limit,omitempty" json:"project_jobs_api_rate_limit,omitempty"` - ProjectRunnerTokenExpirationInterval *int `url:"project_runner_token_expiration_interval,omitempty" json:"project_runner_token_expiration_interval,omitempty"` - ProjectsAPIRateLimitUnauthenticated *int `url:"projects_api_rate_limit_unauthenticated,omitempty" json:"projects_api_rate_limit_unauthenticated,omitempty"` - PrometheusMetricsEnabled *bool `url:"prometheus_metrics_enabled,omitempty" json:"prometheus_metrics_enabled,omitempty"` - ProtectedCIVariables *bool `url:"protected_ci_variables,omitempty" json:"protected_ci_variables,omitempty"` - PseudonymizerEnabled *bool `url:"pseudonymizer_enabled,omitempty" json:"pseudonymizer_enabled,omitempty"` - PushEventActivitiesLimit *int `url:"push_event_activities_limit,omitempty" json:"push_event_activities_limit,omitempty"` - PushEventHooksLimit *int `url:"push_event_hooks_limit,omitempty" json:"push_event_hooks_limit,omitempty"` - PyPIPackageRequestsForwarding *bool `url:"pypi_package_requests_forwarding,omitempty" json:"pypi_package_requests_forwarding,omitempty"` - RSAKeyRestriction *int `url:"rsa_key_restriction,omitempty" json:"rsa_key_restriction,omitempty"` - RateLimitingResponseText *string `url:"rate_limiting_response_text,omitempty" json:"rate_limiting_response_text,omitempty"` - RawBlobRequestLimit *int `url:"raw_blob_request_limit,omitempty" json:"raw_blob_request_limit,omitempty"` - RecaptchaEnabled *bool `url:"recaptcha_enabled,omitempty" json:"recaptcha_enabled,omitempty"` - RecaptchaPrivateKey *string `url:"recaptcha_private_key,omitempty" json:"recaptcha_private_key,omitempty"` - RecaptchaSiteKey *string `url:"recaptcha_site_key,omitempty" json:"recaptcha_site_key,omitempty"` - ReceiveMaxInputSize *int `url:"receive_max_input_size,omitempty" json:"receive_max_input_size,omitempty"` - ReceptiveClusterAgentsEnabled *bool `url:"receptive_cluster_agents_enabled,omitempty" json:"receptive_cluster_agents_enabled,omitempty"` - RememberMeEnabled *bool `url:"remember_me_enabled,omitempty" json:"remember_me_enabled,omitempty"` - RepositoryChecksEnabled *bool `url:"repository_checks_enabled,omitempty" json:"repository_checks_enabled,omitempty"` - RepositorySizeLimit *int `url:"repository_size_limit,omitempty" json:"repository_size_limit,omitempty"` - RepositoryStorages *[]string `url:"repository_storages,omitempty" json:"repository_storages,omitempty"` - RepositoryStoragesWeighted *map[string]int `url:"repository_storages_weighted,omitempty" json:"repository_storages_weighted,omitempty"` - RequireAdminApprovalAfterUserSignup *bool 
`url:"require_admin_approval_after_user_signup,omitempty" json:"require_admin_approval_after_user_signup,omitempty"` - RequireAdminTwoFactorAuthentication *bool `url:"require_admin_two_factor_authentication,omitempty" json:"require_admin_two_factor_authentication,omitempty"` - RequirePersonalAccessTokenExpiry *bool `url:"require_personal_access_token_expiry,omitempty" json:"require_personal_access_token_expiry,omitempty"` - RequireTwoFactorAuthentication *bool `url:"require_two_factor_authentication,omitempty" json:"require_two_factor_authentication,omitempty"` - RestrictedVisibilityLevels *[]VisibilityValue `url:"restricted_visibility_levels,omitempty" json:"restricted_visibility_levels,omitempty"` - RunnerTokenExpirationInterval *int `url:"runner_token_expiration_interval,omitempty" json:"runner_token_expiration_interval,omitempty"` - SearchRateLimit *int `url:"search_rate_limit,omitempty" json:"search_rate_limit,omitempty"` - SearchRateLimitUnauthenticated *int `url:"search_rate_limit_unauthenticated,omitempty" json:"search_rate_limit_unauthenticated,omitempty"` - SecretDetectionRevocationTokenTypesURL *string `url:"secret_detection_revocation_token_types_url,omitempty" json:"secret_detection_revocation_token_types_url,omitempty"` - SecretDetectionTokenRevocationEnabled *bool `url:"secret_detection_token_revocation_enabled,omitempty" json:"secret_detection_token_revocation_enabled,omitempty"` - SecretDetectionTokenRevocationToken *string `url:"secret_detection_token_revocation_token,omitempty" json:"secret_detection_token_revocation_token,omitempty"` - SecretDetectionTokenRevocationURL *string `url:"secret_detection_token_revocation_url,omitempty" json:"secret_detection_token_revocation_url,omitempty"` - SecurityApprovalPoliciesLimit *int `url:"security_approval_policies_limit,omitempty" json:"security_approval_policies_limit,omitempty"` - SecurityPolicyGlobalGroupApproversEnabled *bool `url:"security_policy_global_group_approvers_enabled,omitempty" json:"security_policy_global_group_approvers_enabled,omitempty"` - SecurityTXTContent *string `url:"security_txt_content,omitempty" json:"security_txt_content,omitempty"` - SendUserConfirmationEmail *bool `url:"send_user_confirmation_email,omitempty" json:"send_user_confirmation_email,omitempty"` - SentryClientsideDSN *string `url:"sentry_clientside_dsn,omitempty" json:"sentry_clientside_dsn,omitempty"` - SentryDSN *string `url:"sentry_dsn,omitempty" json:"sentry_dsn,omitempty"` - SentryEnabled *string `url:"sentry_enabled,omitempty" json:"sentry_enabled,omitempty"` - SentryEnvironment *string `url:"sentry_environment,omitempty" json:"sentry_environment,omitempty"` - ServiceAccessTokensExpirationEnforced *bool `url:"service_access_tokens_expiration_enforced,omitempty" json:"service_access_tokens_expiration_enforced,omitempty"` - SessionExpireDelay *int `url:"session_expire_delay,omitempty" json:"session_expire_delay,omitempty"` - SharedRunnersEnabled *bool `url:"shared_runners_enabled,omitempty" json:"shared_runners_enabled,omitempty"` - SharedRunnersMinutes *int `url:"shared_runners_minutes,omitempty" json:"shared_runners_minutes,omitempty"` - SharedRunnersText *string `url:"shared_runners_text,omitempty" json:"shared_runners_text,omitempty"` - SidekiqJobLimiterCompressionThresholdBytes *int `url:"sidekiq_job_limiter_compression_threshold_bytes,omitempty" json:"sidekiq_job_limiter_compression_threshold_bytes,omitempty"` - SidekiqJobLimiterLimitBytes *int `url:"sidekiq_job_limiter_limit_bytes,omitempty" 
json:"sidekiq_job_limiter_limit_bytes,omitempty"` - SidekiqJobLimiterMode *string `url:"sidekiq_job_limiter_mode,omitempty" json:"sidekiq_job_limiter_mode,omitempty"` - SignInText *string `url:"sign_in_text,omitempty" json:"sign_in_text,omitempty"` - SignupEnabled *bool `url:"signup_enabled,omitempty" json:"signup_enabled,omitempty"` - SilentAdminExportsEnabled *bool `url:"silent_admin_exports_enabled,omitempty" json:"silent_admin_exports_enabled,omitempty"` - SilentModeEnabled *bool `url:"silent_mode_enabled,omitempty" json:"silent_mode_enabled,omitempty"` - SlackAppEnabled *bool `url:"slack_app_enabled,omitempty" json:"slack_app_enabled,omitempty"` - SlackAppID *string `url:"slack_app_id,omitempty" json:"slack_app_id,omitempty"` - SlackAppSecret *string `url:"slack_app_secret,omitempty" json:"slack_app_secret,omitempty"` - SlackAppSigningSecret *string `url:"slack_app_signing_secret,omitempty" json:"slack_app_signing_secret,omitempty"` - SlackAppVerificationToken *string `url:"slack_app_verification_token,omitempty" json:"slack_app_verification_token,omitempty"` - SnippetSizeLimit *int `url:"snippet_size_limit,omitempty" json:"snippet_size_limit,omitempty"` - SnowplowAppID *string `url:"snowplow_app_id,omitempty" json:"snowplow_app_id,omitempty"` - SnowplowCollectorHostname *string `url:"snowplow_collector_hostname,omitempty" json:"snowplow_collector_hostname,omitempty"` - SnowplowCookieDomain *string `url:"snowplow_cookie_domain,omitempty" json:"snowplow_cookie_domain,omitempty"` - SnowplowDatabaseCollectorHostname *string `url:"snowplow_database_collector_hostname,omitempty" json:"snowplow_database_collector_hostname,omitempty"` - SnowplowEnabled *bool `url:"snowplow_enabled,omitempty" json:"snowplow_enabled,omitempty"` - SourcegraphEnabled *bool `url:"sourcegraph_enabled,omitempty" json:"sourcegraph_enabled,omitempty"` - SourcegraphPublicOnly *bool `url:"sourcegraph_public_only,omitempty" json:"sourcegraph_public_only,omitempty"` - SourcegraphURL *string `url:"sourcegraph_url,omitempty" json:"sourcegraph_url,omitempty"` - SpamCheckAPIKey *string `url:"spam_check_api_key,omitempty" json:"spam_check_api_key,omitempty"` - SpamCheckEndpointEnabled *bool `url:"spam_check_endpoint_enabled,omitempty" json:"spam_check_endpoint_enabled,omitempty"` - SpamCheckEndpointURL *string `url:"spam_check_endpoint_url,omitempty" json:"spam_check_endpoint_url,omitempty"` - StaticObjectsExternalStorageAuthToken *string `url:"static_objects_external_storage_auth_token,omitempty" json:"static_objects_external_storage_auth_token,omitempty"` - StaticObjectsExternalStorageURL *string `url:"static_objects_external_storage_url,omitempty" json:"static_objects_external_storage_url,omitempty"` - SuggestPipelineEnabled *bool `url:"suggest_pipeline_enabled,omitempty" json:"suggest_pipeline_enabled,omitempty"` - TerminalMaxSessionTime *int `url:"terminal_max_session_time,omitempty" json:"terminal_max_session_time,omitempty"` - Terms *string `url:"terms,omitempty" json:"terms,omitempty"` - ThrottleAuthenticatedAPIEnabled *bool `url:"throttle_authenticated_api_enabled,omitempty" json:"throttle_authenticated_api_enabled,omitempty"` - ThrottleAuthenticatedAPIPeriodInSeconds *int `url:"throttle_authenticated_api_period_in_seconds,omitempty" json:"throttle_authenticated_api_period_in_seconds,omitempty"` - ThrottleAuthenticatedAPIRequestsPerPeriod *int `url:"throttle_authenticated_api_requests_per_period,omitempty" json:"throttle_authenticated_api_requests_per_period,omitempty"` - ThrottleAuthenticatedDeprecatedAPIEnabled 
*bool `url:"throttle_authenticated_deprecated_api_enabled,omitempty" json:"throttle_authenticated_deprecated_api_enabled,omitempty"` - ThrottleAuthenticatedDeprecatedAPIPeriodInSeconds *int `url:"throttle_authenticated_deprecated_api_period_in_seconds,omitempty" json:"throttle_authenticated_deprecated_api_period_in_seconds,omitempty"` - ThrottleAuthenticatedDeprecatedAPIRequestsPerPeriod *int `url:"throttle_authenticated_deprecated_api_requests_per_period,omitempty" json:"throttle_authenticated_deprecated_api_requests_per_period,omitempty"` - ThrottleAuthenticatedFilesAPIEnabled *bool `url:"throttle_authenticated_files_api_enabled,omitempty" json:"throttle_authenticated_files_api_enabled,omitempty"` - ThrottleAuthenticatedFilesAPIPeriodInSeconds *int `url:"throttle_authenticated_files_api_period_in_seconds,omitempty" json:"throttle_authenticated_files_api_period_in_seconds,omitempty"` - ThrottleAuthenticatedFilesAPIRequestsPerPeriod *int `url:"throttle_authenticated_files_api_requests_per_period,omitempty" json:"throttle_authenticated_files_api_requests_per_period,omitempty"` - ThrottleAuthenticatedGitLFSEnabled *bool `url:"throttle_authenticated_git_lfs_enabled,omitempty" json:"throttle_authenticated_git_lfs_enabled,omitempty"` - ThrottleAuthenticatedGitLFSPeriodInSeconds *int `url:"throttle_authenticated_git_lfs_period_in_seconds,omitempty" json:"throttle_authenticated_git_lfs_period_in_seconds,omitempty"` - ThrottleAuthenticatedGitLFSRequestsPerPeriod *int `url:"throttle_authenticated_git_lfs_requests_per_period,omitempty" json:"throttle_authenticated_git_lfs_requests_per_period,omitempty"` - ThrottleAuthenticatedPackagesAPIEnabled *bool `url:"throttle_authenticated_packages_api_enabled,omitempty" json:"throttle_authenticated_packages_api_enabled,omitempty"` - ThrottleAuthenticatedPackagesAPIPeriodInSeconds *int `url:"throttle_authenticated_packages_api_period_in_seconds,omitempty" json:"throttle_authenticated_packages_api_period_in_seconds,omitempty"` - ThrottleAuthenticatedPackagesAPIRequestsPerPeriod *int `url:"throttle_authenticated_packages_api_requests_per_period,omitempty" json:"throttle_authenticated_packages_api_requests_per_period,omitempty"` - ThrottleAuthenticatedWebEnabled *bool `url:"throttle_authenticated_web_enabled,omitempty" json:"throttle_authenticated_web_enabled,omitempty"` - ThrottleAuthenticatedWebPeriodInSeconds *int `url:"throttle_authenticated_web_period_in_seconds,omitempty" json:"throttle_authenticated_web_period_in_seconds,omitempty"` - ThrottleAuthenticatedWebRequestsPerPeriod *int `url:"throttle_authenticated_web_requests_per_period,omitempty" json:"throttle_authenticated_web_requests_per_period,omitempty"` - ThrottleIncidentManagementNotificationEnabled *bool `url:"throttle_incident_management_notification_enabled,omitempty" json:"throttle_incident_management_notification_enabled,omitempty"` - ThrottleIncidentManagementNotificationPerPeriod *int `url:"throttle_incident_management_notification_per_period,omitempty" json:"throttle_incident_management_notification_per_period,omitempty"` - ThrottleIncidentManagementNotificationPeriodInSeconds *int `url:"throttle_incident_management_notification_period_in_seconds,omitempty" json:"throttle_incident_management_notification_period_in_seconds,omitempty"` - ThrottleProtectedPathsEnabled *bool `url:"throttle_protected_paths_enabled_enabled,omitempty" json:"throttle_protected_paths_enabled,omitempty"` - ThrottleProtectedPathsPeriodInSeconds *int `url:"throttle_protected_paths_enabled_period_in_seconds,omitempty" 
json:"throttle_protected_paths_period_in_seconds,omitempty"` - ThrottleProtectedPathsRequestsPerPeriod *int `url:"throttle_protected_paths_enabled_requests_per_period,omitempty" json:"throttle_protected_paths_per_period,omitempty"` - ThrottleUnauthenticatedAPIEnabled *bool `url:"throttle_unauthenticated_api_enabled,omitempty" json:"throttle_unauthenticated_api_enabled,omitempty"` - ThrottleUnauthenticatedAPIPeriodInSeconds *int `url:"throttle_unauthenticated_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_api_period_in_seconds,omitempty"` - ThrottleUnauthenticatedAPIRequestsPerPeriod *int `url:"throttle_unauthenticated_api_requests_per_period,omitempty" json:"throttle_unauthenticated_api_requests_per_period,omitempty"` - ThrottleUnauthenticatedDeprecatedAPIEnabled *bool `url:"throttle_unauthenticated_deprecated_api_enabled,omitempty" json:"throttle_unauthenticated_deprecated_api_enabled,omitempty"` - ThrottleUnauthenticatedDeprecatedAPIPeriodInSeconds *int `url:"throttle_unauthenticated_deprecated_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_deprecated_api_period_in_seconds,omitempty"` - ThrottleUnauthenticatedDeprecatedAPIRequestsPerPeriod *int `url:"throttle_unauthenticated_deprecated_api_requests_per_period,omitempty" json:"throttle_unauthenticated_deprecated_api_requests_per_period,omitempty"` - ThrottleUnauthenticatedEnabled *bool `url:"throttle_unauthenticated_enabled,omitempty" json:"throttle_unauthenticated_enabled,omitempty"` - ThrottleUnauthenticatedFilesAPIEnabled *bool `url:"throttle_unauthenticated_files_api_enabled,omitempty" json:"throttle_unauthenticated_files_api_enabled,omitempty"` - ThrottleUnauthenticatedFilesAPIPeriodInSeconds *int `url:"throttle_unauthenticated_files_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_files_api_period_in_seconds,omitempty"` - ThrottleUnauthenticatedFilesAPIRequestsPerPeriod *int `url:"throttle_unauthenticated_files_api_requests_per_period,omitempty" json:"throttle_unauthenticated_files_api_requests_per_period,omitempty"` - ThrottleUnauthenticatedGitLFSEnabled *bool `url:"throttle_unauthenticated_git_lfs_enabled,omitempty" json:"throttle_unauthenticated_git_lfs_enabled,omitempty"` - ThrottleUnauthenticatedGitLFSPeriodInSeconds *int `url:"throttle_unauthenticated_git_lfs_period_in_seconds,omitempty" json:"throttle_unauthenticated_git_lfs_period_in_seconds,omitempty"` - ThrottleUnauthenticatedGitLFSRequestsPerPeriod *int `url:"throttle_unauthenticated_git_lfs_requests_per_period,omitempty" json:"throttle_unauthenticated_git_lfs_requests_per_period,omitempty"` - ThrottleUnauthenticatedPackagesAPIEnabled *bool `url:"throttle_unauthenticated_packages_api_enabled,omitempty" json:"throttle_unauthenticated_packages_api_enabled,omitempty"` - ThrottleUnauthenticatedPackagesAPIPeriodInSeconds *int `url:"throttle_unauthenticated_packages_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_packages_api_period_in_seconds,omitempty"` - ThrottleUnauthenticatedPackagesAPIRequestsPerPeriod *int `url:"throttle_unauthenticated_packages_api_requests_per_period,omitempty" json:"throttle_unauthenticated_packages_api_requests_per_period,omitempty"` - ThrottleUnauthenticatedPeriodInSeconds *int `url:"throttle_unauthenticated_period_in_seconds,omitempty" json:"throttle_unauthenticated_period_in_seconds,omitempty"` - ThrottleUnauthenticatedRequestsPerPeriod *int `url:"throttle_unauthenticated_requests_per_period,omitempty" json:"throttle_unauthenticated_requests_per_period,omitempty"` - 
ThrottleUnauthenticatedWebEnabled *bool `url:"throttle_unauthenticated_web_enabled,omitempty" json:"throttle_unauthenticated_web_enabled,omitempty"` - ThrottleUnauthenticatedWebPeriodInSeconds *int `url:"throttle_unauthenticated_web_period_in_seconds,omitempty" json:"throttle_unauthenticated_web_period_in_seconds,omitempty"` - ThrottleUnauthenticatedWebRequestsPerPeriod *int `url:"throttle_unauthenticated_web_requests_per_period,omitempty" json:"throttle_unauthenticated_web_requests_per_period,omitempty"` - TimeTrackingLimitToHours *bool `url:"time_tracking_limit_to_hours,omitempty" json:"time_tracking_limit_to_hours,omitempty"` - TwoFactorGracePeriod *int `url:"two_factor_grace_period,omitempty" json:"two_factor_grace_period,omitempty"` - UnconfirmedUsersDeleteAfterDays *int `url:"unconfirmed_users_delete_after_days,omitempty" json:"unconfirmed_users_delete_after_days,omitempty"` - UniqueIPsLimitEnabled *bool `url:"unique_ips_limit_enabled,omitempty" json:"unique_ips_limit_enabled,omitempty"` - UniqueIPsLimitPerUser *int `url:"unique_ips_limit_per_user,omitempty" json:"unique_ips_limit_per_user,omitempty"` - UniqueIPsLimitTimeWindow *int `url:"unique_ips_limit_time_window,omitempty" json:"unique_ips_limit_time_window,omitempty"` - UpdateRunnerVersionsEnabled *bool `url:"update_runner_versions_enabled,omitempty" json:"update_runner_versions_enabled,omitempty"` - UpdatingNameDisabledForUsers *bool `url:"updating_name_disabled_for_users,omitempty" json:"updating_name_disabled_for_users,omitempty"` - UsagePingEnabled *bool `url:"usage_ping_enabled,omitempty" json:"usage_ping_enabled,omitempty"` - UsagePingFeaturesEnabled *bool `url:"usage_ping_features_enabled,omitempty" json:"usage_ping_features_enabled,omitempty"` - UseClickhouseForAnalytics *bool `url:"use_clickhouse_for_analytics,omitempty" json:"use_clickhouse_for_analytics,omitempty"` - UserDeactivationEmailsEnabled *bool `url:"user_deactivation_emails_enabled,omitempty" json:"user_deactivation_emails_enabled,omitempty"` - UserDefaultExternal *bool `url:"user_default_external,omitempty" json:"user_default_external,omitempty"` - UserDefaultInternalRegex *string `url:"user_default_internal_regex,omitempty" json:"user_default_internal_regex,omitempty"` - UserDefaultsToPrivateProfile *bool `url:"user_defaults_to_private_profile,omitempty" json:"user_defaults_to_private_profile,omitempty"` - UserEmailLookupLimit *int `url:"user_email_lookup_limit,omitempty" json:"user_email_lookup_limit,omitempty"` - UserOauthApplications *bool `url:"user_oauth_applications,omitempty" json:"user_oauth_applications,omitempty"` - UserShowAddSSHKeyMessage *bool `url:"user_show_add_ssh_key_message,omitempty" json:"user_show_add_ssh_key_message,omitempty"` - UsersGetByIDLimit *int `url:"users_get_by_id_limit,omitempty" json:"users_get_by_id_limit,omitempty"` - UsersGetByIDLimitAllowlistRaw *string `url:"users_get_by_id_limit_allowlist_raw,omitempty" json:"users_get_by_id_limit_allowlist_raw,omitempty"` - ValidRunnerRegistrars *[]string `url:"valid_runner_registrars,omitempty" json:"valid_runner_registrars,omitempty"` - VersionCheckEnabled *bool `url:"version_check_enabled,omitempty" json:"version_check_enabled,omitempty"` - WebIDEClientsidePreviewEnabled *bool `url:"web_ide_clientside_preview_enabled,omitempty" json:"web_ide_clientside_preview_enabled,omitempty"` - WhatsNewVariant *string `url:"whats_new_variant,omitempty" json:"whats_new_variant,omitempty"` - WikiPageMaxContentBytes *int `url:"wiki_page_max_content_bytes,omitempty" 
json:"wiki_page_max_content_bytes,omitempty"` -} - -// BranchProtectionDefaultsOptions represents default Git protected branch permissions options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/groups.html#options-for-default_branch_protection_defaults -type BranchProtectionDefaultsOptions struct { - AllowedToPush *[]int `url:"allowed_to_push,omitempty" json:"allowed_to_push,omitempty"` - AllowForcePush *bool `url:"allow_force_push,omitempty" json:"allow_force_push,omitempty"` - AllowedToMerge *[]int `url:"allowed_to_merge,omitempty" json:"allowed_to_merge,omitempty"` - DeveloperCanInitialPush *bool `url:"developer_can_initial_push,omitempty" json:"developer_can_initial_push,omitempty"` -} - -// UpdateSettings updates the application settings. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/settings.html#change-application-settings -func (s *SettingsService) UpdateSettings(opt *UpdateSettingsOptions, options ...RequestOptionFunc) (*Settings, *Response, error) { - req, err := s.client.NewRequest(http.MethodPut, "application/settings", opt, options) - if err != nil { - return nil, nil, err - } - - as := new(Settings) - resp, err := s.client.Do(req, as) - if err != nil { - return nil, resp, err - } - - return as, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/sidekiq_metrics.go b/vendor/github.com/xanzy/go-gitlab/sidekiq_metrics.go deleted file mode 100644 index d7a7834b66..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/sidekiq_metrics.go +++ /dev/null @@ -1,157 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "net/http" - "time" -) - -// SidekiqService handles communication with the sidekiq service -// -// GitLab API docs: https://docs.gitlab.com/ee/api/sidekiq_metrics.html -type SidekiqService struct { - client *Client -} - -// QueueMetrics represents the GitLab sidekiq queue metrics. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/sidekiq_metrics.html#get-the-current-queue-metrics -type QueueMetrics struct { - Queues map[string]struct { - Backlog int `json:"backlog"` - Latency int `json:"latency"` - } `json:"queues"` -} - -// GetQueueMetrics lists information about all the registered queues, -// their backlog and their latency. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/sidekiq_metrics.html#get-the-current-queue-metrics -func (s *SidekiqService) GetQueueMetrics(options ...RequestOptionFunc) (*QueueMetrics, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "/sidekiq/queue_metrics", nil, options) - if err != nil { - return nil, nil, err - } - - q := new(QueueMetrics) - resp, err := s.client.Do(req, q) - if err != nil { - return nil, resp, err - } - - return q, resp, nil -} - -// ProcessMetrics represents the GitLab sidekiq process metrics. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/sidekiq_metrics.html#get-the-current-process-metrics -type ProcessMetrics struct { - Processes []struct { - Hostname string `json:"hostname"` - Pid int `json:"pid"` - Tag string `json:"tag"` - StartedAt *time.Time `json:"started_at"` - Queues []string `json:"queues"` - Labels []string `json:"labels"` - Concurrency int `json:"concurrency"` - Busy int `json:"busy"` - } `json:"processes"` -} - -// GetProcessMetrics lists information about all the Sidekiq workers registered -// to process your queues. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/sidekiq_metrics.html#get-the-current-process-metrics -func (s *SidekiqService) GetProcessMetrics(options ...RequestOptionFunc) (*ProcessMetrics, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "/sidekiq/process_metrics", nil, options) - if err != nil { - return nil, nil, err - } - - p := new(ProcessMetrics) - resp, err := s.client.Do(req, p) - if err != nil { - return nil, resp, err - } - - return p, resp, nil -} - -// JobStats represents the GitLab sidekiq job stats. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/sidekiq_metrics.html#get-the-current-job-statistics -type JobStats struct { - Jobs struct { - Processed int `json:"processed"` - Failed int `json:"failed"` - Enqueued int `json:"enqueued"` - } `json:"jobs"` -} - -// GetJobStats lists information about the jobs that Sidekiq has performed. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/sidekiq_metrics.html#get-the-current-job-statistics -func (s *SidekiqService) GetJobStats(options ...RequestOptionFunc) (*JobStats, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "/sidekiq/job_stats", nil, options) - if err != nil { - return nil, nil, err - } - - j := new(JobStats) - resp, err := s.client.Do(req, j) - if err != nil { - return nil, resp, err - } - - return j, resp, nil -} - -// CompoundMetrics represents the GitLab sidekiq compounded stats. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/sidekiq_metrics.html#get-a-compound-response-of-all-the-previously-mentioned-metrics -type CompoundMetrics struct { - QueueMetrics - ProcessMetrics - JobStats -} - -// GetCompoundMetrics lists all the currently available information about Sidekiq. -// Get a compound response of all the previously mentioned metrics -// -// GitLab API docs: https://docs.gitlab.com/ee/api/sidekiq_metrics.html#get-a-compound-response-of-all-the-previously-mentioned-metrics -func (s *SidekiqService) GetCompoundMetrics(options ...RequestOptionFunc) (*CompoundMetrics, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "/sidekiq/compound_metrics", nil, options) - if err != nil { - return nil, nil, err - } - - c := new(CompoundMetrics) - resp, err := s.client.Do(req, c) - if err != nil { - return nil, resp, err - } - - return c, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/snippet_repository_storage_move.go b/vendor/github.com/xanzy/go-gitlab/snippet_repository_storage_move.go deleted file mode 100644 index 00761ec2ed..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/snippet_repository_storage_move.go +++ /dev/null @@ -1,203 +0,0 @@ -// -// Copyright 2023, Nick Westbury -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
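The sidekiq metrics service deleted above is purely read-only; a minimal sketch of polling it, assuming the service is wired onto the client as Sidekiq (as in the vendored client) and reusing the client construction from the previous sketch:

package main

import (
	"fmt"

	gitlab "github.com/xanzy/go-gitlab"
)

func printSidekiqHealth(client *gitlab.Client) error {
	// Per-queue backlog and latency.
	qm, _, err := client.Sidekiq.GetQueueMetrics()
	if err != nil {
		return err
	}
	for name, q := range qm.Queues {
		fmt.Printf("queue %s: backlog=%d latency=%d\n", name, q.Backlog, q.Latency)
	}

	// Lifetime job counters.
	js, _, err := client.Sidekiq.GetJobStats()
	if err != nil {
		return err
	}
	fmt.Printf("jobs: processed=%d failed=%d enqueued=%d\n",
		js.Jobs.Processed, js.Jobs.Failed, js.Jobs.Enqueued)
	return nil
}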
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// SnippetRepositoryStorageMoveService handles communication with the -// snippets related methods of the GitLab API. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html -type SnippetRepositoryStorageMoveService struct { - client *Client -} - -// SnippetRepositoryStorageMove represents the status of a repository move. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html -type SnippetRepositoryStorageMove struct { - ID int `json:"id"` - CreatedAt *time.Time `json:"created_at"` - State string `json:"state"` - SourceStorageName string `json:"source_storage_name"` - DestinationStorageName string `json:"destination_storage_name"` - Snippet *RepositorySnippet `json:"snippet"` -} - -type RepositorySnippet struct { - ID int `json:"id"` - Title string `json:"title"` - Description string `json:"description"` - Visibility VisibilityValue `json:"visibility"` - UpdatedAt *time.Time `json:"updated_at"` - CreatedAt *time.Time `json:"created_at"` - ProjectID int `json:"project_id"` - WebURL string `json:"web_url"` - RawURL string `json:"raw_url"` - SSHURLToRepo string `json:"ssh_url_to_repo"` - HTTPURLToRepo string `json:"http_url_to_repo"` -} - -// RetrieveAllSnippetStorageMovesOptions represents the available -// RetrieveAllStorageMoves() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html#retrieve-all-repository-storage-moves-for-a-snippet -type RetrieveAllSnippetStorageMovesOptions ListOptions - -// RetrieveAllStorageMoves retrieves all snippet repository storage moves -// accessible by the authenticated user. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html#retrieve-all-repository-storage-moves-for-a-snippet -func (s SnippetRepositoryStorageMoveService) RetrieveAllStorageMoves(opts RetrieveAllSnippetStorageMovesOptions, options ...RequestOptionFunc) ([]*SnippetRepositoryStorageMove, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "snippet_repository_storage_moves", opts, options) - if err != nil { - return nil, nil, err - } - - var ssms []*SnippetRepositoryStorageMove - resp, err := s.client.Do(req, &ssms) - if err != nil { - return nil, resp, err - } - - return ssms, resp, err -} - -// RetrieveAllStorageMovesForSnippet retrieves all repository storage moves for -// a single snippet accessible by the authenticated user. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html#retrieve-all-repository-storage-moves-for-a-snippet -func (s SnippetRepositoryStorageMoveService) RetrieveAllStorageMovesForSnippet(snippet int, opts RetrieveAllSnippetStorageMovesOptions, options ...RequestOptionFunc) ([]*SnippetRepositoryStorageMove, *Response, error) { - u := fmt.Sprintf("snippets/%d/repository_storage_moves", snippet) - - req, err := s.client.NewRequest(http.MethodGet, u, opts, options) - if err != nil { - return nil, nil, err - } - - var ssms []*SnippetRepositoryStorageMove - resp, err := s.client.Do(req, &ssms) - if err != nil { - return nil, resp, err - } - - return ssms, resp, err -} - -// GetStorageMove gets a single snippet repository storage move. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html#get-a-single-snippet-repository-storage-move -func (s SnippetRepositoryStorageMoveService) GetStorageMove(repositoryStorage int, options ...RequestOptionFunc) (*SnippetRepositoryStorageMove, *Response, error) { - u := fmt.Sprintf("snippet_repository_storage_moves/%d", repositoryStorage) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ssm := new(SnippetRepositoryStorageMove) - resp, err := s.client.Do(req, ssm) - if err != nil { - return nil, resp, err - } - - return ssm, resp, err -} - -// GetStorageMoveForSnippet gets a single repository storage move for a snippet. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html#get-a-single-repository-storage-move-for-a-snippet -func (s SnippetRepositoryStorageMoveService) GetStorageMoveForSnippet(snippet int, repositoryStorage int, options ...RequestOptionFunc) (*SnippetRepositoryStorageMove, *Response, error) { - u := fmt.Sprintf("snippets/%d/repository_storage_moves/%d", snippet, repositoryStorage) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ssm := new(SnippetRepositoryStorageMove) - resp, err := s.client.Do(req, ssm) - if err != nil { - return nil, resp, err - } - - return ssm, resp, err -} - -// ScheduleStorageMoveForSnippetOptions represents the available -// ScheduleStorageMoveForSnippet() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html#schedule-a-repository-storage-move-for-a-snippet -type ScheduleStorageMoveForSnippetOptions struct { - DestinationStorageName *string `url:"destination_storage_name,omitempty" json:"destination_storage_name,omitempty"` -} - -// ScheduleStorageMoveForSnippet schedules a repository to be moved for a snippet. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html#schedule-a-repository-storage-move-for-a-snippet -func (s SnippetRepositoryStorageMoveService) ScheduleStorageMoveForSnippet(snippet int, opts ScheduleStorageMoveForSnippetOptions, options ...RequestOptionFunc) (*SnippetRepositoryStorageMove, *Response, error) { - u := fmt.Sprintf("snippets/%d/repository_storage_moves", snippet) - - req, err := s.client.NewRequest(http.MethodPost, u, opts, options) - if err != nil { - return nil, nil, err - } - - ssm := new(SnippetRepositoryStorageMove) - resp, err := s.client.Do(req, ssm) - if err != nil { - return nil, resp, err - } - - return ssm, resp, err -} - -// ScheduleAllSnippetStorageMovesOptions represents the available -// ScheduleAllStorageMoves() options.
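Scheduling a move with the options type above is a single call; a sketch, assuming the service is exposed on the client as SnippetRepositoryStorageMove (per the vendored client wiring) and with a placeholder snippet ID and shard name:

package main

import (
	"fmt"

	gitlab "github.com/xanzy/go-gitlab"
)

func moveSnippet(client *gitlab.Client) error {
	move, _, err := client.SnippetRepositoryStorageMove.ScheduleStorageMoveForSnippet(
		42, // placeholder snippet ID
		gitlab.ScheduleStorageMoveForSnippetOptions{
			DestinationStorageName: gitlab.Ptr("storage2"), // placeholder shard
		},
	)
	if err != nil {
		return err
	}
	// The move runs asynchronously; poll its state with GetStorageMove.
	fmt.Printf("move %d: %s\n", move.ID, move.State)
	return nil
}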
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html#schedule-repository-storage-moves-for-all-snippets-on-a-storage-shard -type ScheduleAllSnippetStorageMovesOptions struct { - SourceStorageName *string `url:"source_storage_name,omitempty" json:"source_storage_name,omitempty"` - DestinationStorageName *string `url:"destination_storage_name,omitempty" json:"destination_storage_name,omitempty"` -} - -// ScheduleAllStorageMoves schedules all snippet repositories to be moved. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/snippet_repository_storage_moves.html#schedule-repository-storage-moves-for-all-snippets-on-a-storage-shard -func (s SnippetRepositoryStorageMoveService) ScheduleAllStorageMoves(opts ScheduleAllSnippetStorageMovesOptions, options ...RequestOptionFunc) (*Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "snippet_repository_storage_moves", opts, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/snippets.go b/vendor/github.com/xanzy/go-gitlab/snippets.go deleted file mode 100644 index 3cb482773c..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/snippets.go +++ /dev/null @@ -1,314 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "bytes" - "fmt" - "net/http" - "time" -) - -// SnippetsService handles communication with the snippets -// related methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/snippets.html -type SnippetsService struct { - client *Client -} - -// Snippet represents a GitLab snippet. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/snippets.html -type Snippet struct { - ID int `json:"id"` - Title string `json:"title"` - FileName string `json:"file_name"` - Description string `json:"description"` - Visibility string `json:"visibility"` - Author struct { - ID int `json:"id"` - Username string `json:"username"` - Email string `json:"email"` - Name string `json:"name"` - State string `json:"state"` - CreatedAt *time.Time `json:"created_at"` - } `json:"author"` - UpdatedAt *time.Time `json:"updated_at"` - CreatedAt *time.Time `json:"created_at"` - ProjectID int `json:"project_id"` - WebURL string `json:"web_url"` - RawURL string `json:"raw_url"` - Files []struct { - Path string `json:"path"` - RawURL string `json:"raw_url"` - } `json:"files"` - RepositoryStorage string `json:"repository_storage"` -} - -func (s Snippet) String() string { - return Stringify(s) -} - -// ListSnippetsOptions represents the available ListSnippets() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/snippets.html#list-all-snippets-for-a-user -type ListSnippetsOptions ListOptions - -// ListSnippets gets a list of snippets. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/snippets.html#list-all-snippets-for-a-user -func (s *SnippetsService) ListSnippets(opt *ListSnippetsOptions, options ...RequestOptionFunc) ([]*Snippet, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "snippets", opt, options) - if err != nil { - return nil, nil, err - } - - var ps []*Snippet - resp, err := s.client.Do(req, &ps) - if err != nil { - return nil, resp, err - } - - return ps, resp, nil -} - -// GetSnippet gets a single snippet -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/snippets.html#get-a-single-snippet -func (s *SnippetsService) GetSnippet(snippet int, options ...RequestOptionFunc) (*Snippet, *Response, error) { - u := fmt.Sprintf("snippets/%d", snippet) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - ps := new(Snippet) - resp, err := s.client.Do(req, ps) - if err != nil { - return nil, resp, err - } - - return ps, resp, nil -} - -// SnippetContent gets a single snippet’s raw contents. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/snippets.html#single-snippet-contents -func (s *SnippetsService) SnippetContent(snippet int, options ...RequestOptionFunc) ([]byte, *Response, error) { - u := fmt.Sprintf("snippets/%d/raw", snippet) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var b bytes.Buffer - resp, err := s.client.Do(req, &b) - if err != nil { - return nil, resp, err - } - - return b.Bytes(), resp, err -} - -// SnippetFileContent returns the raw file content as plain text. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/snippets.html#snippet-repository-file-content -func (s *SnippetsService) SnippetFileContent(snippet int, ref, filename string, options ...RequestOptionFunc) ([]byte, *Response, error) { - filepath := PathEscape(filename) - u := fmt.Sprintf("snippets/%d/files/%s/%s/raw", snippet, ref, filepath) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var b bytes.Buffer - resp, err := s.client.Do(req, &b) - if err != nil { - return nil, resp, err - } - - return b.Bytes(), resp, err -} - -// CreateSnippetFileOptions represents the create snippet file options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/snippets.html#create-new-snippet -type CreateSnippetFileOptions struct { - FilePath *string `url:"file_path,omitempty" json:"file_path,omitempty"` - Content *string `url:"content,omitempty" json:"content,omitempty"` -} - -// CreateSnippetOptions represents the available CreateSnippet() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/snippets.html#create-new-snippet -type CreateSnippetOptions struct { - Title *string `url:"title,omitempty" json:"title,omitempty"` - FileName *string `url:"file_name,omitempty" json:"file_name,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - Content *string `url:"content,omitempty" json:"content,omitempty"` - Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"` - Files *[]*CreateSnippetFileOptions `url:"files,omitempty" json:"files,omitempty"` -} - -// CreateSnippet creates a new snippet. The user must have permission -// to create new snippets. 
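Reading a snippet composes GetSnippet with SnippetContent, which returns the raw bytes of the snippet's primary file; a sketch with a placeholder ID:

package main

import (
	"fmt"

	gitlab "github.com/xanzy/go-gitlab"
)

func dumpSnippet(client *gitlab.Client) error {
	snip, _, err := client.Snippets.GetSnippet(42) // placeholder ID
	if err != nil {
		return err
	}
	raw, _, err := client.Snippets.SnippetContent(snip.ID)
	if err != nil {
		return err
	}
	fmt.Printf("%s (%s):\n%s\n", snip.Title, snip.FileName, raw)
	return nil
}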
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/snippets.html#create-new-snippet -func (s *SnippetsService) CreateSnippet(opt *CreateSnippetOptions, options ...RequestOptionFunc) (*Snippet, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "snippets", opt, options) - if err != nil { - return nil, nil, err - } - - ps := new(Snippet) - resp, err := s.client.Do(req, ps) - if err != nil { - return nil, resp, err - } - - return ps, resp, nil -} - -// UpdateSnippetFileOptions represents the update snippet file options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/snippets.html#update-snippet -type UpdateSnippetFileOptions struct { - Action *string `url:"action,omitempty" json:"action,omitempty"` - FilePath *string `url:"file_path,omitempty" json:"file_path,omitempty"` - Content *string `url:"content,omitempty" json:"content,omitempty"` - PreviousPath *string `url:"previous_path,omitempty" json:"previous_path,omitempty"` -} - -// UpdateSnippetOptions represents the available UpdateSnippet() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/snippets.html#update-snippet -type UpdateSnippetOptions struct { - Title *string `url:"title,omitempty" json:"title,omitempty"` - FileName *string `url:"file_name,omitempty" json:"file_name,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - Content *string `url:"content,omitempty" json:"content,omitempty"` - Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"` - Files *[]*UpdateSnippetFileOptions `url:"files,omitempty" json:"files,omitempty"` -} - -// UpdateSnippet updates an existing snippet. The user must have -// permission to change an existing snippet. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/snippets.html#update-snippet -func (s *SnippetsService) UpdateSnippet(snippet int, opt *UpdateSnippetOptions, options ...RequestOptionFunc) (*Snippet, *Response, error) { - u := fmt.Sprintf("snippets/%d", snippet) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - ps := new(Snippet) - resp, err := s.client.Do(req, ps) - if err != nil { - return nil, resp, err - } - - return ps, resp, nil -} - -// DeleteSnippet deletes an existing snippet. This is an idempotent -// function and deleting a non-existent snippet still returns a 200 OK status -// code. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/snippets.html#delete-snippet -func (s *SnippetsService) DeleteSnippet(snippet int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("snippets/%d", snippet) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// ExploreSnippetsOptions represents the available ExploreSnippets() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/snippets.html#list-all-public-snippets -type ExploreSnippetsOptions ListOptions - -// ExploreSnippets gets the list of public snippets. 
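On the write path, note that Files is a pointer to a slice of pointers, so multi-file snippets are built up as below; the title, visibility constant, and file payload are placeholders:

package main

import (
	gitlab "github.com/xanzy/go-gitlab"
)

func createHelloSnippet(client *gitlab.Client) (*gitlab.Snippet, error) {
	// Visibility takes one of the package's VisibilityValue constants.
	snip, _, err := client.Snippets.CreateSnippet(&gitlab.CreateSnippetOptions{
		Title:      gitlab.Ptr("hello"),
		Visibility: gitlab.Ptr(gitlab.PrivateVisibility),
		Files: &[]*gitlab.CreateSnippetFileOptions{
			{
				FilePath: gitlab.Ptr("hello.txt"),
				Content:  gitlab.Ptr("Hello, world\n"),
			},
		},
	})
	return snip, err
}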
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/snippets.html#list-all-public-snippets -func (s *SnippetsService) ExploreSnippets(opt *ExploreSnippetsOptions, options ...RequestOptionFunc) ([]*Snippet, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "snippets/public", opt, options) - if err != nil { - return nil, nil, err - } - - var ps []*Snippet - resp, err := s.client.Do(req, &ps) - if err != nil { - return nil, resp, err - } - - return ps, resp, nil -} - -// ListAllSnippetsOptions represents the available ListAllSnippets() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/snippets.html#list-all-snippets -type ListAllSnippetsOptions struct { - ListOptions - CreatedAfter *ISOTime `url:"created_after,omitempty" json:"created_after,omitempty"` - CreatedBefore *ISOTime `url:"created_before,omitempty" json:"created_before,omitempty"` - RepositoryStorage *string `url:"repository_storage,omitempty" json:"repository_storage,omitempty"` -} - -// ListAllSnippets gets all snippets the current user has access to. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/snippets.html#list-all-snippets -func (s *SnippetsService) ListAllSnippets(opt *ListAllSnippetsOptions, options ...RequestOptionFunc) ([]*Snippet, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "snippets/all", opt, options) - if err != nil { - return nil, nil, err - } - - var ps []*Snippet - resp, err := s.client.Do(req, &ps) - if err != nil { - return nil, resp, err - } - - return ps, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/strings.go b/vendor/github.com/xanzy/go-gitlab/strings.go deleted file mode 100644 index efbd10ffd5..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/strings.go +++ /dev/null @@ -1,93 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "bytes" - "fmt" - "reflect" -) - -// Stringify attempts to create a reasonable string representation of types in -// the Gitlab library. It does things like resolve pointers to their values -// and omits struct fields with nil values. -func Stringify(message interface{}) string { - var buf bytes.Buffer - v := reflect.ValueOf(message) - stringifyValue(&buf, v) - return buf.String() -} - -// stringifyValue was heavily inspired by the goprotobuf library. 
-func stringifyValue(buf *bytes.Buffer, val reflect.Value) { - if val.Kind() == reflect.Ptr && val.IsNil() { - buf.WriteString("<nil>") - return - } - - v := reflect.Indirect(val) - - switch v.Kind() { - case reflect.String: - fmt.Fprintf(buf, `"%s"`, v) - case reflect.Slice: - buf.WriteByte('[') - for i := 0; i < v.Len(); i++ { - if i > 0 { - buf.WriteByte(' ') - } - - stringifyValue(buf, v.Index(i)) - } - - buf.WriteByte(']') - return - case reflect.Struct: - if v.Type().Name() != "" { - buf.WriteString(v.Type().String()) - } - - buf.WriteByte('{') - - var sep bool - for i := 0; i < v.NumField(); i++ { - fv := v.Field(i) - if fv.Kind() == reflect.Ptr && fv.IsNil() { - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - continue - } - - if sep { - buf.WriteString(", ") - } else { - sep = true - } - - buf.WriteString(v.Type().Field(i).Name) - buf.WriteByte(':') - stringifyValue(buf, fv) - } - - buf.WriteByte('}') - default: - if v.CanInterface() { - fmt.Fprint(buf, v.Interface()) - } - } -} diff --git a/vendor/github.com/xanzy/go-gitlab/system_hooks.go b/vendor/github.com/xanzy/go-gitlab/system_hooks.go deleted file mode 100644 index 1f151fa930..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/system_hooks.go +++ /dev/null @@ -1,176 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// SystemHooksService handles communication with the system hooks related -// methods of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/system_hooks.html -type SystemHooksService struct { - client *Client -} - -// Hook represents a GitLab system hook. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/system_hooks.html -type Hook struct { - ID int `json:"id"` - URL string `json:"url"` - CreatedAt *time.Time `json:"created_at"` - PushEvents bool `json:"push_events"` - TagPushEvents bool `json:"tag_push_events"` - MergeRequestsEvents bool `json:"merge_requests_events"` - RepositoryUpdateEvents bool `json:"repository_update_events"` - EnableSSLVerification bool `json:"enable_ssl_verification"` -} - -func (h Hook) String() string { - return Stringify(h) -} - -// ListHooks gets a list of system hooks. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/system_hooks.html#list-system-hooks -func (s *SystemHooksService) ListHooks(options ...RequestOptionFunc) ([]*Hook, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "hooks", nil, options) - if err != nil { - return nil, nil, err - } - - var h []*Hook - resp, err := s.client.Do(req, &h) - if err != nil { - return nil, resp, err - } - - return h, resp, nil -} - -// GetHook gets a single system hook.
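The Stringify/stringifyValue pair deleted above renders nil pointers as "<nil>", quotes strings, brackets slices, and prints structs field by field while skipping nil pointer and nil slice fields. A small sketch of that behaviour, using the Hook type from the same package (field values are hypothetical):

    package main

    import (
        "fmt"

        gitlab "github.com/xanzy/go-gitlab"
    )

    func main() {
        h := gitlab.Hook{ID: 1, URL: "https://example.com/hook"} // hypothetical values
        // Nil pointer fields (here CreatedAt) are skipped; value fields print as Name:value.
        fmt.Println(gitlab.Stringify(h))
        // e.g. gitlab.Hook{ID:1, URL:"https://example.com/hook", PushEvents:false, ...}
    }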
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/system_hooks.html#get-system-hook -func (s *SystemHooksService) GetHook(hook int, options ...RequestOptionFunc) (*Hook, *Response, error) { - u := fmt.Sprintf("hooks/%d", hook) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var h *Hook - resp, err := s.client.Do(req, &h) - if err != nil { - return nil, resp, err - } - - return h, resp, nil -} - -// AddHookOptions represents the available AddHook() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/system_hooks.html#add-new-system-hook -type AddHookOptions struct { - URL *string `url:"url,omitempty" json:"url,omitempty"` - Token *string `url:"token,omitempty" json:"token,omitempty"` - PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"` - TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"` - MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"` - RepositoryUpdateEvents *bool `url:"repository_update_events,omitempty" json:"repository_update_events,omitempty"` - EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"` -} - -// AddHook adds a new system hook. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/system_hooks.html#add-new-system-hook -func (s *SystemHooksService) AddHook(opt *AddHookOptions, options ...RequestOptionFunc) (*Hook, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "hooks", opt, options) - if err != nil { - return nil, nil, err - } - - h := new(Hook) - resp, err := s.client.Do(req, h) - if err != nil { - return nil, resp, err - } - - return h, resp, nil -} - -// HookEvent represents an event triggered by a GitLab system hook. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/system_hooks.html -type HookEvent struct { - EventName string `json:"event_name"` - Name string `json:"name"` - Path string `json:"path"` - ProjectID int `json:"project_id"` - OwnerName string `json:"owner_name"` - OwnerEmail string `json:"owner_email"` -} - -func (h HookEvent) String() string { - return Stringify(h) -} - -// TestHook tests a system hook. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/system_hooks.html#test-system-hook -func (s *SystemHooksService) TestHook(hook int, options ...RequestOptionFunc) (*HookEvent, *Response, error) { - u := fmt.Sprintf("hooks/%d", hook) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - h := new(HookEvent) - resp, err := s.client.Do(req, h) - if err != nil { - return nil, resp, err - } - - return h, resp, nil -} - -// DeleteHook deletes a system hook. This is an idempotent API function and -// returns 200 OK even if the hook is not available. If the hook is deleted it -// is also returned as JSON.
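For completeness, the removed system-hooks surface composes as follows; the token and receiver URL are hypothetical:

    package main

    import (
        "fmt"
        "log"

        gitlab "github.com/xanzy/go-gitlab"
    )

    func main() {
        git, err := gitlab.NewClient("glpat-hypothetical") // hypothetical admin token
        if err != nil {
            log.Fatal(err)
        }
        hook, _, err := git.SystemHooks.AddHook(&gitlab.AddHookOptions{
            URL:        gitlab.Ptr("https://ci.example.com/system-hook"), // hypothetical receiver
            PushEvents: gitlab.Ptr(true),
        })
        if err != nil {
            log.Fatal(err)
        }
        // TestHook fires a synthetic event at the registered receiver.
        event, _, err := git.SystemHooks.TestHook(hook.ID)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(event.EventName)
    }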
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/system_hooks.html#delete-system-hook -func (s *SystemHooksService) DeleteHook(hook int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("hooks/%d", hook) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/tags.go b/vendor/github.com/xanzy/go-gitlab/tags.go deleted file mode 100644 index 18a710d185..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/tags.go +++ /dev/null @@ -1,248 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "net/url" -) - -// TagsService handles communication with the tags related methods -// of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/tags.html -type TagsService struct { - client *Client -} - -// Tag represents a GitLab tag. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/tags.html -type Tag struct { - Commit *Commit `json:"commit"` - Release *ReleaseNote `json:"release"` - Name string `json:"name"` - Message string `json:"message"` - Protected bool `json:"protected"` - Target string `json:"target"` -} - -// ReleaseNote represents a GitLab version release. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/tags.html -type ReleaseNote struct { - TagName string `json:"tag_name"` - Description string `json:"description"` -} - -func (t Tag) String() string { - return Stringify(t) -} - -// ListTagsOptions represents the available ListTags() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/tags.html#list-project-repository-tags -type ListTagsOptions struct { - ListOptions - OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` - Search *string `url:"search,omitempty" json:"search,omitempty"` - Sort *string `url:"sort,omitempty" json:"sort,omitempty"` -} - -// ListTags gets a list of tags from a project, sorted by name in reverse -// alphabetical order. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/tags.html#list-project-repository-tags -func (s *TagsService) ListTags(pid interface{}, opt *ListTagsOptions, options ...RequestOptionFunc) ([]*Tag, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/tags", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var t []*Tag - resp, err := s.client.Do(req, &t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// GetTag gets a specific repository tag determined by its name. It returns 200 together -// with the tag information if the tag exists. It returns 404 if the tag does not exist.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/tags.html#get-a-single-repository-tag -func (s *TagsService) GetTag(pid interface{}, tag string, options ...RequestOptionFunc) (*Tag, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/tags/%s", PathEscape(project), url.PathEscape(tag)) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var t *Tag - resp, err := s.client.Do(req, &t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// CreateTagOptions represents the available CreateTag() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/tags.html#create-a-new-tag -type CreateTagOptions struct { - TagName *string `url:"tag_name,omitempty" json:"tag_name,omitempty"` - Ref *string `url:"ref,omitempty" json:"ref,omitempty"` - Message *string `url:"message,omitempty" json:"message,omitempty"` - - // Deprecated: Use the Releases API instead. (Deprecated in GitLab 11.7) - ReleaseDescription *string `url:"release_description,omitempty" json:"release_description,omitempty"` -} - -// CreateTag creates a new tag in the repository that points to the supplied ref. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/tags.html#create-a-new-tag -func (s *TagsService) CreateTag(pid interface{}, opt *CreateTagOptions, options ...RequestOptionFunc) (*Tag, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/tags", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - t := new(Tag) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// DeleteTag deletes a tag of a repository with given name. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/tags.html#delete-a-tag -func (s *TagsService) DeleteTag(pid interface{}, tag string, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/repository/tags/%s", PathEscape(project), url.PathEscape(tag)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// CreateReleaseNoteOptions represents the available CreateReleaseNote() options. -// -// Deprecated: This feature was deprecated in GitLab 11.7. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/tags.html#create-a-new-release -type CreateReleaseNoteOptions struct { - Description *string `url:"description,omitempty" json:"description,omitempty"` -} - -// CreateReleaseNote adds release notes to the existing git tag. -// If there already exists a release for the given tag, status code 409 is returned. -// -// Deprecated: This feature was deprecated in GitLab 11.7.
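A quick sketch of the removed tag API; pid accepts either a numeric project ID or a "group/project" path, both routed through parseID/PathEscape above. Token and project are hypothetical:

    package main

    import (
        "fmt"
        "log"

        gitlab "github.com/xanzy/go-gitlab"
    )

    func main() {
        git, err := gitlab.NewClient("glpat-hypothetical") // hypothetical token
        if err != nil {
            log.Fatal(err)
        }
        // "my-group/my-project" is a hypothetical project path; an int ID works too.
        tag, _, err := git.Tags.CreateTag("my-group/my-project", &gitlab.CreateTagOptions{
            TagName: gitlab.Ptr("v1.0.0"),
            Ref:     gitlab.Ptr("main"),
            Message: gitlab.Ptr("release v1.0.0"), // a message makes it an annotated tag
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(tag.Name, tag.Target)
    }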
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/tags.html#create-a-new-release -func (s *TagsService) CreateReleaseNote(pid interface{}, tag string, opt *CreateReleaseNoteOptions, options ...RequestOptionFunc) (*ReleaseNote, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/tags/%s/release", PathEscape(project), url.PathEscape(tag)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - r := new(ReleaseNote) - resp, err := s.client.Do(req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// UpdateReleaseNoteOptions represents the available UpdateReleaseNote() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/tags.html#update-a-release -type UpdateReleaseNoteOptions struct { - Description *string `url:"description,omitempty" json:"description,omitempty"` -} - -// UpdateReleaseNote updates the release notes of a given release. -// -// Deprecated: This feature was deprecated in GitLab 11.7. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/tags.html#update-a-release -func (s *TagsService) UpdateReleaseNote(pid interface{}, tag string, opt *UpdateReleaseNoteOptions, options ...RequestOptionFunc) (*ReleaseNote, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/repository/tags/%s/release", PathEscape(project), url.PathEscape(tag)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - r := new(ReleaseNote) - resp, err := s.client.Do(req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/time_stats.go b/vendor/github.com/xanzy/go-gitlab/time_stats.go deleted file mode 100644 index 0ce2d6751f..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/time_stats.go +++ /dev/null @@ -1,180 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" -) - -// timeStatsService handles communication with the time tracking related -// methods of the GitLab API. -// -// GitLab docs: https://docs.gitlab.com/ee/workflow/time_tracking.html -type timeStatsService struct { - client *Client -} - -// TimeStats represents the time estimates and time spent for an issue. -// -// GitLab docs: https://docs.gitlab.com/ee/workflow/time_tracking.html -type TimeStats struct { - HumanTimeEstimate string `json:"human_time_estimate"` - HumanTotalTimeSpent string `json:"human_total_time_spent"` - TimeEstimate int `json:"time_estimate"` - TotalTimeSpent int `json:"total_time_spent"` -} - -func (t TimeStats) String() string { - return Stringify(t) -} - -// SetTimeEstimateOptions represents the available SetTimeEstimate() -// options.
-// -// GitLab docs: https://docs.gitlab.com/ee/workflow/time_tracking.html -type SetTimeEstimateOptions struct { - Duration *string `url:"duration,omitempty" json:"duration,omitempty"` -} - -// setTimeEstimate sets the time estimate for a single project issue. -// -// GitLab docs: https://docs.gitlab.com/ee/workflow/time_tracking.html -func (s *timeStatsService) setTimeEstimate(pid interface{}, entity string, issue int, opt *SetTimeEstimateOptions, options ...RequestOptionFunc) (*TimeStats, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/%s/%d/time_estimate", PathEscape(project), entity, issue) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - t := new(TimeStats) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// resetTimeEstimate resets the time estimate for a single project issue. -// -// GitLab docs: https://docs.gitlab.com/ee/workflow/time_tracking.html -func (s *timeStatsService) resetTimeEstimate(pid interface{}, entity string, issue int, options ...RequestOptionFunc) (*TimeStats, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/%s/%d/reset_time_estimate", PathEscape(project), entity, issue) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - t := new(TimeStats) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// AddSpentTimeOptions represents the available AddSpentTime() options. -// -// GitLab docs: https://docs.gitlab.com/ee/workflow/time_tracking.html -type AddSpentTimeOptions struct { - Duration *string `url:"duration,omitempty" json:"duration,omitempty"` - Summary *string `url:"summary,omitempty" json:"summary,omitempty"` -} - -// addSpentTime adds spent time for a single project issue. -// -// GitLab docs: https://docs.gitlab.com/ee/workflow/time_tracking.html -func (s *timeStatsService) addSpentTime(pid interface{}, entity string, issue int, opt *AddSpentTimeOptions, options ...RequestOptionFunc) (*TimeStats, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/%s/%d/add_spent_time", PathEscape(project), entity, issue) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - t := new(TimeStats) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// resetSpentTime resets the spent time for a single project issue. -// -// GitLab docs: https://docs.gitlab.com/ee/workflow/time_tracking.html -func (s *timeStatsService) resetSpentTime(pid interface{}, entity string, issue int, options ...RequestOptionFunc) (*TimeStats, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/%s/%d/reset_spent_time", PathEscape(project), entity, issue) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, nil, err - } - - t := new(TimeStats) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// getTimeSpent gets the spent time for a single project issue. 
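The four helpers above are unexported on purpose: timeStatsService is shared plumbing, parameterised by an entity path segment such as "issues" or "merge_requests". Assuming the exported wrapper on IssuesService keeps its upstream shape (it is not part of this hunk), usage looks roughly like:

    package main

    import (
        "fmt"
        "log"

        gitlab "github.com/xanzy/go-gitlab"
    )

    func main() {
        git, err := gitlab.NewClient("glpat-hypothetical") // hypothetical token
        if err != nil {
            log.Fatal(err)
        }
        // Hypothetical project path and issue IID; the exported wrapper is assumed
        // to delegate to setTimeEstimate(pid, "issues", issue, opt) as upstream does.
        ts, _, err := git.Issues.SetTimeEstimate("my-group/my-project", 42,
            &gitlab.SetTimeEstimateOptions{Duration: gitlab.Ptr("3h30m")})
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(ts.HumanTimeEstimate)
    }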
-// -// GitLab docs: https://docs.gitlab.com/ee/workflow/time_tracking.html -func (s *timeStatsService) getTimeSpent(pid interface{}, entity string, issue int, options ...RequestOptionFunc) (*TimeStats, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/%s/%d/time_stats", PathEscape(project), entity, issue) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - t := new(TimeStats) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/todos.go b/vendor/github.com/xanzy/go-gitlab/todos.go deleted file mode 100644 index 2e26b70779..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/todos.go +++ /dev/null @@ -1,163 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" - "time" -) - -// TodosService handles communication with the todos related methods of -// the Gitlab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/todos.html -type TodosService struct { - client *Client -} - -// Todo represents a GitLab todo. 
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/todos.html -type Todo struct { - ID int `json:"id"` - Project *BasicProject `json:"project"` - Author *BasicUser `json:"author"` - ActionName TodoAction `json:"action_name"` - TargetType TodoTargetType `json:"target_type"` - Target *TodoTarget `json:"target"` - TargetURL string `json:"target_url"` - Body string `json:"body"` - State string `json:"state"` - CreatedAt *time.Time `json:"created_at"` -} - -func (t Todo) String() string { - return Stringify(t) -} - -// TodoTarget represents a todo target of type Issue or MergeRequest -type TodoTarget struct { - Assignees []*BasicUser `json:"assignees"` - Assignee *BasicUser `json:"assignee"` - Author *BasicUser `json:"author"` - CreatedAt *time.Time `json:"created_at"` - Description string `json:"description"` - Downvotes int `json:"downvotes"` - ID interface{} `json:"id"` - IID int `json:"iid"` - Labels []string `json:"labels"` - Milestone *Milestone `json:"milestone"` - ProjectID int `json:"project_id"` - State string `json:"state"` - Subscribed bool `json:"subscribed"` - TaskCompletionStatus *TasksCompletionStatus `json:"task_completion_status"` - Title string `json:"title"` - UpdatedAt *time.Time `json:"updated_at"` - Upvotes int `json:"upvotes"` - UserNotesCount int `json:"user_notes_count"` - WebURL string `json:"web_url"` - - // Only available for type Issue - Confidential bool `json:"confidential"` - DueDate string `json:"due_date"` - HasTasks bool `json:"has_tasks"` - Links *IssueLinks `json:"_links"` - MovedToID int `json:"moved_to_id"` - TimeStats *TimeStats `json:"time_stats"` - Weight int `json:"weight"` - - // Only available for type MergeRequest - MergedAt *time.Time `json:"merged_at"` - ApprovalsBeforeMerge int `json:"approvals_before_merge"` - ForceRemoveSourceBranch bool `json:"force_remove_source_branch"` - MergeCommitSHA string `json:"merge_commit_sha"` - MergeWhenPipelineSucceeds bool `json:"merge_when_pipeline_succeeds"` - MergeStatus string `json:"merge_status"` - Reference string `json:"reference"` - Reviewers []*BasicUser `json:"reviewers"` - SHA string `json:"sha"` - ShouldRemoveSourceBranch bool `json:"should_remove_source_branch"` - SourceBranch string `json:"source_branch"` - SourceProjectID int `json:"source_project_id"` - Squash bool `json:"squash"` - TargetBranch string `json:"target_branch"` - TargetProjectID int `json:"target_project_id"` - WorkInProgress bool `json:"work_in_progress"` - - // Only available for type DesignManagement::Design - FileName string `json:"filename"` - ImageURL string `json:"image_url"` -} - -// ListTodosOptions represents the available ListTodos() options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/todos.html#get-a-list-of-to-do-items -type ListTodosOptions struct { - ListOptions - Action *TodoAction `url:"action,omitempty" json:"action,omitempty"` - AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` - ProjectID *int `url:"project_id,omitempty" json:"project_id,omitempty"` - State *string `url:"state,omitempty" json:"state,omitempty"` - Type *string `url:"type,omitempty" json:"type,omitempty"` -} - -// ListTodos lists all todos created by authenticated user. -// When no filter is applied, it returns all pending todos for the current user. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/todos.html#get-a-list-of-to-do-items -func (s *TodosService) ListTodos(opt *ListTodosOptions, options ...RequestOptionFunc) ([]*Todo, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "todos", opt, options) - if err != nil { - return nil, nil, err - } - - var t []*Todo - resp, err := s.client.Do(req, &t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// MarkTodoAsDone marks a single pending todo given by its ID for the current user as done. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/todos.html#mark-a-to-do-item-as-done -func (s *TodosService) MarkTodoAsDone(id int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("todos/%d/mark_as_done", id) - - req, err := s.client.NewRequest(http.MethodPost, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// MarkAllTodosAsDone marks all pending todos for the current user as done. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/todos.html#mark-all-to-do-items-as-done -func (s *TodosService) MarkAllTodosAsDone(options ...RequestOptionFunc) (*Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "todos/mark_as_done", nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/topics.go b/vendor/github.com/xanzy/go-gitlab/topics.go deleted file mode 100644 index 719589f589..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/topics.go +++ /dev/null @@ -1,222 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - - retryablehttp "github.com/hashicorp/go-retryablehttp" -) - -// TopicsService handles communication with the topics related methods -// of the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/topics.html -type TopicsService struct { - client *Client -} - -// Topic represents a GitLab project topic. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/topics.html -type Topic struct { - ID int `json:"id"` - Name string `json:"name"` - Title string `json:"title"` - Description string `json:"description"` - TotalProjectsCount uint64 `json:"total_projects_count"` - AvatarURL string `json:"avatar_url"` -} - -func (t Topic) String() string { - return Stringify(t) -} - -// ListTopicsOptions represents the available ListTopics() options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/topics.html#list-topics -type ListTopicsOptions struct { - ListOptions - Search *string `url:"search,omitempty" json:"search,omitempty"` -} - -// ListTopics returns a list of project topics in the GitLab instance ordered -// by number of associated projects. 
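Sketch of the removed todos flow, listing pending items and acknowledging them; the token is hypothetical:

    package main

    import (
        "fmt"
        "log"

        gitlab "github.com/xanzy/go-gitlab"
    )

    func main() {
        git, err := gitlab.NewClient("glpat-hypothetical") // hypothetical token
        if err != nil {
            log.Fatal(err)
        }
        todos, _, err := git.Todos.ListTodos(&gitlab.ListTodosOptions{
            State: gitlab.Ptr("pending"),
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, t := range todos {
            fmt.Println(t.ID, t.ActionName, t.TargetURL)
            // Marking items done one by one; MarkAllTodosAsDone clears the whole queue instead.
            if _, err := git.Todos.MarkTodoAsDone(t.ID); err != nil {
                log.Fatal(err)
            }
        }
    }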
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/topics.html#list-topics -func (s *TopicsService) ListTopics(opt *ListTopicsOptions, options ...RequestOptionFunc) ([]*Topic, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "topics", opt, options) - if err != nil { - return nil, nil, err - } - - var t []*Topic - resp, err := s.client.Do(req, &t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// GetTopic gets a project topic by ID. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/topics.html#get-a-topic -func (s *TopicsService) GetTopic(topic int, options ...RequestOptionFunc) (*Topic, *Response, error) { - u := fmt.Sprintf("topics/%d", topic) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - t := new(Topic) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// CreateTopicOptions represents the available CreateTopic() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/topics.html#create-a-project-topic -type CreateTopicOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - Title *string `url:"title,omitempty" json:"title,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - Avatar *TopicAvatar `url:"-" json:"-"` -} - -// TopicAvatar represents a GitLab topic avatar. -type TopicAvatar struct { - Filename string - Image io.Reader -} - -// MarshalJSON implements the json.Marshaler interface. -func (a *TopicAvatar) MarshalJSON() ([]byte, error) { - if a.Filename == "" && a.Image == nil { - return []byte(`""`), nil - } - type alias TopicAvatar - return json.Marshal((*alias)(a)) -} - -// CreateTopic creates a new project topic. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/topics.html#create-a-project-topic -func (s *TopicsService) CreateTopic(opt *CreateTopicOptions, options ...RequestOptionFunc) (*Topic, *Response, error) { - var err error - var req *retryablehttp.Request - - if opt.Avatar == nil { - req, err = s.client.NewRequest(http.MethodPost, "topics", opt, options) - } else { - req, err = s.client.UploadRequest( - http.MethodPost, - "topics", - opt.Avatar.Image, - opt.Avatar.Filename, - UploadAvatar, - opt, - options, - ) - } - if err != nil { - return nil, nil, err - } - - t := new(Topic) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// UpdateTopicOptions represents the available UpdateTopic() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/topics.html#update-a-project-topic -type UpdateTopicOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - Title *string `url:"title,omitempty" json:"title,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - Avatar *TopicAvatar `url:"-" json:"avatar,omitempty"` -} - -// UpdateTopic updates a project topic. Only available to administrators. -// -// To remove a topic avatar set the TopicAvatar.Filename to an empty string -// and set TopicAvatar.Image to nil. 
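The avatar handling above is the notable design choice: a non-nil Avatar flips CreateTopic from a plain JSON POST to a multipart upload via UploadRequest. A hedged sketch (token and image file are hypothetical):

    package main

    import (
        "fmt"
        "log"
        "os"

        gitlab "github.com/xanzy/go-gitlab"
    )

    func main() {
        git, err := gitlab.NewClient("glpat-hypothetical") // hypothetical admin token
        if err != nil {
            log.Fatal(err)
        }
        f, err := os.Open("avatar.png") // hypothetical image file
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()
        topic, _, err := git.Topics.CreateTopic(&gitlab.CreateTopicOptions{
            Name:  gitlab.Ptr("kubernetes"),
            Title: gitlab.Ptr("Kubernetes"),
            // A non-nil Avatar switches the request to a multipart upload.
            Avatar: &gitlab.TopicAvatar{Filename: "avatar.png", Image: f},
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(topic.ID, topic.AvatarURL)
    }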
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/topics.html#update-a-project-topic -func (s *TopicsService) UpdateTopic(topic int, opt *UpdateTopicOptions, options ...RequestOptionFunc) (*Topic, *Response, error) { - u := fmt.Sprintf("topics/%d", topic) - - var err error - var req *retryablehttp.Request - - if opt.Avatar == nil || (opt.Avatar.Filename == "" && opt.Avatar.Image == nil) { - req, err = s.client.NewRequest(http.MethodPut, u, opt, options) - } else { - req, err = s.client.UploadRequest( - http.MethodPut, - u, - opt.Avatar.Image, - opt.Avatar.Filename, - UploadAvatar, - opt, - options, - ) - } - if err != nil { - return nil, nil, err - } - - t := new(Topic) - resp, err := s.client.Do(req, t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// DeleteTopic deletes a project topic. Only available to administrators. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/topics.html#delete-a-project-topic -func (s *TopicsService) DeleteTopic(topic int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("topics/%d", topic) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/xanzy/go-gitlab/types.go b/vendor/github.com/xanzy/go-gitlab/types.go deleted file mode 100644 index 9ce13d735c..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/types.go +++ /dev/null @@ -1,979 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "net/url" - "reflect" - "strconv" - "strings" - "time" -) - -// Ptr is a helper that returns a pointer to v. -func Ptr[T any](v T) *T { - return &v -} - -// AccessControlValue represents an access control value within GitLab, -// used for managing access to certain project features. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html -type AccessControlValue string - -// List of available access control values. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html -const ( - DisabledAccessControl AccessControlValue = "disabled" - EnabledAccessControl AccessControlValue = "enabled" - PrivateAccessControl AccessControlValue = "private" - PublicAccessControl AccessControlValue = "public" -) - -// AccessControl is a helper routine that allocates a new AccessControlValue -// to store v and returns a pointer to it. -// -// Deprecated: Please use Ptr instead. -func AccessControl(v AccessControlValue) *AccessControlValue { - return Ptr(v) -} - -// AccessLevelValue represents a permission level within GitLab. -// -// GitLab API docs: https://docs.gitlab.com/ee/user/permissions.html -type AccessLevelValue int - -// List of available access levels. 
-// -// GitLab API docs: https://docs.gitlab.com/ee/user/permissions.html -const ( - NoPermissions AccessLevelValue = 0 - MinimalAccessPermissions AccessLevelValue = 5 - GuestPermissions AccessLevelValue = 10 - ReporterPermissions AccessLevelValue = 20 - DeveloperPermissions AccessLevelValue = 30 - MaintainerPermissions AccessLevelValue = 40 - OwnerPermissions AccessLevelValue = 50 - AdminPermissions AccessLevelValue = 60 - - // Deprecated: Renamed to MaintainerPermissions in GitLab 11.0. - MasterPermissions AccessLevelValue = 40 - // Deprecated: Renamed to OwnerPermissions. - OwnerPermission AccessLevelValue = 50 -) - -// AccessLevel is a helper routine that allocates a new AccessLevelValue -// to store v and returns a pointer to it. -// -// Deprecated: Please use Ptr instead. -func AccessLevel(v AccessLevelValue) *AccessLevelValue { - return Ptr(v) -} - -// UserIDValue represents a user ID value within GitLab. -type UserIDValue string - -// List of available user ID values. -const ( - UserIDAny UserIDValue = "Any" - UserIDNone UserIDValue = "None" -) - -// ApproverIDsValue represents an approver ID value within GitLab. -type ApproverIDsValue struct { - value interface{} -} - -// ApproverIDs is a helper routine that creates a new ApproverIDsValue. -func ApproverIDs(v interface{}) *ApproverIDsValue { - switch v.(type) { - case UserIDValue, []int: - return &ApproverIDsValue{value: v} - default: - panic("Unsupported value passed as approver ID") - } -} - -// EncodeValues implements the query.Encoder interface. -func (a *ApproverIDsValue) EncodeValues(key string, v *url.Values) error { - switch value := a.value.(type) { - case UserIDValue: - v.Set(key, string(value)) - case []int: - v.Del(key) - v.Del(key + "[]") - for _, id := range value { - v.Add(key+"[]", strconv.Itoa(id)) - } - } - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (a ApproverIDsValue) MarshalJSON() ([]byte, error) { - return json.Marshal(a.value) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (a *ApproverIDsValue) UnmarshalJSON(bytes []byte) error { - return json.Unmarshal(bytes, a.value) -} - -// AssigneeIDValue represents an assignee ID value within GitLab. -type AssigneeIDValue struct { - value interface{} -} - -// AssigneeID is a helper routine that creates a new AssigneeIDValue. -func AssigneeID(v interface{}) *AssigneeIDValue { - switch v.(type) { - case UserIDValue, int: - return &AssigneeIDValue{value: v} - default: - panic("Unsupported value passed as assignee ID") - } -} - -// EncodeValues implements the query.Encoder interface. -func (a *AssigneeIDValue) EncodeValues(key string, v *url.Values) error { - switch value := a.value.(type) { - case UserIDValue: - v.Set(key, string(value)) - case int: - v.Set(key, strconv.Itoa(value)) - } - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (a AssigneeIDValue) MarshalJSON() ([]byte, error) { - return json.Marshal(a.value) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (a *AssigneeIDValue) UnmarshalJSON(bytes []byte) error { - return json.Unmarshal(bytes, a.value) -} - -// ReviewerIDValue represents a reviewer ID value within GitLab. -type ReviewerIDValue struct { - value interface{} -} - -// ReviewerID is a helper routine that creates a new ReviewerIDValue. 
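ApproverIDsValue and AssigneeIDValue exist so a single option field can encode either a sentinel string or concrete IDs. A self-contained sketch of the query encoding implemented above:

    package main

    import (
        "fmt"
        "net/url"

        gitlab "github.com/xanzy/go-gitlab"
    )

    func main() {
        v := url.Values{}

        // A slice of IDs fans out into repeated key[] parameters.
        _ = gitlab.ApproverIDs([]int{7, 12}).EncodeValues("approver_ids", &v)

        // The sentinel UserIDNone collapses to a single literal value.
        _ = gitlab.AssigneeID(gitlab.UserIDNone).EncodeValues("assignee_id", &v)

        fmt.Println(v) // map[approver_ids[]:[7 12] assignee_id:[None]]
    }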
-func ReviewerID(v interface{}) *ReviewerIDValue { - switch v.(type) { - case UserIDValue, int: - return &ReviewerIDValue{value: v} - default: - panic("Unsupported value passed as reviewer ID") - } -} - -// EncodeValues implements the query.Encoder interface. -func (a *ReviewerIDValue) EncodeValues(key string, v *url.Values) error { - switch value := a.value.(type) { - case UserIDValue: - v.Set(key, string(value)) - case int: - v.Set(key, strconv.Itoa(value)) - } - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (a ReviewerIDValue) MarshalJSON() ([]byte, error) { - return json.Marshal(a.value) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (a *ReviewerIDValue) UnmarshalJSON(bytes []byte) error { - return json.Unmarshal(bytes, a.value) -} - -// AvailabilityValue represents an availability value within GitLab. -type AvailabilityValue string - -// List of available availability values. -// -// Undocumented, see code at: -// https://gitlab.com/gitlab-org/gitlab-foss/-/blob/master/app/models/user_status.rb#L22 -const ( - NotSet AvailabilityValue = "not_set" - Busy AvailabilityValue = "busy" -) - -// Availability is a helper routine that allocates a new AvailabilityValue -// to store v and returns a pointer to it. -// -// Deprecated: Please use Ptr instead. -func Availability(v AvailabilityValue) *AvailabilityValue { - return Ptr(v) -} - -// BuildStateValue represents a GitLab build state. -type BuildStateValue string - -// These constants represent all valid build states. -const ( - Created BuildStateValue = "created" - WaitingForResource BuildStateValue = "waiting_for_resource" - Preparing BuildStateValue = "preparing" - Pending BuildStateValue = "pending" - Running BuildStateValue = "running" - Success BuildStateValue = "success" - Failed BuildStateValue = "failed" - Canceled BuildStateValue = "canceled" - Skipped BuildStateValue = "skipped" - Manual BuildStateValue = "manual" - Scheduled BuildStateValue = "scheduled" -) - -// BuildState is a helper routine that allocates a new BuildStateValue -// to store v and returns a pointer to it. -// -// Deprecated: Please use Ptr instead. -func BuildState(v BuildStateValue) *BuildStateValue { - return Ptr(v) -} - -// CommentEventAction identifies if a comment has been newly created or updated. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#comment-events -type CommentEventAction string - -const ( - CommentEventActionCreate CommentEventAction = "create" - CommentEventActionUpdate CommentEventAction = "update" -) - -// ContainerRegistryStatus represents the status of a Container Registry. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/container_registry.html#list-registry-repositories -type ContainerRegistryStatus string - -// ContainerRegistryStatus represents all valid statuses of a Container Registry. -// -// Undocumented, see code at: -// https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/models/container_repository.rb?ref_type=heads#L35 -const ( - ContainerRegistryStatusDeleteScheduled ContainerRegistryStatus = "delete_scheduled" - ContainerRegistryStatusDeleteFailed ContainerRegistryStatus = "delete_failed" - ContainerRegistryStatusDeleteOngoing ContainerRegistryStatus = "delete_ongoing" -) - -// DeploymentApprovalStatus represents a Gitlab deployment approval status. -type DeploymentApprovalStatus string - -// These constants represent all valid deployment approval statuses.
-const ( - DeploymentApprovalStatusApproved DeploymentApprovalStatus = "approved" - DeploymentApprovalStatusRejected DeploymentApprovalStatus = "rejected" -) - -// DeploymentStatusValue represents a Gitlab deployment status. -type DeploymentStatusValue string - -// These constants represent all valid deployment statuses. -const ( - DeploymentStatusCreated DeploymentStatusValue = "created" - DeploymentStatusRunning DeploymentStatusValue = "running" - DeploymentStatusSuccess DeploymentStatusValue = "success" - DeploymentStatusFailed DeploymentStatusValue = "failed" - DeploymentStatusCanceled DeploymentStatusValue = "canceled" -) - -// DeploymentStatus is a helper routine that allocates a new -// DeploymentStatusValue to store v and returns a pointer to it. -// -// Deprecated: Please use Ptr instead. -func DeploymentStatus(v DeploymentStatusValue) *DeploymentStatusValue { - return Ptr(v) -} - -// DORAMetricType represents all valid DORA metrics types. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/dora/metrics.html -type DORAMetricType string - -// List of available DORA metric type names. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/dora/metrics.html -const ( - DORAMetricDeploymentFrequency DORAMetricType = "deployment_frequency" - DORAMetricLeadTimeForChanges DORAMetricType = "lead_time_for_changes" - DORAMetricTimeToRestoreService DORAMetricType = "time_to_restore_service" - DORAMetricChangeFailureRate DORAMetricType = "change_failure_rate" -) - -// DORAMetricInterval represents the time period over which the -// metrics are aggregated. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/dora/metrics.html -type DORAMetricInterval string - -// List of available DORA metric interval types. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/dora/metrics.html -const ( - DORAMetricIntervalDaily DORAMetricInterval = "daily" - DORAMetricIntervalMonthly DORAMetricInterval = "monthly" - DORAMetricIntervalAll DORAMetricInterval = "all" -) - -// EventTypeValue represents an action type for contribution events. -type EventTypeValue string - -// List of available action types. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/user/profile/contributions_calendar.html#user-contribution-events -const ( - CreatedEventType EventTypeValue = "created" - UpdatedEventType EventTypeValue = "updated" - ClosedEventType EventTypeValue = "closed" - ReopenedEventType EventTypeValue = "reopened" - PushedEventType EventTypeValue = "pushed" - CommentedEventType EventTypeValue = "commented" - MergedEventType EventTypeValue = "merged" - JoinedEventType EventTypeValue = "joined" - LeftEventType EventTypeValue = "left" - DestroyedEventType EventTypeValue = "destroyed" - ExpiredEventType EventTypeValue = "expired" -) - -// EventTargetTypeValue represents an action target type value for contribution events. -type EventTargetTypeValue string - -// List of available action types. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/events.html#target-types -const ( - IssueEventTargetType EventTargetTypeValue = "issue" - MilestoneEventTargetType EventTargetTypeValue = "milestone" - MergeRequestEventTargetType EventTargetTypeValue = "merge_request" - NoteEventTargetType EventTargetTypeValue = "note" - ProjectEventTargetType EventTargetTypeValue = "project" - SnippetEventTargetType EventTargetTypeValue = "snippet" - UserEventTargetType EventTargetTypeValue = "user" -) - -// FileActionValue represents the available actions that can be performed on a file.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/commits.html#create-a-commit-with-multiple-files-and-actions -type FileActionValue string - -// The available file actions. -const ( - FileCreate FileActionValue = "create" - FileDelete FileActionValue = "delete" - FileMove FileActionValue = "move" - FileUpdate FileActionValue = "update" - FileChmod FileActionValue = "chmod" -) - -// FileAction is a helper routine that allocates a new FileActionValue value -// to store v and returns a pointer to it. -// -// Deprecated: Please use Ptr instead. -func FileAction(v FileActionValue) *FileActionValue { - return Ptr(v) -} - -// GenericPackageSelectValue represents a generic package select value. -type GenericPackageSelectValue string - -// The available generic package select values. -const ( - SelectPackageFile GenericPackageSelectValue = "package_file" -) - -// GenericPackageSelect is a helper routine that allocates a new -// GenericPackageSelectValue value to store v and returns a pointer to it. -// -// Deprecated: Please use Ptr instead. -func GenericPackageSelect(v GenericPackageSelectValue) *GenericPackageSelectValue { - return Ptr(v) -} - -// GenericPackageStatusValue represents a generic package status. -type GenericPackageStatusValue string - -// The available generic package statuses. -const ( - PackageDefault GenericPackageStatusValue = "default" - PackageHidden GenericPackageStatusValue = "hidden" -) - -// GenericPackageStatus is a helper routine that allocates a new -// GenericPackageStatusValue value to store v and returns a pointer to it. -// -// Deprecated: Please use Ptr instead. -func GenericPackageStatus(v GenericPackageStatusValue) *GenericPackageStatusValue { - return Ptr(v) -} - -// ISOTime represents an ISO 8601 formatted date. -type ISOTime time.Time - -// ISO 8601 date format. -const iso8601 = "2006-01-02" - -// ParseISOTime parses an ISO 8601 formatted date. -func ParseISOTime(s string) (ISOTime, error) { - t, err := time.Parse(iso8601, s) - return ISOTime(t), err -} - -// MarshalJSON implements the json.Marshaler interface. -func (t ISOTime) MarshalJSON() ([]byte, error) { - if reflect.ValueOf(t).IsZero() { - return []byte(`null`), nil - } - - if y := time.Time(t).Year(); y < 0 || y >= 10000 { - // ISO 8601 uses 4 digits for the years. - return nil, errors.New("json: ISOTime year outside of range [0,9999]") - } - - b := make([]byte, 0, len(iso8601)+2) - b = append(b, '"') - b = time.Time(t).AppendFormat(b, iso8601) - b = append(b, '"') - - return b, nil -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (t *ISOTime) UnmarshalJSON(data []byte) error { - // Ignore null, like in the main JSON package. - if string(data) == "null" { - return nil - } - - isotime, err := time.Parse(`"`+iso8601+`"`, string(data)) - *t = ISOTime(isotime) - - return err -} - -// EncodeValues implements the query.Encoder interface. -func (t *ISOTime) EncodeValues(key string, v *url.Values) error { - if t == nil || (time.Time(*t)).IsZero() { - return nil - } - v.Add(key, t.String()) - return nil -} - -// String implements the Stringer interface. -func (t ISOTime) String() string { - return time.Time(t).Format(iso8601) -} - -// Labels represents a list of labels. -type Labels []string - -// LabelOptions is a custom type with specific marshaling characteristics. -type LabelOptions []string - -// MarshalJSON implements the json.Marshaler interface.
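ISOTime above is a date-only wrapper around time.Time with custom JSON and query encoding. A short round-trip sketch:

    package main

    import (
        "encoding/json"
        "fmt"
        "log"

        gitlab "github.com/xanzy/go-gitlab"
    )

    func main() {
        day, err := gitlab.ParseISOTime("2025-01-20")
        if err != nil {
            log.Fatal(err)
        }
        b, err := json.Marshal(day)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(string(b)) // "2025-01-20" (date only, no time component)

        var zero gitlab.ISOTime
        b, _ = json.Marshal(zero)
        fmt.Println(string(b)) // null: the zero value marshals to JSON null
    }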
-func (l *LabelOptions) MarshalJSON() ([]byte, error) { - if *l == nil { - return []byte(`null`), nil - } - return json.Marshal(strings.Join(*l, ",")) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (l *LabelOptions) UnmarshalJSON(data []byte) error { - type alias LabelOptions - if !bytes.HasPrefix(data, []byte("[")) { - data = []byte(fmt.Sprintf("[%s]", string(data))) - } - return json.Unmarshal(data, (*alias)(l)) -} - -// EncodeValues implements the query.EncodeValues interface. -func (l *LabelOptions) EncodeValues(key string, v *url.Values) error { - v.Set(key, strings.Join(*l, ",")) - return nil -} - -// LinkTypeValue represents a release link type. -type LinkTypeValue string - -// List of available release link types. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/releases/links.html#create-a-release-link -const ( - ImageLinkType LinkTypeValue = "image" - OtherLinkType LinkTypeValue = "other" - PackageLinkType LinkTypeValue = "package" - RunbookLinkType LinkTypeValue = "runbook" -) - -// LinkType is a helper routine that allocates a new LinkType value -// to store v and returns a pointer to it. -// -// Deprecated: Please use Ptr instead. -func LinkType(v LinkTypeValue) *LinkTypeValue { - return Ptr(v) -} - -// LicenseApprovalStatusValue describe the approval statuses of a license. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/managed_licenses.html -type LicenseApprovalStatusValue string - -// List of available license approval statuses. -const ( - LicenseApproved LicenseApprovalStatusValue = "approved" - LicenseBlacklisted LicenseApprovalStatusValue = "blacklisted" - LicenseAllowed LicenseApprovalStatusValue = "allowed" - LicenseDenied LicenseApprovalStatusValue = "denied" -) - -// LicenseApprovalStatus is a helper routine that allocates a new license -// approval status value to store v and returns a pointer to it. -// -// Deprecated: Please use Ptr instead. -func LicenseApprovalStatus(v LicenseApprovalStatusValue) *LicenseApprovalStatusValue { - return Ptr(v) -} - -// MergeMethodValue represents a project merge type within GitLab. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#project-merge-method -type MergeMethodValue string - -// List of available merge type -// -// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#project-merge-method -const ( - NoFastForwardMerge MergeMethodValue = "merge" - FastForwardMerge MergeMethodValue = "ff" - RebaseMerge MergeMethodValue = "rebase_merge" -) - -// MergeMethod is a helper routine that allocates a new MergeMethod -// to store v and returns a pointer to it. -// -// Deprecated: Please use Ptr instead. -func MergeMethod(v MergeMethodValue) *MergeMethodValue { - return Ptr(v) -} - -// NoteTypeValue represents the type of a Note. -type NoteTypeValue string - -// List of available note types. -const ( - DiffNote NoteTypeValue = "DiffNote" - DiscussionNote NoteTypeValue = "DiscussionNote" - GenericNote NoteTypeValue = "Note" - LegacyDiffNote NoteTypeValue = "LegacyDiffNote" -) - -// NoteType is a helper routine that allocates a new NoteTypeValue to -// store v and returns a pointer to it. -// -// Deprecated: Please use Ptr instead. -func NoteType(v NoteTypeValue) *NoteTypeValue { - return Ptr(v) -} - -// NotificationLevelValue represents a notification level. -type NotificationLevelValue int - -// String implements the fmt.Stringer interface. 
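LabelOptions marshals to the comma-joined string form the GitLab API expects, while UnmarshalJSON wraps a bare value in brackets so both string and array payloads decode. Sketch:

    package main

    import (
        "encoding/json"
        "fmt"

        gitlab "github.com/xanzy/go-gitlab"
    )

    func main() {
        labels := gitlab.LabelOptions{"bug", "backend"}

        // Marshals to one comma-joined JSON string (pointer receiver, hence &labels).
        b, _ := json.Marshal(&labels)
        fmt.Println(string(b)) // "bug,backend"

        // A JSON array decodes element by element.
        var got gitlab.LabelOptions
        _ = json.Unmarshal([]byte(`["bug","backend"]`), &got)
        fmt.Println(got) // [bug backend]
    }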
-func (l NotificationLevelValue) String() string { - return notificationLevelNames[l] -} - -// MarshalJSON implements the json.Marshaler interface. -func (l NotificationLevelValue) MarshalJSON() ([]byte, error) { - return json.Marshal(l.String()) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (l *NotificationLevelValue) UnmarshalJSON(data []byte) error { - var raw interface{} - if err := json.Unmarshal(data, &raw); err != nil { - return err - } - - switch raw := raw.(type) { - case float64: - *l = NotificationLevelValue(raw) - case string: - *l = notificationLevelTypes[raw] - case nil: - // No action needed. - default: - return fmt.Errorf("json: cannot unmarshal %T into Go value of type %T", raw, *l) - } - - return nil -} - -// List of valid notification levels. -const ( - DisabledNotificationLevel NotificationLevelValue = iota - ParticipatingNotificationLevel - WatchNotificationLevel - GlobalNotificationLevel - MentionNotificationLevel - CustomNotificationLevel -) - -var notificationLevelNames = [...]string{ - "disabled", - "participating", - "watch", - "global", - "mention", - "custom", -} - -var notificationLevelTypes = map[string]NotificationLevelValue{ - "disabled": DisabledNotificationLevel, - "participating": ParticipatingNotificationLevel, - "watch": WatchNotificationLevel, - "global": GlobalNotificationLevel, - "mention": MentionNotificationLevel, - "custom": CustomNotificationLevel, -} - -// NotificationLevel is a helper routine that allocates a new NotificationLevelValue -// to store v and returns a pointer to it. -// -// Deprecated: Please use Ptr instead. -func NotificationLevel(v NotificationLevelValue) *NotificationLevelValue { - return Ptr(v) -} - -// ProjectCreationLevelValue represents a project creation level within GitLab. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/ -type ProjectCreationLevelValue string - -// List of available project creation levels. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/ -const ( - NoOneProjectCreation ProjectCreationLevelValue = "noone" - MaintainerProjectCreation ProjectCreationLevelValue = "maintainer" - DeveloperProjectCreation ProjectCreationLevelValue = "developer" -) - -// ProjectCreationLevel is a helper routine that allocates a new ProjectCreationLevelValue -// to store v and returns a pointer to it. -// Please use Ptr instead. -func ProjectCreationLevel(v ProjectCreationLevelValue) *ProjectCreationLevelValue { - return Ptr(v) -} - -// ProjectHookEvent represents a project hook event. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#hook-events -type ProjectHookEvent string - -// List of available project hook events. 
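NotificationLevelValue's custom JSON handling above accepts both the string name and the numeric level, which GitLab mixes across endpoints. Sketch:

    package main

    import (
        "encoding/json"
        "fmt"
        "log"

        gitlab "github.com/xanzy/go-gitlab"
    )

    func main() {
        var byName, byNumber gitlab.NotificationLevelValue
        if err := json.Unmarshal([]byte(`"watch"`), &byName); err != nil {
            log.Fatal(err)
        }
        if err := json.Unmarshal([]byte(`2`), &byNumber); err != nil {
            log.Fatal(err)
        }
        // Both spellings decode to the same level (watch = 2 in the iota above).
        fmt.Println(byName == gitlab.WatchNotificationLevel, byName == byNumber) // true true
    }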
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#hook-events
-const (
-	ProjectHookEventPush                ProjectHookEvent = "push_events"
-	ProjectHookEventTagPush             ProjectHookEvent = "tag_push_events"
-	ProjectHookEventIssues              ProjectHookEvent = "issues_events"
-	ProjectHookEventConfidentialIssues  ProjectHookEvent = "confidential_issues_events"
-	ProjectHookEventNote                ProjectHookEvent = "note_events"
-	ProjectHookEventMergeRequests       ProjectHookEvent = "merge_requests_events"
-	ProjectHookEventJob                 ProjectHookEvent = "job_events"
-	ProjectHookEventPipeline            ProjectHookEvent = "pipeline_events"
-	ProjectHookEventWiki                ProjectHookEvent = "wiki_page_events"
-	ProjectHookEventReleases            ProjectHookEvent = "releases_events"
-	ProjectHookEventEmoji               ProjectHookEvent = "emoji_events"
-	ProjectHookEventResourceAccessToken ProjectHookEvent = "resource_access_token_events"
-)
-
-// ResourceGroupProcessMode represents a process mode for a resource group
-// within a GitLab project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/ci/resource_groups/index.html#process-modes
-type ResourceGroupProcessMode string
-
-// List of available resource group process modes.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/ci/resource_groups/index.html#process-modes
-const (
-	Unordered   ResourceGroupProcessMode = "unordered"
-	OldestFirst ResourceGroupProcessMode = "oldest_first"
-	NewestFirst ResourceGroupProcessMode = "newest_first"
-)
-
-// SharedRunnersSettingValue determines whether shared runners are enabled for a
-// group’s subgroups and projects.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/groups.html#options-for-shared_runners_setting
-type SharedRunnersSettingValue string
-
-// List of available shared runner setting levels.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/groups.html#options-for-shared_runners_setting
-const (
-	EnabledSharedRunnersSettingValue                  SharedRunnersSettingValue = "enabled"
-	DisabledAndOverridableSharedRunnersSettingValue   SharedRunnersSettingValue = "disabled_and_overridable"
-	DisabledAndUnoverridableSharedRunnersSettingValue SharedRunnersSettingValue = "disabled_and_unoverridable"
-
-	// Deprecated: DisabledWithOverrideSharedRunnersSettingValue is deprecated
-	// in favor of DisabledAndOverridableSharedRunnersSettingValue.
-	DisabledWithOverrideSharedRunnersSettingValue SharedRunnersSettingValue = "disabled_with_override"
-)
-
-// SharedRunnersSetting is a helper routine that allocates a new SharedRunnersSettingValue
-// to store v and returns a pointer to it.
-//
-// Deprecated: Please use Ptr instead.
-func SharedRunnersSetting(v SharedRunnersSettingValue) *SharedRunnersSettingValue {
-	return Ptr(v)
-}
-
-// SubGroupCreationLevelValue represents a sub group creation level within GitLab.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/
-type SubGroupCreationLevelValue string
-
-// List of available sub group creation levels.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/
-const (
-	OwnerSubGroupCreationLevelValue      SubGroupCreationLevelValue = "owner"
-	MaintainerSubGroupCreationLevelValue SubGroupCreationLevelValue = "maintainer"
-)
-
-// SubGroupCreationLevel is a helper routine that allocates a new SubGroupCreationLevelValue
-// to store v and returns a pointer to it.
-//
-// Deprecated: Please use Ptr instead.
-func SubGroupCreationLevel(v SubGroupCreationLevelValue) *SubGroupCreationLevelValue {
-	return Ptr(v)
-}
-
-// SquashOptionValue represents a squash option within GitLab.
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#create-project -type SquashOptionValue string - -// List of available squash options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#create-project -const ( - SquashOptionNever SquashOptionValue = "never" - SquashOptionAlways SquashOptionValue = "always" - SquashOptionDefaultOff SquashOptionValue = "default_off" - SquashOptionDefaultOn SquashOptionValue = "default_on" -) - -// SquashOption is a helper routine that allocates a new SquashOptionValue -// to store s and returns a pointer to it. -// -// Deprecated: Please use Ptr instead. -func SquashOption(s SquashOptionValue) *SquashOptionValue { - return Ptr(s) -} - -// TasksCompletionStatus represents tasks of the issue/merge request. -type TasksCompletionStatus struct { - Count int `json:"count"` - CompletedCount int `json:"completed_count"` -} - -// TodoAction represents the available actions that can be performed on a todo. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/todos.html -type TodoAction string - -// The available todo actions. -const ( - TodoAssigned TodoAction = "assigned" - TodoMentioned TodoAction = "mentioned" - TodoBuildFailed TodoAction = "build_failed" - TodoMarked TodoAction = "marked" - TodoApprovalRequired TodoAction = "approval_required" - TodoDirectlyAddressed TodoAction = "directly_addressed" -) - -// TodoTargetType represents the available target that can be linked to a todo. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/todos.html -type TodoTargetType string - -const ( - TodoTargetAlertManagement TodoTargetType = "AlertManagement::Alert" - TodoTargetDesignManagement TodoTargetType = "DesignManagement::Design" - TodoTargetIssue TodoTargetType = "Issue" - TodoTargetMergeRequest TodoTargetType = "MergeRequest" -) - -// UploadType represents the available upload types. -type UploadType string - -// The available upload types. -const ( - UploadAvatar UploadType = "avatar" - UploadFile UploadType = "file" -) - -// VariableTypeValue represents a variable type within GitLab. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/ -type VariableTypeValue string - -// List of available variable types. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/ -const ( - EnvVariableType VariableTypeValue = "env_var" - FileVariableType VariableTypeValue = "file" -) - -// VariableType is a helper routine that allocates a new VariableTypeValue -// to store v and returns a pointer to it. -// -// Deprecated: Please use Ptr instead. -func VariableType(v VariableTypeValue) *VariableTypeValue { - return Ptr(v) -} - -// VisibilityValue represents a visibility level within GitLab. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/ -type VisibilityValue string - -// List of available visibility levels. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/ -const ( - PrivateVisibility VisibilityValue = "private" - InternalVisibility VisibilityValue = "internal" - PublicVisibility VisibilityValue = "public" -) - -// Visibility is a helper routine that allocates a new VisibilityValue -// to store v and returns a pointer to it. -// -// Deprecated: Please use Ptr instead. -func Visibility(v VisibilityValue) *VisibilityValue { - return Ptr(v) -} - -// WikiFormatValue represents the available wiki formats. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/wikis.html -type WikiFormatValue string - -// The available wiki formats. 
-const ( - WikiFormatMarkdown WikiFormatValue = "markdown" - WikiFormatRDoc WikiFormatValue = "rdoc" - WikiFormatASCIIDoc WikiFormatValue = "asciidoc" - WikiFormatOrg WikiFormatValue = "org" -) - -// WikiFormat is a helper routine that allocates a new WikiFormatValue -// to store v and returns a pointer to it. -// -// Deprecated: Please use Ptr instead. -func WikiFormat(v WikiFormatValue) *WikiFormatValue { - return Ptr(v) -} - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -// -// Deprecated: Please use Ptr instead. -func Bool(v bool) *bool { - return Ptr(v) -} - -// Int is a helper routine that allocates a new int value -// to store v and returns a pointer to it. -// -// Deprecated: Please use Ptr instead. -func Int(v int) *int { - return Ptr(v) -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -// -// Deprecated: Please use Ptr instead. -func String(v string) *string { - return Ptr(v) -} - -// Time is a helper routine that allocates a new time.Time value -// to store v and returns a pointer to it. -// -// Deprecated: Please use Ptr instead. -func Time(v time.Time) *time.Time { - return Ptr(v) -} - -// BoolValue is a boolean value with advanced json unmarshaling features. -type BoolValue bool - -// UnmarshalJSON allows 1, 0, "true", and "false" to be considered as boolean values -// Needed for: -// https://gitlab.com/gitlab-org/gitlab-ce/issues/50122 -// https://gitlab.com/gitlab-org/gitlab/-/issues/233941 -// https://github.com/gitlabhq/terraform-provider-gitlab/issues/348 -func (t *BoolValue) UnmarshalJSON(b []byte) error { - switch string(b) { - case `"1"`: - *t = true - return nil - case `"0"`: - *t = false - return nil - case `"true"`: - *t = true - return nil - case `"false"`: - *t = false - return nil - default: - var v bool - err := json.Unmarshal(b, &v) - *t = BoolValue(v) - return err - } -} diff --git a/vendor/github.com/xanzy/go-gitlab/users.go b/vendor/github.com/xanzy/go-gitlab/users.go deleted file mode 100644 index f463952ac8..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/users.go +++ /dev/null @@ -1,1591 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "net" - "net/http" - "time" - - "github.com/hashicorp/go-retryablehttp" -) - -// List a couple of standard errors. 
-var ( - ErrUserActivatePrevented = errors.New("Cannot activate a user that is blocked by admin or by LDAP synchronization") - ErrUserApprovePrevented = errors.New("Cannot approve a user that is blocked by admin or by LDAP synchronization") - ErrUserBlockPrevented = errors.New("Cannot block a user that is already blocked by LDAP synchronization") - ErrUserConflict = errors.New("User does not have a pending request") - ErrUserDeactivatePrevented = errors.New("Cannot deactivate a user that is blocked by admin or by LDAP synchronization") - ErrUserDisableTwoFactorPrevented = errors.New("Cannot disable two factor authentication if not authenticated as administrator") - ErrUserNotFound = errors.New("User does not exist") - ErrUserRejectPrevented = errors.New("Cannot reject a user if not authenticated as administrator") - ErrUserTwoFactorNotEnabled = errors.New("Cannot disable two factor authentication if not enabled") - ErrUserUnblockPrevented = errors.New("Cannot unblock a user that is blocked by LDAP synchronization") -) - -// UsersService handles communication with the user related methods of -// the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html -type UsersService struct { - client *Client -} - -// BasicUser included in other service responses (such as merge requests, pipelines, etc). -type BasicUser struct { - ID int `json:"id"` - Username string `json:"username"` - Name string `json:"name"` - State string `json:"state"` - CreatedAt *time.Time `json:"created_at"` - AvatarURL string `json:"avatar_url"` - WebURL string `json:"web_url"` -} - -// User represents a GitLab user. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html -type User struct { - ID int `json:"id"` - Username string `json:"username"` - Email string `json:"email"` - Name string `json:"name"` - State string `json:"state"` - WebURL string `json:"web_url"` - CreatedAt *time.Time `json:"created_at"` - Bio string `json:"bio"` - Bot bool `json:"bot"` - Location string `json:"location"` - PublicEmail string `json:"public_email"` - Skype string `json:"skype"` - Linkedin string `json:"linkedin"` - Twitter string `json:"twitter"` - WebsiteURL string `json:"website_url"` - Organization string `json:"organization"` - JobTitle string `json:"job_title"` - ExternUID string `json:"extern_uid"` - Provider string `json:"provider"` - ThemeID int `json:"theme_id"` - LastActivityOn *ISOTime `json:"last_activity_on"` - ColorSchemeID int `json:"color_scheme_id"` - IsAdmin bool `json:"is_admin"` - IsAuditor bool `json:"is_auditor"` - AvatarURL string `json:"avatar_url"` - CanCreateGroup bool `json:"can_create_group"` - CanCreateProject bool `json:"can_create_project"` - ProjectsLimit int `json:"projects_limit"` - CurrentSignInAt *time.Time `json:"current_sign_in_at"` - CurrentSignInIP *net.IP `json:"current_sign_in_ip"` - LastSignInAt *time.Time `json:"last_sign_in_at"` - LastSignInIP *net.IP `json:"last_sign_in_ip"` - ConfirmedAt *time.Time `json:"confirmed_at"` - TwoFactorEnabled bool `json:"two_factor_enabled"` - Note string `json:"note"` - Identities []*UserIdentity `json:"identities"` - External bool `json:"external"` - PrivateProfile bool `json:"private_profile"` - SharedRunnersMinutesLimit int `json:"shared_runners_minutes_limit"` - ExtraSharedRunnersMinutesLimit int `json:"extra_shared_runners_minutes_limit"` - UsingLicenseSeat bool `json:"using_license_seat"` - CustomAttributes []*CustomAttribute `json:"custom_attributes"` - NamespaceID int `json:"namespace_id"` - Locked bool `json:"locked"` 
-} - -// UserIdentity represents a user identity. -type UserIdentity struct { - Provider string `json:"provider"` - ExternUID string `json:"extern_uid"` -} - -// UserAvatar represents a GitLab user avatar. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html -type UserAvatar struct { - Filename string - Image io.Reader -} - -// MarshalJSON implements the json.Marshaler interface. -func (a *UserAvatar) MarshalJSON() ([]byte, error) { - if a.Filename == "" && a.Image == nil { - return []byte(`""`), nil - } - type alias UserAvatar - return json.Marshal((*alias)(a)) -} - -// ListUsersOptions represents the available ListUsers() options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#list-users -type ListUsersOptions struct { - ListOptions - Active *bool `url:"active,omitempty" json:"active,omitempty"` - Blocked *bool `url:"blocked,omitempty" json:"blocked,omitempty"` - ExcludeInternal *bool `url:"exclude_internal,omitempty" json:"exclude_internal,omitempty"` - ExcludeExternal *bool `url:"exclude_external,omitempty" json:"exclude_external,omitempty"` - - // The options below are only available for admins. - Search *string `url:"search,omitempty" json:"search,omitempty"` - Username *string `url:"username,omitempty" json:"username,omitempty"` - ExternalUID *string `url:"extern_uid,omitempty" json:"extern_uid,omitempty"` - Provider *string `url:"provider,omitempty" json:"provider,omitempty"` - CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` - CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"` - OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` - Sort *string `url:"sort,omitempty" json:"sort,omitempty"` - TwoFactor *string `url:"two_factor,omitempty" json:"two_factor,omitempty"` - Admins *bool `url:"admins,omitempty" json:"admins,omitempty"` - External *bool `url:"external,omitempty" json:"external,omitempty"` - WithoutProjects *bool `url:"without_projects,omitempty" json:"without_projects,omitempty"` - WithCustomAttributes *bool `url:"with_custom_attributes,omitempty" json:"with_custom_attributes,omitempty"` - WithoutProjectBots *bool `url:"without_project_bots,omitempty" json:"without_project_bots,omitempty"` -} - -// ListUsers gets a list of users. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#list-users -func (s *UsersService) ListUsers(opt *ListUsersOptions, options ...RequestOptionFunc) ([]*User, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "users", opt, options) - if err != nil { - return nil, nil, err - } - - var usr []*User - resp, err := s.client.Do(req, &usr) - if err != nil { - return nil, resp, err - } - - return usr, resp, nil -} - -// GetUsersOptions represents the available GetUser() options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#single-user -type GetUsersOptions struct { - WithCustomAttributes *bool `url:"with_custom_attributes,omitempty" json:"with_custom_attributes,omitempty"` -} - -// GetUser gets a single user. 
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#single-user -func (s *UsersService) GetUser(user int, opt GetUsersOptions, options ...RequestOptionFunc) (*User, *Response, error) { - u := fmt.Sprintf("users/%d", user) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - usr := new(User) - resp, err := s.client.Do(req, usr) - if err != nil { - return nil, resp, err - } - - return usr, resp, nil -} - -// CreateUserOptions represents the available CreateUser() options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#user-creation -type CreateUserOptions struct { - Admin *bool `url:"admin,omitempty" json:"admin,omitempty"` - Avatar *UserAvatar `url:"-" json:"-"` - Bio *string `url:"bio,omitempty" json:"bio,omitempty"` - CanCreateGroup *bool `url:"can_create_group,omitempty" json:"can_create_group,omitempty"` - Email *string `url:"email,omitempty" json:"email,omitempty"` - External *bool `url:"external,omitempty" json:"external,omitempty"` - ExternUID *string `url:"extern_uid,omitempty" json:"extern_uid,omitempty"` - ForceRandomPassword *bool `url:"force_random_password,omitempty" json:"force_random_password,omitempty"` - JobTitle *string `url:"job_title,omitempty" json:"job_title,omitempty"` - Linkedin *string `url:"linkedin,omitempty" json:"linkedin,omitempty"` - Location *string `url:"location,omitempty" json:"location,omitempty"` - Name *string `url:"name,omitempty" json:"name,omitempty"` - Note *string `url:"note,omitempty" json:"note,omitempty"` - Organization *string `url:"organization,omitempty" json:"organization,omitempty"` - Password *string `url:"password,omitempty" json:"password,omitempty"` - PrivateProfile *bool `url:"private_profile,omitempty" json:"private_profile,omitempty"` - ProjectsLimit *int `url:"projects_limit,omitempty" json:"projects_limit,omitempty"` - Provider *string `url:"provider,omitempty" json:"provider,omitempty"` - ResetPassword *bool `url:"reset_password,omitempty" json:"reset_password,omitempty"` - SkipConfirmation *bool `url:"skip_confirmation,omitempty" json:"skip_confirmation,omitempty"` - Skype *string `url:"skype,omitempty" json:"skype,omitempty"` - ThemeID *int `url:"theme_id,omitempty" json:"theme_id,omitempty"` - Twitter *string `url:"twitter,omitempty" json:"twitter,omitempty"` - Username *string `url:"username,omitempty" json:"username,omitempty"` - WebsiteURL *string `url:"website_url,omitempty" json:"website_url,omitempty"` -} - -// CreateUser creates a new user. Note only administrators can create new users. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#user-creation -func (s *UsersService) CreateUser(opt *CreateUserOptions, options ...RequestOptionFunc) (*User, *Response, error) { - var err error - var req *retryablehttp.Request - - if opt.Avatar == nil { - req, err = s.client.NewRequest(http.MethodPost, "users", opt, options) - } else { - req, err = s.client.UploadRequest( - http.MethodPost, - "users", - opt.Avatar.Image, - opt.Avatar.Filename, - UploadAvatar, - opt, - options, - ) - } - if err != nil { - return nil, nil, err - } - - usr := new(User) - resp, err := s.client.Do(req, usr) - if err != nil { - return nil, resp, err - } - - return usr, resp, nil -} - -// ModifyUserOptions represents the available ModifyUser() options. 
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#user-modification -type ModifyUserOptions struct { - Admin *bool `url:"admin,omitempty" json:"admin,omitempty"` - Avatar *UserAvatar `url:"-" json:"avatar,omitempty"` - Bio *string `url:"bio,omitempty" json:"bio,omitempty"` - CanCreateGroup *bool `url:"can_create_group,omitempty" json:"can_create_group,omitempty"` - CommitEmail *string `url:"commit_email,omitempty" json:"commit_email,omitempty"` - Email *string `url:"email,omitempty" json:"email,omitempty"` - External *bool `url:"external,omitempty" json:"external,omitempty"` - ExternUID *string `url:"extern_uid,omitempty" json:"extern_uid,omitempty"` - JobTitle *string `url:"job_title,omitempty" json:"job_title,omitempty"` - Linkedin *string `url:"linkedin,omitempty" json:"linkedin,omitempty"` - Location *string `url:"location,omitempty" json:"location,omitempty"` - Name *string `url:"name,omitempty" json:"name,omitempty"` - Note *string `url:"note,omitempty" json:"note,omitempty"` - Organization *string `url:"organization,omitempty" json:"organization,omitempty"` - Password *string `url:"password,omitempty" json:"password,omitempty"` - PrivateProfile *bool `url:"private_profile,omitempty" json:"private_profile,omitempty"` - ProjectsLimit *int `url:"projects_limit,omitempty" json:"projects_limit,omitempty"` - Provider *string `url:"provider,omitempty" json:"provider,omitempty"` - PublicEmail *string `url:"public_email,omitempty" json:"public_email,omitempty"` - SkipReconfirmation *bool `url:"skip_reconfirmation,omitempty" json:"skip_reconfirmation,omitempty"` - Skype *string `url:"skype,omitempty" json:"skype,omitempty"` - ThemeID *int `url:"theme_id,omitempty" json:"theme_id,omitempty"` - Twitter *string `url:"twitter,omitempty" json:"twitter,omitempty"` - Username *string `url:"username,omitempty" json:"username,omitempty"` - WebsiteURL *string `url:"website_url,omitempty" json:"website_url,omitempty"` -} - -// ModifyUser modifies an existing user. Only administrators can change attributes -// of a user. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#user-modification -func (s *UsersService) ModifyUser(user int, opt *ModifyUserOptions, options ...RequestOptionFunc) (*User, *Response, error) { - var err error - var req *retryablehttp.Request - u := fmt.Sprintf("users/%d", user) - - if opt.Avatar == nil || (opt.Avatar.Filename == "" && opt.Avatar.Image == nil) { - req, err = s.client.NewRequest(http.MethodPut, u, opt, options) - } else { - req, err = s.client.UploadRequest( - http.MethodPut, - u, - opt.Avatar.Image, - opt.Avatar.Filename, - UploadAvatar, - opt, - options, - ) - } - if err != nil { - return nil, nil, err - } - - usr := new(User) - resp, err := s.client.Do(req, usr) - if err != nil { - return nil, resp, err - } - - return usr, resp, nil -} - -// DeleteUser deletes a user. Available only for administrators. This is an -// idempotent function, calling this function for a non-existent user id still -// returns a status code 200 OK. The JSON response differs if the user was -// actually deleted or not. In the former the user is returned and in the -// latter not. 
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#user-deletion -func (s *UsersService) DeleteUser(user int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("users/%d", user) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// CurrentUser gets currently authenticated user. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#list-current-user -func (s *UsersService) CurrentUser(options ...RequestOptionFunc) (*User, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "user", nil, options) - if err != nil { - return nil, nil, err - } - - usr := new(User) - resp, err := s.client.Do(req, usr) - if err != nil { - return nil, resp, err - } - - return usr, resp, nil -} - -// UserStatus represents the current status of a user -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/users.html#user-status -type UserStatus struct { - Emoji string `json:"emoji"` - Availability AvailabilityValue `json:"availability"` - Message string `json:"message"` - MessageHTML string `json:"message_html"` -} - -// CurrentUserStatus retrieves the user status -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/users.html#user-status -func (s *UsersService) CurrentUserStatus(options ...RequestOptionFunc) (*UserStatus, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "user/status", nil, options) - if err != nil { - return nil, nil, err - } - - status := new(UserStatus) - resp, err := s.client.Do(req, status) - if err != nil { - return nil, resp, err - } - - return status, resp, nil -} - -// GetUserStatus retrieves a user's status -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/users.html#get-the-status-of-a-user -func (s *UsersService) GetUserStatus(user int, options ...RequestOptionFunc) (*UserStatus, *Response, error) { - u := fmt.Sprintf("users/%d/status", user) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - status := new(UserStatus) - resp, err := s.client.Do(req, status) - if err != nil { - return nil, resp, err - } - - return status, resp, nil -} - -// UserStatusOptions represents the options required to set the status -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/users.html#set-user-status -type UserStatusOptions struct { - Emoji *string `url:"emoji,omitempty" json:"emoji,omitempty"` - Availability *AvailabilityValue `url:"availability,omitempty" json:"availability,omitempty"` - Message *string `url:"message,omitempty" json:"message,omitempty"` -} - -// SetUserStatus sets the user's status -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/users.html#set-user-status -func (s *UsersService) SetUserStatus(opt *UserStatusOptions, options ...RequestOptionFunc) (*UserStatus, *Response, error) { - req, err := s.client.NewRequest(http.MethodPut, "user/status", opt, options) - if err != nil { - return nil, nil, err - } - - status := new(UserStatus) - resp, err := s.client.Do(req, status) - if err != nil { - return nil, resp, err - } - - return status, resp, nil -} - -// UserAssociationsCount represents the user associations count. 
-// -// Gitlab API docs: https://docs.gitlab.com/ee/api/users.html#list-associations-count-for-user -type UserAssociationsCount struct { - GroupsCount int `json:"groups_count"` - ProjectsCount int `json:"projects_count"` - IssuesCount int `json:"issues_count"` - MergeRequestsCount int `json:"merge_requests_count"` -} - -// GetUserAssociationsCount gets a list of a specified user associations. -// -// Gitlab API docs: https://docs.gitlab.com/ee/api/users.html#list-associations-count-for-user -func (s *UsersService) GetUserAssociationsCount(user int, options ...RequestOptionFunc) (*UserAssociationsCount, *Response, error) { - u := fmt.Sprintf("users/%d/associations_count", user) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - uac := new(UserAssociationsCount) - resp, err := s.client.Do(req, uac) - if err != nil { - return nil, resp, err - } - - return uac, resp, nil -} - -// SSHKey represents a SSH key. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#list-ssh-keys -type SSHKey struct { - ID int `json:"id"` - Title string `json:"title"` - Key string `json:"key"` - CreatedAt *time.Time `json:"created_at"` - ExpiresAt *time.Time `json:"expires_at"` -} - -// ListSSHKeysOptions represents the available ListSSHKeys options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#list-ssh-keys -type ListSSHKeysOptions ListOptions - -// ListSSHKeys gets a list of currently authenticated user's SSH keys. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#list-ssh-keys -func (s *UsersService) ListSSHKeys(opt *ListSSHKeysOptions, options ...RequestOptionFunc) ([]*SSHKey, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "user/keys", opt, options) - if err != nil { - return nil, nil, err - } - - var k []*SSHKey - resp, err := s.client.Do(req, &k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil -} - -// ListSSHKeysForUserOptions represents the available ListSSHKeysForUser() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/users.html#list-ssh-keys-for-user -type ListSSHKeysForUserOptions ListOptions - -// ListSSHKeysForUser gets a list of a specified user's SSH keys. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/users.html#list-ssh-keys-for-user -func (s *UsersService) ListSSHKeysForUser(uid interface{}, opt *ListSSHKeysForUserOptions, options ...RequestOptionFunc) ([]*SSHKey, *Response, error) { - user, err := parseID(uid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("users/%s/keys", user) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var k []*SSHKey - resp, err := s.client.Do(req, &k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil -} - -// GetSSHKey gets a single key. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#single-ssh-key -func (s *UsersService) GetSSHKey(key int, options ...RequestOptionFunc) (*SSHKey, *Response, error) { - u := fmt.Sprintf("user/keys/%d", key) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - k := new(SSHKey) - resp, err := s.client.Do(req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil -} - -// GetSSHKeyForUser gets a single key for a given user. 
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#single-ssh-key-for-given-user -func (s *UsersService) GetSSHKeyForUser(user int, key int, options ...RequestOptionFunc) (*SSHKey, *Response, error) { - u := fmt.Sprintf("users/%d/keys/%d", user, key) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - k := new(SSHKey) - resp, err := s.client.Do(req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil -} - -// AddSSHKeyOptions represents the available AddSSHKey() options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#add-ssh-key -type AddSSHKeyOptions struct { - Title *string `url:"title,omitempty" json:"title,omitempty"` - Key *string `url:"key,omitempty" json:"key,omitempty"` - ExpiresAt *ISOTime `url:"expires_at,omitempty" json:"expires_at,omitempty"` -} - -// AddSSHKey creates a new key owned by the currently authenticated user. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#add-ssh-key -func (s *UsersService) AddSSHKey(opt *AddSSHKeyOptions, options ...RequestOptionFunc) (*SSHKey, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "user/keys", opt, options) - if err != nil { - return nil, nil, err - } - - k := new(SSHKey) - resp, err := s.client.Do(req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil -} - -// AddSSHKeyForUser creates new key owned by specified user. Available only for -// admin. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#add-ssh-key-for-user -func (s *UsersService) AddSSHKeyForUser(user int, opt *AddSSHKeyOptions, options ...RequestOptionFunc) (*SSHKey, *Response, error) { - u := fmt.Sprintf("users/%d/keys", user) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - k := new(SSHKey) - resp, err := s.client.Do(req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil -} - -// DeleteSSHKey deletes key owned by currently authenticated user. This is an -// idempotent function and calling it on a key that is already deleted or not -// available results in 200 OK. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/users.html#delete-ssh-key-for-current-user -func (s *UsersService) DeleteSSHKey(key int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("user/keys/%d", key) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// DeleteSSHKeyForUser deletes key owned by a specified user. Available only -// for admin. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/users.html#delete-ssh-key-for-given-user -func (s *UsersService) DeleteSSHKeyForUser(user, key int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("users/%d/keys/%d", user, key) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// GPGKey represents a GPG key. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#list-all-gpg-keys -type GPGKey struct { - ID int `json:"id"` - Key string `json:"key"` - CreatedAt *time.Time `json:"created_at"` -} - -// ListGPGKeys gets a list of currently authenticated user’s GPG keys. 
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#list-all-gpg-keys -func (s *UsersService) ListGPGKeys(options ...RequestOptionFunc) ([]*GPGKey, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "user/gpg_keys", nil, options) - if err != nil { - return nil, nil, err - } - - var ks []*GPGKey - resp, err := s.client.Do(req, &ks) - if err != nil { - return nil, resp, err - } - - return ks, resp, nil -} - -// GetGPGKey gets a specific GPG key of currently authenticated user. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#get-a-specific-gpg-key -func (s *UsersService) GetGPGKey(key int, options ...RequestOptionFunc) (*GPGKey, *Response, error) { - u := fmt.Sprintf("user/gpg_keys/%d", key) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - k := new(GPGKey) - resp, err := s.client.Do(req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil -} - -// AddGPGKeyOptions represents the available AddGPGKey() options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#add-a-gpg-key -type AddGPGKeyOptions struct { - Key *string `url:"key,omitempty" json:"key,omitempty"` -} - -// AddGPGKey creates a new GPG key owned by the currently authenticated user. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#add-a-gpg-key -func (s *UsersService) AddGPGKey(opt *AddGPGKeyOptions, options ...RequestOptionFunc) (*GPGKey, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "user/gpg_keys", opt, options) - if err != nil { - return nil, nil, err - } - - k := new(GPGKey) - resp, err := s.client.Do(req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil -} - -// DeleteGPGKey deletes a GPG key owned by currently authenticated user. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#delete-a-gpg-key -func (s *UsersService) DeleteGPGKey(key int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("user/gpg_keys/%d", key) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// ListGPGKeysForUser gets a list of a specified user’s GPG keys. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/users.html#list-all-gpg-keys-for-given-user -func (s *UsersService) ListGPGKeysForUser(user int, options ...RequestOptionFunc) ([]*GPGKey, *Response, error) { - u := fmt.Sprintf("users/%d/gpg_keys", user) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - var ks []*GPGKey - resp, err := s.client.Do(req, &ks) - if err != nil { - return nil, resp, err - } - - return ks, resp, nil -} - -// GetGPGKeyForUser gets a specific GPG key for a given user. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#get-a-specific-gpg-key-for-a-given-user -func (s *UsersService) GetGPGKeyForUser(user, key int, options ...RequestOptionFunc) (*GPGKey, *Response, error) { - u := fmt.Sprintf("users/%d/gpg_keys/%d", user, key) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - k := new(GPGKey) - resp, err := s.client.Do(req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil -} - -// AddGPGKeyForUser creates new GPG key owned by the specified user. 
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/users.html#add-a-gpg-key-for-a-given-user -func (s *UsersService) AddGPGKeyForUser(user int, opt *AddGPGKeyOptions, options ...RequestOptionFunc) (*GPGKey, *Response, error) { - u := fmt.Sprintf("users/%d/gpg_keys", user) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - k := new(GPGKey) - resp, err := s.client.Do(req, k) - if err != nil { - return nil, resp, err - } - - return k, resp, nil -} - -// DeleteGPGKeyForUser deletes a GPG key owned by a specified user. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/users.html#delete-a-gpg-key-for-a-given-user -func (s *UsersService) DeleteGPGKeyForUser(user, key int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("users/%d/gpg_keys/%d", user, key) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// Email represents an Email. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#list-emails -type Email struct { - ID int `json:"id"` - Email string `json:"email"` - ConfirmedAt *time.Time `json:"confirmed_at"` -} - -// ListEmails gets a list of currently authenticated user's Emails. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#list-emails -func (s *UsersService) ListEmails(options ...RequestOptionFunc) ([]*Email, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "user/emails", nil, options) - if err != nil { - return nil, nil, err - } - - var e []*Email - resp, err := s.client.Do(req, &e) - if err != nil { - return nil, resp, err - } - - return e, resp, nil -} - -// ListEmailsForUserOptions represents the available ListEmailsForUser() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/users.html#list-emails-for-user -type ListEmailsForUserOptions ListOptions - -// ListEmailsForUser gets a list of a specified user's Emails. Available -// only for admin -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/users.html#list-emails-for-user -func (s *UsersService) ListEmailsForUser(user int, opt *ListEmailsForUserOptions, options ...RequestOptionFunc) ([]*Email, *Response, error) { - u := fmt.Sprintf("users/%d/emails", user) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var e []*Email - resp, err := s.client.Do(req, &e) - if err != nil { - return nil, resp, err - } - - return e, resp, nil -} - -// GetEmail gets a single email. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#single-email -func (s *UsersService) GetEmail(email int, options ...RequestOptionFunc) (*Email, *Response, error) { - u := fmt.Sprintf("user/emails/%d", email) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - e := new(Email) - resp, err := s.client.Do(req, e) - if err != nil { - return nil, resp, err - } - - return e, resp, nil -} - -// AddEmailOptions represents the available AddEmail() options. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#add-email -type AddEmailOptions struct { - Email *string `url:"email,omitempty" json:"email,omitempty"` - SkipConfirmation *bool `url:"skip_confirmation,omitempty" json:"skip_confirmation,omitempty"` -} - -// AddEmail creates a new email owned by the currently authenticated user. 
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#add-email
-func (s *UsersService) AddEmail(opt *AddEmailOptions, options ...RequestOptionFunc) (*Email, *Response, error) {
-	req, err := s.client.NewRequest(http.MethodPost, "user/emails", opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	e := new(Email)
-	resp, err := s.client.Do(req, e)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return e, resp, nil
-}
-
-// AddEmailForUser creates new email owned by specified user. Available only for
-// admin.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#add-email-for-user
-func (s *UsersService) AddEmailForUser(user int, opt *AddEmailOptions, options ...RequestOptionFunc) (*Email, *Response, error) {
-	u := fmt.Sprintf("users/%d/emails", user)
-
-	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	e := new(Email)
-	resp, err := s.client.Do(req, e)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return e, resp, nil
-}
-
-// DeleteEmail deletes email owned by currently authenticated user. This is an
-// idempotent function and calling it on an email that is already deleted or not
-// available results in 200 OK.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/users.html#delete-email-for-current-user
-func (s *UsersService) DeleteEmail(email int, options ...RequestOptionFunc) (*Response, error) {
-	u := fmt.Sprintf("user/emails/%d", email)
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// DeleteEmailForUser deletes email owned by a specified user. Available only
-// for admin.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/users.html#delete-email-for-given-user
-func (s *UsersService) DeleteEmailForUser(user, email int, options ...RequestOptionFunc) (*Response, error) {
-	u := fmt.Sprintf("users/%d/emails/%d", user, email)
-
-	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
-	if err != nil {
-		return nil, err
-	}
-
-	return s.client.Do(req, nil)
-}
-
-// BlockUser blocks the specified user. Available only for admin.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#block-user
-func (s *UsersService) BlockUser(user int, options ...RequestOptionFunc) error {
-	u := fmt.Sprintf("users/%d/block", user)
-
-	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
-	if err != nil {
-		return err
-	}
-
-	resp, err := s.client.Do(req, nil)
-	if err != nil && resp == nil {
-		return err
-	}
-
-	switch resp.StatusCode {
-	case 201:
-		return nil
-	case 403:
-		return ErrUserBlockPrevented
-	case 404:
-		return ErrUserNotFound
-	default:
-		return fmt.Errorf("Received unexpected result code: %d", resp.StatusCode)
-	}
-}
-
-// UnblockUser unblocks the specified user. Available only for admin.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#unblock-user
-func (s *UsersService) UnblockUser(user int, options ...RequestOptionFunc) error {
-	u := fmt.Sprintf("users/%d/unblock", user)
-
-	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
-	if err != nil {
-		return err
-	}
-
-	resp, err := s.client.Do(req, nil)
-	if err != nil && resp == nil {
-		return err
-	}
-
-	switch resp.StatusCode {
-	case 201:
-		return nil
-	case 403:
-		return ErrUserUnblockPrevented
-	case 404:
-		return ErrUserNotFound
-	default:
-		return fmt.Errorf("Received unexpected result code: %d", resp.StatusCode)
-	}
-}
-
-// BanUser bans the specified user. Available only for admin.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#ban-user
-func (s *UsersService) BanUser(user int, options ...RequestOptionFunc) error {
-	u := fmt.Sprintf("users/%d/ban", user)
-
-	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
-	if err != nil {
-		return err
-	}
-
-	resp, err := s.client.Do(req, nil)
-	if err != nil && resp == nil {
-		return err
-	}
-
-	switch resp.StatusCode {
-	case 201:
-		return nil
-	case 404:
-		return ErrUserNotFound
-	default:
-		return fmt.Errorf("Received unexpected result code: %d", resp.StatusCode)
-	}
-}
-
-// UnbanUser unbans the specified user. Available only for admin.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#unban-user
-func (s *UsersService) UnbanUser(user int, options ...RequestOptionFunc) error {
-	u := fmt.Sprintf("users/%d/unban", user)
-
-	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
-	if err != nil {
-		return err
-	}
-
-	resp, err := s.client.Do(req, nil)
-	if err != nil && resp == nil {
-		return err
-	}
-
-	switch resp.StatusCode {
-	case 201:
-		return nil
-	case 404:
-		return ErrUserNotFound
-	default:
-		return fmt.Errorf("Received unexpected result code: %d", resp.StatusCode)
-	}
-}
-
-// DeactivateUser deactivates the specified user. Available only for admin.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#deactivate-user
-func (s *UsersService) DeactivateUser(user int, options ...RequestOptionFunc) error {
-	u := fmt.Sprintf("users/%d/deactivate", user)
-
-	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
-	if err != nil {
-		return err
-	}
-
-	resp, err := s.client.Do(req, nil)
-	if err != nil && resp == nil {
-		return err
-	}
-
-	switch resp.StatusCode {
-	case 201:
-		return nil
-	case 403:
-		return ErrUserDeactivatePrevented
-	case 404:
-		return ErrUserNotFound
-	default:
-		return fmt.Errorf("Received unexpected result code: %d", resp.StatusCode)
-	}
-}
-
-// ActivateUser activates the specified user. Available only for admin.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#activate-user
-func (s *UsersService) ActivateUser(user int, options ...RequestOptionFunc) error {
-	u := fmt.Sprintf("users/%d/activate", user)
-
-	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
-	if err != nil {
-		return err
-	}
-
-	resp, err := s.client.Do(req, nil)
-	if err != nil && resp == nil {
-		return err
-	}
-
-	switch resp.StatusCode {
-	case 201:
-		return nil
-	case 403:
-		return ErrUserActivatePrevented
-	case 404:
-		return ErrUserNotFound
-	default:
-		return fmt.Errorf("Received unexpected result code: %d", resp.StatusCode)
-	}
-}
-
-// ApproveUser approves the specified user. Available only for admin.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#approve-user
-func (s *UsersService) ApproveUser(user int, options ...RequestOptionFunc) error {
-	u := fmt.Sprintf("users/%d/approve", user)
-
-	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
-	if err != nil {
-		return err
-	}
-
-	resp, err := s.client.Do(req, nil)
-	if err != nil && resp == nil {
-		return err
-	}
-
-	switch resp.StatusCode {
-	case 201:
-		return nil
-	case 403:
-		return ErrUserApprovePrevented
-	case 404:
-		return ErrUserNotFound
-	default:
-		return fmt.Errorf("Received unexpected result code: %d", resp.StatusCode)
-	}
-}
-
-// RejectUser rejects the specified user. Available only for admin.
-//
-// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#reject-user
-func (s *UsersService) RejectUser(user int, options ...RequestOptionFunc) error {
-	u := fmt.Sprintf("users/%d/reject", user)
-
-	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
-	if err != nil {
-		return err
-	}
-
-	resp, err := s.client.Do(req, nil)
-	if err != nil && resp == nil {
-		return err
-	}
-
-	switch resp.StatusCode {
-	case 200:
-		return nil
-	case 403:
-		return ErrUserRejectPrevented
-	case 404:
-		return ErrUserNotFound
-	case 409:
-		return ErrUserConflict
-	default:
-		return fmt.Errorf("Received unexpected result code: %d", resp.StatusCode)
-	}
-}
-
-// ImpersonationToken represents an impersonation token.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/users.html#get-all-impersonation-tokens-of-a-user
-type ImpersonationToken struct {
-	ID         int        `json:"id"`
-	Name       string     `json:"name"`
-	Active     bool       `json:"active"`
-	Token      string     `json:"token"`
-	Scopes     []string   `json:"scopes"`
-	Revoked    bool       `json:"revoked"`
-	CreatedAt  *time.Time `json:"created_at"`
-	ExpiresAt  *ISOTime   `json:"expires_at"`
-	LastUsedAt *time.Time `json:"last_used_at"`
-}
-
-// GetAllImpersonationTokensOptions represents the available
-// GetAllImpersonationTokens() options.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/users.html#get-all-impersonation-tokens-of-a-user
-type GetAllImpersonationTokensOptions struct {
-	ListOptions
-	State *string `url:"state,omitempty" json:"state,omitempty"`
-}
-
-// GetAllImpersonationTokens retrieves all impersonation tokens of a user.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/users.html#get-all-impersonation-tokens-of-a-user
-func (s *UsersService) GetAllImpersonationTokens(user int, opt *GetAllImpersonationTokensOptions, options ...RequestOptionFunc) ([]*ImpersonationToken, *Response, error) {
-	u := fmt.Sprintf("users/%d/impersonation_tokens", user)
-
-	req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var ts []*ImpersonationToken
-	resp, err := s.client.Do(req, &ts)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return ts, resp, nil
-}
-
-// GetImpersonationToken retrieves an impersonation token of a user.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/users.html#get-an-impersonation-token-of-a-user -func (s *UsersService) GetImpersonationToken(user, token int, options ...RequestOptionFunc) (*ImpersonationToken, *Response, error) { - u := fmt.Sprintf("users/%d/impersonation_tokens/%d", user, token) - - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) - if err != nil { - return nil, nil, err - } - - t := new(ImpersonationToken) - resp, err := s.client.Do(req, &t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// CreateImpersonationTokenOptions represents the available -// CreateImpersonationToken() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/users.html#create-an-impersonation-token -type CreateImpersonationTokenOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - Scopes *[]string `url:"scopes,omitempty" json:"scopes,omitempty"` - ExpiresAt *time.Time `url:"expires_at,omitempty" json:"expires_at,omitempty"` -} - -// CreateImpersonationToken creates an impersonation token. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/users.html#create-an-impersonation-token -func (s *UsersService) CreateImpersonationToken(user int, opt *CreateImpersonationTokenOptions, options ...RequestOptionFunc) (*ImpersonationToken, *Response, error) { - u := fmt.Sprintf("users/%d/impersonation_tokens", user) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - t := new(ImpersonationToken) - resp, err := s.client.Do(req, &t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// RevokeImpersonationToken revokes an impersonation token. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/users.html#revoke-an-impersonation-token -func (s *UsersService) RevokeImpersonationToken(user, token int, options ...RequestOptionFunc) (*Response, error) { - u := fmt.Sprintf("users/%d/impersonation_tokens/%d", user, token) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} - -// CreatePersonalAccessTokenOptions represents the available -// CreatePersonalAccessToken() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/users.html#create-a-personal-access-token -type CreatePersonalAccessTokenOptions struct { - Name *string `url:"name,omitempty" json:"name,omitempty"` - ExpiresAt *ISOTime `url:"expires_at,omitempty" json:"expires_at,omitempty"` - Scopes *[]string `url:"scopes,omitempty" json:"scopes,omitempty"` -} - -// CreatePersonalAccessToken creates a personal access token. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/users.html#create-a-personal-access-token -func (s *UsersService) CreatePersonalAccessToken(user int, opt *CreatePersonalAccessTokenOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) { - u := fmt.Sprintf("users/%d/personal_access_tokens", user) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - t := new(PersonalAccessToken) - resp, err := s.client.Do(req, &t) - if err != nil { - return nil, resp, err - } - - return t, resp, nil -} - -// CreatePersonalAccessTokenForCurrentUserOptions represents the available -// CreatePersonalAccessTokenForCurrentUser() options. 
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/users.html#create-a-personal-access-token-with-limited-scopes-for-the-currently-authenticated-user
-type CreatePersonalAccessTokenForCurrentUserOptions struct {
-	Name      *string   `url:"name,omitempty" json:"name,omitempty"`
-	Scopes    *[]string `url:"scopes,omitempty" json:"scopes,omitempty"`
-	ExpiresAt *ISOTime  `url:"expires_at,omitempty" json:"expires_at,omitempty"`
-}
-
-// CreatePersonalAccessTokenForCurrentUser creates a personal access token with limited scopes for the currently authenticated user.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/users.html#create-a-personal-access-token-with-limited-scopes-for-the-currently-authenticated-user
-func (s *UsersService) CreatePersonalAccessTokenForCurrentUser(opt *CreatePersonalAccessTokenForCurrentUserOptions, options ...RequestOptionFunc) (*PersonalAccessToken, *Response, error) {
-	u := "user/personal_access_tokens"
-
-	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	t := new(PersonalAccessToken)
-	resp, err := s.client.Do(req, &t)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return t, resp, nil
-}
-
-// UserActivity represents an entry in the user/activities response
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/users.html#get-user-activities
-type UserActivity struct {
-	Username       string   `json:"username"`
-	LastActivityOn *ISOTime `json:"last_activity_on"`
-}
-
-// GetUserActivitiesOptions represents the options for GetUserActivities
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/users.html#get-user-activities
-type GetUserActivitiesOptions struct {
-	ListOptions
-	From *ISOTime `url:"from,omitempty" json:"from,omitempty"`
-}
-
-// GetUserActivities retrieves user activities (admin only)
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/users.html#get-user-activities
-func (s *UsersService) GetUserActivities(opt *GetUserActivitiesOptions, options ...RequestOptionFunc) ([]*UserActivity, *Response, error) {
-	req, err := s.client.NewRequest(http.MethodGet, "user/activities", opt, options)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var t []*UserActivity
-	resp, err := s.client.Do(req, &t)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return t, resp, nil
-}
-
-// UserMembership represents a membership of the user in a namespace or project.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/users.html#user-memberships
-type UserMembership struct {
-	SourceID    int              `json:"source_id"`
-	SourceName  string           `json:"source_name"`
-	SourceType  string           `json:"source_type"`
-	AccessLevel AccessLevelValue `json:"access_level"`
-}
-
-// GetUserMembershipOptions represents the options available to query user memberships.
-//
-// GitLab API docs:
-// https://docs.gitlab.com/ee/api/users.html#user-memberships
-type GetUserMembershipOptions struct {
-	ListOptions
-	Type *string `url:"type,omitempty" json:"type,omitempty"`
-}
-
-// GetUserMemberships retrieves a list of the user's memberships.
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/users.html#user-memberships -func (s *UsersService) GetUserMemberships(user int, opt *GetUserMembershipOptions, options ...RequestOptionFunc) ([]*UserMembership, *Response, error) { - u := fmt.Sprintf("users/%d/memberships", user) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var m []*UserMembership - resp, err := s.client.Do(req, &m) - if err != nil { - return nil, resp, err - } - - return m, resp, nil -} - -// DisableTwoFactor disables two factor authentication for the specified user. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/users.html#disable-two-factor-authentication -func (s *UsersService) DisableTwoFactor(user int, options ...RequestOptionFunc) error { - u := fmt.Sprintf("users/%d/disable_two_factor", user) - - req, err := s.client.NewRequest(http.MethodPatch, u, nil, options) - if err != nil { - return err - } - - resp, err := s.client.Do(req, nil) - if err != nil && resp == nil { - return err - } - - switch resp.StatusCode { - case 204: - return nil - case 400: - return ErrUserTwoFactorNotEnabled - case 403: - return ErrUserDisableTwoFactorPrevented - case 404: - return ErrUserNotFound - default: - return fmt.Errorf("Received unexpected result code: %d", resp.StatusCode) - } -} - -// UserRunner represents a GitLab runner linked to the current user. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/users.html#create-a-runner -type UserRunner struct { - ID int `json:"id"` - Token string `json:"token"` - TokenExpiresAt *time.Time `json:"token_expires_at"` -} - -// CreateUserRunnerOptions represents the available CreateUserRunner() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/users.html#create-a-runner -type CreateUserRunnerOptions struct { - RunnerType *string `url:"runner_type,omitempty" json:"runner_type,omitempty"` - GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` - ProjectID *int `url:"project_id,omitempty" json:"project_id,omitempty"` - Description *string `url:"description,omitempty" json:"description,omitempty"` - Paused *bool `url:"paused,omitempty" json:"paused,omitempty"` - Locked *bool `url:"locked,omitempty" json:"locked,omitempty"` - RunUntagged *bool `url:"run_untagged,omitempty" json:"run_untagged,omitempty"` - TagList *[]string `url:"tag_list,omitempty" json:"tag_list,omitempty"` - AccessLevel *string `url:"access_level,omitempty" json:"access_level,omitempty"` - MaximumTimeout *int `url:"maximum_timeout,omitempty" json:"maximum_timeout,omitempty"` - MaintenanceNote *string `url:"maintenance_note,omitempty" json:"maintenance_note,omitempty"` -} - -// CreateUserRunner creates a runner linked to the current user. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/users.html#create-a-runner -func (s *UsersService) CreateUserRunner(opts *CreateUserRunnerOptions, options ...RequestOptionFunc) (*UserRunner, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "user/runners", opts, options) - if err != nil { - return nil, nil, err - } - - r := new(UserRunner) - resp, err := s.client.Do(req, r) - if err != nil { - return nil, resp, err - } - - return r, resp, nil -} - -// CreateServiceAccountUser creates a new service account user. Note only administrators can create new service account users. 
-// -// GitLab API docs: https://docs.gitlab.com/ee/api/users.html#create-service-account-user -func (s *UsersService) CreateServiceAccountUser(options ...RequestOptionFunc) (*User, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "service_accounts", nil, options) - if err != nil { - return nil, nil, err - } - - usr := new(User) - resp, err := s.client.Do(req, usr) - if err != nil { - return nil, resp, err - } - - return usr, resp, nil -} - -// UploadAvatar uploads an avatar to the current user. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/users.html#upload-a-current-user-avatar -func (s *UsersService) UploadAvatar(avatar io.Reader, filename string, options ...RequestOptionFunc) (*User, *Response, error) { - u := "user/avatar" - - req, err := s.client.UploadRequest( - http.MethodPut, - u, - avatar, - filename, - UploadAvatar, - nil, - options, - ) - if err != nil { - return nil, nil, err - } - - usr := new(User) - resp, err := s.client.Do(req, usr) - if err != nil { - return nil, resp, err - } - - return usr, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/validate.go b/vendor/github.com/xanzy/go-gitlab/validate.go deleted file mode 100644 index cb79ac8380..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/validate.go +++ /dev/null @@ -1,154 +0,0 @@ -// -// Copyright 2021, Sander van Harmelen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import ( - "fmt" - "net/http" -) - -// ValidateService handles communication with the validation related methods of -// the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/lint.html -type ValidateService struct { - client *Client -} - -// LintResult represents the linting results. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/lint.html -type LintResult struct { - Status string `json:"status"` - Errors []string `json:"errors"` - Warnings []string `json:"warnings"` - MergedYaml string `json:"merged_yaml"` -} - -// ProjectLintResult represents the linting results by project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/lint.html#validate-a-projects-ci-configuration -type ProjectLintResult struct { - Valid bool `json:"valid"` - Errors []string `json:"errors"` - Warnings []string `json:"warnings"` - MergedYaml string `json:"merged_yaml"` -} - -// LintOptions represents the available Lint() options. -// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/lint.html#validate-the-ci-yaml-configuration -type LintOptions struct { - Content string `url:"content,omitempty" json:"content,omitempty"` - IncludeMergedYAML bool `url:"include_merged_yaml,omitempty" json:"include_merged_yaml,omitempty"` - IncludeJobs bool `url:"include_jobs,omitempty" json:"include_jobs,omitempty"` -} - -// Lint validates .gitlab-ci.yml content. -// Deprecated: This endpoint was removed in GitLab 16.0. 
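Reviewer note: UploadAvatar is the one upload-style wrapper removed here; it streams the reader as multipart form data behind the PUT user/avatar request shown above. A minimal sketch, assuming a local avatar.png and a placeholder token:

```go
package main

import (
	"fmt"
	"log"
	"os"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	client, err := gitlab.NewClient("glpat-REDACTED") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	f, err := os.Open("avatar.png") // hypothetical local file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// The filename is sent as part of the multipart body; the reader is
	// consumed by the wrapper's UploadRequest helper.
	user, _, err := client.Users.UploadAvatar(f, "avatar.png")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("avatar updated for:", user.Username)
}
```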
-// -// Gitlab API docs: -// https://docs.gitlab.com/ee/api/lint.html#validate-the-ci-yaml-configuration-deprecated -func (s *ValidateService) Lint(opts *LintOptions, options ...RequestOptionFunc) (*LintResult, *Response, error) { - req, err := s.client.NewRequest(http.MethodPost, "ci/lint", &opts, options) - if err != nil { - return nil, nil, err - } - - l := new(LintResult) - resp, err := s.client.Do(req, l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil -} - -// ProjectNamespaceLintOptions represents the available ProjectNamespaceLint() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/lint.html#validate-a-ci-yaml-configuration-with-a-namespace -type ProjectNamespaceLintOptions struct { - Content *string `url:"content,omitempty" json:"content,omitempty"` - DryRun *bool `url:"dry_run,omitempty" json:"dry_run,omitempty"` - IncludeJobs *bool `url:"include_jobs,omitempty" json:"include_jobs,omitempty"` - Ref *string `url:"ref,omitempty" json:"ref,omitempty"` -} - -// ProjectNamespaceLint validates .gitlab-ci.yml content by project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/lint.html#validate-a-ci-yaml-configuration-with-a-namespace -func (s *ValidateService) ProjectNamespaceLint(pid interface{}, opt *ProjectNamespaceLintOptions, options ...RequestOptionFunc) (*ProjectLintResult, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/ci/lint", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, &opt, options) - if err != nil { - return nil, nil, err - } - - l := new(ProjectLintResult) - resp, err := s.client.Do(req, l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil -} - -// ProjectLintOptions represents the available ProjectLint() options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/lint.html#validate-a-projects-ci-configuration -type ProjectLintOptions struct { - ContentRef *string `url:"content_ref,omitempty" json:"content_ref,omitempty"` - DryRunRef *string `url:"dry_run_ref,omitempty" json:"dry_run_ref,omitempty"` - DryRun *bool `url:"dry_run,omitempty" json:"dry_run,omitempty"` - IncludeJobs *bool `url:"include_jobs,omitempty" json:"include_jobs,omitempty"` - Ref *string `url:"ref,omitempty" json:"ref,omitempty"` -} - -// ProjectLint validates .gitlab-ci.yml content by project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/lint.html#validate-a-projects-ci-configuration -func (s *ValidateService) ProjectLint(pid interface{}, opt *ProjectLintOptions, options ...RequestOptionFunc) (*ProjectLintResult, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/ci/lint", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, &opt, options) - if err != nil { - return nil, nil, err - } - - l := new(ProjectLintResult) - resp, err := s.client.Do(req, l) - if err != nil { - return nil, resp, err - } - - return l, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/version.go b/vendor/github.com/xanzy/go-gitlab/version.go deleted file mode 100644 index 0d30b90062..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/version.go +++ /dev/null @@ -1,58 +0,0 @@ -// -// Copyright 2021, Andrea Funto' -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package gitlab - -import "net/http" - -// VersionService handles communication with the GitLab server instance to -// retrieve its version information via the GitLab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/version.html -type VersionService struct { - client *Client -} - -// Version represents a GitLab instance version. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/version.html -type Version struct { - Version string `json:"version"` - Revision string `json:"revision"` -} - -func (s Version) String() string { - return Stringify(s) -} - -// GetVersion gets a GitLab server instance version; it is only available to -// authenticated users. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/version.html -func (s *VersionService) GetVersion(options ...RequestOptionFunc) (*Version, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, "version", nil, options) - if err != nil { - return nil, nil, err - } - - v := new(Version) - resp, err := s.client.Do(req, v) - if err != nil { - return nil, resp, err - } - - return v, resp, nil -} diff --git a/vendor/github.com/xanzy/go-gitlab/wikis.go b/vendor/github.com/xanzy/go-gitlab/wikis.go deleted file mode 100644 index 39847ef053..0000000000 --- a/vendor/github.com/xanzy/go-gitlab/wikis.go +++ /dev/null @@ -1,204 +0,0 @@ -// -// Copyright 2021, Stany MARCEL -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package gitlab - -import ( - "fmt" - "net/http" - "net/url" -) - -// WikisService handles communication with the wikis related methods of -// the Gitlab API. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/wikis.html -type WikisService struct { - client *Client -} - -// Wiki represents a GitLab wiki. -// -// GitLab API docs: https://docs.gitlab.com/ee/api/wikis.html -type Wiki struct { - Content string `json:"content"` - Encoding string `json:"encoding"` - Format WikiFormatValue `json:"format"` - Slug string `json:"slug"` - Title string `json:"title"` -} - -func (w Wiki) String() string { - return Stringify(w) -} - -// ListWikisOptions represents the available ListWikis options. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/wikis.html#list-wiki-pages -type ListWikisOptions struct { - WithContent *bool `url:"with_content,omitempty" json:"with_content,omitempty"` -} - -// ListWikis lists all pages of the wiki of the given project id. -// When with_content is set, it also returns the content of the pages. 
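Reviewer note: the version and wikis services being deleted in this hunk are read-mostly and trivially exercised together. A minimal sketch, assuming a placeholder token and a hypothetical my-group/my-project path (ListWikis also accepts a numeric project ID):

```go
package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	client, err := gitlab.NewClient("glpat-REDACTED") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// GetVersion requires authentication and reports the server build.
	v, _, err := client.Version.GetVersion()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("GitLab %s (revision %s)\n", v.Version, v.Revision)

	// WithContent asks the API to include each page body in the listing.
	pages, _, err := client.Wikis.ListWikis("my-group/my-project",
		&gitlab.ListWikisOptions{WithContent: gitlab.Bool(true)})
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range pages {
		fmt.Printf("%s (%s, %d bytes)\n", p.Title, p.Format, len(p.Content))
	}
}
```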
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/wikis.html#list-wiki-pages -func (s *WikisService) ListWikis(pid interface{}, opt *ListWikisOptions, options ...RequestOptionFunc) ([]*Wiki, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/wikis", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - var ws []*Wiki - resp, err := s.client.Do(req, &ws) - if err != nil { - return nil, resp, err - } - - return ws, resp, nil -} - -// GetWikiPageOptions represents options to GetWikiPage -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/wikis.html#get-a-wiki-page -type GetWikiPageOptions struct { - RenderHTML *bool `url:"render_html,omitempty" json:"render_html,omitempty"` - Version *string `url:"version,omitempty" json:"version,omitempty"` -} - -// GetWikiPage gets a wiki page for a given project. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/wikis.html#get-a-wiki-page -func (s *WikisService) GetWikiPage(pid interface{}, slug string, opt *GetWikiPageOptions, options ...RequestOptionFunc) (*Wiki, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/wikis/%s", PathEscape(project), url.PathEscape(slug)) - - req, err := s.client.NewRequest(http.MethodGet, u, opt, options) - if err != nil { - return nil, nil, err - } - - w := new(Wiki) - resp, err := s.client.Do(req, w) - if err != nil { - return nil, resp, err - } - - return w, resp, nil -} - -// CreateWikiPageOptions represents options to CreateWikiPage. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/wikis.html#create-a-new-wiki-page -type CreateWikiPageOptions struct { - Content *string `url:"content,omitempty" json:"content,omitempty"` - Title *string `url:"title,omitempty" json:"title,omitempty"` - Format *WikiFormatValue `url:"format,omitempty" json:"format,omitempty"` -} - -// CreateWikiPage creates a new wiki page for the given repository with -// the given title, slug, and content. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/wikis.html#create-a-new-wiki-page -func (s *WikisService) CreateWikiPage(pid interface{}, opt *CreateWikiPageOptions, options ...RequestOptionFunc) (*Wiki, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/wikis", PathEscape(project)) - - req, err := s.client.NewRequest(http.MethodPost, u, opt, options) - if err != nil { - return nil, nil, err - } - - w := new(Wiki) - resp, err := s.client.Do(req, w) - if err != nil { - return nil, resp, err - } - - return w, resp, nil -} - -// EditWikiPageOptions represents options to EditWikiPage. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/wikis.html#edit-an-existing-wiki-page -type EditWikiPageOptions struct { - Content *string `url:"content,omitempty" json:"content,omitempty"` - Title *string `url:"title,omitempty" json:"title,omitempty"` - Format *WikiFormatValue `url:"format,omitempty" json:"format,omitempty"` -} - -// EditWikiPage Updates an existing wiki page. At least one parameter is -// required to update the wiki page. 
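Reviewer note: the create/edit pair above addresses pages by the slug GitLab derives from the title, and EditWikiPage rejects calls that set no fields. A hedged sketch, assuming the WikiFormatMarkdown constant from the same package and the placeholders used earlier:

```go
package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	client, err := gitlab.NewClient("glpat-REDACTED") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	format := gitlab.WikiFormatMarkdown // assumed package constant
	page, _, err := client.Wikis.CreateWikiPage("my-group/my-project",
		&gitlab.CreateWikiPageOptions{
			Title:   gitlab.String("Release Notes"),
			Content: gitlab.String("# v0.1.0\n\nInitial release."),
			Format:  &format,
		})
	if err != nil {
		log.Fatal(err)
	}

	// Reuse the server-assigned slug; at least one field must be set.
	page, _, err = client.Wikis.EditWikiPage("my-group/my-project", page.Slug,
		&gitlab.EditWikiPageOptions{
			Content: gitlab.String("# v0.1.0\n\nInitial release.\n\n## Known issues\n\nNone."),
		})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("updated:", page.Slug)
}
```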
-// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/wikis.html#edit-an-existing-wiki-page -func (s *WikisService) EditWikiPage(pid interface{}, slug string, opt *EditWikiPageOptions, options ...RequestOptionFunc) (*Wiki, *Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, nil, err - } - u := fmt.Sprintf("projects/%s/wikis/%s", PathEscape(project), url.PathEscape(slug)) - - req, err := s.client.NewRequest(http.MethodPut, u, opt, options) - if err != nil { - return nil, nil, err - } - - w := new(Wiki) - resp, err := s.client.Do(req, w) - if err != nil { - return nil, resp, err - } - - return w, resp, nil -} - -// DeleteWikiPage deletes a wiki page with a given slug. -// -// GitLab API docs: -// https://docs.gitlab.com/ee/api/wikis.html#delete-a-wiki-page -func (s *WikisService) DeleteWikiPage(pid interface{}, slug string, options ...RequestOptionFunc) (*Response, error) { - project, err := parseID(pid) - if err != nil { - return nil, err - } - u := fmt.Sprintf("projects/%s/wikis/%s", PathEscape(project), url.PathEscape(slug)) - - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) - if err != nil { - return nil, err - } - - return s.client.Do(req, nil) -} diff --git a/vendor/github.com/zeebo/errs/AUTHORS b/vendor/github.com/zeebo/errs/AUTHORS index 71f6192426..6246e7403d 100644 --- a/vendor/github.com/zeebo/errs/AUTHORS +++ b/vendor/github.com/zeebo/errs/AUTHORS @@ -1,9 +1,5 @@ Egon Elbre Jeff Wendling JT Olio -<<<<<<< HEAD Kaloyan Raev paul cannon -======= -Kaloyan Raev ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/github.com/zeebo/errs/README.md b/vendor/github.com/zeebo/errs/README.md index e06407e7ea..0f72bf7b01 100644 --- a/vendor/github.com/zeebo/errs/README.md +++ b/vendor/github.com/zeebo/errs/README.md @@ -152,11 +152,7 @@ func checkForNeatThings() { } ``` -<<<<<<< HEAD It knows about both the `Unwrap() error` and `Unwrap() []error` methods that are -======= -It knows about both the `Cause() error` and `Unwrap() error` methods that are ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) often used in the community, and will call them as many times as possible. ### Defer diff --git a/vendor/github.com/zeebo/errs/errs.go b/vendor/github.com/zeebo/errs/errs.go index 6745417450..9a42e3da87 100644 --- a/vendor/github.com/zeebo/errs/errs.go +++ b/vendor/github.com/zeebo/errs/errs.go @@ -14,24 +14,11 @@ type Namer interface{ Name() (string, bool) } // Causer is implemented by all errors returned in this package. It returns // the underlying cause of the error, or nil if there is no underlying cause. -<<<<<<< HEAD // // Deprecated: check for the 'Unwrap()' interface from the stdlib errors package // instead. type Causer interface{ Cause() error } -======= -type Causer interface{ Cause() error } - -// unwrapper is implemented by all errors returned in this package. It returns -// the underlying cause of the error, or nil if there is no underlying error. -type unwrapper interface{ Unwrap() error } - -// ungrouper is implemented by combinedError returned in this package. It -// returns all underlying errors, or nil if there is no underlying error. -type ungrouper interface{ Ungroup() []error } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // New returns an error not contained in any class. This is the same as calling // fmt.Errorf(...) except it captures a stack trace on creation. 
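Reviewer note: the zeebo/errs conflict resolution above consistently keeps the stdlib-aligned side: Causer is deprecated, errorT keeps a plain Unwrap() error, combinedError now exposes Unwrap() []error, and the new Class.Instance() helper makes class membership visible to errors.Is. A minimal sketch of the resulting inspection model, assuming Go 1.20+ for multi-error unwrapping:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/zeebo/errs"
)

var NotFound = errs.Class("not found")

func main() {
	base := errors.New("missing row")
	wrapped := NotFound.Wrap(base)

	// errorT keeps Unwrap() error, so plain stdlib inspection works and
	// the deprecated Causer interface is no longer needed.
	fmt.Println(errors.Is(wrapped, base)) // true

	// Instance() makes class membership checkable with errors.Is instead
	// of the package-specific Has().
	fmt.Println(errors.Is(wrapped, NotFound.Instance())) // true

	// combinedError's Unwrap() []error lets errors.Is descend into every
	// member of a Combine()d group (Go 1.20+ multi-error semantics).
	other := errors.New("second failure")
	fmt.Println(errors.Is(errs.Combine(wrapped, other), other)) // true
}
```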
func New(format string, args ...interface{}) error { @@ -53,7 +40,6 @@ func WrapP(err *error) { } } -<<<<<<< HEAD // Often, we call Unwrap as much as possible. Since comparing arbitrary // interfaces with equality isn't panic safe, we only loop up to 100 // times to ensure that a poor implementation that causes a cycle does @@ -65,24 +51,12 @@ const maxUnwrap = 100 // Deprecated: Prefer errors.Is() and errors.As(). func Unwrap(err error) error { for i := 0; err != nil && i < maxUnwrap; i++ { -======= -// Often, we call Cause as much as possible. Since comparing arbitrary -// interfaces with equality isn't panic safe, we only loop up to 100 -// times to ensure that a poor implementation that causes a cycle does -// not run forever. -const maxCause = 100 - -// Unwrap returns the underlying error, if any, or just the error. -func Unwrap(err error) error { - for i := 0; err != nil && i < maxCause; i++ { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var nerr error switch e := err.(type) { case Causer: nerr = e.Cause() -<<<<<<< HEAD case interface{ Unwrap() error }: nerr = e.Unwrap() @@ -98,10 +72,6 @@ func Unwrap(err error) error { if len(errs) > 0 { nerr = errs[0] } -======= - case unwrapper: - nerr = e.Unwrap() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if nerr == nil { @@ -115,7 +85,6 @@ func Unwrap(err error) error { // Classes returns all the classes that have wrapped the error. func Classes(err error) (classes []*Class) { -<<<<<<< HEAD IsFunc(err, func(err error) bool { if e, ok := err.(*errorT); ok { classes = append(classes, e.class) @@ -123,45 +92,10 @@ func Classes(err error) (classes []*Class) { return false }) return classes -======= - causes := 0 - for { - switch e := err.(type) { - case *errorT: - if e.class != nil { - classes = append(classes, e.class) - } - err = e.err - continue - - case Causer: - err = e.Cause() - - case unwrapper: - err = e.Unwrap() - - default: - return classes - } - - if causes >= maxCause { - return classes - } - causes++ - } -} - -// Is checks if any of the underlying errors matches target -func Is(err, target error) bool { - return IsFunc(err, func(err error) bool { - return err == target - }) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // IsFunc checks if any of the underlying errors matches the func func IsFunc(err error, is func(err error) bool) bool { -<<<<<<< HEAD for { if is(err) { return true @@ -192,47 +126,6 @@ func IsFunc(err error, is func(err error) bool) bool { return false } } -======= - causes := 0 - errs := []error{err} - - for len(errs) > 0 { - var next []error - for _, err := range errs { - if is(err) { - return true - } - - switch e := err.(type) { - case ungrouper: - ungrouped := e.Ungroup() - for _, unerr := range ungrouped { - if unerr != nil { - next = append(next, unerr) - } - } - case Causer: - cause := e.Cause() - if cause != nil { - next = append(next, cause) - } - case unwrapper: - unwrapped := e.Unwrap() - if unwrapped != nil { - next = append(next, unwrapped) - } - } - - if causes >= maxCause { - return false - } - causes++ - } - errs = next - } - - return false ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // @@ -243,12 +136,8 @@ func IsFunc(err error, is func(err error) bool) bool { // errors are part of the class. type Class string -<<<<<<< HEAD // Has returns true if the passed in error (or any error wrapped by it) has // this class. -======= -// Has returns true if the passed in error was wrapped by this class. 
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (c *Class) Has(err error) bool { return IsFunc(err, func(err error) bool { errt, ok := err.(*errorT) @@ -276,7 +165,6 @@ func (c *Class) WrapP(err *error) { } } -<<<<<<< HEAD // Instance creates a class membership object which implements the error // interface and allows errors.Is() to check whether given errors are // (or contain) an instance of this class. @@ -293,8 +181,6 @@ func (c *Class) Instance() error { return (*classMembershipChecker)(c) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // create constructs the error, or just adds the class to the error, keeping // track of the stack if it needs to construct it. func (c *Class) create(depth int, err error) error { @@ -325,15 +211,12 @@ func (c *Class) create(depth int, err error) error { return errt } -<<<<<<< HEAD type classMembershipChecker Class func (cmc *classMembershipChecker) Error() string { panic("classMembershipChecker used as concrete error! don't do that") } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // errors // @@ -375,24 +258,13 @@ func (e *errorT) Format(f fmt.State, c rune) { } } -<<<<<<< HEAD // Cause implements the interface wrapping errors were previously // expected to implement to allow getting at underlying causes. -======= -// Cause implements the interface wrapping errors are expected to implement -// to allow getting at underlying causes. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (e *errorT) Cause() error { return e.err } -<<<<<<< HEAD // Unwrap returns the immediate underlying error. -======= -// Unwrap implements the draft design for error inspection. Since this is -// on an unexported type, it should not be hard to maintain going forward -// given that it also is the exact same semantics as Cause. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (e *errorT) Unwrap() error { return e.err } @@ -405,7 +277,6 @@ func (e *errorT) Name() (string, bool) { return string(*e.class), true } -<<<<<<< HEAD // Is determines whether an error is an instance of the given error class. // // Use with (*Class).Instance(). @@ -414,8 +285,6 @@ func (e *errorT) Is(err error) bool { return ok && e.class == (*Class)(cmc) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // summarizeStack writes stack line entries to the writer. func summarizeStack(w io.Writer, pcs []uintptr) { frames := runtime.CallersFrames(pcs) diff --git a/vendor/github.com/zeebo/errs/group.go b/vendor/github.com/zeebo/errs/group.go index 2a37999097..22b824aaf8 100644 --- a/vendor/github.com/zeebo/errs/group.go +++ b/vendor/github.com/zeebo/errs/group.go @@ -56,28 +56,8 @@ func (group Group) sanitize() Group { // combinedError is a list of non-empty errors type combinedError []error -<<<<<<< HEAD // Unwrap returns the first error. func (group combinedError) Unwrap() []error { return group } -======= -// Cause returns the first error. -func (group combinedError) Cause() error { - if len(group) > 0 { - return group[0] - } - return nil -} - -// Unwrap returns the first error. -func (group combinedError) Unwrap() error { - return group.Cause() -} - -// Ungroup returns all errors. -func (group combinedError) Ungroup() []error { - return group -} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Error returns error string delimited by semicolons. 
func (group combinedError) Error() string { return fmt.Sprintf("%v", group) } diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/version.go b/vendor/go.opentelemetry.io/contrib/detectors/gcp/version.go index 268cc51e68..2a38c01957 100644 --- a/vendor/go.opentelemetry.io/contrib/detectors/gcp/version.go +++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/version.go @@ -5,11 +5,7 @@ package gcp // import "go.opentelemetry.io/contrib/detectors/gcp" // Version is the current release version of the GCP resource detector. func Version() string { -<<<<<<< HEAD return "1.32.0" -======= - return "1.29.0" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go index c143fdb58a..9e87fb4bb1 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go @@ -51,19 +51,11 @@ type config struct { tracer trace.Tracer meter metric.Meter -<<<<<<< HEAD rpcDuration metric.Float64Histogram rpcInBytes metric.Int64Histogram rpcOutBytes metric.Int64Histogram rpcInMessages metric.Int64Histogram rpcOutMessages metric.Int64Histogram -======= - rpcDuration metric.Float64Histogram - rpcRequestSize metric.Int64Histogram - rpcResponseSize metric.Int64Histogram - rpcRequestsPerRPC metric.Int64Histogram - rpcResponsesPerRPC metric.Int64Histogram ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Option applies an option value for a config. @@ -104,73 +96,41 @@ func newConfig(opts []Option, role string) *config { } } -<<<<<<< HEAD rpcRequestSize, err := c.meter.Int64Histogram("rpc."+role+".request.size", -======= - c.rpcRequestSize, err = c.meter.Int64Histogram("rpc."+role+".request.size", ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) metric.WithDescription("Measures size of RPC request messages (uncompressed)."), metric.WithUnit("By")) if err != nil { otel.Handle(err) -<<<<<<< HEAD if rpcRequestSize == nil { rpcRequestSize = noop.Int64Histogram{} } } rpcResponseSize, err := c.meter.Int64Histogram("rpc."+role+".response.size", -======= - if c.rpcRequestSize == nil { - c.rpcRequestSize = noop.Int64Histogram{} - } - } - - c.rpcResponseSize, err = c.meter.Int64Histogram("rpc."+role+".response.size", ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) metric.WithDescription("Measures size of RPC response messages (uncompressed)."), metric.WithUnit("By")) if err != nil { otel.Handle(err) -<<<<<<< HEAD if rpcResponseSize == nil { rpcResponseSize = noop.Int64Histogram{} } } rpcRequestsPerRPC, err := c.meter.Int64Histogram("rpc."+role+".requests_per_rpc", -======= - if c.rpcResponseSize == nil { - c.rpcResponseSize = noop.Int64Histogram{} - } - } - - c.rpcRequestsPerRPC, err = c.meter.Int64Histogram("rpc."+role+".requests_per_rpc", ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) metric.WithDescription("Measures the number of messages received per RPC. 
Should be 1 for all non-streaming RPCs."), metric.WithUnit("{count}")) if err != nil { otel.Handle(err) -<<<<<<< HEAD if rpcRequestsPerRPC == nil { rpcRequestsPerRPC = noop.Int64Histogram{} } } rpcResponsesPerRPC, err := c.meter.Int64Histogram("rpc."+role+".responses_per_rpc", -======= - if c.rpcRequestsPerRPC == nil { - c.rpcRequestsPerRPC = noop.Int64Histogram{} - } - } - - c.rpcResponsesPerRPC, err = c.meter.Int64Histogram("rpc."+role+".responses_per_rpc", ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."), metric.WithUnit("{count}")) if err != nil { otel.Handle(err) -<<<<<<< HEAD if rpcResponsesPerRPC == nil { rpcResponsesPerRPC = noop.Int64Histogram{} } @@ -194,13 +154,6 @@ func newConfig(opts []Option, role string) *config { c.rpcOutMessages = noop.Int64Histogram{} } -======= - if c.rpcResponsesPerRPC == nil { - c.rpcResponsesPerRPC = noop.Int64Histogram{} - } - } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return c } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go index 4739f72f1e..c01cb897cd 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go @@ -13,36 +13,22 @@ import ( "google.golang.org/grpc/stats" "google.golang.org/grpc/status" -<<<<<<< HEAD -======= - "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/metric" semconv "go.opentelemetry.io/otel/semconv/v1.17.0" "go.opentelemetry.io/otel/trace" -<<<<<<< HEAD "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) type gRPCContextKey struct{} type gRPCContext struct { -<<<<<<< HEAD inMessages int64 outMessages int64 metricAttrs []attribute.KeyValue record bool -======= - messagesReceived int64 - messagesSent int64 - metricAttrs []attribute.KeyValue - record bool ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type serverHandler struct { @@ -165,13 +151,8 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool case *stats.Begin: case *stats.InPayload: if gctx != nil { -<<<<<<< HEAD messageId = atomic.AddInt64(&gctx.inMessages, 1) c.rpcInBytes.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) -======= - messageId = atomic.AddInt64(&gctx.messagesReceived, 1) - c.rpcRequestSize.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if c.ReceivedEvent { @@ -186,13 +167,8 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool } case *stats.OutPayload: if gctx != nil { -<<<<<<< HEAD messageId = atomic.AddInt64(&gctx.outMessages, 1) c.rpcOutBytes.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) -======= - messageId = atomic.AddInt64(&gctx.messagesSent, 1) - c.rpcResponseSize.Record(ctx, int64(rs.Length), 
metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if c.SentEvent { @@ -238,13 +214,8 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool c.rpcDuration.Record(ctx, elapsedTime, recordOpts...) if gctx != nil { -<<<<<<< HEAD c.rpcInMessages.Record(ctx, atomic.LoadInt64(&gctx.inMessages), recordOpts...) c.rpcOutMessages.Record(ctx, atomic.LoadInt64(&gctx.outMessages), recordOpts...) -======= - c.rpcRequestsPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesReceived), recordOpts...) - c.rpcResponsesPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesSent), recordOpts...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } default: return diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go index 893dee9a6d..55219cb6cc 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go @@ -5,11 +5,7 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g // Version is the current release version of the gRPC instrumentation. func Version() string { -<<<<<<< HEAD return "0.57.0" -======= - return "0.54.0" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go index 8235af4737..b25641c55d 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -18,11 +18,7 @@ var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} // Get is a convenient replacement for http.Get that adds a span around the request. func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) { -<<<<<<< HEAD req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL, nil) -======= - req, err := http.NewRequestWithContext(ctx, "GET", targetURL, nil) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -31,11 +27,7 @@ func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) // Head is a convenient replacement for http.Head that adds a span around the request. func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) { -<<<<<<< HEAD req, err := http.NewRequestWithContext(ctx, http.MethodHead, targetURL, nil) -======= - req, err := http.NewRequestWithContext(ctx, "HEAD", targetURL, nil) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -44,11 +36,7 @@ func Head(ctx context.Context, targetURL string) (resp *http.Response, err error // Post is a convenient replacement for http.Post that adds a span around the request. 
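Reviewer note: the otelhttp hunks here only swap string literals for the http.Method* constants, so the convenience helpers behave exactly as before. A minimal usage sketch (example.com is a placeholder URL; with no SDK installed the global no-op TracerProvider is used, so this runs as-is):

```go
package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	ctx := context.Background()

	// otelhttp.Get builds the request with http.NewRequestWithContext and
	// sends it through DefaultClient, whose Transport records a client span.
	resp, err := otelhttp.Get(ctx, "https://example.com") // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Status, len(body), "bytes")
}
```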
func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) { -<<<<<<< HEAD req, err := http.NewRequestWithContext(ctx, http.MethodPost, targetURL, body) -======= - req, err := http.NewRequestWithContext(ctx, "POST", targetURL, body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go index cd91491827..a83a026274 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -18,16 +18,6 @@ const ( WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded) ) -<<<<<<< HEAD -======= -// Client HTTP metrics. -const ( - clientRequestSize = "http.client.request.size" // Outgoing request bytes total - clientResponseSize = "http.client.response.size" // Outgoing response bytes total - clientDuration = "http.client.duration" // Outgoing end to end duration, milliseconds -) - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Filter is a predicate used to determine whether a given http.request should // be traced. A Filter must return true if the request should be traced. type Filter func(*http.Request) bool diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index 3003f4b739..3ea05d0199 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -12,10 +12,7 @@ import ( "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/otel" -<<<<<<< HEAD "go.opentelemetry.io/otel/attribute" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" ) @@ -25,7 +22,6 @@ type middleware struct { operation string server string -<<<<<<< HEAD tracer trace.Tracer propagators propagation.TextMapPropagator spanStartOptions []trace.SpanStartOption @@ -36,17 +32,6 @@ type middleware struct { publicEndpoint bool publicEndpointFn func(*http.Request) bool metricAttributesFn func(*http.Request) []attribute.KeyValue -======= - tracer trace.Tracer - propagators propagation.TextMapPropagator - spanStartOptions []trace.SpanStartOption - readEvent bool - writeEvent bool - filters []Filter - spanNameFormatter func(string, *http.Request) string - publicEndpoint bool - publicEndpointFn func(*http.Request) bool ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) semconv semconv.HTTPServer } @@ -96,16 +81,7 @@ func (h *middleware) configure(c *config) { h.publicEndpointFn = c.PublicEndpointFn h.server = c.ServerName h.semconv = semconv.NewHTTPServer(c.Meter) -<<<<<<< HEAD h.metricAttributesFn = c.MetricAttributesFn -======= -} - -func handleErr(err error) { - if err != nil { - otel.Handle(err) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // serveHTTP sets up tracing and calls the given next http.Handler with the span @@ -144,14 +120,11 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r 
*http.Request, next http } } -<<<<<<< HEAD if startTime := StartTimeFromContext(ctx); !startTime.IsZero() { opts = append(opts, trace.WithTimestamp(startTime)) requestStartTime = startTime } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...) defer span.End() @@ -219,7 +192,6 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http // Use floating point division here for higher precision (instead of Millisecond method). elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) -<<<<<<< HEAD metricAttributes := semconv.MetricAttributes{ Req: r, StatusCode: statusCode, @@ -245,19 +217,6 @@ func (h *middleware) metricAttributesFromRequest(r *http.Request) []attribute.Ke return attributeForRequest } -======= - h.semconv.RecordMetrics(ctx, semconv.MetricData{ - ServerName: h.server, - Req: r, - StatusCode: statusCode, - AdditionalAttributes: labeler.Get(), - RequestSize: bw.BytesRead(), - ResponseSize: bytesWritten, - ElapsedTime: elapsedTime, - }) -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // WithRouteTag annotates spans and metrics with the provided route name // with HTTP route attribute. func WithRouteTag(route string, h http.Handler) http.Handler { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go index 2977a8b096..fbc344cbdd 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go @@ -44,13 +44,9 @@ func (w *RespWriterWrapper) Write(p []byte) (int, error) { w.mu.Lock() defer w.mu.Unlock() -<<<<<<< HEAD if !w.wroteHeader { w.writeHeader(http.StatusOK) } -======= - w.writeHeader(http.StatusOK) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) n, err := w.ResponseWriter.Write(p) n1 := int64(n) @@ -86,16 +82,12 @@ func (w *RespWriterWrapper) writeHeader(statusCode int) { // Flush implements [http.Flusher]. func (w *RespWriterWrapper) Flush() { -<<<<<<< HEAD w.mu.Lock() defer w.mu.Unlock() if !w.wroteHeader { w.writeHeader(http.StatusOK) } -======= - w.WriteHeader(http.StatusOK) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if f, ok := w.ResponseWriter.(http.Flusher); ok { f.Flush() diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go index a9a4a554eb..eaf4c37967 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go @@ -1,9 +1,6 @@ -<<<<<<< HEAD // Code created by gotmpl. DO NOT MODIFY. 
// source: internal/shared/semconv/env.go.tmpl -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -15,23 +12,17 @@ import ( "net/http" "os" "strings" -<<<<<<< HEAD "sync" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/metric" ) -<<<<<<< HEAD // OTelSemConvStabilityOptIn is an environment variable. // That can be set to "old" or "http/dup" to opt into the new HTTP semantic conventions. const OTelSemConvStabilityOptIn = "OTEL_SEMCONV_STABILITY_OPT_IN" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type ResponseTelemetry struct { StatusCode int ReadBytes int64 @@ -47,14 +38,11 @@ type HTTPServer struct { requestBytesCounter metric.Int64Counter responseBytesCounter metric.Int64Counter serverLatencyMeasure metric.Float64Histogram -<<<<<<< HEAD // New metrics requestBodySizeHistogram metric.Int64Histogram responseBodySizeHistogram metric.Int64Histogram requestDurationHistogram metric.Float64Histogram -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // RequestTraceAttrs returns trace attributes for an HTTP request received by a @@ -75,15 +63,9 @@ type HTTPServer struct { // The req Host will be used to determine the server instead. func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { if s.duplicate { -<<<<<<< HEAD return append(OldHTTPServer{}.RequestTraceAttrs(server, req), CurrentHTTPServer{}.RequestTraceAttrs(server, req)...) } return OldHTTPServer{}.RequestTraceAttrs(server, req) -======= - return append(oldHTTPServer{}.RequestTraceAttrs(server, req), newHTTPServer{}.RequestTraceAttrs(server, req)...) - } - return oldHTTPServer{}.RequestTraceAttrs(server, req) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. @@ -91,24 +73,14 @@ func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attrib // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { if s.duplicate { -<<<<<<< HEAD return append(OldHTTPServer{}.ResponseTraceAttrs(resp), CurrentHTTPServer{}.ResponseTraceAttrs(resp)...) } return OldHTTPServer{}.ResponseTraceAttrs(resp) -======= - return append(oldHTTPServer{}.ResponseTraceAttrs(resp), newHTTPServer{}.ResponseTraceAttrs(resp)...) - } - return oldHTTPServer{}.ResponseTraceAttrs(resp) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Route returns the attribute for the route. 
func (s HTTPServer) Route(route string) attribute.KeyValue { -<<<<<<< HEAD return OldHTTPServer{}.Route(route) -======= - return oldHTTPServer{}.Route(route) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Status returns a span status code and message for an HTTP status code @@ -124,7 +96,6 @@ func (s HTTPServer) Status(code int) (codes.Code, string) { return codes.Unset, "" } -<<<<<<< HEAD type ServerMetricData struct { ServerName string ResponseSize int64 @@ -186,55 +157,19 @@ func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) { func NewHTTPServer(meter metric.Meter) HTTPServer { env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn)) -======= -type MetricData struct { - ServerName string - Req *http.Request - StatusCode int - AdditionalAttributes []attribute.KeyValue - - RequestSize int64 - ResponseSize int64 - ElapsedTime float64 -} - -func (s HTTPServer) RecordMetrics(ctx context.Context, md MetricData) { - if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil { - // This will happen if an HTTPServer{} is used insted of NewHTTPServer. - return - } - - attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) - o := metric.WithAttributeSet(attribute.NewSet(attributes...)) - addOpts := []metric.AddOption{o} // Allocate vararg slice once. - s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...) - s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...) - s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) - - // TODO: Duplicate Metrics -} - -func NewHTTPServer(meter metric.Meter) HTTPServer { - env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) duplicate := env == "http/dup" server := HTTPServer{ duplicate: duplicate, } -<<<<<<< HEAD server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = OldHTTPServer{}.createMeasures(meter) if duplicate { server.requestBodySizeHistogram, server.responseBodySizeHistogram, server.requestDurationHistogram = CurrentHTTPServer{}.createMeasures(meter) } -======= - server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = oldHTTPServer{}.createMeasures(meter) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return server } type HTTPClient struct { duplicate bool -<<<<<<< HEAD // old metrics requestBytesCounter metric.Int64Counter @@ -258,43 +193,23 @@ func NewHTTPClient(meter metric.Meter) HTTPClient { } return client -======= -} - -func NewHTTPClient() HTTPClient { - env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) - return HTTPClient{duplicate: env == "http/dup"} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // RequestTraceAttrs returns attributes for an HTTP request made by a client. func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { if c.duplicate { -<<<<<<< HEAD return append(OldHTTPClient{}.RequestTraceAttrs(req), CurrentHTTPClient{}.RequestTraceAttrs(req)...) } return OldHTTPClient{}.RequestTraceAttrs(req) -======= - return append(oldHTTPClient{}.RequestTraceAttrs(req), newHTTPClient{}.RequestTraceAttrs(req)...) - } - return oldHTTPClient{}.RequestTraceAttrs(req) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // ResponseTraceAttrs returns metric attributes for an HTTP request made by a client. 
func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { if c.duplicate { -<<<<<<< HEAD return append(OldHTTPClient{}.ResponseTraceAttrs(resp), CurrentHTTPClient{}.ResponseTraceAttrs(resp)...) } return OldHTTPClient{}.ResponseTraceAttrs(resp) -======= - return append(oldHTTPClient{}.ResponseTraceAttrs(resp), newHTTPClient{}.ResponseTraceAttrs(resp)...) - } - - return oldHTTPClient{}.ResponseTraceAttrs(resp) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (c HTTPClient) Status(code int) (codes.Code, string) { @@ -309,16 +224,11 @@ func (c HTTPClient) Status(code int) (codes.Code, string) { func (c HTTPClient) ErrorType(err error) attribute.KeyValue { if c.duplicate { -<<<<<<< HEAD return CurrentHTTPClient{}.ErrorType(err) -======= - return newHTTPClient{}.ErrorType(err) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return attribute.KeyValue{} } -<<<<<<< HEAD type MetricOpts struct { measurement metric.MeasurementOption @@ -378,5 +288,3 @@ func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, s.responseBytesCounter.Add(ctx, responseData, opts["old"].AddOptions()) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go index 8c483195c6..8c3c627513 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go @@ -1,9 +1,6 @@ -<<<<<<< HEAD // Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/semconv/httpconv.go.tmpl -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -13,27 +10,17 @@ import ( "fmt" "net/http" "reflect" -<<<<<<< HEAD "slices" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "strconv" "strings" "go.opentelemetry.io/otel/attribute" -<<<<<<< HEAD "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/noop" semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) type CurrentHTTPServer struct{} -======= - semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" -) - -type newHTTPServer struct{} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // TraceRequest returns trace attributes for an HTTP request received by a // server. @@ -51,31 +38,18 @@ type newHTTPServer struct{} // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. -<<<<<<< HEAD func (n CurrentHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { -======= -func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) count := 3 // ServerAddress, Method, Scheme var host string var p int if server == "" { -<<<<<<< HEAD host, p = SplitHostPort(req.Host) } else { // Prioritize the primary server name. host, p = SplitHostPort(server) if p < 0 { _, p = SplitHostPort(req.Host) -======= - host, p = splitHostPort(req.Host) - } else { - // Prioritize the primary server name. 
- host, p = splitHostPort(server) - if p < 0 { - _, p = splitHostPort(req.Host) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -91,11 +65,7 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att scheme := n.scheme(req.TLS != nil) -<<<<<<< HEAD if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" { -======= - if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a // file-path that would be interpreted with a sock family. count++ @@ -140,11 +110,7 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att attrs = append(attrs, methodOriginal) } -<<<<<<< HEAD if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" { -======= - if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a // file-path that would be interpreted with a sock family. attrs = append(attrs, semconvNew.NetworkPeerAddress(peer)) @@ -175,11 +141,7 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att return attrs } -<<<<<<< HEAD func (n CurrentHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { -======= -func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if method == "" { return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} } @@ -194,11 +156,7 @@ func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyV return semconvNew.HTTPRequestMethodGet, orig } -<<<<<<< HEAD func (n CurrentHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive -======= -func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if https { return semconvNew.URLScheme("https") } @@ -208,11 +166,7 @@ func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive // TraceResponse returns trace attributes for telemetry from an HTTP response. // // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. -<<<<<<< HEAD func (n CurrentHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { -======= -func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var count int if resp.ReadBytes > 0 { @@ -247,7 +201,6 @@ func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke } // Route returns the attribute for the route. -<<<<<<< HEAD func (n CurrentHTTPServer) Route(route string) attribute.KeyValue { return semconvNew.HTTPRoute(route) } @@ -336,16 +289,6 @@ type CurrentHTTPClient struct{} // RequestTraceAttrs returns trace attributes for an HTTP request made by a client. func (n CurrentHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { -======= -func (n newHTTPServer) Route(route string) attribute.KeyValue { - return semconvNew.HTTPRoute(route) -} - -type newHTTPClient struct{} - -// RequestTraceAttrs returns trace attributes for an HTTP request made by a client. 
-func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) /* below attributes are returned: - http.request.method @@ -365,11 +308,7 @@ func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue var requestHost string var requestPort int for _, hostport := range []string{urlHost, req.Header.Get("Host")} { -<<<<<<< HEAD requestHost, requestPort = SplitHostPort(hostport) -======= - requestHost, requestPort = splitHostPort(hostport) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if requestHost != "" || requestPort > 0 { break } @@ -431,11 +370,7 @@ func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue } // ResponseTraceAttrs returns trace attributes for an HTTP response made by a client. -<<<<<<< HEAD func (n CurrentHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { -======= -func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) /* below attributes are returned: - http.response.status_code @@ -462,11 +397,7 @@ func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyVa return attrs } -<<<<<<< HEAD func (n CurrentHTTPClient) ErrorType(err error) attribute.KeyValue { -======= -func (n newHTTPClient) ErrorType(err error) attribute.KeyValue { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) t := reflect.TypeOf(err) var value string if t.PkgPath() == "" && t.Name() == "" { @@ -483,11 +414,7 @@ func (n newHTTPClient) ErrorType(err error) attribute.KeyValue { return semconvNew.ErrorTypeKey.String(value) } -<<<<<<< HEAD func (n CurrentHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { -======= -func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if method == "" { return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} } @@ -502,7 +429,6 @@ func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyV return semconvNew.HTTPRequestMethodGet, orig } -<<<<<<< HEAD func (n CurrentHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Histogram, metric.Float64Histogram) { if meter == nil { return noop.Int64Histogram{}, noop.Float64Histogram{} @@ -588,8 +514,6 @@ func (n CurrentHTTPClient) scheme(https bool) attribute.KeyValue { // nolint:rev return semconvNew.URLScheme("http") } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func isErrorStatusCode(code int) bool { return code >= 400 || code < 100 } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go index d78ec2700d..558efd0594 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go @@ -1,9 +1,6 @@ -<<<<<<< HEAD // Code created by gotmpl. DO NOT MODIFY. 
// source: internal/shared/semconv/util.go.tmpl -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -20,22 +17,14 @@ import ( semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) -<<<<<<< HEAD // SplitHostPort splits a network address hostport of the form "host", -======= -// splitHostPort splits a network address hostport of the form "host", ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port", // "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and // port. // // An empty host is returned if it is not provided or unparsable. A negative // port is returned if it is not provided or unparsable. -<<<<<<< HEAD func SplitHostPort(hostport string) (host string, port int) { -======= -func splitHostPort(hostport string) (host string, port int) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) port = -1 if strings.HasPrefix(hostport, "[") { @@ -110,7 +99,6 @@ func handleErr(err error) { otel.Handle(err) } } -<<<<<<< HEAD func standardizeHTTPMethod(method string) string { method = strings.ToUpper(method) @@ -121,5 +109,3 @@ func standardizeHTTPMethod(method string) string { } return method } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go index 02ebb68bf3..57d1507b62 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -1,9 +1,6 @@ -<<<<<<< HEAD // Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/semconv/v120.0.go.tmpl -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -14,10 +11,6 @@ import ( "io" "net/http" "slices" -<<<<<<< HEAD -======= - "strings" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel/attribute" @@ -26,11 +19,7 @@ import ( semconv "go.opentelemetry.io/otel/semconv/v1.20.0" ) -<<<<<<< HEAD type OldHTTPServer struct{} -======= -type oldHTTPServer struct{} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // RequestTraceAttrs returns trace attributes for an HTTP request received by a // server. @@ -48,22 +37,14 @@ type oldHTTPServer struct{} // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. -<<<<<<< HEAD func (o OldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { -======= -func (o oldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return semconvutil.HTTPServerRequest(server, req) } // ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. // // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. 
-<<<<<<< HEAD func (o OldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { -======= -func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) attributes := []attribute.KeyValue{} if resp.ReadBytes > 0 { @@ -88,11 +69,7 @@ func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke } // Route returns the attribute for the route. -<<<<<<< HEAD func (o OldHTTPServer) Route(route string) attribute.KeyValue { -======= -func (o oldHTTPServer) Route(route string) attribute.KeyValue { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return semconv.HTTPRoute(route) } @@ -109,11 +86,7 @@ const ( serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds ) -<<<<<<< HEAD func (h OldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { -======= -func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if meter == nil { return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} } @@ -142,30 +115,17 @@ func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, return requestBytesCounter, responseBytesCounter, serverLatencyMeasure } -<<<<<<< HEAD func (o OldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { -======= -func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) n := len(additionalAttributes) + 3 var host string var p int if server == "" { -<<<<<<< HEAD host, p = SplitHostPort(req.Host) } else { // Prioritize the primary server name. host, p = SplitHostPort(server) if p < 0 { _, p = SplitHostPort(req.Host) -======= - host, p = splitHostPort(req.Host) - } else { - // Prioritize the primary server name. 
- host, p = splitHostPort(server) - if p < 0 { - _, p = splitHostPort(req.Host) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } hostPort := requiredHTTPPort(req.TLS != nil, p) @@ -186,11 +146,7 @@ func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, status attributes := slices.Grow(additionalAttributes, n) attributes = append(attributes, -<<<<<<< HEAD semconv.HTTPMethod(standardizeHTTPMethod(req.Method)), -======= - o.methodMetric(req.Method), ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) o.scheme(req.TLS != nil), semconv.NetHostName(host)) @@ -210,28 +166,13 @@ func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, status return attributes } -<<<<<<< HEAD func (o OldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive -======= -func (o oldHTTPServer) methodMetric(method string) attribute.KeyValue { - method = strings.ToUpper(method) - switch method { - case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: - default: - method = "_OTHER" - } - return semconv.HTTPMethod(method) -} - -func (o oldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if https { return semconv.HTTPSchemeHTTPS } return semconv.HTTPSchemeHTTP } -<<<<<<< HEAD type OldHTTPClient struct{} func (o OldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { @@ -323,14 +264,3 @@ func (o OldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, return requestBytesCounter, responseBytesCounter, latencyMeasure } -======= -type oldHTTPClient struct{} - -func (o oldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { - return semconvutil.HTTPClientRequest(req) -} - -func (o oldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { - return semconvutil.HTTPClientResponse(resp) -} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index 32ed56c7e4..44b86ad860 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -13,17 +13,9 @@ import ( "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" -<<<<<<< HEAD "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" -======= - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/metric" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" @@ -35,10 +27,6 @@ type Transport struct { rt http.RoundTripper tracer trace.Tracer -<<<<<<< HEAD -======= - meter metric.Meter ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) propagators propagation.TextMapPropagator spanStartOptions []trace.SpanStartOption filters []Filter @@ -46,14 +34,7 @@ type Transport struct { clientTrace func(context.Context) *httptrace.ClientTrace metricAttributesFn func(*http.Request) 
[]attribute.KeyValue -<<<<<<< HEAD semconv semconv.HTTPClient -======= - semconv semconv.HTTPClient - requestBytesCounter metric.Int64Counter - responseBytesCounter metric.Int64Counter - latencyMeasure metric.Float64Histogram ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var _ http.RoundTripper = &Transport{} @@ -70,12 +51,7 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport { } t := Transport{ -<<<<<<< HEAD rt: base, -======= - rt: base, - semconv: semconv.NewHTTPClient(), ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } defaultOpts := []Option{ @@ -85,59 +61,21 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport { c := newConfig(append(defaultOpts, opts...)...) t.applyConfig(c) -<<<<<<< HEAD -======= - t.createMeasures() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return &t } func (t *Transport) applyConfig(c *config) { t.tracer = c.Tracer -<<<<<<< HEAD -======= - t.meter = c.Meter ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) t.propagators = c.Propagators t.spanStartOptions = c.SpanStartOptions t.filters = c.Filters t.spanNameFormatter = c.SpanNameFormatter t.clientTrace = c.ClientTrace -<<<<<<< HEAD t.semconv = semconv.NewHTTPClient(c.Meter) t.metricAttributesFn = c.MetricAttributesFn } -======= - t.metricAttributesFn = c.MetricAttributesFn -} - -func (t *Transport) createMeasures() { - var err error - t.requestBytesCounter, err = t.meter.Int64Counter( - clientRequestSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP request messages."), - ) - handleErr(err) - - t.responseBytesCounter, err = t.meter.Int64Counter( - clientResponseSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP response messages."), - ) - handleErr(err) - - t.latencyMeasure, err = t.meter.Float64Histogram( - clientDuration, - metric.WithUnit("ms"), - metric.WithDescription("Measures the duration of outbound HTTP requests."), - ) - handleErr(err) -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func defaultTransportFormatter(_ string, r *http.Request) string { return "HTTP " + r.Method } @@ -207,7 +145,6 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { } // metrics -<<<<<<< HEAD metricOpts := t.semconv.MetricOptions(semconv.MetricAttributes{ Req: r, StatusCode: res.StatusCode, @@ -217,18 +154,6 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { // For handling response bytes we leverage a callback when the client reads the http response readRecordFunc := func(n int64) { t.semconv.RecordResponseSize(ctx, n, metricOpts) -======= - metricAttrs := append(append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...), t.metricAttributesFromRequest(r)...) - if res.StatusCode > 0 { - metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode)) - } - o := metric.WithAttributeSet(attribute.NewSet(metricAttrs...)) - - t.requestBytesCounter.Add(ctx, bw.BytesRead(), o) - // For handling response bytes we leverage a callback when the client reads the http response - readRecordFunc := func(n int64) { - t.responseBytesCounter.Add(ctx, n, o) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // traces @@ -240,18 +165,12 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { // Use floating point division here for higher precision (instead of Millisecond method). 
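// Illustrative aside: float64 division preserves sub-millisecond
// precision where Duration.Milliseconds would truncate, e.g. for
// d := 1500 * time.Microsecond:
//   float64(d) / float64(time.Millisecond) == 1.5
//   d.Milliseconds()                       == 1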
elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) -<<<<<<< HEAD t.semconv.RecordMetrics(ctx, semconv.MetricData{ RequestSize: bw.BytesRead(), ElapsedTime: elapsedTime, }, metricOpts) return res, nil -======= - t.latencyMeasure.Record(ctx, elapsedTime, o) - - return res, err ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (t *Transport) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index a04cc9938a..386f09e1b7 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -5,11 +5,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. func Version() string { -<<<<<<< HEAD return "0.59.0" -======= - return "0.54.0" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore index aa50a57d76..ae8577ef36 100644 --- a/vendor/go.opentelemetry.io/otel/.gitignore +++ b/vendor/go.opentelemetry.io/otel/.gitignore @@ -12,14 +12,3 @@ go.work go.work.sum gen/ -<<<<<<< HEAD -======= - -/example/dice/dice -/example/namedtracer/namedtracer -/example/otel-collector/otel-collector -/example/opencensus/opencensus -/example/passthrough/passthrough -/example/prometheus/prometheus -/example/zipkin/zipkin ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index 02105abf6d..ce3f40b609 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -22,25 +22,16 @@ linters: - govet - ineffassign - misspell -<<<<<<< HEAD - perfsprint - revive - staticcheck - tenv - testifylint -======= - - revive - - staticcheck - - tenv ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) - typecheck - unconvert - unused - unparam -<<<<<<< HEAD - usestdlibvars -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) issues: # Maximum issues count per one linter. @@ -72,29 +63,17 @@ issues: text: "calls to (.+) only in main[(][)] or init[(][)] functions" linters: - revive -<<<<<<< HEAD # It's okay to not run gosec and perfsprint in a test. - path: _test\.go linters: - gosec - perfsprint # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) -======= - # It's okay to not run gosec in a test. - - path: _test\.go - linters: - - gosec - # Igonoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) # as we commonly use it in tests and examples. - text: "G404:" linters: - gosec -<<<<<<< HEAD # Ignoring gosec G402: TLS MinVersion too low -======= - # Igonoring gosec G402: TLS MinVersion too low ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well. - text: "G402: TLS MinVersion too low." 
linters: @@ -119,7 +98,6 @@ linters-settings: - pkg: "crypto/md5" - pkg: "crypto/sha1" - pkg: "crypto/**/pkix" -<<<<<<< HEAD auto/sdk: files: - "!internal/global/trace.go" @@ -127,8 +105,6 @@ linters-settings: deny: - pkg: "go.opentelemetry.io/auto/sdk" desc: Do not use SDK from automatic instrumentation. -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) otlp-internal: files: - "!**/exporters/otlp/internal/**/*.go" @@ -161,11 +137,6 @@ linters-settings: - "**/metric/**/*.go" - "**/bridge/*.go" - "**/bridge/**/*.go" -<<<<<<< HEAD -======= - - "**/example/*.go" - - "**/example/**/*.go" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) - "**/trace/*.go" - "**/trace/**/*.go" - "**/log/*.go" @@ -193,15 +164,12 @@ linters-settings: locale: US ignore-words: - cancelled -<<<<<<< HEAD perfsprint: err-error: true errorf: true int-conversion: true sprintf1: true strconcat: true -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) revive: # Sets the default failure confidence. # This means that linting errors with less than 0.8 confidence will be ignored. @@ -349,12 +317,9 @@ linters-settings: # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value - name: waitgroup-by-value disabled: false -<<<<<<< HEAD testifylint: enable-all: true disable: - float-compare - go-require - require-error -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index f4655720f3..599d59cd13 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -11,7 +11,6 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm -<<<<<<< HEAD ## [1.34.0/0.56.0/0.10.0] 2025-01-17 ### Changed @@ -147,8 +146,6 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm - Drop support for [Go 1.21]. (#5736, #5740, #5800) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## [1.29.0/0.51.0/0.5.0] 2024-08-23 This release is the last to support [Go 1.21]. @@ -2033,11 +2030,7 @@ with major version 0. - Setting error status while recording error with Span from oteltest package. (#1729) - The concept of a remote and local Span stored in a context is unified to just the current Span. Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed. -<<<<<<< HEAD Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span. -======= - Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContex` can be used to return the current Span. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731) - The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed. This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749) @@ -2611,11 +2604,7 @@ This release migrates the default OpenTelemetry SDK into its own Go module, deco - Prometheus exporter will not apply stale updates or forget inactive metrics. (#903) - Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905) - Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. 
(#901, #913) -<<<<<<< HEAD - Update otel-collector example to use the v0.5.0 collector. (#915) -======= -- Update otel-colector example to use the v0.5.0 collector. (#915) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) - The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922) - The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922) - The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists. @@ -3208,16 +3197,12 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -<<<<<<< HEAD [Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.34.0...HEAD [1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0 [1.33.0/0.55.0/0.9.0/0.0.12]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.33.0 [1.32.0/0.54.0/0.8.0/0.0.11]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.32.0 [1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0 [1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0 -======= -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.29.0...HEAD ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) [1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0 [1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0 [1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 2213a70537..945a07d2b0 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -12,12 +12,6 @@ # https://help.github.com/en/articles/about-code-owners # -<<<<<<< HEAD * @MrAlias @XSAM @dashpole @pellared @dmathieu CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu -======= -* @MrAlias @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu - -CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole @XSAM @dmathieu ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index f31cdb0438..22a2e9dbd4 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -578,14 +578,10 @@ See also: The tests should never leak goroutines. Use the term `ConcurrentSafe` in the test name when it aims to verify the -<<<<<<< HEAD absence of race conditions. The top-level tests with this term will be run many times in the `test-concurrent-safe` CI job to increase the chance of catching concurrency issues. This does not apply to subtests when this term is not in their root name. -======= -absence of race conditions. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ### Internal packages @@ -633,7 +629,6 @@ should be canceled. ## Approvers and Maintainers -<<<<<<< HEAD ### Triagers - [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent @@ -642,15 +637,6 @@ should be canceled. 
### Maintainers -======= -### Approvers - -- [Chester Cheung](https://github.com/hanyuancheung), Tencent - -### Maintainers - -- [Aaron Clawson](https://github.com/MadVikingGod), LightStep ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) - [Damien Mathieu](https://github.com/dmathieu), Elastic - [David Ashpole](https://github.com/dashpole), Google - [Robert Pająk](https://github.com/pellared), Splunk @@ -659,7 +645,6 @@ should be canceled. ### Emeritus -<<<<<<< HEAD - [Aaron Clawson](https://github.com/MadVikingGod) - [Anthony Mirabella](https://github.com/Aneurysm9) - [Chester Cheung](https://github.com/hanyuancheung) @@ -667,13 +652,6 @@ should be canceled. - [Gustavo Silva Paiva](https://github.com/paivagustavo) - [Josh MacDonald](https://github.com/jmacd) - [Liz Fong-Jones](https://github.com/lizthegrey) -======= -- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb -- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep -- [Josh MacDonald](https://github.com/jmacd), LightStep -- [Anthony Mirabella](https://github.com/Aneurysm9), AWS -- [Evan Torrie](https://github.com/evantorrie), Yahoo ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ### Become an Approver or a Maintainer diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index 6ea0dff898..a7f6d8cc68 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -14,13 +14,8 @@ TIMEOUT = 60 .DEFAULT_GOAL := precommit .PHONY: precommit ci -<<<<<<< HEAD precommit: generate toolchain-check license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default ci: generate toolchain-check license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage -======= -precommit: generate license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default -ci: generate license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) # Tools @@ -59,12 +54,6 @@ $(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer PORTO = $(TOOLS)/porto $(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto -<<<<<<< HEAD -======= -GOJQ = $(TOOLS)/gojq -$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) GOTMPL = $(TOOLS)/gotmpl $(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl @@ -75,11 +64,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck $(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck .PHONY: tools -<<<<<<< HEAD tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) -======= -tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) # Virtualized python tools via docker @@ -157,21 +142,14 @@ build-tests/%: # Tests -<<<<<<< HEAD TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe -======= -TEST_TARGETS := test-default test-bench test-short test-verbose test-race ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) .PHONY: $(TEST_TARGETS) test test-default test-race: ARGS=-race test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. 
test-short: ARGS=-short test-verbose: ARGS=-v -race -<<<<<<< HEAD test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race test-concurrent-safe: TIMEOUT=120 -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) $(TEST_TARGETS): test test: $(OTEL_GO_MOD_DIRS:%=test/%) test/%: DIR=$* @@ -257,7 +235,6 @@ govulncheck/%: $(GOVULNCHECK) codespell: $(CODESPELL) @$(DOCKERPY) $(CODESPELL) -<<<<<<< HEAD .PHONY: toolchain-check toolchain-check: @toolchainRes=$$(for f in $(ALL_GO_MOD_DIRS); do \ @@ -268,8 +245,6 @@ toolchain-check: exit 1; \ fi -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) .PHONY: license-check license-check: @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \ @@ -295,11 +270,7 @@ SEMCONVPKG ?= "semconv/" semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT) [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) -<<<<<<< HEAD $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -z "$(SEMCONVPKG)/capitalizations.txt" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" -======= - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)" $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index c43dba0a7d..d9a1920762 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -1,10 +1,6 @@ # OpenTelemetry-Go -<<<<<<< HEAD [![ci](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml) -======= -[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) [![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main) [![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel) [![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel) @@ -55,7 +51,6 @@ Currently, this project supports the following environments. |----------|------------|--------------| | Ubuntu | 1.23 | amd64 | | Ubuntu | 1.22 | amd64 | -<<<<<<< HEAD | Ubuntu | 1.23 | 386 | | Ubuntu | 1.22 | 386 | | Linux | 1.23 | arm64 | @@ -68,27 +63,6 @@ Currently, this project supports the following environments. 
| Windows | 1.22 | amd64 | | Windows | 1.23 | 386 | | Windows | 1.22 | 386 | -======= -| Ubuntu | 1.21 | amd64 | -| Ubuntu | 1.23 | 386 | -| Ubuntu | 1.22 | 386 | -| Ubuntu | 1.21 | 386 | -| Linux | 1.23 | arm64 | -| Linux | 1.22 | arm64 | -| Linux | 1.21 | arm64 | -| macOS 13 | 1.23 | amd64 | -| macOS 13 | 1.22 | amd64 | -| macOS 13 | 1.21 | amd64 | -| macOS | 1.23 | arm64 | -| macOS | 1.22 | arm64 | -| macOS | 1.21 | arm64 | -| Windows | 1.23 | amd64 | -| Windows | 1.22 | amd64 | -| Windows | 1.21 | amd64 | -| Windows | 1.23 | 386 | -| Windows | 1.22 | 386 | -| Windows | 1.21 | 386 | ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) While this project should work for other systems, no compatibility guarantees are made for those systems currently. @@ -115,13 +89,8 @@ If you need to extend the telemetry an instrumentation library provides or want to build your own instrumentation for your application directly you will need to use the [Go otel](https://pkg.go.dev/go.opentelemetry.io/otel) -<<<<<<< HEAD package. The [examples](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples) are a good way to see some practical uses of this process. -======= -package. The included [examples](./example/) are a good way to see some -practical uses of this process. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ### Export diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index 9896ee3ebd..4ebef4f9dd 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -111,20 +111,6 @@ It is critical you make sure the version you push upstream is correct. Finally create a Release for the new `` on GitHub. The release body should include all the release notes from the Changelog for this release. -<<<<<<< HEAD -======= -## Verify Examples - -After releasing verify that examples build outside of the repository. - -``` -./verify_examples.sh -``` - -The script copies examples into a different directory removes any `replace` declarations in `go.mod` and builds them. -This ensures they build with the published release, not the local copy. - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## Post-Release ### Contrib Repository @@ -144,12 +130,6 @@ Importantly, bump any package versions referenced to be the latest one you just Bump the dependencies in the following Go services: -<<<<<<< HEAD - [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting) - [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout) - [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog) -======= -- [`accountingservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accountingservice) -- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkoutservice) -- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/productcatalogservice) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md index 1db051e44a..b8cb605c16 100644 --- a/vendor/go.opentelemetry.io/otel/VERSIONING.md +++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md @@ -26,11 +26,7 @@ is designed so the following goals can be achieved. 
go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the paths used in `go get` commands (e.g., `go get -<<<<<<< HEAD go.opentelemetry.io/otel/v2@v2.0.1`). Note there is both a `/v2` and a -======= - go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) `@v2.0.1` in that example. One way to think about it is that the module name now includes the `/v2`, so include `/v2` whenever you are using the module name). diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go index e09eefb5d6..6cbefceadf 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/set.go +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -347,7 +347,6 @@ func computeDistinct(kvs []KeyValue) Distinct { func computeDistinctFixed(kvs []KeyValue) interface{} { switch len(kvs) { case 1: -<<<<<<< HEAD return [1]KeyValue(kvs) case 2: return [2]KeyValue(kvs) @@ -367,47 +366,6 @@ func computeDistinctFixed(kvs []KeyValue) interface{} { return [9]KeyValue(kvs) case 10: return [10]KeyValue(kvs) -======= - ptr := new([1]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 2: - ptr := new([2]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 3: - ptr := new([3]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 4: - ptr := new([4]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 5: - ptr := new([5]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 6: - ptr := new([6]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 7: - ptr := new([7]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 8: - ptr := new([8]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 9: - ptr := new([9]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 10: - ptr := new([10]KeyValue) - copy((*ptr)[:], kvs) - return *ptr ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: return nil } diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index 18e32ed9d9..0e1fe24220 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -50,11 +50,7 @@ type Property struct { // component boundaries may impose their own restrictions on Property key. // For example, the W3C Baggage specification restricts the Property keys to strings that // satisfy the token definition from RFC7230, Section 3.2.6. -<<<<<<< HEAD // For maximum compatibility, alphanumeric value are strongly recommended to be used as Property key. -======= -// For maximum compatibility, alpha-numeric value are strongly recommended to be used as Property key. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func NewKeyProperty(key string) (Property, error) { if !validateBaggageName(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) @@ -94,11 +90,7 @@ func NewKeyValueProperty(key, value string) (Property, error) { // component boundaries may impose their own restrictions on Property key. // For example, the W3C Baggage specification restricts the Property keys to strings that // satisfy the token definition from RFC7230, Section 3.2.6. -<<<<<<< HEAD // For maximum compatibility, alphanumeric value are strongly recommended to be used as Property key. -======= -// For maximum compatibility, alpha-numeric value are strongly recommended to be used as Property key. 
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func NewKeyValuePropertyRaw(key, value string) (Property, error) { if !validateBaggageName(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) @@ -295,11 +287,7 @@ func NewMember(key, value string, props ...Property) (Member, error) { // component boundaries may impose their own restrictions on baggage key. // For example, the W3C Baggage specification restricts the baggage keys to strings that // satisfy the token definition from RFC7230, Section 3.2.6. -<<<<<<< HEAD // For maximum compatibility, alphanumeric value are strongly recommended to be used as baggage key. -======= -// For maximum compatibility, alpha-numeric value are strongly recommended to be used as baggage key. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func NewMemberRaw(key, value string, props ...Property) (Member, error) { m := Member{ key: key, @@ -367,11 +355,7 @@ func parseMember(member string) (Member, error) { } // replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'. -<<<<<<< HEAD func replaceInvalidUTF8Sequences(c int, unescapeVal string) string { -======= -func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if utf8.ValidString(unescapeVal) { return unescapeVal } @@ -379,11 +363,7 @@ func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string { // https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69 var b strings.Builder -<<<<<<< HEAD b.Grow(c) -======= - b.Grow(cap) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for i := 0; i < len(unescapeVal); { r, size := utf8.DecodeRuneInString(unescapeVal[i:]) if r == utf8.RuneError && size == 1 { diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go index 1c8c5bfaf0..49a35b1225 100644 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -5,10 +5,7 @@ package codes // import "go.opentelemetry.io/otel/codes" import ( "encoding/json" -<<<<<<< HEAD "errors" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "fmt" "strconv" ) @@ -67,11 +64,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { return nil } if c == nil { -<<<<<<< HEAD return errors.New("nil receiver passed to UnmarshalJSON") -======= - return fmt.Errorf("nil receiver passed to UnmarshalJSON") ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var x interface{} diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go index b8e759305a..691d96c755 100644 --- a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go +++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go @@ -49,20 +49,11 @@ func AsBoolSlice(v interface{}) []bool { if rv.Type().Kind() != reflect.Array { return nil } -<<<<<<< HEAD cpy := make([]bool, rv.Len()) if len(cpy) > 0 { _ = reflect.Copy(reflect.ValueOf(cpy), rv) } return cpy -======= - var zero bool - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]bool) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // AsInt64Slice converts an int64 array into a slice into with same elements as array. 
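The attribute-slice helpers in this file all switch to the same, simpler pattern: allocate a plain slice with make and let reflect.Copy fill it from the array, instead of constructing an array type through reflection and re-slicing it. A minimal sketch of the kept pattern (hypothetical asBoolSlice name, mirroring the vendored AsBoolSlice):

    package main

    import (
        "fmt"
        "reflect"
    )

    // asBoolSlice copies a [N]bool array (passed as interface{}) into a
    // fresh []bool; reflect.Copy accepts an array as its source.
    func asBoolSlice(v interface{}) []bool {
        rv := reflect.ValueOf(v)
        if rv.Type().Kind() != reflect.Array {
            return nil
        }
        cpy := make([]bool, rv.Len())
        if len(cpy) > 0 {
            _ = reflect.Copy(reflect.ValueOf(cpy), rv)
        }
        return cpy
    }

    func main() {
        fmt.Println(asBoolSlice([3]bool{true, false, true})) // [true false true]
    }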
@@ -71,20 +62,11 @@ func AsInt64Slice(v interface{}) []int64 { if rv.Type().Kind() != reflect.Array { return nil } -<<<<<<< HEAD cpy := make([]int64, rv.Len()) if len(cpy) > 0 { _ = reflect.Copy(reflect.ValueOf(cpy), rv) } return cpy -======= - var zero int64 - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]int64) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // AsFloat64Slice converts a float64 array into a slice into with same elements as array. @@ -93,20 +75,11 @@ func AsFloat64Slice(v interface{}) []float64 { if rv.Type().Kind() != reflect.Array { return nil } -<<<<<<< HEAD cpy := make([]float64, rv.Len()) if len(cpy) > 0 { _ = reflect.Copy(reflect.ValueOf(cpy), rv) } return cpy -======= - var zero float64 - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]float64) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // AsStringSlice converts a string array into a slice into with same elements as array. @@ -115,18 +88,9 @@ func AsStringSlice(v interface{}) []string { if rv.Type().Kind() != reflect.Array { return nil } -<<<<<<< HEAD cpy := make([]string, rv.Len()) if len(cpy) > 0 { _ = reflect.Copy(reflect.ValueOf(cpy), rv) } return cpy -======= - var zero string - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]string) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go index a8eccf3a72..ae92a42516 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go @@ -13,11 +13,7 @@ import ( // unwrapper unwraps to return the underlying instrument implementation. 
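// Illustrative aside: the global package hands out placeholder
// instruments before any SDK is installed; once a real MeterProvider is
// set, registered callbacks must observe the real instrument, so
// RegisterCallback unwraps each placeholder through this interface
// (renamed to the unexported unwrap in this update).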
type unwrapper interface { -<<<<<<< HEAD unwrap() metric.Observable -======= - Unwrap() metric.Observable ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type afCounter struct { @@ -44,11 +40,7 @@ func (i *afCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -<<<<<<< HEAD func (i *afCounter) unwrap() metric.Observable { -======= -func (i *afCounter) Unwrap() metric.Observable { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableCounter) } @@ -79,11 +71,7 @@ func (i *afUpDownCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -<<<<<<< HEAD func (i *afUpDownCounter) unwrap() metric.Observable { -======= -func (i *afUpDownCounter) Unwrap() metric.Observable { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableUpDownCounter) } @@ -114,11 +102,7 @@ func (i *afGauge) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -<<<<<<< HEAD func (i *afGauge) unwrap() metric.Observable { -======= -func (i *afGauge) Unwrap() metric.Observable { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableGauge) } @@ -149,11 +133,7 @@ func (i *aiCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -<<<<<<< HEAD func (i *aiCounter) unwrap() metric.Observable { -======= -func (i *aiCounter) Unwrap() metric.Observable { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableCounter) } @@ -184,11 +164,7 @@ func (i *aiUpDownCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -<<<<<<< HEAD func (i *aiUpDownCounter) unwrap() metric.Observable { -======= -func (i *aiUpDownCounter) Unwrap() metric.Observable { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableUpDownCounter) } @@ -219,11 +195,7 @@ func (i *aiGauge) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -<<<<<<< HEAD func (i *aiGauge) unwrap() metric.Observable { -======= -func (i *aiGauge) Unwrap() metric.Observable { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableGauge) } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go index d4f63d9389..a6acd8dca6 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -5,14 +5,9 @@ package global // import "go.opentelemetry.io/otel/internal/global" import ( "container/list" -<<<<<<< HEAD "context" "reflect" "sync" -======= - "sync" - "sync/atomic" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/embedded" @@ -72,10 +67,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me name: name, version: c.InstrumentationVersion(), schema: c.SchemaURL(), -<<<<<<< HEAD attrs: c.InstrumentationAttributes(), -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if p.meters == nil { @@ -86,11 +78,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me return val } -<<<<<<< HEAD t := &meter{name: name, opts: opts, instruments: make(map[instID]delegatedInstrument)} -======= - t := 
&meter{name: name, opts: opts} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p.meters[key] = t return t } @@ -106,26 +94,17 @@ type meter struct { opts []metric.MeterOption mtx sync.Mutex -<<<<<<< HEAD instruments map[instID]delegatedInstrument registry list.List delegate metric.Meter -======= - instruments []delegatedInstrument - - registry list.List - - delegate atomic.Value // metric.Meter ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type delegatedInstrument interface { setDelegate(metric.Meter) } -<<<<<<< HEAD // instID are the identifying properties of a instrument. type instID struct { // name is the name of the stream. @@ -138,8 +117,6 @@ type instID struct { unit string } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // setDelegate configures m to delegate all Meter functionality to Meters // created by provider. // @@ -147,21 +124,12 @@ type instID struct { // // It is guaranteed by the caller that this happens only once. func (m *meter) setDelegate(provider metric.MeterProvider) { -<<<<<<< HEAD m.mtx.Lock() defer m.mtx.Unlock() meter := provider.Meter(m.name, m.opts...) m.delegate = meter -======= - meter := provider.Meter(m.name, m.opts...) - m.delegate.Store(meter) - - m.mtx.Lock() - defer m.mtx.Unlock() - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, inst := range m.instruments { inst.setDelegate(meter) } @@ -179,7 +147,6 @@ func (m *meter) setDelegate(provider metric.MeterProvider) { } func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) { -<<<<<<< HEAD m.mtx.Lock() defer m.mtx.Unlock() @@ -199,20 +166,10 @@ func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) } i := &siCounter{name: name, opts: options} m.instruments[id] = i -======= - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64Counter(name, options...) - } - m.mtx.Lock() - defer m.mtx.Unlock() - i := &siCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return i, nil } func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { -<<<<<<< HEAD m.mtx.Lock() defer m.mtx.Unlock() @@ -232,20 +189,10 @@ func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCou } i := &siUpDownCounter{name: name, opts: options} m.instruments[id] = i -======= - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64UpDownCounter(name, options...) - } - m.mtx.Lock() - defer m.mtx.Unlock() - i := &siUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return i, nil } func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { -<<<<<<< HEAD m.mtx.Lock() defer m.mtx.Unlock() @@ -265,20 +212,10 @@ func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOpti } i := &siHistogram{name: name, opts: options} m.instruments[id] = i -======= - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64Histogram(name, options...) 
- } - m.mtx.Lock() - defer m.mtx.Unlock() - i := &siHistogram{name: name, opts: options} - m.instruments = append(m.instruments, i) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return i, nil } func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { -<<<<<<< HEAD m.mtx.Lock() defer m.mtx.Unlock() @@ -298,20 +235,10 @@ func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (met } i := &siGauge{name: name, opts: options} m.instruments[id] = i -======= - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64Gauge(name, options...) - } - m.mtx.Lock() - defer m.mtx.Unlock() - i := &siGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return i, nil } func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { -<<<<<<< HEAD m.mtx.Lock() defer m.mtx.Unlock() @@ -331,20 +258,10 @@ func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64Obser } i := &aiCounter{name: name, opts: options} m.instruments[id] = i -======= - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64ObservableCounter(name, options...) - } - m.mtx.Lock() - defer m.mtx.Unlock() - i := &aiCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return i, nil } func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { -<<<<<<< HEAD m.mtx.Lock() defer m.mtx.Unlock() @@ -364,20 +281,10 @@ func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int6 } i := &aiUpDownCounter{name: name, opts: options} m.instruments[id] = i -======= - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64ObservableUpDownCounter(name, options...) - } - m.mtx.Lock() - defer m.mtx.Unlock() - i := &aiUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return i, nil } func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { -<<<<<<< HEAD m.mtx.Lock() defer m.mtx.Unlock() @@ -397,20 +304,10 @@ func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64Observa } i := &aiGauge{name: name, opts: options} m.instruments[id] = i -======= - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64ObservableGauge(name, options...) - } - m.mtx.Lock() - defer m.mtx.Unlock() - i := &aiGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return i, nil } func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) { -<<<<<<< HEAD m.mtx.Lock() defer m.mtx.Unlock() @@ -430,20 +327,10 @@ func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOpti } i := &sfCounter{name: name, opts: options} m.instruments[id] = i -======= - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64Counter(name, options...) 
- } - m.mtx.Lock() - defer m.mtx.Unlock() - i := &sfCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return i, nil } func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { -<<<<<<< HEAD m.mtx.Lock() defer m.mtx.Unlock() @@ -463,20 +350,10 @@ func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDow } i := &sfUpDownCounter{name: name, opts: options} m.instruments[id] = i -======= - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64UpDownCounter(name, options...) - } - m.mtx.Lock() - defer m.mtx.Unlock() - i := &sfUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return i, nil } func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { -<<<<<<< HEAD m.mtx.Lock() defer m.mtx.Unlock() @@ -496,20 +373,10 @@ func (m *meter) Float64Histogram(name string, options ...metric.Float64Histogram } i := &sfHistogram{name: name, opts: options} m.instruments[id] = i -======= - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64Histogram(name, options...) - } - m.mtx.Lock() - defer m.mtx.Unlock() - i := &sfHistogram{name: name, opts: options} - m.instruments = append(m.instruments, i) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return i, nil } func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { -<<<<<<< HEAD m.mtx.Lock() defer m.mtx.Unlock() @@ -529,20 +396,10 @@ func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) } i := &sfGauge{name: name, opts: options} m.instruments[id] = i -======= - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64Gauge(name, options...) - } - m.mtx.Lock() - defer m.mtx.Unlock() - i := &sfGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return i, nil } func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { -<<<<<<< HEAD m.mtx.Lock() defer m.mtx.Unlock() @@ -562,20 +419,10 @@ func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64O } i := &afCounter{name: name, opts: options} m.instruments[id] = i -======= - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64ObservableCounter(name, options...) - } - m.mtx.Lock() - defer m.mtx.Unlock() - i := &afCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return i, nil } func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { -<<<<<<< HEAD m.mtx.Lock() defer m.mtx.Unlock() @@ -595,20 +442,10 @@ func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Fl } i := &afUpDownCounter{name: name, opts: options} m.instruments[id] = i -======= - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64ObservableUpDownCounter(name, options...) 
- } - m.mtx.Lock() - defer m.mtx.Unlock() - i := &afUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return i, nil } func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { -<<<<<<< HEAD m.mtx.Lock() defer m.mtx.Unlock() @@ -628,21 +465,11 @@ func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64Obs } i := &afGauge{name: name, opts: options} m.instruments[id] = i -======= - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64ObservableGauge(name, options...) - } - m.mtx.Lock() - defer m.mtx.Unlock() - i := &afGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return i, nil } // RegisterCallback captures the function that will be called during Collect. func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) { -<<<<<<< HEAD m.mtx.Lock() defer m.mtx.Unlock() @@ -650,16 +477,6 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) return m.delegate.RegisterCallback(unwrapCallback(f), unwrapInstruments(insts)...) } -======= - if del, ok := m.delegate.Load().(metric.Meter); ok { - insts = unwrapInstruments(insts) - return del.RegisterCallback(f, insts...) - } - - m.mtx.Lock() - defer m.mtx.Unlock() - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) reg := ®istration{instruments: insts, function: f} e := m.registry.PushBack(reg) reg.unreg = func() error { @@ -671,22 +488,11 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) return reg, nil } -<<<<<<< HEAD -======= -type wrapped interface { - unwrap() metric.Observable -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func unwrapInstruments(instruments []metric.Observable) []metric.Observable { out := make([]metric.Observable, 0, len(instruments)) for _, inst := range instruments { -<<<<<<< HEAD if in, ok := inst.(unwrapper); ok { -======= - if in, ok := inst.(wrapped); ok { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) out = append(out, in.unwrap()) } else { out = append(out, inst) @@ -706,7 +512,6 @@ type registration struct { unregMu sync.Mutex } -<<<<<<< HEAD type unwrapObs struct { embedded.Observer obs metric.Observer @@ -762,11 +567,6 @@ func unwrapCallback(f metric.Callback) metric.Callback { } func (c *registration) setDelegate(m metric.Meter) { -======= -func (c *registration) setDelegate(m metric.Meter) { - insts := unwrapInstruments(c.instruments) - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.unregMu.Lock() defer c.unregMu.Unlock() @@ -775,16 +575,10 @@ func (c *registration) setDelegate(m metric.Meter) { return } -<<<<<<< HEAD reg, err := m.RegisterCallback(unwrapCallback(c.function), unwrapInstruments(c.instruments)...) if err != nil { GetErrorHandler().Handle(err) return -======= - reg, err := m.RegisterCallback(c.function, insts...) 
- if err != nil { - GetErrorHandler().Handle(err) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } c.unreg = reg.Unregister diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go index 2b0194ebab..8982aa0dc5 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -25,10 +25,7 @@ import ( "sync" "sync/atomic" -<<<<<<< HEAD "go.opentelemetry.io/auto/sdk" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" @@ -91,10 +88,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T name: name, version: c.InstrumentationVersion(), schema: c.SchemaURL(), -<<<<<<< HEAD attrs: c.InstrumentationAttributes(), -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if p.tracers == nil { @@ -110,16 +104,12 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T return t } -<<<<<<< HEAD type il struct { name string version string schema string attrs attribute.Set } -======= -type il struct{ name, version, schema string } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // tracer is a placeholder for a trace.Tracer. // @@ -156,7 +146,6 @@ func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStart return delegate.(trace.Tracer).Start(ctx, name, opts...) } -<<<<<<< HEAD return t.newSpan(ctx, autoInstEnabled, name, opts) } @@ -181,8 +170,6 @@ func (t *tracer) newSpan(ctx context.Context, autoSpan *bool, name string, opts return tracer.Start(ctx, name, opts...) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t} ctx = trace.ContextWithSpan(ctx, s) return ctx, s diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go index b543d512a0..b2fe3e41d3 100644 --- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -20,12 +20,8 @@ func RawToBool(r uint64) bool { } func Int64ToRaw(i int64) uint64 { -<<<<<<< HEAD // Assumes original was a valid int64 (overflow not checked). return uint64(i) // nolint: gosec -======= - return uint64(i) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func RawToInt64(r uint64) int64 { diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go index a965f5d1ed..f8435d8f28 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go @@ -213,11 +213,7 @@ type Float64Observer interface { } // Float64Callback is a function registered with a Meter that makes -<<<<<<< HEAD // observations for a Float64Observable instrument it is registered with. -======= -// observations for a Float64Observerable instrument it is registered with. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Calls to the Float64Observer record measurement values for the // Float64Observable. 
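// Illustrative aside: a callback is registered against the observable
// it reports to, e.g. (queueDepth is a stand-in for application code):
//
//   gauge, _ := meter.Float64ObservableGauge("queue.depth")
//   _, _ = meter.RegisterCallback(func(ctx context.Context, o metric.Observer) error {
//       o.ObserveFloat64(gauge, float64(queueDepth()))
//       return nil
//   }, gauge)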
// diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go index a3778661a9..e079aaef16 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go @@ -212,11 +212,7 @@ type Int64Observer interface { } // Int64Callback is a function registered with a Meter that makes observations -<<<<<<< HEAD // for an Int64Observable instrument it is registered with. Calls to the -======= -// for an Int64Observerable instrument it is registered with. Calls to the ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Int64Observer record measurement values for the Int64Observable. // // The function needs to complete in a finite amount of time and the deadline diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go index 3e1a2ad63a..a535782e1d 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -351,11 +351,7 @@ func WithAttributeSet(attributes attribute.Set) MeasurementOption { // // cp := make([]attribute.KeyValue, len(attributes)) // copy(cp, attributes) -<<<<<<< HEAD // WithAttributeSet(attribute.NewSet(cp...)) -======= -// WithAttributes(attribute.NewSet(cp...)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // [attribute.NewSet] may modify the passed attributes so this will make a copy // of attributes before creating a set in order to ensure this function is diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json index ab0e0aa0e5..4f80c898a1 100644 --- a/vendor/go.opentelemetry.io/otel/renovate.json +++ b/vendor/go.opentelemetry.io/otel/renovate.json @@ -15,19 +15,12 @@ "enabled": true }, { -<<<<<<< HEAD "matchPackageNames": ["google.golang.org/genproto/googleapis/**"], "groupName": "googleapis" }, { "matchPackageNames": ["golang.org/x/**"], "groupName": "golang.org/x" -======= - "matchFileNames": ["internal/tools/**"], - "matchManagers": ["gomod"], - "matchDepTypes": ["indirect"], - "enabled": false ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } ] } diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go index f852f747b4..34852a47b2 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go @@ -3,11 +3,8 @@ package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" -<<<<<<< HEAD import "go.opentelemetry.io/otel/attribute" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Scope represents the instrumentation scope. type Scope struct { // Name is the name of the instrumentation scope. This should be the @@ -17,9 +14,6 @@ type Scope struct { Version string // SchemaURL of the telemetry emitted by the scope. SchemaURL string -<<<<<<< HEAD // Attributes of the telemetry emitted by the scope. 
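
The instrument.go hunk above corrects the doc example to name WithAttributeSet rather than WithAttributes. The copy-before-NewSet idiom it documents matters because attribute.NewSet may reorder the slice it is given; spelled out as a small sketch:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func measurementOption(attrs []attribute.KeyValue) metric.MeasurementOption {
	// Copy first: attribute.NewSet may sort its argument in place.
	cp := make([]attribute.KeyValue, len(attrs))
	copy(cp, attrs)
	return metric.WithAttributeSet(attribute.NewSet(cp...))
}

func main() {
	attrs := []attribute.KeyValue{attribute.String("b", "2"), attribute.String("a", "1")}
	_ = measurementOption(attrs)
	fmt.Println(attrs[0].Key) // still "b": the caller's order is preserved
}
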
Attributes attribute.Set -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go index 2e7d9fc38a..203cd9d650 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go @@ -5,7 +5,6 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric" import ( "context" -<<<<<<< HEAD "errors" "os" "strings" @@ -13,26 +12,15 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/sdk/metric/exemplar" -======= - "fmt" - "sync" - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go.opentelemetry.io/otel/sdk/resource" ) // config contains configuration options for a MeterProvider. type config struct { -<<<<<<< HEAD res *resource.Resource readers []Reader views []View exemplarFilter exemplar.Filter -======= - res *resource.Resource - readers []Reader - views []View ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // readerSignals returns a force-flush and shutdown function for a @@ -56,7 +44,6 @@ func (c config) readerSignals() (forceFlush, shutdown func(context.Context) erro // value. func unify(funcs []func(context.Context) error) func(context.Context) error { return func(ctx context.Context) error { -<<<<<<< HEAD var err error for _, f := range funcs { if e := f(ctx); e != nil { @@ -64,27 +51,6 @@ func unify(funcs []func(context.Context) error) func(context.Context) error { } } return err -======= - var errs []error - for _, f := range funcs { - if err := f(ctx); err != nil { - errs = append(errs, err) - } - } - return unifyErrors(errs) - } -} - -// unifyErrors combines multiple errors into a single error. -func unifyErrors(errs []error) error { - switch len(errs) { - case 0: - return nil - case 1: - return errs[0] - default: - return fmt.Errorf("%v", errs) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -102,7 +68,6 @@ func unifyShutdown(funcs []func(context.Context) error) func(context.Context) er // newConfig returns a config configured with options. func newConfig(options []Option) config { -<<<<<<< HEAD conf := config{ res: resource.Default(), exemplarFilter: exemplar.TraceBasedFilter, @@ -110,9 +75,6 @@ func newConfig(options []Option) config { for _, o := range meterProviderOptionsFromEnv() { conf = o.apply(conf) } -======= - conf := config{res: resource.Default()} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, o := range options { conf = o.apply(conf) } @@ -140,15 +102,11 @@ func (o optionFunc) apply(conf config) config { // go.opentelemetry.io/otel/sdk/resource package will be used. func WithResource(res *resource.Resource) Option { return optionFunc(func(conf config) config { -<<<<<<< HEAD var err error conf.res, err = resource.Merge(resource.Environment(), res) if err != nil { otel.Handle(err) } -======= - conf.res = res ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return conf }) } @@ -180,7 +138,6 @@ func WithView(views ...View) Option { return cfg }) } -<<<<<<< HEAD // WithExemplarFilter configures the exemplar filter. 
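
Across these config.go hunks the vendored SDK drops its hand-rolled unifyErrors in favor of errors.Join (Go 1.20+). A self-contained demonstration of why that is a safe swap:

package main

import (
	"errors"
	"fmt"
)

var (
	errA = errors.New("a")
	errB = errors.New("b")
)

func main() {
	var err error
	err = errors.Join(err, nil)  // joining nils stays nil
	err = errors.Join(err, errA) // a single non-nil error is wrapped
	err = errors.Join(err, errB) // further errors accumulate
	fmt.Println(err)                                         // "a\nb"
	fmt.Println(errors.Is(err, errA), errors.Is(err, errB)) // true true
}
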
// @@ -213,5 +170,3 @@ func meterProviderOptionsFromEnv() []Option { } return opts } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go index 12feb98427..0335b8ae48 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go @@ -4,7 +4,6 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric" import ( -<<<<<<< HEAD "runtime" "go.opentelemetry.io/otel/attribute" @@ -42,57 +41,12 @@ func reservoirFunc[N int64 | float64](provider exemplar.ReservoirProvider, filte // guarantees are made on the shape or statistical properties of returned // exemplars. func DefaultExemplarReservoirProviderSelector(agg Aggregation) exemplar.ReservoirProvider { -======= - "os" - "runtime" - "slices" - - "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" - "go.opentelemetry.io/otel/sdk/metric/internal/x" -) - -// reservoirFunc returns the appropriately configured exemplar reservoir -// creation func based on the passed InstrumentKind and user defined -// environment variables. -// -// Note: This will only return non-nil values when the experimental exemplar -// feature is enabled and the OTEL_METRICS_EXEMPLAR_FILTER environment variable -// is not set to always_off. -func reservoirFunc[N int64 | float64](agg Aggregation) func() exemplar.FilteredReservoir[N] { - if !x.Exemplars.Enabled() { - return nil - } - // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/configuration/sdk-environment-variables.md#exemplar - const filterEnvKey = "OTEL_METRICS_EXEMPLAR_FILTER" - - var filter exemplar.Filter - - switch os.Getenv(filterEnvKey) { - case "always_on": - filter = exemplar.AlwaysOnFilter - case "always_off": - return exemplar.Drop - case "trace_based": - fallthrough - default: - filter = exemplar.SampledFilter - } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/metrics/sdk.md#exemplar-defaults // Explicit bucket histogram aggregation with more than 1 bucket will // use AlignedHistogramBucketExemplarReservoir. 
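
The rewritten exemplar.go above no longer reads OTEL_METRICS_EXEMPLAR_FILTER inside reservoirFunc: the filter now defaults to exemplar.TraceBasedFilter on the provider config and can be overridden by that environment variable or, programmatically, by the WithExemplarFilter option these hunks bring in. Configured explicitly, that looks roughly like:

package main

import (
	"context"

	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/exemplar"
)

func main() {
	// Keep exemplars for every measurement instead of the trace_based
	// default, which only samples measurements made inside a sampled span.
	provider := sdkmetric.NewMeterProvider(
		sdkmetric.WithExemplarFilter(exemplar.AlwaysOnFilter),
	)
	defer func() { _ = provider.Shutdown(context.Background()) }()
}
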
a, ok := agg.(AggregationExplicitBucketHistogram) if ok && len(a.Boundaries) > 0 { -<<<<<<< HEAD return exemplar.HistogramReservoirProvider(a.Boundaries) -======= - cp := slices.Clone(a.Boundaries) - return func() exemplar.FilteredReservoir[N] { - bounds := cp - return exemplar.NewFilteredReservoir[N](filter, exemplar.Histogram(bounds)) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var n int @@ -119,11 +73,5 @@ func reservoirFunc[N int64 | float64](agg Aggregation) func() exemplar.FilteredR } } -<<<<<<< HEAD return exemplar.FixedSizeReservoirProvider(n) -======= - return func() exemplar.FilteredReservoir[N] { - return exemplar.NewFilteredReservoir[N](filter, exemplar.FixedSize(n)) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go index 6e75e67687..48b723a7b3 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go @@ -144,15 +144,12 @@ type Stream struct { // Use NewAllowKeysFilter from "go.opentelemetry.io/otel/attribute" to // provide an allow-list of attribute keys here. AttributeFilter attribute.Filter -<<<<<<< HEAD // ExemplarReservoirProvider selects the // [go.opentelemetry.io/otel/sdk/metric/exemplar.ReservoirProvider] based // on the [Aggregation]. // // If unspecified, [DefaultExemplarReservoirProviderSelector] is used. ExemplarReservoirProviderSelector ExemplarReservoirProviderSelector -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // instID are the identifying properties of a instrument. @@ -243,13 +240,8 @@ func (i *float64Inst) aggregate(ctx context.Context, val float64, s attribute.Se } } -<<<<<<< HEAD // observableID is a comparable unique identifier of an observable. type observableID[N int64 | float64] struct { -======= -// observablID is a comparable unique identifier of an observable. 
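
On the observablID to observableID rename above: the type stays a plain struct of comparable fields precisely so values can key the SDK's registration maps. The general Go idiom, in miniature (field names here are stand-ins, not the SDK's):

package main

import "fmt"

// instrumentID is comparable (all field types are comparable), so values
// can serve directly as map keys and deduplicate identical instruments.
type instrumentID struct {
	name, description, unit string
	kind                    string // stand-in for the SDK's InstrumentKind
}

func main() {
	seen := map[instrumentID]struct{}{}
	id := instrumentID{name: "requests", unit: "1", kind: "counter"}
	seen[id] = struct{}{}
	_, dup := seen[instrumentID{name: "requests", unit: "1", kind: "counter"}]
	fmt.Println(dup) // true: equal field values compare equal
}
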
-type observablID[N int64 | float64] struct { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) name string description string kind InstrumentKind @@ -301,11 +293,7 @@ func newInt64Observable(m *meter, kind InstrumentKind, name, desc, u string) int type observable[N int64 | float64] struct { metric.Observable -<<<<<<< HEAD observableID[N] -======= - observablID[N] ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) meter *meter measures measures[N] @@ -314,11 +302,7 @@ type observable[N int64 | float64] struct { func newObservable[N int64 | float64](m *meter, kind InstrumentKind, name, desc, u string) *observable[N] { return &observable[N]{ -<<<<<<< HEAD observableID: observableID[N]{ -======= - observablID: observablID[N]{ ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) name: name, description: desc, kind: kind, diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go index deeec091e9..fde2193338 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go @@ -8,10 +8,6 @@ import ( "time" "go.opentelemetry.io/otel/attribute" -<<<<<<< HEAD -======= - "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go.opentelemetry.io/otel/sdk/metric/metricdata" ) @@ -41,13 +37,8 @@ type Builder[N int64 | float64] struct { // create new exemplar reservoirs for a new seen attribute set. // // If this is not provided a default factory function that returns an -<<<<<<< HEAD // dropReservoir reservoir will be used. ReservoirFunc func(attribute.Set) FilteredExemplarReservoir[N] -======= - // exemplar.Drop reservoir will be used. - ReservoirFunc func() exemplar.FilteredReservoir[N] ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // AggregationLimit is the cardinality limit of measurement attributes. 
Any // measurement for new attributes once the limit has been reached will be // aggregated into a single aggregate for the "otel.metric.overflow" @@ -58,20 +49,12 @@ type Builder[N int64 | float64] struct { AggregationLimit int } -<<<<<<< HEAD func (b Builder[N]) resFunc() func(attribute.Set) FilteredExemplarReservoir[N] { -======= -func (b Builder[N]) resFunc() func() exemplar.FilteredReservoir[N] { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if b.ReservoirFunc != nil { return b.ReservoirFunc } -<<<<<<< HEAD return dropReservoir -======= - return exemplar.Drop ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type fltrMeasure[N int64 | float64] func(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go index 6397dbf47e..25d709948e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go @@ -6,11 +6,7 @@ package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggreg import ( "sync" -<<<<<<< HEAD "go.opentelemetry.io/otel/sdk/metric/exemplar" -======= - "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go.opentelemetry.io/otel/sdk/metric/metricdata" ) @@ -21,10 +17,7 @@ var exemplarPool = sync.Pool{ func collectExemplars[N int64 | float64](out *[]metricdata.Exemplar[N], f func(*[]exemplar.Exemplar)) { dest := exemplarPool.Get().(*[]exemplar.Exemplar) defer func() { -<<<<<<< HEAD clear(*dest) // Erase elements to let GC collect objects. -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) *dest = (*dest)[:0] exemplarPool.Put(dest) }() diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go index f1729ac727..b7aa721651 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go @@ -12,10 +12,6 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" -<<<<<<< HEAD -======= - "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go.opentelemetry.io/otel/sdk/metric/metricdata" ) @@ -34,11 +30,7 @@ const ( // expoHistogramDataPoint is a single data point in an exponential histogram. type expoHistogramDataPoint[N int64 | float64] struct { attrs attribute.Set -<<<<<<< HEAD res FilteredExemplarReservoir[N] -======= - res exemplar.FilteredReservoir[N] ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) count uint64 min N @@ -291,11 +283,7 @@ func (b *expoBuckets) downscale(delta int32) { // newExponentialHistogram returns an Aggregator that summarizes a set of // measurements as an exponential histogram. Each histogram is scoped by attributes // and the aggregation cycle the measurements were made in. 
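
collectExemplars above borrows its scratch slice from a sync.Pool and, on the resolved side of the merge, now calls clear before truncating so the GC can reclaim whatever the stale elements still reference. The borrow/scrub/return pattern in isolation (the clear builtin requires Go 1.21+):

package main

import (
	"fmt"
	"sync"
)

var pool = sync.Pool{
	New: func() any { s := make([]string, 0, 16); return &s },
}

func withScratch(f func(*[]string)) {
	dest := pool.Get().(*[]string)
	defer func() {
		clear(*dest)        // zero elements so the GC can collect them
		*dest = (*dest)[:0] // keep the backing array for reuse
		pool.Put(dest)
	}()
	f(dest)
}

func main() {
	withScratch(func(s *[]string) {
		*s = append(*s, "exemplar")
		fmt.Println(len(*s)) // 1
	})
}
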
-<<<<<<< HEAD func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *expoHistogram[N] { -======= -func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *expoHistogram[N] { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return &expoHistogram[N]{ noSum: noSum, noMinMax: noMinMax, @@ -318,11 +306,7 @@ type expoHistogram[N int64 | float64] struct { maxSize int maxScale int32 -<<<<<<< HEAD newRes func(attribute.Set) FilteredExemplarReservoir[N] -======= - newRes func() exemplar.FilteredReservoir[N] ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) limit limiter[*expoHistogramDataPoint[N]] values map[attribute.Distinct]*expoHistogramDataPoint[N] valuesMu sync.Mutex @@ -343,11 +327,7 @@ func (e *expoHistogram[N]) measure(ctx context.Context, value N, fltrAttr attrib v, ok := e.values[attr.Equivalent()] if !ok { v = newExpoHistogramDataPoint[N](attr, e.maxSize, e.maxScale, e.noMinMax, e.noSum) -<<<<<<< HEAD v.res = e.newRes(attr) -======= - v.res = e.newRes() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) e.values[attr.Equivalent()] = v } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go index e600c99aea..d577ae2c19 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go @@ -11,20 +11,12 @@ import ( "time" "go.opentelemetry.io/otel/attribute" -<<<<<<< HEAD -======= - "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go.opentelemetry.io/otel/sdk/metric/metricdata" ) type buckets[N int64 | float64] struct { attrs attribute.Set -<<<<<<< HEAD res FilteredExemplarReservoir[N] -======= - res exemplar.FilteredReservoir[N] ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) counts []uint64 count uint64 @@ -55,21 +47,13 @@ type histValues[N int64 | float64] struct { noSum bool bounds []float64 -<<<<<<< HEAD newRes func(attribute.Set) FilteredExemplarReservoir[N] -======= - newRes func() exemplar.FilteredReservoir[N] ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) limit limiter[*buckets[N]] values map[attribute.Distinct]*buckets[N] valuesMu sync.Mutex } -<<<<<<< HEAD func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *histValues[N] { -======= -func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *histValues[N] { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The responsibility of keeping all buckets correctly associated with the // passed boundaries is ultimately this type's responsibility. Make a copy // here so we can always guarantee this. Or, in the case of failure, have @@ -109,11 +93,7 @@ func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute // // buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞) b = newBuckets[N](attr, len(s.bounds)+1) -<<<<<<< HEAD b.res = s.newRes(attr) -======= - b.res = s.newRes() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Ensure min and max are recorded values (not zero), for new buckets. 
b.min, b.max = value, value @@ -128,11 +108,7 @@ func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute // newHistogram returns an Aggregator that summarizes a set of measurements as // an histogram. -<<<<<<< HEAD func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *histogram[N] { -======= -func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *histogram[N] { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return &histogram[N]{ histValues: newHistValues[N](boundaries, noSum, limit, r), noMinMax: noMinMax, diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go index d0f1ad6f18..d3a93f085c 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go @@ -9,10 +9,6 @@ import ( "time" "go.opentelemetry.io/otel/attribute" -<<<<<<< HEAD -======= - "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go.opentelemetry.io/otel/sdk/metric/metricdata" ) @@ -20,17 +16,10 @@ import ( type datapoint[N int64 | float64] struct { attrs attribute.Set value N -<<<<<<< HEAD res FilteredExemplarReservoir[N] } func newLastValue[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *lastValue[N] { -======= - res exemplar.FilteredReservoir[N] -} - -func newLastValue[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *lastValue[N] { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return &lastValue[N]{ newRes: r, limit: newLimiter[datapoint[N]](limit), @@ -43,11 +32,7 @@ func newLastValue[N int64 | float64](limit int, r func() exemplar.FilteredReserv type lastValue[N int64 | float64] struct { sync.Mutex -<<<<<<< HEAD newRes func(attribute.Set) FilteredExemplarReservoir[N] -======= - newRes func() exemplar.FilteredReservoir[N] ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) limit limiter[datapoint[N]] values map[attribute.Distinct]datapoint[N] start time.Time @@ -60,11 +45,7 @@ func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute. attr := s.limit.Attributes(fltrAttr, s.values) d, ok := s.values[attr.Equivalent()] if !ok { -<<<<<<< HEAD d.res = s.newRes(attr) -======= - d.res = s.newRes() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } d.attrs = attr @@ -133,11 +114,7 @@ func (s *lastValue[N]) copyDpts(dest *[]metricdata.DataPoint[N], t time.Time) in // newPrecomputedLastValue returns an aggregator that summarizes a set of // observations as the last one made. 
-<<<<<<< HEAD func newPrecomputedLastValue[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *precomputedLastValue[N] { -======= -func newPrecomputedLastValue[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *precomputedLastValue[N] { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return &precomputedLastValue[N]{lastValue: newLastValue[N](limit, r)} } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go index cd098b02ac..8e132ad618 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go @@ -9,40 +9,24 @@ import ( "time" "go.opentelemetry.io/otel/attribute" -<<<<<<< HEAD -======= - "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go.opentelemetry.io/otel/sdk/metric/metricdata" ) type sumValue[N int64 | float64] struct { n N -<<<<<<< HEAD res FilteredExemplarReservoir[N] -======= - res exemplar.FilteredReservoir[N] ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) attrs attribute.Set } // valueMap is the storage for sums. type valueMap[N int64 | float64] struct { sync.Mutex -<<<<<<< HEAD newRes func(attribute.Set) FilteredExemplarReservoir[N] -======= - newRes func() exemplar.FilteredReservoir[N] ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) limit limiter[sumValue[N]] values map[attribute.Distinct]sumValue[N] } -<<<<<<< HEAD func newValueMap[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *valueMap[N] { -======= -func newValueMap[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *valueMap[N] { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return &valueMap[N]{ newRes: r, limit: newLimiter[sumValue[N]](limit), @@ -57,11 +41,7 @@ func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.S attr := s.limit.Attributes(fltrAttr, s.values) v, ok := s.values[attr.Equivalent()] if !ok { -<<<<<<< HEAD v.res = s.newRes(attr) -======= - v.res = s.newRes() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } v.attrs = attr @@ -74,11 +54,7 @@ func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.S // newSum returns an aggregator that summarizes a set of measurements as their // arithmetic sum. Each sum is scoped by attributes and the aggregation cycle // the measurements were made in. -<<<<<<< HEAD func newSum[N int64 | float64](monotonic bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *sum[N] { -======= -func newSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.FilteredReservoir[N]) *sum[N] { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return &sum[N]{ valueMap: newValueMap[N](limit, r), monotonic: monotonic, @@ -165,15 +141,9 @@ func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int { } // newPrecomputedSum returns an aggregator that summarizes a set of -<<<<<<< HEAD // observations as their arithmetic sum. Each sum is scoped by attributes and // the aggregation cycle the measurements were made in. func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *precomputedSum[N] { -======= -// observatrions as their arithmetic sum. Each sum is scoped by attributes and -// the aggregation cycle the measurements were made in. 
-func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.FilteredReservoir[N]) *precomputedSum[N] { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return &precomputedSum[N]{ valueMap: newValueMap[N](limit, r), monotonic: monotonic, @@ -181,11 +151,7 @@ func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() ex } } -<<<<<<< HEAD // precomputedSum summarizes a set of observations as their arithmetic sum. -======= -// precomputedSum summarizes a set of observatrions as their arithmetic sum. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type precomputedSum[N int64 | float64] struct { *valueMap[N] diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go deleted file mode 100644 index 5394f48e0d..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package exemplar provides an implementation of the OpenTelemetry exemplar -// reservoir to be used in metric collection pipelines. -package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go deleted file mode 100644 index 5a0f39ae14..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" -) - -// Drop returns a [FilteredReservoir] that drops all measurements it is offered. -func Drop[N int64 | float64]() FilteredReservoir[N] { return &dropRes[N]{} } - -type dropRes[N int64 | float64] struct{} - -// Offer does nothing, all measurements offered will be dropped. -func (r *dropRes[N]) Offer(context.Context, N, []attribute.KeyValue) {} - -// Collect resets dest. No exemplars will ever be returned. -func (r *dropRes[N]) Collect(dest *[]Exemplar) { - *dest = (*dest)[:0] -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go deleted file mode 100644 index fcaa6a4697..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" - -import ( - "time" - - "go.opentelemetry.io/otel/attribute" -) - -// Exemplar is a measurement sampled from a timeseries providing a typical -// example. -type Exemplar struct { - // FilteredAttributes are the attributes recorded with the measurement but - // filtered out of the timeseries' aggregated data. - FilteredAttributes []attribute.KeyValue - // Time is the time when the measurement was recorded. - Time time.Time - // Value is the measured value. - Value Value - // SpanID is the ID of the span that was active during the measurement. If - // no span was active or the span was not sampled this will be empty. 
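
The deleted exemplar.go above shows what an exemplar carries: the filtered attributes, a timestamp, the value, and the active span's IDs. Those IDs come straight off the context; a small sketch of the extraction, mirroring the deleted measurement logic:

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

// exemplarIDs copies the active span's IDs, leaving them empty when no
// span is recorded in ctx.
func exemplarIDs(ctx context.Context) (traceID, spanID []byte) {
	sc := trace.SpanContextFromContext(ctx)
	if sc.HasTraceID() {
		t := sc.TraceID()
		traceID = t[:]
	}
	if sc.HasSpanID() {
		s := sc.SpanID()
		spanID = s[:]
	}
	return traceID, spanID
}

func main() {
	tid, sid := exemplarIDs(context.Background())
	fmt.Println(len(tid), len(sid)) // 0 0: no active span
}
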
- SpanID []byte `json:",omitempty"` - // TraceID is the ID of the trace the active span belonged to during the - // measurement. If no span was active or the span was not sampled this will - // be empty. - TraceID []byte `json:",omitempty"` -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go deleted file mode 100644 index 152a069a09..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" - -import ( - "context" - - "go.opentelemetry.io/otel/trace" -) - -// Filter determines if a measurement should be offered. -// -// The passed ctx needs to contain any baggage or span that were active -// when the measurement was made. This information may be used by the -// Reservoir in making a sampling decision. -type Filter func(context.Context) bool - -// SampledFilter is a [Filter] that will only offer measurements -// if the passed context associated with the measurement contains a sampled -// [go.opentelemetry.io/otel/trace.SpanContext]. -func SampledFilter(ctx context.Context) bool { - return trace.SpanContextFromContext(ctx).IsSampled() -} - -// AlwaysOnFilter is a [Filter] that always offers measurements. -func AlwaysOnFilter(ctx context.Context) bool { - return true -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go deleted file mode 100644 index 9fedfa4be6..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" - -import ( - "context" - "time" - - "go.opentelemetry.io/otel/attribute" -) - -// FilteredReservoir wraps a [Reservoir] with a filter. -type FilteredReservoir[N int64 | float64] interface { - // Offer accepts the parameters associated with a measurement. The - // parameters will be stored as an exemplar if the filter decides to - // sample the measurement. - // - // The passed ctx needs to contain any baggage or span that were active - // when the measurement was made. This information may be used by the - // Reservoir in making a sampling decision. - Offer(ctx context.Context, val N, attr []attribute.KeyValue) - // Collect returns all the held exemplars in the reservoir. - Collect(dest *[]Exemplar) -} - -// filteredReservoir handles the pre-sampled exemplar of measurements made. -type filteredReservoir[N int64 | float64] struct { - filter Filter - reservoir Reservoir -} - -// NewFilteredReservoir creates a [FilteredReservoir] which only offers values -// that are allowed by the filter. -func NewFilteredReservoir[N int64 | float64](f Filter, r Reservoir) FilteredReservoir[N] { - return &filteredReservoir[N]{ - filter: f, - reservoir: r, - } -} - -func (f *filteredReservoir[N]) Offer(ctx context.Context, val N, attr []attribute.KeyValue) { - if f.filter(ctx) { - // only record the current time if we are sampling this measurment. 
- f.reservoir.Offer(ctx, time.Now(), NewValue(val), attr) - } -} - -func (f *filteredReservoir[N]) Collect(dest *[]Exemplar) { f.reservoir.Collect(dest) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go deleted file mode 100644 index a6ff86d027..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" - -import ( - "context" - "slices" - "sort" - "time" - - "go.opentelemetry.io/otel/attribute" -) - -// Histogram returns a [Reservoir] that samples the last measurement that falls -// within a histogram bucket. The histogram bucket upper-boundaries are define -// by bounds. -// -// The passed bounds will be sorted by this function. -func Histogram(bounds []float64) Reservoir { - slices.Sort(bounds) - return &histRes{ - bounds: bounds, - storage: newStorage(len(bounds) + 1), - } -} - -type histRes struct { - *storage - - // bounds are bucket bounds in ascending order. - bounds []float64 -} - -func (r *histRes) Offer(ctx context.Context, t time.Time, v Value, a []attribute.KeyValue) { - var x float64 - switch v.Type() { - case Int64ValueType: - x = float64(v.Int64()) - case Float64ValueType: - x = v.Float64() - default: - panic("unknown value type") - } - r.store[sort.SearchFloat64s(r.bounds, x)] = newMeasurement(ctx, t, v, a) -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go deleted file mode 100644 index 199a2608f7..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" - -import ( - "context" - "math" - "math/rand" - "sync" - "time" - - "go.opentelemetry.io/otel/attribute" -) - -var ( - // rng is used to make sampling decisions. - // - // Do not use crypto/rand. There is no reason for the decrease in performance - // given this is not a security sensitive decision. - rng = rand.New(rand.NewSource(time.Now().UnixNano())) - // Ensure concurrent safe accecess to rng and its underlying source. - rngMu sync.Mutex -) - -// random returns, as a float64, a uniform pseudo-random number in the open -// interval (0.0,1.0). -func random() float64 { - // TODO: This does not return a uniform number. rng.Float64 returns a - // uniformly random int in [0,2^53) that is divided by 2^53. Meaning it - // returns multiples of 2^-53, and not all floating point numbers between 0 - // and 1 (i.e. for values less than 2^-4 the 4 last bits of the significand - // are always going to be 0). - // - // An alternative algorithm should be considered that will actually return - // a uniform number in the interval (0,1). For example, since the default - // rand source provides a uniform distribution for Int63, this can be - // converted following the prototypical code of Mersenne Twister 64 (Takuji - // Nishimura and Makoto Matsumoto: - // http://www.math.sci.hiroshima-u.ac.jp/m-mat/MT/VERSIONS/C-LANG/mt19937-64.c) - // - // (float64(rng.Int63()>>11) + 0.5) * (1.0 / 4503599627370496.0) - // - // There are likely many other methods to explore here as well. 
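
The deleted hist.go above indexes its per-bucket storage with sort.SearchFloat64s over the sorted upper boundaries, so each offered value replaces the exemplar of exactly the (lower, upper] bucket it falls into. The lookup on its own:

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Upper boundaries, ascending; storage holds len(bounds)+1 buckets:
	// (-inf, 0], (0, 5], (5, 10], (10, +inf)
	bounds := []float64{0, 5, 10}
	for _, v := range []float64{-1, 5, 7.2, 11} {
		// Smallest index i with bounds[i] >= v; a value above every bound
		// lands in the overflow bucket at index len(bounds).
		fmt.Println(v, "->", sort.SearchFloat64s(bounds, v))
	}
	// Output: -1 -> 0, 5 -> 1, 7.2 -> 2, 11 -> 3
}
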
- - rngMu.Lock() - defer rngMu.Unlock() - - f := rng.Float64() - for f == 0 { - f = rng.Float64() - } - return f -} - -// FixedSize returns a [Reservoir] that samples at most k exemplars. If there -// are k or less measurements made, the Reservoir will sample each one. If -// there are more than k, the Reservoir will then randomly sample all -// additional measurement with a decreasing probability. -func FixedSize(k int) Reservoir { - r := &randRes{storage: newStorage(k)} - r.reset() - return r -} - -type randRes struct { - *storage - - // count is the number of measurement seen. - count int64 - // next is the next count that will store a measurement at a random index - // once the reservoir has been filled. - next int64 - // w is the largest random number in a distribution that is used to compute - // the next next. - w float64 -} - -func (r *randRes) Offer(ctx context.Context, t time.Time, n Value, a []attribute.KeyValue) { - // The following algorithm is "Algorithm L" from Li, Kim-Hung (4 December - // 1994). "Reservoir-Sampling Algorithms of Time Complexity - // O(n(1+log(N/n)))". ACM Transactions on Mathematical Software. 20 (4): - // 481–493 (https://dl.acm.org/doi/10.1145/198429.198435). - // - // A high-level overview of "Algorithm L": - // 0) Pre-calculate the random count greater than the storage size when - // an exemplar will be replaced. - // 1) Accept all measurements offered until the configured storage size is - // reached. - // 2) Loop: - // a) When the pre-calculate count is reached, replace a random - // existing exemplar with the offered measurement. - // b) Calculate the next random count greater than the existing one - // which will replace another exemplars - // - // The way a "replacement" count is computed is by looking at `n` number of - // independent random numbers each corresponding to an offered measurement. - // Of these numbers the smallest `k` (the same size as the storage - // capacity) of them are kept as a subset. The maximum value in this - // subset, called `w` is used to weight another random number generation - // for the next count that will be considered. - // - // By weighting the next count computation like described, it is able to - // perform a uniformly-weighted sampling algorithm based on the number of - // samples the reservoir has seen so far. The sampling will "slow down" as - // more and more samples are offered so as to reduce a bias towards those - // offered just prior to the end of the collection. - // - // This algorithm is preferred because of its balance of simplicity and - // performance. It will compute three random numbers (the bulk of - // computation time) for each item that becomes part of the reservoir, but - // it does not spend any time on items that do not. In particular it has an - // asymptotic runtime of O(k(1 + log(n/k)) where n is the number of - // measurements offered and k is the reservoir size. - // - // See https://en.wikipedia.org/wiki/Reservoir_sampling for an overview of - // this and other reservoir sampling algorithms. See - // https://github.com/MrAlias/reservoir-sampling for a performance - // comparison of reservoir sampling algorithms. - - if int(r.count) < cap(r.store) { - r.store[r.count] = newMeasurement(ctx, t, n, a) - } else { - if r.count == r.next { - // Overwrite a random existing measurement with the one offered. - idx := int(rng.Int63n(int64(cap(r.store)))) - r.store[idx] = newMeasurement(ctx, t, n, a) - r.advance() - } - } - r.count++ -} - -// reset resets r to the initial state. 
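
The deleted rand.go documents "Algorithm L" at length in the comments above. For reference, a compact standalone version of that reservoir-sampling algorithm (a sketch of the published algorithm, not the SDK's exact bookkeeping):

package main

import (
	"fmt"
	"math"
	"math/rand"
)

// rnd returns a uniform number in (0,1); rand.Float64 may return exactly 0,
// which would break the logarithms below.
func rnd() float64 {
	f := rand.Float64()
	for f == 0 {
		f = rand.Float64()
	}
	return f
}

// sample keeps k uniformly chosen elements of stream using Algorithm L:
// fill the reservoir, then jump ahead geometrically and replace a random
// victim, so work is O(k(1+log(n/k))) rather than O(n).
func sample[T any](stream []T, k int) []T {
	res := make([]T, 0, k)
	w := math.Exp(math.Log(rnd()) / float64(k))
	next := k + int(math.Log(rnd())/math.Log(1-w)) + 1
	for i, v := range stream {
		switch {
		case i < k:
			res = append(res, v)
		case i == next:
			res[rand.Intn(k)] = v
			w *= math.Exp(math.Log(rnd()) / float64(k))
			next += int(math.Log(rnd())/math.Log(1-w)) + 1
		}
	}
	return res
}

func main() {
	stream := make([]int, 1000)
	for i := range stream {
		stream[i] = i
	}
	fmt.Println(sample(stream, 5)) // five uniformly sampled elements
}
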
-func (r *randRes) reset() { - // This resets the number of exemplars known. - r.count = 0 - // Random index inserts should only happen after the storage is full. - r.next = int64(cap(r.store)) - - // Initial random number in the series used to generate r.next. - // - // This is set before r.advance to reset or initialize the random number - // series. Without doing so it would always be 0 or never restart a new - // random number series. - // - // This maps the uniform random number in (0,1) to a geometric distribution - // over the same interval. The mean of the distribution is inversely - // proportional to the storage capacity. - r.w = math.Exp(math.Log(random()) / float64(cap(r.store))) - - r.advance() -} - -// advance updates the count at which the offered measurement will overwrite an -// existing exemplar. -func (r *randRes) advance() { - // Calculate the next value in the random number series. - // - // The current value of r.w is based on the max of a distribution of random - // numbers (i.e. `w = max(u_1,u_2,...,u_k)` for `k` equal to the capacity - // of the storage and each `u` in the interval (0,w)). To calculate the - // next r.w we use the fact that when the next exemplar is selected to be - // included in the storage an existing one will be dropped, and the - // corresponding random number in the set used to calculate r.w will also - // be replaced. The replacement random number will also be within (0,w), - // therefore the next r.w will be based on the same distribution (i.e. - // `max(u_1,u_2,...,u_k)`). Therefore, we can sample the next r.w by - // computing the next random number `u` and take r.w as `w * u^(1/k)`. - r.w *= math.Exp(math.Log(random()) / float64(cap(r.store))) - // Use the new random number in the series to calculate the count of the - // next measurement that will be stored. - // - // Given 0 < r.w < 1, each iteration will result in subsequent r.w being - // smaller. This translates here into the next next being selected against - // a distribution with a higher mean (i.e. the expected value will increase - // and replacements become less likely) - // - // Important to note, the new r.next will always be at least 1 more than - // the last r.next. - r.next += int64(math.Log(random())/math.Log(1-r.w)) + 1 -} - -func (r *randRes) Collect(dest *[]Exemplar) { - r.storage.Collect(dest) - // Call reset here even though it will reset r.count and restart the random - // number series. This will persist any old exemplars as long as no new - // measurements are offered, but it will also prioritize those new - // measurements that are made over the older collection cycle ones. - r.reset() -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go deleted file mode 100644 index 80fa59554f..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" - -import ( - "context" - "time" - - "go.opentelemetry.io/otel/attribute" -) - -// Reservoir holds the sampled exemplar of measurements made. -type Reservoir interface { - // Offer accepts the parameters associated with a measurement. The - // parameters will be stored as an exemplar if the Reservoir decides to - // sample the measurement. 
- // - // The passed ctx needs to contain any baggage or span that were active - // when the measurement was made. This information may be used by the - // Reservoir in making a sampling decision. - // - // The time t is the time when the measurement was made. The val and attr - // parameters are the value and dropped (filtered) attributes of the - // measurement respectively. - Offer(ctx context.Context, t time.Time, val Value, attr []attribute.KeyValue) - - // Collect returns all the held exemplars. - // - // The Reservoir state is preserved after this call. - Collect(dest *[]Exemplar) -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go deleted file mode 100644 index 10b2976f79..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" - -import ( - "context" - "time" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -// storage is an exemplar storage for [Reservoir] implementations. -type storage struct { - // store are the measurements sampled. - // - // This does not use []metricdata.Exemplar because it potentially would - // require an allocation for trace and span IDs in the hot path of Offer. - store []measurement -} - -func newStorage(n int) *storage { - return &storage{store: make([]measurement, n)} -} - -// Collect returns all the held exemplars. -// -// The Reservoir state is preserved after this call. -func (r *storage) Collect(dest *[]Exemplar) { - *dest = reset(*dest, len(r.store), len(r.store)) - var n int - for _, m := range r.store { - if !m.valid { - continue - } - - m.Exemplar(&(*dest)[n]) - n++ - } - *dest = (*dest)[:n] -} - -// measurement is a measurement made by a telemetry system. -type measurement struct { - // FilteredAttributes are the attributes dropped during the measurement. - FilteredAttributes []attribute.KeyValue - // Time is the time when the measurement was made. - Time time.Time - // Value is the value of the measurement. - Value Value - // SpanContext is the SpanContext active when a measurement was made. - SpanContext trace.SpanContext - - valid bool -} - -// newMeasurement returns a new non-empty Measurement. -func newMeasurement(ctx context.Context, ts time.Time, v Value, droppedAttr []attribute.KeyValue) measurement { - return measurement{ - FilteredAttributes: droppedAttr, - Time: ts, - Value: v, - SpanContext: trace.SpanContextFromContext(ctx), - valid: true, - } -} - -// Exemplar returns m as an [Exemplar]. 
-func (m measurement) Exemplar(dest *Exemplar) { - dest.FilteredAttributes = m.FilteredAttributes - dest.Time = m.Time - dest.Value = m.Value - - if m.SpanContext.HasTraceID() { - traceID := m.SpanContext.TraceID() - dest.TraceID = traceID[:] - } else { - dest.TraceID = dest.TraceID[:0] - } - - if m.SpanContext.HasSpanID() { - spanID := m.SpanContext.SpanID() - dest.SpanID = spanID[:] - } else { - dest.SpanID = dest.SpanID[:0] - } -} - -func reset[T any](s []T, length, capacity int) []T { - if cap(s) < capacity { - return make([]T, length, capacity) - } - return s[:length] -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go deleted file mode 100644 index 1957d6b1e3..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" - -import "math" - -// ValueType identifies the type of value used in exemplar data. -type ValueType uint8 - -const ( - // UnknownValueType should not be used. It represents a misconfigured - // Value. - UnknownValueType ValueType = 0 - // Int64ValueType represents a Value with int64 data. - Int64ValueType ValueType = 1 - // Float64ValueType represents a Value with float64 data. - Float64ValueType ValueType = 2 -) - -// Value is the value of data held by an exemplar. -type Value struct { - t ValueType - val uint64 -} - -// NewValue returns a new [Value] for the provided value. -func NewValue[N int64 | float64](value N) Value { - switch v := any(value).(type) { - case int64: - return Value{t: Int64ValueType, val: uint64(v)} - case float64: - return Value{t: Float64ValueType, val: math.Float64bits(v)} - } - return Value{} -} - -// Type returns the [ValueType] of data held by v. -func (v Value) Type() ValueType { return v.t } - -// Int64 returns the value of v as an int64. If the ValueType of v is not an -// Int64ValueType, 0 is returned. -func (v Value) Int64() int64 { - if v.t == Int64ValueType { - // Assumes the correct int64 was stored in v.val based on type. - return int64(v.val) // nolint: gosec - } - return 0 -} - -// Float64 returns the value of v as an float64. If the ValueType of v is not -// an Float64ValueType, 0 is returned. -func (v Value) Float64() float64 { - if v.t == Float64ValueType { - return math.Float64frombits(v.val) - } - return 0 -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go index 76dd4a00c7..0891993706 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go @@ -10,7 +10,6 @@ package x // import "go.opentelemetry.io/otel/sdk/metric/internal/x" import ( "os" "strconv" -<<<<<<< HEAD ) // CardinalityLimit is an experimental feature flag that defines if @@ -28,41 +27,6 @@ var CardinalityLimit = newFeature("CARDINALITY_LIMIT", func(v string) (int, bool } return n, true }) -======= - "strings" -) - -var ( - // Exemplars is an experimental feature flag that defines if exemplars - // should be recorded for metric data-points. - // - // To enable this feature set the OTEL_GO_X_EXEMPLAR environment variable - // to the case-insensitive string value of "true" (i.e. "True" and "TRUE" - // will also enable this). 
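
The deleted value.go above packs either numeric type into a single uint64 plus a type tag, avoiding an allocation per exemplar value. The two lossless bit reinterpretations it relies on:

package main

import (
	"fmt"
	"math"
)

func main() {
	// float64 <-> uint64 via the IEEE-754 bit pattern, losslessly.
	bits := math.Float64bits(3.25)
	fmt.Println(math.Float64frombits(bits)) // 3.25

	// int64 <-> uint64 by reinterpreting the two's-complement bits.
	n := uint64(int64(-7))
	fmt.Println(int64(n)) // -7
}
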
- Exemplars = newFeature("EXEMPLAR", func(v string) (string, bool) { - if strings.ToLower(v) == "true" { - return v, true - } - return "", false - }) - - // CardinalityLimit is an experimental feature flag that defines if - // cardinality limits should be applied to the recorded metric data-points. - // - // To enable this feature set the OTEL_GO_X_CARDINALITY_LIMIT environment - // variable to the integer limit value you want to use. - // - // Setting OTEL_GO_X_CARDINALITY_LIMIT to a value less than or equal to 0 - // will disable the cardinality limits. - CardinalityLimit = newFeature("CARDINALITY_LIMIT", func(v string) (int, bool) { - n, err := strconv.Atoi(v) - if err != nil { - return 0, false - } - return n, true - }) -) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Feature is an experimental feature control flag. It provides a uniform way // to interact with these feature flags and parse their values. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go index 5a93e5e9a0..c495985bc2 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go @@ -113,29 +113,17 @@ func (mr *ManualReader) Collect(ctx context.Context, rm *metricdata.ResourceMetr if err != nil { return err } -<<<<<<< HEAD for _, producer := range mr.externalProducers.Load().([]Producer) { externalMetrics, e := producer.Produce(ctx) if e != nil { err = errors.Join(err, e) -======= - var errs []error - for _, producer := range mr.externalProducers.Load().([]Producer) { - externalMetrics, err := producer.Produce(ctx) - if err != nil { - errs = append(errs, err) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...) } global.Debug("ManualReader collection", "Data", rm) -<<<<<<< HEAD return err -======= - return unifyErrors(errs) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // MarshalLog returns logging data about the ManualReader. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go index e51f9f6981..823cdf2c62 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go @@ -150,14 +150,11 @@ func (m *meter) int64ObservableInstrument(id Instrument, callbacks []metric.Int6 continue } inst.appendMeasures(in) -<<<<<<< HEAD // Add the measures to the pipeline. It is required to maintain // measures per pipeline to avoid calling the measure that // is not part of the pipeline. insert.pipeline.addInt64Measure(inst.observableID, in) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, cback := range callbacks { inst := int64Observer{measures: in} fn := cback @@ -317,14 +314,11 @@ func (m *meter) float64ObservableInstrument(id Instrument, callbacks []metric.Fl continue } inst.appendMeasures(in) -<<<<<<< HEAD // Add the measures to the pipeline. It is required to maintain // measures per pipeline to avoid calling the measure that // is not part of the pipeline. 
insert.pipeline.addFloat64Measure(inst.observableID, in) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, cback := range callbacks { inst := float64Observer{measures: in} fn := cback @@ -457,7 +451,6 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) return noopRegister{}, nil } -<<<<<<< HEAD var err error validInstruments := make([]metric.Observable, 0, len(insts)) for _, inst := range insts { @@ -480,52 +473,17 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) } validInstruments = append(validInstruments, inst) -======= - reg := newObserver() - var errs multierror - for _, inst := range insts { - // Unwrap any global. - if u, ok := inst.(interface { - Unwrap() metric.Observable - }); ok { - inst = u.Unwrap() - } - - switch o := inst.(type) { - case int64Observable: - if err := o.registerable(m); err != nil { - if !errors.Is(err, errEmptyAgg) { - errs.append(err) - } - continue - } - reg.registerInt64(o.observablID) - case float64Observable: - if err := o.registerable(m); err != nil { - if !errors.Is(err, errEmptyAgg) { - errs.append(err) - } - continue - } - reg.registerFloat64(o.observablID) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: // Instrument external to the SDK. return nil, fmt.Errorf("invalid observable: from different implementation") } } -<<<<<<< HEAD if len(validInstruments) == 0 { -======= - err := errs.errorOrNil() - if reg.len() == 0 { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // All insts use drop aggregation or are invalid. return noopRegister{}, err } -<<<<<<< HEAD unregs := make([]func(), len(m.pipes)) for ix, pipe := range m.pipes { reg := newObserver(pipe) @@ -544,17 +502,11 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) } return unregisterFuncs{f: unregs}, err -======= - // Some or all instruments were valid. - cback := func(ctx context.Context) error { return f(ctx, reg) } - return m.pipes.registerMultiCallback(cback), err ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type observer struct { embedded.Observer -<<<<<<< HEAD pipe *pipeline float64 map[observableID[float64]]struct{} int64 map[observableID[int64]]struct{} @@ -573,28 +525,6 @@ func (r observer) registerFloat64(id observableID[float64]) { } func (r observer) registerInt64(id observableID[int64]) { -======= - float64 map[observablID[float64]]struct{} - int64 map[observablID[int64]]struct{} -} - -func newObserver() observer { - return observer{ - float64: make(map[observablID[float64]]struct{}), - int64: make(map[observablID[int64]]struct{}), - } -} - -func (r observer) len() int { - return len(r.float64) + len(r.int64) -} - -func (r observer) registerFloat64(id observablID[float64]) { - r.float64[id] = struct{}{} -} - -func (r observer) registerInt64(id observablID[int64]) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) r.int64[id] = struct{}{} } @@ -608,29 +538,12 @@ func (r observer) ObserveFloat64(o metric.Float64Observable, v float64, opts ... switch conv := o.(type) { case float64Observable: oImpl = conv -<<<<<<< HEAD -======= - case interface { - Unwrap() metric.Observable - }: - // Unwrap any global. 
- async := conv.Unwrap() - var ok bool - if oImpl, ok = async.(float64Observable); !ok { - global.Error(errUnknownObserver, "failed to record asynchronous") - return - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: global.Error(errUnknownObserver, "failed to record") return } -<<<<<<< HEAD if _, registered := r.float64[oImpl.observableID]; !registered { -======= - if _, registered := r.float64[oImpl.observablID]; !registered { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !oImpl.dropAggregation { global.Error(errUnregObserver, "failed to record", "name", oImpl.name, @@ -642,16 +555,12 @@ func (r observer) ObserveFloat64(o metric.Float64Observable, v float64, opts ... return } c := metric.NewObserveConfig(opts) -<<<<<<< HEAD // Access to r.pipe.float64Measure is already guarded by a lock in pipeline.produce. // TODO (#5946): Refactor pipeline and observable measures. measures := r.pipe.float64Measures[oImpl.observableID] for _, m := range measures { m(context.Background(), v, c.Attributes()) } -======= - oImpl.observe(v, c.Attributes()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric.ObserveOption) { @@ -659,29 +568,12 @@ func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric switch conv := o.(type) { case int64Observable: oImpl = conv -<<<<<<< HEAD -======= - case interface { - Unwrap() metric.Observable - }: - // Unwrap any global. - async := conv.Unwrap() - var ok bool - if oImpl, ok = async.(int64Observable); !ok { - global.Error(errUnknownObserver, "failed to record asynchronous") - return - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) default: global.Error(errUnknownObserver, "failed to record") return } -<<<<<<< HEAD if _, registered := r.int64[oImpl.observableID]; !registered { -======= - if _, registered := r.int64[oImpl.observablID]; !registered { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !oImpl.dropAggregation { global.Error(errUnregObserver, "failed to record", "name", oImpl.name, @@ -693,16 +585,12 @@ func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric return } c := metric.NewObserveConfig(opts) -<<<<<<< HEAD // Access to r.pipe.int64Measures is already guarded b a lock in pipeline.produce. // TODO (#5946): Refactor pipeline and observable measures. 
measures := r.pipe.int64Measures[oImpl.observableID] for _, m := range measures { m(context.Background(), v, c.Attributes()) } -======= - oImpl.observe(v, c.Attributes()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type noopRegister struct{ embedded.Registration } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go index 9ff2da33fe..dcd2182d9a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go @@ -251,29 +251,17 @@ func (r *PeriodicReader) collect(ctx context.Context, p interface{}, rm *metricd if err != nil { return err } -<<<<<<< HEAD for _, producer := range r.externalProducers.Load().([]Producer) { externalMetrics, e := producer.Produce(ctx) if e != nil { err = errors.Join(err, e) -======= - var errs []error - for _, producer := range r.externalProducers.Load().([]Producer) { - externalMetrics, err := producer.Produce(ctx) - if err != nil { - errs = append(errs, err) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...) } global.Debug("PeriodicReader collection", "Data", rm) -<<<<<<< HEAD return err -======= - return unifyErrors(errs) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // export exports metric data m using r's exporter. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go index 63a25580cc..775e245261 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go @@ -8,23 +8,13 @@ import ( "context" "errors" "fmt" -<<<<<<< HEAD -======= - "strings" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "sync" "sync/atomic" "go.opentelemetry.io/otel/internal/global" -<<<<<<< HEAD "go.opentelemetry.io/otel/metric/embedded" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/metric/exemplar" -======= - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/embedded" - "go.opentelemetry.io/otel/sdk/instrumentation" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go.opentelemetry.io/otel/sdk/metric/internal" "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" "go.opentelemetry.io/otel/sdk/metric/internal/x" @@ -47,27 +37,17 @@ type instrumentSync struct { compAgg aggregate.ComputeAggregation } -<<<<<<< HEAD func newPipeline(res *resource.Resource, reader Reader, views []View, exemplarFilter exemplar.Filter) *pipeline { -======= -func newPipeline(res *resource.Resource, reader Reader, views []View) *pipeline { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if res == nil { res = resource.Empty() } return &pipeline{ -<<<<<<< HEAD resource: res, reader: reader, views: views, int64Measures: map[observableID[int64]][]aggregate.Measure[int64]{}, float64Measures: map[observableID[float64]][]aggregate.Measure[float64]{}, exemplarFilter: exemplarFilter, -======= - resource: res, - reader: reader, - views: views, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // aggregations is lazy allocated when needed. 
} } @@ -85,7 +65,6 @@ type pipeline struct { views []View sync.Mutex -<<<<<<< HEAD int64Measures map[observableID[int64]][]aggregate.Measure[int64] float64Measures map[observableID[float64]][]aggregate.Measure[float64] aggregations map[instrumentation.Scope][]instrumentSync @@ -106,11 +85,6 @@ func (p *pipeline) addFloat64Measure(id observableID[float64], m []aggregate.Mea p.Lock() defer p.Unlock() p.float64Measures[id] = m -======= - aggregations map[instrumentation.Scope][]instrumentSync - callbacks []func(context.Context) error - multiCallbacks list.List ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // addSync adds the instrumentSync to pipeline p with scope. This method is not @@ -150,7 +124,6 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) p.Lock() defer p.Unlock() -<<<<<<< HEAD var err error for _, c := range p.callbacks { // TODO make the callbacks parallel. ( #3034 ) @@ -160,16 +133,6 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) if err := ctx.Err(); err != nil { rm.Resource = nil clear(rm.ScopeMetrics) // Erase elements to let GC collect objects. -======= - var errs multierror - for _, c := range p.callbacks { - // TODO make the callbacks parallel. ( #3034 ) - if err := c(ctx); err != nil { - errs.append(err) - } - if err := ctx.Err(); err != nil { - rm.Resource = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) rm.ScopeMetrics = rm.ScopeMetrics[:0] return err } @@ -177,21 +140,13 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) for e := p.multiCallbacks.Front(); e != nil; e = e.Next() { // TODO make the callbacks parallel. ( #3034 ) f := e.Value.(multiCallback) -<<<<<<< HEAD if e := f(ctx); e != nil { err = errors.Join(err, e) -======= - if err := f(ctx); err != nil { - errs.append(err) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if err := ctx.Err(); err != nil { // This means the context expired before we finished running callbacks. rm.Resource = nil -<<<<<<< HEAD clear(rm.ScopeMetrics) // Erase elements to let GC collect objects. -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) rm.ScopeMetrics = rm.ScopeMetrics[:0] return err } @@ -223,11 +178,7 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) rm.ScopeMetrics = rm.ScopeMetrics[:i] -<<<<<<< HEAD return err -======= - return errs.errorOrNil() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // inserter facilitates inserting of new instruments from a single scope into a @@ -289,11 +240,7 @@ func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation) measures []aggregate.Measure[N] ) -<<<<<<< HEAD var err error -======= - errs := &multierror{wrapped: errCreatingAggregators} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) seen := make(map[uint64]struct{}) for _, v := range i.pipeline.views { stream, match := v(inst) @@ -301,15 +248,9 @@ func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation) continue } matched = true -<<<<<<< HEAD in, id, e := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation) if e != nil { err = errors.Join(err, e) -======= - in, id, err := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation) - if err != nil { - errs.append(err) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if in == nil { // Drop aggregation. 
continue @@ -322,17 +263,12 @@ func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation) measures = append(measures, in) } -<<<<<<< HEAD if err != nil { err = errors.Join(errCreatingAggregators, err) } if matched { return measures, err -======= - if matched { - return measures, errs.errorOrNil() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Apply implicit default view if no explicit matched. @@ -341,28 +277,18 @@ func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation) Description: inst.Description, Unit: inst.Unit, } -<<<<<<< HEAD in, _, e := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation) if e != nil { if err == nil { err = errCreatingAggregators } err = errors.Join(err, e) -======= - in, _, err := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation) - if err != nil { - errs.append(err) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if in != nil { // Ensured to have not seen given matched was false. measures = append(measures, in) } -<<<<<<< HEAD return measures, err -======= - return measures, errs.errorOrNil() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // addCallback registers a single instrument callback to be run when @@ -431,12 +357,9 @@ func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind Instrum // The view explicitly requested the default aggregation. stream.Aggregation = DefaultAggregationSelector(kind) } -<<<<<<< HEAD if stream.ExemplarReservoirProviderSelector == nil { stream.ExemplarReservoirProviderSelector = DefaultExemplarReservoirProviderSelector } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err := isAggregatorCompatible(kind, stream.Aggregation); err != nil { return nil, 0, fmt.Errorf( @@ -457,11 +380,7 @@ func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind Instrum cv := i.aggregators.Lookup(normID, func() aggVal[N] { b := aggregate.Builder[N]{ Temporality: i.pipeline.reader.temporality(kind), -<<<<<<< HEAD ReservoirFunc: reservoirFunc[N](stream.ExemplarReservoirProviderSelector(stream.Aggregation), i.pipeline.exemplarFilter), -======= - ReservoirFunc: reservoirFunc[N](stream.Aggregation), ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } b.Filter = stream.AttributeFilter // A value less than or equal to zero will disable the aggregation @@ -664,34 +583,16 @@ func isAggregatorCompatible(kind InstrumentKind, agg Aggregation) error { // measurement. 
type pipelines []*pipeline -<<<<<<< HEAD func newPipelines(res *resource.Resource, readers []Reader, views []View, exemplarFilter exemplar.Filter) pipelines { pipes := make([]*pipeline, 0, len(readers)) for _, r := range readers { p := newPipeline(res, r, views, exemplarFilter) -======= -func newPipelines(res *resource.Resource, readers []Reader, views []View) pipelines { - pipes := make([]*pipeline, 0, len(readers)) - for _, r := range readers { - p := newPipeline(res, r, views) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) r.register(p) pipes = append(pipes, p) } return pipes } -<<<<<<< HEAD -======= -func (p pipelines) registerMultiCallback(c multiCallback) metric.Registration { - unregs := make([]func(), len(p)) - for i, pipe := range p { - unregs[i] = pipe.addMultiCallback(c) - } - return unregisterFuncs{f: unregs} -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type unregisterFuncs struct { embedded.Registration f []func() @@ -724,7 +625,6 @@ func newResolver[N int64 | float64](p pipelines, vc *cache[string, instID]) reso func (r resolver[N]) Aggregators(id Instrument) ([]aggregate.Measure[N], error) { var measures []aggregate.Measure[N] -<<<<<<< HEAD var err error for _, i := range r.inserters { in, e := i.Instrument(id, i.readerDefaultAggregation(id.Kind)) @@ -734,17 +634,6 @@ func (r resolver[N]) Aggregators(id Instrument) ([]aggregate.Measure[N], error) measures = append(measures, in...) } return measures, err -======= - errs := &multierror{} - for _, i := range r.inserters { - in, err := i.Instrument(id, i.readerDefaultAggregation(id.Kind)) - if err != nil { - errs.append(err) - } - measures = append(measures, in...) - } - return measures, errs.errorOrNil() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // HistogramAggregators returns the histogram Aggregators that must be updated by the instrument @@ -753,18 +642,13 @@ func (r resolver[N]) Aggregators(id Instrument) ([]aggregate.Measure[N], error) func (r resolver[N]) HistogramAggregators(id Instrument, boundaries []float64) ([]aggregate.Measure[N], error) { var measures []aggregate.Measure[N] -<<<<<<< HEAD var err error -======= - errs := &multierror{} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, i := range r.inserters { agg := i.readerDefaultAggregation(id.Kind) if histAgg, ok := agg.(AggregationExplicitBucketHistogram); ok && len(boundaries) > 0 { histAgg.Boundaries = boundaries agg = histAgg } -<<<<<<< HEAD in, e := i.Instrument(id, agg) if e != nil { err = errors.Join(err, e) @@ -772,32 +656,4 @@ func (r resolver[N]) HistogramAggregators(id Instrument, boundaries []float64) ( measures = append(measures, in...) } return measures, err -======= - in, err := i.Instrument(id, agg) - if err != nil { - errs.append(err) - } - measures = append(measures, in...) 
- } - return measures, errs.errorOrNil() -} - -type multierror struct { - wrapped error - errors []string -} - -func (m *multierror) errorOrNil() error { - if len(m.errors) == 0 { - return nil - } - if m.wrapped == nil { - return errors.New(strings.Join(m.errors, "; ")) - } - return fmt.Errorf("%w: %s", m.wrapped, strings.Join(m.errors, "; ")) -} - -func (m *multierror) append(err error) { - m.errors = append(m.errors, err.Error()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go b/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go index 5598a7a6c8..2fca89e5a8 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go @@ -42,11 +42,7 @@ func NewMeterProvider(options ...Option) *MeterProvider { flush, sdown := conf.readerSignals() mp := &MeterProvider{ -<<<<<<< HEAD pipes: newPipelines(conf.res, conf.readers, conf.views, conf.exemplarFilter), -======= - pipes: newPipelines(conf.res, conf.readers, conf.views), ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) forceFlush: flush, shutdown: sdown, } @@ -80,26 +76,17 @@ func (mp *MeterProvider) Meter(name string, options ...metric.MeterOption) metri c := metric.NewMeterConfig(options...) s := instrumentation.Scope{ -<<<<<<< HEAD Name: name, Version: c.InstrumentationVersion(), SchemaURL: c.SchemaURL(), Attributes: c.InstrumentationAttributes(), -======= - Name: name, - Version: c.InstrumentationVersion(), - SchemaURL: c.SchemaURL(), ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } global.Info("Meter created", "Name", s.Name, "Version", s.Version, "SchemaURL", s.SchemaURL, -<<<<<<< HEAD "Attributes", s.Attributes, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) return mp.meters.Lookup(s, func() *meter { diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go index d4f76c9121..6347060bf4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go @@ -5,9 +5,5 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric" // version is the current release version of the metric SDK in use. 
func version() string { -<<<<<<< HEAD return "1.32.0" -======= - return "1.29.0" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/view.go b/vendor/go.opentelemetry.io/otel/sdk/metric/view.go index 7df11083ae..630890f426 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/view.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/view.go @@ -96,20 +96,12 @@ func NewView(criteria Instrument, mask Stream) View { return func(i Instrument) (Stream, bool) { if matchFunc(i) { return Stream{ -<<<<<<< HEAD Name: nonZero(mask.Name, i.Name), Description: nonZero(mask.Description, i.Description), Unit: nonZero(mask.Unit, i.Unit), Aggregation: agg, AttributeFilter: mask.AttributeFilter, ExemplarReservoirProviderSelector: mask.ExemplarReservoirProviderSelector, -======= - Name: nonZero(mask.Name, i.Name), - Description: nonZero(mask.Description, i.Description), - Unit: nonZero(mask.Unit, i.Unit), - Aggregation: agg, - AttributeFilter: mask.AttributeFilter, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }, true } return Stream{}, false diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go index 9efba5c021..c02aeefdde 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go @@ -7,10 +7,6 @@ import ( "context" "errors" "fmt" -<<<<<<< HEAD -======= - "strings" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // ErrPartialResource is returned by a detector when complete source @@ -60,22 +56,15 @@ func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) { // these errors will be returned. Otherwise, nil is returned. func detect(ctx context.Context, res *Resource, detectors []Detector) error { var ( -<<<<<<< HEAD r *Resource err error e error -======= - r *Resource - errs detectErrs - err error ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) for _, detector := range detectors { if detector == nil { continue } -<<<<<<< HEAD r, e = detector.Detect(ctx) if e != nil { err = errors.Join(err, e) @@ -86,23 +75,10 @@ func detect(ctx context.Context, res *Resource, detectors []Detector) error { r, e = Merge(res, r) if e != nil { err = errors.Join(err, e) -======= - r, err = detector.Detect(ctx) - if err != nil { - errs = append(errs, err) - if !errors.Is(err, ErrPartialResource) { - continue - } - } - r, err = Merge(res, r) - if err != nil { - errs = append(errs, err) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } *res = *r } -<<<<<<< HEAD if err != nil { if errors.Is(err, ErrSchemaURLConflict) { // If there has been a merge conflict, ensure the resource has no @@ -113,41 +89,4 @@ func detect(ctx context.Context, res *Resource, detectors []Detector) error { err = fmt.Errorf("error detecting resource: %w", err) } return err -======= - if len(errs) == 0 { - return nil - } - if errors.Is(errs, ErrSchemaURLConflict) { - // If there has been a merge conflict, ensure the resource has no - // schema URL. 
- res.schemaURL = "" - } - return errs -} - -type detectErrs []error - -func (e detectErrs) Error() string { - errStr := make([]string, len(e)) - for i, err := range e { - errStr[i] = fmt.Sprintf("* %s", err) - } - - format := "%d errors occurred detecting resource:\n\t%s" - return fmt.Sprintf(format, len(e), strings.Join(errStr, "\n\t")) -} - -func (e detectErrs) Unwrap() error { - switch len(e) { - case 0: - return nil - case 1: - return e[0] - } - return e[1:] -} - -func (e detectErrs) Is(target error) bool { - return len(e) != 0 && errors.Is(e[0], target) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go index 1e8588fb17..cf3c88e15c 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -20,23 +20,13 @@ type ( // telemetrySDK is a Detector that provides information about // the OpenTelemetry SDK used. This Detector is included as a // builtin. If these resource attributes are not wanted, use -<<<<<<< HEAD // resource.New() to explicitly disable them. -======= - // the WithTelemetrySDK(nil) or WithoutBuiltin() options to - // explicitly disable them. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) telemetrySDK struct{} // host is a Detector that provides information about the host // being run on. This Detector is included as a builtin. If // these resource attributes are not wanted, use the -<<<<<<< HEAD // resource.New() to explicitly disable them. -======= - // WithHost(nil) or WithoutBuiltin() options to explicitly - // disable them. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) host struct{} stringDetector struct { diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go index f308630856..3677c83d7d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go @@ -10,28 +10,16 @@ import ( "golang.org/x/sys/windows/registry" ) -<<<<<<< HEAD // implements hostIDReader. type hostIDReaderWindows struct{} // read reads MachineGuid from the Windows registry key: // SOFTWARE\Microsoft\Cryptography. 
-======= -// implements hostIDReader -type hostIDReaderWindows struct{} - -// read reads MachineGuid from the windows registry key: -// SOFTWARE\Microsoft\Cryptography ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (*hostIDReaderWindows) read() (string, error) { k, err := registry.OpenKey( registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Cryptography`, registry.QUERY_VALUE|registry.WOW64_64KEY, ) -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return "", err } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go index b74a4fe847..a6a5a53c0e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go @@ -17,10 +17,6 @@ import ( func platformOSDescription() (string, error) { k, err := registry.OpenKey( registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) -<<<<<<< HEAD -======= - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return "", err } diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index 38a1fae4d1..6b40385107 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -5,9 +5,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { -<<<<<<< HEAD return "1.34.0" -======= - return "1.29.0" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go index a0ff0a1660..9c0b720a4d 100644 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -213,11 +213,7 @@ var _ SpanStartEventOption = attributeOption{} // WithAttributes adds the attributes related to a span life-cycle event. // These attributes are used to describe the work a Span represents when this -<<<<<<< HEAD // option is provided to a Span's start event. Otherwise, these -======= -// option is provided to a Span's start or end events. Otherwise, these ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // attributes provide additional information about the event being recorded // (e.g. error, state change, processing progress, system event). // diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go index c92f47d1ec..8c45a7107f 100644 --- a/vendor/go.opentelemetry.io/otel/trace/context.go +++ b/vendor/go.opentelemetry.io/otel/trace/context.go @@ -22,11 +22,7 @@ func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Cont return ContextWithSpan(parent, nonRecordingSpan{sc: sc}) } -<<<<<<< HEAD // ContextWithRemoteSpanContext returns a copy of parent with rsc set explicitly -======= -// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicly ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // as a remote SpanContext and as the current Span. The Span implementation // that wraps rsc is non-recording and performs no operations other than to // return rsc as the SpanContext from the SpanContext method. 
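Note: the OpenTelemetry SDK hunks above resolve the stale conflict markers by keeping the upstream error handling, which accumulates failures with errors.Join instead of the WIP multierror/detectErrs helpers deleted here. A minimal, self-contained sketch of that accumulation pattern under those assumptions (the step labels are illustrative; only the errCreatingAggregators sentinel comes from the hunks):

package main

import (
	"errors"
	"fmt"
)

var errCreatingAggregators = errors.New("creating aggregators")

func main() {
	// Start from a nil error and join each failure into it, as the
	// resolved HEAD side does in inserter.Instrument and pipeline.produce.
	var err error
	steps := []error{nil, errors.New("view 1: bad aggregation"), errors.New("view 2: bad boundaries")}
	for i, e := range steps {
		if e != nil {
			err = errors.Join(err, fmt.Errorf("step %d: %w", i, e))
		}
	}
	// Wrap with the sentinel only when something actually failed,
	// mirroring `err = errors.Join(errCreatingAggregators, err)` above.
	if err != nil {
		err = errors.Join(errCreatingAggregators, err)
	}
	fmt.Println(err)
	// Joined errors keep their identity, so errors.Is still matches
	// the sentinel through the chain.
	fmt.Println(errors.Is(err, errCreatingAggregators)) // true
}

This also suggests why the HEAD side wins in these hunks: errors.Join preserves errors.Is/errors.As matching for every accumulated error, whereas the deleted multierror flattened appended errors to strings and so lost them.
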
diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go index a17df0da1b..cdbf41d6d7 100644 --- a/vendor/go.opentelemetry.io/otel/trace/doc.go +++ b/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -96,11 +96,7 @@ can embed the API interface directly. This option is not recommended. It will lead to publishing packages that contain runtime panics when users update to newer versions of -<<<<<<< HEAD [go.opentelemetry.io/otel/trace], which may be done with a transitive -======= -[go.opentelemetry.io/otel/trace], which may be done with a trasitive ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) dependency. Finally, an author can embed another implementation in theirs. The embedded diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh deleted file mode 100644 index e57bf57fce..0000000000 --- a/vendor/go.opentelemetry.io/otel/verify_examples.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/bin/bash - -# Copyright The OpenTelemetry Authors -# SPDX-License-Identifier: Apache-2.0 - -set -euo pipefail - -cd $(dirname $0) -TOOLS_DIR=$(pwd)/.tools - -if [ -z "${GOPATH}" ] ; then - printf "GOPATH is not defined.\n" - exit -1 -fi - -if [ ! -d "${GOPATH}" ] ; then - printf "GOPATH ${GOPATH} is invalid \n" - exit -1 -fi - -# Pre-requisites -if ! git diff --quiet; then \ - git status - printf "\n\nError: working tree is not clean\n" - exit -1 -fi - -if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then - printf "$(git log -1)" - printf "\n\nError: HEAD is not pointing to a tagged version" -fi - -make ${TOOLS_DIR}/gojq - -DIR_TMP="${GOPATH}/src/oteltmp/" -rm -rf $DIR_TMP -mkdir -p $DIR_TMP - -printf "Copy examples to ${DIR_TMP}\n" -cp -a ./example ${DIR_TMP} - -# Update go.mod files -printf "Update go.mod: rename module and remove replace\n" - -PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort) - -for dir in $PACKAGE_DIRS; do - printf " Update go.mod for $dir\n" - (cd "${DIR_TMP}/${dir}" && \ - # replaces is ("mod1" "mod2" …) - replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \ - # strip double quotes - replaces=("${replaces[@]%\"}") && \ - replaces=("${replaces[@]#\"}") && \ - # make an array (-dropreplace=mod1 -dropreplace=mod2 …) - dropreplaces=("${replaces[@]/#/-dropreplace=}") && \ - go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \ - go mod tidy) -done -printf "Update done:\n\n" - -# Build directories that contain main package. These directories are different than -# directories that contain go.mod files. -printf "Build examples:\n" -EXAMPLES=$(./get_main_pkgs.sh ./example) -for ex in $EXAMPLES; do - printf " Build $ex in ${DIR_TMP}/${ex}\n" - (cd "${DIR_TMP}/${ex}" && \ - go build .) -done - -# Cleanup -printf "Remove copied files.\n" -rm -rf $DIR_TMP diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index 0a0496e9b7..eb22002d82 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,9 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. 
func Version() string { -<<<<<<< HEAD return "1.34.0" -======= - return "1.29.0" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 24b65ac6ec..ce4fe59b0e 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,26 +3,13 @@ module-sets: stable-v1: -<<<<<<< HEAD version: v1.34.0 -======= - version: v1.29.0 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus - go.opentelemetry.io/otel/bridge/opencensus/test - go.opentelemetry.io/otel/bridge/opentracing - go.opentelemetry.io/otel/bridge/opentracing/test -<<<<<<< HEAD -======= - - go.opentelemetry.io/otel/example/dice - - go.opentelemetry.io/otel/example/namedtracer - - go.opentelemetry.io/otel/example/opencensus - - go.opentelemetry.io/otel/example/otel-collector - - go.opentelemetry.io/otel/example/passthrough - - go.opentelemetry.io/otel/example/zipkin ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - go.opentelemetry.io/otel/exporters/otlp/otlptrace @@ -36,20 +23,11 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: -<<<<<<< HEAD version: v0.56.0 modules: - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: version: v0.10.0 -======= - version: v0.51.0 - modules: - - go.opentelemetry.io/otel/example/prometheus - - go.opentelemetry.io/otel/exporters/prometheus - experimental-logs: - version: v0.5.0 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/sdk/log @@ -57,11 +35,7 @@ module-sets: - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: -<<<<<<< HEAD version: v0.0.12 -======= - version: v0.0.8 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) modules: - go.opentelemetry.io/otel/schema excluded-modules: diff --git a/vendor/go.step.sm/crypto/LICENSE b/vendor/go.step.sm/crypto/LICENSE deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/vendor/go.step.sm/crypto/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/go.step.sm/crypto/fingerprint/fingerprint.go b/vendor/go.step.sm/crypto/fingerprint/fingerprint.go deleted file mode 100644 index 6eb174bdb6..0000000000 --- a/vendor/go.step.sm/crypto/fingerprint/fingerprint.go +++ /dev/null @@ -1,78 +0,0 @@ -package fingerprint - -import ( - "crypto" - "encoding/base64" - "encoding/hex" - "fmt" - "strings" - - "go.step.sm/crypto/internal/emoji" -) - -// Encoding defines the supported encodings for certificates and key -// fingerprints. -// -// This type is the base for sshutil.FingerprintEncoding and -// x509util.FingerprintEncoding types. -type Encoding int - -const ( - // HexFingerprint represents the hex encoding of the fingerprint. - // - // This is the default encoding for an X.509 certificate. - HexFingerprint Encoding = iota + 1 - // Base64Fingerprint represents the base64 encoding of the fingerprint. - // - // This is the default encoding for a public key. - Base64Fingerprint - // Base64URLFingerprint represents the base64URL encoding of the fingerprint. - Base64URLFingerprint - // Base64RawFingerprint represents the base64RawStd encoding of the - // fingerprint. - // - // This is the default encoding for an SSH key and certificate. - Base64RawFingerprint - // Base64RawURLFingerprint represents the base64RawURL encoding of the fingerprint. - Base64RawURLFingerprint - // EmojiFingerprint represents the emoji encoding of the fingerprint. - EmojiFingerprint -) - -// New creates a fingerprint of the given data by hashing it and returns it in -// the encoding format. -func New(data []byte, h crypto.Hash, encoding Encoding) (string, error) { - if !h.Available() { - return "", fmt.Errorf("hash function %q is not available", h.String()) - } - hash := h.New() - if _, err := hash.Write(data); err != nil { - return "", fmt.Errorf("error creating hash: %w", err) - } - fp := Fingerprint(hash.Sum(nil), encoding) - if fp == "" { - return "", fmt.Errorf("unknown encoding value %d", encoding) - } - return fp, nil -} - -// Fingerprint encodes the given digest using the encoding format. If an invalid -// encoding is passed, the return value will be an empty string. -func Fingerprint(digest []byte, encoding Encoding) string { - switch encoding { - case HexFingerprint: - return strings.ToLower(hex.EncodeToString(digest)) - case Base64Fingerprint: - return base64.StdEncoding.EncodeToString(digest) - case Base64URLFingerprint: - return base64.URLEncoding.EncodeToString(digest) - case Base64RawFingerprint: - return base64.RawStdEncoding.EncodeToString(digest) - case Base64RawURLFingerprint: - return base64.RawURLEncoding.EncodeToString(digest) - case EmojiFingerprint: - return emoji.Emoji(digest) - default: - return "" - } -} diff --git a/vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/LICENSE b/vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/LICENSE deleted file mode 100644 index b99c5e3b98..0000000000 --- a/vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2014 Dmitry Chestnykh -Copyright (c) 2010 The Go Authors -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials - provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/README b/vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/README deleted file mode 100644 index fb0fc8b70f..0000000000 --- a/vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/README +++ /dev/null @@ -1,22 +0,0 @@ -Go implementation of bcrypt_pbkdf(3) from OpenBSD -(a variant of PBKDF2 with bcrypt-based PRF). - - -USAGE - - func Key(password, salt []byte, rounds, keyLen int) ([]byte, error) - - - Key derives a key from the password, salt and rounds count, returning a - []byte of length keyLen that can be used as cryptographic key. - - Remember to get a good random salt of at least 16 bytes. Using a higher - rounds count will increase the cost of an exhaustive search but will also - make derivation proportionally slower. - - -REFERENCES - -* https://github.com/dchest/bcrypt_pbkdf -* http://www.tedunangst.com/flak/post/bcrypt-pbkdf -* http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/lib/libutil/bcrypt_pbkdf.c diff --git a/vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/bcrypt_pbkdf.go b/vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/bcrypt_pbkdf.go deleted file mode 100644 index be443c8788..0000000000 --- a/vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/bcrypt_pbkdf.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2014 Dmitry Chestnykh. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package bcrypt_pbkdf implements password-based key derivation function based -// on bcrypt compatible with bcrypt_pbkdf(3) from OpenBSD. -// -//nolint:revive,stylecheck // ignore underscore in package -package bcrypt_pbkdf - -import ( - "crypto/sha512" - "errors" - - // NOTE! Requires blowfish package version from Aug 1, 2014 or later. - // Will produce incorrect results if the package is older. - // See commit message for details: http://goo.gl/wx6g8O - //nolint:staticcheck // needs insecure package - "golang.org/x/crypto/blowfish" -) - -// Key derives a key from the password, salt and rounds count, returning a -// []byte of length keyLen that can be used as cryptographic key. -// -// Remember to get a good random salt of at least 16 bytes. Using a higher -// rounds count will increase the cost of an exhaustive search but will also -// make derivation proportionally slower. 
-func Key(password, salt []byte, rounds, keyLen int) ([]byte, error) { - if rounds < 1 { - return nil, errors.New("bcrypt_pbkdf: number of rounds is too small") - } - if len(password) == 0 { - return nil, errors.New("bcrypt_pbkdf: empty password") - } - if len(salt) == 0 || len(salt) > 1<<20 { - return nil, errors.New("bcrypt_pbkdf: bad salt length") - } - if keyLen > 1024 { - return nil, errors.New("bcrypt_pbkdf: keyLen is too large") - } - var shapass, shasalt [sha512.Size]byte - var out, tmp [32]byte - var cnt [4]byte - - numBlocks := (keyLen + len(out) - 1) / len(out) - key := make([]byte, numBlocks*len(out)) - - h := sha512.New() - h.Write(password) - h.Sum(shapass[:0]) - - for block := 1; block <= numBlocks; block++ { - h.Reset() - h.Write(salt) - cnt[0] = byte(block >> 24) - cnt[1] = byte(block >> 16) - cnt[2] = byte(block >> 8) - cnt[3] = byte(block) - h.Write(cnt[:]) - bcryptHash(tmp[:], shapass[:], h.Sum(shasalt[:0])) - copy(out[:], tmp[:]) - - for i := 2; i <= rounds; i++ { - h.Reset() - h.Write(tmp[:]) - bcryptHash(tmp[:], shapass[:], h.Sum(shasalt[:0])) - for j := 0; j < len(out); j++ { - out[j] ^= tmp[j] - } - } - - for i, v := range out { - key[i*numBlocks+(block-1)] = v - } - } - return key[:keyLen], nil -} - -var magic = []byte("OxychromaticBlowfishSwatDynamite") - -func bcryptHash(out, shapass, shasalt []byte) { - c, err := blowfish.NewSaltedCipher(shapass, shasalt) - if err != nil { - panic(err) - } - for i := 0; i < 64; i++ { - blowfish.ExpandKey(shasalt, c) - blowfish.ExpandKey(shapass, c) - } - copy(out, magic) - for i := 0; i < 32; i += 8 { - for j := 0; j < 64; j++ { - c.Encrypt(out[i:i+8], out[i:i+8]) - } - } - // Swap bytes due to different endianness. - for i := 0; i < 32; i += 4 { - out[i+3], out[i+2], out[i+1], out[i] = out[i], out[i+1], out[i+2], out[i+3] - } -} diff --git a/vendor/go.step.sm/crypto/internal/emoji/emoji.go b/vendor/go.step.sm/crypto/internal/emoji/emoji.go deleted file mode 100644 index 7235cff1f5..0000000000 --- a/vendor/go.step.sm/crypto/internal/emoji/emoji.go +++ /dev/null @@ -1,274 +0,0 @@ -package emoji - -import "strings" - -func Emoji(input []byte) string { - var b strings.Builder - for _, r := range input { - b.WriteString(emojiCodeMap[r]) - } - return b.String() -} - -// emojiCodeMap is a mapping from byte to emoji. -// -// The mapping is based on draft+2 of https://github.com/emojisum/emojisum. 
-// (see: https://github.com/emojisum/emojisum/releases/tag/draft%2B2) -var emojiCodeMap = []string{ - "\U0001f44d", // 👍 :+1: - "\U0001f3b1", // 🎱 :8ball: - "\u2708\ufe0f", // ✈️ :airplane: - "\U0001f47d", // 👽 :alien: - "\u2693", // ⚓ :anchor: - "\U0001f47c", // 👼 :angel: - "\U0001f620", // 😠 :angry: - "\U0001f41c", // 🐜 :ant: - "\U0001f34e", // 🍎 :apple: - "\U0001f3a8", // 🎨 :art: - "\U0001f476", // 👶 :baby: - "\U0001f37c", // 🍼 :baby_bottle: - "\U0001f519", // 🔙 :back: - "\U0001f38d", // 🎍 :bamboo: - "\U0001f34c", // 🍌 :banana: - "\U0001f488", // 💈 :barber: - "\U0001f6c1", // 🛁 :bathtub: - "\U0001f37a", // 🍺 :beer: - "\U0001f514", // 🔔 :bell: - "\U0001f6b4\u200d\u2642\ufe0f", // 🚴‍♂️ :bicyclist: - "\U0001f426", // 🐦 :bird: - "\U0001f382", // 🎂 :birthday: - "\U0001f33c", // 🌼 :blossom: - "\U0001f699", // 🚙 :blue_car: - "\U0001f417", // 🐗 :boar: - "\U0001f4a3", // 💣 :bomb: - "\U0001f4a5", // 💥 :boom: - "\U0001f647\u200d\u2642\ufe0f", // 🙇‍♂️ :bow: - "\U0001f466", // 👦 :boy: - "\U0001f494", // 💔 :broken_heart: - "\U0001f4a1", // 💡 :bulb: - "\U0001f68c", // 🚌 :bus: - "\U0001f335", // 🌵 :cactus: - "\U0001f4c6", // 📆 :calendar: - "\U0001f4f7", // 📷 :camera: - "\U0001f36c", // 🍬 :candy: - "\U0001f431", // 🐱 :cat: - "\U0001f352", // 🍒 :cherries: - "\U0001f6b8", // 🚸 :children_crossing: - "\U0001f36b", // 🍫 :chocolate_bar: - "\U0001f44f", // 👏 :clap: - "\u2601\ufe0f", // ☁️ :cloud: - "\u2663\ufe0f", // ♣️ :clubs: - "\U0001f1e8\U0001f1f3", // 🇨🇳 :cn: - "\u2615", // ☕ :coffee: - "\U0001f6a7", // 🚧 :construction: - "\U0001f36a", // 🍪 :cookie: - "\u00a9\ufe0f", // ©️ :copyright: - "\U0001f33d", // 🌽 :corn: - "\U0001f42e", // 🐮 :cow: - "\U0001f319", // 🌙 :crescent_moon: - "\U0001f451", // 👑 :crown: - "\U0001f622", // 😢 :cry: - "\U0001f52e", // 🔮 :crystal_ball: - "\u27b0", // ➰ :curly_loop: - "\U0001f46f\u200d\u2640\ufe0f", // 👯‍♀️ :dancers: - "\U0001f4a8", // 💨 :dash: - "\U0001f1e9\U0001f1ea", // 🇩🇪 :de: - "\u2666\ufe0f", // ♦️ :diamonds: - "\U0001f436", // 🐶 :dog: - "\U0001f369", // 🍩 :doughnut: - "\U0001f409", // 🐉 :dragon: - "\U0001f4c0", // 📀 :dvd: - "\U0001f442", // 👂 :ear: - "\U0001f346", // 🍆 :eggplant: - "\U0001f418", // 🐘 :elephant: - "\U0001f51a", // 🔚 :end: - "\u2709", // ✉ :envelope: - "\U0001f1ea\U0001f1f8", // 🇪🇸 :es: - "\U0001f440", // 👀 :eyes: - "\U0001f44a", // 👊 :facepunch: - "\U0001f468\u200d\U0001f469\u200d\U0001f466", // 👨‍👩‍👦 :family: - "\U0001f3a1", // 🎡 :ferris_wheel: - "\U0001f630", // 😰 :cold_sweat: - "\U0001f525", // 🔥 :fire: - "\U0001f386", // 🎆 :fireworks: - "\U0001f4be", // 💾 :floppy_disk: - "\U0001f3c8", // 🏈 :football: - "\U0001f374", // 🍴 :fork_and_knife: - "\U0001f340", // 🍀 :four_leaf_clover: - "\U0001f1eb\U0001f1f7", // 🇫🇷 :fr: - "\U0001f35f", // 🍟 :fries: - "\U0001f95c", // 🥜 :peanuts: - "\U0001f595", // 🖕 :fu: - "\U0001f315", // 🌕 :full_moon: - "\U0001f3b2", // 🎲 :game_die: - "\U0001f1ea\U0001f1fa", // 🇪🇺 :eu: - "\U0001f48e", // 💎 :gem: - "\U0001f467", // 👧 :girl: - "\U0001f410", // 🐐 :goat: - "\U0001f62c", // 😬 :grimacing: - "\U0001f601", // 😁 :grin: - "\U0001f482\u200d\u2642\ufe0f", // 💂‍♂️ :guardsman: - "\U0001f3b8", // 🎸 :guitar: - "\U0001f52b", // 🔫 :gun: - "\U0001f354", // 🍔 :hamburger: - "\U0001f528", // 🔨 :hammer: - "\U0001f439", // 🐹 :hamster: - "\U0001f649", // 🙉 :hear_no_evil: - "\u2764\ufe0f", // ❤️ :heart: - "\U0001f63b", // 😻 :heart_eyes_cat: - "\u2763\ufe0f", // ❣️ :heavy_heart_exclamation: - "\u2714\ufe0f", // ✔️ :heavy_check_mark: - "\U0001f5ff", // 🗿 :moyai: - "\U0001f3ee", // 🏮 :izakaya_lantern: - "\U0001f681", // 🚁 :helicopter: - 
"\U0001f52a", // 🔪 :hocho: - "\U0001f41d", // 🐝 :honeybee: - "\U0001f434", // 🐴 :horse: - "\U0001f3c7", // 🏇 :horse_racing: - "\u231b", // ⌛ :hourglass: - "\U0001f3e0", // 🏠 :house: - "\U0001f575\ufe0f\u200d\u2640\ufe0f", // 🕵️‍♀️ :female_detective: - "\U0001f366", // 🍦 :icecream: - "\U0001f47f", // 👿 :imp: - "\U0001f1ee\U0001f1f9", // 🇮🇹 :it: - "\U0001f383", // 🎃 :jack_o_lantern: - "\U0001f47a", // 👺 :japanese_goblin: - "\U0001f1ef\U0001f1f5", // 🇯🇵 :jp: - "\U0001f511", // 🔑 :key: - "\U0001f48b", // 💋 :kiss: - "\U0001f63d", // 😽 :kissing_cat: - "\U0001f428", // 🐨 :koala: - "\U0001f1f0\U0001f1f7", // 🇰🇷 :kr: - "\U0001f34b", // 🍋 :lemon: - "\U0001f484", // 💄 :lipstick: - "\U0001f512", // 🔒 :lock: - "\U0001f36d", // 🍭 :lollipop: - "\U0001f468", // 👨 :man: - "\U0001f341", // 🍁 :maple_leaf: - "\U0001f637", // 😷 :mask: - "\U0001f918", // 🤘 :metal: - "\U0001f52c", // 🔬 :microscope: - "\U0001f4b0", // 💰 :moneybag: - "\U0001f412", // 🐒 :monkey: - "\U0001f5fb", // 🗻 :mount_fuji: - "\U0001f4aa", // 💪 :muscle: - "\U0001f344", // 🍄 :mushroom: - "\U0001f3b9", // 🎹 :musical_keyboard: - "\U0001f3bc", // 🎼 :musical_score: - "\U0001f485", // 💅 :nail_care: - "\U0001f311", // 🌑 :new_moon: - "\u26d4", // ⛔ :no_entry: - "\U0001f443", // 👃 :nose: - "\U0001f39b\ufe0f", // 🎛️ :control_knobs: - "\U0001f529", // 🔩 :nut_and_bolt: - "\u2b55", // ⭕ :o: - "\U0001f30a", // 🌊 :ocean: - "\U0001f44c", // 👌 :ok_hand: - "\U0001f51b", // 🔛 :on: - "\U0001f4e6", // 📦 :package: - "\U0001f334", // 🌴 :palm_tree: - "\U0001f43c", // 🐼 :panda_face: - "\U0001f4ce", // 📎 :paperclip: - "\u26c5", // ⛅ :partly_sunny: - "\U0001f6c2", // 🛂 :passport_control: - "\U0001f43e", // 🐾 :paw_prints: - "\U0001f351", // 🍑 :peach: - "\U0001f427", // 🐧 :penguin: - "\u260e\ufe0f", // ☎️ :phone: - "\U0001f437", // 🐷 :pig: - "\U0001f48a", // 💊 :pill: - "\U0001f34d", // 🍍 :pineapple: - "\U0001f355", // 🍕 :pizza: - "\U0001f448", // 👈 :point_left: - "\U0001f449", // 👉 :point_right: - "\U0001f4a9", // 💩 :poop: - "\U0001f357", // 🍗 :poultry_leg: - "\U0001f64f", // 🙏 :pray: - "\U0001f478", // 👸 :princess: - "\U0001f45b", // 👛 :purse: - "\U0001f4cc", // 📌 :pushpin: - "\U0001f430", // 🐰 :rabbit: - "\U0001f308", // 🌈 :rainbow: - "\u270b", // ✋ :raised_hand: - "\u267b\ufe0f", // ♻️ :recycle: - "\U0001f697", // 🚗 :red_car: - "\u00ae\ufe0f", // ®️ :registered: - "\U0001f380", // 🎀 :ribbon: - "\U0001f35a", // 🍚 :rice: - "\U0001f680", // 🚀 :rocket: - "\U0001f3a2", // 🎢 :roller_coaster: - "\U0001f413", // 🐓 :rooster: - "\U0001f1f7\U0001f1fa", // 🇷🇺 :ru: - "\u26f5", // ⛵ :sailboat: - "\U0001f385", // 🎅 :santa: - "\U0001f6f0\ufe0f", // 🛰️ :satellite: - "\U0001f606", // 😆 :satisfied: - "\U0001f3b7", // 🎷 :saxophone: - "\u2702\ufe0f", // ✂️ :scissors: - "\U0001f648", // 🙈 :see_no_evil: - "\U0001f411", // 🐑 :sheep: - "\U0001f41a", // 🐚 :shell: - "\U0001f45e", // 👞 :shoe: - "\U0001f3bf", // 🎿 :ski: - "\U0001f480", // 💀 :skull: - "\U0001f62a", // 😪 :sleepy: - "\U0001f604", // 😄 :smile: - "\U0001f63a", // 😺 :smiley_cat: - "\U0001f60f", // 😏 :smirk: - "\U0001f6ac", // 🚬 :smoking: - "\U0001f40c", // 🐌 :snail: - "\U0001f40d", // 🐍 :snake: - "\u2744\ufe0f", // ❄️ :snowflake: - "\u26bd", // ⚽ :soccer: - "\U0001f51c", // 🔜 :soon: - "\U0001f47e", // 👾 :space_invader: - "\u2660\ufe0f", // ♠️ :spades: - "\U0001f64a", // 🙊 :speak_no_evil: - "\u2b50", // ⭐ :star: - "\u26f2", // ⛲ :fountain: - "\U0001f5fd", // 🗽 :statue_of_liberty: - "\U0001f682", // 🚂 :steam_locomotive: - "\U0001f33b", // 🌻 :sunflower: - "\U0001f60e", // 😎 :sunglasses: - "\u2600\ufe0f", // ☀️ :sunny: - "\U0001f305", // 🌅 
:sunrise: - "\U0001f3c4\u200d\u2642\ufe0f", // 🏄‍♂️ :surfer: - "\U0001f3ca\u200d\u2642\ufe0f", // 🏊‍♂️ :swimmer: - "\U0001f489", // 💉 :syringe: - "\U0001f389", // 🎉 :tada: - "\U0001f34a", // 🍊 :tangerine: - "\U0001f695", // 🚕 :taxi: - "\U0001f3be", // 🎾 :tennis: - "\u26fa", // ⛺ :tent: - "\U0001f4ad", // 💭 :thought_balloon: - "\u2122\ufe0f", // ™️ :tm: - "\U0001f6bd", // 🚽 :toilet: - "\U0001f445", // 👅 :tongue: - "\U0001f3a9", // 🎩 :tophat: - "\U0001f69c", // 🚜 :tractor: - "\U0001f68e", // 🚎 :trolleybus: - "\U0001f922", // 🤢 :nauseated_face: - "\U0001f3c6", // 🏆 :trophy: - "\U0001f3ba", // 🎺 :trumpet: - "\U0001f422", // 🐢 :turtle: - "\U0001f3a0", // 🎠 :carousel_horse: - "\U0001f46d", // 👭 :two_women_holding_hands: - "\U0001f1ec\U0001f1e7", // 🇬🇧 :uk: - "\u2602\ufe0f", // ☂️ :umbrella: - "\U0001f513", // 🔓 :unlock: - "\U0001f1fa\U0001f1f8", // 🇺🇸 :us: - "\u270c\ufe0f", // ✌️ :v: - "\U0001f4fc", // 📼 :vhs: - "\U0001f3bb", // 🎻 :violin: - "\u26a0\ufe0f", // ⚠️ :warning: - "\U0001f349", // 🍉 :watermelon: - "\U0001f44b", // 👋 :wave: - "\u3030\ufe0f", // 〰️ :wavy_dash: - "\U0001f6be", // 🚾 :wc: - "\u267f", // ♿ :wheelchair: - "\U0001f469", // 👩 :woman: - "\u274c", // ❌ :x: - "\U0001f60b", // 😋 :yum: - "\u26a1", // ⚡ :zap: - "\U0001f4a4", // 💤 :zzz: -} diff --git a/vendor/go.step.sm/crypto/internal/utils/io.go b/vendor/go.step.sm/crypto/internal/utils/io.go deleted file mode 100644 index ccccf5f94f..0000000000 --- a/vendor/go.step.sm/crypto/internal/utils/io.go +++ /dev/null @@ -1,70 +0,0 @@ -package utils - -import ( - "bytes" - "io" - "os" - "unicode" - - "github.com/pkg/errors" - - "go.step.sm/crypto/internal/utils/utfbom" -) - -func maybeUnwrap(err error) error { - if wrapped := errors.Unwrap(err); wrapped != nil { - return wrapped - } - return err -} - -// stdinFilename is the name of the file that is used in many command -// line utilities to denote input is to be read from STDIN. -const stdinFilename = "-" - -// stdin points to STDIN through os.Stdin. -var stdin = os.Stdin - -// ReadFile reads the file identified by filename and returns -// the contents. If filename is equal to "-", it will read from -// STDIN. -func ReadFile(filename string) (b []byte, err error) { - if filename == stdinFilename { - filename = "/dev/stdin" - b, err = io.ReadAll(stdin) - } else { - var contents []byte - contents, err = os.ReadFile(filename) - if err != nil { - return nil, errors.Wrapf(maybeUnwrap(err), "error reading %q", filename) - } - b, err = io.ReadAll(utfbom.SkipOnly(bytes.NewReader(contents))) - } - if err != nil { - return nil, errors.Wrapf(maybeUnwrap(err), "error reading %q", filename) - } - return -} - -// ReadPasswordFromFile reads and returns the password from the given filename. -// The contents of the file will be trimmed at the right. -func ReadPasswordFromFile(filename string) ([]byte, error) { - password, err := ReadFile(filename) - if err != nil { - return nil, errors.Wrapf(err, "error reading %s", filename) - } - password = bytes.TrimRightFunc(password, unicode.IsSpace) - return password, nil -} - -// WriteFile writes data to a file named by filename. -// If the file does not exist, WriteFile creates it with permissions perm -// (before umask); otherwise WriteFile truncates it before writing. -// -// It wraps os.WriteFile wrapping the errors. 
-func WriteFile(filename string, data []byte, perm os.FileMode) error { - if err := os.WriteFile(filename, data, perm); err != nil { - return errors.Wrapf(maybeUnwrap(err), "error writing %s", filename) - } - return nil -} diff --git a/vendor/go.step.sm/crypto/internal/utils/utfbom/LICENSE b/vendor/go.step.sm/crypto/internal/utils/utfbom/LICENSE deleted file mode 100644 index 6279cb87f4..0000000000 --- a/vendor/go.step.sm/crypto/internal/utils/utfbom/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright (c) 2018-2020, Dmitrij Koniajev (dimchansky@gmail.com) - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/go.step.sm/crypto/internal/utils/utfbom/README.md b/vendor/go.step.sm/crypto/internal/utils/utfbom/README.md deleted file mode 100644 index 8ece280089..0000000000 --- a/vendor/go.step.sm/crypto/internal/utils/utfbom/README.md +++ /dev/null @@ -1,66 +0,0 @@ -# utfbom [![Godoc](https://godoc.org/github.com/dimchansky/utfbom?status.png)](https://godoc.org/github.com/dimchansky/utfbom) [![License](https://img.shields.io/:license-apache-blue.svg)](https://opensource.org/licenses/Apache-2.0) [![Build Status](https://travis-ci.org/dimchansky/utfbom.svg?branch=master)](https://travis-ci.org/dimchansky/utfbom) [![Go Report Card](https://goreportcard.com/badge/github.com/dimchansky/utfbom)](https://goreportcard.com/report/github.com/dimchansky/utfbom) [![Coverage Status](https://coveralls.io/repos/github/dimchansky/utfbom/badge.svg?branch=master)](https://coveralls.io/github/dimchansky/utfbom?branch=master) - -The package utfbom implements the detection of the BOM (Unicode Byte Order Mark) and removing as necessary. It can also return the encoding detected by the BOM. - -## Installation - - go get -u github.com/dimchansky/utfbom - -## Example - -```go -package main - -import ( - "bytes" - "fmt" - "io/ioutil" - - "github.com/dimchansky/utfbom" -) - -func main() { - trySkip([]byte("\xEF\xBB\xBFhello")) - trySkip([]byte("hello")) -} - -func trySkip(byteData []byte) { - fmt.Println("Input:", byteData) - - // just skip BOM - output, err := ioutil.ReadAll(utfbom.SkipOnly(bytes.NewReader(byteData))) - if err != nil { - fmt.Println(err) - return - } - fmt.Println("ReadAll with BOM skipping", output) - - // skip BOM and detect encoding - sr, enc := utfbom.Skip(bytes.NewReader(byteData)) - fmt.Printf("Detected encoding: %s\n", enc) - output, err = ioutil.ReadAll(sr) - if err != nil { - fmt.Println(err) - return - } - fmt.Println("ReadAll with BOM detection and skipping", output) - fmt.Println() -} -``` - -Output: - -``` -$ go run main.go -Input: [239 187 191 104 101 108 108 111] -ReadAll with BOM skipping [104 101 108 108 111] -Detected encoding: UTF8 -ReadAll with BOM detection and skipping [104 101 108 108 111] - -Input: [104 101 108 108 111] -ReadAll with BOM skipping [104 101 108 108 111] -Detected encoding: Unknown -ReadAll with BOM detection and skipping [104 101 108 108 111] -``` - - diff --git a/vendor/go.step.sm/crypto/internal/utils/utfbom/utfbom.go b/vendor/go.step.sm/crypto/internal/utils/utfbom/utfbom.go deleted file mode 100644 index 93a144fd2c..0000000000 --- a/vendor/go.step.sm/crypto/internal/utils/utfbom/utfbom.go +++ /dev/null @@ -1,195 +0,0 @@ -// Package utfbom implements the detection of the BOM (Unicode Byte Order Mark) and removing as necessary. 
-// It wraps an io.Reader object, creating another object (Reader) that also implements the io.Reader -// interface but provides automatic BOM checking and removing as necessary. -// -// This package was copied from https://github.com/dimchansky/utfbom. Only minor changes -// were made to not depend on the io/ioutil package and to make our linters pass. -package utfbom - -import ( - "errors" - "io" -) - -// Encoding is type alias for detected UTF encoding. -type Encoding int - -// Constants to identify detected UTF encodings. -const ( - // Unknown encoding, returned when no BOM was detected - Unknown Encoding = iota - - // UTF8, BOM bytes: EF BB BF - UTF8 - - // UTF-16, big-endian, BOM bytes: FE FF - UTF16BigEndian - - // UTF-16, little-endian, BOM bytes: FF FE - UTF16LittleEndian - - // UTF-32, big-endian, BOM bytes: 00 00 FE FF - UTF32BigEndian - - // UTF-32, little-endian, BOM bytes: FF FE 00 00 - UTF32LittleEndian -) - -// String returns a user-friendly string representation of the encoding. Satisfies fmt.Stringer interface. -func (e Encoding) String() string { - switch e { - case UTF8: - return "UTF8" - case UTF16BigEndian: - return "UTF16BigEndian" - case UTF16LittleEndian: - return "UTF16LittleEndian" - case UTF32BigEndian: - return "UTF32BigEndian" - case UTF32LittleEndian: - return "UTF32LittleEndian" - default: - return "Unknown" - } -} - -const maxConsecutiveEmptyReads = 100 - -// Skip creates Reader which automatically detects BOM (Unicode Byte Order Mark) and removes it as necessary. -// It also returns the encoding detected by the BOM. -// If the detected encoding is not needed, you can call the SkipOnly function. -func Skip(rd io.Reader) (*Reader, Encoding) { - // Is it already a Reader? - b, ok := rd.(*Reader) - if ok { - return b, Unknown - } - - enc, left, err := detectUtf(rd) - return &Reader{ - rd: rd, - buf: left, - err: err, - }, enc -} - -// SkipOnly creates Reader which automatically detects BOM (Unicode Byte Order Mark) and removes it as necessary. -func SkipOnly(rd io.Reader) *Reader { - r, _ := Skip(rd) - return r -} - -// Reader implements automatic BOM (Unicode Byte Order Mark) checking and -// removing as necessary for an io.Reader object. -type Reader struct { - rd io.Reader // reader provided by the client - buf []byte // buffered data - err error // last error -} - -// Read is an implementation of io.Reader interface. -// The bytes are taken from the underlying Reader, but it checks for BOMs, removing them as necessary. 
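The `Read` implementation that follows drains the buffered post-BOM bytes before falling through to the wrapped reader. For orientation, a stdlib-only sketch of the UTF-8 branch of the removed `detectUtf` (the removed code also handles UTF-16 and UTF-32 BOMs):

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"strings"
)

// skipUTF8BOM mirrors the UTF-8 branch of the removed detectUtf: if the
// stream begins with EF BB BF, discard those three bytes.
func skipUTF8BOM(r *bufio.Reader) {
	if bom, err := r.Peek(3); err == nil && bytes.Equal(bom, []byte{0xEF, 0xBB, 0xBF}) {
		_, _ = r.Discard(3) // cannot fail here: Peek already buffered these bytes
	}
}

func main() {
	r := bufio.NewReader(strings.NewReader("\xEF\xBB\xBFhello"))
	skipUTF8BOM(r)
	rest, _ := io.ReadAll(r)
	fmt.Println(string(rest)) // "hello", BOM removed
}
```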
-func (r *Reader) Read(p []byte) (n int, err error) { - if len(p) == 0 { - return 0, nil - } - - if r.buf == nil { - if r.err != nil { - return 0, r.readErr() - } - - return r.rd.Read(p) - } - - // copy as much as we can - n = copy(p, r.buf) - r.buf = nilIfEmpty(r.buf[n:]) - return n, nil -} - -func (r *Reader) readErr() error { - err := r.err - r.err = nil - return err -} - -var errNegativeRead = errors.New("utfbom: reader returned negative count from Read") - -func detectUtf(rd io.Reader) (enc Encoding, buf []byte, err error) { - buf, err = readBOM(rd) - - if len(buf) >= 4 { - if isUTF32BigEndianBOM4(buf) { - return UTF32BigEndian, nilIfEmpty(buf[4:]), err - } - if isUTF32LittleEndianBOM4(buf) { - return UTF32LittleEndian, nilIfEmpty(buf[4:]), err - } - } - - if len(buf) > 2 && isUTF8BOM3(buf) { - return UTF8, nilIfEmpty(buf[3:]), err - } - - if (err != nil && !errors.Is(err, io.EOF)) || (len(buf) < 2) { - return Unknown, nilIfEmpty(buf), err - } - - if isUTF16BigEndianBOM2(buf) { - return UTF16BigEndian, nilIfEmpty(buf[2:]), err - } - if isUTF16LittleEndianBOM2(buf) { - return UTF16LittleEndian, nilIfEmpty(buf[2:]), err - } - - return Unknown, nilIfEmpty(buf), err -} - -func readBOM(rd io.Reader) (buf []byte, err error) { - const maxBOMSize = 4 - var bom [maxBOMSize]byte // used to read BOM - - // read as many bytes as possible - for nEmpty, n := 0, 0; err == nil && len(buf) < maxBOMSize; buf = bom[:len(buf)+n] { //nolint:wastedassign // copied code - if n, err = rd.Read(bom[len(buf):]); n < 0 { - return nil, errNegativeRead - } - if n > 0 { - nEmpty = 0 - } else { - nEmpty++ - if nEmpty >= maxConsecutiveEmptyReads { - err = io.ErrNoProgress - } - } - } - return -} - -func isUTF32BigEndianBOM4(buf []byte) bool { - return buf[0] == 0x00 && buf[1] == 0x00 && buf[2] == 0xFE && buf[3] == 0xFF -} - -func isUTF32LittleEndianBOM4(buf []byte) bool { - return buf[0] == 0xFF && buf[1] == 0xFE && buf[2] == 0x00 && buf[3] == 0x00 -} - -func isUTF8BOM3(buf []byte) bool { - return buf[0] == 0xEF && buf[1] == 0xBB && buf[2] == 0xBF -} - -func isUTF16BigEndianBOM2(buf []byte) bool { - return buf[0] == 0xFE && buf[1] == 0xFF -} - -func isUTF16LittleEndianBOM2(buf []byte) bool { - return buf[0] == 0xFF && buf[1] == 0xFE -} - -func nilIfEmpty(buf []byte) (res []byte) { - if len(buf) > 0 { - res = buf - } - return -} diff --git a/vendor/go.step.sm/crypto/jose/encrypt.go b/vendor/go.step.sm/crypto/jose/encrypt.go deleted file mode 100644 index 9b61a5f448..0000000000 --- a/vendor/go.step.sm/crypto/jose/encrypt.go +++ /dev/null @@ -1,135 +0,0 @@ -package jose - -import ( - "encoding/json" - - "github.com/pkg/errors" - "go.step.sm/crypto/randutil" -) - -// MaxDecryptTries is the maximum number of attempts to decrypt a file. -const MaxDecryptTries = 3 - -// PasswordPrompter defines the function signature for the PromptPassword -// callback. -type PasswordPrompter func(s string) ([]byte, error) - -// PromptPassword is a method used to prompt for a password to decode encrypted -// keys. If this method is not defined and the key or password are not passed, -// the parse of the key will fail. -var PromptPassword PasswordPrompter - -// Encrypt returns the given data encrypted with the default encryption -// algorithm (PBES2-HS256+A128KW). -func Encrypt(data []byte, opts ...Option) (*JSONWebEncryption, error) { - ctx, err := new(context).apply(opts...) 
- if err != nil { - return nil, err - } - - var passphrase []byte - switch { - case len(ctx.password) > 0: - passphrase = ctx.password - case ctx.passwordPrompter != nil: - if passphrase, err = ctx.passwordPrompter(ctx.passwordPrompt); err != nil { - return nil, err - } - case PromptPassword != nil: - if passphrase, err = PromptPassword("Please enter the password to encrypt the data"); err != nil { - return nil, err - } - default: - return nil, errors.New("failed to encrypt the data: missing password") - } - - salt, err := randutil.Salt(PBKDF2SaltSize) - if err != nil { - return nil, err - } - - // Encrypt private key using PBES2 - recipient := Recipient{ - Algorithm: PBES2_HS256_A128KW, - Key: passphrase, - PBES2Count: PBKDF2Iterations, - PBES2Salt: salt, - } - - encrypterOptions := new(EncrypterOptions) - if ctx.contentType != "" { - encrypterOptions.WithContentType(ContentType(ctx.contentType)) - } - - encrypter, err := NewEncrypter(DefaultEncAlgorithm, recipient, encrypterOptions) - if err != nil { - return nil, errors.Wrap(err, "error creating cipher") - } - - jwe, err := encrypter.Encrypt(data) - if err != nil { - return nil, errors.Wrap(err, "error encrypting data") - } - - return jwe, nil -} - -// EncryptJWK returns the given JWK encrypted with the default encryption -// algorithm (PBES2-HS256+A128KW). -func EncryptJWK(jwk *JSONWebKey, passphrase []byte) (*JSONWebEncryption, error) { - b, err := json.Marshal(jwk) - if err != nil { - return nil, errors.Wrap(err, "error marshaling JWK") - } - - return Encrypt(b, WithPassword(passphrase), WithContentType("jwk+json")) -} - -// Decrypt returns the decrypted version of the given data if it's encrypted, -// it will return the raw data if it's not encrypted or the format is not -// valid. -func Decrypt(data []byte, opts ...Option) ([]byte, error) { - ctx, err := new(context).apply(opts...) - if err != nil { - return nil, err - } - - enc, err := ParseEncrypted(string(data)) - if err != nil { - return data, nil //nolint:nilerr // Return the given data if we cannot parse it as encrypted. - } - - // Try with the given password. - if len(ctx.password) > 0 { - if data, err = enc.Decrypt(ctx.password); err == nil { - return data, nil - } - return nil, errors.New("failed to decrypt JWE: invalid password") - } - - // Try with a given password prompter. 
- if ctx.passwordPrompter != nil || PromptPassword != nil { - var pass []byte - for i := 0; i < MaxDecryptTries; i++ { - switch { - case ctx.passwordPrompter != nil: - if pass, err = ctx.passwordPrompter(ctx.passwordPrompt); err != nil { - return nil, err - } - case ctx.filename != "": - if pass, err = PromptPassword("Please enter the password to decrypt " + ctx.filename); err != nil { - return nil, err - } - default: - if pass, err = PromptPassword("Please enter the password to decrypt the JWE"); err != nil { - return nil, err - } - } - if data, err = enc.Decrypt(pass); err == nil { - return data, nil - } - } - } - - return nil, errors.New("failed to decrypt JWE: invalid password") -} diff --git a/vendor/go.step.sm/crypto/jose/generate.go b/vendor/go.step.sm/crypto/jose/generate.go deleted file mode 100644 index 4bdc6c44dc..0000000000 --- a/vendor/go.step.sm/crypto/jose/generate.go +++ /dev/null @@ -1,204 +0,0 @@ -package jose - -import ( - "crypto" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/rsa" - "crypto/x509" - "encoding/base64" - - "github.com/pkg/errors" - "go.step.sm/crypto/keyutil" - "go.step.sm/crypto/pemutil" - "go.step.sm/crypto/x25519" -) - -const ( - jwksUsageSig = "sig" - jwksUsageEnc = "enc" - // defaultKeyType is the default type of the one-time token key. - defaultKeyType = EC - // defaultKeyCurve is the default curve of the one-time token key. - defaultKeyCurve = P256 - // defaultKeyAlg is the default algorithm of the one-time token key. - defaultKeyAlg = ES256 - // defaultKeySize is the default size of the one-time token key. - defaultKeySize = 0 -) - -var ( - errAmbiguousCertKeyUsage = errors.New("jose/generate: certificate's key usage is ambiguous, it should be for signature or encipherment, but not both (use --subtle to ignore usage field)") - errNoCertKeyUsage = errors.New("jose/generate: certificate doesn't contain any key usage (use --subtle to ignore usage field)") -) - -// Thumbprint computes the JWK Thumbprint of a key using SHA256 as the hash -// algorithm. It returns the hash encoded in the Base64 raw url encoding. -func Thumbprint(jwk *JSONWebKey) (string, error) { - var sum []byte - var err error - switch key := jwk.Key.(type) { - case x25519.PublicKey: - sum, err = x25519Thumbprint(key, crypto.SHA256) - case x25519.PrivateKey: - var pub x25519.PublicKey - if pub, err = key.PublicKey(); err == nil { - sum, err = x25519Thumbprint(pub, crypto.SHA256) - } - case OpaqueSigner: - sum, err = key.Public().Thumbprint(crypto.SHA256) - default: - sum, err = jwk.Thumbprint(crypto.SHA256) - } - if err != nil { - return "", errors.Wrap(err, "error generating JWK thumbprint") - } - return base64.RawURLEncoding.EncodeToString(sum), nil -} - -// GenerateDefaultKeyPair generates an asymmetric public/private key pair. -// Returns the public key as a JWK and the private key as an encrypted JWE. 
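Before the generator functions that follow, a minimal password round-trip with the `Encrypt`/`Decrypt` helpers removed above. This is a sketch of pre-removal usage, assuming the option names shown in encrypt.go and options.go; `CompactSerialize` comes from the aliased go-jose `JSONWebEncryption`:

```go
package main

import (
	"fmt"

	"go.step.sm/crypto/jose"
)

func main() {
	pass := []byte("correct horse battery staple")

	// Encrypt with the default PBES2-HS256+A128KW recipient shown above.
	jwe, err := jose.Encrypt([]byte("hello"), jose.WithPassword(pass))
	if err != nil {
		panic(err)
	}
	serialized, err := jwe.CompactSerialize()
	if err != nil {
		panic(err)
	}

	// With an explicit password, Decrypt makes a single attempt; the
	// MaxDecryptTries retry loop only applies to prompted passwords.
	plain, err := jose.Decrypt([]byte(serialized), jose.WithPassword(pass))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plain)) // hello
}
```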
-func GenerateDefaultKeyPair(passphrase []byte) (*JSONWebKey, *JSONWebEncryption, error) { - if len(passphrase) == 0 { - return nil, nil, errors.New("step-jose: password cannot be empty when encrypting a JWK") - } - - // Generate the OTT key - jwk, err := GenerateJWK(defaultKeyType, defaultKeyCurve, defaultKeyAlg, jwksUsageSig, "", defaultKeySize) - if err != nil { - return nil, nil, err - } - - jwk.KeyID, err = Thumbprint(jwk) - if err != nil { - return nil, nil, err - } - - jwe, err := EncryptJWK(jwk, passphrase) - if err != nil { - return nil, nil, err - } - - public := jwk.Public() - return &public, jwe, nil -} - -// GenerateJWK generates a JWK given the key type, curve, alg, use, kid and -// the size of the RSA or oct keys if necessary. -func GenerateJWK(kty, crv, alg, use, kid string, size int) (jwk *JSONWebKey, err error) { - if kty == "OKP" && use == "enc" && (crv == "" || crv == "Ed25519") { - return nil, errors.New("invalid algorithm: Ed25519 cannot be used for encryption") - } - - switch { - case kty == "EC" && crv == "": - crv = P256 - case kty == "OKP" && crv == "": - crv = Ed25519 - case kty == "RSA" && size == 0: - size = DefaultRSASize - case kty == "oct" && size == 0: - size = DefaultOctSize - } - - key, err := keyutil.GenerateKey(kty, crv, size) - if err != nil { - return nil, err - } - jwk = &JSONWebKey{ - Key: key, - KeyID: kid, - Use: use, - Algorithm: alg, - } - guessJWKAlgorithm(&context{alg: alg}, jwk) - if jwk.KeyID == "" && kty != "oct" { - jwk.KeyID, err = Thumbprint(jwk) - } - return jwk, err -} - -// GenerateJWKFromPEM returns an incomplete JSONWebKey using the key from a -// PEM file. -func GenerateJWKFromPEM(filename string, subtle bool) (*JSONWebKey, error) { - key, err := pemutil.Read(filename) - if err != nil { - return nil, err - } - - switch key := key.(type) { - case *rsa.PrivateKey, *rsa.PublicKey: - return &JSONWebKey{ - Key: key, - }, nil - case *ecdsa.PrivateKey, *ecdsa.PublicKey, ed25519.PrivateKey, ed25519.PublicKey: - return &JSONWebKey{ - Key: key, - Algorithm: algForKey(key), - }, nil - case *x509.Certificate: - var use string - if !subtle { - use, err = keyUsageForCert(key) - if err != nil { - return nil, err - } - } - return &JSONWebKey{ - Key: key.PublicKey, - Certificates: []*x509.Certificate{key}, - Algorithm: algForKey(key.PublicKey), - Use: use, - }, nil - default: - return nil, errors.Errorf("error parsing %s: unsupported key type '%T'", filename, key) - } -} - -func algForKey(key crypto.PublicKey) string { - switch key := key.(type) { - case *ecdsa.PrivateKey: - return getECAlgorithm(key.Curve) - case *ecdsa.PublicKey: - return getECAlgorithm(key.Curve) - case ed25519.PrivateKey, ed25519.PublicKey: - return EdDSA - default: - return "" - } -} - -func keyUsageForCert(cert *x509.Certificate) (string, error) { - isDigitalSignature := containsUsage(cert.KeyUsage, - x509.KeyUsageDigitalSignature, - x509.KeyUsageContentCommitment, - x509.KeyUsageCertSign, - x509.KeyUsageCRLSign, - ) - isEncipherment := containsUsage(cert.KeyUsage, - x509.KeyUsageKeyEncipherment, - x509.KeyUsageDataEncipherment, - x509.KeyUsageKeyAgreement, - x509.KeyUsageEncipherOnly, - x509.KeyUsageDecipherOnly, - ) - if isDigitalSignature && isEncipherment { - return "", errAmbiguousCertKeyUsage - } - if isDigitalSignature { - return jwksUsageSig, nil - } - if isEncipherment { - return jwksUsageEnc, nil - } - return "", errNoCertKeyUsage -} - -func containsUsage(usage x509.KeyUsage, queries ...x509.KeyUsage) bool { - for _, query := range queries { - if usage&query ==
query { - return true - } - } - return false -} diff --git a/vendor/go.step.sm/crypto/jose/options.go b/vendor/go.step.sm/crypto/jose/options.go deleted file mode 100644 index e1e1393b48..0000000000 --- a/vendor/go.step.sm/crypto/jose/options.go +++ /dev/null @@ -1,125 +0,0 @@ -package jose - -import ( - "go.step.sm/crypto/internal/utils" -) - -type context struct { - filename string - use, alg, kid string - subtle, insecure bool - noDefaults bool - password []byte - passwordPrompt string - passwordPrompter PasswordPrompter - contentType string -} - -// apply applies the options to the context and returns an error if one of the -// options fails. -func (ctx *context) apply(opts ...Option) (*context, error) { - for _, opt := range opts { - if err := opt(ctx); err != nil { - return nil, err - } - } - return ctx, nil -} - -// Option is the type used to add attributes to the context. -type Option func(ctx *context) error - -// WithFilename adds the given filename to the context. -func WithFilename(filename string) Option { - return func(ctx *context) error { - ctx.filename = filename - return nil - } -} - -// WithUse adds the use claim to the context. -func WithUse(use string) Option { - return func(ctx *context) error { - ctx.use = use - return nil - } -} - -// WithAlg adds the alg claim to the context. -func WithAlg(alg string) Option { - return func(ctx *context) error { - ctx.alg = alg - return nil - } -} - -// WithKid adds the kid property to the context. -func WithKid(kid string) Option { - return func(ctx *context) error { - ctx.kid = kid - return nil - } -} - -// WithSubtle marks the context as subtle. -func WithSubtle(subtle bool) Option { - return func(ctx *context) error { - ctx.subtle = subtle - return nil - } -} - -// WithInsecure marks the context as insecure. -func WithInsecure(insecure bool) Option { - return func(ctx *context) error { - ctx.insecure = insecure - return nil - } -} - -// WithNoDefaults prevents the parser from loading default values, especially the -// default algorithms. -func WithNoDefaults(val bool) Option { - return func(ctx *context) error { - ctx.noDefaults = val - return nil - } -} - -// WithPassword is a method that adds the given password to the context. -func WithPassword(pass []byte) Option { - return func(ctx *context) error { - ctx.password = pass - return nil - } -} - -// WithPasswordFile is a method that adds the password in a file to the context. -func WithPasswordFile(filename string) Option { - return func(ctx *context) error { - b, err := utils.ReadPasswordFromFile(filename) - if err != nil { - return err - } - ctx.password = b - return nil - } -} - -// WithPasswordPrompter defines a method that can be used to prompt for the -// password to decrypt an encrypted JWE. -func WithPasswordPrompter(prompt string, fn PasswordPrompter) Option { - return func(ctx *context) error { - ctx.passwordPrompt = prompt - ctx.passwordPrompter = fn - return nil - } -} - -// WithContentType adds the content type when encrypting data.
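These constructors, including the `WithContentType` body that follows, are Go's standard functional-options pattern. A condensed, self-contained re-illustration (the type and option names here are illustrative only, not part of the removed package):

```go
package main

import "fmt"

type config struct {
	filename string
	password []byte
}

// Option mutates a config and may fail, matching the removed jose.Option shape.
type Option func(*config) error

func WithFilename(name string) Option {
	return func(c *config) error { c.filename = name; return nil }
}

func WithPassword(pass []byte) Option {
	return func(c *config) error { c.password = pass; return nil }
}

// apply folds all options into a fresh config, stopping at the first error.
func apply(opts ...Option) (*config, error) {
	c := new(config)
	for _, opt := range opts {
		if err := opt(c); err != nil {
			return nil, err
		}
	}
	return c, nil
}

func main() {
	c, err := apply(WithFilename("key.jwk"), WithPassword([]byte("secret")))
	if err != nil {
		panic(err)
	}
	fmt.Println(c.filename, len(c.password))
}
```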
-func WithContentType(cty string) Option { - return func(ctx *context) error { - ctx.contentType = cty - return nil - } -} diff --git a/vendor/go.step.sm/crypto/jose/parse.go b/vendor/go.step.sm/crypto/jose/parse.go deleted file mode 100644 index 760c4f161f..0000000000 --- a/vendor/go.step.sm/crypto/jose/parse.go +++ /dev/null @@ -1,411 +0,0 @@ -package jose - -import ( - "bytes" - "crypto" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/elliptic" - "crypto/rsa" - "crypto/x509" - "encoding/base64" - "encoding/json" - "io" - "net/http" - "os" - "strings" - "time" - - "github.com/pkg/errors" - "go.step.sm/crypto/pemutil" - "go.step.sm/crypto/x25519" -) - -type keyType int - -const ( - jwkKeyType keyType = iota - pemKeyType - octKeyType -) - -// read returns the bytes from reading a file, or from a url if the filename has -// the prefix https:// -func read(filename string) ([]byte, error) { - if strings.HasPrefix(filename, "https://") { - resp, err := http.Get(filename) //nolint:gosec // no SSRF - if err != nil { - return nil, errors.Wrapf(err, "error retrieving %s", filename) - } - defer resp.Body.Close() - - if resp.StatusCode >= 400 { - return nil, errors.Errorf("error retrieving %s: status code %d", filename, resp.StatusCode) - } - b, err := io.ReadAll(resp.Body) - return b, errors.Wrapf(err, "error retrieving %s", filename) - } - - b, err := os.ReadFile(filename) - if err != nil { - return nil, errors.Wrapf(err, "error reading %s", filename) - } - return b, nil -} - -// ReadKey returns a JSONWebKey from the given JWK or PEM file. If the file is -// password protected, and no password or prompt password function is given it -// will fail. -func ReadKey(filename string, opts ...Option) (*JSONWebKey, error) { - b, err := read(filename) - if err != nil { - return nil, err - } - opts = append(opts, WithFilename(filename)) - return ParseKey(b, opts...) -} - -// ParseKey returns a JSONWebKey from the given JWK file or a PEM file. If the -// file is password protected, and no password or prompt password function is -// given it will fail. -func ParseKey(b []byte, opts ...Option) (*JSONWebKey, error) { - ctx, err := new(context).apply(opts...) - if err != nil { - return nil, err - } - if ctx.filename == "" { - ctx.filename = "key" - } - - jwk := new(JSONWebKey) - switch guessKeyType(ctx, b) { - case jwkKeyType: - // Attempt to parse an encrypted file - if b, err = Decrypt(b, opts...); err != nil { - return nil, err - } - - // Unmarshal the plain (or decrypted JWK) - if err = json.Unmarshal(b, jwk); err != nil { - return nil, errors.Errorf("error reading %s: unsupported format", ctx.filename) - } - - // If KeyID not set by environment, then use the default. - // NOTE: we do not set this value by default in the case of jwkKeyType - // because it is assumed to have been left empty on purpose. - case pemKeyType: - pemOptions := []pemutil.Options{ - pemutil.WithFilename(ctx.filename), - } - if ctx.password != nil { - pemOptions = append(pemOptions, pemutil.WithPassword(ctx.password)) - } - if ctx.passwordPrompter != nil { - pemOptions = append(pemOptions, pemutil.WithPasswordPrompt(ctx.passwordPrompt, pemutil.PasswordPrompter(ctx.passwordPrompter))) - } - if pemutil.PromptPassword == nil && PromptPassword != nil { - pemutil.PromptPassword = pemutil.PasswordPrompter(PromptPassword) - } - - jwk.Key, err = pemutil.ParseKey(b, pemOptions...) 
- if err != nil { - return nil, err - } - if ctx.kid == "" { - if jwk.KeyID, err = Thumbprint(jwk); err != nil { - return nil, err - } - } - case octKeyType: - jwk.Key = b - } - - // Validate key id - if ctx.kid != "" && jwk.KeyID != "" && ctx.kid != jwk.KeyID { - return nil, errors.Errorf("kid %s does not match the kid on %s", ctx.kid, ctx.filename) - } - if jwk.KeyID == "" { - jwk.KeyID = ctx.kid - } - if jwk.Use == "" { - jwk.Use = ctx.use - } - - // Set the algorithm if empty - guessJWKAlgorithm(ctx, jwk) - - // Validate alg: if the flag '--subtle' is passed we will allow to overwrite it - if !ctx.subtle && ctx.alg != "" && jwk.Algorithm != "" && ctx.alg != jwk.Algorithm { - return nil, errors.Errorf("alg %s does not match the alg on %s", ctx.alg, ctx.filename) - } - if ctx.subtle && ctx.alg != "" { - jwk.Algorithm = ctx.alg - } - - return jwk, nil -} - -// ReadKeySet reads a JWK Set from a URL or filename. URLs must start with -// "https://". -func ReadKeySet(filename string, opts ...Option) (*JSONWebKey, error) { - b, err := read(filename) - if err != nil { - return nil, err - } - opts = append(opts, WithFilename(filename)) - return ParseKeySet(b, opts...) -} - -// ParseKeySet returns the JWK with the given key after parsing a JWKSet from -// a given file. -func ParseKeySet(b []byte, opts ...Option) (*JSONWebKey, error) { - ctx, err := new(context).apply(opts...) - if err != nil { - return nil, err - } - - // Attempt to parse an encrypted file - if b, err = Decrypt(b, opts...); err != nil { - return nil, err - } - - // Unmarshal the plain or decrypted JWKSet - jwkSet := new(JSONWebKeySet) - if err := json.Unmarshal(b, jwkSet); err != nil { - return nil, errors.Errorf("error reading %s: unsupported format", ctx.filename) - } - - jwks := jwkSet.Key(ctx.kid) - switch len(jwks) { - case 0: - return nil, errors.Errorf("cannot find key with kid %s on %s", ctx.kid, ctx.filename) - case 1: - jwk := &jwks[0] - - // Set the algorithm if empty - guessJWKAlgorithm(ctx, jwk) - - // Validate alg: if the flag '--subtle' is passed we will allow the - // overwrite of the alg - if !ctx.subtle && ctx.alg != "" && jwk.Algorithm != "" && ctx.alg != jwk.Algorithm { - return nil, errors.Errorf("alg %s does not match the alg on %s", ctx.alg, ctx.filename) - } - if ctx.subtle && ctx.alg != "" { - jwk.Algorithm = ctx.alg - } - return jwk, nil - default: - return nil, errors.Errorf("multiple keys with kid %s have been found on %s", ctx.kid, ctx.filename) - } -} - -func decodeCerts(l []interface{}) ([]*x509.Certificate, error) { - certs := make([]*x509.Certificate, len(l)) - for i, j := range l { - certStr, ok := j.(string) - if !ok { - return nil, errors.Errorf("wrong type in x5c header list; expected string but %T", i) - } - certB, err := base64.StdEncoding.DecodeString(certStr) - if err != nil { - return nil, errors.Wrap(err, "error decoding base64 encoded x5c cert") - } - cert, err := x509.ParseCertificate(certB) - if err != nil { - return nil, errors.Wrap(err, "error parsing x5c cert") - } - certs[i] = cert - } - return certs, nil -} - -// X5cInsecureKey is the key used to store the x5cInsecure cert chain in the JWT header. -var X5cInsecureKey = "x5cInsecure" - -// GetX5cInsecureHeader extracts the x5cInsecure certificate chain from the token. 
-func GetX5cInsecureHeader(jwt *JSONWebToken) ([]*x509.Certificate, error) { - x5cVal, ok := jwt.Headers[0].ExtraHeaders[HeaderKey(X5cInsecureKey)] - if !ok { - return nil, errors.New("ssh check-host token missing x5cInsecure header") - } - interfaces, ok := x5cVal.([]interface{}) - if !ok { - return nil, errors.Errorf("ssh check-host token x5cInsecure header has wrong type; expected []string, but got %T", x5cVal) - } - chain, err := decodeCerts(interfaces) - if err != nil { - return nil, errors.Wrap(err, "error decoding x5cInsecure header certs") - } - return chain, nil -} - -// ParseX5cInsecure parses an x5cInsecure token, validates the certificate chain -// in the token, and returns the JWT struct along with all the verified chains. -func ParseX5cInsecure(tok string, roots []*x509.Certificate) (*JSONWebToken, [][]*x509.Certificate, error) { - jwt, err := ParseSigned(tok) - if err != nil { - return nil, nil, errors.Wrapf(err, "error parsing x5cInsecure token") - } - - chain, err := GetX5cInsecureHeader(jwt) - if err != nil { - return nil, nil, errors.Wrap(err, "error extracting x5cInsecure cert chain") - } - leaf := chain[0] - - interPool := x509.NewCertPool() - for _, crt := range chain[1:] { - interPool.AddCert(crt) - } - rootPool := x509.NewCertPool() - for _, crt := range roots { - rootPool.AddCert(crt) - } - // Correctly parse and validate the x5c certificate chain. - verifiedChains, err := leaf.Verify(x509.VerifyOptions{ - Roots: rootPool, - Intermediates: interPool, - // A hack so we skip validity period validation. - CurrentTime: leaf.NotAfter.Add(-1 * time.Minute), - KeyUsages: []x509.ExtKeyUsage{ - x509.ExtKeyUsageClientAuth, - }, - }) - if err != nil { - return nil, nil, errors.Wrap(err, "error verifying x5cInsecure certificate chain") - } - leaf = verifiedChains[0][0] - - if leaf.KeyUsage&x509.KeyUsageDigitalSignature == 0 { - return nil, nil, errors.New("certificate used to sign x5cInsecure token cannot be used for digital signature") - } - - return jwt, verifiedChains, nil -} - -// guessKeyType returns the key type of the given data. Key types are JWK, PEM -// or oct. -func guessKeyType(ctx *context, data []byte) keyType { - switch ctx.alg { - // jwk or file with oct data - case "HS256", "HS384", "HS512": - // Encrypted JWK ? - if _, err := ParseEncrypted(string(data)); err == nil { - return jwkKeyType - } - // JSON JWK ? - if err := json.Unmarshal(data, &JSONWebKey{}); err == nil { - return jwkKeyType - } - // Default to oct - return octKeyType - default: - // PEM or default to JWK - if bytes.HasPrefix(data, []byte("-----BEGIN ")) { - return pemKeyType - } - return jwkKeyType - } -} - -// guessJWKAlgorithm set the algorithm if it's not set and we can guess it -func guessJWKAlgorithm(ctx *context, jwk *JSONWebKey) { - if jwk.Algorithm == "" { - // Force default algorithm if passed. 
- if ctx.alg != "" { - jwk.Algorithm = ctx.alg - return - } - - // Guess only fixed algorithms if no defaults is enabled - if ctx.noDefaults { - guessKnownJWKAlgorithm(ctx, jwk) - return - } - - // Use defaults for each key type - switch k := jwk.Key.(type) { - case []byte: - if jwk.Use == "enc" { - jwk.Algorithm = string(DefaultOctKeyAlgorithm) - } else { - jwk.Algorithm = string(DefaultOctSigAlgorithm) - } - case *ecdsa.PrivateKey: - if jwk.Use == "enc" { - jwk.Algorithm = string(DefaultECKeyAlgorithm) - } else { - jwk.Algorithm = getECAlgorithm(k.Curve) - } - case *ecdsa.PublicKey: - if jwk.Use == "enc" { - jwk.Algorithm = string(DefaultECKeyAlgorithm) - } else { - jwk.Algorithm = getECAlgorithm(k.Curve) - } - case *rsa.PrivateKey, *rsa.PublicKey: - if jwk.Use == "enc" { - jwk.Algorithm = string(DefaultRSAKeyAlgorithm) - } else { - jwk.Algorithm = string(DefaultRSASigAlgorithm) - } - // Ed25519 can only be used for signing operations - case ed25519.PrivateKey, ed25519.PublicKey: - jwk.Algorithm = EdDSA - case x25519.PrivateKey, x25519.PublicKey: - jwk.Algorithm = XEdDSA - } - } -} - -// guessSignatureAlgorithm returns the signature algorithm for a given private key. -func guessSignatureAlgorithm(key crypto.PrivateKey) SignatureAlgorithm { - switch k := key.(type) { - case []byte: - return DefaultOctSigAlgorithm - case *ecdsa.PrivateKey: - return SignatureAlgorithm(getECAlgorithm(k.Curve)) - case *rsa.PrivateKey: - return DefaultRSASigAlgorithm - case ed25519.PrivateKey: - return EdDSA - case x25519.PrivateKey, X25519Signer: - return XEdDSA - default: - return "" - } -} - -// guessKnownJWKAlgorithm sets the algorithm for keys that only have one -// possible algorithm. -func guessKnownJWKAlgorithm(_ *context, jwk *JSONWebKey) { - if jwk.Algorithm == "" && jwk.Use != "enc" { - switch k := jwk.Key.(type) { - case *ecdsa.PrivateKey: - jwk.Algorithm = getECAlgorithm(k.Curve) - case *ecdsa.PublicKey: - jwk.Algorithm = getECAlgorithm(k.Curve) - case ed25519.PrivateKey, ed25519.PublicKey: - jwk.Algorithm = EdDSA - case x25519.PrivateKey, x25519.PublicKey: - jwk.Algorithm = XEdDSA - } - } -} - -// getECAlgorithm returns the JWA algorithm name for the given elliptic curve. -// If the curve is not supported it will return an empty string. -// -// Supported curves are P-256, P-384, and P-521. -func getECAlgorithm(crv elliptic.Curve) string { - switch crv.Params().Name { - case P256: - return ES256 - case P384: - return ES384 - case P521: - return ES512 - default: - return "" - } -} diff --git a/vendor/go.step.sm/crypto/jose/types.go b/vendor/go.step.sm/crypto/jose/types.go deleted file mode 100644 index f034763850..0000000000 --- a/vendor/go.step.sm/crypto/jose/types.go +++ /dev/null @@ -1,310 +0,0 @@ -// Package jose is a wrapper for github.com/go-jose/go-jose/v3 and implements -// utilities to parse and generate JWT, JWK and JWKSets. -package jose - -import ( - "crypto" - "errors" - "strings" - "time" - - jose "github.com/go-jose/go-jose/v3" - "github.com/go-jose/go-jose/v3/cryptosigner" - "github.com/go-jose/go-jose/v3/jwt" - "go.step.sm/crypto/x25519" -) - -// SupportsPBKDF2 constant to know if the underlaying library supports -// password based cryptography algorithms. -const SupportsPBKDF2 = true - -// PBKDF2SaltSize is the default size of the salt for PBKDF2, 128-bit salt. -const PBKDF2SaltSize = 16 - -// PBKDF2Iterations is the default number of iterations for PBKDF2. 
-// -// 600k is the current OWASP recommendation (Dec 2022) -// https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html#pbkdf2 -// -// NIST recommends at least 10k (800-63B); in 2023, 1Password increased the -// number of iterations from 100k to 650k. -const PBKDF2Iterations = 600000 - -// JSONWebSignature represents a signed JWS object after parsing. -type JSONWebSignature = jose.JSONWebSignature - -// JSONWebToken represents a JSON Web Token (as specified in RFC7519). -type JSONWebToken = jwt.JSONWebToken - -// JSONWebKey represents a public or private key in JWK format. -type JSONWebKey = jose.JSONWebKey - -// JSONWebKeySet represents a JWK Set object. -type JSONWebKeySet = jose.JSONWebKeySet - -// JSONWebEncryption represents an encrypted JWE object after parsing. -type JSONWebEncryption = jose.JSONWebEncryption - -// Recipient represents an algorithm/key to encrypt messages to. -type Recipient = jose.Recipient - -// EncrypterOptions represents options that can be set on new encrypters. -type EncrypterOptions = jose.EncrypterOptions - -// Encrypter represents an encrypter which produces an encrypted JWE object. -type Encrypter = jose.Encrypter - -// ContentType represents the type of the contained data. -type ContentType = jose.ContentType - -// KeyAlgorithm represents a key management algorithm. -type KeyAlgorithm = jose.KeyAlgorithm - -// ContentEncryption represents a content encryption algorithm. -type ContentEncryption = jose.ContentEncryption - -// SignatureAlgorithm represents a signature (or MAC) algorithm. -type SignatureAlgorithm = jose.SignatureAlgorithm - -// Signature represents a signature. -type Signature = jose.Signature - -// ErrCryptoFailure indicates an error in a cryptographic primitive. -var ErrCryptoFailure = jose.ErrCryptoFailure - -// Claims represents public claim values (as specified in RFC 7519). -type Claims = jwt.Claims - -// Builder is a utility for making JSON Web Tokens. Calls can be chained, and -// errors are accumulated until the final call to CompactSerialize/FullSerialize. -type Builder = jwt.Builder - -// NumericDate represents date and time as the number of seconds since the -// epoch, including leap seconds. Non-integer values can be represented -// in the serialized format, but we round to the nearest second. -type NumericDate = jwt.NumericDate - -// Audience represents the recipients that the token is intended for. -type Audience = jwt.Audience - -// Expected defines values used for protected claims validation. -// If a field has a zero value then its validation is skipped. -type Expected = jwt.Expected - -// Signer represents a signer which takes a payload and produces a signed JWS object. -type Signer = jose.Signer - -// OpaqueSigner represents a jose.Signer that wraps a crypto.Signer -type OpaqueSigner = jose.OpaqueSigner - -// SigningKey represents an algorithm/key used to sign a message. -type SigningKey = jose.SigningKey - -// SignerOptions represents options that can be set when creating signers. -type SignerOptions = jose.SignerOptions - -// Header represents the read-only JOSE header for JWE/JWS objects. -type Header = jose.Header - -// HeaderKey represents the type used as a key in the protected header of a JWS -// object. -type HeaderKey = jose.HeaderKey - -// ErrInvalidIssuer indicates invalid iss claim. -var ErrInvalidIssuer = jwt.ErrInvalidIssuer - -// ErrInvalidAudience indicates invalid aud claim.
-var ErrInvalidAudience = jwt.ErrInvalidAudience - -// ErrNotValidYet indicates that token is used before time indicated in nbf claim. -var ErrNotValidYet = jwt.ErrNotValidYet - -// ErrExpired indicates that token is used after expiry time indicated in exp claim. -var ErrExpired = jwt.ErrExpired - -// ErrInvalidSubject indicates invalid sub claim. -var ErrInvalidSubject = jwt.ErrInvalidSubject - -// ErrInvalidID indicates invalid jti claim. -var ErrInvalidID = jwt.ErrInvalidID - -// ErrIssuedInTheFuture indicates that the iat field is in the future. -var ErrIssuedInTheFuture = jwt.ErrIssuedInTheFuture - -// Key management algorithms -// -//nolint:stylecheck,revive // use standard names in upper-case -const ( - RSA1_5 = KeyAlgorithm("RSA1_5") // RSA-PKCS1v1.5 - RSA_OAEP = KeyAlgorithm("RSA-OAEP") // RSA-OAEP-SHA1 - RSA_OAEP_256 = KeyAlgorithm("RSA-OAEP-256") // RSA-OAEP-SHA256 - A128KW = KeyAlgorithm("A128KW") // AES key wrap (128) - A192KW = KeyAlgorithm("A192KW") // AES key wrap (192) - A256KW = KeyAlgorithm("A256KW") // AES key wrap (256) - DIRECT = KeyAlgorithm("dir") // Direct encryption - ECDH_ES = KeyAlgorithm("ECDH-ES") // ECDH-ES - ECDH_ES_A128KW = KeyAlgorithm("ECDH-ES+A128KW") // ECDH-ES + AES key wrap (128) - ECDH_ES_A192KW = KeyAlgorithm("ECDH-ES+A192KW") // ECDH-ES + AES key wrap (192) - ECDH_ES_A256KW = KeyAlgorithm("ECDH-ES+A256KW") // ECDH-ES + AES key wrap (256) - A128GCMKW = KeyAlgorithm("A128GCMKW") // AES-GCM key wrap (128) - A192GCMKW = KeyAlgorithm("A192GCMKW") // AES-GCM key wrap (192) - A256GCMKW = KeyAlgorithm("A256GCMKW") // AES-GCM key wrap (256) - PBES2_HS256_A128KW = KeyAlgorithm("PBES2-HS256+A128KW") // PBES2 + HMAC-SHA256 + AES key wrap (128) - PBES2_HS384_A192KW = KeyAlgorithm("PBES2-HS384+A192KW") // PBES2 + HMAC-SHA384 + AES key wrap (192) - PBES2_HS512_A256KW = KeyAlgorithm("PBES2-HS512+A256KW") // PBES2 + HMAC-SHA512 + AES key wrap (256) -) - -// Signature algorithms -const ( - HS256 = "HS256" // HMAC using SHA-256 - HS384 = "HS384" // HMAC using SHA-384 - HS512 = "HS512" // HMAC using SHA-512 - RS256 = "RS256" // RSASSA-PKCS-v1.5 using SHA-256 - RS384 = "RS384" // RSASSA-PKCS-v1.5 using SHA-384 - RS512 = "RS512" // RSASSA-PKCS-v1.5 using SHA-512 - ES256 = "ES256" // ECDSA using P-256 and SHA-256 - ES384 = "ES384" // ECDSA using P-384 and SHA-384 - ES512 = "ES512" // ECDSA using P-521 and SHA-512 - PS256 = "PS256" // RSASSA-PSS using SHA256 and MGF1-SHA256 - PS384 = "PS384" // RSASSA-PSS using SHA384 and MGF1-SHA384 - PS512 = "PS512" // RSASSA-PSS using SHA512 and MGF1-SHA512 - EdDSA = "EdDSA" // Ed25519 with EdDSA signature schema - XEdDSA = "XEdDSA" // X25519 with XEdDSA signature schema -) - -// Content encryption algorithms -// -//nolint:revive,stylecheck // use standard names in upper-case -const ( - A128CBC_HS256 = ContentEncryption("A128CBC-HS256") // AES-CBC + HMAC-SHA256 (128) - A192CBC_HS384 = ContentEncryption("A192CBC-HS384") // AES-CBC + HMAC-SHA384 (192) - A256CBC_HS512 = ContentEncryption("A256CBC-HS512") // AES-CBC + HMAC-SHA512 (256) - A128GCM = ContentEncryption("A128GCM") // AES-GCM (128) - A192GCM = ContentEncryption("A192GCM") // AES-GCM (192) - A256GCM = ContentEncryption("A256GCM") // AES-GCM (256) -) - -// Elliptic curves -const ( - P256 = "P-256" // P-256 curve (FIPS 186-3) - P384 = "P-384" // P-384 curve (FIPS 186-3) - P521 = "P-521" // P-521 curve (FIPS 186-3) -) - -// Key types -const ( - EC = "EC" // Elliptic curves - RSA = "RSA" // RSA - OKP = "OKP" // Ed25519 - OCT = "oct" // Octet sequence -) - -// Ed25519 is the EdDSA 
signature scheme using SHA-512 and Curve25519 -const Ed25519 = "Ed25519" - -// Default key management, signature, and content encryption algorithms to use if none is specified. -const ( - // Key management algorithms - DefaultECKeyAlgorithm = ECDH_ES - DefaultRSAKeyAlgorithm = RSA_OAEP_256 - DefaultOctKeyAlgorithm = A256GCMKW - // Signature algorithms - DefaultRSASigAlgorithm = RS256 - DefaultOctSigAlgorithm = HS256 - // Content encryption algorithm - DefaultEncAlgorithm = A256GCM -) - -// Default sizes -const ( - DefaultRSASize = 2048 - DefaultOctSize = 32 -) - -// ParseEncrypted parses an encrypted message in compact or full serialization format. -func ParseEncrypted(input string) (*JSONWebEncryption, error) { - return jose.ParseEncrypted(input) -} - -// NewEncrypter creates an appropriate encrypter based on the key type. -func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) (Encrypter, error) { - return jose.NewEncrypter(enc, rcpt, opts) -} - -// NewNumericDate constructs NumericDate from time.Time value. -func NewNumericDate(t time.Time) *NumericDate { - return jwt.NewNumericDate(t) -} - -// UnixNumericDate returns a NumericDate from the given seconds since the UNIX -// Epoch time. For backward compatibility, if s is 0, a nil value will be returned. -func UnixNumericDate(s int64) *NumericDate { - if s == 0 { - return nil - } - out := NumericDate(s) - return &out -} - -// NewSigner creates an appropriate signer based on the key type -func NewSigner(sig SigningKey, opts *SignerOptions) (Signer, error) { - if k, ok := sig.Key.(x25519.PrivateKey); ok { - sig.Key = X25519Signer(k) - } - if sig.Algorithm == "" { - sig.Algorithm = guessSignatureAlgorithm(sig.Key) - } - return jose.NewSigner(sig, opts) -} - -// NewOpaqueSigner creates a new OpaqueSigner for JWT signing from a crypto.Signer -func NewOpaqueSigner(signer crypto.Signer) OpaqueSigner { - return cryptosigner.Opaque(signer) -} - -// Verify validates the token payload with the given public key and deserializes -// the token into the destination. -func Verify(token *JSONWebToken, publicKey interface{}, dest ...interface{}) error { - if k, ok := publicKey.(x25519.PublicKey); ok { - publicKey = X25519Verifier(k) - } - return token.Claims(publicKey, dest...) -} - -// ParseSigned parses a token from JWS form. -func ParseSigned(s string) (*JSONWebToken, error) { - return jwt.ParseSigned(s) -} - -// Signed creates a builder for signed tokens. -func Signed(sig Signer) Builder { - return jwt.Signed(sig) -} - -// ParseJWS parses a signed message in compact or full serialization format. -func ParseJWS(s string) (*JSONWebSignature, error) { - return jose.ParseSigned(s) -} - -// Determine whether a JSONWebKey is symmetric -func IsSymmetric(k *JSONWebKey) bool { - switch k.Key.(type) { - case []byte: - return true - default: - return false - } -} - -// Determine whether a JSONWebKey is asymmetric -func IsAsymmetric(k *JSONWebKey) bool { - return !IsSymmetric(k) -} - -// TrimPrefix removes the string "go-jose/go-jose" from all errors.
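Taken together, the aliases and constructors above preserve ordinary go-jose usage behind the wrapper. A hedged signing sketch of pre-removal usage, assuming the `GenerateJWK`, `NewSigner`, and `Signed` wrappers from the removed files:

```go
package main

import (
	"fmt"
	"time"

	"go.step.sm/crypto/jose"
)

func main() {
	// Generate an ES256 signing key (see the removed generate.go).
	jwk, err := jose.GenerateJWK("EC", "P-256", "ES256", "sig", "", 0)
	if err != nil {
		panic(err)
	}

	signer, err := jose.NewSigner(jose.SigningKey{
		Algorithm: jose.ES256,
		Key:       jwk.Key,
	}, nil)
	if err != nil {
		panic(err)
	}

	// Build and serialize a token through the jwt aliases above.
	tok, err := jose.Signed(signer).Claims(jose.Claims{
		Subject: "example",
		Expiry:  jose.NewNumericDate(time.Now().Add(time.Hour)),
	}).CompactSerialize()
	if err != nil {
		panic(err)
	}
	fmt.Println(tok)
}
```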
-func TrimPrefix(err error) error { - if err == nil { - return nil - } - return errors.New(strings.TrimPrefix(err.Error(), "go-jose/go-jose: ")) -} diff --git a/vendor/go.step.sm/crypto/jose/validate.go b/vendor/go.step.sm/crypto/jose/validate.go deleted file mode 100644 index 6a904167e7..0000000000 --- a/vendor/go.step.sm/crypto/jose/validate.go +++ /dev/null @@ -1,221 +0,0 @@ -package jose - -import ( - "crypto" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/rsa" - "crypto/sha1" //nolint:gosec // RFC 7515 - X.509 Certificate SHA-1 Thumbprint - "crypto/x509" - "encoding/base64" - "fmt" - "os" - - "github.com/pkg/errors" - "go.step.sm/crypto/keyutil" - "golang.org/x/crypto/ssh" -) - -// ValidateSSHPOP validates the given SSH certificate and key for use in an -// sshpop header. -func ValidateSSHPOP(certFile string, key interface{}) (string, error) { - if certFile == "" { - return "", errors.New("ssh certfile cannot be empty") - } - certBytes, err := os.ReadFile(certFile) - if err != nil { - return "", errors.Wrapf(err, "error reading ssh certificate from %s", certFile) - } - sshpub, _, _, _, err := ssh.ParseAuthorizedKey(certBytes) - if err != nil { - return "", errors.Wrapf(err, "error parsing ssh public key from %s", certFile) - } - cert, ok := sshpub.(*ssh.Certificate) - if !ok { - return "", errors.New("error casting ssh public key to ssh certificate") - } - pubkey, err := keyutil.ExtractKey(cert) - if err != nil { - return "", errors.Wrap(err, "error extracting public key from ssh public key interface") - } - if err = validateKeyPair(pubkey, key); err != nil { - return "", errors.Wrap(err, "error verifying ssh key pair") - } - - return base64.StdEncoding.EncodeToString(cert.Marshal()), nil -} - -func validateKeyPair(pub crypto.PublicKey, priv crypto.PrivateKey) error { - switch key := priv.(type) { - case *JSONWebKey: - return keyutil.VerifyPair(pub, key.Key) - case OpaqueSigner: - if !keyutil.Equal(pub, key.Public().Key) { - return errors.New("private key does not match public key") - } - return nil - default: - return keyutil.VerifyPair(pub, priv) - } -} - -func validateX5(certs []*x509.Certificate, key interface{}) error { - if len(certs) == 0 { - return errors.New("certs cannot be empty") - } - - if err := validateKeyPair(certs[0].PublicKey, key); err != nil { - return errors.Wrap(err, "error verifying certificate and key") - } - - if certs[0].KeyUsage&x509.KeyUsageDigitalSignature == 0 { - return errors.New("certificate/private-key pair used to sign " + - "token is not approved for digital signature") - } - return nil -} - -// ValidateX5C validates the given certificate chain and key for use as a token -// signer and x5t header. -func ValidateX5C(certs []*x509.Certificate, key interface{}) ([]string, error) { - if err := validateX5(certs, key); err != nil { - return nil, errors.Wrap(err, "ValidateX5C") - } - strs := make([]string, len(certs)) - for i, cert := range certs { - strs[i] = base64.StdEncoding.EncodeToString(cert.Raw) - } - return strs, nil -} - -// ValidateX5T validates the given certificate and key for use as a token signer -// and x5t header. 
-func ValidateX5T(certs []*x509.Certificate, key interface{}) (string, error) { - if err := validateX5(certs, key); err != nil { - return "", errors.Wrap(err, "ValidateX5T") - } - // x5t is the base64 URL encoded SHA1 thumbprint - // (see https://tools.ietf.org/html/rfc7515#section-4.1.7) - //nolint:gosec // RFC 7515 - X.509 Certificate SHA-1 Thumbprint - fingerprint := sha1.Sum(certs[0].Raw) - return base64.URLEncoding.EncodeToString(fingerprint[:]), nil -} - -// ValidateJWK validates the given JWK. -func ValidateJWK(jwk *JSONWebKey) error { - switch jwk.Use { - case "sig": - return validateSigJWK(jwk) - case "enc": - return validateEncJWK(jwk) - default: - return validateGeneric(jwk) - } -} - -// validateSigJWK validates the given JWK for signature operations. -func validateSigJWK(jwk *JSONWebKey) error { - if jwk.Algorithm == "" { - return errors.New("flag '--alg' is required with the given key") - } - errctx := "the given key" - - switch k := jwk.Key.(type) { - case []byte: - switch jwk.Algorithm { - case HS256, HS384, HS512: - return nil - } - errctx = "kty 'oct'" - case *rsa.PrivateKey, *rsa.PublicKey: - switch jwk.Algorithm { - case RS256, RS384, RS512: - return nil - case PS256, PS384, PS512: - return nil - } - errctx = "kty 'RSA'" - case *ecdsa.PrivateKey: - curve := k.Params().Name - switch { - case jwk.Algorithm == ES256 && curve == P256: - return nil - case jwk.Algorithm == ES384 && curve == P384: - return nil - case jwk.Algorithm == ES512 && curve == P521: - return nil - } - errctx = fmt.Sprintf("kty 'EC' and crv '%s'", curve) - case *ecdsa.PublicKey: - curve := k.Params().Name - switch { - case jwk.Algorithm == ES256 && curve == P256: - return nil - case jwk.Algorithm == ES384 && curve == P384: - return nil - case jwk.Algorithm == ES512 && curve == P521: - return nil - } - errctx = fmt.Sprintf("kty 'EC' and crv '%s'", curve) - case ed25519.PrivateKey, ed25519.PublicKey: - if jwk.Algorithm == EdDSA { - return nil - } - errctx = "kty 'OKP' and crv 'Ed25519'" - case OpaqueSigner: - for _, alg := range k.Algs() { - if jwk.Algorithm == string(alg) { - return nil - } - } - } - - return errors.Errorf("alg '%s' is not compatible with %s", jwk.Algorithm, errctx) -} - -// validatesEncJWK validates the given JWK for encryption operations. -func validateEncJWK(jwk *JSONWebKey) error { - alg := KeyAlgorithm(jwk.Algorithm) - var kty string - - switch jwk.Key.(type) { - case []byte: - switch alg { - case DIRECT, A128GCMKW, A192GCMKW, A256GCMKW, A128KW, A192KW, A256KW: - return nil - } - kty = "oct" - case *rsa.PrivateKey, *rsa.PublicKey: - switch alg { - case RSA1_5, RSA_OAEP, RSA_OAEP_256: - return nil - } - kty = "RSA" - case *ecdsa.PrivateKey, *ecdsa.PublicKey: - switch alg { - case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: - return nil - } - kty = "EC" - case ed25519.PrivateKey, ed25519.PublicKey: - return errors.New("key Ed25519 cannot be used for encryption") - } - - return errors.Errorf("alg '%s' is not compatible with kty '%s'", jwk.Algorithm, kty) -} - -// validateGeneric validates just the supported key types. 
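[Reviewer aside] The x5t/x5c values produced by the ValidateX5T/ValidateX5C helpers removed above are plain hash-and-encode steps over the certificate DER. A minimal sketch mirroring that logic (standard library only; the `cert` parameter is a placeholder for a certificate you already parsed):

```go
import (
	"crypto/sha1" //nolint:gosec // RFC 7515 defines x5t as a SHA-1 thumbprint
	"crypto/x509"
	"encoding/base64"
)

// x5tAndX5C mirrors the removed helpers: x5t is the base64url encoding of the
// SHA-1 of the leaf certificate's DER bytes (RFC 7515 §4.1.7, encoded with
// base64.URLEncoding as the removed code does), and each x5c chain entry is
// the standard-base64 encoding of one certificate's DER bytes.
func x5tAndX5C(cert *x509.Certificate) (x5t, x5cEntry string) {
	sum := sha1.Sum(cert.Raw)
	x5t = base64.URLEncoding.EncodeToString(sum[:])
	x5cEntry = base64.StdEncoding.EncodeToString(cert.Raw)
	return x5t, x5cEntry
}
```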
-func validateGeneric(jwk *JSONWebKey) error { - switch jwk.Key.(type) { - case []byte: - return nil - case *rsa.PrivateKey, *rsa.PublicKey: - return nil - case *ecdsa.PrivateKey, *ecdsa.PublicKey: - return nil - case ed25519.PrivateKey, ed25519.PublicKey: - return nil - } - - return errors.Errorf("unsupported key type '%T'", jwk.Key) -} diff --git a/vendor/go.step.sm/crypto/jose/x25519.go b/vendor/go.step.sm/crypto/jose/x25519.go deleted file mode 100644 index 25e90e8ad7..0000000000 --- a/vendor/go.step.sm/crypto/jose/x25519.go +++ /dev/null @@ -1,66 +0,0 @@ -package jose - -import ( - "crypto" - "crypto/rand" - "encoding/base64" - "fmt" - - "github.com/pkg/errors" - "go.step.sm/crypto/x25519" -) - -const x25519ThumbprintTemplate = `{"crv":"X25519","kty":"OKP","x":%q}` - -func x25519Thumbprint(key x25519.PublicKey, hash crypto.Hash) ([]byte, error) { - if len(key) != 32 { - return nil, errors.New("invalid elliptic key") - } - h := hash.New() - fmt.Fprintf(h, x25519ThumbprintTemplate, base64.RawURLEncoding.EncodeToString(key)) - return h.Sum(nil), nil -} - -// X25519Signer implements the jose.OpaqueSigner using an X25519 key and XEdDSA -// as the signing algorithm. -type X25519Signer x25519.PrivateKey - -// Public returns the public key of the current signing key. -func (s X25519Signer) Public() *JSONWebKey { - return &JSONWebKey{ - Key: x25519.PrivateKey(s).Public(), - } -} - -// Algs returns a list of supported signing algorithms, in this case only -// XEdDSA. -func (s X25519Signer) Algs() []SignatureAlgorithm { - return []SignatureAlgorithm{ - XEdDSA, - } -} - -// SignPayload signs a payload with the current signing key using the given -// algorithm, it will fail if it's not XEdDSA. -func (s X25519Signer) SignPayload(payload []byte, alg SignatureAlgorithm) ([]byte, error) { - if alg != XEdDSA { - return nil, errors.Errorf("x25519 key does not support the signature algorithm %s", alg) - } - return x25519.PrivateKey(s).Sign(rand.Reader, payload, crypto.Hash(0)) -} - -// X25519Verifier implements the jose.OpaqueVerifier interface using an X25519 -// key and XEdDSA as a signing algorithm. -type X25519Verifier x25519.PublicKey - -// VerifyPayload verifies the given signature using the X25519 public key, it -// will fail if the signature algorithm is not XEdDSA. -func (v X25519Verifier) VerifyPayload(payload, signature []byte, alg SignatureAlgorithm) error { - if alg != XEdDSA { - return errors.Errorf("x25519 key does not support the signature algorithm %s", alg) - } - if !x25519.Verify(x25519.PublicKey(v), payload, signature) { - return errors.New("failed to verify XEdDSA signature") - } - return nil -} diff --git a/vendor/go.step.sm/crypto/keyutil/fingerprint.go b/vendor/go.step.sm/crypto/keyutil/fingerprint.go deleted file mode 100644 index 4447ff31e7..0000000000 --- a/vendor/go.step.sm/crypto/keyutil/fingerprint.go +++ /dev/null @@ -1,74 +0,0 @@ -package keyutil - -import ( - "crypto" - "crypto/sha256" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "fmt" - - "go.step.sm/crypto/fingerprint" -) - -// FingerprintEncoding defines the supported encodings in certificate -// fingerprints. -type FingerprintEncoding = fingerprint.Encoding - -// Supported fingerprint encodings. -const ( - // DefaultFingerprint represents the base64 encoding of the fingerprint. - DefaultFingerprint = FingerprintEncoding(0) - // HexFingerprint represents the hex encoding of the fingerprint. 
- HexFingerprint = fingerprint.HexFingerprint - // Base64Fingerprint represents the base64 encoding of the fingerprint. - Base64Fingerprint = fingerprint.Base64Fingerprint - // Base64URLFingerprint represents the base64URL encoding of the fingerprint. - Base64URLFingerprint = fingerprint.Base64URLFingerprint - // Base64RawFingerprint represents the base64RawStd encoding of the fingerprint. - Base64RawFingerprint = fingerprint.Base64RawFingerprint - // Base64RawURLFingerprint represents the base64RawURL encoding of the fingerprint. - Base64RawURLFingerprint = fingerprint.Base64RawURLFingerprint - // EmojiFingerprint represents the emoji encoding of the fingerprint. - EmojiFingerprint = fingerprint.EmojiFingerprint -) - -// subjectPublicKeyInfo is a PKIX public key structure defined in RFC 5280. -type subjectPublicKeyInfo struct { - Algorithm pkix.AlgorithmIdentifier - SubjectPublicKey asn1.BitString -} - -// Fingerprint returns the SHA-256 fingerprint of an public key. -// -// The fingerprint is calculated from the encoding of the key according to RFC -// 5280 section 4.2.1.2, but using SHA-256 instead of SHA-1. -func Fingerprint(pub crypto.PublicKey) (string, error) { - return EncodedFingerprint(pub, DefaultFingerprint) -} - -// EncodedFingerprint returns the SHA-256 hash of the certificate using the -// specified encoding. -// -// The fingerprint is calculated from the encoding of the key according to RFC -// 5280 section 4.2.1.2, but using SHA-256 instead of SHA-1. -func EncodedFingerprint(pub crypto.PublicKey, encoding FingerprintEncoding) (string, error) { - b, err := x509.MarshalPKIXPublicKey(pub) - if err != nil { - return "", fmt.Errorf("error marshaling public key: %w", err) - } - var info subjectPublicKeyInfo - if _, err = asn1.Unmarshal(b, &info); err != nil { - return "", fmt.Errorf("error unmarshaling public key: %w", err) - } - if encoding == DefaultFingerprint { - encoding = Base64Fingerprint - } - - sum := sha256.Sum256(info.SubjectPublicKey.Bytes) - fp := fingerprint.Fingerprint(sum[:], encoding) - if fp == "" { - return "", fmt.Errorf("error formatting fingerprint: unsupported encoding") - } - return "SHA256:" + fp, nil -} diff --git a/vendor/go.step.sm/crypto/keyutil/key.go b/vendor/go.step.sm/crypto/keyutil/key.go deleted file mode 100644 index 171cdf3f6e..0000000000 --- a/vendor/go.step.sm/crypto/keyutil/key.go +++ /dev/null @@ -1,265 +0,0 @@ -// Package keyutil implements utilities to generate cryptographic keys. -package keyutil - -import ( - "bytes" - "crypto" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "math/big" - "sync/atomic" - - "github.com/pkg/errors" - "go.step.sm/crypto/x25519" - "golang.org/x/crypto/ssh" -) - -var ( - // DefaultKeyType is the default type of a private key. - DefaultKeyType = "EC" - // DefaultKeySize is the default size (in # of bits) of a private key. - DefaultKeySize = 2048 - // DefaultKeyCurve is the default curve of a private key. - DefaultKeyCurve = "P-256" - // DefaultSignatureAlgorithm is the default signature algorithm used on a - // certificate with the default key type. - DefaultSignatureAlgorithm = x509.ECDSAWithSHA256 - // MinRSAKeyBytes is the minimum acceptable size (in bytes) for RSA keys - // signed by the authority. 
- MinRSAKeyBytes = 256 -) - -type atomicBool int32 - -func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } -func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } -func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) } - -var insecureMode atomicBool - -// Insecure enables the insecure mode in this package and returns a function to -// revert the configuration. The insecure mode removes the minimum limits when -// generating RSA keys. -func Insecure() (revert func()) { - insecureMode.setTrue() - return func() { - insecureMode.setFalse() - } -} - -// PublicKey extracts a public key from a private key. -func PublicKey(priv interface{}) (crypto.PublicKey, error) { - switch k := priv.(type) { - case *rsa.PrivateKey: - return &k.PublicKey, nil - case *ecdsa.PrivateKey: - return &k.PublicKey, nil - case ed25519.PrivateKey: - return k.Public(), nil - case x25519.PrivateKey: - return k.Public(), nil - case *rsa.PublicKey, *ecdsa.PublicKey, ed25519.PublicKey, x25519.PublicKey: - return k, nil - case crypto.Signer: - return k.Public(), nil - default: - return nil, errors.Errorf("unrecognized key type: %T", priv) - } -} - -// GenerateDefaultKey generates a public/private key pair using sane defaults -// for key type, curve, and size. -func GenerateDefaultKey() (crypto.PrivateKey, error) { - return GenerateKey(DefaultKeyType, DefaultKeyCurve, DefaultKeySize) -} - -// GenerateDefaultKeyPair generates a public/private key pair using configured -// default values for key type, curve, and size. -func GenerateDefaultKeyPair() (crypto.PublicKey, crypto.PrivateKey, error) { - return GenerateKeyPair(DefaultKeyType, DefaultKeyCurve, DefaultKeySize) -} - -// GenerateKey generates a key of the given type (kty). -func GenerateKey(kty, crv string, size int) (crypto.PrivateKey, error) { - switch kty { - case "EC", "RSA", "OKP": - return GenerateSigner(kty, crv, size) - case "oct": - return generateOctKey(size) - default: - return nil, errors.Errorf("unrecognized key type: %s", kty) - } -} - -// GenerateKeyPair creates an asymmetric crypto keypair using input -// configuration. -func GenerateKeyPair(kty, crv string, size int) (crypto.PublicKey, crypto.PrivateKey, error) { - signer, err := GenerateSigner(kty, crv, size) - if err != nil { - return nil, nil, err - } - return signer.Public(), signer, nil -} - -// GenerateDefaultSigner returns an asymmetric crypto key that implements -// crypto.Signer using sane defaults. -func GenerateDefaultSigner() (crypto.Signer, error) { - return GenerateSigner(DefaultKeyType, DefaultKeyCurve, DefaultKeySize) -} - -// GenerateSigner creates an asymmetric crypto key that implements -// crypto.Signer. -func GenerateSigner(kty, crv string, size int) (crypto.Signer, error) { - switch kty { - case "EC": - return generateECKey(crv) - case "RSA": - return generateRSAKey(size) - case "OKP": - return generateOKPKey(crv) - default: - return nil, errors.Errorf("unrecognized key type: %s", kty) - } -} - -// ExtractKey returns the given public or private key or extracts the public key -// if a x509.Certificate or x509.CertificateRequest is given. 
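[Reviewer aside] A quick usage sketch of the generator entry points above, all removed by this patch; the values shown are the package defaults visible in the hunk:

```go
import "go.step.sm/crypto/keyutil"

func generateExamples() error {
	// Uses the defaults above: kty "EC", crv "P-256"; the size argument is
	// ignored for EC keys, so this is effectively GenerateSigner("EC", "P-256", 0).
	signer, err := keyutil.GenerateDefaultSigner()
	if err != nil {
		return err
	}
	_ = signer.Public()

	// RSA keys smaller than MinRSAKeyBytes*8 = 2048 bits are rejected
	// unless keyutil.Insecure() has been called first.
	if _, err := keyutil.GenerateSigner("RSA", "", 2048); err != nil {
		return err
	}
	return nil
}
```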
-func ExtractKey(in interface{}) (interface{}, error) { - switch k := in.(type) { - case *rsa.PublicKey, *rsa.PrivateKey, - *ecdsa.PublicKey, *ecdsa.PrivateKey, - ed25519.PublicKey, ed25519.PrivateKey, - x25519.PublicKey, x25519.PrivateKey: - return in, nil - case []byte: - return in, nil - case *x509.Certificate: - return k.PublicKey, nil - case *x509.CertificateRequest: - return k.PublicKey, nil - case ssh.CryptoPublicKey: - return k.CryptoPublicKey(), nil - case *ssh.Certificate: - return ExtractKey(k.Key) - default: - return nil, errors.Errorf("cannot extract the key from type '%T'", k) - } -} - -// VerifyPair that the public key matches the given private key. -func VerifyPair(pub crypto.PublicKey, priv crypto.PrivateKey) error { - signer, ok := priv.(crypto.Signer) - if !ok { - return errors.New("private key type does implement crypto.Signer") - } - if !Equal(pub, signer.Public()) { - return errors.New("private key does not match public key") - } - return nil -} - -// Equal reports if x and y are the same key. -func Equal(x, y any) bool { - switch xx := x.(type) { - case *ecdsa.PublicKey: - yy, ok := y.(*ecdsa.PublicKey) - return ok && xx.Equal(yy) - case *ecdsa.PrivateKey: - yy, ok := y.(*ecdsa.PrivateKey) - return ok && xx.Equal(yy) - case *rsa.PublicKey: - yy, ok := y.(*rsa.PublicKey) - return ok && xx.Equal(yy) - case *rsa.PrivateKey: - yy, ok := y.(*rsa.PrivateKey) - return ok && xx.Equal(yy) - case ed25519.PublicKey: - yy, ok := y.(ed25519.PublicKey) - return ok && xx.Equal(yy) - case ed25519.PrivateKey: - yy, ok := y.(ed25519.PrivateKey) - return ok && xx.Equal(yy) - case x25519.PublicKey: - yy, ok := y.(x25519.PublicKey) - return ok && xx.Equal(yy) - case x25519.PrivateKey: - yy, ok := y.(x25519.PrivateKey) - return ok && xx.Equal(yy) - case []byte: // special case for symmetric keys - yy, ok := y.([]byte) - return ok && bytes.Equal(xx, yy) - default: - return false - } -} - -func generateECKey(crv string) (crypto.Signer, error) { - var c elliptic.Curve - switch crv { - case "P-256": - c = elliptic.P256() - case "P-384": - c = elliptic.P384() - case "P-521": - c = elliptic.P521() - default: - return nil, errors.Errorf("invalid value for argument crv (crv: '%s')", crv) - } - - key, err := ecdsa.GenerateKey(c, rand.Reader) - if err != nil { - return nil, errors.Wrap(err, "error generating EC key") - } - - return key, nil -} - -func generateRSAKey(bits int) (crypto.Signer, error) { - if minBits := MinRSAKeyBytes * 8; !insecureMode.isSet() && bits < minBits { - return nil, errors.Errorf("the size of the RSA key should be at least %d bits", minBits) - } - - key, err := rsa.GenerateKey(rand.Reader, bits) - if err != nil { - return nil, errors.Wrap(err, "error generating RSA key") - } - - return key, nil -} - -func generateOKPKey(crv string) (crypto.Signer, error) { - switch crv { - case "Ed25519": - _, key, err := ed25519.GenerateKey(rand.Reader) - if err != nil { - return nil, errors.Wrap(err, "error generating Ed25519 key") - } - return key, nil - case "X25519": - _, key, err := x25519.GenerateKey(rand.Reader) - if err != nil { - return nil, errors.Wrap(err, "error generating X25519 key") - } - return key, nil - default: - return nil, errors.Errorf("missing or invalid value for argument 'crv'. 
"+ - "expected 'Ed25519' or 'X25519', but got '%s'", crv) - } -} - -func generateOctKey(size int) (interface{}, error) { - const chars = "abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" - result := make([]byte, size) - for i := range result { - num, err := rand.Int(rand.Reader, big.NewInt(int64(len(chars)))) - if err != nil { - return nil, err - } - result[i] = chars[num.Int64()] - } - return result, nil -} diff --git a/vendor/go.step.sm/crypto/pemutil/cosign.go b/vendor/go.step.sm/crypto/pemutil/cosign.go deleted file mode 100644 index d28c9f7d70..0000000000 --- a/vendor/go.step.sm/crypto/pemutil/cosign.go +++ /dev/null @@ -1,79 +0,0 @@ -package pemutil - -import ( - "crypto" - "crypto/x509" - "encoding/json" - - "github.com/pkg/errors" - "golang.org/x/crypto/nacl/secretbox" - "golang.org/x/crypto/scrypt" -) - -type cosignEnvelope struct { - KDF cosignKDF `json:"kdf"` - Cipher cosignCipher `json:"cipher"` - Ciphertext []byte `json:"ciphertext"` -} - -type cosignKDF struct { - Name string `json:"name"` - Params cosignScryptParams `json:"params"` - Salt []byte `json:"salt"` -} - -type cosignScryptParams struct { - N int `json:"N"` - R int `json:"r"` - P int `json:"p"` -} - -type cosignCipher struct { - Name string `json:"name"` - Nonce []byte `json:"nonce"` -} - -// ParseCosignPrivateKey returns the private key encoded using cosign envelope. -// If an incorrect password is detected an x509.IncorrectPasswordError is -// returned. -// -// Cosign keys are encrypted under a password using scrypt as a KDF and -// nacl/secretbox for encryption. -func ParseCosignPrivateKey(data, password []byte) (crypto.PrivateKey, error) { - var env cosignEnvelope - if err := json.Unmarshal(data, &env); err != nil { - return nil, errors.Wrap(err, "error unmarshaling key") - } - if env.KDF.Name != "scrypt" { - return nil, errors.Errorf("error parsing key: unsupported kdf %s", env.KDF.Name) - } - if env.Cipher.Name != "nacl/secretbox" { - return nil, errors.Errorf("error parsing key: unsupported cipher %s", env.Cipher.Name) - } - if len(env.Cipher.Nonce) != 24 { - return nil, errors.New("error parsing key: nonce must be 24 bytes long") - } - - params := env.KDF.Params - k, err := scrypt.Key(password, env.KDF.Salt, params.N, params.R, params.P, 32) - if err != nil { - return nil, errors.Wrap(err, "error generating key") - } - - var nonce [24]byte - var key [32]byte - copy(nonce[:], env.Cipher.Nonce) - copy(key[:], k) - - out, ok := secretbox.Open(nil, env.Ciphertext, &nonce, &key) - if !ok { - return nil, x509.IncorrectPasswordError - } - - priv, err := x509.ParsePKCS8PrivateKey(out) - if err != nil { - return nil, errors.Wrap(err, "error parsing pkcs8 key") - } - - return priv, nil -} diff --git a/vendor/go.step.sm/crypto/pemutil/pem.go b/vendor/go.step.sm/crypto/pemutil/pem.go deleted file mode 100644 index 9202510d2d..0000000000 --- a/vendor/go.step.sm/crypto/pemutil/pem.go +++ /dev/null @@ -1,856 +0,0 @@ -// Package pemutil implements utilities to parse keys and certificates. It also -// includes a method to serialize keys, X.509 certificates and certificate -// requests to PEM. 
-package pemutil - -import ( - "bytes" - "crypto/ecdh" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "crypto/x509" - "encoding/pem" - "fmt" - "math/big" - "os" - "strings" - - "github.com/pkg/errors" - "go.step.sm/crypto/internal/utils" - "go.step.sm/crypto/keyutil" - "go.step.sm/crypto/x25519" - "golang.org/x/crypto/ssh" -) - -// DefaultEncCipher is the default algorithm used when encrypting sensitive -// data in the PEM format. -var DefaultEncCipher = x509.PEMCipherAES256 - -// PasswordPrompter defines the function signature for the PromptPassword -// callback. -type PasswordPrompter func(s string) ([]byte, error) - -// FileWriter defines the function signature for the WriteFile callback. -type FileWriter func(filename string, data []byte, perm os.FileMode) error - -// PromptPassword is a method used to prompt for a password to decode encrypted -// keys. If this method is not defined and the key or password are not passed, -// the parse of the key will fail. -var PromptPassword PasswordPrompter - -// WriteFile is a method used to write a file, by default it uses a wrapper over -// ioutil.WriteFile, but it can be set to a custom method, that for example can -// check if a file exists and prompts the user if it should be overwritten. -var WriteFile FileWriter = utils.WriteFile - -// PEMBlockHeader is the expected header for any PEM formatted block. -var PEMBlockHeader = []byte("-----BEGIN ") - -// context add options to the pem methods. -type context struct { - filename string - perm os.FileMode - password []byte - pkcs8 bool - openSSH bool - comment string - firstBlock bool - passwordPrompt string - passwordPrompter PasswordPrompter -} - -// newContext initializes the context with a filename. -func newContext(name string) *context { - return &context{ - filename: name, - perm: 0600, - } -} - -// apply the context options and return the first error if exists. -func (c *context) apply(opts []Options) error { - for _, fn := range opts { - if err := fn(c); err != nil { - return err - } - } - return nil -} - -// promptPassword returns the password or prompts for one. -func (c *context) promptPassword() ([]byte, error) { - switch { - case len(c.password) > 0: - return c.password, nil - case c.passwordPrompter != nil: - return c.passwordPrompter(c.passwordPrompt) - case PromptPassword != nil: - return PromptPassword(fmt.Sprintf("Please enter the password to decrypt %s", c.filename)) - default: - return nil, errors.Errorf("error decoding %s: key is password protected", c.filename) - } -} - -// promptEncryptPassword returns the password or prompts for one if -// WithPassword, WithPasswordFile or WithPasswordPrompt have been used. This -// method is used to encrypt keys, and it will only use the options passed, it -// will not use the global PromptPassword. -func (c *context) promptEncryptPassword() ([]byte, error) { - switch { - case len(c.password) > 0: - return c.password, nil - case c.passwordPrompter != nil: - return c.passwordPrompter(c.passwordPrompt) - default: - return nil, nil - } -} - -// Options is the type to add attributes to the context. -type Options func(o *context) error - -// withContext replaces the context with the given one. -func withContext(c *context) Options { - return func(ctx *context) error { - *ctx = *c - return nil - } -} - -// WithFilename is a method that adds the given filename to the context. 
-func WithFilename(name string) Options { - return func(ctx *context) error { - ctx.filename = name - // Default perm mode if not set - if ctx.perm == 0 { - ctx.perm = 0600 - } - return nil - } -} - -// ToFile is a method that adds the given filename and permissions to the -// context. It is used in the Serialize to store PEM in disk. -func ToFile(name string, perm os.FileMode) Options { - return func(ctx *context) error { - ctx.filename = name - ctx.perm = perm - return nil - } -} - -// WithPassword is a method that adds the given password to the context. -func WithPassword(pass []byte) Options { - return func(ctx *context) error { - ctx.password = pass - return nil - } -} - -// WithPasswordFile is a method that adds the password in a file to the context. -func WithPasswordFile(filename string) Options { - return func(ctx *context) error { - b, err := utils.ReadPasswordFromFile(filename) - if err != nil { - return err - } - ctx.password = b - return nil - } -} - -// WithPasswordPrompt ask the user for a password and adds it to the context. -func WithPasswordPrompt(prompt string, fn PasswordPrompter) Options { - return func(ctx *context) error { - ctx.passwordPrompt = prompt - ctx.passwordPrompter = fn - return nil - } -} - -// WithPKCS8 with v set to true returns an option used in the Serialize method -// to use the PKCS#8 encoding form on the private keys. With v set to false -// default form will be used. -func WithPKCS8(v bool) Options { - return func(ctx *context) error { - ctx.pkcs8 = v - return nil - } -} - -// WithOpenSSH is an option used in the Serialize method to use OpenSSH encoding -// form on the private keys. With v set to false default form will be used. -func WithOpenSSH(v bool) Options { - return func(ctx *context) error { - ctx.openSSH = v - return nil - } -} - -// WithComment is an option used in the Serialize method to add a comment in the -// OpenSSH private keys. WithOpenSSH must be set to true too. -func WithComment(comment string) Options { - return func(ctx *context) error { - ctx.comment = comment - return nil - } -} - -// WithFirstBlock will avoid failing if a PEM contains more than one block or -// certificate and it will only look at the first. -func WithFirstBlock() Options { - return func(ctx *context) error { - ctx.firstBlock = true - return nil - } -} - -// ParseCertificate extracts the first certificate from the given pem. -func ParseCertificate(pemData []byte) (*x509.Certificate, error) { - var block *pem.Block - for len(pemData) > 0 { - block, pemData = pem.Decode(pemData) - if block == nil { - return nil, errors.New("error decoding pem block") - } - if block.Type != "CERTIFICATE" || len(block.Headers) != 0 { - continue - } - - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, errors.Wrap(err, "error parsing certificate") - } - return cert, nil - } - - return nil, errors.New("error parsing certificate: no certificate found") -} - -// ParseCertificateBundle returns a list of *x509.Certificate parsed from -// the given bytes. -// -// - supports PEM and DER certificate formats -// - If a DER-formatted file is given only one certificate will be returned. 
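[Reviewer aside] Since the functional-options pattern above is spread over many small declarations, here is how the removed Read/Serialize pair composed them in practice (a sketch; the file names and password are placeholders):

```go
import "go.step.sm/crypto/pemutil"

func roundTrip() error {
	// Read decrypts an encrypted PEM with the supplied password instead of
	// falling back to the PromptPassword callback.
	key, err := pemutil.Read("key.pem", pemutil.WithPassword([]byte("secret")))
	if err != nil {
		return err
	}
	// Serialize writes an encrypted PKCS#8 block to disk with mode 0600.
	_, err = pemutil.Serialize(key,
		pemutil.WithPKCS8(true),
		pemutil.WithPassword([]byte("secret")),
		pemutil.ToFile("key-pkcs8.pem", 0600),
	)
	return err
}
```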
-func ParseCertificateBundle(data []byte) ([]*x509.Certificate, error) { - var err error - - // PEM format - if bytes.Contains(data, PEMBlockHeader) { - var block *pem.Block - var bundle []*x509.Certificate - for len(data) > 0 { - block, data = pem.Decode(data) - if block == nil { - break - } - if block.Type != "CERTIFICATE" || len(block.Headers) != 0 { - continue - } - var crt *x509.Certificate - crt, err = x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, &InvalidPEMError{ - Err: err, - Type: PEMTypeCertificate, - } - } - bundle = append(bundle, crt) - } - if len(bundle) == 0 { - return nil, &InvalidPEMError{ - Type: PEMTypeCertificate, - } - } - return bundle, nil - } - - // DER format (binary) - crt, err := x509.ParseCertificate(data) - if err != nil { - return nil, &InvalidPEMError{ - Message: fmt.Sprintf("error parsing certificate as DER format: %v", err), - Type: PEMTypeCertificate, - } - } - return []*x509.Certificate{crt}, nil -} - -// ParseCertificateRequest extracts the first *x509.CertificateRequest -// from the given data. -// -// - supports PEM and DER certificate formats -// - If a DER-formatted file is given only one certificate will be returned. -func ParseCertificateRequest(data []byte) (*x509.CertificateRequest, error) { - // PEM format - if bytes.Contains(data, PEMBlockHeader) { - var block *pem.Block - for len(data) > 0 { - block, data = pem.Decode(data) - if block == nil { - break - } - if !strings.HasSuffix(block.Type, "CERTIFICATE REQUEST") { - continue - } - csr, err := x509.ParseCertificateRequest(block.Bytes) - if err != nil { - return nil, &InvalidPEMError{ - Type: PEMTypeCertificateRequest, - Err: err, - } - } - - return csr, nil - } - } - - // DER format (binary) - csr, err := x509.ParseCertificateRequest(data) - if err != nil { - return nil, &InvalidPEMError{ - Message: fmt.Sprintf("error parsing certificate request as DER format: %v", err), - Type: PEMTypeCertificateRequest, - } - } - return csr, nil -} - -// PEMType represents a PEM block type. (e.g., CERTIFICATE, CERTIFICATE REQUEST, etc.) -type PEMType int - -func (pt PEMType) String() string { - switch pt { - case PEMTypeCertificate: - return "certificate" - case PEMTypeCertificateRequest: - return "certificate request" - default: - return "undefined" - } -} - -const ( - // PEMTypeUndefined undefined - PEMTypeUndefined = iota - // PEMTypeCertificate CERTIFICATE - PEMTypeCertificate - // PEMTypeCertificateRequest CERTIFICATE REQUEST - PEMTypeCertificateRequest -) - -// InvalidPEMError represents an error that occurs when parsing a file with -// PEM encoded data. -type InvalidPEMError struct { - Type PEMType - File string - Message string - Err error -} - -func (e *InvalidPEMError) Error() string { - switch { - case e.Message != "": - return e.Message - case e.Err != nil: - return fmt.Sprintf("error decoding PEM data: %v", e.Err) - default: - if e.Type == PEMTypeUndefined { - return "does not contain valid PEM encoded data" - } - return fmt.Sprintf("does not contain a valid PEM encoded %s", e.Type) - } -} - -func (e *InvalidPEMError) Unwrap() error { - return e.Err -} - -// ReadCertificate returns a *x509.Certificate from the given filename. It -// supports certificates formats PEM and DER. 
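[Reviewer aside] InvalidPEMError, defined just above, implements Unwrap, so callers of the bundle parsers can branch on the failing PEM type with errors.As; a sketch:

```go
import (
	"errors"
	"fmt"

	"go.step.sm/crypto/pemutil"
)

func parseBundle(data []byte) error {
	_, err := pemutil.ParseCertificateBundle(data)
	var pemErr *pemutil.InvalidPEMError
	if errors.As(err, &pemErr) {
		// pemErr.Type distinguishes certificates from certificate requests.
		return fmt.Errorf("bad input (%s): %w", pemErr.Type, err)
	}
	return err // nil on success, or a non-PEM error such as a read failure
}
```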
-func ReadCertificate(filename string, opts ...Options) (*x509.Certificate, error) { - // Populate options - ctx := newContext(filename) - if err := ctx.apply(opts); err != nil { - return nil, err - } - - bundle, err := ReadCertificateBundle(filename) - switch { - case err != nil: - return nil, err - case len(bundle) == 0: - return nil, errors.Errorf("file %s does not contain a valid PEM or DER formatted certificate", filename) - case len(bundle) > 1 && !ctx.firstBlock: - return nil, errors.Errorf("error decoding %s: contains more than one PEM encoded block", filename) - default: - return bundle[0], nil - } -} - -// ReadCertificateBundle reads the given filename and returns a list of -// *x509.Certificate. -// -// - supports PEM and DER certificate formats -// - If a DER-formatted file is given only one certificate will be returned. -func ReadCertificateBundle(filename string) ([]*x509.Certificate, error) { - b, err := utils.ReadFile(filename) - if err != nil { - return nil, err - } - - bundle, err := ParseCertificateBundle(b) - if err != nil { - return nil, fmt.Errorf("error parsing %s: %w", filename, err) - } - return bundle, nil -} - -// ReadCertificateRequest reads the given filename and returns a -// *x509.CertificateRequest. -// -// - supports PEM and DER Certificate formats. -// - supports reading from STDIN with filename `-`. -func ReadCertificateRequest(filename string) (*x509.CertificateRequest, error) { - b, err := utils.ReadFile(filename) - if err != nil { - return nil, err - } - - cr, err := ParseCertificateRequest(b) - if err != nil { - return nil, fmt.Errorf("error parsing %s: %w", filename, err) - } - return cr, nil -} - -// Parse returns the key or certificate PEM-encoded in the given bytes. -func Parse(b []byte, opts ...Options) (interface{}, error) { - // Populate options - ctx := newContext("PEM") - if err := ctx.apply(opts); err != nil { - return nil, err - } - - block, rest := pem.Decode(b) - switch { - case block == nil: - return nil, errors.Errorf("error decoding %s: not a valid PEM encoded block", ctx.filename) - case len(bytes.TrimSpace(rest)) > 0 && !ctx.firstBlock: - return nil, errors.Errorf("error decoding %s: contains more than one PEM encoded block", ctx.filename) - } - - // PEM is encrypted: ask for password - if block.Headers["Proc-Type"] == "4,ENCRYPTED" || block.Type == "ENCRYPTED PRIVATE KEY" { - pass, err := ctx.promptPassword() - if err != nil { - return nil, err - } - - block.Bytes, err = DecryptPEMBlock(block, pass) - if err != nil { - return nil, errors.Wrapf(err, "error decrypting %s", ctx.filename) - } - } - - switch block.Type { - case "PUBLIC KEY": - pub, err := x509.ParsePKIXPublicKey(block.Bytes) - return pub, errors.Wrapf(err, "error parsing %s", ctx.filename) - case "RSA PRIVATE KEY": - priv, err := x509.ParsePKCS1PrivateKey(block.Bytes) - return priv, errors.Wrapf(err, "error parsing %s", ctx.filename) - case "EC PRIVATE KEY": - priv, err := x509.ParseECPrivateKey(block.Bytes) - return priv, errors.Wrapf(err, "error parsing %s", ctx.filename) - case "PRIVATE KEY", "ENCRYPTED PRIVATE KEY": - priv, err := x509.ParsePKCS8PrivateKey(block.Bytes) - return priv, errors.Wrapf(err, "error parsing %s", ctx.filename) - case "OPENSSH PRIVATE KEY": - priv, err := ParseOpenSSHPrivateKey(b, withContext(ctx)) - return priv, errors.Wrapf(err, "error parsing %s", ctx.filename) - case "CERTIFICATE": - crt, err := x509.ParseCertificate(block.Bytes) - return crt, errors.Wrapf(err, "error parsing %s", ctx.filename) - case "CERTIFICATE REQUEST", "NEW 
CERTIFICATE REQUEST": - csr, err := x509.ParseCertificateRequest(block.Bytes) - return csr, errors.Wrapf(err, "error parsing %s", ctx.filename) - case "ENCRYPTED COSIGN PRIVATE KEY": - pass, err := ctx.promptPassword() - if err != nil { - return nil, err - } - priv, err := ParseCosignPrivateKey(block.Bytes, pass) - return priv, errors.Wrapf(err, "error parsing %s", ctx.filename) - case "NEBULA X25519 PUBLIC KEY": - if len(block.Bytes) != x25519.PublicKeySize { - return nil, errors.Errorf("error parsing %s: key is not 32 bytes", ctx.filename) - } - return x25519.PublicKey(block.Bytes), nil - case "NEBULA X25519 PRIVATE KEY": - if len(block.Bytes) != x25519.PrivateKeySize { - return nil, errors.Errorf("error parsing %s: key is not 32 bytes", ctx.filename) - } - return x25519.PrivateKey(block.Bytes), nil - default: - return nil, errors.Errorf("error decoding %s: contains an unexpected header '%s'", ctx.filename, block.Type) - } -} - -// ParseKey returns the key or the public key of a certificate or certificate -// signing request in the given PEM-encoded bytes. -func ParseKey(b []byte, opts ...Options) (interface{}, error) { - k, err := Parse(b, opts...) - if err != nil { - return nil, err - } - return keyutil.ExtractKey(k) -} - -// Read returns the key or certificate encoded in the given PEM file. -// If the file is encrypted it will ask for a password and it will try -// to decrypt it. -// -// Supported keys algorithms are RSA and EC. Supported standards for private -// keys are PKCS#1, PKCS#8, RFC5915 for EC, and base64-encoded DER for -// certificates and public keys. -func Read(filename string, opts ...Options) (interface{}, error) { - b, err := utils.ReadFile(filename) - if err != nil { - return nil, err - } - - // force given filename - opts = append(opts, WithFilename(filename)) - return Parse(b, opts...) -} - -// Serialize will serialize the input to a PEM formatted block and apply -// modifiers. 
-func Serialize(in interface{}, opts ...Options) (*pem.Block, error) { - ctx := new(context) - if err := ctx.apply(opts); err != nil { - return nil, err - } - - var p *pem.Block - var isPrivateKey bool - switch k := in.(type) { - case *rsa.PublicKey, *ecdsa.PublicKey, ed25519.PublicKey: - b, err := x509.MarshalPKIXPublicKey(k) - if err != nil { - return nil, errors.WithStack(err) - } - p = &pem.Block{ - Type: "PUBLIC KEY", - Bytes: b, - } - case *rsa.PrivateKey: - isPrivateKey = true - switch { - case ctx.pkcs8: - b, err := x509.MarshalPKCS8PrivateKey(k) - if err != nil { - return nil, err - } - p = &pem.Block{ - Type: "PRIVATE KEY", - Bytes: b, - } - case ctx.openSSH: - return SerializeOpenSSHPrivateKey(k, withContext(ctx)) - default: - p = &pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: x509.MarshalPKCS1PrivateKey(k), - } - } - case *ecdsa.PrivateKey: - isPrivateKey = true - switch { - case ctx.pkcs8: - b, err := x509.MarshalPKCS8PrivateKey(k) - if err != nil { - return nil, err - } - p = &pem.Block{ - Type: "PRIVATE KEY", - Bytes: b, - } - case ctx.openSSH: - return SerializeOpenSSHPrivateKey(k, withContext(ctx)) - default: - b, err := x509.MarshalECPrivateKey(k) - if err != nil { - return nil, errors.Wrap(err, "failed to marshal private key") - } - p = &pem.Block{ - Type: "EC PRIVATE KEY", - Bytes: b, - } - } - case ed25519.PrivateKey: - isPrivateKey = true - switch { - case !ctx.pkcs8 && ctx.openSSH: - return SerializeOpenSSHPrivateKey(k, withContext(ctx)) - default: // Ed25519 keys will use pkcs8 by default - ctx.pkcs8 = true - b, err := x509.MarshalPKCS8PrivateKey(k) - if err != nil { - return nil, err - } - p = &pem.Block{ - Type: "PRIVATE KEY", - Bytes: b, - } - } - case *x509.Certificate: - p = &pem.Block{ - Type: "CERTIFICATE", - Bytes: k.Raw, - } - case *x509.CertificateRequest: - p = &pem.Block{ - Type: "CERTIFICATE REQUEST", - Bytes: k.Raw, - } - default: - return nil, errors.Errorf("cannot serialize type '%T', value '%v'", k, k) - } - - if isPrivateKey { - // Request password if needed. - password, err := ctx.promptEncryptPassword() - if err != nil { - return nil, err - } - - // Apply options on the PEM blocks. - if password != nil { - if ctx.pkcs8 { - var err error - p, err = EncryptPKCS8PrivateKey(rand.Reader, p.Bytes, password, DefaultEncCipher) - if err != nil { - return nil, err - } - } else { - var err error - p, err = x509.EncryptPEMBlock(rand.Reader, p.Type, p.Bytes, password, DefaultEncCipher) - if err != nil { - return nil, errors.Wrap(err, "failed to serialize to PEM") - } - } - } - } - - if ctx.filename != "" { - if err := WriteFile(ctx.filename, pem.EncodeToMemory(p), ctx.perm); err != nil { - return nil, err - } - } - - return p, nil -} - -// ParseDER parses the given DER-encoded bytes and results the public or private -// key encoded. -func ParseDER(b []byte) (interface{}, error) { - // Try private keys - key, err := x509.ParsePKCS8PrivateKey(b) - if err != nil { - if key, err = x509.ParseECPrivateKey(b); err != nil { - key, err = x509.ParsePKCS1PrivateKey(b) - } - } - - // Try public key - if err != nil { - if key, err = x509.ParsePKIXPublicKey(b); err != nil { - if key, err = x509.ParsePKCS1PublicKey(b); err != nil { - return nil, errors.New("error decoding DER; bad format") - } - } - } - - return key, nil -} - -// ParseSSH parses parses a public key from an authorized_keys file used in -// OpenSSH according to the sshd(8) manual page. 
-func ParseSSH(b []byte) (interface{}, error) { - key, _, _, _, err := ssh.ParseAuthorizedKey(b) - if err != nil { - return nil, errors.Wrap(err, "error parsing OpenSSH key") - } - - if cert, ok := key.(*ssh.Certificate); ok { - key = cert.Key - } - - switch key.Type() { - case ssh.KeyAlgoRSA: - var w struct { - Name string - E *big.Int - N *big.Int - } - if err := ssh.Unmarshal(key.Marshal(), &w); err != nil { - return nil, errors.Wrap(err, "error unmarshaling key") - } - - if w.E.BitLen() > 24 { - return nil, errors.New("error unmarshaling key: exponent too large") - } - e := w.E.Int64() - if e < 3 || e&1 == 0 { - return nil, errors.New("error unmarshaling key: incorrect exponent") - } - - key := new(rsa.PublicKey) - key.E = int(e) - key.N = w.N - return key, nil - - case ssh.KeyAlgoECDSA256, ssh.KeyAlgoECDSA384, ssh.KeyAlgoECDSA521: - var w struct { - Name string - ID string - KeyBytes []byte - } - if err := ssh.Unmarshal(key.Marshal(), &w); err != nil { - return nil, errors.Wrap(err, "error unmarshaling key") - } - - var c ecdh.Curve - switch w.Name { - case ssh.KeyAlgoECDSA256: - c = ecdh.P256() - case ssh.KeyAlgoECDSA384: - c = ecdh.P384() - case ssh.KeyAlgoECDSA521: - c = ecdh.P521() - default: - return nil, errors.Errorf("unsupported ecdsa curve %s", w.Name) - } - - var p *ecdh.PublicKey - if p, err = c.NewPublicKey(w.KeyBytes); err != nil { - return nil, errors.Wrapf(err, "failed decoding %s key", w.Name) - } - - // convert ECDH public key to ECDSA public key to keep - // the returned type backwards compatible. - rawKey := p.Bytes() - switch p.Curve() { - case ecdh.P256(): - return &ecdsa.PublicKey{ - Curve: elliptic.P256(), - X: big.NewInt(0).SetBytes(rawKey[1:33]), - Y: big.NewInt(0).SetBytes(rawKey[33:]), - }, nil - case ecdh.P384(): - return &ecdsa.PublicKey{ - Curve: elliptic.P384(), - X: big.NewInt(0).SetBytes(rawKey[1:49]), - Y: big.NewInt(0).SetBytes(rawKey[49:]), - }, nil - case ecdh.P521(): - return &ecdsa.PublicKey{ - Curve: elliptic.P521(), - X: big.NewInt(0).SetBytes(rawKey[1:67]), - Y: big.NewInt(0).SetBytes(rawKey[67:]), - }, nil - default: - return nil, errors.New("cannot convert non-NIST *ecdh.PublicKey to *ecdsa.PublicKey") - } - case ssh.KeyAlgoED25519: - var w struct { - Name string - KeyBytes []byte - } - if err := ssh.Unmarshal(key.Marshal(), &w); err != nil { - return nil, errors.Wrap(err, "error unmarshaling key") - } - return ed25519.PublicKey(w.KeyBytes), nil - case ssh.KeyAlgoDSA: - return nil, errors.Errorf("DSA keys not supported") - default: - return nil, errors.Errorf("unsupported key type %T", key) - } -} - -// BundleCertificate adds PEM-encoded certificates to a PEM-encoded certificate -// bundle if not already in the bundle. -func BundleCertificate(bundlePEM []byte, certsPEM ...[]byte) ([]byte, bool, error) { - bundle, err := ParseCertificateBundle(bundlePEM) - if err != nil { - return nil, false, fmt.Errorf("invalid bundle: %w", err) - } - - sums := make(map[[sha256.Size224]byte]bool, len(bundle)+len(certsPEM)) - for i := range bundle { - sums[sha256.Sum224(bundle[i].Raw)] = true - } - - modified := false - - for i := range certsPEM { - cert, err := ParseCertificate(certsPEM[i]) - if err != nil { - return nil, false, fmt.Errorf("invalid certificate %d: %w", i, err) - } - certSum := sha256.Sum224(cert.Raw) - if sums[certSum] { - continue - } - sums[certSum] = true - bundlePEM = append(bundlePEM, certsPEM[i]...) 
- modified = true - } - - return bundlePEM, modified, nil -} - -// UnbundleCertificate removes PEM-encoded certificates from a PEM-encoded -// certificate bundle. -func UnbundleCertificate(bundlePEM []byte, certsPEM ...[]byte) ([]byte, bool, error) { - if len(certsPEM) == 0 { - return bundlePEM, false, nil - } - drop := make(map[[sha256.Size224]byte]bool, len(certsPEM)) - for i := range certsPEM { - certs, err := ParseCertificateBundle(certsPEM[i]) - if err != nil { - return nil, false, fmt.Errorf("invalid certificate %d: %w", i, err) - } - for _, cert := range certs { - drop[sha256.Sum224(cert.Raw)] = true - } - } - - var modified bool - var keep []byte - - bundle, err := ParseCertificateBundle(bundlePEM) - if err != nil { - return nil, false, fmt.Errorf("invalid bundle: %w", err) - } - for _, cert := range bundle { - sum := sha256.Sum224(cert.Raw) - if drop[sum] { - modified = true - continue - } - keep = append(keep, pem.EncodeToMemory(&pem.Block{ - Type: "CERTIFICATE", - Bytes: cert.Raw, - })...) - } - - return keep, modified, nil -} diff --git a/vendor/go.step.sm/crypto/pemutil/pkcs8.go b/vendor/go.step.sm/crypto/pemutil/pkcs8.go deleted file mode 100644 index fb6c96c295..0000000000 --- a/vendor/go.step.sm/crypto/pemutil/pkcs8.go +++ /dev/null @@ -1,353 +0,0 @@ -package pemutil - -import ( - "crypto/aes" - "crypto/cipher" - "crypto/des" //nolint:gosec // support for legacy keys - "crypto/sha1" //nolint:gosec // support for legacy keys - "crypto/sha256" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/pem" - "hash" - "io" - - "github.com/pkg/errors" - "golang.org/x/crypto/pbkdf2" -) - -// PBKDF2SaltSize is the default size of the salt for PBKDF2, 128-bit salt. -const PBKDF2SaltSize = 16 - -// PBKDF2Iterations is the default number of iterations for PBKDF2. -// -// 600k is the current OWASP recommendation (Dec 2022) -// https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html#pbkdf2 -// -// Nist recommends at least 10k (800-63B), 1Password increased in 2023 the -// number of iterations from 100k to 650k. -const PBKDF2Iterations = 600000 - -// pkcs8 reflects an ASN.1, PKCS#8 PrivateKey. See -// ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-8/pkcs-8v1_2.asn -// and RFC 5208. -type pkcs8 struct { - Version int - Algo pkix.AlgorithmIdentifier - PrivateKey []byte - // optional attributes omitted. 
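[Reviewer aside, on the BundleCertificate/UnbundleCertificate pair that closed pem.go above] Both deduplicate by the SHA-224 of each certificate's DER bytes, so re-adding a certificate that is already present is a no-op; the boolean result reports whether the bundle actually changed. A sketch:

```go
import "go.step.sm/crypto/pemutil"

func updateTrustBundle(bundlePEM, certPEM []byte) ([]byte, error) {
	// Adding an already-present certificate reports modified == false.
	out, modified, err := pemutil.BundleCertificate(bundlePEM, certPEM)
	if err != nil {
		return nil, err
	}
	if !modified {
		return bundlePEM, nil
	}
	return out, nil
}
```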
-} - -type publicKeyInfo struct { - Raw asn1.RawContent - Algo pkix.AlgorithmIdentifier - PublicKey asn1.BitString -} - -// Encrypted pkcs8 -// Based on https://github.com/youmark/pkcs8 -// MIT license -type prfParam struct { - Algo asn1.ObjectIdentifier - NullParam asn1.RawValue -} - -type pbkdf2Params struct { - Salt []byte - IterationCount int - PrfParam prfParam `asn1:"optional"` -} - -type pbkdf2Algorithms struct { - Algo asn1.ObjectIdentifier - PBKDF2Params pbkdf2Params -} - -type pbkdf2Encs struct { - EncryAlgo asn1.ObjectIdentifier - IV []byte -} - -type pbes2Params struct { - KeyDerivationFunc pbkdf2Algorithms - EncryptionScheme pbkdf2Encs -} - -type encryptedlAlgorithmIdentifier struct { - Algorithm asn1.ObjectIdentifier - Parameters pbes2Params -} - -type encryptedPrivateKeyInfo struct { - Algo encryptedlAlgorithmIdentifier - PrivateKey []byte -} - -var ( - // key derivation functions - oidPKCS5PBKDF2 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 12} - oidPBES2 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 13} - oidHMACWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 113549, 2, 9} - - // encryption - oidAES128CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 2} - oidAES192CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 22} - oidAES256CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42} - oidDESCBC = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 7} - oidD3DESCBC = asn1.ObjectIdentifier{1, 2, 840, 113549, 3, 7} -) - -// rfc1423Algo holds a method for enciphering a PEM block. -type rfc1423Algo struct { - cipher x509.PEMCipher - name string - cipherFunc func(key []byte) (cipher.Block, error) - keySize int - blockSize int - identifier asn1.ObjectIdentifier -} - -// rfc1423Algos holds a slice of the possible ways to encrypt a PEM -// block. The ivSize numbers were taken from the OpenSSL source. -var rfc1423Algos = []rfc1423Algo{{ - cipher: x509.PEMCipherDES, - name: "DES-CBC", - cipherFunc: des.NewCipher, - keySize: 8, - blockSize: des.BlockSize, - identifier: oidDESCBC, -}, { - cipher: x509.PEMCipher3DES, - name: "DES-EDE3-CBC", - cipherFunc: des.NewTripleDESCipher, - keySize: 24, - blockSize: des.BlockSize, - identifier: oidD3DESCBC, -}, { - cipher: x509.PEMCipherAES128, - name: "AES-128-CBC", - cipherFunc: aes.NewCipher, - keySize: 16, - blockSize: aes.BlockSize, - identifier: oidAES128CBC, -}, { - cipher: x509.PEMCipherAES192, - name: "AES-192-CBC", - cipherFunc: aes.NewCipher, - keySize: 24, - blockSize: aes.BlockSize, - identifier: oidAES192CBC, -}, { - cipher: x509.PEMCipherAES256, - name: "AES-256-CBC", - cipherFunc: aes.NewCipher, - keySize: 32, - blockSize: aes.BlockSize, - identifier: oidAES256CBC, -}, -} - -func cipherByKey(key x509.PEMCipher) *rfc1423Algo { - for i := range rfc1423Algos { - alg := &rfc1423Algos[i] - if alg.cipher == key { - return alg - } - } - return nil -} - -// deriveKey uses a key derivation function to stretch the password into a key -// with the number of bits our cipher requires. This algorithm was derived from -// the OpenSSL source. -func (c rfc1423Algo) deriveKey(password, salt []byte, h func() hash.Hash) []byte { - return pbkdf2.Key(password, salt, PBKDF2Iterations, c.keySize, h) -} - -// DecryptPEMBlock takes a password encrypted PEM block and the password used -// to encrypt it and returns a slice of decrypted DER encoded bytes. -// -// If the PEM blocks has the Proc-Type header set to "4,ENCRYPTED" it uses -// x509.DecryptPEMBlock to decrypt the block. 
If not it tries to decrypt the -// block using AES-128-CBC, AES-192-CBC, AES-256-CBC, DES, or 3DES using the -// key derived using PBKDF2 over the given password. -func DecryptPEMBlock(block *pem.Block, password []byte) ([]byte, error) { - if block.Headers["Proc-Type"] == "4,ENCRYPTED" { - return x509.DecryptPEMBlock(block, password) - } - - // PKCS#8 header defined in RFC7468 section 11 - if block.Type == "ENCRYPTED PRIVATE KEY" { - return DecryptPKCS8PrivateKey(block.Bytes, password) - } - - return nil, errors.New("unsupported encrypted PEM") -} - -// DecryptPKCS8PrivateKey takes a password encrypted private key using the -// PKCS#8 encoding and returns the decrypted data in PKCS#8 form. If an -// incorrect password is detected an x509.IncorrectPasswordError is returned. -// Because of deficiencies in the format, it's not always possible to detect an -// incorrect password. In these cases no error will be returned but the -// decrypted DER bytes will be random noise. -// -// It supports AES-128-CBC, AES-192-CBC, AES-256-CBC, DES, or 3DES encrypted -// data using the key derived with PBKDF2 over the given password. -func DecryptPKCS8PrivateKey(data, password []byte) ([]byte, error) { - var pki encryptedPrivateKeyInfo - if _, err := asn1.Unmarshal(data, &pki); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal private key") - } - - if !pki.Algo.Algorithm.Equal(oidPBES2) { - return nil, errors.New("unsupported encrypted PEM: only PBES2 is supported") - } - - if !pki.Algo.Parameters.KeyDerivationFunc.Algo.Equal(oidPKCS5PBKDF2) { - return nil, errors.New("unsupported encrypted PEM: only PBKDF2 is supported") - } - - encParam := pki.Algo.Parameters.EncryptionScheme - kdfParam := pki.Algo.Parameters.KeyDerivationFunc.PBKDF2Params - - iv := encParam.IV - salt := kdfParam.Salt - iter := kdfParam.IterationCount - - // pbkdf2 hash function - keyHash := sha1.New - if kdfParam.PrfParam.Algo.Equal(oidHMACWithSHA256) { - keyHash = sha256.New - } - - var symkey []byte - var block cipher.Block - var err error - switch { - // AES-128-CBC, AES-192-CBC, AES-256-CBC - case encParam.EncryAlgo.Equal(oidAES128CBC): - symkey = pbkdf2.Key(password, salt, iter, 16, keyHash) - block, err = aes.NewCipher(symkey) - case encParam.EncryAlgo.Equal(oidAES192CBC): - symkey = pbkdf2.Key(password, salt, iter, 24, keyHash) - block, err = aes.NewCipher(symkey) - case encParam.EncryAlgo.Equal(oidAES256CBC): - symkey = pbkdf2.Key(password, salt, iter, 32, keyHash) - block, err = aes.NewCipher(symkey) - // DES, TripleDES - case encParam.EncryAlgo.Equal(oidDESCBC): - symkey = pbkdf2.Key(password, salt, iter, 8, keyHash) - block, err = des.NewCipher(symkey) //nolint:gosec // support for legacy keys - case encParam.EncryAlgo.Equal(oidD3DESCBC): - symkey = pbkdf2.Key(password, salt, iter, 24, keyHash) - block, err = des.NewTripleDESCipher(symkey) //nolint:gosec // support for legacy keys - default: - return nil, errors.Errorf("unsupported encrypted PEM: unknown algorithm %v", encParam.EncryAlgo) - } - if err != nil { - return nil, err - } - - data = pki.PrivateKey - mode := cipher.NewCBCDecrypter(block, iv) - mode.CryptBlocks(data, data) - - // Blocks are padded using a scheme where the last n bytes of padding are all - // equal to n. It can pad from 1 to blocksize bytes inclusive. See RFC 1423. - // For example: - // [x y z 2 2] - // [x y 7 7 7 7 7 7 7] - // If we detect a bad padding, we assume it is an invalid password. 
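[Reviewer aside] The PBES2 path above boils down to: PBKDF2 (SHA-1 by default, SHA-256 when the prf parameter says so) stretches the password, then CBC decrypts and strips RFC 1423 padding — the hunk continues below with exactly that padding check. The derivation step in isolation, under the defaults this file encrypts with (600k iterations, AES-256-CBC):

```go
import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/sha256"

	"golang.org/x/crypto/pbkdf2"
)

// decryptPBES2AES256 sketches the core of DecryptPKCS8PrivateKey for the
// AES-256-CBC + HMAC-SHA256 case. salt, iv, and ciphertext come from the
// parsed encryptedPrivateKeyInfo; iv must be one AES block (16 bytes) and
// ciphertext a multiple of it, or CryptBlocks panics.
func decryptPBES2AES256(password, salt, iv, ciphertext []byte, iter int) ([]byte, error) {
	key := pbkdf2.Key(password, salt, iter, 32, sha256.New) // 32 bytes for AES-256
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	out := make([]byte, len(ciphertext))
	cipher.NewCBCDecrypter(block, iv).CryptBlocks(out, ciphertext)
	return out, nil // caller must still validate and strip the RFC 1423 padding
}
```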
- blockSize := block.BlockSize() - dlen := len(data) - if dlen == 0 || dlen%blockSize != 0 { - return nil, errors.New("error decrypting PEM: invalid padding") - } - - last := int(data[dlen-1]) - if dlen < last { - return nil, x509.IncorrectPasswordError - } - if last == 0 || last > blockSize { - return nil, x509.IncorrectPasswordError - } - for _, val := range data[dlen-last:] { - if int(val) != last { - return nil, x509.IncorrectPasswordError - } - } - - return data[:dlen-last], nil -} - -// EncryptPKCS8PrivateKey returns a PEM block holding the given PKCS#8 encroded -// private key, encrypted with the specified algorithm and a PBKDF2 derived key -// from the given password. -func EncryptPKCS8PrivateKey(rand io.Reader, data, password []byte, alg x509.PEMCipher) (*pem.Block, error) { - ciph := cipherByKey(alg) - if ciph == nil { - return nil, errors.Errorf("failed to encrypt PEM: unknown algorithm %v", alg) - } - - salt := make([]byte, PBKDF2SaltSize) - if _, err := io.ReadFull(rand, salt); err != nil { - return nil, errors.Wrap(err, "failed to generate salt") - } - iv := make([]byte, ciph.blockSize) - if _, err := io.ReadFull(rand, iv); err != nil { - return nil, errors.Wrap(err, "failed to generate IV") - } - - key := ciph.deriveKey(password, salt, sha256.New) - block, err := ciph.cipherFunc(key) - if err != nil { - return nil, errors.Wrap(err, "failed to create cipher") - } - enc := cipher.NewCBCEncrypter(block, iv) - pad := ciph.blockSize - len(data)%ciph.blockSize - encrypted := make([]byte, len(data), len(data)+pad) - // We could save this copy by encrypting all the whole blocks in - // the data separately, but it doesn't seem worth the additional - // code. - copy(encrypted, data) - // See RFC 1423, section 1.1 - for i := 0; i < pad; i++ { - encrypted = append(encrypted, byte(pad)) - } - enc.CryptBlocks(encrypted, encrypted) - - // Build encrypted asn1 data - pki := encryptedPrivateKeyInfo{ - Algo: encryptedlAlgorithmIdentifier{ - Algorithm: oidPBES2, - Parameters: pbes2Params{ - KeyDerivationFunc: pbkdf2Algorithms{ - Algo: oidPKCS5PBKDF2, - PBKDF2Params: pbkdf2Params{ - Salt: salt, - IterationCount: PBKDF2Iterations, - PrfParam: prfParam{ - Algo: oidHMACWithSHA256, - NullParam: asn1.NullRawValue, - }, - }, - }, - EncryptionScheme: pbkdf2Encs{ - EncryAlgo: ciph.identifier, - IV: iv, - }, - }, - }, - PrivateKey: encrypted, - } - - b, err := asn1.Marshal(pki) - if err != nil { - return nil, errors.Wrap(err, "error marshaling encrypted key") - } - return &pem.Block{ - Type: "ENCRYPTED PRIVATE KEY", - Bytes: b, - }, nil -} diff --git a/vendor/go.step.sm/crypto/pemutil/ssh.go b/vendor/go.step.sm/crypto/pemutil/ssh.go deleted file mode 100644 index 00698dae19..0000000000 --- a/vendor/go.step.sm/crypto/pemutil/ssh.go +++ /dev/null @@ -1,299 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package pemutil - -import ( - "crypto" - "crypto/aes" - "crypto/cipher" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/rand" - "crypto/rsa" - "encoding/binary" - "encoding/pem" - "math/big" - - "github.com/pkg/errors" - bcryptpbkdf "go.step.sm/crypto/internal/bcrypt_pbkdf" - "go.step.sm/crypto/randutil" - "golang.org/x/crypto/ssh" -) - -const ( - sshMagic = "openssh-key-v1\x00" - sshDefaultKdf = "bcrypt" - sshDefaultCiphername = "aes256-ctr" - sshDefaultKeyLength = 32 - sshDefaultSaltLength = 16 - sshDefaultRounds = 16 -) - -type openSSHPrivateKey struct { - CipherName string - KdfName string - KdfOpts string - NumKeys uint32 - PubKey []byte - PrivKeyBlock []byte -} - -type openSSHPrivateKeyBlock struct { - Check1 uint32 - Check2 uint32 - Keytype string - Rest []byte `ssh:"rest"` -} - -// ParseOpenSSHPrivateKey parses a private key in OpenSSH PEM format. -// -// Implemented based on the documentation at -// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key -// -// This method is based on the implementation at -// https://github.com/golang/crypto/blob/master/ssh/keys.go -func ParseOpenSSHPrivateKey(pemBytes []byte, opts ...Options) (crypto.PrivateKey, error) { - // Populate options - ctx := newContext("PEM") - if err := ctx.apply(opts); err != nil { - return nil, err - } - - block, _ := pem.Decode(pemBytes) - if block == nil { - return nil, errors.Errorf("error decoding %s: not a valid PEM encoded block", ctx.filename) - } - - if len(block.Bytes) < len(sshMagic) || string(block.Bytes[:len(sshMagic)]) != sshMagic { - return nil, errors.New("invalid openssh private key format") - } - remaining := block.Bytes[len(sshMagic):] - - var w openSSHPrivateKey - if err := ssh.Unmarshal(remaining, &w); err != nil { - return nil, errors.Wrap(err, "error unmarshaling private key") - } - - var err error - var key crypto.PrivateKey - if w.KdfName != "none" || w.CipherName != "none" { - password, err := ctx.promptPassword() - if err != nil { - return nil, err - } - key, err = ssh.ParseRawPrivateKeyWithPassphrase(pemBytes, password) - if err != nil { - return nil, errors.Wrap(err, "error parsing private key") - } - } else { - key, err = ssh.ParseRawPrivateKey(pemBytes) - if err != nil { - return nil, errors.Wrap(err, "error parsing private key") - } - } - - // Convert *ed25519.PrivateKey to ed25519.PrivateKey: - switch k := key.(type) { - case *ed25519.PrivateKey: - return *k, nil - default: - return k, nil - } -} - -// SerializeOpenSSHPrivateKey serialize a private key in the OpenSSH PEM format. -func SerializeOpenSSHPrivateKey(key crypto.PrivateKey, opts ...Options) (*pem.Block, error) { - ctx := new(context) - if err := ctx.apply(opts); err != nil { - return nil, err - } - - // Random check bytes. - var check uint32 - if err := binary.Read(rand.Reader, binary.BigEndian, &check); err != nil { - return nil, errors.Wrap(err, "error generating random check ") - } - - w := openSSHPrivateKey{ - NumKeys: 1, - } - pk1 := openSSHPrivateKeyBlock{ - Check1: check, - Check2: check, - } - - password, err := ctx.promptEncryptPassword() - if err != nil { - return nil, err - } - - var blockSize int - if password == nil { - w.CipherName = "none" - w.KdfName = "none" - blockSize = 8 - } else { - w.CipherName = sshDefaultCiphername - w.KdfName = sshDefaultKdf - blockSize = aes.BlockSize - } - - switch k := key.(type) { - case *rsa.PrivateKey: - e := new(big.Int).SetInt64(int64(k.PublicKey.E)) - // Marshal public key: - // E and N are in reversed order in the public and private key. 
-
-// SerializeOpenSSHPrivateKey serializes a private key in the OpenSSH PEM format.
-func SerializeOpenSSHPrivateKey(key crypto.PrivateKey, opts ...Options) (*pem.Block, error) {
-	ctx := new(context)
-	if err := ctx.apply(opts); err != nil {
-		return nil, err
-	}
-
-	// Random check bytes.
-	var check uint32
-	if err := binary.Read(rand.Reader, binary.BigEndian, &check); err != nil {
-		return nil, errors.Wrap(err, "error generating random check ")
-	}
-
-	w := openSSHPrivateKey{
-		NumKeys: 1,
-	}
-	pk1 := openSSHPrivateKeyBlock{
-		Check1: check,
-		Check2: check,
-	}
-
-	password, err := ctx.promptEncryptPassword()
-	if err != nil {
-		return nil, err
-	}
-
-	var blockSize int
-	if password == nil {
-		w.CipherName = "none"
-		w.KdfName = "none"
-		blockSize = 8
-	} else {
-		w.CipherName = sshDefaultCiphername
-		w.KdfName = sshDefaultKdf
-		blockSize = aes.BlockSize
-	}
-
-	switch k := key.(type) {
-	case *rsa.PrivateKey:
-		e := new(big.Int).SetInt64(int64(k.PublicKey.E))
-		// Marshal public key:
-		// E and N are in reversed order in the public and private key.
-		pubKey := struct {
-			KeyType string
-			E       *big.Int
-			N       *big.Int
-		}{
-			ssh.KeyAlgoRSA,
-			e, k.PublicKey.N,
-		}
-		w.PubKey = ssh.Marshal(pubKey)
-
-		// Marshal private key.
-		key := struct {
-			N       *big.Int
-			E       *big.Int
-			D       *big.Int
-			Iqmp    *big.Int
-			P       *big.Int
-			Q       *big.Int
-			Comment string
-		}{
-			k.PublicKey.N, e,
-			k.D, k.Precomputed.Qinv, k.Primes[0], k.Primes[1],
-			ctx.comment,
-		}
-		pk1.Keytype = ssh.KeyAlgoRSA
-		pk1.Rest = ssh.Marshal(key)
-	case *ecdsa.PrivateKey:
-		var curve, keyType string
-		switch k.Curve.Params().Name {
-		case "P-256":
-			curve = "nistp256"
-			keyType = ssh.KeyAlgoECDSA256
-		case "P-384":
-			curve = "nistp384"
-			keyType = ssh.KeyAlgoECDSA384
-		case "P-521":
-			curve = "nistp521"
-			keyType = ssh.KeyAlgoECDSA521
-		default:
-			return nil, errors.Errorf("error serializing key: unsupported curve %s", k.Curve.Params().Name)
-		}
-
-		p, err := k.PublicKey.ECDH()
-		if err != nil {
-			return nil, errors.Wrapf(err, "failed converting *ecdsa.PublicKey to *ecdh.PublicKey")
-		}
-
-		// Marshal public key.
-		pubKey := struct {
-			KeyType string
-			Curve   string
-			Pub     []byte
-		}{
-			keyType, curve, p.Bytes(),
-		}
-		w.PubKey = ssh.Marshal(pubKey)
-
-		// Marshal private key.
-		key := struct {
-			Curve   string
-			Pub     []byte
-			D       *big.Int
-			Comment string
-		}{
-			curve, p.Bytes(), k.D,
-			ctx.comment,
-		}
-		pk1.Keytype = keyType
-		pk1.Rest = ssh.Marshal(key)
-	case ed25519.PrivateKey:
-		pub := make([]byte, ed25519.PublicKeySize)
-		priv := make([]byte, ed25519.PrivateKeySize)
-		copy(pub, k[ed25519.PublicKeySize:])
-		copy(priv, k)
-
-		// Marshal public key.
-		pubKey := struct {
-			KeyType string
-			Pub     []byte
-		}{
-			ssh.KeyAlgoED25519, pub,
-		}
-		w.PubKey = ssh.Marshal(pubKey)
-
-		// Marshal private key.
-		key := struct {
-			Pub     []byte
-			Priv    []byte
-			Comment string
-		}{
-			pub, priv,
-			ctx.comment,
-		}
-		pk1.Keytype = ssh.KeyAlgoED25519
-		pk1.Rest = ssh.Marshal(key)
-	default:
-		return nil, errors.Errorf("unsupported key type %T", k)
-	}
-
-	w.PrivKeyBlock = ssh.Marshal(pk1)
-
-	// Add padding until the private key block matches the block size,
-	// 16 with AES encryption, 8 without.
-	for i, l := 0, len(w.PrivKeyBlock); (l+i)%blockSize != 0; i++ {
-		w.PrivKeyBlock = append(w.PrivKeyBlock, byte(i+1))
-	}
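The loop above implements the OpenSSH padding rule from PROTOCOL.key: bytes 1, 2, 3, ... are appended until the block length is a multiple of the cipher block size (8 when unencrypted, aes.BlockSize for aes256-ctr). The same rule in isolation, as a runnable sketch:

    package main

    import (
        "crypto/aes"
        "fmt"
    )

    // pad mirrors the serializer's loop: append 1, 2, 3, ... until the
    // length is a multiple of blockSize.
    func pad(block []byte, blockSize int) []byte {
        for i, l := 0, len(block); (l+i)%blockSize != 0; i++ {
            block = append(block, byte(i+1))
        }
        return block
    }

    func main() {
        b := pad(make([]byte, 13), aes.BlockSize)
        fmt.Println(len(b), b[13:]) // 16 [1 2 3]
    }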
-
-	if password != nil {
-		// Create encryption key derivation from the password.
-		salt, err := randutil.Salt(sshDefaultSaltLength)
-		if err != nil {
-			return nil, err
-		}
-		kdfOpts := struct {
-			Salt   []byte
-			Rounds uint32
-		}{salt, sshDefaultRounds}
-		w.KdfOpts = string(ssh.Marshal(kdfOpts))
-
-		// Derive key to encrypt the private key block.
-		k, err := bcryptpbkdf.Key(password, salt, sshDefaultRounds, sshDefaultKeyLength+aes.BlockSize)
-		if err != nil {
-			return nil, errors.Wrap(err, "error deriving decryption key")
-		}
-
-		// Encrypt the private key using the derived secret.
-		dst := make([]byte, len(w.PrivKeyBlock))
-		iv := k[sshDefaultKeyLength : sshDefaultKeyLength+aes.BlockSize]
-		block, err := aes.NewCipher(k[:sshDefaultKeyLength])
-		if err != nil {
-			return nil, errors.Wrap(err, "error creating cipher")
-		}
-
-		stream := cipher.NewCTR(block, iv)
-		stream.XORKeyStream(dst, w.PrivKeyBlock)
-		w.PrivKeyBlock = dst
-	}
-
-	b := ssh.Marshal(w)
-	block := &pem.Block{
-		Type:  "OPENSSH PRIVATE KEY",
-		Bytes: append([]byte(sshMagic), b...),
-	}
-
-	if ctx.filename != "" {
-		if err := WriteFile(ctx.filename, pem.EncodeToMemory(block), ctx.perm); err != nil {
-			return nil, err
-		}
-	}
-
-	return block, nil
-}
diff --git a/vendor/go.step.sm/crypto/randutil/random.go b/vendor/go.step.sm/crypto/randutil/random.go
deleted file mode 100644
index dce7931b18..0000000000
--- a/vendor/go.step.sm/crypto/randutil/random.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Package randutil provides methods to generate random strings and salts.
-package randutil
-
-import (
-	"crypto/rand"
-	"encoding/hex"
-	"io"
-	"math/big"
-
-	"github.com/pkg/errors"
-)
-
-var ascii string
-
-func init() {
-	// initialize the characters in ascii
-	asciiBytes := make([]byte, 94)
-	for i := range asciiBytes {
-		asciiBytes[i] = byte(i + 33)
-	}
-	ascii = string(asciiBytes)
-}
-
-// Salt generates a new random salt of the given size.
-func Salt(size int) ([]byte, error) {
-	salt := make([]byte, size)
-	_, err := io.ReadFull(rand.Reader, salt)
-	if err != nil {
-		return nil, errors.Wrap(err, "error generating salt")
-	}
-	return salt, nil
-}
-
-// Bytes generates a new byte slice of the given size.
-func Bytes(size int) ([]byte, error) {
-	bytes := make([]byte, size)
-	_, err := io.ReadFull(rand.Reader, bytes)
-	if err != nil {
-		return nil, errors.Wrap(err, "error generating bytes")
-	}
-	return bytes, nil
-}
-
-// String returns a random string of a given length using the characters in
-// the given string. It splits the string on runes to support UTF-8
-// characters.
-func String(length int, chars string) (string, error) {
-	result := make([]rune, length)
-	runes := []rune(chars)
-	x := int64(len(runes))
-	for i := range result {
-		num, err := rand.Int(rand.Reader, big.NewInt(x))
-		if err != nil {
-			return "", errors.Wrap(err, "error creating random number")
-		}
-		result[i] = runes[num.Int64()]
-	}
-	return string(result), nil
-}
-
-// Hex returns a random string of the given length using the hexadecimal
-// characters in lower case (0-9+a-f).
-func Hex(length int) (string, error) {
-	return String(length, "0123456789abcdef")
-}
-
-// Alphanumeric returns a random string of the given length using the 62
-// alphanumeric characters in the POSIX/C locale (a-z+A-Z+0-9).
-func Alphanumeric(length int) (string, error) {
-	return String(length, "abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ")
-}
-
-// ASCII returns a securely generated random ASCII string. It reads random
-// numbers from crypto/rand and searches for printable characters. It will
-// return an error if the system's secure random number generator fails to
-// function correctly, in which case the caller must not continue.
-func ASCII(length int) (string, error) {
-	return String(length, ascii)
-}
-
-// Alphabet returns a random string of the given length using the 52
-// alphabetic characters in the POSIX/C locale (a-z+A-Z).
-func Alphabet(length int) (string, error) {
-	return String(length, "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
-}
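Because String draws every character independently with crypto/rand.Int, the helpers above sample uniformly with no modulo bias. A minimal usage sketch for the package being removed:

    package main

    import (
        "fmt"

        "go.step.sm/crypto/randutil"
    )

    func main() {
        // 32 characters, each chosen uniformly from the 62 alphanumerics.
        token, err := randutil.Alphanumeric(32)
        if err != nil {
            panic(err)
        }
        fmt.Println(token)
    }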
-
-// UUIDv4 returns the string representation of a UUID version 4. Because 6 bits
-// are used to indicate the version 4 and the variant 10, the randomly generated
-// part has 122 bits.
-func UUIDv4() (string, error) {
-	var uuid [16]byte
-	_, err := io.ReadFull(rand.Reader, uuid[:])
-	if err != nil {
-		return "", errors.Wrap(err, "error generating uuid")
-	}
-	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
-	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
-	return encodeUUID(uuid), nil
-}
-
-func encodeUUID(uuid [16]byte) string {
-	buf := make([]byte, 36)
-	hex.Encode(buf, uuid[:4])
-	buf[8] = '-'
-	hex.Encode(buf[9:13], uuid[4:6])
-	buf[13] = '-'
-	hex.Encode(buf[14:18], uuid[6:8])
-	buf[18] = '-'
-	hex.Encode(buf[19:23], uuid[8:10])
-	buf[23] = '-'
-	hex.Encode(buf[24:], uuid[10:])
-	return string(buf)
-}
diff --git a/vendor/go.step.sm/crypto/x25519/x25519.go b/vendor/go.step.sm/crypto/x25519/x25519.go
deleted file mode 100644
index c6d239bfe2..0000000000
--- a/vendor/go.step.sm/crypto/x25519/x25519.go
+++ /dev/null
@@ -1,321 +0,0 @@
-package x25519
-
-import (
-	"bytes"
-	"crypto"
-	"crypto/ed25519"
-	"crypto/sha512"
-	"crypto/subtle"
-	"errors"
-	"io"
-	"strconv"
-
-	"filippo.io/edwards25519"
-	"filippo.io/edwards25519/field"
-	"golang.org/x/crypto/curve25519"
-)
-
-const (
-	// PrivateKeySize is the size in bytes of a X25519 private key.
-	PrivateKeySize = 32
-
-	// PublicKeySize is the size in bytes of a X25519 public key.
-	PublicKeySize = 32
-
-	SignatureSize = 64
-)
-
-var one = (&field.Element{}).One()
-
-// PrivateKey is the type used to represent a X25519 private key.
-type PrivateKey []byte
-
-// PublicKey is the type used to represent a X25519 public key.
-type PublicKey []byte
-
-// GenerateKey generates a public/private key pair using entropy from rand.
-func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
-	priv := make([]byte, PrivateKeySize)
-	if _, err := io.ReadFull(rand, priv); err != nil {
-		return nil, nil, err
-	}
-
-	pub, err := curve25519.X25519(priv, curve25519.Basepoint)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	return pub, priv, err
-}
-
-// ToEd25519 converts the public key p into an ed25519 key.
-//
-// (x, y) = (sqrt(-486664)*u/v, (u-1)/(u+1))
-func (p PublicKey) ToEd25519() (ed25519.PublicKey, error) {
-	a, err := convertMont(p)
-	if err != nil {
-		return nil, err
-	}
-	return a.Bytes(), nil
-}
-
-// Equal reports whether p and x have the same value.
-func (p PublicKey) Equal(x crypto.PublicKey) bool {
-	xx, ok := x.(PublicKey)
-	if !ok {
-		return false
-	}
-	return bytes.Equal(p, xx)
-}
-
-// Public returns the public key using scalar multiplication (scalar * point)
-// using the Curve25519 basepoint. It will return nil if the private key is not
-// a valid one.
-func (p PrivateKey) Public() crypto.PublicKey {
-	pub, _ := p.PublicKey()
-	return pub
-}
-
-// Equal reports whether p and x have the same value.
-func (p PrivateKey) Equal(x crypto.PrivateKey) bool {
-	xx, ok := x.(PrivateKey)
-	if !ok {
-		return false
-	}
-	return bytes.Equal(p, xx)
-}
-
-// PublicKey returns the public key using scalar multiplication (scalar * point)
-// using the Curve25519 basepoint.
-func (p PrivateKey) PublicKey() (PublicKey, error) {
-	pub, err := curve25519.X25519(p, curve25519.Basepoint)
-	if err != nil {
-		return nil, err
-	}
-	return pub, nil
-}
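SharedKey below is plain X25519 Diffie-Hellman: each party multiplies its private scalar by the peer's public point, and both arrive at the same 32-byte secret. A sketch using two freshly generated key pairs (error handling elided for brevity):

    package main

    import (
        "bytes"
        "crypto/rand"
        "fmt"

        "go.step.sm/crypto/x25519"
    )

    func main() {
        alicePub, alicePriv, _ := x25519.GenerateKey(rand.Reader)
        bobPub, bobPriv, _ := x25519.GenerateKey(rand.Reader)

        // Each side combines its own scalar with the peer's public point.
        s1, _ := alicePriv.SharedKey(bobPub)
        s2, _ := bobPriv.SharedKey(alicePub)

        fmt.Println(bytes.Equal(s1, s2)) // true
    }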
-
-// SharedKey returns the result of the scalar multiplication (scalar * point),
-// using the PrivateKey as the scalar value and the given key as the point. Both
-// scalar and point must be slices of 32 bytes.
-func (p PrivateKey) SharedKey(peerPublicKey []byte) ([]byte, error) {
-	sharedKey, err := curve25519.X25519(p, peerPublicKey)
-	if err != nil {
-		return nil, err
-	}
-	return sharedKey, nil
-}
-
-// Sign signs the given message with the private key p and returns a signature.
-//
-// It implements the XEdDSA sign method defined in
-// https://signal.org/docs/specifications/xeddsa/#xeddsa
-//
-// XEdDSA performs two passes over messages to be signed and therefore cannot
-// handle pre-hashed messages. Thus opts.HashFunc() must return zero to indicate
-// the message hasn't been hashed. This can be achieved by passing
-// crypto.Hash(0) as the value for opts.
-func (p PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) {
-	if opts.HashFunc() != crypto.Hash(0) {
-		return nil, errors.New("x25519: cannot sign hashed message")
-	}
-
-	return Sign(rand, p, message)
-}
-
-// Sign signs the message with privateKey and returns a signature. It will panic
-// if len(privateKey) is not PrivateKeySize.
-//
-// It implements the XEdDSA sign method defined in
-// https://signal.org/docs/specifications/xeddsa/#xeddsa
-//
-//	xeddsa_sign(k, M, Z):
-//		A, a = calculate_key_pair(k)
-//		r = hash1(a || M || Z) (mod q)
-//		R = rB
-//		h = hash(R || A || M) (mod q)
-//		s = r + ha (mod q)
-//		return R || s
-func Sign(rand io.Reader, p PrivateKey, message []byte) (signature []byte, err error) {
-	if l := len(p); l != PrivateKeySize {
-		panic("x25519: bad private key length: " + strconv.Itoa(l))
-	}
-
-	pub, priv, err := p.calculateKeyPair()
-	if err != nil {
-		return nil, err
-	}
-
-	random := make([]byte, 64)
-	if _, err := io.ReadFull(rand, random); err != nil {
-		return nil, err
-	}
-
-	// Using the same prefix as in the libsignal-protocol-c implementation, but
-	// it can be any 32 byte prefix. Golang's ed25519 implementation uses:
-	//
-	//	ph := sha512.Sum512(a.Bytes())
-	//	prefix := ph[32:]
-	prefix := [32]byte{
-		0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-	}
-
-	rh := sha512.New()
-	rh.Write(prefix[:])
-	rh.Write(priv.Bytes())
-	rh.Write(message)
-	rh.Write(random)
-	rDigest := make([]byte, 0, sha512.Size)
-	rDigest = rh.Sum(rDigest)
-
-	r, err := edwards25519.NewScalar().SetUniformBytes(rDigest)
-	if err != nil {
-		return nil, err
-	}
-
-	R := (&edwards25519.Point{}).ScalarBaseMult(r) //nolint:gocritic // variable names match crypto formulae docs
-
-	hh := sha512.New()
-	hh.Write(R.Bytes())
-	hh.Write(pub)
-	hh.Write(message)
-	hDigest := make([]byte, 0, sha512.Size)
-	hDigest = hh.Sum(hDigest)
-	h, err := edwards25519.NewScalar().SetUniformBytes(hDigest)
-	if err != nil {
-		return nil, err
-	}
-
-	s := (&edwards25519.Scalar{}).Add(r, h.Multiply(h, priv))
-
-	sig := make([]byte, 64)
-	copy(sig[:32], R.Bytes())
-	copy(sig[32:], s.Bytes())
-	return sig, nil
-}
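Sign above and Verify below therefore form an XEdDSA round trip over a single X25519 key pair; a minimal sketch:

    package main

    import (
        "crypto/rand"
        "fmt"

        "go.step.sm/crypto/x25519"
    )

    func main() {
        pub, priv, err := x25519.GenerateKey(rand.Reader)
        if err != nil {
            panic(err)
        }
        msg := []byte("hello")
        sig, err := x25519.Sign(rand.Reader, priv, msg)
        if err != nil {
            panic(err)
        }
        fmt.Println(x25519.Verify(pub, msg, sig)) // true
    }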
-
-// Verify reports whether sig is a valid signature of message by publicKey. It
-// will panic if len(publicKey) is not PublicKeySize.
-//
-// It implements the XEdDSA verify method defined in
-// https://signal.org/docs/specifications/xeddsa/#xeddsa
-//
-//	xeddsa_verify(u, M, (R || s)):
-//		if u >= p or R.y >= 2|p| or s >= 2|q|:
-//			return false
-//		A = convert_mont(u)
-//		if not on_curve(A):
-//			return false
-//		h = hash(R || A || M) (mod q)
-//		Rcheck = sB - hA
-//		if bytes_equal(R, Rcheck):
-//			return true
-//		return false
-func Verify(publicKey PublicKey, message, sig []byte) bool {
-	// The following code should be equivalent to:
-	//
-	//	pub, err := publicKey.ToEd25519()
-	//	if err != nil {
-	//		return false
-	//	}
-	//	return ed25519.Verify(pub, message, sig)
-
-	if l := len(publicKey); l != PublicKeySize {
-		panic("x25519: bad public key length: " + strconv.Itoa(l))
-	}
-
-	if len(sig) != SignatureSize || sig[63]&0xE0 != 0 {
-		return false
-	}
-
-	a, err := convertMont(publicKey)
-
-	if err != nil {
-		return false
-	}
-
-	hh := sha512.New()
-	hh.Write(sig[:32])
-	hh.Write(a.Bytes())
-	hh.Write(message)
-	hDigest := make([]byte, 0, sha512.Size)
-	hDigest = hh.Sum(hDigest)
-	h, err := edwards25519.NewScalar().SetUniformBytes(hDigest)
-	if err != nil {
-		return false
-	}
-
-	s, err := edwards25519.NewScalar().SetCanonicalBytes(sig[32:])
-	if err != nil {
-		return false
-	}
-
-	minusA := (&edwards25519.Point{}).Negate(a)
-	r := (&edwards25519.Point{}).VarTimeDoubleScalarBaseMult(h, minusA, s)
-	return subtle.ConstantTimeCompare(sig[:32], r.Bytes()) == 1
-}
-
-// calculateKeyPair converts a Montgomery private key k to a twisted Edwards
-// public key and private key (A, a) as defined in
-// https://signal.org/docs/specifications/xeddsa/#elliptic-curve-conversions
-//
-//	calculate_key_pair(k):
-//		E = kB
-//		A.y = E.y
-//		A.s = 0
-//		if E.s == 1:
-//			a = -k (mod q)
-//		else:
-//			a = k (mod q)
-//		return A, a
-func (p PrivateKey) calculateKeyPair() ([]byte, *edwards25519.Scalar, error) {
-	var pA edwards25519.Point
-	var sa edwards25519.Scalar
-
-	k, err := (&edwards25519.Scalar{}).SetBytesWithClamping(p)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	pub := pA.ScalarBaseMult(k).Bytes()
-	signBit := (pub[31] & 0x80) >> 7
-
-	if signBit == 1 {
-		sa.Negate(k)
-		// Set sign bit to 0
-		pub[31] &= 0x7F
-	} else {
-		sa.Set(k)
-	}
-
-	return pub, &sa, nil
-}
-
-// convertMont converts from a Montgomery u-coordinate to a twisted Edwards
-// point P, according to
-// https://signal.org/docs/specifications/xeddsa/#elliptic-curve-conversions
-//
-//	convert_mont(u):
-//		umasked = u (mod 2|p|)
-//		P.y = u_to_y(umasked)
-//		P.s = 0
-//		return P
-func convertMont(u PublicKey) (*edwards25519.Point, error) {
-	um, err := (&field.Element{}).SetBytes(u)
-	if err != nil {
-		return nil, err
-	}
-
-	// y = (u - 1)/(u + 1)
-	a := new(field.Element).Subtract(um, one)
-	b := new(field.Element).Add(um, one)
-	y := new(field.Element).Multiply(a, b.Invert(b)).Bytes()
-
-	// Set sign to 0
-	y[31] &= 0x7F
-
-	return (&edwards25519.Point{}).SetBytes(y)
-}
diff --git a/vendor/golang.org/x/crypto/pkcs12/crypto.go b/vendor/golang.org/x/crypto/pkcs12/crypto.go
index 5ef1aad9f0..212538cb5a 100644
--- a/vendor/golang.org/x/crypto/pkcs12/crypto.go
+++ b/vendor/golang.org/x/crypto/pkcs12/crypto.go
@@ -26,11 +26,7 @@ type pbeCipher interface {
 	create(key []byte) (cipher.Block, error)
 	// deriveKey returns a key derived from the given password and salt.
 	deriveKey(salt, password []byte, iterations int) []byte
-<<<<<<< HEAD
 	// deriveIV returns an IV derived from the given password and salt.
-======= - // deriveKey returns an IV derived from the given password and salt. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) deriveIV(salt, password []byte, iterations int) []byte } diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go index 7ea6410a2f..ca645d9a1a 100644 --- a/vendor/golang.org/x/net/http2/config.go +++ b/vendor/golang.org/x/net/http2/config.go @@ -60,11 +60,7 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config { return conf } -<<<<<<< HEAD // configFromTransport merges configuration settings from h2 and h2.t1.HTTP2 -======= -// configFromServer merges configuration settings from h2 and h2.t1.HTTP2 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // (the net/http Transport). func configFromTransport(h2 *Transport) http2Config { conf := http2Config{ diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go index 49c0ceb03f..5b516c55ff 100644 --- a/vendor/golang.org/x/net/http2/config_go124.go +++ b/vendor/golang.org/x/net/http2/config_go124.go @@ -13,11 +13,7 @@ func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) { fillNetHTTPConfig(conf, srv.HTTP2) } -<<<<<<< HEAD // fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2. -======= -// fillNetHTTPServerConfig sets fields in conf from tr.HTTP2. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) { fillNetHTTPConfig(conf, tr.HTTP2) } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 02c51ae677..b2e2ed3373 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -375,10 +375,7 @@ type ClientConn struct { doNotReuse bool // whether conn is marked to not be reused for any future requests closing bool closed bool -<<<<<<< HEAD closedOnIdle bool // true if conn was closed for idleness -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) seenSettings bool // true if we've seen a settings frame, false otherwise seenSettingsChan chan struct{} // closed when seenSettings is true or frame reading fails wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back @@ -1093,19 +1090,12 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { // If this connection has never been used for a request and is closed, // then let it take a request (which will fail). -<<<<<<< HEAD // If the conn was closed for idleness, we're racing the idle timer; // don't try to use the conn. (Issue #70515.) // // This avoids a situation where an error early in a connection's lifetime // goes unreported. if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed && !cc.closedOnIdle { -======= - // - // This avoids a situation where an error early in a connection's lifetime - // goes unreported. - if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) st.canTakeNewRequest = true } @@ -1168,10 +1158,7 @@ func (cc *ClientConn) closeIfIdle() { return } cc.closed = true -<<<<<<< HEAD cc.closedOnIdle = true -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) nextID := cc.nextStreamID // TODO: do clients send GOAWAY too? maybe? 
Just Close: cc.mu.Unlock() @@ -2451,18 +2438,12 @@ func (rl *clientConnReadLoop) cleanup() { // This avoids a situation where new connections are constantly created, // added to the pool, fail, and are removed from the pool, without any error // being surfaced to the user. -<<<<<<< HEAD unusedWaitTime := 5 * time.Second if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout { unusedWaitTime = cc.idleTimeout } idleTime := cc.t.now().Sub(cc.lastActive) if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle { -======= - const unusedWaitTime = 5 * time.Second - idleTime := cc.t.now().Sub(cc.lastActive) - if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() { cc.t.connPool().MarkDead(cc) }) diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go index 05d5431813..0260935bab 100644 --- a/vendor/golang.org/x/oauth2/google/default.go +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -251,15 +251,12 @@ func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials // a Google Developers service account key file, a gcloud user credentials file (a.k.a. refresh // token JSON), or the JSON configuration file for workload identity federation in non-Google cloud // platforms (see https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation). -<<<<<<< HEAD // // Important: If you accept a credential configuration (credential JSON/File/Stream) from an // external source for authentication to Google Cloud Platform, you must validate it before // providing it to any Google API or library. Providing an unvalidated credential configuration to // Google APIs can compromise the security of your systems and data. For more information, refer to // [Validate credential configurations from external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials). -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params CredentialsParams) (*Credentials, error) { // Make defensive copy of the slices in params. params = params.deepCopy() @@ -303,15 +300,12 @@ func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params } // CredentialsFromJSON invokes CredentialsFromJSONWithParams with the specified scopes. -<<<<<<< HEAD // // Important: If you accept a credential configuration (credential JSON/File/Stream) from an // external source for authentication to Google Cloud Platform, you must validate it before // providing it to any Google API or library. Providing an unvalidated credential configuration to // Google APIs can compromise the security of your systems and data. For more information, refer to // [Validate credential configurations from external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials). 
-======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*Credentials, error) { var params CredentialsParams params.Scopes = scopes diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/aws.go b/vendor/golang.org/x/oauth2/google/externalaccount/aws.go index 178eb8ec88..55d59999e0 100644 --- a/vendor/golang.org/x/oauth2/google/externalaccount/aws.go +++ b/vendor/golang.org/x/oauth2/google/externalaccount/aws.go @@ -28,11 +28,7 @@ import ( // AwsSecurityCredentials models AWS security credentials. type AwsSecurityCredentials struct { -<<<<<<< HEAD // AccessKeyID is the AWS Access Key ID - Required. -======= - // AccessKeyId is the AWS Access Key ID - Required. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) AccessKeyID string `json:"AccessKeyID"` // SecretAccessKey is the AWS Secret Access Key - Required. SecretAccessKey string `json:"SecretAccessKey"` diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go index 9ff57b5f08..fc106347d8 100644 --- a/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go +++ b/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go @@ -278,7 +278,6 @@ type Format struct { type CredentialSource struct { // File is the location for file sourced credentials. // One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question. -<<<<<<< HEAD // // Important: If you accept a credential configuration (credential // JSON/File/Stream) from an external source for authentication to Google @@ -287,13 +286,10 @@ type CredentialSource struct { // Google APIs can compromise the security of your systems and data. For // more information, refer to [Validate credential configurations from // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials). -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) File string `json:"file"` // Url is the URL to call for URL sourced credentials. // One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question. -<<<<<<< HEAD // // Important: If you accept a credential configuration (credential // JSON/File/Stream) from an external source for authentication to Google @@ -302,15 +298,12 @@ type CredentialSource struct { // Google APIs can compromise the security of your systems and data. For // more information, refer to [Validate credential configurations from // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials). -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) URL string `json:"url"` // Headers are the headers to attach to the request for URL sourced credentials. Headers map[string]string `json:"headers"` // Executable is the configuration object for executable sourced credentials. // One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question. -<<<<<<< HEAD // // Important: If you accept a credential configuration (credential // JSON/File/Stream) from an external source for authentication to Google @@ -319,13 +312,10 @@ type CredentialSource struct { // Google APIs can compromise the security of your systems and data. 
For // more information, refer to [Validate credential configurations from // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials). -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Executable *ExecutableConfig `json:"executable"` // EnvironmentID is the EnvironmentID used for AWS sourced credentials. This should start with "AWS". // One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question. -<<<<<<< HEAD // // Important: If you accept a credential configuration (credential // JSON/File/Stream) from an external source for authentication to Google @@ -334,8 +324,6 @@ type CredentialSource struct { // Google APIs can compromise the security of your systems and data. For // more information, refer to [Validate credential configurations from // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials). -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) EnvironmentID string `json:"environment_id"` // RegionURL is the metadata URL to retrieve the region from for EC2 AWS credentials. RegionURL string `json:"region_url"` @@ -373,11 +361,7 @@ type SubjectTokenSupplier interface { type AwsSecurityCredentialsSupplier interface { // AwsRegion should return the AWS region or an error. AwsRegion(ctx context.Context, options SupplierOptions) (string, error) -<<<<<<< HEAD // AwsSecurityCredentials should return a valid set of AwsSecurityCredentials or an error. -======= - // GetAwsSecurityCredentials should return a valid set of AwsSecurityCredentials or an error. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The external account token source does not cache the returned security credentials, so caching // logic should be implemented in the supplier to prevent multiple requests for the same security credentials. AwsSecurityCredentials(ctx context.Context, options SupplierOptions) (*AwsSecurityCredentials, error) diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 9e8fc49eda..74f052aa9f 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -56,11 +56,7 @@ type Config struct { // the OAuth flow, after the resource owner's URLs. RedirectURL string -<<<<<<< HEAD // Scopes specifies optional requested permissions. -======= - // Scope specifies optional requested permissions. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Scopes []string // authStyleCache caches which auth style to use when Endpoint.AuthStyle is diff --git a/vendor/golang.org/x/tools/go/analysis/analysis.go b/vendor/golang.org/x/tools/go/analysis/analysis.go index 0f1686bec4..3a73084a53 100644 --- a/vendor/golang.org/x/tools/go/analysis/analysis.go +++ b/vendor/golang.org/x/tools/go/analysis/analysis.go @@ -50,11 +50,7 @@ type Analyzer struct { // RunDespiteErrors allows the driver to invoke // the Run method of this analyzer even on a // package that contains parse or type errors. -<<<<<<< HEAD // The [Pass.TypeErrors] field may consequently be non-empty. -======= - // The Pass.TypeErrors field may consequently be non-empty. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) RunDespiteErrors bool // Requires is a set of analyzers that must run successfully @@ -160,23 +156,17 @@ type Pass struct { // AllPackageFacts returns a new slice containing all package // facts of the analysis's FactTypes in unspecified order. 
-<<<<<<< HEAD // See comments for AllObjectFacts. -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) AllPackageFacts func() []PackageFact // AllObjectFacts returns a new slice containing all object // facts of the analysis's FactTypes in unspecified order. -<<<<<<< HEAD // // The result includes all facts exported by packages // whose symbols are referenced by the current package // (by qualified identifiers or field/method selections). // And it includes all facts exported from the current // package by the current analysis pass. -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) AllObjectFacts func() []ObjectFact /* Further fields may be added in future. */ diff --git a/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go b/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go index fdbe6671d0..e7434e8fed 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go @@ -15,10 +15,6 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" -<<<<<<< HEAD -======= - "golang.org/x/tools/internal/versions" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const Doc = "check //go:build and // +build directives" @@ -374,14 +370,6 @@ func (check *checker) finish() { // tags reports issues in go versions in tags within the expression e. func (check *checker) tags(pos token.Pos, e constraint.Expr) { -<<<<<<< HEAD -======= - // Check that constraint.GoVersion is meaningful (>= go1.21). - if versions.ConstraintGoVersion == nil { - return - } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Use Eval to visit each tag. _ = e.Eval(func(tag string) bool { if malformedGoTag(tag) { @@ -399,15 +387,8 @@ func malformedGoTag(tag string) bool { // Check for close misspellings of the "go1." prefix. for _, pre := range []string{"go.", "g1.", "go"} { suffix := strings.TrimPrefix(tag, pre) -<<<<<<< HEAD if suffix != tag && validGoVersion("go1."+suffix) { return true -======= - if suffix != tag { - if valid, ok := validTag("go1." + suffix); ok && valid { - return true - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } return false @@ -415,24 +396,10 @@ func malformedGoTag(tag string) bool { // The tag starts with "go1" so it is almost certainly a GoVersion. // Report it if it is not a valid build constraint. -<<<<<<< HEAD return !validGoVersion(tag) } // validGoVersion reports when a tag is a valid go version. func validGoVersion(tag string) bool { return constraint.GoVersion(&constraint.TagExpr{Tag: tag}) != "" -======= - valid, ok := validTag(tag) - return ok && !valid -} - -// validTag returns (valid, ok) where valid reports when a tag is valid, -// and ok reports determining if the tag is valid succeeded. 
-func validTag(tag string) (valid bool, ok bool) { - if versions.ConstraintGoVersion != nil { - return versions.ConstraintGoVersion(&constraint.TagExpr{Tag: tag}) != "", true - } - return false, false ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go b/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go index d53312f0cd..171ad20137 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go @@ -433,12 +433,9 @@ func printfNameAndKind(pass *analysis.Pass, call *ast.CallExpr) (fn *types.Func, return nil, 0 } -<<<<<<< HEAD // Facts are associated with generic declarations, not instantiations. fn = fn.Origin() -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) _, ok := isPrint[fn.FullName()] if !ok { // Next look up just "printf", for use with -printf.funcs. diff --git a/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go b/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go index 14318f4cdb..4115ef7694 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go @@ -89,11 +89,7 @@ var checkTagSpaces = map[string]bool{"json": true, "xml": true, "asn1": true} // checkCanonicalFieldTag checks a single struct field tag. func checkCanonicalFieldTag(pass *analysis.Pass, field *types.Var, tag string, seen *namesSeen) { switch pass.Pkg.Path() { -<<<<<<< HEAD case "encoding/json", "encoding/json/v2", "encoding/xml": -======= - case "encoding/json", "encoding/xml": ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // These packages know how to use their own APIs. // Sometimes they are testing what happens to incorrect programs. return diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go index 4d6d13b15e..cfda893433 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -36,10 +36,7 @@ package inspector import ( "go/ast" -<<<<<<< HEAD _ "unsafe" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // An Inspector provides methods for inspecting @@ -48,12 +45,9 @@ type Inspector struct { events []event } -<<<<<<< HEAD //go:linkname events func events(in *Inspector) []event { return in.events } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // New returns an Inspector for the specified syntax trees. func New(files []*ast.File) *Inspector { return &Inspector{traverse(files)} @@ -62,16 +56,10 @@ func New(files []*ast.File) *Inspector { // An event represents a push or a pop // of an ast.Node during a traversal. type event struct { -<<<<<<< HEAD node ast.Node typ uint64 // typeOf(node) on push event, or union of typ strictly between push and pop events on pop events index int32 // index of corresponding push or pop event parent int32 // index of parent's push node (defined for push nodes only) -======= - node ast.Node - typ uint64 // typeOf(node) on push event, or union of typ strictly between push and pop events on pop events - index int // index of corresponding push or pop event ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // TODO: Experiment with storing only the second word of event.node (unsafe.Pointer). 
@@ -100,11 +88,7 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { // }) mask := maskOf(types) -<<<<<<< HEAD for i := int32(0); i < int32(len(in.events)); { -======= - for i := 0; i < len(in.events); { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ev := in.events[i] if ev.index > i { // push @@ -134,11 +118,7 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { // matches an element of the types slice. func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proceed bool)) { mask := maskOf(types) -<<<<<<< HEAD for i := int32(0); i < int32(len(in.events)); { -======= - for i := 0; i < len(in.events); { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ev := in.events[i] if ev.index > i { // push @@ -172,11 +152,7 @@ func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proc func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (proceed bool)) { mask := maskOf(types) var stack []ast.Node -<<<<<<< HEAD for i := int32(0); i < int32(len(in.events)); { -======= - for i := 0; i < len(in.events); { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ev := in.events[i] if ev.index > i { // push @@ -225,17 +201,12 @@ func traverse(files []*ast.File) []event { events := make([]event, 0, capacity) var stack []event -<<<<<<< HEAD stack = append(stack, event{index: -1}) // include an extra event so file nodes have a parent -======= - stack = append(stack, event{}) // include an extra event so file nodes have a parent ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, f := range files { ast.Inspect(f, func(n ast.Node) bool { if n != nil { // push ev := event{ -<<<<<<< HEAD node: n, typ: 0, // temporarily used to accumulate type bits of subtree index: int32(len(events)), // push event temporarily holds own index @@ -248,14 +219,6 @@ func traverse(files []*ast.File) []event { if int32(len(events)) < 0 { panic("event index exceeded int32") } -======= - node: n, - typ: 0, // temporarily used to accumulate type bits of subtree - index: len(events), // push event temporarily holds own index - } - stack = append(stack, ev) - events = append(events, ev) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } else { // pop top := len(stack) - 1 @@ -264,15 +227,9 @@ func traverse(files []*ast.File) []event { push := ev.index parent := top - 1 -<<<<<<< HEAD events[push].typ = typ // set type of push stack[parent].typ |= typ | ev.typ // parent's typ contains push and pop's typs. events[push].index = int32(len(events)) // make push refer to pop -======= - events[push].typ = typ // set type of push - stack[parent].typ |= typ | ev.typ // parent's typ contains push and pop's typs. 
- events[push].index = len(events) // make push refer to pop ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) stack = stack[:top] events = append(events, ev) diff --git a/vendor/golang.org/x/tools/go/ast/inspector/iter.go b/vendor/golang.org/x/tools/go/ast/inspector/iter.go index 1dd0b70743..c576dc70ac 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/iter.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/iter.go @@ -26,11 +26,7 @@ func (in *Inspector) PreorderSeq(types ...ast.Node) iter.Seq[ast.Node] { return func(yield func(ast.Node) bool) { mask := maskOf(types) -<<<<<<< HEAD for i := int32(0); i < int32(len(in.events)); { -======= - for i := 0; i < len(in.events); { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ev := in.events[i] if ev.index > i { // push @@ -67,11 +63,7 @@ func All[N interface { mask := typeOf((N)(nil)) return func(yield func(N) bool) { -<<<<<<< HEAD for i := int32(0); i < int32(len(in.events)); { -======= - for i := 0; i < len(in.events); { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ev := in.events[i] if ev.index > i { // push diff --git a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go index 6c1be29f5c..40b1bfd7e6 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go @@ -12,11 +12,8 @@ package inspector import ( "go/ast" "math" -<<<<<<< HEAD _ "unsafe" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const ( @@ -220,10 +217,7 @@ func typeOf(n ast.Node) uint64 { return 0 } -<<<<<<< HEAD //go:linkname maskOf -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func maskOf(nodes []ast.Node) uint64 { if nodes == nil { return math.MaxUint64 // match all node types diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index 1a721098b2..65fe2628e9 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -106,16 +106,11 @@ func Find(importPath, srcDir string) (filename, path string) { // additional trailing data beyond the end of the export data. func NewReader(r io.Reader) (io.Reader, error) { buf := bufio.NewReader(r) -<<<<<<< HEAD size, err := gcimporter.FindExportData(buf) -======= - _, size, err := gcimporter.FindExportData(buf) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } -<<<<<<< HEAD // We were given an archive and found the __.PKGDEF in it. // This tells us the size of the export data, and we don't // need to return the entire file. @@ -123,21 +118,6 @@ func NewReader(r io.Reader) (io.Reader, error) { R: buf, N: size, }, nil -======= - if size >= 0 { - // We were given an archive and found the __.PKGDEF in it. - // This tells us the size of the export data, and we don't - // need to return the entire file. - return &io.LimitedReader{ - R: buf, - N: size, - }, nil - } else { - // We were given an object file. As such, we don't know how large - // the export data is and must return the entire file. 
- return buf, nil - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // readAll works the same way as io.ReadAll, but avoids allocations and copies diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index b8f9e4ae2d..91bd62e83b 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -13,10 +13,7 @@ import ( "fmt" "os" "os/exec" -<<<<<<< HEAD "slices" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "strings" ) @@ -135,11 +132,7 @@ func findExternalDriver(cfg *Config) driver { // command. // // (See similar trick in Invocation.run in ../../internal/gocommand/invoke.go) -<<<<<<< HEAD cmd.Env = append(slices.Clip(cfg.Env), "PWD="+cfg.Dir) -======= - cmd.Env = append(slicesClip(cfg.Env), "PWD="+cfg.Dir) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) cmd.Stdin = bytes.NewReader(req) cmd.Stdout = buf cmd.Stderr = stderr @@ -158,10 +151,3 @@ func findExternalDriver(cfg *Config) driver { return &response, nil } } -<<<<<<< HEAD -======= - -// slicesClip removes unused capacity from the slice, returning s[:len(s):len(s)]. -// TODO(adonovan): use go1.21 slices.Clip. -func slicesClip[S ~[]E, E any](s S) S { return s[:len(s):len(s)] } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index aeb6497cc0..0458b4f9c4 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -322,10 +322,7 @@ type jsonPackage struct { ImportPath string Dir string Name string -<<<<<<< HEAD Target string -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Export string GoFiles []string CompiledGoFiles []string @@ -509,22 +506,15 @@ func (state *golistState) createDriverResponse(words ...string) (*DriverResponse pkg := &Package{ Name: p.Name, ID: p.ImportPath, -<<<<<<< HEAD Dir: p.Dir, Target: p.Target, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), OtherFiles: absJoin(p.Dir, otherFiles(p)...), EmbedFiles: absJoin(p.Dir, p.EmbedFiles), EmbedPatterns: absJoin(p.Dir, p.EmbedPatterns), IgnoredFiles: absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles), -<<<<<<< HEAD ForTest: p.ForTest, -======= - forTest: p.ForTest, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) depsErrors: p.DepsErrors, Module: p.Module, } @@ -808,11 +798,7 @@ func jsonFlag(cfg *Config, goVersion int) string { // Request Dir in the unlikely case Export is not absolute. 
addFields("Dir", "Export") } -<<<<<<< HEAD if cfg.Mode&NeedForTest != 0 { -======= - if cfg.Mode&needInternalForTest != 0 { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) addFields("ForTest") } if cfg.Mode&needInternalDepsErrors != 0 { @@ -827,12 +813,9 @@ func jsonFlag(cfg *Config, goVersion int) string { if cfg.Mode&NeedEmbedPatterns != 0 { addFields("EmbedPatterns") } -<<<<<<< HEAD if cfg.Mode&NeedTarget != 0 { addFields("Target") } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return "-json=" + strings.Join(fields, ",") } diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go index 6aee3e686a..69eec9f44d 100644 --- a/vendor/golang.org/x/tools/go/packages/loadmode_string.go +++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go @@ -23,17 +23,11 @@ var modes = [...]struct { {NeedSyntax, "NeedSyntax"}, {NeedTypesInfo, "NeedTypesInfo"}, {NeedTypesSizes, "NeedTypesSizes"}, -<<<<<<< HEAD {NeedForTest, "NeedForTest"}, {NeedModule, "NeedModule"}, {NeedEmbedFiles, "NeedEmbedFiles"}, {NeedEmbedPatterns, "NeedEmbedPatterns"}, {NeedTarget, "NeedTarget"}, -======= - {NeedModule, "NeedModule"}, - {NeedEmbedFiles, "NeedEmbedFiles"}, - {NeedEmbedPatterns, "NeedEmbedPatterns"}, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (mode LoadMode) String() string { diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index a3b8e538b5..0147d9080a 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -43,7 +43,6 @@ import ( // ID and Errors (if present) will always be filled. // [Load] may return more information than requested. // -<<<<<<< HEAD // The Mode flag is a union of several bits named NeedName, // NeedFiles, and so on, each of which determines whether // a given field of Package (Name, Files, etc) should be @@ -58,8 +57,6 @@ import ( // [LoadSyntax] ... plus type-annotated syntax // [LoadAllSyntax] ... for all dependencies // -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Unfortunately there are a number of open bugs related to // interactions among the LoadMode bits: // - https://github.com/golang/go/issues/56633 @@ -72,11 +69,7 @@ const ( // NeedName adds Name and PkgPath. NeedName LoadMode = 1 << iota -<<<<<<< HEAD // NeedFiles adds Dir, GoFiles, OtherFiles, and IgnoredFiles -======= - // NeedFiles adds GoFiles, OtherFiles, and IgnoredFiles ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) NeedFiles // NeedCompiledGoFiles adds CompiledGoFiles. @@ -107,16 +100,10 @@ const ( // needInternalDepsErrors adds the internal deps errors field for use by gopls. needInternalDepsErrors -<<<<<<< HEAD // NeedForTest adds ForTest. // // Tests must also be set on the context for this field to be populated. NeedForTest -======= - // needInternalForTest adds the internal forTest field. - // Tests must also be set on the context for this field to be populated. - needInternalForTest ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // typecheckCgo enables full support for type checking cgo. Requires Go 1.15+. // Modifies CompiledGoFiles and Types, and has no effect on its own. @@ -131,18 +118,14 @@ const ( // NeedEmbedPatterns adds EmbedPatterns. NeedEmbedPatterns -<<<<<<< HEAD // NeedTarget adds Target. 
NeedTarget -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Be sure to update loadmode_string.go when adding new items! ) const ( // LoadFiles loads the name and file names for the initial packages. -<<<<<<< HEAD LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles // LoadImports loads the name, file names, and import mapping for the initial packages. @@ -155,35 +138,6 @@ const ( LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo // LoadAllSyntax loads typed syntax for the initial packages and all dependencies. -======= - // - // Deprecated: LoadFiles exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. - LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles - - // LoadImports loads the name, file names, and import mapping for the initial packages. - // - // Deprecated: LoadImports exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. - LoadImports = LoadFiles | NeedImports - - // LoadTypes loads exported type information for the initial packages. - // - // Deprecated: LoadTypes exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. - LoadTypes = LoadImports | NeedTypes | NeedTypesSizes - - // LoadSyntax loads typed syntax for the initial packages. - // - // Deprecated: LoadSyntax exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. - LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo - - // LoadAllSyntax loads typed syntax for the initial packages and all dependencies. - // - // Deprecated: LoadAllSyntax exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) LoadAllSyntax = LoadSyntax | NeedDeps // Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile. @@ -483,15 +437,12 @@ type Package struct { // PkgPath is the package path as used by the go/types package. PkgPath string -<<<<<<< HEAD // Dir is the directory associated with the package, if it exists. // // For packages listed by the go command, this is the directory containing // the package files. Dir string -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Errors contains any errors encountered querying the metadata // of the package, or while parsing or type-checking its files. Errors []Error @@ -531,13 +482,10 @@ type Package struct { // information for the package as provided by the build system. ExportFile string -<<<<<<< HEAD // Target is the absolute install path of the .a file, for libraries, // and of the executable file, for binaries. Target string -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Imports maps import paths appearing in the package's Go source files // to corresponding loaded Packages. Imports map[string]*Package @@ -586,13 +534,8 @@ type Package struct { // -- internal -- -<<<<<<< HEAD // ForTest is the package under test, if any. ForTest string -======= - // forTest is the package under test, if any. - forTest string ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // depsErrors is the DepsErrors field from the go list response, if any. 
depsErrors []*packagesinternal.PackageError @@ -621,12 +564,6 @@ type ModuleError struct { } func init() { -<<<<<<< HEAD -======= - packagesinternal.GetForTest = func(p interface{}) string { - return p.(*Package).forTest - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError { return p.(*Package).depsErrors } @@ -638,10 +575,6 @@ func init() { } packagesinternal.TypecheckCgo = int(typecheckCgo) packagesinternal.DepsErrors = int(needInternalDepsErrors) -<<<<<<< HEAD -======= - packagesinternal.ForTest = int(needInternalForTest) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // An Error describes a problem with a package's metadata, syntax, or types. diff --git a/vendor/golang.org/x/tools/go/ssa/const.go b/vendor/golang.org/x/tools/go/ssa/const.go index 5595e9cb32..764b73529e 100644 --- a/vendor/golang.org/x/tools/go/ssa/const.go +++ b/vendor/golang.org/x/tools/go/ssa/const.go @@ -12,15 +12,9 @@ import ( "go/token" "go/types" "strconv" -<<<<<<< HEAD "golang.org/x/tools/internal/typeparams" "golang.org/x/tools/internal/typesinternal" -======= - "strings" - - "golang.org/x/tools/internal/typeparams" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // NewConst returns a new constant of the specified value and type. @@ -84,11 +78,7 @@ func zeroConst(t types.Type) *Const { func (c *Const) RelString(from *types.Package) string { var s string if c.Value == nil { -<<<<<<< HEAD s, _ = typesinternal.ZeroString(c.typ, types.RelativeTo(from)) -======= - s = zeroString(c.typ, from) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } else if c.Value.Kind() == constant.String { s = constant.StringVal(c.Value) const max = 20 @@ -103,47 +93,6 @@ func (c *Const) RelString(from *types.Package) string { return s + ":" + relType(c.Type(), from) } -<<<<<<< HEAD -======= -// zeroString returns the string representation of the "zero" value of the type t. -func zeroString(t types.Type, from *types.Package) string { - switch t := t.(type) { - case *types.Basic: - switch { - case t.Info()&types.IsBoolean != 0: - return "false" - case t.Info()&types.IsNumeric != 0: - return "0" - case t.Info()&types.IsString != 0: - return `""` - case t.Kind() == types.UnsafePointer: - fallthrough - case t.Kind() == types.UntypedNil: - return "nil" - default: - panic(fmt.Sprint("zeroString for unexpected type:", t)) - } - case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature: - return "nil" - case *types.Named, *types.Alias: - return zeroString(t.Underlying(), from) - case *types.Array, *types.Struct: - return relType(t, from) + "{}" - case *types.Tuple: - // Tuples are not normal values. - // We are currently format as "(t[0], ..., t[n])". Could be something else. 
- components := make([]string, t.Len()) - for i := 0; i < t.Len(); i++ { - components[i] = zeroString(t.At(i).Type(), from) - } - return "(" + strings.Join(components, ", ") + ")" - case *types.TypeParam: - return "*new(" + relType(t, from) + ")" - } - panic(fmt.Sprint("zeroString: unexpected ", t)) -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (c *Const) Name() string { return c.RelString(nil) } diff --git a/vendor/golang.org/x/tools/go/ssa/dom.go b/vendor/golang.org/x/tools/go/ssa/dom.go index 30c8bd9c40..f490986140 100644 --- a/vendor/golang.org/x/tools/go/ssa/dom.go +++ b/vendor/golang.org/x/tools/go/ssa/dom.go @@ -318,10 +318,7 @@ func printDomTreeText(buf *bytes.Buffer, v *BasicBlock, indent int) { // printDomTreeDot prints the dominator tree of f in AT&T GraphViz // (.dot) format. -<<<<<<< HEAD // (unused; retained for debugging) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func printDomTreeDot(buf *bytes.Buffer, f *Function) { fmt.Fprintln(buf, "//", f) fmt.Fprintln(buf, "digraph domtree {") diff --git a/vendor/golang.org/x/tools/go/ssa/util.go b/vendor/golang.org/x/tools/go/ssa/util.go index 7e52f1dbc6..aa070eacdc 100644 --- a/vendor/golang.org/x/tools/go/ssa/util.go +++ b/vendor/golang.org/x/tools/go/ssa/util.go @@ -14,10 +14,7 @@ import ( "io" "os" "sync" -<<<<<<< HEAD _ "unsafe" // for go:linkname hack -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "golang.org/x/tools/go/types/typeutil" "golang.org/x/tools/internal/typeparams" @@ -412,20 +409,6 @@ func (canon *canonizer) instantiateMethod(m *types.Func, targs []types.Type, ctx } // Exposed to ssautil using the linkname hack. -<<<<<<< HEAD // //go:linkname isSyntactic golang.org/x/tools/go/ssa.isSyntactic func isSyntactic(pkg *Package) bool { return pkg.syntax } -======= -func isSyntactic(pkg *Package) bool { return pkg.syntax } - -// mapValues returns a new unordered array of map values. -func mapValues[K comparable, V any](m map[K]V) []V { - vals := make([]V, 0, len(m)) - for _, fn := range m { - vals = append(vals, fn) - } - return vals - -} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go index 5e9538885a..93b3090c68 100644 --- a/vendor/golang.org/x/tools/go/types/typeutil/map.go +++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go @@ -2,43 +2,28 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -<<<<<<< HEAD // Package typeutil defines various utilities for types, such as [Map], // a hash table that maps [types.Type] to any value. package typeutil -======= -// Package typeutil defines various utilities for types, such as Map, -// a mapping from types.Type to any values. -package typeutil // import "golang.org/x/tools/go/types/typeutil" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) import ( "bytes" "fmt" "go/types" -<<<<<<< HEAD "hash/maphash" "unsafe" -======= - "reflect" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "golang.org/x/tools/internal/typeparams" ) // Map is a hash-table-based mapping from types (types.Type) to -<<<<<<< HEAD // arbitrary values. The concrete types that implement -======= -// arbitrary any values. The concrete types that implement ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // the Type interface are pointers. 
Since they are not canonicalized, // == cannot be used to check for equivalence, and thus we cannot // simply use a Go map. // // Just as with map[K]V, a nil *Map is a valid empty map. // -<<<<<<< HEAD // Read-only map operations ([Map.At], [Map.Len], and so on) may // safely be called concurrently. // @@ -46,11 +31,6 @@ import ( // and 69559, if the latter proposals for a generic hash-map type and // a types.Hash function are accepted. type Map struct { -======= -// Not thread-safe. -type Map struct { - hasher Hasher // shared by many Maps ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused length int // number of map entries } @@ -61,47 +41,17 @@ type entry struct { value any } -<<<<<<< HEAD // SetHasher has no effect. // // It is a relic of an optimization that is no longer profitable. Do // not use [Hasher], [MakeHasher], or [SetHasher] in new code. func (m *Map) SetHasher(Hasher) {} -======= -// SetHasher sets the hasher used by Map. -// -// All Hashers are functionally equivalent but contain internal state -// used to cache the results of hashing previously seen types. -// -// A single Hasher created by MakeHasher() may be shared among many -// Maps. This is recommended if the instances have many keys in -// common, as it will amortize the cost of hash computation. -// -// A Hasher may grow without bound as new types are seen. Even when a -// type is deleted from the map, the Hasher never shrinks, since other -// types in the map may reference the deleted type indirectly. -// -// Hashers are not thread-safe, and read-only operations such as -// Map.Lookup require updates to the hasher, so a full Mutex lock (not a -// read-lock) is require around all Map operations if a shared -// hasher is accessed from multiple threads. -// -// If SetHasher is not called, the Map will create a private hasher at -// the first call to Insert. -func (m *Map) SetHasher(hasher Hasher) { - m.hasher = hasher -} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Delete removes the entry with the given key, if any. // It returns true if the entry was found. func (m *Map) Delete(key types.Type) bool { if m != nil && m.table != nil { -<<<<<<< HEAD hash := hash(key) -======= - hash := m.hasher.Hash(key) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) bucket := m.table[hash] for i, e := range bucket { if e.key != nil && types.Identical(key, e.key) { @@ -120,11 +70,7 @@ func (m *Map) Delete(key types.Type) bool { // The result is nil if the entry is not present. func (m *Map) At(key types.Type) any { if m != nil && m.table != nil { -<<<<<<< HEAD for _, e := range m.table[hash(key)] { -======= - for _, e := range m.table[m.hasher.Hash(key)] { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if e.key != nil && types.Identical(key, e.key) { return e.value } @@ -137,11 +83,7 @@ func (m *Map) At(key types.Type) any { // and returns the previous entry, if any. 
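// Minimal usage sketch for the Map described above: structurally identical
// types are distinct pointers, so == fails, but the Map keys by structure.
package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	t1 := types.NewSlice(types.Typ[types.Int])
	t2 := types.NewSlice(types.Typ[types.Int])
	fmt.Println(t1 == t2)                // false: distinct pointers
	fmt.Println(types.Identical(t1, t2)) // true: structurally identical

	var m typeutil.Map // zero value is ready to use, like map[K]V
	m.Set(t1, "slice of int")
	fmt.Println(m.At(t2)) // "slice of int": found under the identical key
	fmt.Println(m.Len())  // 1
}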
func (m *Map) Set(key types.Type, value any) (prev any) { if m.table != nil { -<<<<<<< HEAD hash := hash(key) -======= - hash := m.hasher.Hash(key) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) bucket := m.table[hash] var hole *entry for i, e := range bucket { @@ -160,14 +102,7 @@ func (m *Map) Set(key types.Type, value any) (prev any) { m.table[hash] = append(bucket, entry{key, value}) } } else { -<<<<<<< HEAD hash := hash(key) -======= - if m.hasher.memo == nil { - m.hasher = MakeHasher() - } - hash := m.hasher.Hash(key) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) m.table = map[uint32][]entry{hash: {entry{key, value}}} } @@ -244,7 +179,6 @@ func (m *Map) KeysString() string { return m.toString(false) } -<<<<<<< HEAD // -- Hasher -- // hash returns the hash of type t. @@ -262,48 +196,10 @@ var theHasher Hasher // MakeHasher returns Hasher{}. // Hashers are stateless; all are equivalent. func MakeHasher() Hasher { return theHasher } -======= -//////////////////////////////////////////////////////////////////////// -// Hasher - -// A Hasher maps each type to its hash value. -// For efficiency, a hasher uses memoization; thus its memory -// footprint grows monotonically over time. -// Hashers are not thread-safe. -// Hashers have reference semantics. -// Call MakeHasher to create a Hasher. -type Hasher struct { - memo map[types.Type]uint32 - - // ptrMap records pointer identity. - ptrMap map[any]uint32 - - // sigTParams holds type parameters from the signature being hashed. - // Signatures are considered identical modulo renaming of type parameters, so - // within the scope of a signature type the identity of the signature's type - // parameters is just their index. - // - // Since the language does not currently support referring to uninstantiated - // generic types or functions, and instantiated signatures do not have type - // parameter lists, we should never encounter a second non-empty type - // parameter list when hashing a generic signature. - sigTParams *types.TypeParamList -} - -// MakeHasher returns a new Hasher instance. -func MakeHasher() Hasher { - return Hasher{ - memo: make(map[types.Type]uint32), - ptrMap: make(map[any]uint32), - sigTParams: nil, - } -} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Hash computes a hash value for the given type t such that // Identical(t, t') => Hash(t) == Hash(t'). func (h Hasher) Hash(t types.Type) uint32 { -<<<<<<< HEAD return hasher{inGenericSig: false}.hash(t) } @@ -312,16 +208,6 @@ func (h Hasher) Hash(t types.Type) uint32 { // optimize [hasher.hashTypeParam]. type hasher struct{ inGenericSig bool } -======= - hash, ok := h.memo[t] - if !ok { - hash = h.hashFor(t) - h.memo[t] = hash - } - return hash -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // hashString computes the Fowler–Noll–Vo hash of s. func hashString(s string) uint32 { var h uint32 @@ -332,20 +218,14 @@ func hashString(s string) uint32 { return h } -<<<<<<< HEAD // hash computes the hash of t. func (h hasher) hash(t types.Type) uint32 { -======= -// hashFor computes the hash of t. -func (h Hasher) hashFor(t types.Type) uint32 { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // See Identical for rationale. 
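// For reference, a standard 32-bit FNV-1a over a string. The vendored
// hashString body is elided by the hunk above; this sketch uses the textbook
// offset and prime, which may differ from the vendored constants.
func fnv1a32(s string) uint32 {
	const offset32, prime32 = 2166136261, 16777619
	h := uint32(offset32)
	for i := 0; i < len(s); i++ {
		h ^= uint32(s[i]) // xor the byte in, then multiply: the "1a" order
		h *= prime32
	}
	return h
}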
switch t := t.(type) { case *types.Basic: return uint32(t.Kind()) case *types.Alias: -<<<<<<< HEAD return h.hash(types.Unalias(t)) case *types.Array: @@ -353,15 +233,6 @@ func (h Hasher) hashFor(t types.Type) uint32 { case *types.Slice: return 9049 + 2*h.hash(t.Elem()) -======= - return h.Hash(types.Unalias(t)) - - case *types.Array: - return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem()) - - case *types.Slice: - return 9049 + 2*h.Hash(t.Elem()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) case *types.Struct: var hash uint32 = 9059 @@ -372,20 +243,12 @@ func (h Hasher) hashFor(t types.Type) uint32 { } hash += hashString(t.Tag(i)) hash += hashString(f.Name()) // (ignore f.Pkg) -<<<<<<< HEAD hash += h.hash(f.Type()) -======= - hash += h.Hash(f.Type()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return hash case *types.Pointer: -<<<<<<< HEAD return 9067 + 2*h.hash(t.Elem()) -======= - return 9067 + 2*h.Hash(t.Elem()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) case *types.Signature: var hash uint32 = 9091 @@ -393,41 +256,11 @@ func (h Hasher) hashFor(t types.Type) uint32 { hash *= 8863 } -<<<<<<< HEAD tparams := t.TypeParams() for i := range tparams.Len() { h.inGenericSig = true tparam := tparams.At(i) hash += 7 * h.hash(tparam.Constraint()) -======= - // Use a separate hasher for types inside of the signature, where type - // parameter identity is modified to be (index, constraint). We must use a - // new memo for this hasher as type identity may be affected by this - // masking. For example, in func[T any](*T), the identity of *T depends on - // whether we are mapping the argument in isolation, or recursively as part - // of hashing the signature. - // - // We should never encounter a generic signature while hashing another - // generic signature, but defensively set sigTParams only if h.mask is - // unset. - tparams := t.TypeParams() - if h.sigTParams == nil && tparams.Len() != 0 { - h = Hasher{ - // There may be something more efficient than discarding the existing - // memo, but it would require detecting whether types are 'tainted' by - // references to type parameters. - memo: make(map[types.Type]uint32), - // Re-using ptrMap ensures that pointer identity is preserved in this - // hasher. - ptrMap: h.ptrMap, - sigTParams: tparams, - } - } - - for i := 0; i < tparams.Len(); i++ { - tparam := tparams.At(i) - hash += 7 * h.Hash(tparam.Constraint()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results()) @@ -461,7 +294,6 @@ func (h Hasher) hashFor(t types.Type) uint32 { return hash case *types.Map: -<<<<<<< HEAD return 9109 + 2*h.hash(t.Key()) + 3*h.hash(t.Elem()) case *types.Chan: @@ -473,19 +305,6 @@ func (h Hasher) hashFor(t types.Type) uint32 { for i := 0; i < targs.Len(); i++ { targ := targs.At(i) hash += 2 * h.hash(targ) -======= - return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem()) - - case *types.Chan: - return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem()) - - case *types.Named: - hash := h.hashPtr(t.Obj()) - targs := t.TypeArgs() - for i := 0; i < targs.Len(); i++ { - targ := targs.At(i) - hash += 2 * h.Hash(targ) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return hash @@ -499,30 +318,17 @@ func (h Hasher) hashFor(t types.Type) uint32 { panic(fmt.Sprintf("%T: %v", t, t)) } -<<<<<<< HEAD func (h hasher) hashTuple(tuple *types.Tuple) uint32 { // See go/types.identicalTypes for rationale. 
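// Worked example of the case arms above, using only formulas visible in this
// hunk: a *types.Basic hashes to its Kind, and a *types.Map combines its key
// and element hashes with small prime weights.
package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	m := types.NewMap(types.Typ[types.String], types.Typ[types.Int])
	// 9109 + 2*uint32(types.String) + 3*uint32(types.Int)
	//   = 9109 + 2*17 + 3*2 = 9149
	fmt.Println(typeutil.MakeHasher().Hash(m)) // 9149 with this vendored version
}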
n := tuple.Len() hash := 9137 + 2*uint32(n) for i := range n { hash += 3 * h.hash(tuple.At(i).Type()) -======= -func (h Hasher) hashTuple(tuple *types.Tuple) uint32 { - // See go/types.identicalTypes for rationale. - n := tuple.Len() - hash := 9137 + 2*uint32(n) - for i := 0; i < n; i++ { - hash += 3 * h.Hash(tuple.At(i).Type()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return hash } -<<<<<<< HEAD func (h hasher) hashUnion(t *types.Union) uint32 { -======= -func (h Hasher) hashUnion(t *types.Union) uint32 { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Hash type restrictions. terms, err := typeparams.UnionTermSet(t) // if err != nil t has invalid type restrictions. Fall back on a non-zero @@ -533,19 +339,11 @@ func (h Hasher) hashUnion(t *types.Union) uint32 { return h.hashTermSet(terms) } -<<<<<<< HEAD func (h hasher) hashTermSet(terms []*types.Term) uint32 { hash := 9157 + 2*uint32(len(terms)) for _, term := range terms { // term order is not significant. termHash := h.hash(term.Type()) -======= -func (h Hasher) hashTermSet(terms []*types.Term) uint32 { - hash := 9157 + 2*uint32(len(terms)) - for _, term := range terms { - // term order is not significant. - termHash := h.Hash(term.Type()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if term.Tilde() { termHash *= 9161 } @@ -554,7 +352,6 @@ func (h Hasher) hashTermSet(terms []*types.Term) uint32 { return hash } -<<<<<<< HEAD // hashTypeParam returns the hash of a type parameter. func (h hasher) hashTypeParam(t *types.TypeParam) uint32 { // Within the signature of a generic function, TypeParams are @@ -591,38 +388,6 @@ func (hasher) hashTypeName(tname *types.TypeName) uint32 { // the same name.) hash := uintptr(unsafe.Pointer(tname)) return uint32(hash ^ (hash >> 32)) -======= -// hashTypeParam returns a hash of the type parameter t, with a hash value -// depending on whether t is contained in h.sigTParams. -// -// If h.sigTParams is set and contains t, then we are in the process of hashing -// a signature, and the hash value of t must depend only on t's index and -// constraint: signatures are considered identical modulo type parameter -// renaming. To avoid infinite recursion, we only hash the type parameter -// index, and rely on types.Identical to handle signatures where constraints -// are not identical. -// -// Otherwise the hash of t depends only on t's pointer identity. -func (h Hasher) hashTypeParam(t *types.TypeParam) uint32 { - if h.sigTParams != nil { - i := t.Index() - if i >= 0 && i < h.sigTParams.Len() && t == h.sigTParams.At(i) { - return 9173 + 3*uint32(i) - } - } - return h.hashPtr(t.Obj()) -} - -// hashPtr hashes the pointer identity of ptr. It uses h.ptrMap to ensure that -// pointers values are not dependent on the GC. -func (h Hasher) hashPtr(ptr any) uint32 { - if hash, ok := h.ptrMap[ptr]; ok { - return hash - } - hash := uint32(reflect.ValueOf(ptr).Pointer()) - h.ptrMap[ptr] = hash - return hash ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // shallowHash computes a hash of t without looking at any of its @@ -639,11 +404,7 @@ func (h Hasher) hashPtr(ptr any) uint32 { // include m itself; there is no mention of the named type X that // might help us break the cycle. // (See comment in go/types.identical, case *Interface, for more.) 
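// The recursion shallowHash avoids, reconstructed from the surrounding doc
// comment (the full example sits in lines elided by this hunk): an unnamed
// interface in a method's result re-embeds the method itself, as in
//
//	type X interface{ m() []*interface{ X } }
//
// The methods of the anonymous interface include m, whose type mentions that
// same anonymous interface, so a deep hash would never terminate; hashing a
// method's immediate elements shallowly breaks the cycle.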
-<<<<<<< HEAD func (h hasher) shallowHash(t types.Type) uint32 { -======= -func (h Hasher) shallowHash(t types.Type) uint32 { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // t is the type of an interface method (Signature), // its params or results (Tuples), or their immediate // elements (mostly Slice, Pointer, Basic, Named), @@ -664,11 +425,7 @@ func (h Hasher) shallowHash(t types.Type) uint32 { case *types.Tuple: n := t.Len() hash := 9137 + 2*uint32(n) -<<<<<<< HEAD for i := range n { -======= - for i := 0; i < n; i++ { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) hash += 53471161 * h.shallowHash(t.At(i).Type()) } return hash @@ -701,17 +458,10 @@ func (h Hasher) shallowHash(t types.Type) uint32 { return 9127 case *types.Named: -<<<<<<< HEAD return h.hashTypeName(t.Obj()) case *types.TypeParam: return h.hashTypeParam(t) -======= - return h.hashPtr(t.Obj()) - - case *types.TypeParam: - return h.hashPtr(t.Obj()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } panic(fmt.Sprintf("shallowHash: %T: %v", t, t)) } diff --git a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go index 2fb8d18b5b..58615232ff 100644 --- a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go +++ b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go @@ -15,10 +15,6 @@ import ( "go/types" "os" pathpkg "path" -<<<<<<< HEAD -======= - "strconv" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "golang.org/x/tools/go/analysis" ) @@ -69,267 +65,6 @@ func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos return end } -<<<<<<< HEAD -======= -func ZeroValue(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { - // TODO(adonovan): think about generics, and also generic aliases. - under := types.Unalias(typ) - // Don't call Underlying unconditionally: although it removes - // Named and Alias, it also removes TypeParam. - if n, ok := under.(*types.Named); ok { - under = n.Underlying() - } - switch under := under.(type) { - case *types.Basic: - switch { - case under.Info()&types.IsNumeric != 0: - return &ast.BasicLit{Kind: token.INT, Value: "0"} - case under.Info()&types.IsBoolean != 0: - return &ast.Ident{Name: "false"} - case under.Info()&types.IsString != 0: - return &ast.BasicLit{Kind: token.STRING, Value: `""`} - default: - panic(fmt.Sprintf("unknown basic type %v", under)) - } - case *types.Chan, *types.Interface, *types.Map, *types.Pointer, *types.Signature, *types.Slice, *types.Array: - return ast.NewIdent("nil") - case *types.Struct: - texpr := TypeExpr(f, pkg, typ) // typ because we want the name here. - if texpr == nil { - return nil - } - return &ast.CompositeLit{ - Type: texpr, - } - } - return nil -} - -// IsZeroValue checks whether the given expression is a 'zero value' (as determined by output of -// analysisinternal.ZeroValue) -func IsZeroValue(expr ast.Expr) bool { - switch e := expr.(type) { - case *ast.BasicLit: - return e.Value == "0" || e.Value == `""` - case *ast.Ident: - return e.Name == "nil" || e.Name == "false" - default: - return false - } -} - -// TypeExpr returns syntax for the specified type. References to -// named types from packages other than pkg are qualified by an appropriate -// package name, as defined by the import environment of file. 
-func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { - switch t := typ.(type) { - case *types.Basic: - switch t.Kind() { - case types.UnsafePointer: - return &ast.SelectorExpr{X: ast.NewIdent("unsafe"), Sel: ast.NewIdent("Pointer")} - default: - return ast.NewIdent(t.Name()) - } - case *types.Pointer: - x := TypeExpr(f, pkg, t.Elem()) - if x == nil { - return nil - } - return &ast.UnaryExpr{ - Op: token.MUL, - X: x, - } - case *types.Array: - elt := TypeExpr(f, pkg, t.Elem()) - if elt == nil { - return nil - } - return &ast.ArrayType{ - Len: &ast.BasicLit{ - Kind: token.INT, - Value: fmt.Sprintf("%d", t.Len()), - }, - Elt: elt, - } - case *types.Slice: - elt := TypeExpr(f, pkg, t.Elem()) - if elt == nil { - return nil - } - return &ast.ArrayType{ - Elt: elt, - } - case *types.Map: - key := TypeExpr(f, pkg, t.Key()) - value := TypeExpr(f, pkg, t.Elem()) - if key == nil || value == nil { - return nil - } - return &ast.MapType{ - Key: key, - Value: value, - } - case *types.Chan: - dir := ast.ChanDir(t.Dir()) - if t.Dir() == types.SendRecv { - dir = ast.SEND | ast.RECV - } - value := TypeExpr(f, pkg, t.Elem()) - if value == nil { - return nil - } - return &ast.ChanType{ - Dir: dir, - Value: value, - } - case *types.Signature: - var params []*ast.Field - for i := 0; i < t.Params().Len(); i++ { - p := TypeExpr(f, pkg, t.Params().At(i).Type()) - if p == nil { - return nil - } - params = append(params, &ast.Field{ - Type: p, - Names: []*ast.Ident{ - { - Name: t.Params().At(i).Name(), - }, - }, - }) - } - if t.Variadic() { - last := params[len(params)-1] - last.Type = &ast.Ellipsis{Elt: last.Type.(*ast.ArrayType).Elt} - } - var returns []*ast.Field - for i := 0; i < t.Results().Len(); i++ { - r := TypeExpr(f, pkg, t.Results().At(i).Type()) - if r == nil { - return nil - } - returns = append(returns, &ast.Field{ - Type: r, - }) - } - return &ast.FuncType{ - Params: &ast.FieldList{ - List: params, - }, - Results: &ast.FieldList{ - List: returns, - }, - } - case interface{ Obj() *types.TypeName }: // *types.{Alias,Named,TypeParam} - if t.Obj().Pkg() == nil { - return ast.NewIdent(t.Obj().Name()) - } - if t.Obj().Pkg() == pkg { - return ast.NewIdent(t.Obj().Name()) - } - pkgName := t.Obj().Pkg().Name() - - // If the file already imports the package under another name, use that. - for _, cand := range f.Imports { - if path, _ := strconv.Unquote(cand.Path.Value); path == t.Obj().Pkg().Path() { - if cand.Name != nil && cand.Name.Name != "" { - pkgName = cand.Name.Name - } - } - } - if pkgName == "." { - return ast.NewIdent(t.Obj().Name()) - } - return &ast.SelectorExpr{ - X: ast.NewIdent(pkgName), - Sel: ast.NewIdent(t.Obj().Name()), - } - case *types.Struct: - return ast.NewIdent(t.String()) - case *types.Interface: - return ast.NewIdent(t.String()) - default: - return nil - } -} - -// StmtToInsertVarBefore returns the ast.Stmt before which we can safely insert a new variable. -// Some examples: -// -// Basic Example: -// z := 1 -// y := z + x -// If x is undeclared, then this function would return `y := z + x`, so that we -// can insert `x := ` on the line before `y := z + x`. -// -// If stmt example: -// if z == 1 { -// } else if z == y {} -// If y is undeclared, then this function would return `if z == 1 {`, because we cannot -// insert a statement between an if and an else if statement. As a result, we need to find -// the top of the if chain to insert `y := ` before. 
-func StmtToInsertVarBefore(path []ast.Node) ast.Stmt { - enclosingIndex := -1 - for i, p := range path { - if _, ok := p.(ast.Stmt); ok { - enclosingIndex = i - break - } - } - if enclosingIndex == -1 { - return nil - } - enclosingStmt := path[enclosingIndex] - switch enclosingStmt.(type) { - case *ast.IfStmt: - // The enclosingStmt is inside of the if declaration, - // We need to check if we are in an else-if stmt and - // get the base if statement. - return baseIfStmt(path, enclosingIndex) - case *ast.CaseClause: - // Get the enclosing switch stmt if the enclosingStmt is - // inside of the case statement. - for i := enclosingIndex + 1; i < len(path); i++ { - if node, ok := path[i].(*ast.SwitchStmt); ok { - return node - } else if node, ok := path[i].(*ast.TypeSwitchStmt); ok { - return node - } - } - } - if len(path) <= enclosingIndex+1 { - return enclosingStmt.(ast.Stmt) - } - // Check if the enclosing statement is inside another node. - switch expr := path[enclosingIndex+1].(type) { - case *ast.IfStmt: - // Get the base if statement. - return baseIfStmt(path, enclosingIndex+1) - case *ast.ForStmt: - if expr.Init == enclosingStmt || expr.Post == enclosingStmt { - return expr - } - case *ast.SwitchStmt, *ast.TypeSwitchStmt: - return expr.(ast.Stmt) - } - return enclosingStmt.(ast.Stmt) -} - -// baseIfStmt walks up the if/else-if chain until we get to -// the top of the current if chain. -func baseIfStmt(path []ast.Node, index int) ast.Stmt { - stmt := path[index] - for i := index + 1; i < len(path); i++ { - if node, ok := path[i].(*ast.IfStmt); ok && node.Else == stmt { - stmt = node - continue - } - break - } - return stmt.(ast.Stmt) -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // WalkASTWithParent walks the AST rooted at n. The semantics are // similar to ast.Inspect except it does not call f(nil). func WalkASTWithParent(n ast.Node, f func(n ast.Node, parent ast.Node) bool) { diff --git a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go index 375d5c0d41..5662a311da 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go @@ -2,20 +2,13 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -<<<<<<< HEAD // This file should be kept in sync with $GOROOT/src/internal/exportdata/exportdata.go. // This file also additionally implements FindExportData for gcexportdata.NewReader. -======= -// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go. - -// This file implements FindExportData. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) package gcimporter import ( "bufio" -<<<<<<< HEAD "bytes" "errors" "fmt" @@ -186,43 +179,6 @@ func ReadUnified(r *bufio.Reader) (data []byte, err error) { func FindPackageDefinition(r *bufio.Reader) (size int, err error) { // Uses ReadSlice to limit risk of malformed inputs. -======= - "fmt" - "io" - "strconv" - "strings" -) - -func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) { - // See $GOROOT/include/ar.h. 
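// Layout of the 60-byte System V ar entry header parsed below. The field
// widths are exactly the 16+12+6+6+8+10+2 in the make call, and the trailing
// 2-byte magic is "`\n", which the code checks before trusting the size:
//
//	name[16] mtime[12] uid[6] gid[6] mode[8] size[10] fmag[2]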
- hdr := make([]byte, 16+12+6+6+8+10+2) - _, err = io.ReadFull(r, hdr) - if err != nil { - return - } - // leave for debugging - if false { - fmt.Printf("header: %s", hdr) - } - s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10])) - length, err := strconv.Atoi(s) - size = int64(length) - if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' { - err = fmt.Errorf("invalid archive header") - return - } - name = strings.TrimSpace(string(hdr[:16])) - return -} - -// FindExportData positions the reader r at the beginning of the -// export data section of an underlying GC-created object/archive -// file by reading from it. The reader must be positioned at the -// start of the file before calling this function. The hdr result -// is the string before the export data, either "$$" or "$$B". -// The size result is the length of the export data in bytes, or -1 if not known. -func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Read first line to make sure this is an object file. line, err := r.ReadSlice('\n') if err != nil { @@ -230,7 +186,6 @@ func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) { return } -<<<<<<< HEAD // Is the first line an archive file signature? if string(line) != "!<arch>\n" { err = fmt.Errorf("not the start of an archive file (%q)", line) @@ -242,54 +197,10 @@ if size <= 0 { err = fmt.Errorf("not a package file") return -======= - if string(line) == "!<arch>\n" { - // Archive file. Scan to __.PKGDEF. - var name string - if name, size, err = readGopackHeader(r); err != nil { - return - } - - // First entry should be __.PKGDEF. - if name != "__.PKGDEF" { - err = fmt.Errorf("go archive is missing __.PKGDEF") - return - } - - // Read first line of __.PKGDEF data, so that line - // is once again the first line of the input. - if line, err = r.ReadSlice('\n'); err != nil { - err = fmt.Errorf("can't find export data (%v)", err) - return - } - size -= int64(len(line)) - } - - // Now at __.PKGDEF in archive or still at beginning of file. - // Either way, line should begin with "go object ". - if !strings.HasPrefix(string(line), "go object ") { - err = fmt.Errorf("not a Go object file") - return - } - - // Skip over object header to export data. - // Begins after first line starting with $$. - for line[0] != '$' { - if line, err = r.ReadSlice('\n'); err != nil { - err = fmt.Errorf("can't find export data (%v)", err) - return - } - size -= int64(len(line)) - } - hdr = string(line) - if size < 0 { - size = -1 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return } -<<<<<<< HEAD // ReadObjectHeaders reads object headers from the reader. Object headers are // lines that do not start with an end-of-section marker "$$".
The first header @@ -508,5 +419,3 @@ func lookupGorootExport(pkgDir string) (string, error) { return f.(func() (string, error))() } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index ffe8ab0d71..3dbd21d1b9 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -23,24 +23,11 @@ package gcimporter // import "golang.org/x/tools/internal/gcimporter" import ( "bufio" -<<<<<<< HEAD "fmt" -======= - "bytes" - "fmt" - "go/build" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go/token" "go/types" "io" "os" -<<<<<<< HEAD -======= - "os/exec" - "path/filepath" - "strings" - "sync" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const ( @@ -52,7 +39,6 @@ const ( trace = false ) -<<<<<<< HEAD // Import imports a gc-generated package given its import path and srcDir, adds // the corresponding package object to the packages map, and returns the object. // The packages map must contain all packages already imported. @@ -61,127 +47,6 @@ const ( func Import(fset *token.FileSet, packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { var rc io.ReadCloser var id string -======= -var exportMap sync.Map // package dir → func() (string, bool) - -// lookupGorootExport returns the location of the export data -// (normally found in the build cache, but located in GOROOT/pkg -// in prior Go releases) for the package located in pkgDir. -// -// (We use the package's directory instead of its import path -// mainly to simplify handling of the packages in src/vendor -// and cmd/vendor.) -func lookupGorootExport(pkgDir string) (string, bool) { - f, ok := exportMap.Load(pkgDir) - if !ok { - var ( - listOnce sync.Once - exportPath string - ) - f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) { - listOnce.Do(func() { - cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir) - cmd.Dir = build.Default.GOROOT - var output []byte - output, err := cmd.Output() - if err != nil { - return - } - - exports := strings.Split(string(bytes.TrimSpace(output)), "\n") - if len(exports) != 1 { - return - } - - exportPath = exports[0] - }) - - return exportPath, exportPath != "" - }) - } - - return f.(func() (string, bool))() -} - -var pkgExts = [...]string{".a", ".o"} - -// FindPkg returns the filename and unique package id for an import -// path based on package information provided by build.Import (using -// the build.Default build.Context). A relative srcDir is interpreted -// relative to the current working directory. -// If no file was found, an empty filename is returned. -func FindPkg(path, srcDir string) (filename, id string) { - if path == "" { - return - } - - var noext string - switch { - default: - // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" - // Don't require the source files to be present. 
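// The once-per-key memoization pattern used by exportMap above, reduced to
// its core (sketch; cache, compute, and memoized are illustrative names).
package memo

import "sync"

var cache sync.Map // key -> func() (string, error)

func memoized(key string, compute func() (string, error)) (string, error) {
	f, ok := cache.Load(key)
	if !ok {
		var (
			once sync.Once
			val  string
			err  error
		)
		// Racing goroutines may each build a closure, but LoadOrStore keeps
		// exactly one, and its sync.Once runs compute at most once.
		f, _ = cache.LoadOrStore(key, func() (string, error) {
			once.Do(func() { val, err = compute() })
			return val, err
		})
	}
	return f.(func() (string, error))()
}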
- if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 - srcDir = abs - } - bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary) - if bp.PkgObj == "" { - var ok bool - if bp.Goroot && bp.Dir != "" { - filename, ok = lookupGorootExport(bp.Dir) - } - if !ok { - id = path // make sure we have an id to print in error message - return - } - } else { - noext = strings.TrimSuffix(bp.PkgObj, ".a") - id = bp.ImportPath - } - - case build.IsLocalImport(path): - // "./x" -> "/this/directory/x.ext", "/this/directory/x" - noext = filepath.Join(srcDir, path) - id = noext - - case filepath.IsAbs(path): - // for completeness only - go/build.Import - // does not support absolute imports - // "/x" -> "/x.ext", "/x" - noext = path - id = path - } - - if false { // for debugging - if path != id { - fmt.Printf("%s -> %s\n", path, id) - } - } - - if filename != "" { - if f, err := os.Stat(filename); err == nil && !f.IsDir() { - return - } - } - - // try extensions - for _, ext := range pkgExts { - filename = noext + ext - if f, err := os.Stat(filename); err == nil && !f.IsDir() { - return - } - } - - filename = "" // not found - return -} - -// Import imports a gc-generated package given its import path and srcDir, adds -// the corresponding package object to the packages map, and returns the object. -// The packages map must contain all packages already imported. -func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { - var rc io.ReadCloser - var filename, id string ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if lookup != nil { // With custom lookup specified, assume that caller has // converted path to a canonical import path for use in the map. @@ -200,21 +65,13 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func } rc = f } else { -<<<<<<< HEAD var filename string filename, id, err = FindPkg(path, srcDir) -======= - filename, id = FindPkg(path, srcDir) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if filename == "" { if path == "unsafe" { return types.Unsafe, nil } -<<<<<<< HEAD return nil, err -======= - return nil, fmt.Errorf("can't find import: %q", id) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // no need to re-import if the package was imported completely before @@ -237,7 +94,6 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func } defer rc.Close() -<<<<<<< HEAD buf := bufio.NewReader(rc) data, err := ReadUnified(buf) if err != nil { @@ -250,64 +106,3 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func return } -======= - var hdr string - var size int64 - buf := bufio.NewReader(rc) - if hdr, size, err = FindExportData(buf); err != nil { - return - } - - switch hdr { - case "$$B\n": - var data []byte - data, err = io.ReadAll(buf) - if err != nil { - break - } - - // TODO(gri): allow clients of go/importer to provide a FileSet. - // Or, define a new standard go/types/gcexportdata package. - fset := token.NewFileSet() - - // Select appropriate importer. - if len(data) > 0 { - switch data[0] { - case 'v', 'c', 'd': - // binary: emitted by cmd/compile till go1.10; obsolete. - return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - - case 'i': - // indexed: emitted by cmd/compile till go1.19; - // now used only for serializing go/types. - // See https://github.com/golang/go/issues/69491. 
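// How callers typically reach this importer: through the public
// golang.org/x/tools/go/gcexportdata wrapper rather than the internal
// package. Sketch only; the file path is an assumption.
package main

import (
	"fmt"
	"go/token"
	"go/types"
	"log"
	"os"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	f, err := os.Open("fmt.a") // assumed: export data produced by the gc toolchain
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	r, err := gcexportdata.NewReader(f) // positions the reader at the export data
	if err != nil {
		log.Fatal(err)
	}
	imports := make(map[string]*types.Package)
	pkg, err := gcexportdata.Read(r, token.NewFileSet(), imports, "fmt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path())
}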
- _, pkg, err := IImportData(fset, packages, data[1:], id) - return pkg, err - - case 'u': - // unified: emitted by cmd/compile since go1.20. - _, pkg, err := UImportData(fset, packages, data[1:size], id) - return pkg, err - - default: - l := len(data) - if l > 10 { - l = 10 - } - return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id) - } - } - - default: - err = fmt.Errorf("unknown export data header: %q", hdr) - } - - return -} - -type byPath []*types.Package - -func (a byPath) Len() int { return len(a) } -func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 2f1c6ed11a..69b1d697cb 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -5,11 +5,6 @@ // Indexed package import. // See iexport.go for the export data format. -<<<<<<< HEAD -======= -// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) package gcimporter import ( @@ -1114,12 +1109,9 @@ func (r *importReader) byte() byte { } return x } -<<<<<<< HEAD type byPath []*types.Package func (a byPath) Len() int { return len(a) } func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index 3e54e4d2a3..6cdab448ec 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -11,10 +11,6 @@ import ( "go/token" "go/types" "sort" -<<<<<<< HEAD -======= - "strings" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/pkgbits" @@ -74,10 +70,6 @@ func UImportData(fset *token.FileSet, imports map[string]*types.Package, data [] } s := string(data) -<<<<<<< HEAD -======= - s = s[:strings.LastIndex(s, "\n$$\n")] ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) input := pkgbits.NewPkgDecoder(path, s) pkg = readUnifiedPackage(fset, nil, imports, input) return @@ -272,16 +264,12 @@ func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package { func (r *reader) doPkg() *types.Package { path := r.String() switch path { -<<<<<<< HEAD // cmd/compile emits path="main" for main packages because // that's the linker symbol prefix it used; but we need // the package's path as it would be reported by go list, // hence "main" below. // See test at go/packages.TestMainPackagePathInModeTypes. case "", "main": -======= - case "": ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) path = r.p.PkgPath() case "builtin": return nil // universe diff --git a/vendor/golang.org/x/tools/internal/imports/source.go b/vendor/golang.org/x/tools/internal/imports/source.go index bdcfdc8db6..cbe4f3c5ba 100644 --- a/vendor/golang.org/x/tools/internal/imports/source.go +++ b/vendor/golang.org/x/tools/internal/imports/source.go @@ -59,9 +59,5 @@ type Source interface { // candidates satisfy all missing references for that package name. 
It is up // to each data source to select the best result for each entry in the // missing map. -<<<<<<< HEAD ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error) -======= - ResolveReferences(ctx context.Context, filename string, missing References) (map[PackageName]*Result, error) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/golang.org/x/tools/internal/imports/source_env.go b/vendor/golang.org/x/tools/internal/imports/source_env.go index ab1c005794..d14abaa319 100644 --- a/vendor/golang.org/x/tools/internal/imports/source_env.go +++ b/vendor/golang.org/x/tools/internal/imports/source_env.go @@ -48,11 +48,7 @@ func (s *ProcessEnvSource) LoadPackageNames(ctx context.Context, srcDir string, return r.loadPackageNames(unknown, srcDir) } -<<<<<<< HEAD func (s *ProcessEnvSource) ResolveReferences(ctx context.Context, filename string, refs map[string]map[string]bool) ([]*Result, error) { -======= -func (s *ProcessEnvSource) ResolveReferences(ctx context.Context, filename string, refs map[string]map[string]bool) (map[string]*Result, error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var mu sync.Mutex found := make(map[string][]pkgDistance) callback := &scanCallback{ @@ -125,13 +121,9 @@ func (s *ProcessEnvSource) ResolveReferences(ctx context.Context, filename strin if err := g.Wait(); err != nil { return nil, err } -<<<<<<< HEAD var ans []*Result for _, x := range results { ans = append(ans, x) } return ans, nil -======= - return results, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go index b9a89704ab..66e69b4389 100644 --- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go +++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -5,10 +5,6 @@ // Package packagesinternal exposes internal-only fields from go/packages. 
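// The pattern this package implements, in miniature: an internal package
// declares settable hook variables, and go/packages assigns them from init
// (as in the earlier go/packages hunk), exposing unexported state without
// widening the public API. Names below are illustrative.
package hooks

var GetDepsErrors = func(p interface{}) []error { return nil }

// In the owning package:
//
//	func init() {
//		hooks.GetDepsErrors = func(p interface{}) []error {
//			return p.(*Package).depsErrors
//		}
//	}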
package packagesinternal -<<<<<<< HEAD -======= -var GetForTest = func(p interface{}) string { return "" } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var GetDepsErrors = func(p interface{}) []*PackageError { return nil } type PackageError struct { @@ -19,10 +15,6 @@ type PackageError struct { var TypecheckCgo int var DepsErrors int // must be set as a LoadMode to call GetDepsErrors -<<<<<<< HEAD -======= -var ForTest int // must be set as a LoadMode to call GetForTest ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var SetModFlag = func(config interface{}, value string) {} var SetModFile = func(config interface{}, value string) {} diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go index 3e1f421943..9f0b871ff6 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go +++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go @@ -268,11 +268,8 @@ var PackageSymbols = map[string][]Symbol{ {"ErrTooLarge", Var, 0}, {"Fields", Func, 0}, {"FieldsFunc", Func, 0}, -<<<<<<< HEAD {"FieldsFuncSeq", Func, 24}, {"FieldsSeq", Func, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"HasPrefix", Func, 0}, {"HasSuffix", Func, 0}, {"Index", Func, 0}, @@ -285,10 +282,7 @@ var PackageSymbols = map[string][]Symbol{ {"LastIndexAny", Func, 0}, {"LastIndexByte", Func, 5}, {"LastIndexFunc", Func, 0}, -<<<<<<< HEAD {"Lines", Func, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"Map", Func, 0}, {"MinRead", Const, 0}, {"NewBuffer", Func, 0}, @@ -302,13 +296,9 @@ var PackageSymbols = map[string][]Symbol{ {"Split", Func, 0}, {"SplitAfter", Func, 0}, {"SplitAfterN", Func, 0}, -<<<<<<< HEAD {"SplitAfterSeq", Func, 24}, {"SplitN", Func, 0}, {"SplitSeq", Func, 24}, -======= - {"SplitN", Func, 0}, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"Title", Func, 0}, {"ToLower", Func, 0}, {"ToLowerSpecial", Func, 0}, @@ -550,10 +540,7 @@ var PackageSymbols = map[string][]Symbol{ {"NewCTR", Func, 0}, {"NewGCM", Func, 2}, {"NewGCMWithNonceSize", Func, 5}, -<<<<<<< HEAD {"NewGCMWithRandomNonce", Func, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"NewGCMWithTagSize", Func, 11}, {"NewOFB", Func, 0}, {"Stream", Type, 0}, @@ -692,7 +679,6 @@ var PackageSymbols = map[string][]Symbol{ {"Unmarshal", Func, 0}, {"UnmarshalCompressed", Func, 15}, }, -<<<<<<< HEAD "crypto/fips140": { {"Enabled", Func, 24}, }, @@ -701,8 +687,6 @@ var PackageSymbols = map[string][]Symbol{ {"Extract", Func, 24}, {"Key", Func, 24}, }, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "crypto/hmac": { {"Equal", Func, 1}, {"New", Func, 0}, @@ -713,7 +697,6 @@ var PackageSymbols = map[string][]Symbol{ {"Size", Const, 0}, {"Sum", Func, 2}, }, -<<<<<<< HEAD "crypto/mlkem": { {"(*DecapsulationKey1024).Bytes", Method, 24}, {"(*DecapsulationKey1024).Decapsulate", Method, 24}, @@ -745,17 +728,12 @@ var PackageSymbols = map[string][]Symbol{ "crypto/pbkdf2": { {"Key", Func, 24}, }, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "crypto/rand": { {"Int", Func, 0}, {"Prime", Func, 0}, {"Read", Func, 0}, {"Reader", Var, 0}, -<<<<<<< HEAD {"Text", Func, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }, "crypto/rc4": { {"(*Cipher).Reset", Method, 0}, @@ -834,7 +812,6 @@ var PackageSymbols = map[string][]Symbol{ {"Sum224", Func, 2}, {"Sum256", Func, 2}, }, -<<<<<<< HEAD "crypto/sha3": { {"(*SHA3).AppendBinary", Method, 24}, 
{"(*SHA3).BlockSize", Method, 24}, @@ -868,8 +845,6 @@ var PackageSymbols = map[string][]Symbol{ {"SumSHAKE128", Func, 24}, {"SumSHAKE256", Func, 24}, }, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "crypto/sha512": { {"BlockSize", Const, 0}, {"New", Func, 0}, @@ -892,10 +867,7 @@ var PackageSymbols = map[string][]Symbol{ {"ConstantTimeEq", Func, 0}, {"ConstantTimeLessOrEq", Func, 2}, {"ConstantTimeSelect", Func, 0}, -<<<<<<< HEAD {"WithDataIndependentTiming", Func, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"XORBytes", Func, 20}, }, "crypto/tls": { @@ -972,10 +944,7 @@ var PackageSymbols = map[string][]Symbol{ {"ClientHelloInfo", Type, 4}, {"ClientHelloInfo.CipherSuites", Field, 4}, {"ClientHelloInfo.Conn", Field, 8}, -<<<<<<< HEAD {"ClientHelloInfo.Extensions", Field, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"ClientHelloInfo.ServerName", Field, 4}, {"ClientHelloInfo.SignatureSchemes", Field, 8}, {"ClientHelloInfo.SupportedCurves", Field, 4}, @@ -993,10 +962,7 @@ var PackageSymbols = map[string][]Symbol{ {"Config.CurvePreferences", Field, 3}, {"Config.DynamicRecordSizingDisabled", Field, 7}, {"Config.EncryptedClientHelloConfigList", Field, 23}, -<<<<<<< HEAD {"Config.EncryptedClientHelloKeys", Field, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"Config.EncryptedClientHelloRejectionVerify", Field, 23}, {"Config.GetCertificate", Field, 4}, {"Config.GetClientCertificate", Field, 8}, @@ -1050,13 +1016,10 @@ var PackageSymbols = map[string][]Symbol{ {"ECHRejectionError", Type, 23}, {"ECHRejectionError.RetryConfigList", Field, 23}, {"Ed25519", Const, 13}, -<<<<<<< HEAD {"EncryptedClientHelloKey", Type, 24}, {"EncryptedClientHelloKey.Config", Field, 24}, {"EncryptedClientHelloKey.PrivateKey", Field, 24}, {"EncryptedClientHelloKey.SendAsRetry", Field, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"InsecureCipherSuites", Func, 14}, {"Listen", Func, 0}, {"LoadX509KeyPair", Func, 0}, @@ -1155,10 +1118,7 @@ var PackageSymbols = map[string][]Symbol{ {"VersionTLS12", Const, 2}, {"VersionTLS13", Const, 12}, {"X25519", Const, 8}, -<<<<<<< HEAD {"X25519MLKEM768", Const, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"X509KeyPair", Func, 0}, }, "crypto/x509": { @@ -1183,11 +1143,8 @@ var PackageSymbols = map[string][]Symbol{ {"(ConstraintViolationError).Error", Method, 0}, {"(HostnameError).Error", Method, 0}, {"(InsecureAlgorithmError).Error", Method, 6}, -<<<<<<< HEAD {"(OID).AppendBinary", Method, 24}, {"(OID).AppendText", Method, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(OID).Equal", Method, 22}, {"(OID).EqualASN1OID", Method, 22}, {"(OID).MarshalBinary", Method, 23}, @@ -1216,13 +1173,10 @@ var PackageSymbols = map[string][]Symbol{ {"Certificate.Extensions", Field, 2}, {"Certificate.ExtraExtensions", Field, 2}, {"Certificate.IPAddresses", Field, 1}, -<<<<<<< HEAD {"Certificate.InhibitAnyPolicy", Field, 24}, {"Certificate.InhibitAnyPolicyZero", Field, 24}, {"Certificate.InhibitPolicyMapping", Field, 24}, {"Certificate.InhibitPolicyMappingZero", Field, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"Certificate.IsCA", Field, 0}, {"Certificate.Issuer", Field, 0}, {"Certificate.IssuingCertificateURL", Field, 2}, @@ -1239,10 +1193,7 @@ var PackageSymbols = map[string][]Symbol{ {"Certificate.PermittedURIDomains", Field, 10}, {"Certificate.Policies", Field, 22}, 
{"Certificate.PolicyIdentifiers", Field, 0}, -<<<<<<< HEAD {"Certificate.PolicyMappings", Field, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"Certificate.PublicKey", Field, 0}, {"Certificate.PublicKeyAlgorithm", Field, 0}, {"Certificate.Raw", Field, 0}, @@ -1250,11 +1201,8 @@ var PackageSymbols = map[string][]Symbol{ {"Certificate.RawSubject", Field, 0}, {"Certificate.RawSubjectPublicKeyInfo", Field, 0}, {"Certificate.RawTBSCertificate", Field, 0}, -<<<<<<< HEAD {"Certificate.RequireExplicitPolicy", Field, 24}, {"Certificate.RequireExplicitPolicyZero", Field, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"Certificate.SerialNumber", Field, 0}, {"Certificate.Signature", Field, 0}, {"Certificate.SignatureAlgorithm", Field, 0}, @@ -1346,10 +1294,7 @@ var PackageSymbols = map[string][]Symbol{ {"NameConstraintsWithoutSANs", Const, 10}, {"NameMismatch", Const, 8}, {"NewCertPool", Func, 0}, -<<<<<<< HEAD {"NoValidChains", Const, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"NotAuthorizedToSign", Const, 0}, {"OID", Type, 22}, {"OIDFromInts", Func, 22}, @@ -1371,12 +1316,9 @@ var PackageSymbols = map[string][]Symbol{ {"ParsePKCS8PrivateKey", Func, 0}, {"ParsePKIXPublicKey", Func, 0}, {"ParseRevocationList", Func, 19}, -<<<<<<< HEAD {"PolicyMapping", Type, 24}, {"PolicyMapping.IssuerDomainPolicy", Field, 24}, {"PolicyMapping.SubjectDomainPolicy", Field, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"PublicKeyAlgorithm", Type, 0}, {"PureEd25519", Const, 13}, {"RSA", Const, 0}, @@ -1423,10 +1365,7 @@ var PackageSymbols = map[string][]Symbol{ {"UnknownPublicKeyAlgorithm", Const, 0}, {"UnknownSignatureAlgorithm", Const, 0}, {"VerifyOptions", Type, 0}, -<<<<<<< HEAD {"VerifyOptions.CertificatePolicies", Field, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"VerifyOptions.CurrentTime", Field, 0}, {"VerifyOptions.DNSName", Field, 0}, {"VerifyOptions.Intermediates", Field, 0}, @@ -2137,11 +2076,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*File).DynString", Method, 1}, {"(*File).DynValue", Method, 21}, {"(*File).DynamicSymbols", Method, 4}, -<<<<<<< HEAD {"(*File).DynamicVersionNeeds", Method, 24}, {"(*File).DynamicVersions", Method, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*File).ImportedLibraries", Method, 0}, {"(*File).ImportedSymbols", Method, 0}, {"(*File).Section", Method, 0}, @@ -2407,7 +2343,6 @@ var PackageSymbols = map[string][]Symbol{ {"DynFlag", Type, 0}, {"DynFlag1", Type, 21}, {"DynTag", Type, 0}, -<<<<<<< HEAD {"DynamicVersion", Type, 24}, {"DynamicVersion.Deps", Field, 24}, {"DynamicVersion.Flags", Field, 24}, @@ -2421,8 +2356,6 @@ var PackageSymbols = map[string][]Symbol{ {"DynamicVersionNeed", Type, 24}, {"DynamicVersionNeed.Name", Field, 24}, {"DynamicVersionNeed.Needs", Field, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"EI_ABIVERSION", Const, 0}, {"EI_CLASS", Const, 0}, {"EI_DATA", Const, 0}, @@ -3909,7 +3842,6 @@ var PackageSymbols = map[string][]Symbol{ {"Symbol.Size", Field, 0}, {"Symbol.Value", Field, 0}, {"Symbol.Version", Field, 13}, -<<<<<<< HEAD {"Symbol.VersionIndex", Field, 24}, {"Symbol.VersionScope", Field, 24}, {"SymbolVersionScope", Type, 24}, @@ -3923,10 +3855,6 @@ var PackageSymbols = map[string][]Symbol{ {"VersionScopeLocal", Const, 24}, {"VersionScopeNone", Const, 24}, {"VersionScopeSpecific", Const, 24}, -======= - {"Type", Type, 0}, - {"Version", Type, 0}, ->>>>>>> 
70e0318b1 ([WIP] add archivista storage backend) }, "debug/gosym": { {"(*DecodingError).Error", Method, 0}, @@ -4652,15 +4580,10 @@ var PackageSymbols = map[string][]Symbol{ {"FS", Type, 16}, }, "encoding": { -<<<<<<< HEAD {"BinaryAppender", Type, 24}, {"BinaryMarshaler", Type, 2}, {"BinaryUnmarshaler", Type, 2}, {"TextAppender", Type, 24}, -======= - {"BinaryMarshaler", Type, 2}, - {"BinaryUnmarshaler", Type, 2}, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"TextMarshaler", Type, 2}, {"TextUnmarshaler", Type, 2}, }, @@ -6190,24 +6113,16 @@ var PackageSymbols = map[string][]Symbol{ {"(*Interface).Complete", Method, 5}, {"(*Interface).Embedded", Method, 5}, {"(*Interface).EmbeddedType", Method, 11}, -<<<<<<< HEAD {"(*Interface).EmbeddedTypes", Method, 24}, {"(*Interface).Empty", Method, 5}, {"(*Interface).ExplicitMethod", Method, 5}, {"(*Interface).ExplicitMethods", Method, 24}, -======= - {"(*Interface).Empty", Method, 5}, - {"(*Interface).ExplicitMethod", Method, 5}, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*Interface).IsComparable", Method, 18}, {"(*Interface).IsImplicit", Method, 18}, {"(*Interface).IsMethodSet", Method, 18}, {"(*Interface).MarkImplicit", Method, 18}, {"(*Interface).Method", Method, 5}, -<<<<<<< HEAD {"(*Interface).Methods", Method, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*Interface).NumEmbeddeds", Method, 5}, {"(*Interface).NumExplicitMethods", Method, 5}, {"(*Interface).NumMethods", Method, 5}, @@ -6228,17 +6143,11 @@ var PackageSymbols = map[string][]Symbol{ {"(*MethodSet).At", Method, 5}, {"(*MethodSet).Len", Method, 5}, {"(*MethodSet).Lookup", Method, 5}, -<<<<<<< HEAD {"(*MethodSet).Methods", Method, 24}, {"(*MethodSet).String", Method, 5}, {"(*Named).AddMethod", Method, 5}, {"(*Named).Method", Method, 5}, {"(*Named).Methods", Method, 24}, -======= - {"(*MethodSet).String", Method, 5}, - {"(*Named).AddMethod", Method, 5}, - {"(*Named).Method", Method, 5}, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*Named).NumMethods", Method, 5}, {"(*Named).Obj", Method, 5}, {"(*Named).Origin", Method, 18}, @@ -6279,10 +6188,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Pointer).String", Method, 5}, {"(*Pointer).Underlying", Method, 5}, {"(*Scope).Child", Method, 5}, -<<<<<<< HEAD {"(*Scope).Children", Method, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*Scope).Contains", Method, 5}, {"(*Scope).End", Method, 5}, {"(*Scope).Innermost", Method, 5}, @@ -6318,10 +6224,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*StdSizes).Offsetsof", Method, 5}, {"(*StdSizes).Sizeof", Method, 5}, {"(*Struct).Field", Method, 5}, -<<<<<<< HEAD {"(*Struct).Fields", Method, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*Struct).NumFields", Method, 5}, {"(*Struct).String", Method, 5}, {"(*Struct).Tag", Method, 5}, @@ -6333,15 +6236,10 @@ var PackageSymbols = map[string][]Symbol{ {"(*Tuple).Len", Method, 5}, {"(*Tuple).String", Method, 5}, {"(*Tuple).Underlying", Method, 5}, -<<<<<<< HEAD {"(*Tuple).Variables", Method, 24}, {"(*TypeList).At", Method, 18}, {"(*TypeList).Len", Method, 18}, {"(*TypeList).Types", Method, 24}, -======= - {"(*TypeList).At", Method, 18}, - {"(*TypeList).Len", Method, 18}, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*TypeName).Exported", Method, 5}, {"(*TypeName).Id", Method, 5}, {"(*TypeName).IsAlias", Method, 9}, @@ -6359,17 +6257,11 @@ var PackageSymbols = map[string][]Symbol{ {"(*TypeParam).Underlying", 
Method, 18}, {"(*TypeParamList).At", Method, 18}, {"(*TypeParamList).Len", Method, 18}, -<<<<<<< HEAD {"(*TypeParamList).TypeParams", Method, 24}, {"(*Union).Len", Method, 18}, {"(*Union).String", Method, 18}, {"(*Union).Term", Method, 18}, {"(*Union).Terms", Method, 24}, -======= - {"(*Union).Len", Method, 18}, - {"(*Union).String", Method, 18}, - {"(*Union).Term", Method, 18}, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*Union).Underlying", Method, 18}, {"(*Var).Anonymous", Method, 5}, {"(*Var).Embedded", Method, 11}, @@ -6640,18 +6532,12 @@ var PackageSymbols = map[string][]Symbol{ {"(*Hash).WriteByte", Method, 14}, {"(*Hash).WriteString", Method, 14}, {"Bytes", Func, 19}, -<<<<<<< HEAD {"Comparable", Func, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"Hash", Type, 14}, {"MakeSeed", Func, 14}, {"Seed", Type, 14}, {"String", Func, 19}, -<<<<<<< HEAD {"WriteComparable", Func, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }, "html": { {"EscapeString", Func, 0}, @@ -7338,10 +7224,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*JSONHandler).WithGroup", Method, 21}, {"(*Level).UnmarshalJSON", Method, 21}, {"(*Level).UnmarshalText", Method, 21}, -<<<<<<< HEAD {"(*LevelVar).AppendText", Method, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*LevelVar).Level", Method, 21}, {"(*LevelVar).MarshalText", Method, 21}, {"(*LevelVar).Set", Method, 21}, @@ -7370,10 +7253,7 @@ var PackageSymbols = map[string][]Symbol{ {"(Attr).Equal", Method, 21}, {"(Attr).String", Method, 21}, {"(Kind).String", Method, 21}, -<<<<<<< HEAD {"(Level).AppendText", Method, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(Level).Level", Method, 21}, {"(Level).MarshalJSON", Method, 21}, {"(Level).MarshalText", Method, 21}, @@ -7404,10 +7284,7 @@ var PackageSymbols = map[string][]Symbol{ {"Debug", Func, 21}, {"DebugContext", Func, 21}, {"Default", Func, 21}, -<<<<<<< HEAD {"DiscardHandler", Var, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"Duration", Func, 21}, {"DurationValue", Func, 21}, {"Error", Func, 21}, @@ -7643,10 +7520,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Float).Acc", Method, 5}, {"(*Float).Add", Method, 5}, {"(*Float).Append", Method, 5}, -<<<<<<< HEAD {"(*Float).AppendText", Method, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*Float).Cmp", Method, 5}, {"(*Float).Copy", Method, 5}, {"(*Float).Float32", Method, 5}, @@ -7693,10 +7567,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Int).And", Method, 0}, {"(*Int).AndNot", Method, 0}, {"(*Int).Append", Method, 6}, -<<<<<<< HEAD {"(*Int).AppendText", Method, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*Int).Binomial", Method, 0}, {"(*Int).Bit", Method, 0}, {"(*Int).BitLen", Method, 0}, @@ -7753,10 +7624,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Int).Xor", Method, 0}, {"(*Rat).Abs", Method, 0}, {"(*Rat).Add", Method, 0}, -<<<<<<< HEAD {"(*Rat).AppendText", Method, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*Rat).Cmp", Method, 0}, {"(*Rat).Denom", Method, 0}, {"(*Rat).Float32", Method, 4}, @@ -7939,19 +7807,13 @@ var PackageSymbols = map[string][]Symbol{ {"Zipf", Type, 0}, }, "math/rand/v2": { -<<<<<<< HEAD {"(*ChaCha8).AppendBinary", Method, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*ChaCha8).MarshalBinary", Method, 22}, {"(*ChaCha8).Read", Method, 23}, 
{"(*ChaCha8).Seed", Method, 22}, {"(*ChaCha8).Uint64", Method, 22}, {"(*ChaCha8).UnmarshalBinary", Method, 22}, -<<<<<<< HEAD {"(*PCG).AppendBinary", Method, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*PCG).MarshalBinary", Method, 22}, {"(*PCG).Seed", Method, 22}, {"(*PCG).Uint64", Method, 22}, @@ -8219,10 +8081,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*UnixListener).SyscallConn", Method, 10}, {"(Flags).String", Method, 0}, {"(HardwareAddr).String", Method, 0}, -<<<<<<< HEAD {"(IP).AppendText", Method, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(IP).DefaultMask", Method, 0}, {"(IP).Equal", Method, 0}, {"(IP).IsGlobalUnicast", Method, 0}, @@ -8423,12 +8282,9 @@ var PackageSymbols = map[string][]Symbol{ {"(*MaxBytesError).Error", Method, 19}, {"(*ProtocolError).Error", Method, 0}, {"(*ProtocolError).Is", Method, 21}, -<<<<<<< HEAD {"(*Protocols).SetHTTP1", Method, 24}, {"(*Protocols).SetHTTP2", Method, 24}, {"(*Protocols).SetUnencryptedHTTP2", Method, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*Request).AddCookie", Method, 0}, {"(*Request).BasicAuth", Method, 4}, {"(*Request).Clone", Method, 13}, @@ -8488,13 +8344,10 @@ var PackageSymbols = map[string][]Symbol{ {"(Header).Values", Method, 14}, {"(Header).Write", Method, 0}, {"(Header).WriteSubset", Method, 0}, -<<<<<<< HEAD {"(Protocols).HTTP1", Method, 24}, {"(Protocols).HTTP2", Method, 24}, {"(Protocols).String", Method, 24}, {"(Protocols).UnencryptedHTTP2", Method, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"AllowQuerySemicolons", Func, 17}, {"CanonicalHeaderKey", Func, 0}, {"Client", Type, 0}, @@ -8557,7 +8410,6 @@ var PackageSymbols = map[string][]Symbol{ {"FileSystem", Type, 0}, {"Flusher", Type, 0}, {"Get", Func, 0}, -<<<<<<< HEAD {"HTTP2Config", Type, 24}, {"HTTP2Config.CountError", Field, 24}, {"HTTP2Config.MaxConcurrentStreams", Field, 24}, @@ -8570,8 +8422,6 @@ var PackageSymbols = map[string][]Symbol{ {"HTTP2Config.PingTimeout", Field, 24}, {"HTTP2Config.SendPingTimeout", Field, 24}, {"HTTP2Config.WriteByteTimeout", Field, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"Handle", Func, 0}, {"HandleFunc", Func, 0}, {"Handler", Type, 0}, @@ -8612,10 +8462,7 @@ var PackageSymbols = map[string][]Symbol{ {"PostForm", Func, 0}, {"ProtocolError", Type, 0}, {"ProtocolError.ErrorString", Field, 0}, -<<<<<<< HEAD {"Protocols", Type, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"ProxyFromEnvironment", Func, 0}, {"ProxyURL", Func, 0}, {"PushOptions", Type, 8}, @@ -8685,17 +8532,11 @@ var PackageSymbols = map[string][]Symbol{ {"Server.ConnState", Field, 3}, {"Server.DisableGeneralOptionsHandler", Field, 20}, {"Server.ErrorLog", Field, 3}, -<<<<<<< HEAD {"Server.HTTP2", Field, 24}, {"Server.Handler", Field, 0}, {"Server.IdleTimeout", Field, 8}, {"Server.MaxHeaderBytes", Field, 0}, {"Server.Protocols", Field, 24}, -======= - {"Server.Handler", Field, 0}, - {"Server.IdleTimeout", Field, 8}, - {"Server.MaxHeaderBytes", Field, 0}, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"Server.ReadHeaderTimeout", Field, 8}, {"Server.ReadTimeout", Field, 0}, {"Server.TLSConfig", Field, 0}, @@ -8785,20 +8626,14 @@ var PackageSymbols = map[string][]Symbol{ {"Transport.ExpectContinueTimeout", Field, 6}, {"Transport.ForceAttemptHTTP2", Field, 13}, {"Transport.GetProxyConnectHeader", Field, 16}, -<<<<<<< HEAD {"Transport.HTTP2", Field, 24}, -======= ->>>>>>> 
70e0318b1 ([WIP] add archivista storage backend) {"Transport.IdleConnTimeout", Field, 7}, {"Transport.MaxConnsPerHost", Field, 11}, {"Transport.MaxIdleConns", Field, 7}, {"Transport.MaxIdleConnsPerHost", Field, 0}, {"Transport.MaxResponseHeaderBytes", Field, 7}, {"Transport.OnProxyConnectResponse", Field, 20}, -<<<<<<< HEAD {"Transport.Protocols", Field, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"Transport.Proxy", Field, 0}, {"Transport.ProxyConnectHeader", Field, 8}, {"Transport.ReadBufferSize", Field, 13}, @@ -8986,11 +8821,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*AddrPort).UnmarshalText", Method, 18}, {"(*Prefix).UnmarshalBinary", Method, 18}, {"(*Prefix).UnmarshalText", Method, 18}, -<<<<<<< HEAD {"(Addr).AppendBinary", Method, 24}, {"(Addr).AppendText", Method, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(Addr).AppendTo", Method, 18}, {"(Addr).As16", Method, 18}, {"(Addr).As4", Method, 18}, @@ -9021,11 +8853,8 @@ var PackageSymbols = map[string][]Symbol{ {"(Addr).WithZone", Method, 18}, {"(Addr).Zone", Method, 18}, {"(AddrPort).Addr", Method, 18}, -<<<<<<< HEAD {"(AddrPort).AppendBinary", Method, 24}, {"(AddrPort).AppendText", Method, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(AddrPort).AppendTo", Method, 18}, {"(AddrPort).Compare", Method, 22}, {"(AddrPort).IsValid", Method, 18}, @@ -9034,11 +8863,8 @@ var PackageSymbols = map[string][]Symbol{ {"(AddrPort).Port", Method, 18}, {"(AddrPort).String", Method, 18}, {"(Prefix).Addr", Method, 18}, -<<<<<<< HEAD {"(Prefix).AppendBinary", Method, 24}, {"(Prefix).AppendText", Method, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(Prefix).AppendTo", Method, 18}, {"(Prefix).Bits", Method, 18}, {"(Prefix).Contains", Method, 18}, @@ -9223,10 +9049,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Error).Temporary", Method, 6}, {"(*Error).Timeout", Method, 6}, {"(*Error).Unwrap", Method, 13}, -<<<<<<< HEAD {"(*URL).AppendBinary", Method, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*URL).EscapedFragment", Method, 15}, {"(*URL).EscapedPath", Method, 5}, {"(*URL).Hostname", Method, 8}, @@ -9326,7 +9149,6 @@ var PackageSymbols = map[string][]Symbol{ {"(*ProcessState).SysUsage", Method, 0}, {"(*ProcessState).SystemTime", Method, 0}, {"(*ProcessState).UserTime", Method, 0}, -<<<<<<< HEAD {"(*Root).Close", Method, 24}, {"(*Root).Create", Method, 24}, {"(*Root).FS", Method, 24}, @@ -9338,8 +9160,6 @@ var PackageSymbols = map[string][]Symbol{ {"(*Root).OpenRoot", Method, 24}, {"(*Root).Remove", Method, 24}, {"(*Root).Stat", Method, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*SyscallError).Error", Method, 0}, {"(*SyscallError).Timeout", Method, 10}, {"(*SyscallError).Unwrap", Method, 13}, @@ -9433,11 +9253,8 @@ var PackageSymbols = map[string][]Symbol{ {"O_WRONLY", Const, 0}, {"Open", Func, 0}, {"OpenFile", Func, 0}, -<<<<<<< HEAD {"OpenInRoot", Func, 24}, {"OpenRoot", Func, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"PathError", Type, 0}, {"PathError.Err", Field, 0}, {"PathError.Op", Field, 0}, @@ -9459,10 +9276,7 @@ var PackageSymbols = map[string][]Symbol{ {"Remove", Func, 0}, {"RemoveAll", Func, 0}, {"Rename", Func, 0}, -<<<<<<< HEAD {"Root", Type, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"SEEK_CUR", Const, 0}, {"SEEK_END", Const, 0}, {"SEEK_SET", Const, 0}, @@ -9804,10 +9618,7 @@ var PackageSymbols = 
map[string][]Symbol{ {"Zero", Func, 0}, }, "regexp": { -<<<<<<< HEAD {"(*Regexp).AppendText", Method, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*Regexp).Copy", Method, 6}, {"(*Regexp).Expand", Method, 0}, {"(*Regexp).ExpandString", Method, 0}, @@ -9988,11 +9799,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*StackRecord).Stack", Method, 0}, {"(*TypeAssertionError).Error", Method, 0}, {"(*TypeAssertionError).RuntimeError", Method, 0}, -<<<<<<< HEAD {"(Cleanup).Stop", Method, 24}, {"AddCleanup", Func, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"BlockProfile", Func, 1}, {"BlockProfileRecord", Type, 1}, {"BlockProfileRecord.Count", Field, 1}, @@ -10003,10 +9811,7 @@ var PackageSymbols = map[string][]Symbol{ {"Caller", Func, 0}, {"Callers", Func, 0}, {"CallersFrames", Func, 7}, -<<<<<<< HEAD {"Cleanup", Type, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"Compiler", Const, 0}, {"Error", Type, 0}, {"Frame", Type, 7}, @@ -10369,11 +10174,8 @@ var PackageSymbols = map[string][]Symbol{ {"EqualFold", Func, 0}, {"Fields", Func, 0}, {"FieldsFunc", Func, 0}, -<<<<<<< HEAD {"FieldsFuncSeq", Func, 24}, {"FieldsSeq", Func, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"HasPrefix", Func, 0}, {"HasSuffix", Func, 0}, {"Index", Func, 0}, @@ -10386,10 +10188,7 @@ var PackageSymbols = map[string][]Symbol{ {"LastIndexAny", Func, 0}, {"LastIndexByte", Func, 5}, {"LastIndexFunc", Func, 0}, -<<<<<<< HEAD {"Lines", Func, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"Map", Func, 0}, {"NewReader", Func, 0}, {"NewReplacer", Func, 0}, @@ -10401,13 +10200,9 @@ var PackageSymbols = map[string][]Symbol{ {"Split", Func, 0}, {"SplitAfter", Func, 0}, {"SplitAfterN", Func, 0}, -<<<<<<< HEAD {"SplitAfterSeq", Func, 24}, {"SplitN", Func, 0}, {"SplitSeq", Func, 24}, -======= - {"SplitN", Func, 0}, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"Title", Func, 0}, {"ToLower", Func, 0}, {"ToLowerSpecial", Func, 0}, @@ -16823,13 +16618,9 @@ var PackageSymbols = map[string][]Symbol{ {"ValueOf", Func, 0}, }, "testing": { -<<<<<<< HEAD {"(*B).Chdir", Method, 24}, {"(*B).Cleanup", Method, 14}, {"(*B).Context", Method, 24}, -======= - {"(*B).Cleanup", Method, 14}, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*B).Elapsed", Method, 20}, {"(*B).Error", Method, 0}, {"(*B).Errorf", Method, 0}, @@ -16841,10 +16632,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*B).Helper", Method, 9}, {"(*B).Log", Method, 0}, {"(*B).Logf", Method, 0}, -<<<<<<< HEAD {"(*B).Loop", Method, 24}, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*B).Name", Method, 8}, {"(*B).ReportAllocs", Method, 1}, {"(*B).ReportMetric", Method, 13}, @@ -16862,13 +16650,9 @@ var PackageSymbols = map[string][]Symbol{ {"(*B).StopTimer", Method, 0}, {"(*B).TempDir", Method, 15}, {"(*F).Add", Method, 18}, -<<<<<<< HEAD {"(*F).Chdir", Method, 24}, {"(*F).Cleanup", Method, 18}, {"(*F).Context", Method, 24}, -======= - {"(*F).Cleanup", Method, 18}, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(*F).Error", Method, 18}, {"(*F).Errorf", Method, 18}, {"(*F).Fail", Method, 18}, @@ -16889,13 +16673,9 @@ var PackageSymbols = map[string][]Symbol{ {"(*F).TempDir", Method, 18}, {"(*M).Run", Method, 4}, {"(*PB).Next", Method, 3}, -<<<<<<< HEAD {"(*T).Chdir", Method, 24}, {"(*T).Cleanup", Method, 14}, {"(*T).Context", Method, 24}, -======= - {"(*T).Cleanup", Method, 14}, ->>>>>>> 70e0318b1 
([WIP] add archivista storage backend) {"(*T).Deadline", Method, 15}, {"(*T).Error", Method, 0}, {"(*T).Errorf", Method, 0}, @@ -17386,13 +17166,9 @@ var PackageSymbols = map[string][]Symbol{ {"(Time).Add", Method, 0}, {"(Time).AddDate", Method, 0}, {"(Time).After", Method, 0}, -<<<<<<< HEAD {"(Time).AppendBinary", Method, 24}, {"(Time).AppendFormat", Method, 5}, {"(Time).AppendText", Method, 24}, -======= - {"(Time).AppendFormat", Method, 5}, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) {"(Time).Before", Method, 0}, {"(Time).Clock", Method, 0}, {"(Time).Compare", Method, 20}, @@ -17866,12 +17642,9 @@ var PackageSymbols = map[string][]Symbol{ {"String", Func, 0}, {"StringData", Func, 0}, }, -<<<<<<< HEAD "weak": { {"(Pointer).Value", Method, 24}, {"Make", Func, 24}, {"Pointer", Type, 24}, }, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go index 52424273e0..cdae2b8e81 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/common.go +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -66,78 +66,3 @@ func IsTypeParam(t types.Type) bool { _, ok := types.Unalias(t).(*types.TypeParam) return ok } -<<<<<<< HEAD -======= - -// GenericAssignableTo is a generalization of types.AssignableTo that -// implements the following rule for uninstantiated generic types: -// -// If V and T are generic named types, then V is considered assignable to T if, -// for every possible instantiation of V[A_1, ..., A_N], the instantiation -// T[A_1, ..., A_N] is valid and V[A_1, ..., A_N] implements T[A_1, ..., A_N]. -// -// If T has structural constraints, they must be satisfied by V. -// -// For example, consider the following type declarations: -// -// type Interface[T any] interface { -// Accept(T) -// } -// -// type Container[T any] struct { -// Element T -// } -// -// func (c Container[T]) Accept(t T) { c.Element = t } -// -// In this case, GenericAssignableTo reports that instantiations of Container -// are assignable to the corresponding instantiation of Interface. -func GenericAssignableTo(ctxt *types.Context, V, T types.Type) bool { - V = types.Unalias(V) - T = types.Unalias(T) - - // If V and T are not both named, or do not have matching non-empty type - // parameter lists, fall back on types.AssignableTo. - - VN, Vnamed := V.(*types.Named) - TN, Tnamed := T.(*types.Named) - if !Vnamed || !Tnamed { - return types.AssignableTo(V, T) - } - - vtparams := VN.TypeParams() - ttparams := TN.TypeParams() - if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || VN.TypeArgs().Len() != 0 || TN.TypeArgs().Len() != 0 { - return types.AssignableTo(V, T) - } - - // V and T have the same (non-zero) number of type params. Instantiate both - // with the type parameters of V. This must always succeed for V, and will - // succeed for T if and only if the type set of each type parameter of V is a - // subset of the type set of the corresponding type parameter of T, meaning - // that every instantiation of V corresponds to a valid instantiation of T. - - // Minor optimization: ensure we share a context across the two - // instantiations below. 
- if ctxt == nil { - ctxt = types.NewContext() - } - - var targs []types.Type - for i := 0; i < vtparams.Len(); i++ { - targs = append(targs, vtparams.At(i)) - } - - vinst, err := types.Instantiate(ctxt, V, targs, true) - if err != nil { - panic("type parameters should satisfy their own constraints") - } - - tinst, err := types.Instantiate(ctxt, T, targs, true) - if err != nil { - return false - } - - return types.AssignableTo(vinst, tinst) -} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/golang.org/x/tools/internal/typesinternal/recv.go b/vendor/golang.org/x/tools/internal/typesinternal/recv.go index 5a2181d38d..e54accc69a 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/recv.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/recv.go @@ -11,11 +11,8 @@ import ( // ReceiverNamed returns the named type (if any) associated with the // type of recv, which may be of the form N or *N, or aliases thereof. // It also reports whether a Pointer was present. -<<<<<<< HEAD // // The named result may be nil in ill-typed code. -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) { t := recv.Type() if ptr, ok := types.Unalias(t).(*types.Pointer); ok { diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go index 1c03e05d9c..a93d51f988 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/types.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -82,10 +82,7 @@ func NameRelativeTo(pkg *types.Package) types.Qualifier { type NamedOrAlias interface { types.Type Obj() *types.TypeName -<<<<<<< HEAD // TODO(hxjiang): add method TypeArgs() *types.TypeList after stop supporting go1.22. -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // TypeParams is a light shim around t.TypeParams(). diff --git a/vendor/golang.org/x/tools/internal/versions/constraint.go b/vendor/golang.org/x/tools/internal/versions/constraint.go deleted file mode 100644 index 179063d484..0000000000 --- a/vendor/golang.org/x/tools/internal/versions/constraint.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package versions - -import "go/build/constraint" - -// ConstraintGoVersion is constraint.GoVersion (if built with go1.21+). -// Otherwise nil. -// -// Deprecate once x/tools is after go1.21. -var ConstraintGoVersion func(x constraint.Expr) string diff --git a/vendor/golang.org/x/tools/internal/versions/constraint_go121.go b/vendor/golang.org/x/tools/internal/versions/constraint_go121.go deleted file mode 100644 index 38011407d5..0000000000 --- a/vendor/golang.org/x/tools/internal/versions/constraint_go121.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.21 -// +build go1.21 - -package versions - -import "go/build/constraint" - -func init() { - ConstraintGoVersion = constraint.GoVersion -} diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go index 8619965f6a..c3e8a4f591 100644 --- a/vendor/google.golang.org/api/googleapi/googleapi.go +++ b/vendor/google.golang.org/api/googleapi/googleapi.go @@ -145,27 +145,13 @@ func CheckResponse(res *http.Response) error { } slurp, err := io.ReadAll(res.Body) if err == nil { -<<<<<<< HEAD return CheckResponseWithBody(res, slurp) -======= - jerr := new(errorReply) - err = json.Unmarshal(slurp, jerr) - if err == nil && jerr.Error != nil { - if jerr.Error.Code == 0 { - jerr.Error.Code = res.StatusCode - } - jerr.Error.Body = string(slurp) - jerr.Error.Header = res.Header - return jerr.Error - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return &Error{ Code: res.StatusCode, Body: string(slurp), Header: res.Header, } -<<<<<<< HEAD } @@ -193,8 +179,6 @@ func CheckResponseWithBody(res *http.Response, body []byte) error { Body: string(body), Header: res.Header, } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // IsNotModified reports whether err is the result of the @@ -234,7 +218,6 @@ var WithDataWrapper = MarshalStyle(true) // WithoutDataWrapper marshals JSON without a {"data": ...} wrapper. var WithoutDataWrapper = MarshalStyle(false) -<<<<<<< HEAD // JSONReader is like JSONBuffer, but returns an io.Reader instead. func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) { buf, err := wrap.JSONBuffer(v) @@ -246,9 +229,6 @@ func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) { // JSONBuffer encodes the body and wraps it if needed. func (wrap MarshalStyle) JSONBuffer(v interface{}) (*bytes.Buffer, error) { -======= -func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) buf := new(bytes.Buffer) if wrap { buf.Write([]byte(`{"data": `)) diff --git a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go index d09915d639..559cab1385 100644 --- a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go +++ b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go @@ -1,8 +1,4 @@ -<<<<<<< HEAD // Copyright 2025 Google LLC. -======= -// Copyright 2024 Google LLC. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
@@ -61,19 +57,13 @@ import ( "errors" "fmt" "io" -<<<<<<< HEAD "log/slog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "net/http" "net/url" "strconv" "strings" -<<<<<<< HEAD "github.com/googleapis/gax-go/v2/internallog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) googleapi "google.golang.org/api/googleapi" internal "google.golang.org/api/internal" gensupport "google.golang.org/api/internal/gensupport" @@ -97,10 +87,7 @@ var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint var _ = internal.Version -<<<<<<< HEAD var _ = internallog.New -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) const apiId = "iamcredentials:v1" const apiName = "iamcredentials" @@ -131,12 +118,8 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err if err != nil { return nil, err } -<<<<<<< HEAD s := &Service{client: client, BasePath: basePath, logger: internaloption.GetLogger(opts)} s.Projects = NewProjectsService(s) -======= - s, err := New(client) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -155,21 +138,12 @@ func New(client *http.Client) (*Service, error) { if client == nil { return nil, errors.New("client is nil") } -<<<<<<< HEAD return NewService(context.Background(), option.WithHTTPClient(client)) -======= - s := &Service{client: client, BasePath: basePath} - s.Projects = NewProjectsService(s) - return s, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type Service struct { client *http.Client -<<<<<<< HEAD logger *slog.Logger -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) BasePath string // API endpoint base URL UserAgent string // optional additional User-Agent fragment @@ -553,12 +527,7 @@ func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Header() http.Header { func (c *ProjectsServiceAccountsGenerateAccessTokenCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) -<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.generateaccesstokenrequest) -======= - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.generateaccesstokenrequest) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -574,10 +543,7 @@ func (c *ProjectsServiceAccountsGenerateAccessTokenCall) doRequest(alt string) ( googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.generateAccessToken", "request", internallog.HTTPRequest(req, body.Bytes())) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -613,17 +579,11 @@ func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Do(opts ...googleapi.Ca }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.generateAccessToken", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -674,12 +634,7 @@ func (c 
*ProjectsServiceAccountsGenerateIdTokenCall) Header() http.Header { func (c *ProjectsServiceAccountsGenerateIdTokenCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) -<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.generateidtokenrequest) -======= - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.generateidtokenrequest) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -695,10 +650,7 @@ func (c *ProjectsServiceAccountsGenerateIdTokenCall) doRequest(alt string) (*htt googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.generateIdToken", "request", internallog.HTTPRequest(req, body.Bytes())) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -734,17 +686,11 @@ func (c *ProjectsServiceAccountsGenerateIdTokenCall) Do(opts ...googleapi.CallOp }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.generateIdToken", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -803,19 +749,11 @@ func (c *ProjectsServiceAccountsGetAllowedLocationsCall) doRequest(alt string) ( if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } -<<<<<<< HEAD -======= - var body io.Reader = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}/allowedLocations") urls += "?" 
+ c.urlParams_.Encode() -<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) -======= - req, err := http.NewRequest("GET", urls, body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -823,10 +761,7 @@ func (c *ProjectsServiceAccountsGetAllowedLocationsCall) doRequest(alt string) ( googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.getAllowedLocations", "request", internallog.HTTPRequest(req, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -862,17 +797,11 @@ func (c *ProjectsServiceAccountsGetAllowedLocationsCall) Do(opts ...googleapi.Ca }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.getAllowedLocations", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -923,12 +852,7 @@ func (c *ProjectsServiceAccountsSignBlobCall) Header() http.Header { func (c *ProjectsServiceAccountsSignBlobCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) -<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.signblobrequest) -======= - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.signblobrequest) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -944,10 +868,7 @@ func (c *ProjectsServiceAccountsSignBlobCall) doRequest(alt string) (*http.Respo googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.signBlob", "request", internallog.HTTPRequest(req, body.Bytes())) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -983,17 +904,11 @@ func (c *ProjectsServiceAccountsSignBlobCall) Do(opts ...googleapi.CallOption) ( }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.signBlob", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -1044,12 +959,7 @@ func (c *ProjectsServiceAccountsSignJwtCall) Header() http.Header { func (c *ProjectsServiceAccountsSignJwtCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) -<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.signjwtrequest) -======= - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.signjwtrequest) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -1065,10 +975,7 @@ func (c 
*ProjectsServiceAccountsSignJwtCall) doRequest(alt string) (*http.Respon googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.signJwt", "request", internallog.HTTPRequest(req, body.Bytes())) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -1104,16 +1011,10 @@ func (c *ProjectsServiceAccountsSignJwtCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.signJwt", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } diff --git a/vendor/google.golang.org/api/idtoken/idtoken.go b/vendor/google.golang.org/api/idtoken/idtoken.go index 057a874b09..c8bf7c9b1f 100644 --- a/vendor/google.golang.org/api/idtoken/idtoken.go +++ b/vendor/google.golang.org/api/idtoken/idtoken.go @@ -113,10 +113,7 @@ func newTokenSourceNewAuth(ctx context.Context, audience string, ds *internal.Di CredentialsFile: ds.CredentialsFile, CredentialsJSON: ds.CredentialsJSON, Client: oauth2.NewClient(ctx, nil), -<<<<<<< HEAD Logger: ds.Logger, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }) if err != nil { return nil, err diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go index e842b631ab..86861e2438 100644 --- a/vendor/google.golang.org/api/internal/creds.go +++ b/vendor/google.golang.org/api/internal/creds.go @@ -139,10 +139,7 @@ func detectDefaultFromDialSettings(settings *DialSettings) (*auth.Credentials, e CredentialsFile: settings.CredentialsFile, CredentialsJSON: settings.CredentialsJSON, UseSelfSignedJWT: useSelfSignedJWT, -<<<<<<< HEAD Logger: settings.Logger, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }) } diff --git a/vendor/google.golang.org/api/internal/gensupport/media.go b/vendor/google.golang.org/api/internal/gensupport/media.go index 37f4859dc1..8c7435de3e 100644 --- a/vendor/google.golang.org/api/internal/gensupport/media.go +++ b/vendor/google.golang.org/api/internal/gensupport/media.go @@ -200,12 +200,9 @@ func (mi *MediaInfo) UploadType() string { // UploadRequest sets up an HTTP request for media upload. It adds headers // as necessary, and returns a replacement for the body and a function for http.Request.GetBody. 
func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newBody io.Reader, getBody func() (io.ReadCloser, error), cleanup func()) { -<<<<<<< HEAD if body == nil { body = new(bytes.Buffer) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) cleanup = func() {} if mi == nil { return body, nil, cleanup diff --git a/vendor/google.golang.org/api/internal/gensupport/resumable.go b/vendor/google.golang.org/api/internal/gensupport/resumable.go index 9e7d3f044c..d74fe2a299 100644 --- a/vendor/google.golang.org/api/internal/gensupport/resumable.go +++ b/vendor/google.golang.org/api/internal/gensupport/resumable.go @@ -164,11 +164,8 @@ func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, e // and calls the returned functions after the request returns (see send.go). // rx is private to the auto-generated API code. // Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close. -<<<<<<< HEAD // Upload does not parse the response into the error on a non 200 response; // it is the caller's responsibility to call resp.Body.Close. -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) { // There are a couple of cases where it's possible for err and resp to both @@ -261,7 +258,6 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err rCtx, cancel = context.WithTimeout(ctx, rx.ChunkTransferTimeout) } -<<<<<<< HEAD // We close the response's body here, since we definitely will not // return `resp` now. If we close it before the select case above, a // timer may fire and cause us to return a response with a closed body @@ -274,8 +270,6 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err io.Copy(io.Discard, resp.Body) resp.Body.Close() } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) resp, err = rx.transferChunk(rCtx) var status int @@ -302,21 +296,11 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err rx.attempts++ pause = bo.Pause() -<<<<<<< HEAD -======= - if resp != nil && resp.Body != nil { - resp.Body.Close() - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // If the chunk was uploaded successfully, but there's still // more to go, upload the next chunk without any delay. if statusResumeIncomplete(resp) { -<<<<<<< HEAD -======= - resp.Body.Close() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) continue } diff --git a/vendor/google.golang.org/api/internal/gensupport/send.go b/vendor/google.golang.org/api/internal/gensupport/send.go index 79da7b37dd..1c91f147ab 100644 --- a/vendor/google.golang.org/api/internal/gensupport/send.go +++ b/vendor/google.golang.org/api/internal/gensupport/send.go @@ -9,10 +9,7 @@ import ( "encoding/json" "errors" "fmt" -<<<<<<< HEAD "io" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "net/http" "strings" "time" @@ -226,7 +223,6 @@ func DecodeResponse(target interface{}, res *http.Response) error { } return json.NewDecoder(res.Body).Decode(target) } -<<<<<<< HEAD // DecodeResponseBytes decodes the body of res into target and returns bytes read // from the body. If there is no body, target is unchanged. 
@@ -243,5 +239,3 @@ func DecodeResponseBytes(target interface{}, res *http.Response) ([]byte, error) } return b, nil } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index d95ca43c60..fad0d7dbf9 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,8 +5,4 @@ package internal // Version is the current tagged release of the library. -<<<<<<< HEAD const Version = "0.219.0" -======= -const Version = "0.210.0" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go index 4bb1fd4b7d..e3321ca4a6 100644 --- a/vendor/google.golang.org/api/option/option.go +++ b/vendor/google.golang.org/api/option/option.go @@ -44,7 +44,6 @@ func (w withCredFile) Apply(o *internal.DialSettings) { // WithCredentialsFile returns a ClientOption that authenticates // API calls with the given service account or refresh token JSON // credentials file. -<<<<<<< HEAD // // Important: If you accept a credential configuration (credential // JSON/File/Stream) from an external source for authentication to Google @@ -53,8 +52,6 @@ func (w withCredFile) Apply(o *internal.DialSettings) { // Google APIs can compromise the security of your systems and data. For // more information, refer to [Validate credential configurations from // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials). -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func WithCredentialsFile(filename string) ClientOption { return withCredFile(filename) } @@ -62,7 +59,6 @@ func WithCredentialsFile(filename string) ClientOption { // WithServiceAccountFile returns a ClientOption that uses a Google service // account credentials file to authenticate. // -<<<<<<< HEAD // Important: If you accept a credential configuration (credential // JSON/File/Stream) from an external source for authentication to Google // Cloud Platform, you must validate it before providing it to any Google @@ -71,8 +67,6 @@ func WithCredentialsFile(filename string) ClientOption { // more information, refer to [Validate credential configurations from // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials). // -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Deprecated: Use WithCredentialsFile instead. func WithServiceAccountFile(filename string) ClientOption { return WithCredentialsFile(filename) @@ -81,7 +75,6 @@ func WithServiceAccountFile(filename string) ClientOption { // WithCredentialsJSON returns a ClientOption that authenticates // API calls with the given service account or refresh token JSON // credentials. -<<<<<<< HEAD // // Important: If you accept a credential configuration (credential // JSON/File/Stream) from an external source for authentication to Google @@ -90,8 +83,6 @@ func WithServiceAccountFile(filename string) ClientOption { // Google APIs can compromise the security of your systems and data. For // more information, refer to [Validate credential configurations from // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials). 
-======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func WithCredentialsJSON(p []byte) ClientOption { return withCredentialsJSON(p) } diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index a9813bb6c0..992c4c0145 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -93,11 +93,7 @@ "location": "us-west4" } ], -<<<<<<< HEAD "etag": "\"3133343838373034343130353038353234313337\"", -======= - "etag": "\"3134393437363236373436353839383934323639\"", ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -3228,7 +3224,6 @@ ], "supportsSubscription": true }, -<<<<<<< HEAD "move": { "description": "Moves the source object to the destination object in the same bucket.", "httpMethod": "POST", @@ -3321,8 +3316,6 @@ "https://www.googleapis.com/auth/devstorage.read_write" ] }, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "patch": { "description": "Patches an object's metadata.", "httpMethod": "PATCH", @@ -4371,11 +4364,7 @@ } } }, -<<<<<<< HEAD "revision": "20241206", -======= - "revision": "20241113", ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "rootUrl": "https://storage.googleapis.com/", "schemas": { "AdvanceRelocateBucketOperationRequest": { diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index 0a5a3e6bf5..89f08a8d98 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -1,8 +1,4 @@ -<<<<<<< HEAD // Copyright 2025 Google LLC. -======= -// Copyright 2024 Google LLC. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
@@ -68,20 +64,14 @@ import ( "errors" "fmt" "io" -<<<<<<< HEAD "log/slog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "net/http" "net/url" "strconv" "strings" "github.com/googleapis/gax-go/v2" -<<<<<<< HEAD "github.com/googleapis/gax-go/v2/internallog" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) googleapi "google.golang.org/api/googleapi" internal "google.golang.org/api/internal" gensupport "google.golang.org/api/internal/gensupport" @@ -105,10 +95,7 @@ var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint var _ = internal.Version -<<<<<<< HEAD var _ = internallog.New -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var _ = gax.Version const apiId = "storage:v1" @@ -155,7 +142,6 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err if err != nil { return nil, err } -<<<<<<< HEAD s := &Service{client: client, BasePath: basePath, logger: internaloption.GetLogger(opts)} s.AnywhereCaches = NewAnywhereCachesService(s) s.BucketAccessControls = NewBucketAccessControlsService(s) @@ -169,9 +155,6 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err s.Objects = NewObjectsService(s) s.Operations = NewOperationsService(s) s.Projects = NewProjectsService(s) -======= - s, err := New(client) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -190,32 +173,12 @@ func New(client *http.Client) (*Service, error) { if client == nil { return nil, errors.New("client is nil") } -<<<<<<< HEAD return NewService(context.Background(), option.WithHTTPClient(client)) -======= - s := &Service{client: client, BasePath: basePath} - s.AnywhereCaches = NewAnywhereCachesService(s) - s.BucketAccessControls = NewBucketAccessControlsService(s) - s.Buckets = NewBucketsService(s) - s.Channels = NewChannelsService(s) - s.DefaultObjectAccessControls = NewDefaultObjectAccessControlsService(s) - s.Folders = NewFoldersService(s) - s.ManagedFolders = NewManagedFoldersService(s) - s.Notifications = NewNotificationsService(s) - s.ObjectAccessControls = NewObjectAccessControlsService(s) - s.Objects = NewObjectsService(s) - s.Operations = NewOperationsService(s) - s.Projects = NewProjectsService(s) - return s, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type Service struct { client *http.Client -<<<<<<< HEAD logger *slog.Logger -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) BasePath string // API endpoint base URL UserAgent string // optional additional User-Agent fragment @@ -2932,19 +2895,11 @@ func (c *AnywhereCachesDisableCall) Header() http.Header { func (c *AnywhereCachesDisableCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) -<<<<<<< HEAD -======= - var body io.Reader = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches/{anywhereCacheId}/disable") urls += "?" 
+ c.urlParams_.Encode() -<<<<<<< HEAD req, err := http.NewRequest("POST", urls, nil) -======= - req, err := http.NewRequest("POST", urls, body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -2953,10 +2908,7 @@ func (c *AnywhereCachesDisableCall) doRequest(alt string) (*http.Response, error "bucket": c.bucket, "anywhereCacheId": c.anywhereCacheId, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.anywhereCaches.disable", "request", internallog.HTTPRequest(req, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -2991,17 +2943,11 @@ func (c *AnywhereCachesDisableCall) Do(opts ...googleapi.CallOption) (*AnywhereC }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.anywhereCaches.disable", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -3062,19 +3008,11 @@ func (c *AnywhereCachesGetCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } -<<<<<<< HEAD -======= - var body io.Reader = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches/{anywhereCacheId}") urls += "?" + c.urlParams_.Encode() -<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) -======= - req, err := http.NewRequest("GET", urls, body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -3083,10 +3021,7 @@ func (c *AnywhereCachesGetCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "anywhereCacheId": c.anywhereCacheId, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.anywhereCaches.get", "request", internallog.HTTPRequest(req, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -3121,17 +3056,11 @@ func (c *AnywhereCachesGetCall) Do(opts ...googleapi.CallOption) (*AnywhereCache }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.anywhereCaches.get", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -3179,12 +3108,7 @@ func (c *AnywhereCachesInsertCall) Header() http.Header { func (c *AnywhereCachesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) -<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.anywherecache) -======= - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.anywherecache) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -3200,10 +3124,7 @@ func (c 
*AnywhereCachesInsertCall) doRequest(alt string) (*http.Response, error) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.anywhereCaches.insert", "request", internallog.HTTPRequest(req, body.Bytes())) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -3239,17 +3160,11 @@ func (c *AnywhereCachesInsertCall) Do(opts ...googleapi.CallOption) (*GoogleLong }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.anywhereCaches.insert", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -3322,19 +3237,11 @@ func (c *AnywhereCachesListCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } -<<<<<<< HEAD -======= - var body io.Reader = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches") urls += "?" + c.urlParams_.Encode() -<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) -======= - req, err := http.NewRequest("GET", urls, body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -3342,10 +3249,7 @@ func (c *AnywhereCachesListCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.anywhereCaches.list", "request", internallog.HTTPRequest(req, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -3380,17 +3284,11 @@ func (c *AnywhereCachesListCall) Do(opts ...googleapi.CallOption) (*AnywhereCach }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.anywhereCaches.list", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -3460,19 +3358,11 @@ func (c *AnywhereCachesPauseCall) Header() http.Header { func (c *AnywhereCachesPauseCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) -<<<<<<< HEAD -======= - var body io.Reader = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches/{anywhereCacheId}/pause") urls += "?" 
+ c.urlParams_.Encode() -<<<<<<< HEAD req, err := http.NewRequest("POST", urls, nil) -======= - req, err := http.NewRequest("POST", urls, body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -3481,10 +3371,7 @@ func (c *AnywhereCachesPauseCall) doRequest(alt string) (*http.Response, error) "bucket": c.bucket, "anywhereCacheId": c.anywhereCacheId, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.anywhereCaches.pause", "request", internallog.HTTPRequest(req, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -3519,17 +3406,11 @@ func (c *AnywhereCachesPauseCall) Do(opts ...googleapi.CallOption) (*AnywhereCac }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.anywhereCaches.pause", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -3578,19 +3459,11 @@ func (c *AnywhereCachesResumeCall) Header() http.Header { func (c *AnywhereCachesResumeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) -<<<<<<< HEAD -======= - var body io.Reader = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches/{anywhereCacheId}/resume") urls += "?" 
+ c.urlParams_.Encode() -<<<<<<< HEAD req, err := http.NewRequest("POST", urls, nil) -======= - req, err := http.NewRequest("POST", urls, body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -3599,10 +3472,7 @@ func (c *AnywhereCachesResumeCall) doRequest(alt string) (*http.Response, error) "bucket": c.bucket, "anywhereCacheId": c.anywhereCacheId, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.anywhereCaches.resume", "request", internallog.HTTPRequest(req, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -3637,17 +3507,11 @@ func (c *AnywhereCachesResumeCall) Do(opts ...googleapi.CallOption) (*AnywhereCa }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.anywhereCaches.resume", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -3699,12 +3563,7 @@ func (c *AnywhereCachesUpdateCall) Header() http.Header { func (c *AnywhereCachesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) -<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.anywherecache) -======= - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.anywherecache) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -3721,10 +3580,7 @@ func (c *AnywhereCachesUpdateCall) doRequest(alt string) (*http.Response, error) "bucket": c.bucket, "anywhereCacheId": c.anywhereCacheId, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.anywhereCaches.update", "request", internallog.HTTPRequest(req, body.Bytes())) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -3760,17 +3616,11 @@ func (c *AnywhereCachesUpdateCall) Do(opts ...googleapi.CallOption) (*GoogleLong }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.anywhereCaches.update", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -3829,19 +3679,11 @@ func (c *BucketAccessControlsDeleteCall) Header() http.Header { func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) -<<<<<<< HEAD -======= - var body io.Reader = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") urls += "?" 
+ c.urlParams_.Encode()
-<<<<<<< HEAD
 	req, err := http.NewRequest("DELETE", urls, nil)
-=======
-	req, err := http.NewRequest("DELETE", urls, body)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -3850,10 +3692,7 @@ func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response,
 		"bucket": c.bucket,
 		"entity": c.entity,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.delete", "request", internallog.HTTPRequest(req, nil))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -3868,10 +3707,7 @@ func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error
 	if err := googleapi.CheckResponse(res); err != nil {
 		return gensupport.WrapError(err)
 	}
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.delete", "response", internallog.HTTPResponse(res, nil))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return nil
 }
@@ -3941,19 +3777,11 @@ func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, err
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
-<<<<<<< HEAD
-=======
-	var body io.Reader = nil
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}")
 	urls += "?" + c.urlParams_.Encode()
-<<<<<<< HEAD
 	req, err := http.NewRequest("GET", urls, nil)
-=======
-	req, err := http.NewRequest("GET", urls, body)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -3962,10 +3790,7 @@ func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, err
 		"bucket": c.bucket,
 		"entity": c.entity,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.get", "request", internallog.HTTPRequest(req, nil))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -4001,17 +3826,11 @@ func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketA
 		},
 	}
 	target := &ret
-<<<<<<< HEAD
 	b, err := gensupport.DecodeResponseBytes(target, res)
 	if err != nil {
 		return nil, err
 	}
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.get", "response", internallog.HTTPResponse(res, b))
-=======
-	if err := gensupport.DecodeResponse(target, res); err != nil {
-		return nil, err
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return ret, nil
 }
@@ -4066,12 +3885,7 @@ func (c *BucketAccessControlsInsertCall) Header() http.Header {
 func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
-<<<<<<< HEAD
 	body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.bucketaccesscontrol)
-=======
-	var body io.Reader = nil
-	body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -4087,10 +3901,7 @@ func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response,
 	googleapi.Expand(req.URL, map[string]string{
 		"bucket": c.bucket,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.insert", "request", internallog.HTTPRequest(req, body.Bytes()))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -4126,17 +3937,11 @@ func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Buck
 		},
 	}
 	target := &ret
-<<<<<<< HEAD
 	b, err := gensupport.DecodeResponseBytes(target, res)
 	if err != nil {
 		return nil, err
 	}
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.insert", "response", internallog.HTTPResponse(res, b))
-=======
-	if err := gensupport.DecodeResponse(target, res); err != nil {
-		return nil, err
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return ret, nil
 }
@@ -4201,19 +4006,11 @@ func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, er
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
-<<<<<<< HEAD
-=======
-	var body io.Reader = nil
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl")
 	urls += "?" + c.urlParams_.Encode()
-<<<<<<< HEAD
 	req, err := http.NewRequest("GET", urls, nil)
-=======
-	req, err := http.NewRequest("GET", urls, body)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -4221,10 +4018,7 @@ func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, er
 	googleapi.Expand(req.URL, map[string]string{
 		"bucket": c.bucket,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.list", "request", internallog.HTTPRequest(req, nil))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -4260,17 +4054,11 @@ func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Bucket
 		},
 	}
 	target := &ret
-<<<<<<< HEAD
 	b, err := gensupport.DecodeResponseBytes(target, res)
 	if err != nil {
 		return nil, err
 	}
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.list", "response", internallog.HTTPResponse(res, b))
-=======
-	if err := gensupport.DecodeResponse(target, res); err != nil {
-		return nil, err
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return ret, nil
 }
@@ -4330,12 +4118,7 @@ func (c *BucketAccessControlsPatchCall) Header() http.Header {
 func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
-<<<<<<< HEAD
 	body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.bucketaccesscontrol)
-=======
-	var body io.Reader = nil
-	body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -4352,10 +4135,7 @@ func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, e
 		"bucket": c.bucket,
 		"entity": c.entity,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.patch", "request", internallog.HTTPRequest(req, body.Bytes()))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -4391,17 +4171,11 @@ func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Bucke
 		},
 	}
 	target := &ret
-<<<<<<< HEAD
 	b, err := gensupport.DecodeResponseBytes(target, res)
 	if err != nil {
 		return nil, err
 	}
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.patch", "response", internallog.HTTPResponse(res, b))
-=======
-	if err := gensupport.DecodeResponse(target, res); err != nil {
-		return nil, err
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return ret, nil
 }
@@ -4461,12 +4235,7 @@ func (c *BucketAccessControlsUpdateCall) Header() http.Header {
 func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
-<<<<<<< HEAD
 	body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.bucketaccesscontrol)
-=======
-	var body io.Reader = nil
-	body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -4483,10 +4252,7 @@ func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response,
 		"bucket": c.bucket,
 		"entity": c.entity,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.update", "request", internallog.HTTPRequest(req, body.Bytes()))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -4522,17 +4288,11 @@ func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Buck
 		},
 	}
 	target := &ret
-<<<<<<< HEAD
 	b, err := gensupport.DecodeResponseBytes(target, res)
 	if err != nil {
 		return nil, err
 	}
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.update", "response", internallog.HTTPResponse(res, b))
-=======
-	if err := gensupport.DecodeResponse(target, res); err != nil {
-		return nil, err
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return ret, nil
 }
@@ -4601,19 +4361,11 @@ func (c *BucketsDeleteCall) Header() http.Header {
 func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
-<<<<<<< HEAD
-=======
-	var body io.Reader = nil
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
 	urls += "?" + c.urlParams_.Encode()
-<<<<<<< HEAD
 	req, err := http.NewRequest("DELETE", urls, nil)
-=======
-	req, err := http.NewRequest("DELETE", urls, body)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -4621,10 +4373,7 @@ func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	googleapi.Expand(req.URL, map[string]string{
 		"bucket": c.bucket,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.delete", "request", internallog.HTTPRequest(req, nil))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -4639,10 +4388,7 @@ func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error {
 	if err := googleapi.CheckResponse(res); err != nil {
 		return gensupport.WrapError(err)
 	}
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.delete", "response", internallog.HTTPResponse(res, nil))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return nil
 }
@@ -4752,19 +4498,11 @@ func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) {
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
-<<<<<<< HEAD
-=======
-	var body io.Reader = nil
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
 	urls += "?" + c.urlParams_.Encode()
-<<<<<<< HEAD
 	req, err := http.NewRequest("GET", urls, nil)
-=======
-	req, err := http.NewRequest("GET", urls, body)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -4772,10 +4510,7 @@ func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) {
 	googleapi.Expand(req.URL, map[string]string{
 		"bucket": c.bucket,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.get", "request", internallog.HTTPRequest(req, nil))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -4810,17 +4545,11 @@ func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
 		},
 	}
 	target := &ret
-<<<<<<< HEAD
 	b, err := gensupport.DecodeResponseBytes(target, res)
 	if err != nil {
 		return nil, err
 	}
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.get", "response", internallog.HTTPResponse(res, b))
-=======
-	if err := gensupport.DecodeResponse(target, res); err != nil {
-		return nil, err
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return ret, nil
 }
@@ -4894,19 +4623,11 @@ func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error)
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
-<<<<<<< HEAD
-=======
-	var body io.Reader = nil
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam")
 	urls += "?" + c.urlParams_.Encode()
-<<<<<<< HEAD
 	req, err := http.NewRequest("GET", urls, nil)
-=======
-	req, err := http.NewRequest("GET", urls, body)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -4914,10 +4635,7 @@ func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response,
 	googleapi.Expand(req.URL, map[string]string{
 		"bucket": c.bucket,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.getIamPolicy", "request", internallog.HTTPRequest(req, nil))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -4952,17 +4670,11 @@ func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err
 		},
 	}
 	target := &ret
-<<<<<<< HEAD
 	b, err := gensupport.DecodeResponseBytes(target, res)
 	if err != nil {
 		return nil, err
 	}
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.getIamPolicy", "response", internallog.HTTPResponse(res, b))
-=======
-	if err := gensupport.DecodeResponse(target, res); err != nil {
-		return nil, err
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return ret, nil
 }
@@ -5029,19 +4741,11 @@ func (c *BucketsGetStorageLayoutCall) doRequest(alt string) (*http.Response, err
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
-<<<<<<< HEAD
-=======
-	var body io.Reader = nil
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/storageLayout")
 	urls += "?" + c.urlParams_.Encode()
-<<<<<<< HEAD
 	req, err := http.NewRequest("GET", urls, nil)
-=======
-	req, err := http.NewRequest("GET", urls, body)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -5049,10 +4753,7 @@ func (c *BucketsGetStorageLayoutCall) doRequest(alt string) (*http.Response, err
 	googleapi.Expand(req.URL, map[string]string{
 		"bucket": c.bucket,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.getStorageLayout", "request", internallog.HTTPRequest(req, nil))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -5088,17 +4789,11 @@ func (c *BucketsGetStorageLayoutCall) Do(opts ...googleapi.CallOption) (*BucketS
 		},
 	}
 	target := &ret
-<<<<<<< HEAD
 	b, err := gensupport.DecodeResponseBytes(target, res)
 	if err != nil {
 		return nil, err
 	}
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.getStorageLayout", "response", internallog.HTTPResponse(res, b))
-=======
-	if err := gensupport.DecodeResponse(target, res); err != nil {
-		return nil, err
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return ret, nil
 }
@@ -5229,12 +4924,7 @@ func (c *BucketsInsertCall) Header() http.Header {
 func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
-<<<<<<< HEAD
 	body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.bucket)
-=======
-	var body io.Reader = nil
-	body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -5247,10 +4937,7 @@ func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) {
 		return nil, err
 	}
 	req.Header = reqHeaders
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.insert", "request", internallog.HTTPRequest(req, body.Bytes()))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -5285,17 +4972,11 @@ func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
 		},
 	}
 	target := &ret
-<<<<<<< HEAD
 	b, err := gensupport.DecodeResponseBytes(target, res)
 	if err != nil {
 		return nil, err
 	}
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.insert", "response", internallog.HTTPResponse(res, b))
-=======
-	if err := gensupport.DecodeResponse(target, res); err != nil {
-		return nil, err
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return ret, nil
 }
@@ -5402,27 +5083,16 @@ func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) {
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
-<<<<<<< HEAD
-=======
-	var body io.Reader = nil
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b")
 	urls += "?" + c.urlParams_.Encode()
-<<<<<<< HEAD
 	req, err := http.NewRequest("GET", urls, nil)
-=======
-	req, err := http.NewRequest("GET", urls, body)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
 	req.Header = reqHeaders
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.list", "request", internallog.HTTPRequest(req, nil))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -5457,17 +5127,11 @@ func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) {
 		},
 	}
 	target := &ret
-<<<<<<< HEAD
 	b, err := gensupport.DecodeResponseBytes(target, res)
 	if err != nil {
 		return nil, err
 	}
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.list", "response", internallog.HTTPResponse(res, b))
-=======
-	if err := gensupport.DecodeResponse(target, res); err != nil {
-		return nil, err
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return ret, nil
 }
@@ -5544,19 +5208,11 @@ func (c *BucketsLockRetentionPolicyCall) Header() http.Header {
 func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
-<<<<<<< HEAD
-=======
-	var body io.Reader = nil
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/lockRetentionPolicy")
 	urls += "?" + c.urlParams_.Encode()
-<<<<<<< HEAD
 	req, err := http.NewRequest("POST", urls, nil)
-=======
-	req, err := http.NewRequest("POST", urls, body)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -5564,10 +5220,7 @@ func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response,
 	googleapi.Expand(req.URL, map[string]string{
 		"bucket": c.bucket,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.lockRetentionPolicy", "request", internallog.HTTPRequest(req, nil))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -5602,17 +5255,11 @@ func (c *BucketsLockRetentionPolicyCall) Do(opts ...googleapi.CallOption) (*Buck
 		},
 	}
 	target := &ret
-<<<<<<< HEAD
 	b, err := gensupport.DecodeResponseBytes(target, res)
 	if err != nil {
 		return nil, err
 	}
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.lockRetentionPolicy", "response", internallog.HTTPResponse(res, b))
-=======
-	if err := gensupport.DecodeResponse(target, res); err != nil {
-		return nil, err
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return ret, nil
 }
@@ -5754,12 +5401,7 @@ func (c *BucketsPatchCall) Header() http.Header {
 func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
-<<<<<<< HEAD
 	body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.bucket2)
-=======
-	var body io.Reader = nil
-	body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -5775,10 +5417,7 @@ func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) {
 	googleapi.Expand(req.URL, map[string]string{
 		"bucket": c.bucket,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.patch", "request", internallog.HTTPRequest(req, body.Bytes()))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -5813,17 +5452,11 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
 		},
 	}
 	target := &ret
-<<<<<<< HEAD
 	b, err := gensupport.DecodeResponseBytes(target, res)
 	if err != nil {
 		return nil, err
 	}
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.patch", "response", internallog.HTTPResponse(res, b))
-=======
-	if err := gensupport.DecodeResponse(target, res); err != nil {
-		return nil, err
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return ret, nil
 }
@@ -5872,12 +5505,7 @@ func (c *BucketsRelocateCall) Header() http.Header {
 func (c *BucketsRelocateCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
-<<<<<<< HEAD
 	body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.relocatebucketrequest)
-=======
-	var body io.Reader = nil
-	body, err := googleapi.WithoutDataWrapper.JSONReader(c.relocatebucketrequest)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -5893,10 +5521,7 @@ func (c *BucketsRelocateCall) doRequest(alt string) (*http.Response, error) {
 	googleapi.Expand(req.URL, map[string]string{
 		"bucket": c.bucket,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.relocate", "request", internallog.HTTPRequest(req, body.Bytes()))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -5932,17 +5557,11 @@ func (c *BucketsRelocateCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunni
 		},
 	}
 	target := &ret
-<<<<<<< HEAD
 	b, err := gensupport.DecodeResponseBytes(target, res)
 	if err != nil {
 		return nil, err
 	}
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.relocate", "response", internallog.HTTPResponse(res, b))
-=======
-	if err := gensupport.DecodeResponse(target, res); err != nil {
-		return nil, err
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return ret, nil
 }
@@ -6009,19 +5628,11 @@ func (c *BucketsRestoreCall) Header() http.Header {
 func (c *BucketsRestoreCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
-<<<<<<< HEAD
-=======
-	var body io.Reader = nil
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/restore")
 	urls += "?" + c.urlParams_.Encode()
-<<<<<<< HEAD
 	req, err := http.NewRequest("POST", urls, nil)
-=======
-	req, err := http.NewRequest("POST", urls, body)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -6029,10 +5640,7 @@ func (c *BucketsRestoreCall) doRequest(alt string) (*http.Response, error) {
 	googleapi.Expand(req.URL, map[string]string{
 		"bucket": c.bucket,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.restore", "request", internallog.HTTPRequest(req, nil))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -6067,17 +5675,11 @@ func (c *BucketsRestoreCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
 		},
 	}
 	target := &ret
-<<<<<<< HEAD
 	b, err := gensupport.DecodeResponseBytes(target, res)
 	if err != nil {
 		return nil, err
 	}
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.restore", "response", internallog.HTTPResponse(res, b))
-=======
-	if err := gensupport.DecodeResponse(target, res); err != nil {
-		return nil, err
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return ret, nil
 }
@@ -6132,12 +5734,7 @@ func (c *BucketsSetIamPolicyCall) Header() http.Header {
 func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
-<<<<<<< HEAD
 	body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.policy)
-=======
-	var body io.Reader = nil
-	body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -6153,10 +5750,7 @@ func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error)
 	googleapi.Expand(req.URL, map[string]string{
 		"bucket": c.bucket,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.setIamPolicy", "request", internallog.HTTPRequest(req, body.Bytes()))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -6191,17 +5785,11 @@ func (c *BucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err
 		},
 	}
 	target := &ret
-<<<<<<< HEAD
 	b, err := gensupport.DecodeResponseBytes(target, res)
 	if err != nil {
 		return nil, err
 	}
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.setIamPolicy", "response", internallog.HTTPResponse(res, b))
-=======
-	if err := gensupport.DecodeResponse(target, res); err != nil {
-		return nil, err
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return ret, nil
 }
@@ -6269,19 +5857,11 @@ func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, e
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
-<<<<<<< HEAD
-=======
-	var body io.Reader = nil
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam/testPermissions")
 	urls += "?" + c.urlParams_.Encode()
-<<<<<<< HEAD
 	req, err := http.NewRequest("GET", urls, nil)
-=======
-	req, err := http.NewRequest("GET", urls, body)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -6289,10 +5869,7 @@ func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, e
 	googleapi.Expand(req.URL, map[string]string{
 		"bucket": c.bucket,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.testIamPermissions", "request", internallog.HTTPRequest(req, nil))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -6328,17 +5905,11 @@ func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI
 		},
 	}
 	target := &ret
-<<<<<<< HEAD
 	b, err := gensupport.DecodeResponseBytes(target, res)
 	if err != nil {
 		return nil, err
 	}
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.testIamPermissions", "response", internallog.HTTPResponse(res, b))
-=======
-	if err := gensupport.DecodeResponse(target, res); err != nil {
-		return nil, err
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return ret, nil
 }
@@ -6480,12 +6051,7 @@ func (c *BucketsUpdateCall) Header() http.Header {
 func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
-<<<<<<< HEAD
 	body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.bucket2)
-=======
-	var body io.Reader = nil
-	body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -6501,10 +6067,7 @@ func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) {
 	googleapi.Expand(req.URL, map[string]string{
 		"bucket": c.bucket,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.update", "request", internallog.HTTPRequest(req, body.Bytes()))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -6539,17 +6102,11 @@ func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
 		},
 	}
 	target := &ret
-<<<<<<< HEAD
 	b, err := gensupport.DecodeResponseBytes(target, res)
 	if err != nil {
 		return nil, err
 	}
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.update", "response", internallog.HTTPResponse(res, b))
-=======
-	if err := gensupport.DecodeResponse(target, res); err != nil {
-		return nil, err
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return ret, nil
 }
@@ -6593,12 +6150,7 @@ func (c *ChannelsStopCall) Header() http.Header {
 func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
-<<<<<<< HEAD
 	body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.channel)
-=======
-	var body io.Reader = nil
-	body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -6611,10 +6163,7 @@ func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) {
 		return nil, err
 	}
 	req.Header = reqHeaders
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.channels.stop", "request", internallog.HTTPRequest(req, body.Bytes()))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -6629,10 +6178,7 @@ func (c *ChannelsStopCall) Do(opts ...googleapi.CallOption) error {
 	if err := googleapi.CheckResponse(res); err != nil {
 		return gensupport.WrapError(err)
 	}
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.channels.stop", "response", internallog.HTTPResponse(res, nil))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return nil
 }
@@ -6691,19 +6237,11 @@ func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header {
 func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
-<<<<<<< HEAD
-=======
-	var body io.Reader = nil
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}")
 	urls += "?" + c.urlParams_.Encode()
-<<<<<<< HEAD
 	req, err := http.NewRequest("DELETE", urls, nil)
-=======
-	req, err := http.NewRequest("DELETE", urls, body)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -6712,10 +6250,7 @@ func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Res
 		"bucket": c.bucket,
 		"entity": c.entity,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.delete", "request", internallog.HTTPRequest(req, nil))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -6730,10 +6265,7 @@ func (c *DefaultObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption)
 	if err := googleapi.CheckResponse(res); err != nil {
 		return gensupport.WrapError(err)
 	}
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.delete", "response", internallog.HTTPResponse(res, nil))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return nil
 }
@@ -6804,19 +6336,11 @@ func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Respon
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
-<<<<<<< HEAD
-=======
-	var body io.Reader = nil
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}")
 	urls += "?" + c.urlParams_.Encode()
-<<<<<<< HEAD
 	req, err := http.NewRequest("GET", urls, nil)
-=======
-	req, err := http.NewRequest("GET", urls, body)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -6825,10 +6349,7 @@ func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Respon
 		"bucket": c.bucket,
 		"entity": c.entity,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.get", "request", internallog.HTTPRequest(req, nil))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -6864,17 +6385,11 @@ func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*
 		},
 	}
 	target := &ret
-<<<<<<< HEAD
 	b, err := gensupport.DecodeResponseBytes(target, res)
 	if err != nil {
 		return nil, err
 	}
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.get", "response", internallog.HTTPResponse(res, b))
-=======
-	if err := gensupport.DecodeResponse(target, res); err != nil {
-		return nil, err
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return ret, nil
 }
@@ -6929,12 +6444,7 @@ func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header {
 func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
-<<<<<<< HEAD
 	body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.objectaccesscontrol)
-=======
-	var body io.Reader = nil
-	body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -6950,10 +6460,7 @@ func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Res
 	googleapi.Expand(req.URL, map[string]string{
 		"bucket": c.bucket,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.insert", "request", internallog.HTTPRequest(req, body.Bytes()))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -6989,17 +6496,11 @@ func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption)
 		},
 	}
 	target := &ret
-<<<<<<< HEAD
 	b, err := gensupport.DecodeResponseBytes(target, res)
 	if err != nil {
 		return nil, err
 	}
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.insert", "response", internallog.HTTPResponse(res, b))
-=======
-	if err := gensupport.DecodeResponse(target, res); err != nil {
-		return nil, err
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return ret, nil
 }
@@ -7080,19 +6581,11 @@ func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Respo
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
-<<<<<<< HEAD
-=======
-	var body io.Reader = nil
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl")
 	urls += "?" + c.urlParams_.Encode()
-<<<<<<< HEAD
 	req, err := http.NewRequest("GET", urls, nil)
-=======
-	req, err := http.NewRequest("GET", urls, body)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -7100,10 +6593,7 @@ func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Respo
 	googleapi.Expand(req.URL, map[string]string{
 		"bucket": c.bucket,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.list", "request", internallog.HTTPRequest(req, nil))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -7139,17 +6629,11 @@ func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (
 		},
 	}
 	target := &ret
-<<<<<<< HEAD
 	b, err := gensupport.DecodeResponseBytes(target, res)
 	if err != nil {
 		return nil, err
 	}
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.list", "response", internallog.HTTPResponse(res, b))
-=======
-	if err := gensupport.DecodeResponse(target, res); err != nil {
-		return nil, err
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return ret, nil
 }
@@ -7209,12 +6693,7 @@ func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header {
 func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
-<<<<<<< HEAD
 	body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.objectaccesscontrol)
-=======
-	var body io.Reader = nil
-	body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -7231,10 +6710,7 @@ func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Resp
 		"bucket": c.bucket,
 		"entity": c.entity,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.patch", "request", internallog.HTTPRequest(req, body.Bytes()))
"storage.defaultObjectAccessControls.patch", "request", internallog.HTTPRequest(req, body.Bytes())) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -7270,17 +6746,11 @@ func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.patch", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -7340,12 +6810,7 @@ func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header { func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) -<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.objectaccesscontrol) -======= - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -7362,10 +6827,7 @@ func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Res "bucket": c.bucket, "entity": c.entity, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.update", "request", internallog.HTTPRequest(req, body.Bytes())) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -7401,17 +6863,11 @@ func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.update", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -7476,19 +6932,11 @@ func (c *FoldersDeleteCall) Header() http.Header { func (c *FoldersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) -<<<<<<< HEAD -======= - var body io.Reader = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/folders/{folder}") urls += "?" 
+ c.urlParams_.Encode() -<<<<<<< HEAD req, err := http.NewRequest("DELETE", urls, nil) -======= - req, err := http.NewRequest("DELETE", urls, body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -7497,10 +6945,7 @@ func (c *FoldersDeleteCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "folder": c.folder, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.folders.delete", "request", internallog.HTTPRequest(req, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -7515,10 +6960,7 @@ func (c *FoldersDeleteCall) Do(opts ...googleapi.CallOption) error { if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.folders.delete", "response", internallog.HTTPResponse(res, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } @@ -7597,19 +7039,11 @@ func (c *FoldersGetCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } -<<<<<<< HEAD -======= - var body io.Reader = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/folders/{folder}") urls += "?" + c.urlParams_.Encode() -<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) -======= - req, err := http.NewRequest("GET", urls, body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -7618,10 +7052,7 @@ func (c *FoldersGetCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "folder": c.folder, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.folders.get", "request", internallog.HTTPRequest(req, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -7656,17 +7087,11 @@ func (c *FoldersGetCall) Do(opts ...googleapi.CallOption) (*Folder, error) { }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.folders.get", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -7722,12 +7147,7 @@ func (c *FoldersInsertCall) Header() http.Header { func (c *FoldersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) -<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.folder) -======= - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.folder) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -7743,10 +7163,7 @@ func (c *FoldersInsertCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.folders.insert", 
"request", internallog.HTTPRequest(req, body.Bytes())) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -7781,17 +7198,11 @@ func (c *FoldersInsertCall) Do(opts ...googleapi.CallOption) (*Folder, error) { }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.folders.insert", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -7899,19 +7310,11 @@ func (c *FoldersListCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } -<<<<<<< HEAD -======= - var body io.Reader = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/folders") urls += "?" + c.urlParams_.Encode() -<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) -======= - req, err := http.NewRequest("GET", urls, body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -7919,10 +7322,7 @@ func (c *FoldersListCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.folders.list", "request", internallog.HTTPRequest(req, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -7957,17 +7357,11 @@ func (c *FoldersListCall) Do(opts ...googleapi.CallOption) (*Folders, error) { }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.folders.list", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -8057,19 +7451,11 @@ func (c *FoldersRenameCall) Header() http.Header { func (c *FoldersRenameCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) -<<<<<<< HEAD -======= - var body io.Reader = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/folders/{sourceFolder}/renameTo/folders/{destinationFolder}") urls += "?" 
+ c.urlParams_.Encode() -<<<<<<< HEAD req, err := http.NewRequest("POST", urls, nil) -======= - req, err := http.NewRequest("POST", urls, body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -8079,10 +7465,7 @@ func (c *FoldersRenameCall) doRequest(alt string) (*http.Response, error) { "sourceFolder": c.sourceFolder, "destinationFolder": c.destinationFolder, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.folders.rename", "request", internallog.HTTPRequest(req, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -8118,17 +7501,11 @@ func (c *FoldersRenameCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunning }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.folders.rename", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -8202,19 +7579,11 @@ func (c *ManagedFoldersDeleteCall) Header() http.Header { func (c *ManagedFoldersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) -<<<<<<< HEAD -======= - var body io.Reader = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}") urls += "?" + c.urlParams_.Encode() -<<<<<<< HEAD req, err := http.NewRequest("DELETE", urls, nil) -======= - req, err := http.NewRequest("DELETE", urls, body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -8223,10 +7592,7 @@ func (c *ManagedFoldersDeleteCall) doRequest(alt string) (*http.Response, error) "bucket": c.bucket, "managedFolder": c.managedFolder, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.managedFolders.delete", "request", internallog.HTTPRequest(req, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -8241,10 +7607,7 @@ func (c *ManagedFoldersDeleteCall) Do(opts ...googleapi.CallOption) error { if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.managedFolders.delete", "response", internallog.HTTPResponse(res, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } @@ -8322,19 +7685,11 @@ func (c *ManagedFoldersGetCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } -<<<<<<< HEAD -======= - var body io.Reader = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}") urls += "?" 
-<<<<<<< HEAD
 	req, err := http.NewRequest("GET", urls, nil)
-=======
-	req, err := http.NewRequest("GET", urls, body)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -8343,10 +7698,7 @@ func (c *ManagedFoldersGetCall) doRequest(alt string) (*http.Response, error) {
 		"bucket":        c.bucket,
 		"managedFolder": c.managedFolder,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.managedFolders.get", "request", internallog.HTTPRequest(req, nil))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -8381,17 +7733,11 @@ func (c *ManagedFoldersGetCall) Do(opts ...googleapi.CallOption) (*ManagedFolder
 		},
 	}
 	target := &ret
-<<<<<<< HEAD
 	b, err := gensupport.DecodeResponseBytes(target, res)
 	if err != nil {
 		return nil, err
 	}
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.managedFolders.get", "response", internallog.HTTPResponse(res, b))
-=======
-	if err := gensupport.DecodeResponse(target, res); err != nil {
-		return nil, err
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return ret, nil
 }
@@ -8468,19 +7814,11 @@ func (c *ManagedFoldersGetIamPolicyCall) doRequest(alt string) (*http.Response,
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
-<<<<<<< HEAD
-=======
-	var body io.Reader = nil
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}/iam")
 	urls += "?" + c.urlParams_.Encode()
-<<<<<<< HEAD
 	req, err := http.NewRequest("GET", urls, nil)
-=======
-	req, err := http.NewRequest("GET", urls, body)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -8489,10 +7827,7 @@ func (c *ManagedFoldersGetIamPolicyCall) doRequest(alt string) (*http.Response,
 		"bucket":        c.bucket,
 		"managedFolder": c.managedFolder,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.managedFolders.getIamPolicy", "request", internallog.HTTPRequest(req, nil))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -8527,17 +7862,11 @@ func (c *ManagedFoldersGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Poli
 		},
 	}
 	target := &ret
-<<<<<<< HEAD
 	b, err := gensupport.DecodeResponseBytes(target, res)
 	if err != nil {
 		return nil, err
 	}
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.managedFolders.getIamPolicy", "response", internallog.HTTPResponse(res, b))
-=======
-	if err := gensupport.DecodeResponse(target, res); err != nil {
-		return nil, err
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return ret, nil
 }
@@ -8585,12 +7914,7 @@ func (c *ManagedFoldersInsertCall) Header() http.Header {
 func (c *ManagedFoldersInsertCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
-<<<<<<< HEAD
 	body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.managedfolder)
-=======
-	var body io.Reader = nil
-	body, err := googleapi.WithoutDataWrapper.JSONReader(c.managedfolder)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -8606,10 +7930,7 @@ func (c *ManagedFoldersInsertCall) doRequest(alt string) (*http.Response, error)
 	googleapi.Expand(req.URL, map[string]string{
 		"bucket": c.bucket,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.managedFolders.insert", "request", internallog.HTTPRequest(req, body.Bytes()))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -8644,17 +7965,11 @@ func (c *ManagedFoldersInsertCall) Do(opts ...googleapi.CallOption) (*ManagedFol
 		},
 	}
 	target := &ret
-<<<<<<< HEAD
 	b, err := gensupport.DecodeResponseBytes(target, res)
 	if err != nil {
 		return nil, err
 	}
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.managedFolders.insert", "response", internallog.HTTPResponse(res, b))
-=======
-	if err := gensupport.DecodeResponse(target, res); err != nil {
-		return nil, err
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return ret, nil
 }
@@ -8733,19 +8048,11 @@ func (c *ManagedFoldersListCall) doRequest(alt string) (*http.Response, error) {
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
-<<<<<<< HEAD
-=======
-	var body io.Reader = nil
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders")
 	urls += "?" + c.urlParams_.Encode()
-<<<<<<< HEAD
 	req, err := http.NewRequest("GET", urls, nil)
-=======
-	req, err := http.NewRequest("GET", urls, body)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -8753,10 +8060,7 @@ func (c *ManagedFoldersListCall) doRequest(alt string) (*http.Response, error) {
 	googleapi.Expand(req.URL, map[string]string{
 		"bucket": c.bucket,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.managedFolders.list", "request", internallog.HTTPRequest(req, nil))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -8791,17 +8095,11 @@ func (c *ManagedFoldersListCall) Do(opts ...googleapi.CallOption) (*ManagedFolde
 		},
 	}
 	target := &ret
-<<<<<<< HEAD
 	b, err := gensupport.DecodeResponseBytes(target, res)
 	if err != nil {
 		return nil, err
 	}
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.managedFolders.list", "response", internallog.HTTPResponse(res, b))
-=======
-	if err := gensupport.DecodeResponse(target, res); err != nil {
-		return nil, err
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return ret, nil
 }
@@ -8880,12 +8178,7 @@ func (c *ManagedFoldersSetIamPolicyCall) Header() http.Header {
 func (c *ManagedFoldersSetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
-<<<<<<< HEAD
 	body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.policy)
-=======
-	var body io.Reader = nil
-	body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -8902,10 +8195,7 @@ func (c *ManagedFoldersSetIamPolicyCall) doRequest(alt string) (*http.Response,
 		"bucket":        c.bucket,
 		"managedFolder": c.managedFolder,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.managedFolders.setIamPolicy", "request", internallog.HTTPRequest(req, body.Bytes()))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -8940,17 +8230,11 @@ func (c *ManagedFoldersSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Poli
 		},
 	}
 	target := &ret
-<<<<<<< HEAD
 	b, err := gensupport.DecodeResponseBytes(target, res)
 	if err != nil {
 		return nil, err
 	}
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.managedFolders.setIamPolicy", "response", internallog.HTTPResponse(res, b))
-=======
-	if err := gensupport.DecodeResponse(target, res); err != nil {
-		return nil, err
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return ret, nil
 }
@@ -9021,19 +8305,11 @@ func (c *ManagedFoldersTestIamPermissionsCall) doRequest(alt string) (*http.Resp
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
-<<<<<<< HEAD
-=======
-	var body io.Reader = nil
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}/iam/testPermissions")
 	urls += "?" + c.urlParams_.Encode()
-<<<<<<< HEAD
 	req, err := http.NewRequest("GET", urls, nil)
-=======
-	req, err := http.NewRequest("GET", urls, body)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -9042,10 +8318,7 @@ func (c *ManagedFoldersTestIamPermissionsCall) doRequest(alt string) (*http.Resp
 		"bucket":        c.bucket,
 		"managedFolder": c.managedFolder,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.managedFolders.testIamPermissions", "request", internallog.HTTPRequest(req, nil))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -9081,17 +8354,11 @@ func (c *ManagedFoldersTestIamPermissionsCall) Do(opts ...googleapi.CallOption)
 		},
 	}
 	target := &ret
-<<<<<<< HEAD
 	b, err := gensupport.DecodeResponseBytes(target, res)
 	if err != nil {
 		return nil, err
 	}
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.managedFolders.testIamPermissions", "response", internallog.HTTPResponse(res, b))
-=======
-	if err := gensupport.DecodeResponse(target, res); err != nil {
-		return nil, err
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return ret, nil
 }
@@ -9147,19 +8414,11 @@ func (c *NotificationsDeleteCall) Header() http.Header {
 func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
-<<<<<<< HEAD
-=======
-	var body io.Reader = nil
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}")
 	urls += "?" + c.urlParams_.Encode()
-<<<<<<< HEAD
 	req, err := http.NewRequest("DELETE", urls, nil)
-=======
-	req, err := http.NewRequest("DELETE", urls, body)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -9168,10 +8427,7 @@ func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error)
 		"bucket":       c.bucket,
 		"notification": c.notification,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.notifications.delete", "request", internallog.HTTPRequest(req, nil))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -9186,10 +8442,7 @@ func (c *NotificationsDeleteCall) Do(opts ...googleapi.CallOption) error {
 	if err := googleapi.CheckResponse(res); err != nil {
 		return gensupport.WrapError(err)
 	}
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.notifications.delete", "response", internallog.HTTPResponse(res, nil))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return nil
 }
@@ -9257,19 +8510,11 @@ func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) {
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
-<<<<<<< HEAD
-=======
-	var body io.Reader = nil
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}")
 	urls += "?" + c.urlParams_.Encode()
-<<<<<<< HEAD
 	req, err := http.NewRequest("GET", urls, nil)
-=======
-	req, err := http.NewRequest("GET", urls, body)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -9278,10 +8523,7 @@ func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) {
 		"bucket":       c.bucket,
 		"notification": c.notification,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.notifications.get", "request", internallog.HTTPRequest(req, nil))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -9316,17 +8558,11 @@ func (c *NotificationsGetCall) Do(opts ...googleapi.CallOption) (*Notification,
 		},
 	}
 	target := &ret
-<<<<<<< HEAD
 	b, err := gensupport.DecodeResponseBytes(target, res)
 	if err != nil {
 		return nil, err
 	}
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.notifications.get", "response", internallog.HTTPResponse(res, b))
-=======
-	if err := gensupport.DecodeResponse(target, res); err != nil {
-		return nil, err
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return ret, nil
 }
@@ -9381,12 +8617,7 @@ func (c *NotificationsInsertCall) Header() http.Header {
 func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
-<<<<<<< HEAD
 	body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.notification)
-=======
-	var body io.Reader = nil
-	body, err := googleapi.WithoutDataWrapper.JSONReader(c.notification)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -9402,10 +8633,7 @@ func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error)
 	googleapi.Expand(req.URL, map[string]string{
 		"bucket": c.bucket,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.notifications.insert", "request", internallog.HTTPRequest(req, body.Bytes()))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -9440,17 +8668,11 @@ func (c *NotificationsInsertCall) Do(opts ...googleapi.CallOption) (*Notificatio
 		},
 	}
 	target := &ret
-<<<<<<< HEAD
 	b, err := gensupport.DecodeResponseBytes(target, res)
 	if err != nil {
 		return nil, err
 	}
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.notifications.insert", "response", internallog.HTTPResponse(res, b))
-=======
-	if err := gensupport.DecodeResponse(target, res); err != nil {
-		return nil, err
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return ret, nil
 }
@@ -9515,19 +8737,11 @@ func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) {
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
-<<<<<<< HEAD
-=======
-	var body io.Reader = nil
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs")
 	urls += "?" + c.urlParams_.Encode()
-<<<<<<< HEAD
 	req, err := http.NewRequest("GET", urls, nil)
-=======
-	req, err := http.NewRequest("GET", urls, body)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -9535,10 +8749,7 @@ func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) {
 	googleapi.Expand(req.URL, map[string]string{
 		"bucket": c.bucket,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.notifications.list", "request", internallog.HTTPRequest(req, nil))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -9573,17 +8784,11 @@ func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications
 		},
 	}
 	target := &ret
-<<<<<<< HEAD
 	b, err := gensupport.DecodeResponseBytes(target, res)
 	if err != nil {
 		return nil, err
 	}
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.notifications.list", "response", internallog.HTTPResponse(res, b))
-=======
-	if err := gensupport.DecodeResponse(target, res); err != nil {
-		return nil, err
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return ret, nil
 }
@@ -9655,19 +8860,11 @@ func (c *ObjectAccessControlsDeleteCall) Header() http.Header {
 func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
-<<<<<<< HEAD
-=======
-	var body io.Reader = nil
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}")
 	urls += "?" + c.urlParams_.Encode()
-<<<<<<< HEAD
 	req, err := http.NewRequest("DELETE", urls, nil)
-=======
-	req, err := http.NewRequest("DELETE", urls, body)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -9677,10 +8874,7 @@ func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response,
 		"bucket": c.bucket,
 		"object": c.object,
 		"entity": c.entity,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objectAccessControls.delete", "request", internallog.HTTPRequest(req, nil))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -9695,10 +8889,7 @@ func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error
 	if err := googleapi.CheckResponse(res); err != nil {
 		return gensupport.WrapError(err)
 	}
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objectAccessControls.delete", "response", internallog.HTTPResponse(res, nil))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return nil
 }
@@ -9781,19 +8972,11 @@ func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, err
 	if c.ifNoneMatch_ != "" {
 		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
 	}
-<<<<<<< HEAD
-=======
-	var body io.Reader = nil
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	c.urlParams_.Set("alt", alt)
 	c.urlParams_.Set("prettyPrint", "false")
 	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}")
 	urls += "?" + c.urlParams_.Encode()
-<<<<<<< HEAD
 	req, err := http.NewRequest("GET", urls, nil)
-=======
-	req, err := http.NewRequest("GET", urls, body)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -9803,10 +8986,7 @@ func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, err
 		"bucket": c.bucket,
 		"object": c.object,
 		"entity": c.entity,
 	})
-<<<<<<< HEAD
 	c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objectAccessControls.get", "request", internallog.HTTPRequest(req, nil))
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }
@@ -9842,17 +9022,11 @@ func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectA
 		},
 	}
 	target := &ret
-<<<<<<< HEAD
 	b, err := gensupport.DecodeResponseBytes(target, res)
 	if err != nil {
 		return nil, err
 	}
 	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objectAccessControls.get", "response", internallog.HTTPResponse(res, b))
-=======
-	if err := gensupport.DecodeResponse(target, res); err != nil {
-		return nil, err
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return ret, nil
 }
@@ -9920,12 +9094,7 @@ func (c *ObjectAccessControlsInsertCall) Header() http.Header {
 func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
-<<<<<<< HEAD
 	body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.objectaccesscontrol)
-=======
-	var body io.Reader = nil
-	body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		return nil, err
 	}
@@ -9942,10 +9111,7 @@ func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response,
 		"bucket": c.bucket,
 		"object": c.object,
 	})
-<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objectAccessControls.insert", "request", internallog.HTTPRequest(req, body.Bytes())) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -9981,17 +9147,11 @@ func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Obje }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objectAccessControls.insert", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -10069,19 +9229,11 @@ func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, er if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } -<<<<<<< HEAD -======= - var body io.Reader = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") urls += "?" + c.urlParams_.Encode() -<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) -======= - req, err := http.NewRequest("GET", urls, body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -10090,10 +9242,7 @@ func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, er "bucket": c.bucket, "object": c.object, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objectAccessControls.list", "request", internallog.HTTPRequest(req, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -10129,17 +9278,11 @@ func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Object }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objectAccessControls.list", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -10212,12 +9355,7 @@ func (c *ObjectAccessControlsPatchCall) Header() http.Header { func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) -<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.objectaccesscontrol) -======= - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -10235,10 +9373,7 @@ func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, e "object": c.object, "entity": c.entity, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objectAccessControls.patch", "request", internallog.HTTPRequest(req, body.Bytes())) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return 
gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -10274,17 +9409,11 @@ func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Objec }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objectAccessControls.patch", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -10357,12 +9486,7 @@ func (c *ObjectAccessControlsUpdateCall) Header() http.Header { func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) -<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.objectaccesscontrol) -======= - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -10380,10 +9504,7 @@ func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, "object": c.object, "entity": c.entity, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objectAccessControls.update", "request", internallog.HTTPRequest(req, body.Bytes())) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -10419,17 +9540,11 @@ func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Obje }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objectAccessControls.update", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -10478,12 +9593,7 @@ func (c *ObjectsBulkRestoreCall) Header() http.Header { func (c *ObjectsBulkRestoreCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) -<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.bulkrestoreobjectsrequest) -======= - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bulkrestoreobjectsrequest) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -10499,10 +9609,7 @@ func (c *ObjectsBulkRestoreCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.bulkRestore", "request", internallog.HTTPRequest(req, body.Bytes())) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -10538,17 +9645,11 @@ func (c *ObjectsBulkRestoreCall) Do(opts ...googleapi.CallOption) (*GoogleLongru }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", 
"storage.objects.bulkRestore", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -10668,12 +9769,7 @@ func (c *ObjectsComposeCall) Header() http.Header { func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) -<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.composerequest) -======= - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.composerequest) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -10690,10 +9786,7 @@ func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { "destinationBucket": c.destinationBucket, "destinationObject": c.destinationObject, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.compose", "request", internallog.HTTPRequest(req, body.Bytes())) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -10728,17 +9821,11 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.compose", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -10940,12 +10027,7 @@ func (c *ObjectsCopyCall) Header() http.Header { func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) -<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.object) -======= - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -10964,10 +10046,7 @@ func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { "destinationBucket": c.destinationBucket, "destinationObject": c.destinationObject, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.copy", "request", internallog.HTTPRequest(req, body.Bytes())) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -11002,17 +10081,11 @@ func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.copy", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -11115,19 +10188,11 @@ func (c *ObjectsDeleteCall) Header() http.Header { func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := 
gensupport.SetHeaders(c.s.userAgent(), "", c.header_) -<<<<<<< HEAD -======= - var body io.Reader = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") urls += "?" + c.urlParams_.Encode() -<<<<<<< HEAD req, err := http.NewRequest("DELETE", urls, nil) -======= - req, err := http.NewRequest("DELETE", urls, body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -11136,10 +10201,7 @@ func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "object": c.object, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.delete", "request", internallog.HTTPRequest(req, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -11154,10 +10216,7 @@ func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error { if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.delete", "response", internallog.HTTPResponse(res, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } @@ -11302,19 +10361,11 @@ func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } -<<<<<<< HEAD -======= - var body io.Reader = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") urls += "?" + c.urlParams_.Encode() -<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) -======= - req, err := http.NewRequest("GET", urls, body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -11323,10 +10374,7 @@ func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "object": c.object, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.get", "request", internallog.HTTPRequest(req, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -11377,17 +10425,11 @@ func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.get", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -11465,19 +10507,11 @@ func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } -<<<<<<< HEAD -======= - var body io.Reader = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam") urls += "?" 
+ c.urlParams_.Encode() -<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) -======= - req, err := http.NewRequest("GET", urls, body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -11486,10 +10520,7 @@ func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) "bucket": c.bucket, "object": c.object, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.getIamPolicy", "request", internallog.HTTPRequest(req, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -11524,17 +10555,11 @@ func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.getIamPolicy", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -11766,12 +10791,7 @@ func (c *ObjectsInsertCall) Header() http.Header { func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) -<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.object) -======= - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -11782,21 +10802,10 @@ func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { urls = googleapi.ResolveRelative(c.s.BasePath, "/upload/storage/v1/b/{bucket}/o") c.urlParams_.Set("uploadType", c.mediaInfo_.UploadType()) } -<<<<<<< HEAD newBody, getBody, cleanup := c.mediaInfo_.UploadRequest(reqHeaders, body) defer cleanup() urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, newBody) -======= - if body == nil { - body = new(bytes.Buffer) - reqHeaders.Set("Content-Type", "application/json") - } - body, getBody, cleanup := c.mediaInfo_.UploadRequest(reqHeaders, body) - defer cleanup() - urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -11805,10 +10814,7 @@ func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.insert", "request", internallog.HTTPRequest(req, body.Bytes())) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if c.retry != nil { return gensupport.SendRequestWithRetry(c.ctx_, c.s.client, req, c.retry) } @@ -11864,17 +10870,11 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.insert", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -12045,19 +11045,11 @@ func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } -<<<<<<< HEAD -======= - var body io.Reader = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") urls += "?" + c.urlParams_.Encode() -<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) -======= - req, err := http.NewRequest("GET", urls, body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -12065,10 +11057,7 @@ func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.list", "request", internallog.HTTPRequest(req, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -12103,17 +11092,11 @@ func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.list", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -12138,7 +11121,6 @@ func (c *ObjectsListCall) Pages(ctx context.Context, f func(*Objects) error) err } } -<<<<<<< HEAD type ObjectsMoveCall struct { s *Service bucket string @@ -12340,8 +11322,6 @@ func (c *ObjectsMoveCall) Do(opts ...googleapi.CallOption) (*Object, error) { return ret, nil } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type ObjectsPatchCall struct { s *Service bucket string @@ -12492,12 +11472,7 @@ func (c *ObjectsPatchCall) Header() http.Header { func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", 
c.header_) -<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.object2) -======= - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -12514,10 +11489,7 @@ func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "object": c.object, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.patch", "request", internallog.HTTPRequest(req, body.Bytes())) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -12552,17 +11524,11 @@ func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.patch", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -12687,19 +11653,11 @@ func (c *ObjectsRestoreCall) Header() http.Header { func (c *ObjectsRestoreCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) -<<<<<<< HEAD -======= - var body io.Reader = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/restore") urls += "?" + c.urlParams_.Encode() -<<<<<<< HEAD req, err := http.NewRequest("POST", urls, nil) -======= - req, err := http.NewRequest("POST", urls, body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -12708,10 +11666,7 @@ func (c *ObjectsRestoreCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "object": c.object, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.restore", "request", internallog.HTTPRequest(req, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -12746,17 +11701,11 @@ func (c *ObjectsRestoreCall) Do(opts ...googleapi.CallOption) (*Object, error) { }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.restore", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -12980,12 +11929,7 @@ func (c *ObjectsRewriteCall) Header() http.Header { func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) -<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.object) -======= - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -13004,10 +11948,7 @@ func (c 
*ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { "destinationBucket": c.destinationBucket, "destinationObject": c.destinationObject, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.rewrite", "request", internallog.HTTPRequest(req, body.Bytes())) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -13043,17 +11984,11 @@ func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.rewrite", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -13121,12 +12056,7 @@ func (c *ObjectsSetIamPolicyCall) Header() http.Header { func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) -<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.policy) -======= - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -13143,10 +12073,7 @@ func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) "bucket": c.bucket, "object": c.object, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.setIamPolicy", "request", internallog.HTTPRequest(req, body.Bytes())) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -13181,17 +12108,11 @@ func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.setIamPolicy", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -13272,19 +12193,11 @@ func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, e if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } -<<<<<<< HEAD -======= - var body io.Reader = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam/testPermissions") urls += "?" 
+ c.urlParams_.Encode() -<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) -======= - req, err := http.NewRequest("GET", urls, body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -13293,10 +12206,7 @@ func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, e "bucket": c.bucket, "object": c.object, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.testIamPermissions", "request", internallog.HTTPRequest(req, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -13332,17 +12242,11 @@ func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.testIamPermissions", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -13496,12 +12400,7 @@ func (c *ObjectsUpdateCall) Header() http.Header { func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) -<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.object2) -======= - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -13518,10 +12417,7 @@ func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "object": c.object, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.update", "request", internallog.HTTPRequest(req, body.Bytes())) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -13556,17 +12452,11 @@ func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.update", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -13702,12 +12592,7 @@ func (c *ObjectsWatchAllCall) Header() http.Header { func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) -<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.channel) -======= - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -13723,10 +12608,7 @@ func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", 
"storage.objects.watchAll", "request", internallog.HTTPRequest(req, body.Bytes())) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -13761,17 +12643,11 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.watchAll", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -13826,12 +12702,7 @@ func (c *OperationsAdvanceRelocateBucketCall) Header() http.Header { func (c *OperationsAdvanceRelocateBucketCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) -<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.advancerelocatebucketoperationrequest) -======= - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.advancerelocatebucketoperationrequest) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -13848,10 +12719,7 @@ func (c *OperationsAdvanceRelocateBucketCall) doRequest(alt string) (*http.Respo "bucket": c.bucket, "operationId": c.operationId, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.operations.advanceRelocateBucket", "request", internallog.HTTPRequest(req, body.Bytes())) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -13866,10 +12734,7 @@ func (c *OperationsAdvanceRelocateBucketCall) Do(opts ...googleapi.CallOption) e if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.operations.advanceRelocateBucket", "response", internallog.HTTPResponse(res, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } @@ -13920,19 +12785,11 @@ func (c *OperationsCancelCall) Header() http.Header { func (c *OperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) -<<<<<<< HEAD -======= - var body io.Reader = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/operations/{operationId}/cancel") urls += "?" 
+ c.urlParams_.Encode() -<<<<<<< HEAD req, err := http.NewRequest("POST", urls, nil) -======= - req, err := http.NewRequest("POST", urls, body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -13941,10 +12798,7 @@ func (c *OperationsCancelCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "operationId": c.operationId, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.operations.cancel", "request", internallog.HTTPRequest(req, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -13959,10 +12813,7 @@ func (c *OperationsCancelCall) Do(opts ...googleapi.CallOption) error { if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.operations.cancel", "response", internallog.HTTPResponse(res, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } @@ -14023,19 +12874,11 @@ func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } -<<<<<<< HEAD -======= - var body io.Reader = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/operations/{operationId}") urls += "?" + c.urlParams_.Encode() -<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) -======= - req, err := http.NewRequest("GET", urls, body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -14044,10 +12887,7 @@ func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "operationId": c.operationId, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.operations.get", "request", internallog.HTTPRequest(req, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -14083,17 +12923,11 @@ func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunning }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.operations.get", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -14175,19 +13009,11 @@ func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } -<<<<<<< HEAD -======= - var body io.Reader = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/operations") urls += "?" 
+ c.urlParams_.Encode() -<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) -======= - req, err := http.NewRequest("GET", urls, body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -14195,10 +13021,7 @@ func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.operations.list", "request", internallog.HTTPRequest(req, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -14234,17 +13057,11 @@ func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunnin }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.operations.list", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -14320,19 +13137,11 @@ func (c *ProjectsHmacKeysCreateCall) Header() http.Header { func (c *ProjectsHmacKeysCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) -<<<<<<< HEAD -======= - var body io.Reader = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys") urls += "?" 
+ c.urlParams_.Encode() -<<<<<<< HEAD req, err := http.NewRequest("POST", urls, nil) -======= - req, err := http.NewRequest("POST", urls, body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -14340,10 +13149,7 @@ func (c *ProjectsHmacKeysCreateCall) doRequest(alt string) (*http.Response, erro googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.create", "request", internallog.HTTPRequest(req, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -14378,17 +13184,11 @@ func (c *ProjectsHmacKeysCreateCall) Do(opts ...googleapi.CallOption) (*HmacKey, }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.create", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -14444,19 +13244,11 @@ func (c *ProjectsHmacKeysDeleteCall) Header() http.Header { func (c *ProjectsHmacKeysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) -<<<<<<< HEAD -======= - var body io.Reader = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys/{accessId}") urls += "?" + c.urlParams_.Encode() -<<<<<<< HEAD req, err := http.NewRequest("DELETE", urls, nil) -======= - req, err := http.NewRequest("DELETE", urls, body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -14465,10 +13257,7 @@ func (c *ProjectsHmacKeysDeleteCall) doRequest(alt string) (*http.Response, erro "projectId": c.projectId, "accessId": c.accessId, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.delete", "request", internallog.HTTPRequest(req, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -14483,10 +13272,7 @@ func (c *ProjectsHmacKeysDeleteCall) Do(opts ...googleapi.CallOption) error { if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.delete", "response", internallog.HTTPResponse(res, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } @@ -14554,19 +13340,11 @@ func (c *ProjectsHmacKeysGetCall) doRequest(alt string) (*http.Response, error) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } -<<<<<<< HEAD -======= - var body io.Reader = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys/{accessId}") urls += "?" 
+ c.urlParams_.Encode() -<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) -======= - req, err := http.NewRequest("GET", urls, body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -14575,10 +13353,7 @@ func (c *ProjectsHmacKeysGetCall) doRequest(alt string) (*http.Response, error) "projectId": c.projectId, "accessId": c.accessId, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.get", "request", internallog.HTTPRequest(req, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -14614,17 +13389,11 @@ func (c *ProjectsHmacKeysGetCall) Do(opts ...googleapi.CallOption) (*HmacKeyMeta }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.get", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -14721,19 +13490,11 @@ func (c *ProjectsHmacKeysListCall) doRequest(alt string) (*http.Response, error) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } -<<<<<<< HEAD -======= - var body io.Reader = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys") urls += "?" + c.urlParams_.Encode() -<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) -======= - req, err := http.NewRequest("GET", urls, body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -14741,10 +13502,7 @@ func (c *ProjectsHmacKeysListCall) doRequest(alt string) (*http.Response, error) googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.list", "request", internallog.HTTPRequest(req, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -14780,17 +13538,11 @@ func (c *ProjectsHmacKeysListCall) Do(opts ...googleapi.CallOption) (*HmacKeysMe }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.list", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -14872,12 +13624,7 @@ func (c *ProjectsHmacKeysUpdateCall) Header() http.Header { func (c *ProjectsHmacKeysUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) -<<<<<<< HEAD body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.hmackeymetadata) -======= - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.hmackeymetadata) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -14894,10 +13641,7 @@ 
func (c *ProjectsHmacKeysUpdateCall) doRequest(alt string) (*http.Response, erro "projectId": c.projectId, "accessId": c.accessId, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.update", "request", internallog.HTTPRequest(req, body.Bytes())) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -14933,17 +13677,11 @@ func (c *ProjectsHmacKeysUpdateCall) Do(opts ...googleapi.CallOption) (*HmacKeyM }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.update", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } @@ -15009,19 +13747,11 @@ func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, e if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } -<<<<<<< HEAD -======= - var body io.Reader = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/serviceAccount") urls += "?" + c.urlParams_.Encode() -<<<<<<< HEAD req, err := http.NewRequest("GET", urls, nil) -======= - req, err := http.NewRequest("GET", urls, body) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, err } @@ -15029,10 +13759,7 @@ func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, e googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, }) -<<<<<<< HEAD c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.projects.serviceAccount.get", "request", internallog.HTTPRequest(req, nil)) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -15067,16 +13794,10 @@ func (c *ProjectsServiceAccountGetCall) Do(opts ...googleapi.CallOption) (*Servi }, } target := &ret -<<<<<<< HEAD b, err := gensupport.DecodeResponseBytes(target, res) if err != nil { return nil, err } c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.projects.serviceAccount.get", "response", internallog.HTTPResponse(res, b)) -======= - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return ret, nil } diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go index db57bc8a31..a354d223d3 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -22,10 +22,6 @@ import ( "cloud.google.com/go/auth/grpctransport" "cloud.google.com/go/auth/oauth2adapt" "cloud.google.com/go/compute/metadata" -<<<<<<< HEAD -======= - "go.opencensus.io/plugin/ocgrpc" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "golang.org/x/oauth2" "golang.org/x/time/rate" @@ -239,10 +235,7 @@ func dialPoolNewAuth(ctx context.Context, secure bool, poolSize int, ds *interna Audience: aud, CredentialsFile: 
ds.CredentialsFile, CredentialsJSON: ds.CredentialsJSON, -<<<<<<< HEAD Logger: ds.Logger, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }, InternalOptions: &grpctransport.InternalOptions{ EnableNonDefaultSAForDirectPath: ds.AllowNonDefaultServiceAccount, @@ -256,10 +249,7 @@ func dialPoolNewAuth(ctx context.Context, secure bool, poolSize int, ds *interna SkipValidation: skipValidation, }, UniverseDomain: ds.UniverseDomain, -<<<<<<< HEAD Logger: ds.Logger, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }) return pool, err } @@ -396,10 +386,6 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C // Add tracing, but before the other options, so that clients can override the // gRPC stats handler. // This assumes that gRPC options are processed in order, left to right. -<<<<<<< HEAD -======= - grpcOpts = addOCStatsHandler(grpcOpts, o) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) grpcOpts = addOpenTelemetryStatsHandler(grpcOpts, o) grpcOpts = append(grpcOpts, o.GRPCDialOpts...) if o.UserAgent != "" { @@ -409,16 +395,6 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C return dialContext(ctx, endpoint, grpcOpts...) } -<<<<<<< HEAD -======= -func addOCStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings) []grpc.DialOption { - if settings.TelemetryDisabled { - return opts - } - return append(opts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func addOpenTelemetryStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings) []grpc.DialOption { if settings.TelemetryDisabled { return opts diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index 480c8fa48b..6b7ea74ba4 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -19,10 +19,6 @@ import ( "cloud.google.com/go/auth/credentials" "cloud.google.com/go/auth/httptransport" "cloud.google.com/go/auth/oauth2adapt" -<<<<<<< HEAD -======= - "go.opencensus.io/plugin/ochttp" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "golang.org/x/net/http2" "golang.org/x/oauth2" @@ -30,10 +26,6 @@ import ( "google.golang.org/api/internal" "google.golang.org/api/internal/cert" "google.golang.org/api/option" -<<<<<<< HEAD -======= - "google.golang.org/api/transport/http/internal/propagation" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // NewClient returns an HTTP client for use communicating with a Google cloud @@ -127,10 +119,7 @@ func newClientNewAuth(ctx context.Context, base http.RoundTripper, ds *internal. Audience: aud, CredentialsFile: ds.CredentialsFile, CredentialsJSON: ds.CredentialsJSON, -<<<<<<< HEAD Logger: ds.Logger, -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }, InternalOptions: &httptransport.InternalOptions{ EnableJWTWithScope: ds.EnableJwtWithScope, @@ -141,10 +130,7 @@ func newClientNewAuth(ctx context.Context, base http.RoundTripper, ds *internal. 
 			SkipValidation: skipValidation,
 		},
 		UniverseDomain: ds.UniverseDomain,
-<<<<<<< HEAD
 		Logger:         ds.Logger,
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	})
 	if err != nil {
 		return nil, err
 	}
@@ -179,14 +165,7 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna
 		requestReason: settings.RequestReason,
 	}
 	var trans http.RoundTripper = paramTransport
-<<<<<<< HEAD
 	trans = addOpenTelemetryTransport(trans, settings)
-=======
-	// Give OpenTelemetry precedence over OpenCensus in case user configuration
-	// causes both to write the same header (`X-Cloud-Trace-Context`).
-	trans = addOpenTelemetryTransport(trans, settings)
-	trans = addOCTransport(trans, settings)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	switch {
 	case settings.NoAuth:
 		// Do nothing.
@@ -327,19 +306,6 @@ func addOpenTelemetryTransport(trans http.RoundTripper, settings *internal.DialS
 	return otelhttp.NewTransport(trans)
 }
 
-<<<<<<< HEAD
-=======
-func addOCTransport(trans http.RoundTripper, settings *internal.DialSettings) http.RoundTripper {
-	if settings.TelemetryDisabled {
-		return trans
-	}
-	return &ochttp.Transport{
-		Base:        trans,
-		Propagation: &propagation.HTTPFormat{},
-	}
-}
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // clonedTransport returns the given RoundTripper as a cloned *http.Transport.
 // It returns nil if the RoundTripper can't be cloned or coerced to
 // *http.Transport.
diff --git a/vendor/google.golang.org/api/transport/http/internal/propagation/http.go b/vendor/google.golang.org/api/transport/http/internal/propagation/http.go
deleted file mode 100644
index ba7512aa26..0000000000
--- a/vendor/google.golang.org/api/transport/http/internal/propagation/http.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2018 Google LLC.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.8
-// +build go1.8
-
-// Package propagation implements X-Cloud-Trace-Context header propagation used
-// by Google Cloud products.
-package propagation
-
-import (
-	"encoding/binary"
-	"encoding/hex"
-	"fmt"
-	"net/http"
-	"strconv"
-	"strings"
-
-	"go.opencensus.io/trace"
-	"go.opencensus.io/trace/propagation"
-)
-
-const (
-	httpHeaderMaxSize = 200
-	httpHeader        = `X-Cloud-Trace-Context`
-)
-
-var _ propagation.HTTPFormat = (*HTTPFormat)(nil)
-
-// HTTPFormat implements propagation.HTTPFormat to propagate
-// traces in HTTP headers for Google Cloud Platform and Stackdriver Trace.
-type HTTPFormat struct{}
-
-// SpanContextFromRequest extracts a Stackdriver Trace span context from incoming requests.
-func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
-	h := req.Header.Get(httpHeader)
-	// See https://cloud.google.com/trace/docs/faq for the header HTTPFormat.
-	// Return if the header is empty or missing, or if the header is unreasonably
-	// large, to avoid making unnecessary copies of a large string.
-	if h == "" || len(h) > httpHeaderMaxSize {
-		return trace.SpanContext{}, false
-	}
-
-	// Parse the trace id field.
-	slash := strings.Index(h, `/`)
-	if slash == -1 {
-		return trace.SpanContext{}, false
-	}
-	tid, h := h[:slash], h[slash+1:]
-
-	buf, err := hex.DecodeString(tid)
-	if err != nil {
-		return trace.SpanContext{}, false
-	}
-	copy(sc.TraceID[:], buf)
-
-	// Parse the span id field.
-	spanstr := h
-	semicolon := strings.Index(h, `;`)
-	if semicolon != -1 {
-		spanstr, h = h[:semicolon], h[semicolon+1:]
-	}
-	sid, err := strconv.ParseUint(spanstr, 10, 64)
-	if err != nil {
-		return trace.SpanContext{}, false
-	}
-	binary.BigEndian.PutUint64(sc.SpanID[:], sid)
-
-	// Parse the options field, options field is optional.
-	if !strings.HasPrefix(h, "o=") {
-		return sc, true
-	}
-	o, err := strconv.ParseUint(h[2:], 10, 64)
-	if err != nil {
-		return trace.SpanContext{}, false
-	}
-	sc.TraceOptions = trace.TraceOptions(o)
-	return sc, true
-}
-
-// SpanContextToRequest modifies the given request to include a Stackdriver Trace header.
-func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) {
-	sid := binary.BigEndian.Uint64(sc.SpanID[:])
-	header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions))
-	req.Header.Set(httpHeader, header)
-}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
index 48aa96659b..4a9fce53c4 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
@@ -180,11 +180,8 @@ type CommonLanguageSettings struct {
 	ReferenceDocsUri string `protobuf:"bytes,1,opt,name=reference_docs_uri,json=referenceDocsUri,proto3" json:"reference_docs_uri,omitempty"`
 	// The destination where API teams want this client library to be published.
 	Destinations []ClientLibraryDestination `protobuf:"varint,2,rep,packed,name=destinations,proto3,enum=google.api.ClientLibraryDestination" json:"destinations,omitempty"`
-<<<<<<< HEAD
 	// Configuration for which RPCs should be generated in the GAPIC client.
 	SelectiveGapicGeneration *SelectiveGapicGeneration `protobuf:"bytes,3,opt,name=selective_gapic_generation,json=selectiveGapicGeneration,proto3" json:"selective_gapic_generation,omitempty"`
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *CommonLanguageSettings) Reset() {
@@ -234,7 +231,6 @@ func (x *CommonLanguageSettings) GetDestinations() []ClientLibraryDestination {
 	return nil
 }
 
-<<<<<<< HEAD
 func (x *CommonLanguageSettings) GetSelectiveGapicGeneration() *SelectiveGapicGeneration {
 	if x != nil {
 		return x.SelectiveGapicGeneration
@@ -242,8 +238,6 @@ func (x *CommonLanguageSettings) GetSelectiveGapicGe
 	return nil
 }
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // Details about how and where to publish client libraries.
 type ClientLibrarySettings struct {
 	state         protoimpl.MessageState
@@ -999,7 +993,6 @@ type GoSettings struct {
 
 	// Some settings.
 	Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"`
-<<<<<<< HEAD
 	// Map of service names to renamed services. Keys are the package relative
 	// service names and values are the name to be used for the service client
 	// and call options.
@@ -1010,8 +1003,6 @@ type GoSettings struct {
 	// renamed_services:
 	//   Publisher: TopicAdmin
 	RenamedServices map[string]string `protobuf:"bytes,2,rep,name=renamed_services,json=renamedServices,proto3" json:"renamed_services,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *GoSettings) Reset() {
@@ -1053,7 +1044,6 @@ func (x *GoSettings) GetCommon() *CommonLanguageSettings {
 	return nil
 }
 
-<<<<<<< HEAD
 func (x *GoSettings) GetRenamedServices() map[string]string {
 	if x != nil {
 		return x.RenamedServices
@@ -1061,8 +1051,6 @@ func (x *GoSettings) GetRenamedServices() map[string]string {
 	return nil
 }
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // Describes the generator configuration for a method.
 type MethodSettings struct {
 	state         protoimpl.MessageState
@@ -1161,7 +1149,6 @@ func (x *MethodSettings) GetAutoPopulatedFields() []string {
 	return nil
 }
 
-<<<<<<< HEAD
 // This message is used to configure the generation of a subset of the RPCs in
 // a service for client libraries.
 type SelectiveGapicGeneration struct {
@@ -1213,8 +1200,6 @@ func (x *SelectiveGapicGeneration) GetMethods() []string {
 	return nil
 }
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // Experimental features to be included during client library generation.
 // These fields will be deprecated once the feature graduates and is enabled
 // by default.
@@ -1228,24 +1213,17 @@ type PythonSettings_ExperimentalFeatures struct {
 	// This feature will be enabled by default 1 month after launching the
 	// feature in preview packages.
 	RestAsyncIoEnabled bool `protobuf:"varint,1,opt,name=rest_async_io_enabled,json=restAsyncIoEnabled,proto3" json:"rest_async_io_enabled,omitempty"`
-<<<<<<< HEAD
 	// Enables generation of protobuf code using new types that are more
 	// Pythonic which are included in `protobuf>=5.29.x`. This feature will be
 	// enabled by default 1 month after launching the feature in preview
 	// packages.
 	ProtobufPythonicTypesEnabled bool `protobuf:"varint,2,opt,name=protobuf_pythonic_types_enabled,json=protobufPythonicTypesEnabled,proto3" json:"protobuf_pythonic_types_enabled,omitempty"`
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *PythonSettings_ExperimentalFeatures) Reset() {
 	*x = PythonSettings_ExperimentalFeatures{}
 	if protoimpl.UnsafeEnabled {
-<<<<<<< HEAD
 		mi := &file_google_api_client_proto_msgTypes[14]
-=======
-		mi := &file_google_api_client_proto_msgTypes[13]
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -1258,11 +1236,7 @@ func (x *PythonSettings_ExperimentalFeatures) String() string {
 
 func (*PythonSettings_ExperimentalFeatures) ProtoMessage() {}
 
 func (x *PythonSettings_ExperimentalFeatures) ProtoReflect() protoreflect.Message {
-<<<<<<< HEAD
 	mi := &file_google_api_client_proto_msgTypes[14]
-=======
-	mi := &file_google_api_client_proto_msgTypes[13]
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -1285,7 +1259,6 @@ func (x *PythonSettings_ExperimentalFeatures) GetRestAsyncIoEnabled() bool {
 	return false
 }
 
-<<<<<<< HEAD
 func (x *PythonSettings_ExperimentalFeatures) GetProtobufPythonicTypesEnabled() bool {
 	if x != nil {
 		return x.ProtobufPythonicTypesEnabled
@@ -1293,8 +1266,6 @@ func (x *PythonSettings_ExperimentalFeatures) GetProtobufPythonicTypesEnabled()
 	return false
 }
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // Describes settings to use when generating API methods that use the
 // long-running operation pattern.
 // All default values below are from those used in the client library
@@ -1323,11 +1294,7 @@ type MethodSettings_LongRunning struct {
 
 func (x *MethodSettings_LongRunning) Reset() {
 	*x = MethodSettings_LongRunning{}
 	if protoimpl.UnsafeEnabled {
-<<<<<<< HEAD
 		mi := &file_google_api_client_proto_msgTypes[18]
-=======
-		mi := &file_google_api_client_proto_msgTypes[16]
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -1340,11 +1307,7 @@ func (x *MethodSettings_LongRunning) String() string {
 
 func (*MethodSettings_LongRunning) ProtoMessage() {}
 
 func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message {
-<<<<<<< HEAD
 	mi := &file_google_api_client_proto_msgTypes[18]
-=======
-	mi := &file_google_api_client_proto_msgTypes[16]
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -1532,11 +1495,7 @@ var file_google_api_client_proto_rawDesc = []byte{
 	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
 	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
 	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
-<<<<<<< HEAD
 	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf8, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
-=======
-	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
 	0x73, 0x12, 0x30, 0x0a, 0x12, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64,
 	0x6f, 0x63, 0x73, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18,
@@ -1545,7 +1504,6 @@ var file_google_api_client_proto_rawDesc = []byte{
 	0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
 	0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62,
 	0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
-<<<<<<< HEAD
 	0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x62, 0x0a,
 	0x1a, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x67, 0x61, 0x70, 0x69, 0x63,
 	0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
@@ -1722,113 +1680,10 @@ var file_google_api_client_proto_rawDesc = []byte{
 	0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05,
 	0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
 	0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65,
-=======
-	[… stale generated raw-descriptor bytes from the conflicted merge, removed with the markers …]
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
 	0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
 	0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61,
 	0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d,
-<<<<<<< HEAD
 	0x6f, 0x6e, 0x22, 0xe4, 0x01, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
 	0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
 	0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43,
@@ -1918,149 +1773,6 @@ var file_google_api_client_proto_rawDesc = []byte{
 	0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
 	0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
 	0x6f, 0x33,
-=======
-	[… stale generated raw-descriptor bytes from the conflicted merge, removed with the markers …]
0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x42, 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, - 0x70, 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var ( @@ -2076,11 +1788,7 @@ func file_google_api_client_proto_rawDescGZIP() []byte { } var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -<<<<<<< HEAD var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 19) -======= -var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 17) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var file_google_api_client_proto_goTypes = []interface{}{ (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination @@ -2096,7 +1804,6 @@ var file_google_api_client_proto_goTypes = []interface{}{ (*RubySettings)(nil), // 11: google.api.RubySettings (*GoSettings)(nil), // 12: google.api.GoSettings (*MethodSettings)(nil), // 13: google.api.MethodSettings -<<<<<<< HEAD (*SelectiveGapicGeneration)(nil), // 14: google.api.SelectiveGapicGeneration nil, // 15: google.api.JavaSettings.ServiceClassNamesEntry (*PythonSettings_ExperimentalFeatures)(nil), // 16: google.api.PythonSettings.ExperimentalFeatures @@ -2150,57 +1857,6 @@ var file_google_api_client_proto_depIdxs = []int32{ 35, // [35:35] is the sub-list for extension type_name 31, // [31:35] is the sub-list for extension extendee 0, // [0:31] is the sub-list for field type_name -======= - nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry - (*PythonSettings_ExperimentalFeatures)(nil), // 15: google.api.PythonSettings.ExperimentalFeatures - nil, // 16: google.api.DotnetSettings.RenamedServicesEntry - nil, // 17: google.api.DotnetSettings.RenamedResourcesEntry - (*MethodSettings_LongRunning)(nil), // 18: google.api.MethodSettings.LongRunning - (api.LaunchStage)(0), // 19: google.api.LaunchStage - (*durationpb.Duration)(nil), // 20: google.protobuf.Duration - (*descriptorpb.MethodOptions)(nil), // 21: google.protobuf.MethodOptions - (*descriptorpb.ServiceOptions)(nil), // 22: google.protobuf.ServiceOptions -} -var file_google_api_client_proto_depIdxs = []int32{ - 1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination - 19, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage - 5, // 2: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings - 6, // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings - 7, // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings - 8, // 5: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings - 
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func init() { file_google_api_client_proto_init() }
@@ -2353,7 +2009,6 @@ func file_google_api_client_proto_init() {
 				return nil
 			}
 		}
-<<<<<<< HEAD
 		file_google_api_client_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
 			switch v := v.(*SelectiveGapicGeneration); i {
 			case 0:
@@ -2367,9 +2022,6 @@ func file_google_api_client_proto_init() {
 			}
 		}
 		file_google_api_client_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
-=======
-		file_google_api_client_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			switch v := v.(*PythonSettings_ExperimentalFeatures); i {
 			case 0:
 				return &v.state
@@ -2381,11 +2033,7 @@ func file_google_api_client_proto_init() {
 				return nil
 			}
 		}
-<<<<<<< HEAD
 		file_google_api_client_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
-=======
-		file_google_api_client_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			switch v := v.(*MethodSettings_LongRunning); i {
 			case 0:
 				return &v.state
@@ -2404,11 +2052,7 @@ func file_google_api_client_proto_init() {
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
 			RawDescriptor: file_google_api_client_proto_rawDesc,
 			NumEnums:      2,
-<<<<<<< HEAD
 			NumMessages:   19,
-=======
-			NumMessages:   17,
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			NumExtensions: 4,
 			NumServices:   0,
 		},
diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
index 1ea48cd118..f388426b08 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
@@ -159,23 +159,14 @@ var file_google_api_httpbody_proto_rawDesc = []byte{
 	0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
 	0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
 	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52,
-<<<<<<< HEAD
 	0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x65, 0x0a, 0x0e, 0x63,
-=======
-	0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x68, 0x0a, 0x0e, 0x63,
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x48,
 	0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b,
 	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72,
 	0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
 	0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f,
-<<<<<<< HEAD
 	0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xa2, 0x02, 0x04, 0x47, 0x41,
 	0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-=======
-	0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xf8, 0x01, 0x01, 0xa2, 0x02,
-	0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 var (
diff --git a/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go
index 5da71ecec8..7f6e006cde 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go
@@ -172,7 +172,6 @@ func (MetricDescriptor_ValueType) EnumDescriptor() ([]byte, []int) {
 	return file_google_api_metric_proto_rawDescGZIP(), []int{0, 1}
 }
 
-<<<<<<< HEAD
 // The resource hierarchy level of the timeseries data of a metric.
 type MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel int32
@@ -230,8 +229,6 @@ func (MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel
 	return file_google_api_metric_proto_rawDescGZIP(), []int{0, 0, 0}
 }
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // Defines a metric type and its schema. Once a metric descriptor is created,
 // deleting or altering it stops data collection and makes the metric type's
 // existing data unusable.
@@ -579,11 +576,8 @@ type MetricDescriptor_MetricDescriptorMetadata struct {
 	// age are guaranteed to be ingested and available to be read, excluding
 	// data loss due to errors.
 	IngestDelay *durationpb.Duration `protobuf:"bytes,3,opt,name=ingest_delay,json=ingestDelay,proto3" json:"ingest_delay,omitempty"`
-<<<<<<< HEAD
 	// The scope of the timeseries data of the metric.
 	TimeSeriesResourceHierarchyLevel []MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel `protobuf:"varint,4,rep,packed,name=time_series_resource_hierarchy_level,json=timeSeriesResourceHierarchyLevel,proto3,enum=google.api.MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel" json:"time_series_resource_hierarchy_level,omitempty"`
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *MetricDescriptor_MetricDescriptorMetadata) Reset() {
@@ -640,7 +634,6 @@ func (x *MetricDescriptor_MetricDescriptorMetadata) GetIngestDelay() *durationpb
 	return nil
 }
 
-<<<<<<< HEAD
 func (x *MetricDescriptor_MetricDescriptorMetadata) GetTimeSeriesResourceHierarchyLevel() []MetricDescriptor_MetricDescriptorMetadata_TimeSeriesResourceHierarchyLevel {
 	if x != nil {
 		return x.TimeSeriesResourceHierarchyLevel
@@ -648,8 +641,6 @@ func (x *MetricDescriptor_MetricDescriptorMetadata) GetTimeSeriesResourceHierarc
 	return nil
 }
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 var File_google_api_metric_proto protoreflect.FileDescriptor
 
 var file_google_api_metric_proto_rawDesc = []byte{
@@ -660,11 +651,7 @@ var file_google_api_metric_proto_rawDesc = []byte{
 	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68,
 	0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f,
 	0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75,
-<<<<<<< HEAD
 	0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf0, 0x09, 0x0a,
-=======
-	0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc1, 0x07, 0x0a,
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	0x10, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
 	0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
 	0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20,
@@ -699,11 +686,7 @@ var file_google_api_metric_proto_rawDesc = []byte{
 	0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79,
 	0x70, 0x65, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x16, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
 	0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65,
-<<<<<<< HEAD
 	0x73, 0x1a, 0x87, 0x04, 0x0a, 0x18, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63,
-=======
-	0x73, 0x1a, 0xd8, 0x01, 0x0a, 0x18, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63,
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3e,
 	0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x01,
 	0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
@@ -716,7 +699,6 @@ var file_google_api_metric_proto_rawDesc = []byte{
 	0x0a, 0x0c, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03,
 	0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
 	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
-<<<<<<< HEAD
 	0x0b, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0xa6, 0x01, 0x0a,
 	0x24, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73,
 	0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x79, 0x5f,
@@ -765,37 +747,6 @@ var file_google_api_metric_proto_rawDesc = []byte{
 	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65,
 	0x74, 0x72, 0x69, 0x63, 0x3b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0xa2, 0x02, 0x04, 0x47, 0x41,
 	0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-=======
-	[… stale generated raw-descriptor bytes from the conflicted merge, removed with the markers …]
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 var (
@@ -810,7 +761,6 @@ func file_google_api_metric_proto_rawDescGZIP() []byte {
 	return file_google_api_metric_proto_rawDescData
 }
 
-<<<<<<< HEAD
 var file_google_api_metric_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
 var file_google_api_metric_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
 var file_google_api_metric_proto_goTypes = []interface{}{
@@ -841,36 +791,6 @@ var file_google_api_metric_proto_depIdxs = []int32{
 	10, // [10:10] is the sub-list for extension type_name
 	10, // [10:10] is the sub-list for extension extendee
 	0,  // [0:10] is the sub-list for field type_name
-=======
-var file_google_api_metric_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
-var file_google_api_metric_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
-var file_google_api_metric_proto_goTypes = []interface{}{
-	(MetricDescriptor_MetricKind)(0), // 0: google.api.MetricDescriptor.MetricKind
-	(MetricDescriptor_ValueType)(0),  // 1: google.api.MetricDescriptor.ValueType
-	(*MetricDescriptor)(nil),         // 2: google.api.MetricDescriptor
-	(*Metric)(nil),                   // 3: google.api.Metric
-	(*MetricDescriptor_MetricDescriptorMetadata)(nil), // 4: google.api.MetricDescriptor.MetricDescriptorMetadata
-	nil,                           // 5: google.api.Metric.LabelsEntry
-	(*label.LabelDescriptor)(nil), // 6: google.api.LabelDescriptor
-	(api.LaunchStage)(0),          // 7: google.api.LaunchStage
-	(*durationpb.Duration)(nil),   // 8: google.protobuf.Duration
-}
-var file_google_api_metric_proto_depIdxs = []int32{
-	6, // 0: google.api.MetricDescriptor.labels:type_name -> google.api.LabelDescriptor
-	0, // 1: google.api.MetricDescriptor.metric_kind:type_name -> google.api.MetricDescriptor.MetricKind
-	1, // 2: google.api.MetricDescriptor.value_type:type_name -> google.api.MetricDescriptor.ValueType
-	4, // 3: google.api.MetricDescriptor.metadata:type_name -> google.api.MetricDescriptor.MetricDescriptorMetadata
-	7, // 4: google.api.MetricDescriptor.launch_stage:type_name -> google.api.LaunchStage
-	5, // 5: google.api.Metric.labels:type_name -> google.api.Metric.LabelsEntry
-	7, // 6: google.api.MetricDescriptor.MetricDescriptorMetadata.launch_stage:type_name -> google.api.LaunchStage
-	8, // 7: google.api.MetricDescriptor.MetricDescriptorMetadata.sample_period:type_name -> google.protobuf.Duration
-	8, // 8: google.api.MetricDescriptor.MetricDescriptorMetadata.ingest_delay:type_name -> google.protobuf.Duration
-	9, // [9:9] is the sub-list for method output_type
-	9, // [9:9] is the sub-list for method input_type
-	9, // [9:9] is the sub-list for extension type_name
-	9, // [9:9] is the sub-list for extension extendee
-	0, // [0:9] is the sub-list for field type_name
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func init() { file_google_api_metric_proto_init() }
@@ -921,11 +841,7 @@ func file_google_api_metric_proto_init() {
 		File: protoimpl.DescBuilder{
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
file_google_api_metric_proto_rawDesc, -<<<<<<< HEAD NumEnums: 3, -======= - NumEnums: 2, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) NumMessages: 4, NumExtensions: 0, NumServices: 0, diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go index fb8540b663..3cd9a5bb8e 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go @@ -80,20 +80,12 @@ type ErrorInfo struct { Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"` // Additional structured details about this error. // -<<<<<<< HEAD // Keys must match a regular expression of `[a-z][a-zA-Z0-9-_]+` but should // ideally be lowerCamelCase. Also, they must be limited to 64 characters in // length. When identifying the current value of an exceeded limit, the units // should be contained in the key, not the value. For example, rather than // `{"instanceLimit": "100/request"}`, should be returned as, // `{"instanceLimitPerRequest": "100"}`, if the client exceeds the number of -======= - // Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in - // length. When identifying the current value of an exceeded limit, the units - // should be contained in the key, not the value. For example, rather than - // {"instanceLimit": "100/request"}, should be returned as, - // {"instanceLimitPerRequest": "100"}, if the client exceeds the number of ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // instances that can be created in a single (batch) request. Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } @@ -879,7 +871,6 @@ type BadRequest_FieldViolation struct { Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"` // A description of why the request element is bad. Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` -<<<<<<< HEAD // The reason of the field-level error. This is a constant value that // identifies the proximate cause of the field-level error. It should // uniquely identify the type of the FieldViolation within the scope of the @@ -890,8 +881,6 @@ type BadRequest_FieldViolation struct { // Provides a localized error message for field-level errors that is safe to // return to the API consumer. LocalizedMessage *LocalizedMessage `protobuf:"bytes,4,opt,name=localized_message,json=localizedMessage,proto3" json:"localized_message,omitempty"` -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *BadRequest_FieldViolation) Reset() { @@ -940,7 +929,6 @@ func (x *BadRequest_FieldViolation) GetDescription() string { return "" } -<<<<<<< HEAD func (x *BadRequest_FieldViolation) GetReason() string { if x != nil { return x.Reason @@ -955,8 +943,6 @@ func (x *BadRequest_FieldViolation) GetLocalizedMessage() *LocalizedMessage { return nil } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Describes a URL link. 
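Note for reviewers: the errdetails hunks above add Reason and LocalizedMessage to BadRequest.FieldViolation. A minimal client-side sketch of consuming the new fields; describeBadRequest and the log format are illustrative, not part of this patch:

    import (
        "log"

        "google.golang.org/genproto/googleapis/rpc/errdetails"
        "google.golang.org/grpc/status"
    )

    func describeBadRequest(err error) {
        s, ok := status.FromError(err)
        if !ok {
            return // not a gRPC status error
        }
        for _, d := range s.Details() {
            br, ok := d.(*errdetails.BadRequest)
            if !ok {
                continue
            }
            for _, fv := range br.GetFieldViolations() {
                // GetReason and GetLocalizedMessage are the accessors added by
                // this change; protobuf getters are nil-safe, so a violation
                // without a localized message logs an empty string.
                log.Printf("field=%q reason=%q msg=%q",
                    fv.GetField(), fv.GetReason(), fv.GetLocalizedMessage().GetMessage())
            }
        }
    }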
type Help_Link struct { state protoimpl.MessageState @@ -1065,17 +1051,12 @@ var file_google_rpc_error_details_proto_rawDesc = []byte{ 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, -<<<<<<< HEAD 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x02, 0x0a, 0x0a, 0x42, 0x61, -======= - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa8, 0x01, 0x0a, 0x0a, 0x42, 0x61, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, -<<<<<<< HEAD 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xab, 0x01, 0x0a, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, @@ -1121,47 +1102,6 @@ var file_google_rpc_error_details_proto_rawDesc = []byte{ 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -======= - 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x48, 0x0a, 0x0e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, - 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, - 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, - 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 
0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04, 0x48, 0x65, 0x6c, 0x70, - 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x6c, - 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3a, 0x0a, - 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a, 0x10, 0x4c, 0x6f, 0x63, - 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, - 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, - 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, - 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, - 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72, - 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } var ( @@ -1202,20 +1142,12 @@ var file_google_rpc_error_details_proto_depIdxs = []int32{ 12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation 13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation 14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link -<<<<<<< HEAD 9, // 6: google.rpc.BadRequest.FieldViolation.localized_message:type_name -> google.rpc.LocalizedMessage 7, // [7:7] is the sub-list for method output_type 7, // [7:7] is the sub-list for method input_type 7, // [7:7] is the sub-list for extension type_name 7, // [7:7] is the sub-list for extension extendee 0, // [0:7] is the sub-list for field type_name -======= - 6, // [6:6] is the sub-list for method output_type - 6, // [6:6] is the sub-list for method input_type - 6, // [6:6] is the sub-list for extension type_name - 6, // [6:6] is the sub-list for extension extendee - 0, // [0:6] is the sub-list for field type_name ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func init() { file_google_rpc_error_details_proto_init() } diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index 13b0558f27..382ad69411 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -73,20 +73,6 @@ func unregisterForTesting(name string) { delete(m, name) } 
-<<<<<<< HEAD -======= -// connectedAddress returns the connected address for a SubConnState. The -// address is only valid if the state is READY. -func connectedAddress(scs SubConnState) resolver.Address { - return scs.connectedAddress -} - -// setConnectedAddress sets the connected address for a SubConnState. -func setConnectedAddress(scs *SubConnState, addr resolver.Address) { - scs.connectedAddress = addr -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func init() { internal.BalancerUnregister = unregisterForTesting internal.ConnectedAddress = connectedAddress @@ -109,60 +95,6 @@ func Get(name string) Builder { return nil } -<<<<<<< HEAD -======= -// A SubConn represents a single connection to a gRPC backend service. -// -// Each SubConn contains a list of addresses. -// -// All SubConns start in IDLE, and will not try to connect. To trigger the -// connecting, Balancers must call Connect. If a connection re-enters IDLE, -// Balancers must call Connect again to trigger a new connection attempt. -// -// gRPC will try to connect to the addresses in sequence, and stop trying the -// remainder once the first connection is successful. If an attempt to connect -// to all addresses encounters an error, the SubConn will enter -// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE. -// -// Once established, if a connection is lost, the SubConn will transition -// directly to IDLE. -// -// This interface is to be implemented by gRPC. Users should not need their own -// implementation of this interface. For situations like testing, any -// implementations should embed this interface. This allows gRPC to add new -// methods to this interface. -type SubConn interface { - // UpdateAddresses updates the addresses used in this SubConn. - // gRPC checks if currently-connected address is still in the new list. - // If it's in the list, the connection will be kept. - // If it's not in the list, the connection will gracefully close, and - // a new connection will be created. - // - // This will trigger a state transition for the SubConn. - // - // Deprecated: this method will be removed. Create new SubConns for new - // addresses instead. - UpdateAddresses([]resolver.Address) - // Connect starts the connecting for this SubConn. - Connect() - // GetOrBuildProducer returns a reference to the existing Producer for this - // ProducerBuilder in this SubConn, or, if one does not currently exist, - // creates a new one and returns it. Returns a close function which may be - // called when the Producer is no longer needed. Otherwise the producer - // will automatically be closed upon connection loss or subchannel close. - // Should only be called on a SubConn in state Ready. Otherwise the - // producer will be unable to create streams. - GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) - // Shutdown shuts down the SubConn gracefully. Any started RPCs will be - // allowed to complete. No future calls should be made on the SubConn. - // One final state update will be delivered to the StateListener (or - // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to - // indicate the shutdown operation. This may be delivered before - // in-progress RPCs are complete and the actual connection is closed. - Shutdown() -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // NewSubConnOptions contains options to create new SubConn. 
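Note for reviewers: the SubConn semantics described in the comment block removed here (start in IDLE, connect only on an explicit Connect call, back off through TRANSIENT_FAILURE) are unchanged by this resolution. A sketch of the typical call site that ties SubConn, StateListener and NewSubConnOptions together; cc, addr and handleState are illustrative:

    var sc balancer.SubConn
    sc, err := cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{
        StateListener: func(s balancer.SubConnState) {
            // Called for CONNECTING, READY, TRANSIENT_FAILURE, IDLE and the
            // final SHUTDOWN notification.
            handleState(sc, s)
        },
    })
    if err != nil {
        return err
    }
    sc.Connect() // SubConns never leave IDLE on their own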
type NewSubConnOptions struct { // CredsBundle is the credentials bundle that will be used in the created @@ -430,21 +362,6 @@ type ExitIdler interface { ExitIdle() } -<<<<<<< HEAD -======= -// SubConnState describes the state of a SubConn. -type SubConnState struct { - // ConnectivityState is the connectivity state of the SubConn. - ConnectivityState connectivity.State - // ConnectionError is set if the ConnectivityState is TransientFailure, - // describing the reason the SubConn failed. Otherwise, it is nil. - ConnectionError error - // connectedAddr contains the connected address when ConnectivityState is - // Ready. Otherwise, it is indeterminate. - connectedAddress resolver.Address -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // ClientConnState describes the state of a ClientConn relevant to the // balancer. type ClientConnState struct { @@ -457,25 +374,3 @@ type ClientConnState struct { // ErrBadResolverState may be returned by UpdateClientConnState to indicate a // problem with the provided name resolver data. var ErrBadResolverState = errors.New("bad resolver state") -<<<<<<< HEAD -======= - -// A ProducerBuilder is a simple constructor for a Producer. It is used by the -// SubConn to create producers when needed. -type ProducerBuilder interface { - // Build creates a Producer. The first parameter is always a - // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the - // associated SubConn), but is declared as `any` to avoid a dependency - // cycle. Build also returns a close function that will be called when all - // references to the Producer have been given up for a SubConn, or when a - // connectivity state change occurs on the SubConn. The close function - // should always block until all asynchronous cleanup work is completed. - Build(grpcClientConnInterface any) (p Producer, close func()) -} - -// A Producer is a type shared among potentially many consumers. It is -// associated with a SubConn, and an implementation will typically contain -// other methods to provide additional functionality, e.g. configuration or -// subscription registration. -type Producer any ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go index cae53b4b64..86d495bb62 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -19,11 +19,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -<<<<<<< HEAD // protoc-gen-go v1.35.2 -======= -// protoc-gen-go v1.34.2 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // protoc v5.27.1 // source: grpc/lb/v1/load_balancer.proto @@ -59,17 +55,9 @@ type LoadBalanceRequest struct { func (x *LoadBalanceRequest) Reset() { *x = LoadBalanceRequest{} -<<<<<<< HEAD mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *LoadBalanceRequest) String() string { @@ -80,11 +68,7 @@ func (*LoadBalanceRequest) ProtoMessage() {} func (x *LoadBalanceRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[0] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -153,17 +137,9 @@ type InitialLoadBalanceRequest struct { func (x *InitialLoadBalanceRequest) Reset() { *x = InitialLoadBalanceRequest{} -<<<<<<< HEAD mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *InitialLoadBalanceRequest) String() string { @@ -174,11 +150,7 @@ func (*InitialLoadBalanceRequest) ProtoMessage() {} func (x *InitialLoadBalanceRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[1] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -214,17 +186,9 @@ type ClientStatsPerToken struct { func (x *ClientStatsPerToken) Reset() { *x = ClientStatsPerToken{} -<<<<<<< HEAD mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ClientStatsPerToken) String() string { @@ -235,11 +199,7 @@ func (*ClientStatsPerToken) ProtoMessage() {} func (x *ClientStatsPerToken) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[2] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -292,17 +252,9 @@ type ClientStats struct { func (x *ClientStats) Reset() { *x = ClientStats{} -<<<<<<< HEAD mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := 
&file_grpc_lb_v1_load_balancer_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ClientStats) String() string { @@ -313,11 +265,7 @@ func (*ClientStats) ProtoMessage() {} func (x *ClientStats) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[3] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -389,17 +337,9 @@ type LoadBalanceResponse struct { func (x *LoadBalanceResponse) Reset() { *x = LoadBalanceResponse{} -<<<<<<< HEAD mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *LoadBalanceResponse) String() string { @@ -410,11 +350,7 @@ func (*LoadBalanceResponse) ProtoMessage() {} func (x *LoadBalanceResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[4] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -492,17 +428,9 @@ type FallbackResponse struct { func (x *FallbackResponse) Reset() { *x = FallbackResponse{} -<<<<<<< HEAD mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *FallbackResponse) String() string { @@ -513,11 +441,7 @@ func (*FallbackResponse) ProtoMessage() {} func (x *FallbackResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[5] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -545,17 +469,9 @@ type InitialLoadBalanceResponse struct { func (x *InitialLoadBalanceResponse) Reset() { *x = InitialLoadBalanceResponse{} -<<<<<<< HEAD mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *InitialLoadBalanceResponse) String() string { @@ -566,11 +482,7 @@ func (*InitialLoadBalanceResponse) ProtoMessage() {} func (x *InitialLoadBalanceResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[6] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add 
archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -606,17 +518,9 @@ type ServerList struct { func (x *ServerList) Reset() { *x = ServerList{} -<<<<<<< HEAD mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *ServerList) String() string { @@ -627,11 +531,7 @@ func (*ServerList) ProtoMessage() {} func (x *ServerList) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[7] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -681,17 +581,9 @@ type Server struct { func (x *Server) Reset() { *x = Server{} -<<<<<<< HEAD mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) -======= - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (x *Server) String() string { @@ -702,11 +594,7 @@ func (*Server) ProtoMessage() {} func (x *Server) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[8] -<<<<<<< HEAD if x != nil { -======= - if protoimpl.UnsafeEnabled && x != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -911,119 +799,6 @@ func file_grpc_lb_v1_load_balancer_proto_init() { if File_grpc_lb_v1_load_balancer_proto != nil { return } -<<<<<<< HEAD -======= - if !protoimpl.UnsafeEnabled { - file_grpc_lb_v1_load_balancer_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*LoadBalanceRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*InitialLoadBalanceRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*ClientStatsPerToken); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*ClientStats); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*LoadBalanceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[5].Exporter = func(v 
any, i int) any { - switch v := v.(*FallbackResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*InitialLoadBalanceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*ServerList); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*Server); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) file_grpc_lb_v1_load_balancer_proto_msgTypes[0].OneofWrappers = []any{ (*LoadBalanceRequest_InitialRequest)(nil), (*LoadBalanceRequest_ClientStats)(nil), diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go index 3771f8c93e..9ff07522d7 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go @@ -19,11 +19,7 @@ package grpclb import ( -<<<<<<< HEAD rand "math/rand/v2" -======= - "math/rand" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "sync" "sync/atomic" @@ -116,11 +112,7 @@ type rrPicker struct { func newRRPicker(readySCs []balancer.SubConn) *rrPicker { return &rrPicker{ subConns: readySCs, -<<<<<<< HEAD subConnsNext: rand.IntN(len(readySCs)), -======= - subConnsNext: rand.Intn(len(readySCs)), ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -155,11 +147,7 @@ func newLBPicker(serverList []*lbpb.Server, readySCs []balancer.SubConn, stats * return &lbPicker{ serverList: serverList, subConns: readySCs, -<<<<<<< HEAD subConnsNext: rand.IntN(len(readySCs)), -======= - subConnsNext: rand.Intn(len(readySCs)), ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) stats: stats, } } diff --git a/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go b/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go index 85bf145a48..6dede1a40b 100644 --- a/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go +++ b/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go @@ -22,11 +22,7 @@ package leastrequest import ( "encoding/json" "fmt" -<<<<<<< HEAD rand "math/rand/v2" -======= - "math/rand" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "sync/atomic" "google.golang.org/grpc/balancer" diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go index 24d5af76eb..7d66cb491c 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go @@ -18,7 +18,6 @@ // Package internal contains code internal to the pickfirst package. package internal -<<<<<<< HEAD import ( rand "math/rand/v2" "time" @@ -34,9 +33,3 @@ var ( return func() { timer.Stop() } } ) -======= -import "math/rand" - -// RandShuffle pseudo-randomizes the order of addresses. 
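Note for reviewers: this hunk, together with the grpclb picker and leastrequest hunks above, is one mechanical migration to Go 1.22's math/rand/v2. The visible API differences, sketched side by side (the function is illustrative):

    import (
        randv1 "math/rand"
        rand "math/rand/v2"
    )

    func compare(n int) {
        _ = randv1.Intn(n) // v1 spelling; its global source needed explicit seeding before Go 1.20
        _ = rand.IntN(n)   // v2 spelling; top-level functions are always randomly seeded
        // Shuffle has the same signature in both versions, so the old
        // `var RandShuffle = rand.Shuffle` assignment type-checks unchanged
        // after the import swap.
        rand.Shuffle(n, func(i, j int) { /* a real swap exchanges elements i and j */ })
    }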
-var RandShuffle = rand.Shuffle ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index 3add356c78..ea8899818c 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -23,11 +23,7 @@ import ( "encoding/json" "errors" "fmt" -<<<<<<< HEAD rand "math/rand/v2" -======= - "math/rand" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/pickfirst/internal" diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go index 0d359193e4..76fa5fea95 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go @@ -29,22 +29,15 @@ import ( "encoding/json" "errors" "fmt" -<<<<<<< HEAD "net" "net/netip" "sync" "time" -======= - "sync" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/pickfirst/internal" "google.golang.org/grpc/connectivity" -<<<<<<< HEAD expstats "google.golang.org/grpc/experimental/stats" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/envconfig" internalgrpclog "google.golang.org/grpc/internal/grpclog" @@ -61,7 +54,6 @@ func init() { balancer.Register(pickfirstBuilder{}) } -<<<<<<< HEAD type ( // enableHealthListenerKeyType is a unique key type used in resolver // attributes to indicate whether the health listener usage is enabled. @@ -75,14 +67,11 @@ type ( managedByPickfirstKeyType struct{} ) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var ( logger = grpclog.Component("pick-first-leaf-lb") // Name is the name of the pick_first_leaf balancer. // It is changed to "pick_first" in init() if this balancer is to be // registered as the default pickfirst. -<<<<<<< HEAD Name = "pick_first_leaf" disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ Name: "grpc.lb.pick_first.disconnections", @@ -136,23 +125,6 @@ func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) subConns: resolver.NewAddressMap(), state: connectivity.Connecting, cancelConnectionTimer: func() {}, -======= - Name = "pick_first_leaf" -) - -// TODO: change to pick-first when this becomes the default pick_first policy. -const logPrefix = "[pick-first-leaf-lb %p] " - -type pickfirstBuilder struct{} - -func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { - b := &pickfirstBalancer{ - cc: cc, - addressList: addressList{}, - subConns: resolver.NewAddressMap(), - state: connectivity.Connecting, - mu: sync.Mutex{}, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) return b @@ -170,7 +142,6 @@ func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalan return cfg, nil } -<<<<<<< HEAD // EnableHealthListener updates the state to configure pickfirst for using a // generic health listener. 
func EnableHealthListener(state resolver.State) resolver.State { @@ -189,8 +160,6 @@ func IsManagedByPickfirst(addr resolver.Address) bool { return addr.BalancerAttributes.Value(managedByPickfirstKeyType{}) != nil } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type pfConfig struct { serviceconfig.LoadBalancingConfig `json:"-"` @@ -208,7 +177,6 @@ type scData struct { subConn balancer.SubConn addr resolver.Address -<<<<<<< HEAD rawConnectivityState connectivity.State // The effective connectivity state based on raw connectivity, health state // and after following sticky TransientFailure behaviour defined in A62. @@ -223,16 +191,6 @@ func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { rawConnectivityState: connectivity.Idle, effectiveState: connectivity.Idle, addr: addr, -======= - state connectivity.State - lastErr error -} - -func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { - sd := &scData{ - state: connectivity.Idle, - addr: addr, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{ StateListener: func(state balancer.SubConnState) { @@ -249,20 +207,14 @@ func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { type pickfirstBalancer struct { // The following fields are initialized at build time and read-only after // that and therefore do not need to be guarded by a mutex. -<<<<<<< HEAD logger *internalgrpclog.PrefixLogger cc balancer.ClientConn target string metricsRecorder expstats.MetricsRecorder // guaranteed to be non nil -======= - logger *internalgrpclog.PrefixLogger - cc balancer.ClientConn ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The mutex is used to ensure synchronization of updates triggered // from the idle picker and the already serialized resolver, // SubConn state updates. -<<<<<<< HEAD mu sync.Mutex // State reported to the channel based on SubConn states and resolver // updates. @@ -274,15 +226,6 @@ type pickfirstBalancer struct { numTF int cancelConnectionTimer func() healthCheckingEnabled bool -======= - mu sync.Mutex - state connectivity.State - // scData for active subonns mapped by address. - subConns *resolver.AddressMap - addressList addressList - firstPass bool - numTF int ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // ResolverError is called by the ClientConn when the name resolver produces @@ -308,11 +251,7 @@ func (b *pickfirstBalancer) resolverErrorLocked(err error) { return } -<<<<<<< HEAD b.updateBalancerState(balancer.State{ -======= - b.cc.UpdateState(balancer.State{ ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ConnectivityState: connectivity.TransientFailure, Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, }) @@ -321,26 +260,16 @@ func (b *pickfirstBalancer) resolverErrorLocked(err error) { func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { b.mu.Lock() defer b.mu.Unlock() -<<<<<<< HEAD b.cancelConnectionTimer() if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { // Cleanup state pertaining to the previous resolver state. // Treat an empty address list like an error by calling b.ResolverError. -======= - if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { - // Cleanup state pertaining to the previous resolver state. - // Treat an empty address list like an error by calling b.ResolverError. 
- b.state = connectivity.TransientFailure ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) b.closeSubConnsLocked() b.addressList.updateAddrs(nil) b.resolverErrorLocked(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } -<<<<<<< HEAD b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) cfg, ok := state.BalancerConfig.(pfConfig) if state.BalancerConfig != nil && !ok { return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState) @@ -363,12 +292,6 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // "Flatten the list by concatenating the ordered list of addresses for // each of the endpoints, in order." - A61 for _, endpoint := range endpoints { -<<<<<<< HEAD -======= - // "In the flattened list, interleave addresses from the two address - // families, as per RFC-8305 section 4." - A61 - // TODO: support the above language. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) newAddrs = append(newAddrs, endpoint.Addresses...) } } else { @@ -391,7 +314,6 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // Not de-duplicating would result in attempting to connect to the same // SubConn multiple times in the same pass. We don't want this. newAddrs = deDupAddresses(newAddrs) -<<<<<<< HEAD newAddrs = interleaveAddresses(newAddrs) prevAddr := b.addressList.currentAddress() @@ -403,18 +325,6 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // If the previous ready SubConn exists in new address list, // keep this connection and don't create new SubConns. if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) { -======= - - // Since we have a new set of addresses, we are again at first pass. - b.firstPass = true - - // If the previous ready SubConn exists in new address list, - // keep this connection and don't create new SubConns. - prevAddr := b.addressList.currentAddress() - prevAddrsCount := b.addressList.size() - b.addressList.updateAddrs(newAddrs) - if b.state == connectivity.Ready && b.addressList.seekTo(prevAddr) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } @@ -426,7 +336,6 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // we should still enter CONNECTING because the sticky TF behaviour // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported // due to connectivity failures. -<<<<<<< HEAD if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 { // Start connection attempt at first address. b.forceUpdateConcludedStateLocked(balancer.State{ @@ -438,20 +347,6 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until // we're READY. See A62. b.startFirstPassLocked() -======= - if b.state == connectivity.Ready || b.state == connectivity.Connecting || prevAddrsCount == 0 { - // Start connection attempt at first address. 
- b.state = connectivity.Connecting - b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - b.requestConnectionLocked() - } else if b.state == connectivity.TransientFailure { - // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until - // we're READY. See A62. - b.requestConnectionLocked() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return nil } @@ -466,10 +361,7 @@ func (b *pickfirstBalancer) Close() { b.mu.Lock() defer b.mu.Unlock() b.closeSubConnsLocked() -<<<<<<< HEAD b.cancelConnectionTimer() -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) b.state = connectivity.Shutdown } @@ -479,7 +371,6 @@ func (b *pickfirstBalancer) Close() { func (b *pickfirstBalancer) ExitIdle() { b.mu.Lock() defer b.mu.Unlock() -<<<<<<< HEAD if b.state == connectivity.Idle { b.startFirstPassLocked() } @@ -495,14 +386,6 @@ func (b *pickfirstBalancer) startFirstPassLocked() { b.requestConnectionLocked() } -======= - if b.state == connectivity.Idle && b.addressList.currentAddress() == b.addressList.first() { - b.firstPass = true - b.requestConnectionLocked() - } -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (b *pickfirstBalancer) closeSubConnsLocked() { for _, sd := range b.subConns.Values() { sd.(*scData).subConn.Shutdown() @@ -524,7 +407,6 @@ func deDupAddresses(addrs []resolver.Address) []resolver.Address { return retAddrs } -<<<<<<< HEAD // interleaveAddresses interleaves addresses of both families (IPv4 and IPv6) // as per RFC-8305 section 4. // Whichever address family is first in the list is followed by an address of @@ -589,8 +471,6 @@ func addressFamily(address string) ipAddrFamily { } } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // reconcileSubConnsLocked updates the active subchannels based on a new address // list from the resolver. It does this by: // - closing subchannels: any existing subchannels associated with addresses @@ -619,10 +499,7 @@ func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) // shutdownRemainingLocked shuts down remaining subConns. Called when a subConn // becomes ready, which means that all other subConn must be shutdown. func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) { -<<<<<<< HEAD b.cancelConnectionTimer() -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, v := range b.subConns.Values() { sd := v.(*scData) if sd.subConn != selected.subConn { @@ -663,7 +540,6 @@ func (b *pickfirstBalancer) requestConnectionLocked() { } scd := sd.(*scData) -<<<<<<< HEAD switch scd.rawConnectivityState { case connectivity.Idle: scd.subConn.Connect() @@ -720,46 +596,17 @@ func (b *pickfirstBalancer) scheduleNextConnectionLocked() { cancelled = true closeFn() }) -======= - switch scd.state { - case connectivity.Idle: - scd.subConn.Connect() - case connectivity.TransientFailure: - // Try the next address. - lastErr = scd.lastErr - continue - case connectivity.Ready: - // Should never happen. - b.logger.Errorf("Requesting a connection even though we have a READY SubConn") - case connectivity.Shutdown: - // Should never happen. - b.logger.Errorf("SubConn with state SHUTDOWN present in SubConns map") - case connectivity.Connecting: - // Wait for the SubConn to report success or failure. - } - return - } - // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the - // first pass. 
- b.endFirstPassLocked(lastErr) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) { b.mu.Lock() defer b.mu.Unlock() -<<<<<<< HEAD oldState := sd.rawConnectivityState sd.rawConnectivityState = newState.ConnectivityState -======= - oldState := sd.state - sd.state = newState.ConnectivityState ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Previously relevant SubConns can still callback with state updates. // To prevent pickers from returning these obsolete SubConns, this logic // is included to check if the current list of active SubConns includes this // SubConn. -<<<<<<< HEAD if !b.isActiveSCData(sd) { return } @@ -776,16 +623,6 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub if newState.ConnectivityState == connectivity.Ready { connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) -======= - if activeSD, found := b.subConns.Get(sd.addr); !found || activeSD != sd { - return - } - if newState.ConnectivityState == connectivity.Shutdown { - return - } - - if newState.ConnectivityState == connectivity.Ready { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) b.shutdownRemainingLocked(sd) if !b.addressList.seekTo(sd.addr) { // This should not fail as we should have only one SubConn after @@ -793,7 +630,6 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses) return } -<<<<<<< HEAD if !b.healthCheckingEnabled { if b.logger.V(2) { b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn) @@ -818,12 +654,6 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub }) sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) { b.updateSubConnHealthState(sd, scs) -======= - b.state = connectivity.Ready - b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.Ready, - Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }) return } @@ -834,7 +664,6 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub // a transport is successfully created, but the connection fails // before the SubConn can send the notification for READY. We treat // this as a successful connection and transition to IDLE. -<<<<<<< HEAD // TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second // part of the if condition below once the issue is fixed. if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) { @@ -853,15 +682,6 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub disconnectionsMetric.Record(b.metricsRecorder, 1, b.target) b.addressList.reset() b.updateBalancerState(balancer.State{ -======= - if (b.state == connectivity.Ready && newState.ConnectivityState != connectivity.Ready) || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) { - // Once a transport fails, the balancer enters IDLE and starts from - // the first address when the picker is used. 
- b.shutdownRemainingLocked(sd) - b.state = connectivity.Idle - b.addressList.reset() - b.cc.UpdateState(balancer.State{ ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ConnectivityState: connectivity.Idle, Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)}, }) @@ -871,29 +691,18 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub if b.firstPass { switch newState.ConnectivityState { case connectivity.Connecting: -<<<<<<< HEAD // The effective state can be in either IDLE, CONNECTING or // TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in // TRANSIENT_FAILURE until it's READY. See A62. if sd.effectiveState != connectivity.TransientFailure { sd.effectiveState = connectivity.Connecting b.updateBalancerState(balancer.State{ -======= - // The balancer can be in either IDLE, CONNECTING or - // TRANSIENT_FAILURE. If it's in TRANSIENT_FAILURE, stay in - // TRANSIENT_FAILURE until it's READY. See A62. - // If the balancer is already in CONNECTING, no update is needed. - if b.state == connectivity.Idle { - b.state = connectivity.Connecting - b.cc.UpdateState(balancer.State{ ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ConnectivityState: connectivity.Connecting, Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) } case connectivity.TransientFailure: sd.lastErr = newState.ConnectionError -<<<<<<< HEAD sd.effectiveState = connectivity.TransientFailure // Since we're re-using common SubConns while handling resolver // updates, we could receive an out of turn TRANSIENT_FAILURE from @@ -911,21 +720,6 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub // End the first pass if we've seen a TRANSIENT_FAILURE from all // SubConns once. b.endFirstPassIfPossibleLocked(newState.ConnectionError) -======= - // Since we're re-using common SubConns while handling resolver - // updates, we could receive an out of turn TRANSIENT_FAILURE from - // a pass over the previous address list. We ignore such updates. - - if curAddr := b.addressList.currentAddress(); !equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { - return - } - if b.addressList.increment() { - b.requestConnectionLocked() - return - } - // End of the first pass. - b.endFirstPassLocked(newState.ConnectionError) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return } @@ -936,11 +730,7 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub b.numTF = (b.numTF + 1) % b.subConns.Len() sd.lastErr = newState.ConnectionError if b.numTF%b.subConns.Len() == 0 { -<<<<<<< HEAD b.updateBalancerState(balancer.State{ -======= - b.cc.UpdateState(balancer.State{ ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ConnectivityState: connectivity.TransientFailure, Picker: &picker{err: newState.ConnectionError}, }) @@ -954,7 +744,6 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub } } -<<<<<<< HEAD // endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the // addresses are tried and their SubConns have reported a failure. 
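Note for reviewers: the new pick-first-leaf flow is Happy Eyeballs (RFC 8305): interleaveAddresses, added a few hunks above, alternates the two IP families, and the "first pass" ends only once every address has reported TRANSIENT_FAILURE. A standalone sketch of the interleaving idea, simplified to bare IP strings; the real code works on resolver.Address values and also classifies non-IP addresses:

    import "net/netip"

    // interleaveByFamily alternates IPv4 and IPv6 addresses, starting with the
    // family of the first address, per RFC 8305 section 4. A simplified
    // sketch, not the implementation in this patch.
    func interleaveByFamily(addrs []string) []string {
        if len(addrs) == 0 {
            return nil
        }
        var v4, v6 []string
        for _, a := range addrs {
            if ip, err := netip.ParseAddr(a); err == nil && ip.Is4() {
                v4 = append(v4, a)
            } else {
                v6 = append(v6, a) // treat unparsable addresses as IPv6 for brevity
            }
        }
        first, second := v4, v6
        if ip, err := netip.ParseAddr(addrs[0]); err == nil && !ip.Is4() {
            first, second = v6, v4
        }
        out := make([]string, 0, len(addrs))
        for i := 0; i < len(first) || i < len(second); i++ {
            if i < len(first) {
                out = append(out, first[i])
            }
            if i < len(second) {
                out = append(out, second[i])
            }
        }
        return out
    }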
func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { @@ -972,31 +761,18 @@ func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { } b.firstPass = false b.updateBalancerState(balancer.State{ -======= -func (b *pickfirstBalancer) endFirstPassLocked(lastErr error) { - b.firstPass = false - b.numTF = 0 - b.state = connectivity.TransientFailure - - b.cc.UpdateState(balancer.State{ ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ConnectivityState: connectivity.TransientFailure, Picker: &picker{err: lastErr}, }) // Start re-connecting all the SubConns that are already in IDLE. for _, v := range b.subConns.Values() { sd := v.(*scData) -<<<<<<< HEAD if sd.rawConnectivityState == connectivity.Idle { -======= - if sd.state == connectivity.Idle { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) sd.subConn.Connect() } } } -<<<<<<< HEAD func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool { activeSD, found := b.subConns.Get(sd.addr) return found && activeSD == sd @@ -1057,8 +833,6 @@ func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.St b.cc.UpdateState(newState) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type picker struct { result balancer.PickResult err error @@ -1115,18 +889,6 @@ func (al *addressList) currentAddress() resolver.Address { return al.addresses[al.idx] } -<<<<<<< HEAD -======= -// first returns the first address in the list. If the list is empty, it returns -// an empty address instead. -func (al *addressList) first() resolver.Address { - if len(al.addresses) == 0 { - return resolver.Address{} - } - return al.addresses[0] -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (al *addressList) reset() { al.idx = 0 } @@ -1149,7 +911,6 @@ func (al *addressList) seekTo(needle resolver.Address) bool { return false } -<<<<<<< HEAD // hasNext returns whether incrementing the addressList will result in moving // past the end of the list. If the list has already moved past the end, it // returns false. @@ -1160,8 +921,6 @@ func (al *addressList) hasNext() bool { return al.idx+1 < len(al.addresses) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // equalAddressIgnoringBalAttributes returns true is a and b are considered // equal. This is different from the Equal method on the resolver.Address type // which considers all fields to determine equality. 
Here, we only consider diff --git a/vendor/google.golang.org/grpc/balancer/rls/config.go b/vendor/google.golang.org/grpc/balancer/rls/config.go index c4e59d41b9..ff540aa058 100644 --- a/vendor/google.golang.org/grpc/balancer/rls/config.go +++ b/vendor/google.golang.org/grpc/balancer/rls/config.go @@ -143,14 +143,10 @@ type lbConfigJSON struct { // - childPolicyConfigTargetFieldName: // - must be set and non-empty func (rlsBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { -<<<<<<< HEAD if logger.V(2) { logger.Infof("Received JSON service config: %v", pretty.ToJSON(c)) } -======= - logger.Infof("Received JSON service config: %v", pretty.ToJSON(c)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) cfgJSON := &lbConfigJSON{} if err := json.Unmarshal(c, cfgJSON); err != nil { return nil, fmt.Errorf("rls: json unmarshal failed for service config %+v: %v", string(c), err) diff --git a/vendor/google.golang.org/grpc/balancer/rls/control_channel.go b/vendor/google.golang.org/grpc/balancer/rls/control_channel.go index 06ec3671c3..f2ad8bc720 100644 --- a/vendor/google.golang.org/grpc/balancer/rls/control_channel.go +++ b/vendor/google.golang.org/grpc/balancer/rls/control_channel.go @@ -209,13 +209,9 @@ func (cc *controlChannel) lookup(reqKeys map[string]string, reason rlspb.RouteLo Reason: reason, StaleHeaderData: staleHeaders, } -<<<<<<< HEAD if cc.logger.V(2) { cc.logger.Infof("Sending RLS request %+v", pretty.ToJSON(req)) } -======= - cc.logger.Infof("Sending RLS request %+v", pretty.ToJSON(req)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ctx, cancel := context.WithTimeout(context.Background(), cc.rpcTimeout) defer cancel() diff --git a/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/adaptive.go b/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/adaptive.go index 9cd88f2ffa..6249948ede 100644 --- a/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/adaptive.go +++ b/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/adaptive.go @@ -20,24 +20,15 @@ package adaptive import ( -<<<<<<< HEAD rand "math/rand/v2" -======= - "math/rand" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "sync" "time" ) // For overriding in unittests. var ( -<<<<<<< HEAD timeNowFunc = time.Now randFunc = rand.Float64 -======= - timeNowFunc = func() time.Time { return time.Now() } - randFunc = func() float64 { return rand.Float64() } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) const ( diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 0aa8977286..80a42d2251 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -22,11 +22,7 @@ package roundrobin import ( -<<<<<<< HEAD rand "math/rand/v2" -======= - "math/rand" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "sync/atomic" "google.golang.org/grpc/balancer" @@ -64,11 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { // Start at a random index, as the same RR balancer rebuilds a new // picker when SubConn states change, and we don't want to apply excess // load to the first server in the list. 
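Note for reviewers: the comment above is the whole rationale for the rand.IntN seed; every SubConn state change rebuilds the picker, and a fixed starting index would concentrate each rebuild's first picks on one backend. The pick path it feeds looks approximately like this (per the surrounding file; not new code in this patch):

    func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
        subConnsLen := uint32(len(p.subConns))
        // p.next starts at rand.IntN(len(scs)); each RPC advances it
        // atomically and wraps around the READY SubConn list.
        nextIndex := atomic.AddUint32(&p.next, 1)
        sc := p.subConns[nextIndex%subConnsLen]
        return balancer.PickResult{SubConn: sc}, nil
    }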
-<<<<<<< HEAD next: uint32(rand.IntN(len(scs))), -======= - next: uint32(rand.Intn(len(scs))), ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } diff --git a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go index 1cb533f7b4..d7b9dc4666 100644 --- a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go @@ -19,38 +19,23 @@ package weightedroundrobin import ( -<<<<<<< HEAD "encoding/json" "fmt" rand "math/rand/v2" -======= - "context" - "encoding/json" - "errors" - "fmt" - "math/rand" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "sync" "sync/atomic" "time" "unsafe" "google.golang.org/grpc/balancer" -<<<<<<< HEAD "google.golang.org/grpc/balancer/endpointsharding" "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" -======= - "google.golang.org/grpc/balancer/base" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "google.golang.org/grpc/balancer/weightedroundrobin/internal" "google.golang.org/grpc/balancer/weightedtarget" "google.golang.org/grpc/connectivity" estats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/internal/grpclog" -<<<<<<< HEAD "google.golang.org/grpc/internal/grpcsync" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) iserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/orca" "google.golang.org/grpc/resolver" @@ -99,7 +84,6 @@ var ( }) ) -<<<<<<< HEAD // endpointSharding which specifies pick first children. var endpointShardingLBConfig serviceconfig.LoadBalancingConfig @@ -110,17 +94,12 @@ func init() { if err != nil { logger.Fatal(err) } -======= -func init() { - balancer.Register(bb{}) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type bb struct{} func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { b := &wrrBalancer{ -<<<<<<< HEAD ClientConn: cc, target: bOpts.Target.String(), metricsRecorder: bOpts.MetricsRecorder, @@ -130,17 +109,6 @@ func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Ba } b.child = endpointsharding.NewBalancer(b, bOpts) -======= - cc: cc, - subConns: resolver.NewAddressMap(), - csEvltr: &balancer.ConnectivityStateEvaluator{}, - scMap: make(map[balancer.SubConn]*weightedSubConn), - connectivityState: connectivity.Connecting, - target: bOpts.Target.String(), - metricsRecorder: bOpts.MetricsRecorder, - } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) b.logger = prefixLogger(b) b.logger.Infof("Created") return b @@ -181,7 +149,6 @@ func (bb) Name() string { return Name } -<<<<<<< HEAD // updateEndpointsLocked updates endpoint weight state based off new update, by // starting and clearing any endpoint weights needed. // @@ -255,38 +222,11 @@ func (b *wrrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error if b.logger.V(2) { b.logger.Infof("UpdateCCS: %v", ccs) } -======= -// wrrBalancer implements the weighted round robin LB policy. -type wrrBalancer struct { - // The following fields are immutable. - cc balancer.ClientConn - logger *grpclog.PrefixLogger - target string - metricsRecorder estats.MetricsRecorder - - // The following fields are only accessed on calls into the LB policy, and - // do not need a mutex. 
-	cfg               *lbConfig            // active config
-	subConns          *resolver.AddressMap // active weightedSubConns mapped by address
-	scMap             map[balancer.SubConn]*weightedSubConn
-	connectivityState connectivity.State // aggregate state
-	csEvltr           *balancer.ConnectivityStateEvaluator
-	resolverErr       error // the last error reported by the resolver; cleared on successful resolution
-	connErr           error // the last connection error; cleared upon leaving TransientFailure
-	stopPicker        func()
-	locality          string
-}
-
-func (b *wrrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error {
-	b.logger.Infof("UpdateCCS: %v", ccs)
-	b.resolverErr = nil
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	cfg, ok := ccs.BalancerConfig.(*lbConfig)
 	if !ok {
 		return fmt.Errorf("wrr: received nil or illegal BalancerConfig (type %T): %v", ccs.BalancerConfig, ccs.BalancerConfig)
 	}
 
-<<<<<<< HEAD
 	// Note: empty endpoints and duplicate addresses across endpoints won't
 	// explicitly error but will have undefined behavior.
 	b.mu.Lock()
@@ -397,96 +337,6 @@ func (b *wrrBalancer) NewSubConn(addrs []resolver.Address, opts balancer.NewSubC
 
 func (b *wrrBalancer) ResolverError(err error) {
 	// Will cause inline picker update from endpoint sharding.
 	b.child.ResolverError(err)
-=======
-	b.cfg = cfg
-	b.locality = weightedtarget.LocalityFromResolverState(ccs.ResolverState)
-	b.updateAddresses(ccs.ResolverState.Addresses)
-
-	if len(ccs.ResolverState.Addresses) == 0 {
-		b.ResolverError(errors.New("resolver produced zero addresses")) // will call regeneratePicker
-		return balancer.ErrBadResolverState
-	}
-
-	b.regeneratePicker()
-
-	return nil
-}
-
-func (b *wrrBalancer) updateAddresses(addrs []resolver.Address) {
-	addrsSet := resolver.NewAddressMap()
-
-	// Loop through new address list and create subconns for any new addresses.
-	for _, addr := range addrs {
-		if _, ok := addrsSet.Get(addr); ok {
-			// Redundant address; skip.
-			continue
-		}
-		addrsSet.Set(addr, nil)
-
-		var wsc *weightedSubConn
-		wsci, ok := b.subConns.Get(addr)
-		if ok {
-			wsc = wsci.(*weightedSubConn)
-		} else {
-			// addr is a new address (not existing in b.subConns).
-			var sc balancer.SubConn
-			sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{
-				StateListener: func(state balancer.SubConnState) {
-					b.updateSubConnState(sc, state)
-				},
-			})
-			if err != nil {
-				b.logger.Warningf("Failed to create new SubConn for address %v: %v", addr, err)
-				continue
-			}
-			wsc = &weightedSubConn{
-				SubConn:           sc,
-				logger:            b.logger,
-				connectivityState: connectivity.Idle,
-				// Initially, we set load reports to off, because they are not
-				// running upon initial weightedSubConn creation.
-				cfg: &lbConfig{EnableOOBLoadReport: false},
-
-				metricsRecorder: b.metricsRecorder,
-				target:          b.target,
-				locality:        b.locality,
-			}
-			b.subConns.Set(addr, wsc)
-			b.scMap[sc] = wsc
-			b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle)
-			sc.Connect()
-		}
-		// Update config for existing weightedSubConn or send update for first
-		// time to new one. Ensures an OOB listener is running if needed
-		// (and stops the existing one if applicable).
-		wsc.updateConfig(b.cfg)
-	}
-
-	// Loop through existing subconns and remove ones that are not in addrs.
-	for _, addr := range b.subConns.Keys() {
-		if _, ok := addrsSet.Get(addr); ok {
-			// Existing address also in new address list; skip.
-			continue
-		}
-		// addr was removed by resolver. Remove.
-		wsci, _ := b.subConns.Get(addr)
-		wsc := wsci.(*weightedSubConn)
-		wsc.SubConn.Shutdown()
-		b.subConns.Delete(addr)
-	}
-}
-
-func (b *wrrBalancer) ResolverError(err error) {
-	b.resolverErr = err
-	if b.subConns.Len() == 0 {
-		b.connectivityState = connectivity.TransientFailure
-	}
-	if b.connectivityState != connectivity.TransientFailure {
-		// No need to update the picker since no error is being returned.
-		return
-	}
-	b.regeneratePicker()
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (b *wrrBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
@@ -494,7 +344,6 @@ func (b *wrrBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Sub
 }
 
 func (b *wrrBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
-<<<<<<< HEAD
 	b.mu.Lock()
 	ew := b.scToWeight[sc]
 	// updates from a no longer relevant SubConn update, nothing to do here but
@@ -536,47 +385,12 @@ func (b *wrrBalancer) updateSubConnState(sc balancer.SubConn, state balancer.Sub
 			ew.stopORCAListener()
 		}
 		ew.pickedSC = nil
-=======
-	wsc := b.scMap[sc]
-	if wsc == nil {
-		b.logger.Errorf("UpdateSubConnState called with an unknown SubConn: %p, %v", sc, state)
-		return
-	}
-	if b.logger.V(2) {
-		logger.Infof("UpdateSubConnState(%+v, %+v)", sc, state)
-	}
-
-	cs := state.ConnectivityState
-
-	if cs == connectivity.TransientFailure {
-		// Save error to be reported via picker.
-		b.connErr = state.ConnectionError
-	}
-
-	if cs == connectivity.Shutdown {
-		delete(b.scMap, sc)
-		// The subconn was removed from b.subConns when the address was removed
-		// in updateAddresses.
-	}
-
-	oldCS := wsc.updateConnectivityState(cs)
-	b.connectivityState = b.csEvltr.RecordTransition(oldCS, cs)
-
-	// Regenerate picker when one of the following happens:
-	//  - this sc entered or left ready
-	//  - the aggregated state of balancer is TransientFailure
-	//    (may need to update error message)
-	if (cs == connectivity.Ready) != (oldCS == connectivity.Ready) ||
-		b.connectivityState == connectivity.TransientFailure {
-		b.regeneratePicker()
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 }
 
 // Close stops the balancer. It cancels any ongoing scheduler updates and
 // stops any ORCA listeners.
 func (b *wrrBalancer) Close() {
-<<<<<<< HEAD
 	b.mu.Lock()
 	if b.stopPicker != nil {
 		b.stopPicker.Fire()
@@ -597,106 +411,17 @@ func (b *wrrBalancer) ExitIdle() {
 	if ei, ok := b.child.(balancer.ExitIdler); ok { // Should always be ok, as child is endpoint sharding.
 		ei.ExitIdle()
 	}
-=======
-	if b.stopPicker != nil {
-		b.stopPicker()
-		b.stopPicker = nil
-	}
-	for _, wsc := range b.scMap {
-		// Ensure any lingering OOB watchers are stopped.
-		wsc.updateConnectivityState(connectivity.Shutdown)
-	}
-}
-
-// ExitIdle is ignored; we always connect to all backends.
-func (b *wrrBalancer) ExitIdle() {}
-
-func (b *wrrBalancer) readySubConns() []*weightedSubConn {
-	var ret []*weightedSubConn
-	for _, v := range b.subConns.Values() {
-		wsc := v.(*weightedSubConn)
-		if wsc.connectivityState == connectivity.Ready {
-			ret = append(ret, wsc)
-		}
-	}
-	return ret
-}
-
-// mergeErrors builds an error from the last connection error and the last
-// resolver error. Must only be called if b.connectivityState is
-// TransientFailure.
-func (b *wrrBalancer) mergeErrors() error {
-	// connErr must always be non-nil unless there are no SubConns, in which
-	// case resolverErr must be non-nil.
-	if b.connErr == nil {
-		return fmt.Errorf("last resolver error: %v", b.resolverErr)
-	}
-	if b.resolverErr == nil {
-		return fmt.Errorf("last connection error: %v", b.connErr)
-	}
-	return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr)
-}
-
-func (b *wrrBalancer) regeneratePicker() {
-	if b.stopPicker != nil {
-		b.stopPicker()
-		b.stopPicker = nil
-	}
-
-	switch b.connectivityState {
-	case connectivity.TransientFailure:
-		b.cc.UpdateState(balancer.State{
-			ConnectivityState: connectivity.TransientFailure,
-			Picker:            base.NewErrPicker(b.mergeErrors()),
-		})
-		return
-	case connectivity.Connecting, connectivity.Idle:
-		// Idle could happen very briefly if all subconns are Idle and we've
-		// asked them to connect but they haven't reported Connecting yet.
-		// Report the same as Connecting since this is temporary.
-		b.cc.UpdateState(balancer.State{
-			ConnectivityState: connectivity.Connecting,
-			Picker:            base.NewErrPicker(balancer.ErrNoSubConnAvailable),
-		})
-		return
-	case connectivity.Ready:
-		b.connErr = nil
-	}
-
-	p := &picker{
-		v:               rand.Uint32(), // start the scheduler at a random point
-		cfg:             b.cfg,
-		subConns:        b.readySubConns(),
-		metricsRecorder: b.metricsRecorder,
-		locality:        b.locality,
-		target:          b.target,
-	}
-	var ctx context.Context
-	ctx, b.stopPicker = context.WithCancel(context.Background())
-	p.start(ctx)
-	b.cc.UpdateState(balancer.State{
-		ConnectivityState: b.connectivityState,
-		Picker:            p,
-	})
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 // picker is the WRR policy's picker. It uses live-updating backend weights to
 // update the scheduler periodically and ensure picks are routed proportional
 // to those weights.
 type picker struct {
-<<<<<<< HEAD
 	scheduler       unsafe.Pointer           // *scheduler; accessed atomically
 	v               uint32                   // incrementing value used by the scheduler; accessed atomically
 	cfg             *lbConfig                // active config when picker created
 	weightedPickers []pickerWeightedEndpoint // all READY pickers
-=======
-	scheduler unsafe.Pointer     // *scheduler; accessed atomically
-	v         uint32             // incrementing value used by the scheduler; accessed atomically
-	cfg       *lbConfig          // active config when picker created
-	subConns  []*weightedSubConn // all READY subconns
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	// The following fields are immutable.
 	target          string
@@ -704,7 +429,6 @@ type picker struct {
 	metricsRecorder estats.MetricsRecorder
 }
 
-<<<<<<< HEAD
 func (p *picker) endpointWeights(recordMetrics bool) []float64 {
 	wp := make([]float64, len(p.weightedPickers))
 	now := internal.TimeNow()
@@ -738,16 +462,6 @@ func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
 		}
 	}
 	return pr, nil
-=======
-func (p *picker) scWeights(recordMetrics bool) []float64 {
-	ws := make([]float64, len(p.subConns))
-	now := internal.TimeNow()
-	for i, wsc := range p.subConns {
-		ws[i] = wsc.weight(now, time.Duration(p.cfg.WeightExpirationPeriod), time.Duration(p.cfg.BlackoutPeriod), recordMetrics)
-	}
-
-	return ws
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (p *picker) inc() uint32 {
@@ -759,15 +473,9 @@ func (p *picker) regenerateScheduler() {
 	atomic.StorePointer(&p.scheduler, unsafe.Pointer(&s))
 }
 
-<<<<<<< HEAD
 func (p *picker) start(stopPicker *grpcsync.Event) {
 	p.regenerateScheduler()
 	if len(p.weightedPickers) == 1 {
-=======
-func (p *picker) start(ctx context.Context) {
-	p.regenerateScheduler()
-	if len(p.subConns) == 1 {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		// No need to regenerate weights with only one backend.
 		return
 	}
@@ -777,11 +485,7 @@ func (p *picker) start(ctx context.Context) {
 		defer ticker.Stop()
 		for {
 			select {
-<<<<<<< HEAD
 			case <-stopPicker.Done():
-=======
-			case <-ctx.Done():
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 				return
 			case <-ticker.C:
 				p.regenerateScheduler()
@@ -790,38 +494,12 @@ func (p *picker) start(ctx context.Context) {
 	}()
 }
 
-<<<<<<< HEAD
 // endpointWeight is the weight for an endpoint. It tracks the SubConn that will
 // be picked for the endpoint, and other parameters relevant to computing the
 // effective weight. When needed, it also tracks connectivity state, listens for
 // metrics updates by implementing the orca.OOBListener interface and manages
 // that listener.
 type endpointWeight struct {
-=======
-func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
-	// Read the scheduler atomically. All scheduler operations are threadsafe,
-	// and if the scheduler is replaced during this usage, we want to use the
-	// scheduler that was live when the pick started.
-	sched := *(*scheduler)(atomic.LoadPointer(&p.scheduler))
-
-	pickedSC := p.subConns[sched.nextIndex()]
-	pr := balancer.PickResult{SubConn: pickedSC.SubConn}
-	if !p.cfg.EnableOOBLoadReport {
-		pr.Done = func(info balancer.DoneInfo) {
-			if load, ok := info.ServerLoad.(*v3orcapb.OrcaLoadReport); ok && load != nil {
-				pickedSC.OnLoadReport(load)
-			}
-		}
-	}
-	return pr, nil
-}
-
-// weightedSubConn is the wrapper of a subconn that holds the subconn and its
-// weight (and other parameters relevant to computing the effective weight).
-// When needed, it also tracks connectivity state, listens for metrics updates
-// by implementing the orca.OOBListener interface and manages that listener.
-type weightedSubConn struct {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	// The following fields are immutable.
 	balancer.SubConn
 	logger *grpclog.PrefixLogger
@@ -833,14 +511,11 @@ type weightedSubConn struct {
 	// do not need a mutex.
 	connectivityState connectivity.State
 	stopORCAListener  func()
-<<<<<<< HEAD
 	// The first SubConn for the endpoint that goes READY when endpoint has no
 	// READY SubConns yet, cleared on that sc disconnecting (i.e. going out of
 	// READY). Represents what pick first will use as it's picked SubConn for
 	// this endpoint.
 	pickedSC balancer.SubConn
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	// The following fields are accessed asynchronously and are protected by
 	// mu.  Note that mu may not be held when calling into the stopORCAListener
@@ -854,19 +529,11 @@ type weightedSubConn struct {
 	cfg *lbConfig
 }
 
-<<<<<<< HEAD
 func (w *endpointWeight) OnLoadReport(load *v3orcapb.OrcaLoadReport) {
 	if w.logger.V(2) {
 		w.logger.Infof("Received load report for subchannel %v: %v", w.SubConn, load)
 	}
 	// Update weights of this endpoint according to the reported load.
-=======
-func (w *weightedSubConn) OnLoadReport(load *v3orcapb.OrcaLoadReport) {
-	if w.logger.V(2) {
-		w.logger.Infof("Received load report for subchannel %v: %v", w.SubConn, load)
-	}
-	// Update weights of this subchannel according to the reported load
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	utilization := load.ApplicationUtilization
 	if utilization == 0 {
 		utilization = load.CpuUtilization
@@ -895,11 +562,7 @@ func (w *weightedSubConn) OnLoadReport(load *v3orcapb.OrcaLoadReport) {
 
 // updateConfig updates the parameters of the WRR policy and
 // stops/starts/restarts the ORCA OOB listener.
-<<<<<<< HEAD
 func (w *endpointWeight) updateConfig(cfg *lbConfig) {
-=======
-func (w *weightedSubConn) updateConfig(cfg *lbConfig) {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	w.mu.Lock()
 	oldCfg := w.cfg
 	w.cfg = cfg
@@ -912,23 +575,12 @@ func (w *weightedSubConn) updateConfig(cfg *lbConfig) {
 		// load reporting disabled, OOBReportingPeriod is always 0.)
 		return
 	}
-<<<<<<< HEAD
 	// (Re)start the listener to use the new config's settings for OOB
 	// reporting.
 	w.updateORCAListener(cfg)
 }
 
 func (w *endpointWeight) updateORCAListener(cfg *lbConfig) {
-=======
-	if w.connectivityState == connectivity.Ready {
-		// (Re)start the listener to use the new config's settings for OOB
-		// reporting.
-		w.updateORCAListener(cfg)
-	}
-}
-
-func (w *weightedSubConn) updateORCAListener(cfg *lbConfig) {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if w.stopORCAListener != nil {
 		w.stopORCAListener()
 	}
@@ -936,7 +588,6 @@ func (w *weightedSubConn) updateORCAListener(cfg *lbConfig) {
 		w.stopORCAListener = nil
 		return
 	}
-<<<<<<< HEAD
 	if w.pickedSC == nil { // No picked SC for this endpoint yet, nothing to listen on.
 		return
 	}
@@ -948,63 +599,11 @@ func (w *weightedSubConn) updateORCAListener(cfg *lbConfig) {
 }
 
 // weight returns the current effective weight of the endpoint, taking into
-=======
-	if w.logger.V(2) {
-		w.logger.Infof("Registering ORCA listener for %v with interval %v", w.SubConn, cfg.OOBReportingPeriod)
-	}
-	opts := orca.OOBListenerOptions{ReportInterval: time.Duration(cfg.OOBReportingPeriod)}
-	w.stopORCAListener = orca.RegisterOOBListener(w.SubConn, w, opts)
-}
-
-func (w *weightedSubConn) updateConnectivityState(cs connectivity.State) connectivity.State {
-	switch cs {
-	case connectivity.Idle:
-		// Always reconnect when idle.
-		w.SubConn.Connect()
-	case connectivity.Ready:
-		// If we transition back to READY state, reset nonEmptySince so that we
-		// apply the blackout period after we start receiving load data. Also
-		// reset lastUpdated to trigger endpoint weight not yet usable in the
-		// case endpoint gets asked what weight it is before receiving a new
-		// load report. Note that we cannot guarantee that we will never receive
-		// lingering callbacks for backend metric reports from the previous
-		// connection after the new connection has been established, but they
-		// should be masked by new backend metric reports from the new
-		// connection by the time the blackout period ends.
-		w.mu.Lock()
-		w.nonEmptySince = time.Time{}
-		w.lastUpdated = time.Time{}
-		cfg := w.cfg
-		w.mu.Unlock()
-		w.updateORCAListener(cfg)
-	}
-
-	oldCS := w.connectivityState
-
-	if oldCS == connectivity.TransientFailure &&
-		(cs == connectivity.Connecting || cs == connectivity.Idle) {
-		// Once a subconn enters TRANSIENT_FAILURE, ignore subsequent IDLE or
-		// CONNECTING transitions to prevent the aggregated state from being
-		// always CONNECTING when many backends exist but are all down.
-		return oldCS
-	}
-
-	w.connectivityState = cs
-
-	return oldCS
-}
-
-// weight returns the current effective weight of the subconn, taking into
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // account the parameters. Returns 0 for blacked out or expired data, which
 // will cause the backend weight to be treated as the mean of the weights of the
 // other backends. If forScheduler is set to true, this function will emit
 // metrics through the metrics registry.
-<<<<<<< HEAD
 func (w *endpointWeight) weight(now time.Time, weightExpirationPeriod, blackoutPeriod time.Duration, recordMetrics bool) (weight float64) {
-=======
-func (w *weightedSubConn) weight(now time.Time, weightExpirationPeriod, blackoutPeriod time.Duration, recordMetrics bool) (weight float64) {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	w.mu.Lock()
 	defer w.mu.Unlock()
 
@@ -1014,11 +613,7 @@ func (w *weightedSubConn) weight(now time.Time, weightExpirationPeriod, blackout
 		}()
 	}
 
-<<<<<<< HEAD
 	// The endpoint has not received a load report (i.e. just turned READY with
-=======
-	// The SubConn has not received a load report (i.e. just turned READY with
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	// no load report).
 	if w.lastUpdated.Equal(time.Time{}) {
 		endpointWeightNotYetUsableMetric.Record(w.metricsRecorder, 1, w.target, w.locality)
diff --git a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go
index ecf378d61f..7d3d6815eb 100644
--- a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go
+++ b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go
@@ -26,7 +26,6 @@ type scheduler interface {
 	nextIndex() int
 }
 
-<<<<<<< HEAD
 // newScheduler uses scWeights to create a new scheduler for selecting endpoints
 // in a picker. It will return a round robin implementation if at least
 // len(scWeights)-1 are zero or there is only a single endpoint, otherwise it
 // will return an Earliest Deadline First (EDF) scheduler implementation that
@@ -35,16 +34,6 @@ type scheduler interface {
 func (p *picker) newScheduler(recordMetrics bool) scheduler {
 	epWeights := p.endpointWeights(recordMetrics)
 	n := len(epWeights)
-=======
-// newScheduler uses scWeights to create a new scheduler for selecting subconns
-// in a picker. It will return a round robin implementation if at least
-// len(scWeights)-1 are zero or there is only a single subconn, otherwise it
-// will return an Earliest Deadline First (EDF) scheduler implementation that
-// selects the subchannels according to their weights.
-func (p *picker) newScheduler(recordMetrics bool) scheduler {
-	scWeights := p.scWeights(recordMetrics)
-	n := len(scWeights)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if n == 0 {
 		return nil
 	}
@@ -57,11 +46,7 @@ func (p *picker) newScheduler(recordMetrics bool) scheduler {
 	sum := float64(0)
 	numZero := 0
 	max := float64(0)
-<<<<<<< HEAD
 	for _, w := range epWeights {
-=======
-	for _, w := range scWeights {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		sum += w
 		if w > max {
 			max = w
@@ -83,11 +68,7 @@ func (p *picker) newScheduler(recordMetrics bool) scheduler {
 	weights := make([]uint16, n)
 	allEqual := true
-<<<<<<< HEAD
 	for i, w := range epWeights {
-=======
-	for i, w := range scWeights {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		if w == 0 {
 			// Backends with weight = 0 use the mean.
 			weights[i] = mean
diff --git a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/weightedroundrobin.go b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/weightedroundrobin.go
index 1d947f6a43..258cdd5db2 100644
--- a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/weightedroundrobin.go
+++ b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/weightedroundrobin.go
@@ -56,7 +56,6 @@ func SetAddrInfo(addr resolver.Address, addrInfo AddrInfo) resolver.Address {
 	return addr
 }
 
-<<<<<<< HEAD
 // SetAddrInfoInEndpoint returns a copy of endpoint in which the Attributes
 // field is updated with addrInfo.
 func SetAddrInfoInEndpoint(endpoint resolver.Endpoint, addrInfo AddrInfo) resolver.Endpoint {
@@ -64,8 +63,6 @@ func SetAddrInfoInEndpoint(endpoint resolver.Endpoint, addrInfo AddrInfo) resolv
 	return endpoint
 }
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // GetAddrInfo returns the AddrInfo stored in the BalancerAttributes field of
 // addr.
 func GetAddrInfo(addr resolver.Address) AddrInfo {
diff --git a/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go
index c1ca4a39d2..a617f6a63a 100644
--- a/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go
+++ b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go
@@ -99,23 +99,16 @@ func LocalityFromResolverState(state resolver.State) string {
 // creates/deletes sub-balancers and sends them update. addresses are split into
 // groups based on hierarchy path.
 func (b *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
-<<<<<<< HEAD
 	if b.logger.V(2) {
 		b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig))
 	}
-=======
-	b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig))
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	newConfig, ok := s.BalancerConfig.(*LBConfig)
 	if !ok {
 		return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig)
 	}
 	addressesSplit := hierarchy.Group(s.ResolverState.Addresses)
-<<<<<<< HEAD
 	endpointsSplit := hierarchy.GroupEndpoints(s.ResolverState.Endpoints)
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	b.stateAggregator.PauseStateUpdates()
 	defer b.stateAggregator.ResumeStateUpdates()
@@ -163,10 +156,7 @@ func (b *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnStat
 		_ = b.bg.UpdateClientConnState(name, balancer.ClientConnState{
 			ResolverState: resolver.State{
 				Addresses:     addressesSplit[name],
-<<<<<<< HEAD
 				Endpoints:     endpointsSplit[name],
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 				ServiceConfig: s.ResolverState.ServiceConfig,
 				Attributes:    s.ResolverState.Attributes.WithValue(localityKey, name),
 			},
diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go
index b034539fa9..c2688376ae 100644
--- a/vendor/google.golang.org/grpc/balancer_wrapper.go
+++ b/vendor/google.golang.org/grpc/balancer_wrapper.go
@@ -34,7 +34,6 @@ import (
 	"google.golang.org/grpc/status"
 )
 
-<<<<<<< HEAD
 var (
 	setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address))

 	// noOpRegisterHealthListenerFn is used when client side health checking is
 	// disabled. It sends a single READY update on the registered listener.
 	noOpRegisterHealthListenerFn = func(_ context.Context, listener func(balancer.SubConnState)) func() {
 		listener(balancer.SubConnState{ConnectivityState: connectivity.Ready})
 		return func() {}
 	}
 )
-=======
-var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address))
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 // ccBalancerWrapper sits between the ClientConn and the Balancer.
 //
@@ -201,10 +197,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer
 		ac:            ac,
 		producers:     make(map[balancer.ProducerBuilder]*refCountedProducer),
 		stateListener: opts.StateListener,
-<<<<<<< HEAD
 		healthData:    newHealthData(connectivity.Idle),
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 	ac.acbw = acbw
 	return acbw, nil
@@ -270,17 +263,13 @@ func (ccb *ccBalancerWrapper) Target() string {
 // acBalancerWrapper is a wrapper on top of ac for balancers.
 // It implements balancer.SubConn interface.
 type acBalancerWrapper struct {
-<<<<<<< HEAD
 	internal.EnforceSubConnEmbedding
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	ac            *addrConn          // read-only
 	ccb           *ccBalancerWrapper // read-only
 	stateListener func(balancer.SubConnState)
 
 	producersMu sync.Mutex
 	producers   map[balancer.ProducerBuilder]*refCountedProducer
-<<<<<<< HEAD
 
 	// Access to healthData is protected by healthMu.
 	healthMu sync.Mutex
@@ -307,8 +296,6 @@ func newHealthData(s connectivity.State) *healthData {
 		connectivityState:   s,
 		closeHealthProducer: func() {},
 	}
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 // updateState is invoked by grpc to push a subConn state update to the
@@ -328,7 +315,6 @@ func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolve
 		if s == connectivity.Ready {
 			setConnectedAddress(&scs, curAddr)
 		}
-<<<<<<< HEAD
 		// Invalidate the health listener by updating the healthData.
 		acbw.healthMu.Lock()
 		// A race may occur if a health listener is registered soon after the
@@ -347,8 +333,6 @@ func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolve
 		acbw.healthData = newHealthData(scs.ConnectivityState)
 		acbw.healthMu.Unlock()
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		acbw.stateListener(scs)
 	})
 }
@@ -443,7 +427,6 @@ func (acbw *acBalancerWrapper) closeProducers() {
 		delete(acbw.producers, pb)
 	}
 }
-<<<<<<< HEAD
 
 // healthProducerRegisterFn is a type alias for the health producer's function
 // for registering listeners.
@@ -530,5 +513,3 @@ func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.Sub
 		hd.closeHealthProducer = registerFn(ctx, listenerWrapper)
 	})
 }
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
index 01fc013464..21dd72969a 100644
--- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
+++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
@@ -18,11 +18,7 @@
 
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-<<<<<<< HEAD
 // 	protoc-gen-go v1.35.2
-=======
-// 	protoc-gen-go v1.34.2
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // 	protoc        v5.27.1
 // source: grpc/binlog/v1/binarylog.proto
 
@@ -278,17 +274,9 @@ type GrpcLogEntry struct {
 
 func (x *GrpcLogEntry) Reset() {
 	*x = GrpcLogEntry{}
-<<<<<<< HEAD
 	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *GrpcLogEntry) String() string {
@@ -299,11 +287,7 @@ func (*GrpcLogEntry) ProtoMessage() {}
 
 func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0]
-<<<<<<< HEAD
 	if x != nil {
-=======
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -454,17 +438,9 @@ type ClientHeader struct {
 
 func (x *ClientHeader) Reset() {
 	*x = ClientHeader{}
-<<<<<<< HEAD
 	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *ClientHeader) String() string {
@@ -475,11 +451,7 @@ func (*ClientHeader) ProtoMessage() {}
 
 func (x *ClientHeader) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1]
-<<<<<<< HEAD
 	if x != nil {
-=======
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -533,17 +505,9 @@ type ServerHeader struct {
 
 func (x *ServerHeader) Reset() {
 	*x = ServerHeader{}
-<<<<<<< HEAD
 	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *ServerHeader) String() string {
@@ -554,11 +518,7 @@ func (*ServerHeader) ProtoMessage() {}
 
 func (x *ServerHeader) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2]
-<<<<<<< HEAD
 	if x != nil {
-=======
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -599,17 +559,9 @@ type Trailer struct {
 
 func (x *Trailer) Reset() {
 	*x = Trailer{}
-<<<<<<< HEAD
 	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *Trailer) String() string {
@@ -620,11 +572,7 @@ func (*Trailer) ProtoMessage() {}
 
 func (x *Trailer) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3]
-<<<<<<< HEAD
 	if x != nil {
-=======
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -682,17 +630,9 @@ type Message struct {
 
 func (x *Message) Reset() {
 	*x = Message{}
-<<<<<<< HEAD
 	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *Message) String() string {
@@ -703,11 +643,7 @@ func (*Message) ProtoMessage() {}
 
 func (x *Message) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4]
-<<<<<<< HEAD
 	if x != nil {
-=======
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -767,17 +703,9 @@ type Metadata struct {
 
 func (x *Metadata) Reset() {
 	*x = Metadata{}
-<<<<<<< HEAD
 	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *Metadata) String() string {
@@ -788,11 +716,7 @@ func (*Metadata) ProtoMessage() {}
 
 func (x *Metadata) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5]
-<<<<<<< HEAD
 	if x != nil {
-=======
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -826,17 +750,9 @@ type MetadataEntry struct {
 
 func (x *MetadataEntry) Reset() {
 	*x = MetadataEntry{}
-<<<<<<< HEAD
 	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *MetadataEntry) String() string {
@@ -847,11 +763,7 @@ func (*MetadataEntry) ProtoMessage() {}
 
 func (x *MetadataEntry) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6]
-<<<<<<< HEAD
 	if x != nil {
-=======
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -894,17 +806,9 @@ type Address struct {
 
 func (x *Address) Reset() {
 	*x = Address{}
-<<<<<<< HEAD
 	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *Address) String() string {
@@ -915,11 +819,7 @@ func (*Address) ProtoMessage() {}
 
 func (x *Address) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7]
-<<<<<<< HEAD
 	if x != nil {
-=======
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1141,107 +1041,6 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
 	if File_grpc_binlog_v1_binarylog_proto != nil {
 		return
 	}
-<<<<<<< HEAD
-=======
-	if !protoimpl.UnsafeEnabled {
-		file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v any, i int) any {
-			switch v := v.(*GrpcLogEntry); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v any, i int) any {
-			switch v := v.(*ClientHeader); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v any, i int) any {
-			switch v := v.(*ServerHeader); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v any, i int) any {
-			switch v := v.(*Trailer); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v any, i int) any {
-			switch v := v.(*Message); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v any, i int) any {
-			switch v := v.(*Metadata); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v any, i int) any {
-			switch v := v.(*MetadataEntry); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v any, i int) any {
-			switch v := v.(*Address); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{
 		(*GrpcLogEntry_ClientHeader)(nil),
 		(*GrpcLogEntry_ServerHeader)(nil),
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index 4c9ae4fb1b..4f57b55434 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -775,14 +775,7 @@ func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error)
 		}
 	}
 
-<<<<<<< HEAD
 	balCfg := cc.sc.lbConfig
-=======
-	var balCfg serviceconfig.LoadBalancingConfig
-	if cc.sc != nil && cc.sc.lbConfig != nil {
-		balCfg = cc.sc.lbConfig
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	bw := cc.balancerWrapper
 	cc.mu.Unlock()
 
@@ -1378,11 +1371,7 @@ func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address,
 	defer cancel()
 	copts.ChannelzParent = ac.channelz
 
-<<<<<<< HEAD
 	newTr, err := transport.NewHTTP2Client(connectCtx, ac.cc.ctx, addr, copts, onClose)
-=======
-	newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err != nil {
 		if logger.V(2) {
 			logger.Infof("Creating new client transport to %q: %v", addr, err)
@@ -1456,11 +1445,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
 	if !ac.scopts.HealthCheckEnabled {
 		return
 	}
-<<<<<<< HEAD
 	healthCheckFunc := internal.HealthCheckFunc
-=======
-	healthCheckFunc := ac.cc.dopts.healthCheckFunc
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if healthCheckFunc == nil {
 		// The health package is not imported to set health check function.
 		//
@@ -1492,11 +1477,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
 	}
 	// Start the health checking stream.
 	go func() {
-<<<<<<< HEAD
 		err := healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName)
-=======
-		err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		if err != nil {
 			if status.Code(err) == codes.Unimplemented {
 				channelz.Error(logger, ac.channelz, "Subchannel health check is unimplemented at server side, thus health check is disabled")
diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go
index e3c3dd529a..959c2f99d4 100644
--- a/vendor/google.golang.org/grpc/codec.go
+++ b/vendor/google.golang.org/grpc/codec.go
@@ -71,11 +71,7 @@ func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) {
 	if err != nil {
 		return nil, err
 	}
-<<<<<<< HEAD
 	return mem.BufferSlice{mem.SliceBuffer(data)}, nil
-=======
-	return mem.BufferSlice{mem.NewBuffer(&data, nil)}, nil
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) {
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go
index 2403b6e775..40e42b6ae5 100644
--- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go
@@ -17,11 +17,7 @@
 
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-<<<<<<< HEAD
 // 	protoc-gen-go v1.35.2
-=======
-// 	protoc-gen-go v1.34.2
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // 	protoc        v5.27.1
 // source: grpc/gcp/altscontext.proto
 
@@ -64,17 +60,9 @@ type AltsContext struct {
 
 func (x *AltsContext) Reset() {
 	*x = AltsContext{}
-<<<<<<< HEAD
 	mi := &file_grpc_gcp_altscontext_proto_msgTypes[0]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_grpc_gcp_altscontext_proto_msgTypes[0]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *AltsContext) String() string {
@@ -85,11 +73,7 @@ func (*AltsContext) ProtoMessage() {}
 
 func (x *AltsContext) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_gcp_altscontext_proto_msgTypes[0]
-<<<<<<< HEAD
 	if x != nil {
-=======
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -238,23 +222,6 @@ func file_grpc_gcp_altscontext_proto_init() {
 		return
 	}
 	file_grpc_gcp_transport_security_common_proto_init()
-<<<<<<< HEAD
-=======
-	if !protoimpl.UnsafeEnabled {
-		file_grpc_gcp_altscontext_proto_msgTypes[0].Exporter = func(v any, i int) any {
-			switch v := v.(*AltsContext); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go
index f515fa0f5e..2993bbfab1 100644
--- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go
@@ -17,11 +17,7 @@
 
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-<<<<<<< HEAD
 // 	protoc-gen-go v1.35.2
-=======
-// 	protoc-gen-go v1.34.2
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // 	protoc        v5.27.1
 // source: grpc/gcp/handshaker.proto
 
@@ -158,17 +154,9 @@ type Endpoint struct {
 
 func (x *Endpoint) Reset() {
 	*x = Endpoint{}
-<<<<<<< HEAD
 	mi := &file_grpc_gcp_handshaker_proto_msgTypes[0]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_grpc_gcp_handshaker_proto_msgTypes[0]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *Endpoint) String() string {
@@ -179,11 +167,7 @@ func (*Endpoint) ProtoMessage() {}
 
 func (x *Endpoint) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_gcp_handshaker_proto_msgTypes[0]
-<<<<<<< HEAD
 	if x != nil {
-=======
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -235,17 +219,9 @@ type Identity struct {
 
 func (x *Identity) Reset() {
 	*x = Identity{}
-<<<<<<< HEAD
 	mi := &file_grpc_gcp_handshaker_proto_msgTypes[1]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_grpc_gcp_handshaker_proto_msgTypes[1]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *Identity) String() string {
@@ -256,11 +232,7 @@ func (*Identity) ProtoMessage() {}
 
 func (x *Identity) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_gcp_handshaker_proto_msgTypes[1]
-<<<<<<< HEAD
 	if x != nil {
-=======
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -364,17 +336,9 @@ type StartClientHandshakeReq struct {
 
 func (x *StartClientHandshakeReq) Reset() {
 	*x = StartClientHandshakeReq{}
-<<<<<<< HEAD
 	mi := &file_grpc_gcp_handshaker_proto_msgTypes[2]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_grpc_gcp_handshaker_proto_msgTypes[2]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *StartClientHandshakeReq) String() string {
@@ -385,11 +349,7 @@ func (*StartClientHandshakeReq) ProtoMessage() {}
 
 func (x *StartClientHandshakeReq) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_gcp_handshaker_proto_msgTypes[2]
-<<<<<<< HEAD
 	if x != nil {
-=======
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -501,17 +461,9 @@ type ServerHandshakeParameters struct {
 
 func (x *ServerHandshakeParameters) Reset() {
 	*x = ServerHandshakeParameters{}
-<<<<<<< HEAD
 	mi := &file_grpc_gcp_handshaker_proto_msgTypes[3]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_grpc_gcp_handshaker_proto_msgTypes[3]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *ServerHandshakeParameters) String() string {
@@ -522,11 +474,7 @@ func (*ServerHandshakeParameters) ProtoMessage() {}
 
 func (x *ServerHandshakeParameters) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_gcp_handshaker_proto_msgTypes[3]
-<<<<<<< HEAD
 	if x != nil {
-=======
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -593,17 +541,9 @@ type StartServerHandshakeReq struct {
 
 func (x *StartServerHandshakeReq) Reset() {
 	*x = StartServerHandshakeReq{}
-<<<<<<< HEAD
 	mi := &file_grpc_gcp_handshaker_proto_msgTypes[4]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_grpc_gcp_handshaker_proto_msgTypes[4]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *StartServerHandshakeReq) String() string {
@@ -614,11 +554,7 @@ func (*StartServerHandshakeReq) ProtoMessage() {}
 
 func (x *StartServerHandshakeReq) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_gcp_handshaker_proto_msgTypes[4]
-<<<<<<< HEAD
 	if x != nil {
-=======
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -699,17 +635,9 @@ type NextHandshakeMessageReq struct {
 
 func (x *NextHandshakeMessageReq) Reset() {
 	*x = NextHandshakeMessageReq{}
-<<<<<<< HEAD
 	mi := &file_grpc_gcp_handshaker_proto_msgTypes[5]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_grpc_gcp_handshaker_proto_msgTypes[5]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *NextHandshakeMessageReq) String() string {
@@ -720,11 +648,7 @@ func (*NextHandshakeMessageReq) ProtoMessage() {}
 
 func (x *NextHandshakeMessageReq) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_gcp_handshaker_proto_msgTypes[5]
-<<<<<<< HEAD
 	if x != nil {
-=======
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -768,17 +692,9 @@ type HandshakerReq struct {
 
 func (x *HandshakerReq) Reset() {
 	*x = HandshakerReq{}
-<<<<<<< HEAD
 	mi := &file_grpc_gcp_handshaker_proto_msgTypes[6]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_grpc_gcp_handshaker_proto_msgTypes[6]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *HandshakerReq) String() string {
@@ -789,11 +705,7 @@ func (*HandshakerReq) ProtoMessage() {}
 
 func (x *HandshakerReq) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_gcp_handshaker_proto_msgTypes[6]
-<<<<<<< HEAD
 	if x != nil {
-=======
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -890,17 +802,9 @@ type HandshakerResult struct {
 
 func (x *HandshakerResult) Reset() {
 	*x = HandshakerResult{}
-<<<<<<< HEAD
 	mi := &file_grpc_gcp_handshaker_proto_msgTypes[7]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_grpc_gcp_handshaker_proto_msgTypes[7]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *HandshakerResult) String() string {
@@ -911,11 +815,7 @@ func (*HandshakerResult) ProtoMessage() {}
 
 func (x *HandshakerResult) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_gcp_handshaker_proto_msgTypes[7]
-<<<<<<< HEAD
 	if x != nil {
-=======
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -999,17 +899,9 @@ type HandshakerStatus struct {
 
 func (x *HandshakerStatus) Reset() {
 	*x = HandshakerStatus{}
-<<<<<<< HEAD
 	mi := &file_grpc_gcp_handshaker_proto_msgTypes[8]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_grpc_gcp_handshaker_proto_msgTypes[8]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *HandshakerStatus) String() string {
@@ -1020,11 +912,7 @@ func (*HandshakerStatus) ProtoMessage() {}
 
 func (x *HandshakerStatus) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_gcp_handshaker_proto_msgTypes[8]
-<<<<<<< HEAD
 	if x != nil {
-=======
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1077,17 +965,9 @@ type HandshakerResp struct {
 
 func (x *HandshakerResp) Reset() {
 	*x = HandshakerResp{}
-<<<<<<< HEAD
 	mi := &file_grpc_gcp_handshaker_proto_msgTypes[9]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_grpc_gcp_handshaker_proto_msgTypes[9]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *HandshakerResp) String() string {
@@ -1098,11 +978,7 @@ func (*HandshakerResp) ProtoMessage() {}
 
 func (x *HandshakerResp) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_gcp_handshaker_proto_msgTypes[9]
-<<<<<<< HEAD
 	if x != nil {
-=======
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1417,131 +1293,6 @@ func file_grpc_gcp_handshaker_proto_init() {
 		return
 	}
 	file_grpc_gcp_transport_security_common_proto_init()
-<<<<<<< HEAD
-=======
-	if !protoimpl.UnsafeEnabled {
-		file_grpc_gcp_handshaker_proto_msgTypes[0].Exporter = func(v any, i int) any {
-			switch v := v.(*Endpoint); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_grpc_gcp_handshaker_proto_msgTypes[1].Exporter = func(v any, i int) any {
-			switch v := v.(*Identity); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_grpc_gcp_handshaker_proto_msgTypes[2].Exporter = func(v any, i int) any {
-			switch v := v.(*StartClientHandshakeReq); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_grpc_gcp_handshaker_proto_msgTypes[3].Exporter = func(v any, i int) any {
-			switch v := v.(*ServerHandshakeParameters); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_grpc_gcp_handshaker_proto_msgTypes[4].Exporter = func(v any, i int) any {
-			switch v := v.(*StartServerHandshakeReq); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_grpc_gcp_handshaker_proto_msgTypes[5].Exporter = func(v any, i int) any {
-			switch v := v.(*NextHandshakeMessageReq); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_grpc_gcp_handshaker_proto_msgTypes[6].Exporter = func(v any, i int) any {
-			switch v := v.(*HandshakerReq); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_grpc_gcp_handshaker_proto_msgTypes[7].Exporter = func(v any, i int) any {
-			switch v := v.(*HandshakerResult); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_grpc_gcp_handshaker_proto_msgTypes[8].Exporter = func(v any, i int) any {
-			switch v := v.(*HandshakerStatus); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_grpc_gcp_handshaker_proto_msgTypes[9].Exporter = func(v any, i int) any {
-			switch v := v.(*HandshakerResp); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	file_grpc_gcp_handshaker_proto_msgTypes[1].OneofWrappers = []any{
 		(*Identity_ServiceAccount)(nil),
 		(*Identity_Hostname)(nil),
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go
index 1547dce067..a8d5c4857b 100644
--- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go
@@ -17,11 +17,7 @@
 
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-<<<<<<< HEAD
 // 	protoc-gen-go v1.35.2
-=======
-// 	protoc-gen-go v1.34.2
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // 	protoc        v5.27.1
 // source: grpc/gcp/transport_security_common.proto
 
@@ -106,17 +102,9 @@ type RpcProtocolVersions struct {
 
 func (x *RpcProtocolVersions) Reset() {
 	*x = RpcProtocolVersions{}
-<<<<<<< HEAD
 	mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[0]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[0]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *RpcProtocolVersions) String() string {
@@ -127,11 +115,7 @@ func (*RpcProtocolVersions) ProtoMessage() {}
 
 func (x *RpcProtocolVersions) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[0]
-<<<<<<< HEAD
 	if x != nil {
-=======
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -172,17 +156,9 @@ type RpcProtocolVersions_Version struct {
 
 func (x *RpcProtocolVersions_Version) Reset() {
 	*x = RpcProtocolVersions_Version{}
-<<<<<<< HEAD
 	mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[1]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[1]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *RpcProtocolVersions_Version) String() string {
@@ -193,11 +169,7 @@ func (*RpcProtocolVersions_Version) ProtoMessage() {}
 
 func (x *RpcProtocolVersions_Version) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[1]
-<<<<<<< HEAD
 	if x != nil {
-=======
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -297,35 +269,6 @@ func file_grpc_gcp_transport_security_common_proto_init() {
 	if File_grpc_gcp_transport_security_common_proto != nil {
 		return
 	}
-<<<<<<< HEAD
-=======
-	if !protoimpl.UnsafeEnabled {
-		file_grpc_gcp_transport_security_common_proto_msgTypes[0].Exporter = func(v any, i int) any {
-			switch v := v.(*RpcProtocolVersions); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_grpc_gcp_transport_security_common_proto_msgTypes[1].Exporter = func(v any, i int) any {
-			switch v := v.(*RpcProtocolVersions_Version); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
diff --git a/vendor/google.golang.org/grpc/credentials/google/google.go b/vendor/google.golang.org/grpc/credentials/google/google.go
index 572cb8c4de..5a9c9461f0 100644
--- a/vendor/google.golang.org/grpc/credentials/google/google.go
+++ b/vendor/google.golang.org/grpc/credentials/google/google.go
@@ -22,10 +22,6 @@ package google
 
 import (
"context" "fmt" -<<<<<<< HEAD -======= - "time" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/alts" @@ -34,11 +30,7 @@ import ( "google.golang.org/grpc/internal" ) -<<<<<<< HEAD const defaultCloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" -======= -const tokenRequestTimeout = 30 * time.Second ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var logger = grpclog.Component("credentials") @@ -46,12 +38,9 @@ var logger = grpclog.Component("credentials") type DefaultCredentialsOptions struct { // PerRPCCreds is a per RPC credentials that is passed to a bundle. PerRPCCreds credentials.PerRPCCredentials -<<<<<<< HEAD // ALTSPerRPCCreds is a per RPC credentials that, if specified, will // supercede PerRPCCreds above for and only for ALTS connections. ALTSPerRPCCreds credentials.PerRPCCredentials -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // NewDefaultCredentialsWithOptions returns a credentials bundle that is @@ -60,31 +49,21 @@ type DefaultCredentialsOptions struct { // This API is experimental. func NewDefaultCredentialsWithOptions(opts DefaultCredentialsOptions) credentials.Bundle { if opts.PerRPCCreds == nil { -<<<<<<< HEAD var err error // If the ADC ends up being Compute Engine Credentials, this context // won't be used. Otherwise, the context dictates all the subsequent // token requests via HTTP. So we cannot have any deadline or timeout. opts.PerRPCCreds, err = newADC(context.TODO()) -======= - ctx, cancel := context.WithTimeout(context.Background(), tokenRequestTimeout) - defer cancel() - var err error - opts.PerRPCCreds, err = newADC(ctx) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { logger.Warningf("NewDefaultCredentialsWithOptions: failed to create application oauth: %v", err) } } -<<<<<<< HEAD if opts.ALTSPerRPCCreds != nil { opts.PerRPCCreds = &dualPerRPCCreds{ perRPCCreds: opts.PerRPCCreds, altsPerRPCCreds: opts.ALTSPerRPCCreds, } } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c := &creds{opts: opts} bundle, err := c.NewWithMode(internal.CredsBundleModeFallback) if err != nil { @@ -143,11 +122,7 @@ var ( return alts.NewClientCreds(alts.DefaultClientOptions()) } newADC = func(ctx context.Context) (credentials.PerRPCCredentials, error) { -<<<<<<< HEAD return oauth.NewApplicationDefault(ctx, defaultCloudPlatformScope) -======= - return oauth.NewApplicationDefault(ctx) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } ) @@ -177,7 +152,6 @@ func (c *creds) NewWithMode(mode string) (credentials.Bundle, error) { return newCreds, nil } -<<<<<<< HEAD // dualPerRPCCreds implements credentials.PerRPCCredentials by embedding the // fallback PerRPCCredentials and the ALTS one. 
diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go
index c19d5f0a8d..bd5fe22b6a 100644
--- a/vendor/google.golang.org/grpc/credentials/tls.go
+++ b/vendor/google.golang.org/grpc/credentials/tls.go
@@ -32,11 +32,8 @@ import (
 	"google.golang.org/grpc/internal/envconfig"
 )
 
-<<<<<<< HEAD
 const alpnFailureHelpMessage = "If you upgraded from a grpc-go version earlier than 1.67, your TLS connections may have stopped working due to ALPN enforcement. For more details, see: https://github.com/grpc/grpc-go/issues/434"
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 var logger = grpclog.Component("credentials")
 
 // TLSInfo contains the auth information for a TLS authenticated connection.
@@ -133,11 +130,7 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon
 	if np == "" {
 		if envconfig.EnforceALPNEnabled {
 			conn.Close()
-<<<<<<< HEAD
 			return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property. %s", alpnFailureHelpMessage)
-=======
-			return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property")
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		}
 		logger.Warningf("Allowing TLS connection to server %q with ALPN disabled. TLS connections to servers with ALPN disabled will be disallowed in future grpc-go releases", cfg.ServerName)
 	}
@@ -167,11 +160,7 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error)
 	if cs.NegotiatedProtocol == "" {
 		if envconfig.EnforceALPNEnabled {
 			conn.Close()
-<<<<<<< HEAD
 			return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property. %s", alpnFailureHelpMessage)
-=======
-			return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property")
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		} else if logger.V(2) {
 			logger.Info("Allowing TLS connection from client with ALPN disabled. TLS connections with ALPN disabled will be disallowed in future grpc-go releases")
 		}
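// [Editor's note] With ALPN enforcement (alpnFailureHelpMessage above), TLS
// peers must negotiate "h2". A hedged sketch of a server-side config that
// satisfies the check; grpc-go's credentials.NewTLS normally appends "h2"
// itself, so setting NextProtos explicitly matters mainly for hand-rolled
// listeners or TLS-terminating proxies in front of a gRPC server:
import (
	"crypto/tls"

	"google.golang.org/grpc/credentials"
)

func serverTransportCreds(cert tls.Certificate) credentials.TransportCredentials {
	return credentials.NewTLS(&tls.Config{
		Certificates: []tls.Certificate{cert},
		NextProtos:   []string{"h2"}, // required once ALPN is enforced
	})
}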
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
index 088864d328..f3a045296a 100644
--- a/vendor/google.golang.org/grpc/dialoptions.go
+++ b/vendor/google.golang.org/grpc/dialoptions.go
@@ -87,10 +87,6 @@ type dialOptions struct {
 	disableServiceConfig bool
 	disableRetry         bool
 	disableHealthCheck   bool
-<<<<<<< HEAD
-=======
-	healthCheckFunc      internal.HealthChecker
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	minConnectTimeout           func() time.Duration
 	defaultServiceConfig        *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON.
 	defaultServiceConfigRawJSON *string
@@ -432,14 +428,11 @@ func WithTimeout(d time.Duration) DialOption {
 // returned by f, gRPC checks the error's Temporary() method to decide if it
 // should try to reconnect to the network address.
 //
-<<<<<<< HEAD
 // Note that gRPC by default performs name resolution on the target passed to
 // NewClient. To bypass name resolution and cause the target string to be
 // passed directly to the dialer here instead, use the "passthrough" resolver
 // by specifying it in the target string, e.g. "passthrough:target".
 //
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // Note: All supported releases of Go (as of December 2023) override the OS
 // defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
 // with OS defaults for keepalive time and interval, use a net.Dialer that sets
@@ -456,13 +449,6 @@ func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOp
 	})
 }
 
-<<<<<<< HEAD
-=======
-func init() {
-	internal.WithHealthCheckFunc = withHealthCheckFunc
-}
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // WithDialer returns a DialOption that specifies a function to use for dialing
 // network addresses. If FailOnNonTempDialError() is set to true, and an error
 // is returned by f, gRPC checks the error's Temporary() method to decide if it
@@ -676,19 +662,6 @@ func WithDisableHealthCheck() DialOption {
 	})
 }
 
-<<<<<<< HEAD
-=======
-// withHealthCheckFunc replaces the default health check function with the
-// provided one. It makes tests easier to change the health check function.
-//
-// For testing purpose only.
-func withHealthCheckFunc(f internal.HealthChecker) DialOption {
-	return newFuncDialOption(func(o *dialOptions) {
-		o.healthCheckFunc = f
-	})
-}
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func defaultDialOptions() dialOptions {
 	return dialOptions{
 		copts: transport.ConnectOptions{
@@ -699,10 +672,6 @@ func defaultDialOptions() dialOptions {
 			BufferPool:      mem.DefaultBufferPool(),
 		},
 		bs:              internalbackoff.DefaultExponential,
-<<<<<<< HEAD
-=======
-		healthCheckFunc: internal.HealthCheckFunc,
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		idleTimeout:     30 * time.Minute,
 		defaultScheme:   "dns",
 		maxCallAttempts: defaultMaxCallAttempts,
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
index 6de1e7ce0c..ad75313a18 100644
--- a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
+++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
@@ -23,10 +23,7 @@ import (
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/internal"
-<<<<<<< HEAD
 	"google.golang.org/grpc/stats"
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 )
 
 func init() {
@@ -38,11 +35,7 @@ var logger = grpclog.Component("metrics-registry")
 // DefaultMetrics are the default metrics registered through global metrics
 // registry. This is written to at initialization time only, and is read only
 // after initialization.
-<<<<<<< HEAD
 var DefaultMetrics = stats.NewMetricSet()
-=======
-var DefaultMetrics = NewMetrics()
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 // MetricDescriptor is the data for a registered metric.
 type MetricDescriptor struct {
@@ -50,11 +43,7 @@ type MetricDescriptor struct {
 	// (including any per call metrics). See
 	// https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions
 	// for metric naming conventions.
-<<<<<<< HEAD
 	Name string
-=======
-	Name Metric
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	// The description of this metric.
 	Description string
 	// The unit (e.g. entries, seconds) of this metric.
@@ -166,25 +155,16 @@ func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels .
 }
 
 // registeredMetrics are the registered metric descriptor names.
-<<<<<<< HEAD
 var registeredMetrics = make(map[string]bool)
-=======
-var registeredMetrics = make(map[Metric]bool)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 // metricsRegistry contains all of the registered metrics.
 //
 // This is written to only at init time, and read only after that.
-<<<<<<< HEAD
 var metricsRegistry = make(map[string]*MetricDescriptor)
-=======
-var metricsRegistry = make(map[Metric]*MetricDescriptor)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 // DescriptorForMetric returns the MetricDescriptor from the global registry.
 //
 // Returns nil if MetricDescriptor not present.
-<<<<<<< HEAD
 func DescriptorForMetric(metricName string) *MetricDescriptor {
 	return metricsRegistry[metricName]
 }
@@ -196,19 +176,6 @@ func registerMetric(metricName string, def bool) {
 	registeredMetrics[metricName] = true
 	if def {
 		DefaultMetrics = DefaultMetrics.Add(metricName)
-=======
-func DescriptorForMetric(metric Metric) *MetricDescriptor {
-	return metricsRegistry[metric]
-}
-
-func registerMetric(name Metric, def bool) {
-	if registeredMetrics[name] {
-		logger.Fatalf("metric %v already registered", name)
-	}
-	registeredMetrics[name] = true
-	if def {
-		DefaultMetrics = DefaultMetrics.Add(name)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 }
 
@@ -290,13 +257,8 @@ func snapshotMetricsRegistryForTesting() func() {
 	oldRegisteredMetrics := registeredMetrics
 	oldMetricsRegistry := metricsRegistry
 
-<<<<<<< HEAD
 	registeredMetrics = make(map[string]bool)
 	metricsRegistry = make(map[string]*MetricDescriptor)
-=======
-	registeredMetrics = make(map[Metric]bool)
-	metricsRegistry = make(map[Metric]*MetricDescriptor)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	maps.Copy(registeredMetrics, registeredMetrics)
 	maps.Copy(metricsRegistry, metricsRegistry)
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
index 895a8eb9ef..ee1423605a 100644
--- a/vendor/google.golang.org/grpc/experimental/stats/metrics.go
+++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
@@ -19,11 +19,7 @@
 // Package stats contains experimental metrics/stats API's.
 package stats
 
-<<<<<<< HEAD
 import "google.golang.org/grpc/stats"
-=======
-import "maps"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 // MetricsRecorder records on metrics derived from metric registry.
 type MetricsRecorder interface {
@@ -44,7 +40,6 @@ type MetricsRecorder interface {
 	RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string)
 }
 
-<<<<<<< HEAD
 // Metrics is an experimental legacy alias of the now-stable stats.MetricSet.
 // Metrics will be deleted in a future release.
 type Metrics = stats.MetricSet
@@ -56,77 +51,4 @@ type Metric = string
 // stats.NewMetricSet. NewMetrics will be deleted in a future release.
 func NewMetrics(metrics ...Metric) *Metrics {
 	return stats.NewMetricSet(metrics...)
-=======
-// Metric is an identifier for a metric.
-type Metric string
-
-// Metrics is a set of metrics to record. Once created, Metrics is immutable,
-// however Add and Remove can make copies with specific metrics added or
-// removed, respectively.
-//
-// Do not construct directly; use NewMetrics instead.
-type Metrics struct {
-	// metrics are the set of metrics to initialize.
-	metrics map[Metric]bool
-}
-
-// NewMetrics returns a Metrics containing Metrics.
-func NewMetrics(metrics ...Metric) *Metrics {
-	newMetrics := make(map[Metric]bool)
-	for _, metric := range metrics {
-		newMetrics[metric] = true
-	}
-	return &Metrics{
-		metrics: newMetrics,
-	}
-}
-
-// Metrics returns the metrics set. The returned map is read-only and must not
-// be modified.
-func (m *Metrics) Metrics() map[Metric]bool {
-	return m.metrics
-}
-
-// Add adds the metrics to the metrics set and returns a new copy with the
-// additional metrics.
-func (m *Metrics) Add(metrics ...Metric) *Metrics {
-	newMetrics := make(map[Metric]bool)
-	for metric := range m.metrics {
-		newMetrics[metric] = true
-	}
-
-	for _, metric := range metrics {
-		newMetrics[metric] = true
-	}
-	return &Metrics{
-		metrics: newMetrics,
-	}
-}
-
-// Join joins the metrics passed in with the metrics set, and returns a new copy
-// with the merged metrics.
-func (m *Metrics) Join(metrics *Metrics) *Metrics {
-	newMetrics := make(map[Metric]bool)
-	maps.Copy(newMetrics, m.metrics)
-	maps.Copy(newMetrics, metrics.metrics)
-	return &Metrics{
-		metrics: newMetrics,
-	}
-}
-
-// Remove removes the metrics from the metrics set and returns a new copy with
-// the metrics removed.
-func (m *Metrics) Remove(metrics ...Metric) *Metrics {
-	newMetrics := make(map[Metric]bool)
-	for metric := range m.metrics {
-		newMetrics[metric] = true
-	}
-
-	for _, metric := range metrics {
-		delete(newMetrics, metric)
-	}
-	return &Metrics{
-		metrics: newMetrics,
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
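// [Editor's note] After the hunks above, experimental Metric is just a string
// and Metrics aliases the stable stats.MetricSet, so sets are built through
// the stats package. Hedged sketch; the metric names below are placeholders,
// not names registered by this patch:
import "google.golang.org/grpc/stats"

var exampleSet = stats.NewMetricSet("example.metric.one").
	Add("example.metric.two") // Add returns a copy with the extra name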
diff --git a/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
index eb16dd3f41..ed90060c3c 100644
--- a/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
+++ b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
@@ -101,7 +101,6 @@ var severityName = []string{
 	fatalLog:   "FATAL",
 }
 
-<<<<<<< HEAD
 // sprintf is fmt.Sprintf.
 // These vars exist to make it possible to test that expensive format calls aren't made unnecessarily.
 var sprintf = fmt.Sprintf
@@ -118,8 +117,6 @@ var sprintln = fmt.Sprintln
 // This var exists to make it possible to test functions calling os.Exit.
 var exit = os.Exit
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // loggerT is the default logger used by grpclog.
 type loggerT struct {
 	m          []*log.Logger
@@ -130,11 +127,7 @@ type loggerT struct {
 func (g *loggerT) output(severity int, s string) {
 	sevStr := severityName[severity]
 	if !g.jsonFormat {
-<<<<<<< HEAD
 		g.m[severity].Output(2, sevStr+": "+s)
-=======
-		g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s))
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		return
 	}
 	// TODO: we can also include the logging component, but that needs more
@@ -146,7 +139,6 @@ func (g *loggerT) output(severity int, s string) {
 	g.m[severity].Output(2, string(b))
 }
 
-<<<<<<< HEAD
 func (g *loggerT) printf(severity int, format string, args ...any) {
 	// Note the discard check is duplicated in each print func, rather than in
 	// output, to avoid the expensive Sprint calls.
@@ -220,57 +212,6 @@ func (g *loggerT) Fatalln(args ...any) {
 func (g *loggerT) Fatalf(format string, args ...any) {
 	g.printf(fatalLog, format, args...)
 	exit(1)
-=======
-func (g *loggerT) Info(args ...any) {
-	g.output(infoLog, fmt.Sprint(args...))
-}
-
-func (g *loggerT) Infoln(args ...any) {
-	g.output(infoLog, fmt.Sprintln(args...))
-}
-
-func (g *loggerT) Infof(format string, args ...any) {
-	g.output(infoLog, fmt.Sprintf(format, args...))
-}
-
-func (g *loggerT) Warning(args ...any) {
-	g.output(warningLog, fmt.Sprint(args...))
-}
-
-func (g *loggerT) Warningln(args ...any) {
-	g.output(warningLog, fmt.Sprintln(args...))
-}
-
-func (g *loggerT) Warningf(format string, args ...any) {
-	g.output(warningLog, fmt.Sprintf(format, args...))
-}
-
-func (g *loggerT) Error(args ...any) {
-	g.output(errorLog, fmt.Sprint(args...))
-}
-
-func (g *loggerT) Errorln(args ...any) {
-	g.output(errorLog, fmt.Sprintln(args...))
-}
-
-func (g *loggerT) Errorf(format string, args ...any) {
-	g.output(errorLog, fmt.Sprintf(format, args...))
-}
-
-func (g *loggerT) Fatal(args ...any) {
-	g.output(fatalLog, fmt.Sprint(args...))
-	os.Exit(1)
-}
-
-func (g *loggerT) Fatalln(args ...any) {
-	g.output(fatalLog, fmt.Sprintln(args...))
-	os.Exit(1)
-}
-
-func (g *loggerT) Fatalf(format string, args ...any) {
-	g.output(fatalLog, fmt.Sprintf(format, args...))
-	os.Exit(1)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (g *loggerT) V(l int) bool {
@@ -285,7 +226,6 @@ type LoggerV2Config struct {
 	FormatJSON bool
 }
 
-<<<<<<< HEAD
 // combineLoggers returns a combined logger for both higher & lower severity logs,
 // or only one if the other is io.Discard.
 //
@@ -303,21 +243,14 @@ func combineLoggers(lower, higher io.Writer) io.Writer {
 	return io.MultiWriter(lower, higher)
 }
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // NewLoggerV2 creates a new LoggerV2 instance with the provided configuration.
 // The infoW, warningW, and errorW writers are used to write log messages of
 // different severity levels.
 func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 {
-<<<<<<< HEAD
-=======
-	var m []*log.Logger
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	flag := log.LstdFlags
 	if c.FormatJSON {
 		flag = 0
 	}
-<<<<<<< HEAD
 	warningW = combineLoggers(infoW, warningW)
 	errorW = combineLoggers(errorW, warningW)
 
@@ -330,12 +263,5 @@ func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 {
 		log.New(errorW, "", flag),
 		log.New(fatalW, "", flag),
 	}
-=======
-	m = append(m, log.New(infoW, "", flag))
-	m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag))
-	ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal.
-	m = append(m, log.New(ew, "", flag))
-	m = append(m, log.New(ew, "", flag))
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON}
 }
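// [Editor's note] combineLoggers above folds two severity writers together so
// a WARNING line also reaches the INFO stream while io.Discard sinks stay
// cheap. Equivalent standalone sketch using only the standard library (the
// function name here is made up):
import "io"

func combinedSketch(lower, higher io.Writer) io.Writer {
	if lower == io.Discard {
		return higher
	}
	if higher == io.Discard {
		return lower
	}
	return io.MultiWriter(lower, higher) // write to both severity streams
}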
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
index c8c9d41812..467de16bdb 100644
--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
@@ -17,11 +17,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-<<<<<<< HEAD
 // 	protoc-gen-go v1.35.2
-=======
-// 	protoc-gen-go v1.34.2
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // 	protoc        v5.27.1
 // source: grpc/health/v1/health.proto
 
@@ -103,17 +99,9 @@ type HealthCheckRequest struct {
 
 func (x *HealthCheckRequest) Reset() {
 	*x = HealthCheckRequest{}
-<<<<<<< HEAD
 	mi := &file_grpc_health_v1_health_proto_msgTypes[0]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_grpc_health_v1_health_proto_msgTypes[0]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *HealthCheckRequest) String() string {
@@ -124,11 +112,7 @@ func (*HealthCheckRequest) ProtoMessage() {}
 
 func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_health_v1_health_proto_msgTypes[0]
-<<<<<<< HEAD
 	if x != nil {
-=======
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -160,17 +144,9 @@ type HealthCheckResponse struct {
 
 func (x *HealthCheckResponse) Reset() {
 	*x = HealthCheckResponse{}
-<<<<<<< HEAD
 	mi := &file_grpc_health_v1_health_proto_msgTypes[1]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_grpc_health_v1_health_proto_msgTypes[1]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *HealthCheckResponse) String() string {
@@ -181,11 +157,7 @@ func (*HealthCheckResponse) ProtoMessage() {}
 
 func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_health_v1_health_proto_msgTypes[1]
-<<<<<<< HEAD
 	if x != nil {
-=======
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -284,35 +256,6 @@ func file_grpc_health_v1_health_proto_init() {
 	if File_grpc_health_v1_health_proto != nil {
 		return
 	}
-<<<<<<< HEAD
-=======
-	if !protoimpl.UnsafeEnabled {
-		file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v any, i int) any {
-			switch v := v.(*HealthCheckRequest); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v any, i int) any {
-			switch v := v.(*HealthCheckResponse); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
index e04dedd8ce..b6ae7f2585 100644
--- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go
+++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
@@ -25,11 +25,7 @@ package backoff
 import (
 	"context"
 	"errors"
-<<<<<<< HEAD
 	rand "math/rand/v2"
-=======
-	"math/rand"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"time"
 
 	grpcbackoff "google.golang.org/grpc/backoff"
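// [Editor's note] Several files in this patch (backoff, DNS resolver, HTTP/2
// server) move from math/rand to math/rand/v2: Intn becomes IntN and the
// global generator is auto-seeded. Hedged sketch of the call-site change; the
// function name is made up for illustration:
import rand "math/rand/v2"

func chosenSketch(percentage int) bool {
	// math/rand equivalent: rand.Intn(100)+1 <= percentage
	// (and, before Go 1.20, required an explicit rand.Seed call)
	return rand.IntN(100)+1 <= percentage
}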
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index 889e565646..1e42b6fdc8 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -49,11 +49,7 @@ var (
 	// XDSFallbackSupport is the env variable that controls whether support for
 	// xDS fallback is turned on. If this is unset or is false, only the first
 	// xDS server in the list of server configs will be used.
-<<<<<<< HEAD
 	XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", true)
-=======
-	XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	// NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used
 	// instead of the existing pickfirst implementation. This can be enabled by
 	// setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST"
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
index 1e28cd888d..9afeb444d4 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
@@ -53,13 +53,10 @@ var (
 
 	// C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
 	C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI")
-<<<<<<< HEAD
 
 	// XDSDualstackEndpointsEnabled is true if gRPC should read the
 	// "additional addresses" in the xDS endpoint resource.
 	// TODO: https://github.com/grpc/grpc-go/issues/7866 - Control this using
 	// an env variable when all LB policies handle endpoints.
 	XDSDualstackEndpointsEnabled = false
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 )
diff --git a/vendor/google.golang.org/grpc/internal/hierarchy/hierarchy.go b/vendor/google.golang.org/grpc/internal/hierarchy/hierarchy.go
index 489e6d59f5..362c05fa2a 100644
--- a/vendor/google.golang.org/grpc/internal/hierarchy/hierarchy.go
+++ b/vendor/google.golang.org/grpc/internal/hierarchy/hierarchy.go
@@ -48,7 +48,6 @@ func (p pathValue) Equal(o any) bool {
 	return true
 }
 
-<<<<<<< HEAD
 // FromEndpoint returns the hierarchical path of endpoint.
 func FromEndpoint(endpoint resolver.Endpoint) []string {
 	path, _ := endpoint.Attributes.Value(pathKey).(pathValue)
@@ -61,8 +60,6 @@ func SetInEndpoint(endpoint resolver.Endpoint, path []string) resolver.Endpoint
 	return endpoint
 }
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // Get returns the hierarchical path of addr.
 func Get(addr resolver.Address) []string {
 	attrs := addr.BalancerAttributes
@@ -125,7 +122,6 @@ func Group(addrs []resolver.Address) map[string][]resolver.Address {
 	}
 	return ret
 }
-<<<<<<< HEAD
 
 // GroupEndpoints splits a slice of endpoints into groups based on
 // the first hierarchy path. The first hierarchy path will be removed from the
@@ -173,5 +169,3 @@ func GroupEndpoints(endpoints []resolver.Endpoint) map[string][]resolver.Endpoin
 	}
 	return ret
 }
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index a27c4808ab..c17b98194b 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -29,19 +29,12 @@ import (
 )
 
 var (
-<<<<<<< HEAD
 	// HealthCheckFunc is used to provide client-side LB channel health checking
 	HealthCheckFunc HealthChecker
 	// RegisterClientHealthCheckListener is used to provide a listener for
 	// updates from the client-side health checking service. It returns a
 	// function that can be called to stop the health producer.
 	RegisterClientHealthCheckListener any // func(ctx context.Context, sc balancer.SubConn, serviceName string, listener func(balancer.SubConnState)) func()
-=======
-	// WithHealthCheckFunc is set by dialoptions.go
-	WithHealthCheckFunc any // func (HealthChecker) DialOption
-	// HealthCheckFunc is used to provide client-side LB channel health checking
-	HealthCheckFunc HealthChecker
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	// BalancerUnregister is exported by package balancer to unregister a balancer.
 	BalancerUnregister func(name string)
 	// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
@@ -158,7 +151,6 @@ var (
 	// other features, including the CSDS service.
 	NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error)
 
-<<<<<<< HEAD
 	// NewXDSResolverWithClientForTesting creates a new xDS resolver builder
 	// using the provided xDS client instead of creating a new one using the
 	// bootstrap configuration specified by the supported environment variables.
@@ -173,8 +165,6 @@ var (
 	// other features, including the CSDS service.
 	NewXDSResolverWithClientForTesting any // func(xdsclient.XDSClient) (resolver.Builder, error)
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	// RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster
 	// Specifier Plugin for testing purposes, regardless of the XDSRLS environment
 	// variable.
@@ -281,12 +271,9 @@ const (
 // It currently has an experimental suffix which would be removed once
 // end-to-end testing of the policy is completed.
 const RLSLoadBalancingPolicyName = "rls_experimental"
-<<<<<<< HEAD
 
 // EnforceSubConnEmbedding is used to enforce proper SubConn implementation
 // embedding.
 type EnforceSubConnEmbedding interface {
 	enforceSubConnEmbedding()
 }
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
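// [Editor's note] The hierarchy hunks above add Endpoint-level path helpers
// alongside the existing Address-level ones. Hedged sketch; hierarchy is an
// internal grpc-go package, so this is illustration only, and the path values
// are made up:
import (
	"google.golang.org/grpc/internal/hierarchy"
	"google.golang.org/grpc/resolver"
)

func tagEndpoint(ep resolver.Endpoint) []string {
	ep = hierarchy.SetInEndpoint(ep, []string{"cluster-a", "child-1"})
	return hierarchy.FromEndpoint(ep) // returns ["cluster-a", "child-1"]
}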
diff --git a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go
index 9456d69b57..22731029f5 100644
--- a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go
+++ b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go
@@ -14,11 +14,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-<<<<<<< HEAD
 // 	protoc-gen-go v1.35.2
-=======
-// 	protoc-gen-go v1.34.2
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // 	protoc        v5.27.1
 // source: grpc/lookup/v1/rls.proto
 
@@ -109,17 +105,9 @@ type RouteLookupRequest struct {
 
 func (x *RouteLookupRequest) Reset() {
 	*x = RouteLookupRequest{}
-<<<<<<< HEAD
 	mi := &file_grpc_lookup_v1_rls_proto_msgTypes[0]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_grpc_lookup_v1_rls_proto_msgTypes[0]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *RouteLookupRequest) String() string {
@@ -130,11 +118,7 @@ func (*RouteLookupRequest) ProtoMessage() {}
 
 func (x *RouteLookupRequest) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_lookup_v1_rls_proto_msgTypes[0]
-<<<<<<< HEAD
 	if x != nil {
-=======
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -203,17 +187,9 @@ type RouteLookupResponse struct {
 
 func (x *RouteLookupResponse) Reset() {
 	*x = RouteLookupResponse{}
-<<<<<<< HEAD
 	mi := &file_grpc_lookup_v1_rls_proto_msgTypes[1]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_grpc_lookup_v1_rls_proto_msgTypes[1]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *RouteLookupResponse) String() string {
@@ -224,11 +200,7 @@ func (*RouteLookupResponse) ProtoMessage() {}
 
 func (x *RouteLookupResponse) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_lookup_v1_rls_proto_msgTypes[1]
-<<<<<<< HEAD
 	if x != nil {
-=======
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -363,35 +335,6 @@ func file_grpc_lookup_v1_rls_proto_init() {
 	if File_grpc_lookup_v1_rls_proto != nil {
 		return
 	}
-<<<<<<< HEAD
-=======
-	if !protoimpl.UnsafeEnabled {
-		file_grpc_lookup_v1_rls_proto_msgTypes[0].Exporter = func(v any, i int) any {
-			switch v := v.(*RouteLookupRequest); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_grpc_lookup_v1_rls_proto_msgTypes[1].Exporter = func(v any, i int) any {
-			switch v := v.(*RouteLookupResponse); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
diff --git a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go
index a689fafdf3..73b70c25ea 100644
--- a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go
+++ b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go
@@ -14,11 +14,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-<<<<<<< HEAD
 // 	protoc-gen-go v1.35.2
-=======
-// 	protoc-gen-go v1.34.2
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 // 	protoc        v5.27.1
 // source: grpc/lookup/v1/rls_config.proto
 
@@ -63,17 +59,9 @@ type NameMatcher struct {
 
 func (x *NameMatcher) Reset() {
 	*x = NameMatcher{}
-<<<<<<< HEAD
 	mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[0]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[0]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *NameMatcher) String() string {
@@ -84,11 +72,7 @@ func (*NameMatcher) ProtoMessage() {}
 
 func (x *NameMatcher) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[0]
-<<<<<<< HEAD
 	if x != nil {
-=======
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -145,17 +129,9 @@ type GrpcKeyBuilder struct {
 
 func (x *GrpcKeyBuilder) Reset() {
 	*x = GrpcKeyBuilder{}
-<<<<<<< HEAD
 	mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[1]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[1]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *GrpcKeyBuilder) String() string {
@@ -166,11 +142,7 @@ func (*GrpcKeyBuilder) ProtoMessage() {}
 
 func (x *GrpcKeyBuilder) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[1]
-<<<<<<< HEAD
 	if x != nil {
-=======
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -301,17 +273,9 @@ type HttpKeyBuilder struct {
 
 func (x *HttpKeyBuilder) Reset() {
 	*x = HttpKeyBuilder{}
-<<<<<<< HEAD
 	mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[2]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[2]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *HttpKeyBuilder) String() string {
@@ -322,11 +286,7 @@ func (*HttpKeyBuilder) ProtoMessage() {}
 
 func (x *HttpKeyBuilder) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[2]
-<<<<<<< HEAD
 	if x != nil {
-=======
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -434,17 +394,9 @@ type RouteLookupConfig struct {
 
 func (x *RouteLookupConfig) Reset() {
 	*x = RouteLookupConfig{}
-<<<<<<< HEAD
 	mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[3]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[3]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *RouteLookupConfig) String() string {
@@ -455,11 +407,7 @@ func (*RouteLookupConfig) ProtoMessage() {}
 
 func (x *RouteLookupConfig) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[3]
-<<<<<<< HEAD
 	if x != nil {
-=======
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -550,17 +498,9 @@ type RouteLookupClusterSpecifier struct {
 
 func (x *RouteLookupClusterSpecifier) Reset() {
 	*x = RouteLookupClusterSpecifier{}
-<<<<<<< HEAD
 	mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[4]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[4]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *RouteLookupClusterSpecifier) String() string {
@@ -571,11 +511,7 @@ func (*RouteLookupClusterSpecifier) ProtoMessage() {}
 
 func (x *RouteLookupClusterSpecifier) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[4]
-<<<<<<< HEAD
 	if x != nil {
-=======
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -612,17 +548,9 @@ type GrpcKeyBuilder_Name struct {
 
 func (x *GrpcKeyBuilder_Name) Reset() {
 	*x = GrpcKeyBuilder_Name{}
-<<<<<<< HEAD
 	mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[5]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[5]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *GrpcKeyBuilder_Name) String() string {
@@ -633,11 +561,7 @@ func (*GrpcKeyBuilder_Name) ProtoMessage() {}
 
 func (x *GrpcKeyBuilder_Name) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[5]
-<<<<<<< HEAD
 	if x != nil {
-=======
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -684,17 +608,9 @@ type GrpcKeyBuilder_ExtraKeys struct {
 
 func (x *GrpcKeyBuilder_ExtraKeys) Reset() {
 	*x = GrpcKeyBuilder_ExtraKeys{}
-<<<<<<< HEAD
 	mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[6]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-=======
-	if protoimpl.UnsafeEnabled {
-		mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[6]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (x *GrpcKeyBuilder_ExtraKeys) String() string {
@@ -705,11 +621,7 @@ func (*GrpcKeyBuilder_ExtraKeys) ProtoMessage() {}
 
 func (x *GrpcKeyBuilder_ExtraKeys) ProtoReflect() protoreflect.Message {
 	mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[6]
-<<<<<<< HEAD
 	if x != nil {
-=======
-	if protoimpl.UnsafeEnabled && x != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -916,95 +828,6 @@ func file_grpc_lookup_v1_rls_config_proto_init() {
 	if File_grpc_lookup_v1_rls_config_proto != nil {
 		return
 	}
-<<<<<<< HEAD
-=======
-	if !protoimpl.UnsafeEnabled {
-		file_grpc_lookup_v1_rls_config_proto_msgTypes[0].Exporter = func(v any, i int) any {
-			switch v := v.(*NameMatcher); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_grpc_lookup_v1_rls_config_proto_msgTypes[1].Exporter = func(v any, i int) any {
-			switch v := v.(*GrpcKeyBuilder); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_grpc_lookup_v1_rls_config_proto_msgTypes[2].Exporter = func(v any, i int) any {
-			switch v := v.(*HttpKeyBuilder); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_grpc_lookup_v1_rls_config_proto_msgTypes[3].Exporter = func(v any, i int) any {
-			switch v := v.(*RouteLookupConfig); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_grpc_lookup_v1_rls_config_proto_msgTypes[4].Exporter = func(v any, i int) any {
-			switch v := v.(*RouteLookupClusterSpecifier); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_grpc_lookup_v1_rls_config_proto_msgTypes[5].Exporter = func(v any, i int) any {
-			switch v := v.(*GrpcKeyBuilder_Name); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_grpc_lookup_v1_rls_config_proto_msgTypes[6].Exporter = func(v any, i int) any {
-			switch v := v.(*GrpcKeyBuilder_ExtraKeys); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
index 1bab1d432d..ba5c5a95d0 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
@@ -24,14 +24,9 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-<<<<<<< HEAD
 	rand "math/rand/v2"
 	"net"
 	"net/netip"
-=======
-	"math/rand"
-	"net"
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"os"
 	"strconv"
 	"strings"
@@ -128,11 +123,7 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
 	}
 
 	// IP address.
-<<<<<<< HEAD
 	if ipAddr, err := formatIP(host); err == nil {
-=======
-	if ipAddr, ok := formatIP(host); ok {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		addr := []resolver.Address{{Addr: ipAddr + ":" + port}}
 		cc.UpdateState(resolver.State{Addresses: addr})
 		return deadResolver{}, nil
@@ -270,15 +261,9 @@ func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error)
 		return nil, err
 	}
 	for _, a := range lbAddrs {
-<<<<<<< HEAD
 		ip, err := formatIP(a)
 		if err != nil {
 			return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err)
-=======
-		ip, ok := formatIP(a)
-		if !ok {
-			return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		}
 		addr := ip + ":" + strconv.Itoa(int(s.Port))
 		newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target})
@@ -338,15 +323,9 @@ func (d *dnsResolver) lookupHost(ctx context.Context) ([]resolver.Address, error
 	}
 	newAddrs := make([]resolver.Address, 0, len(addrs))
 	for _, a := range addrs {
-<<<<<<< HEAD
 		ip, err := formatIP(a)
 		if err != nil {
 			return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err)
-=======
-		ip, ok := formatIP(a)
-		if !ok {
-			return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		}
 		addr := ip + ":" + d.port
 		newAddrs = append(newAddrs, resolver.Address{Addr: addr})
@@ -373,7 +352,6 @@ func (d *dnsResolver) lookup() (*resolver.State, error) {
 	return &state, nil
 }
 
-<<<<<<< HEAD
 // formatIP returns an error if addr is not a valid textual representation of
 // an IP address. If addr is an IPv4 address, return the addr and error = nil.
 // If addr is an IPv6 address, return the addr enclosed in square brackets and
@@ -387,21 +365,6 @@ func formatIP(addr string) (string, error) {
 		return addr, nil
 	}
 	return "[" + addr + "]", nil
-=======
-// formatIP returns ok = false if addr is not a valid textual representation of
-// an IP address. If addr is an IPv4 address, return the addr and ok = true.
-// If addr is an IPv6 address, return the addr enclosed in square brackets and
-// ok = true.
-func formatIP(addr string) (addrIP string, ok bool) {
-	ip := net.ParseIP(addr)
-	if ip == nil {
-		return "", false
-	}
-	if ip.To4() != nil {
-		return addr, true
-	}
-	return "[" + addr + "]", true
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 // parseTarget takes the user input target string and default port, returns
@@ -417,11 +380,7 @@ func parseTarget(target, defaultPort string) (host, port string, err error) {
 	if target == "" {
 		return "", "", internal.ErrMissingAddr
 	}
-<<<<<<< HEAD
 	if _, err := netip.ParseAddr(target); err == nil {
-=======
-	if ip := net.ParseIP(target); ip != nil {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		// target is an IPv4 or IPv6(without brackets) address
 		return target, defaultPort, nil
 	}
@@ -469,11 +428,7 @@ func chosenByPercentage(a *int) bool {
 	if a == nil {
 		return true
 	}
-<<<<<<< HEAD
 	return rand.IntN(100)+1 <= *a
-=======
-	return rand.Intn(100)+1 <= *a
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func canaryingSC(js string) string {
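// [Editor's note] The DNS resolver now validates IP literals with net/netip
// and reports an error instead of a bool. Equivalent standalone sketch (a
// simplified stand-in, not the vendored function itself):
import "net/netip"

func formatIPSketch(addr string) (string, error) {
	ip, err := netip.ParseAddr(addr)
	if err != nil {
		return "", err // not a valid textual IP address
	}
	if ip.Is4() {
		return addr, nil
	}
	return "[" + addr + "]", nil // IPv6 needs brackets in host:port form
}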
diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
index 61516b2ad9..dfc0f224ec 100644
--- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
+++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
@@ -92,22 +92,11 @@ func (f *trInFlow) newLimit(n uint32) uint32 {
 
 func (f *trInFlow) onData(n uint32) uint32 {
 	f.unacked += n
-<<<<<<< HEAD
 	if f.unacked < f.limit/4 {
 		f.updateEffectiveWindowSize()
 		return 0
 	}
 	return f.reset()
-=======
-	if f.unacked >= f.limit/4 {
-		w := f.unacked
-		f.unacked = 0
-		f.updateEffectiveWindowSize()
-		return w
-	}
-	f.updateEffectiveWindowSize()
-	return 0
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
 
 func (f *trInFlow) reset() uint32 {
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
index 5ae7813909..3dea235735 100644
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -225,11 +225,7 @@ func (ht *serverHandlerTransport) do(fn func()) error {
 	}
 }
 
-<<<<<<< HEAD
 func (ht *serverHandlerTransport) writeStatus(s *ServerStream, st *status.Status) error {
-=======
-func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	ht.writeStatusMu.Lock()
 	defer ht.writeStatusMu.Unlock()
 
@@ -293,22 +289,14 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
 
 // writePendingHeaders sets common and custom headers on the first
 // write call (Write, WriteHeader, or WriteStatus)
-<<<<<<< HEAD
 func (ht *serverHandlerTransport) writePendingHeaders(s *ServerStream) {
-=======
-func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	ht.writeCommonHeaders(s)
 	ht.writeCustomHeaders(s)
 }
 
 // writeCommonHeaders sets common headers on the first write
 // call (Write, WriteHeader, or WriteStatus).
-<<<<<<< HEAD
 func (ht *serverHandlerTransport) writeCommonHeaders(s *ServerStream) {
-=======
-func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	h := ht.rw.Header()
 	h["Date"] = nil // suppress Date to make tests happy; TODO: restore
 	h.Set("Content-Type", ht.contentType)
@@ -329,11 +317,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
 
 // writeCustomHeaders sets custom headers set on the stream via SetHeader
 // on the first write call (Write, WriteHeader, or WriteStatus)
-<<<<<<< HEAD
 func (ht *serverHandlerTransport) writeCustomHeaders(s *ServerStream) {
-=======
-func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	h := ht.rw.Header()
 
 	s.hdrMu.Lock()
@@ -349,11 +333,7 @@ func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) {
 	s.hdrMu.Unlock()
 }
 
-<<<<<<< HEAD
 func (ht *serverHandlerTransport) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error {
-=======
-func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	// Always take a reference because otherwise there is no guarantee the data will
 	// be available after this function returns. This is what callers to Write
 	// expect.
@@ -377,11 +357,7 @@ func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSl
 	return nil
 }
 
-<<<<<<< HEAD
 func (ht *serverHandlerTransport) writeHeader(s *ServerStream, md metadata.MD) error {
-=======
-func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	if err := s.SetHeader(md); err != nil {
 		return err
 	}
@@ -409,11 +385,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
 	return err
 }
 
-<<<<<<< HEAD
 func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*ServerStream)) {
-=======
-func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	// With this transport type there will be exactly 1 stream: this HTTP request.
 	var cancel context.CancelFunc
 	if ht.timeoutSet {
@@ -436,7 +408,6 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream
 	ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
 	req := ht.req
-<<<<<<< HEAD
 	s := &ServerStream{
 		Stream: &Stream{
 			id: 0, // irrelevant
@@ -449,18 +420,6 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream
 		},
 		cancel: cancel,
 		st:     ht,
-=======
-	s := &Stream{
-		id:             0, // irrelevant
-		ctx:            ctx,
-		requestRead:    func(int) {},
-		cancel:         cancel,
-		buf:            newRecvBuffer(),
-		st:             ht,
-		method:         req.URL.Path,
-		recvCompress:   req.Header.Get("grpc-encoding"),
-		contentSubtype: ht.contentSubtype,
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		headerWireLength: 0, // won't have access to header wire length until golang/go#18997.
 	}
 	s.trReader = &transportReader{
@@ -514,13 +473,7 @@ func (ht *serverHandlerTransport) runStream() {
 	}
 }
 
-<<<<<<< HEAD
 func (ht *serverHandlerTransport) incrMsgRecv() {}
-=======
-func (ht *serverHandlerTransport) IncrMsgSent() {}
-
-func (ht *serverHandlerTransport) IncrMsgRecv() {}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 func (ht *serverHandlerTransport) Drain(string) {
 	panic("Drain() is not implemented")
@@ -545,9 +498,5 @@ func mapRecvMsgError(err error) error {
 	if strings.Contains(err.Error(), "body closed by handler") {
 		return status.Error(codes.Canceled, err.Error())
 	}
-<<<<<<< HEAD
 	return connectionErrorf(true, err, "%s", err.Error())
-=======
-	return connectionErrorf(true, err, err.Error())
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 }
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index 6cd6a3d9fa..f323ab7f45 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -123,11 +123,7 @@ type http2Client struct {
 	mu            sync.Mutex // guard the following variables
 	nextID        uint32
 	state         transportState
-<<<<<<< HEAD
 	activeStreams map[uint32]*ClientStream
-=======
-	activeStreams map[uint32]*Stream
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	// prevGoAway ID records the Last-Stream-ID in the previous GOAway frame.
 	prevGoAwayID uint32
 	// goAwayReason records the http2.ErrCode and debug data received with the
@@ -203,17 +199,10 @@ func isTemporary(err error) bool {
 	return true
 }
 
-<<<<<<< HEAD
 // NewHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
 // and starts to receive messages on it. Non-nil error returns if construction
 // fails.
 func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ ClientTransport, err error) {
-=======
-// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
-// and starts to receive messages on it. Non-nil error returns if construction
-// fails.
-func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	scheme := "http"
 	ctx, cancel := context.WithCancel(ctx)
 	defer func() {
@@ -350,11 +339,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
 		framer:                newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize),
 		fc:                    &trInFlow{limit: uint32(icwz)},
 		scheme:                scheme,
-<<<<<<< HEAD
 		activeStreams:         make(map[uint32]*ClientStream),
-=======
-		activeStreams:         make(map[uint32]*Stream),
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		isSecure:              isSecure,
 		perRPCCreds:           perRPCCreds,
 		kp:                    kp,
@@ -495,7 +480,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
 	return t, nil
 }
 
-<<<<<<< HEAD
 func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientStream {
 	// TODO(zhaoq): Handle uint32 overflow of Stream.id.
 	s := &ClientStream{
@@ -509,19 +493,6 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientSt
 		done:           make(chan struct{}),
 		headerChan:     make(chan struct{}),
 		doneFunc:       callHdr.DoneFunc,
-=======
-func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
-	// TODO(zhaoq): Handle uint32 overflow of Stream.id.
-	s := &Stream{
-		ct:             t,
-		done:           make(chan struct{}),
-		method:         callHdr.Method,
-		sendCompress:   callHdr.SendCompress,
-		buf:            newRecvBuffer(),
-		headerChan:     make(chan struct{}),
-		contentSubtype: callHdr.ContentSubtype,
-		doneFunc:       callHdr.DoneFunc,
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 	s.wq = newWriteQuota(defaultWriteQuota, s.done)
 	s.requestRead = func(n int) {
@@ -537,11 +508,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
 			ctxDone: s.ctx.Done(),
 			recv:    s.buf,
 			closeStream: func(err error) {
-<<<<<<< HEAD
 				s.Close(err)
-=======
-				t.CloseStream(s, err)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			},
 		},
 		windowHandler: func(n int) {
@@ -632,15 +599,6 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
 	for k, v := range callAuthData {
 		headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
 	}
-<<<<<<< HEAD
-=======
-	if b := stats.OutgoingTags(ctx); b != nil {
-		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)})
-	}
-	if b := stats.OutgoingTrace(ctx); b != nil {
-		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)})
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 
 	if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok {
 		var k string
@@ -776,11 +734,7 @@ func (e NewStreamError) Error() string {
 
 // NewStream creates a stream and registers it into the transport as "active"
 // streams. All non-nil errors returned will be *NewStreamError.
-<<<<<<< HEAD
 func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) {
-=======
-func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	ctx = peer.NewContext(ctx, t.getPeer())
 
 	// ServerName field of the resolver returned address takes precedence over
@@ -805,11 +759,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
 			return
 		}
 		// The stream was unprocessed by the server.
-<<<<<<< HEAD
 		s.unprocessed.Store(true)
-=======
-		atomic.StoreUint32(&s.unprocessed, 1)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 		s.write(recvMsg{err: err})
 		close(s.done)
 		// If headerChan isn't closed, then close it.
@@ -954,25 +904,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
 	return s, nil
 }
 
-<<<<<<< HEAD
 func (t *http2Client) closeStream(s *ClientStream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) {
-=======
-// CloseStream clears the footprint of a stream when the stream is not needed any more.
-// This must not be executed in reader's goroutine.
-func (t *http2Client) CloseStream(s *Stream, err error) {
-	var (
-		rst     bool
-		rstCode http2.ErrCode
-	)
-	if err != nil {
-		rst = true
-		rstCode = http2.ErrCodeCancel
-	}
-	t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false)
-}
-
-func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) {
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	// Set stream status to done.
 	if s.swapState(streamDone) == streamDone {
 		// If it was already done, return. If multiple closeStream calls
If multiple closeStream calls @@ -1135,11 +1067,7 @@ func (t *http2Client) GracefulClose() { // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. -<<<<<<< HEAD func (t *http2Client) write(s *ClientStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error { -======= -func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) reader := data.Reader() if opts.Last { @@ -1168,18 +1096,11 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *O _ = reader.Close() return err } -<<<<<<< HEAD t.incrMsgSent() return nil } func (t *http2Client) getStream(f http2.Frame) *ClientStream { -======= - return nil -} - -func (t *http2Client) getStream(f http2.Frame) *Stream { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) t.mu.Lock() s := t.activeStreams[f.Header().StreamID] t.mu.Unlock() @@ -1189,11 +1110,7 @@ func (t *http2Client) getStream(f http2.Frame) *Stream { // adjustWindow sends out extra window update over the initial window size // of stream if the application is requesting data larger in size than // the window. -<<<<<<< HEAD func (t *http2Client) adjustWindow(s *ClientStream, n uint32) { -======= -func (t *http2Client) adjustWindow(s *Stream, n uint32) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if w := s.fc.maybeAdjust(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -1202,11 +1119,7 @@ func (t *http2Client) adjustWindow(s *Stream, n uint32) { // updateWindow adjusts the inbound quota for the stream. // Window updates will be sent out when the cumulative quota // exceeds the corresponding threshold. -<<<<<<< HEAD func (t *http2Client) updateWindow(s *ClientStream, n uint32) { -======= -func (t *http2Client) updateWindow(s *Stream, n uint32) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if w := s.fc.onRead(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -1312,11 +1225,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { } if f.ErrCode == http2.ErrCodeRefusedStream { // The stream was unprocessed by the server. -<<<<<<< HEAD s.unprocessed.Store(true) -======= - atomic.StoreUint32(&s.unprocessed, 1) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } statusCode, ok := http2ErrConvTab[f.ErrCode] if !ok { @@ -1457,19 +1366,11 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error { return connectionErrorf(true, nil, "received goaway and there are no active streams") } -<<<<<<< HEAD streamsToClose := make([]*ClientStream, 0) for streamID, stream := range t.activeStreams { if streamID > id && streamID <= upperLimit { // The stream was unprocessed by the server. stream.unprocessed.Store(true) -======= - streamsToClose := make([]*Stream, 0) - for streamID, stream := range t.activeStreams { - if streamID > id && streamID <= upperLimit { - // The stream was unprocessed by the server. 
- atomic.StoreUint32(&stream.unprocessed, 1) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) streamsToClose = append(streamsToClose, stream) } } @@ -1521,11 +1422,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } endStream := frame.StreamEnded() -<<<<<<< HEAD s.bytesReceived.Store(true) -======= - atomic.StoreUint32(&s.bytesReceived, 1) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0 if !initialHeader && !endStream { @@ -1895,7 +1792,6 @@ func (t *http2Client) socketMetrics() *channelz.EphemeralSocketMetrics { func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } -<<<<<<< HEAD func (t *http2Client) incrMsgSent() { if channelz.IsOn() { t.channelz.SocketMetrics.MessagesSent.Add(1) @@ -1908,16 +1804,6 @@ func (t *http2Client) incrMsgRecv() { t.channelz.SocketMetrics.MessagesReceived.Add(1) t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano()) } -======= -func (t *http2Client) IncrMsgSent() { - t.channelz.SocketMetrics.MessagesSent.Add(1) - t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano()) -} - -func (t *http2Client) IncrMsgRecv() { - t.channelz.SocketMetrics.MessagesReceived.Add(1) - t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (t *http2Client) getOutFlowWindow() int64 { diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 49fa2d316d..997b0a59b5 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -25,11 +25,7 @@ import ( "fmt" "io" "math" -<<<<<<< HEAD rand "math/rand/v2" -======= - "math/rand" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "net" "net/http" "strconv" @@ -115,11 +111,7 @@ type http2Server struct { // already initialized since draining is already underway. drainEvent *grpcsync.Event state transportState -<<<<<<< HEAD activeStreams map[uint32]*ServerStream -======= - activeStreams map[uint32]*Stream ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // idle is the time instant when the connection went idle. // This is either the beginning of the connection or when the number of // RPCs go down to 0. @@ -264,11 +256,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, -<<<<<<< HEAD activeStreams: make(map[uint32]*ServerStream), -======= - activeStreams: make(map[uint32]*Stream), ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) stats: config.StatsHandlers, kp: kp, idle: time.Now(), @@ -371,11 +359,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, // operateHeaders takes action on the decoded headers. Returns an error if fatal // error encountered and transport needs to close, otherwise returns nil. 
-<<<<<<< HEAD func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*ServerStream)) error { -======= -func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -401,7 +385,6 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade t.maxStreamID = streamID buf := newRecvBuffer() -<<<<<<< HEAD s := &ServerStream{ Stream: &Stream{ id: streamID, @@ -409,13 +392,6 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade fc: &inFlow{limit: uint32(t.initialWindowSize)}, }, st: t, -======= - s := &Stream{ - id: streamID, - st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) headerWireLength: int(frame.Header().Length), } var ( @@ -563,15 +539,6 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade // Attach the received metadata to the context. if len(mdata) > 0 { s.ctx = metadata.NewIncomingContext(s.ctx, mdata) -<<<<<<< HEAD -======= - if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 { - s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1])) - } - if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 { - s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1])) - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } t.mu.Lock() if t.state != reachable { @@ -597,11 +564,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade t.logger.Infof("Aborting the stream early: %v", errMsg) } t.controlBuf.put(&earlyAbortStream{ -<<<<<<< HEAD httpStatus: http.StatusMethodNotAllowed, -======= - httpStatus: 405, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) streamID: streamID, contentSubtype: s.contentSubtype, status: status.New(codes.Internal, errMsg), @@ -622,11 +585,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade stat = status.New(codes.PermissionDenied, err.Error()) } t.controlBuf.put(&earlyAbortStream{ -<<<<<<< HEAD httpStatus: http.StatusOK, -======= - httpStatus: 200, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) streamID: s.id, contentSubtype: s.contentSubtype, status: stat, @@ -671,11 +630,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade // HandleStreams receives incoming streams using the given handler. This is // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. 
-<<<<<<< HEAD func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStream)) { -======= -func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) defer func() { close(t.readerDone) <-t.loopyWriterDone @@ -739,11 +694,7 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { } } -<<<<<<< HEAD func (t *http2Server) getStream(f http2.Frame) (*ServerStream, bool) { -======= -func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) t.mu.Lock() defer t.mu.Unlock() if t.activeStreams == nil { @@ -761,11 +712,7 @@ func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { // adjustWindow sends out extra window update over the initial window size // of stream if the application is requesting data larger in size than // the window. -<<<<<<< HEAD func (t *http2Server) adjustWindow(s *ServerStream, n uint32) { -======= -func (t *http2Server) adjustWindow(s *Stream, n uint32) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if w := s.fc.maybeAdjust(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -775,11 +722,7 @@ func (t *http2Server) adjustWindow(s *Stream, n uint32) { // updateWindow adjusts the inbound quota for the stream and the transport. // Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. -<<<<<<< HEAD func (t *http2Server) updateWindow(s *ServerStream, n uint32) { -======= -func (t *http2Server) updateWindow(s *Stream, n uint32) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if w := s.fc.onRead(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w, @@ -1016,11 +959,7 @@ func (t *http2Server) checkForHeaderListSize(it any) bool { return true } -<<<<<<< HEAD func (t *http2Server) streamContextErr(s *ServerStream) error { -======= -func (t *http2Server) streamContextErr(s *Stream) error { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) select { case <-t.done: return ErrConnClosing @@ -1030,11 +969,7 @@ func (t *http2Server) streamContextErr(s *Stream) error { } // WriteHeader sends the header metadata md back to the client. -<<<<<<< HEAD func (t *http2Server) writeHeader(s *ServerStream, md metadata.MD) error { -======= -func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) s.hdrMu.Lock() defer s.hdrMu.Unlock() if s.getState() == streamDone { @@ -1067,11 +1002,7 @@ func (t *http2Server) setResetPingStrikes() { atomic.StoreUint32(&t.resetPingStrikes, 1) } -<<<<<<< HEAD func (t *http2Server) writeHeaderLocked(s *ServerStream) error { -======= -func (t *http2Server) writeHeaderLocked(s *Stream) error { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. @@ -1111,11 +1042,7 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { // There is no further I/O operations being able to perform on this stream. // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. 
-<<<<<<< HEAD func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error { -======= -func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) s.hdrMu.Lock() defer s.hdrMu.Unlock() @@ -1186,19 +1113,11 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). -<<<<<<< HEAD func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error { reader := data.Reader() if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.writeHeader(s, nil); err != nil { -======= -func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { - reader := data.Reader() - - if !s.isHeaderSent() { // Headers haven't been written yet. - if err := t.WriteHeader(s, nil); err != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) _ = reader.Close() return err } @@ -1224,10 +1143,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Opti _ = reader.Close() return err } -<<<<<<< HEAD t.incrMsgSent() -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } @@ -1357,11 +1273,7 @@ func (t *http2Server) Close(err error) { } // deleteStream deletes the stream s from transport's active streams. -<<<<<<< HEAD func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) { -======= -func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) t.mu.Lock() if _, ok := t.activeStreams[s.id]; ok { @@ -1382,11 +1294,7 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { } // finishStream closes the stream and puts the trailing headerFrame into controlbuf. -<<<<<<< HEAD func (t *http2Server) finishStream(s *ServerStream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { -======= -func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // In case stream sending and receiving are invoked in separate // goroutines (e.g., bi-directional streaming), cancel needs to be // called to interrupt the potential blocking on other goroutines. @@ -1410,11 +1318,7 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h } // closeStream clears the footprint of a stream when the stream is not needed any more. -<<<<<<< HEAD func (t *http2Server) closeStream(s *ServerStream, rst bool, rstCode http2.ErrCode, eosReceived bool) { -======= -func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // In case stream sending and receiving are invoked in separate // goroutines (e.g., bi-directional streaming), cancel needs to be // called to interrupt the potential blocking on other goroutines. 
@@ -1508,7 +1412,6 @@ func (t *http2Server) socketMetrics() *channelz.EphemeralSocketMetrics { } } -<<<<<<< HEAD func (t *http2Server) incrMsgSent() { if channelz.IsOn() { t.channelz.SocketMetrics.MessagesSent.Add(1) @@ -1521,16 +1424,6 @@ func (t *http2Server) incrMsgRecv() { t.channelz.SocketMetrics.MessagesReceived.Add(1) t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1) } -======= -func (t *http2Server) IncrMsgSent() { - t.channelz.SocketMetrics.MessagesSent.Add(1) - t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1) -} - -func (t *http2Server) IncrMsgRecv() { - t.channelz.SocketMetrics.MessagesReceived.Add(1) - t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (t *http2Server) getOutFlowWindow() int64 { @@ -1563,11 +1456,7 @@ func getJitter(v time.Duration) time.Duration { } // Generate a jitter between +/- 10% of the value. r := int64(v / 10) -<<<<<<< HEAD j := rand.Int64N(2*r) - r -======= - j := rand.Int63n(2*r) - r ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return time.Duration(j) } diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 7f7599b303..2859b87755 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -27,10 +27,6 @@ import ( "fmt" "io" "net" -<<<<<<< HEAD -======= - "strings" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "sync" "sync/atomic" "time" @@ -42,10 +38,6 @@ import ( "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" -<<<<<<< HEAD -======= - "google.golang.org/grpc/resolver" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" @@ -139,11 +131,7 @@ type recvBufferReader struct { err error } -<<<<<<< HEAD func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) { -======= -func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if r.err != nil { return 0, r.err } @@ -152,15 +140,9 @@ func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) { return n, nil } if r.closeStream != nil { -<<<<<<< HEAD n, r.err = r.readMessageHeaderClient(header) } else { n, r.err = r.readMessageHeader(header) -======= - n, r.err = r.readHeaderClient(header) - } else { - n, r.err = r.readHeader(header) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return n, r.err } @@ -190,20 +172,12 @@ func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) { return buf, r.err } -<<<<<<< HEAD func (r *recvBufferReader) readMessageHeader(header []byte) (n int, err error) { -======= -func (r *recvBufferReader) readHeader(header []byte) (n int, err error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) select { case <-r.ctxDone: return 0, ContextErr(r.ctx.Err()) case m := <-r.recv.get(): -<<<<<<< HEAD return r.readMessageHeaderAdditional(m, header) -======= - return r.readHeaderAdditional(m, header) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -216,11 +190,7 @@ func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) { } } -<<<<<<< HEAD func (r *recvBufferReader) readMessageHeaderClient(header []byte) (n int, err error) { -======= -func (r *recvBufferReader) readHeaderClient(header []byte) (n 
int, err error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // If the context is canceled, then closes the stream with nil metadata. // closeStream writes its error parameter to r.recv as a recvMsg. // r.readAdditional acts on that message and returns the necessary error. @@ -241,15 +211,9 @@ func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) { // faster. r.closeStream(ContextErr(r.ctx.Err())) m := <-r.recv.get() -<<<<<<< HEAD return r.readMessageHeaderAdditional(m, header) case m := <-r.recv.get(): return r.readMessageHeaderAdditional(m, header) -======= - return r.readHeaderAdditional(m, header) - case m := <-r.recv.get(): - return r.readHeaderAdditional(m, header) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } @@ -280,11 +244,7 @@ func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) { } } -<<<<<<< HEAD func (r *recvBufferReader) readMessageHeaderAdditional(m recvMsg, header []byte) (n int, err error) { -======= -func (r *recvBufferReader) readHeaderAdditional(m recvMsg, header []byte) (n int, err error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) r.recv.load() if m.err != nil { if m.buffer != nil { @@ -326,19 +286,8 @@ const ( // Stream represents an RPC in the transport layer. type Stream struct { id uint32 -<<<<<<< HEAD ctx context.Context // the associated context of the stream method string // the associated RPC method of the stream -======= - st ServerTransport // nil for client side Stream - ct ClientTransport // nil for server side Stream - ctx context.Context // the associated context of the stream - cancel context.CancelFunc // always nil for client side Stream - done chan struct{} // closed at the end of stream to unblock writers. On the client side. - doneFunc func() // invoked at the end of stream on client side. - ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance) - method string // the associated RPC method of the stream ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) recvCompress string sendCompress string buf *recvBuffer @@ -346,17 +295,10 @@ type Stream struct { fc *inFlow wq *writeQuota -<<<<<<< HEAD -======= - // Holds compressor names passed in grpc-accept-encoding metadata from the - // client. This is empty for the client side stream. - clientAdvertisedCompressors string ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Callback to state application's intentions to read data. This // is used to adjust flow control, if needed. requestRead func(int) -<<<<<<< HEAD state streamState // contentSubtype is the content-subtype for requests. @@ -364,53 +306,6 @@ type Stream struct { contentSubtype string trailer metadata.MD // the key-value map of trailer metadata. -======= - headerChan chan struct{} // closed to indicate the end of header metadata. - headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. - // headerValid indicates whether a valid header was received. Only - // meaningful after headerChan is closed (always call waitOnHeader() before - // reading its value). Not valid on server side. - headerValid bool - headerWireLength int // Only set on server side. - - // hdrMu protects header and trailer metadata on the server-side. - hdrMu sync.Mutex - // On client side, header keeps the received header metadata. - // - // On server side, header keeps the header set by SetHeader(). 
The complete - // header will merged into this after t.WriteHeader() is called. - header metadata.MD - trailer metadata.MD // the key-value map of trailer metadata. - - noHeaders bool // set if the client never received headers (set only after the stream is done). - - // On the server-side, headerSent is atomically set to 1 when the headers are sent out. - headerSent uint32 - - state streamState - - // On client-side it is the status error received from the server. - // On server-side it is unused. - status *status.Status - - bytesReceived uint32 // indicates whether any bytes have been received on this stream - unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream - - // contentSubtype is the content-subtype for requests. - // this must be lowercase or the behavior is undefined. - contentSubtype string -} - -// isHeaderSent is only valid on the server-side. -func (s *Stream) isHeaderSent() bool { - return atomic.LoadUint32(&s.headerSent) == 1 -} - -// updateHeaderSent updates headerSent and returns true -// if it was already set. It is valid only on server-side. -func (s *Stream) updateHeaderSent() bool { - return atomic.SwapUint32(&s.headerSent, 1) == 1 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (s *Stream) swapState(st streamState) streamState { @@ -425,119 +320,12 @@ func (s *Stream) getState() streamState { return streamState(atomic.LoadUint32((*uint32)(&s.state))) } -<<<<<<< HEAD // Trailer returns the cached trailer metadata. Note that if it is not called // after the entire stream is done, it could return an empty MD. // It can be safely read only after stream has ended that is either read // or write have returned io.EOF. func (s *Stream) Trailer() metadata.MD { return s.trailer.Copy() -======= -func (s *Stream) waitOnHeader() { - if s.headerChan == nil { - // On the server headerChan is always nil since a stream originates - // only after having received headers. - return - } - select { - case <-s.ctx.Done(): - // Close the stream to prevent headers/trailers from changing after - // this function returns. - s.ct.CloseStream(s, ContextErr(s.ctx.Err())) - // headerChan could possibly not be closed yet if closeStream raced - // with operateHeaders; wait until it is closed explicitly here. - <-s.headerChan - case <-s.headerChan: - } -} - -// RecvCompress returns the compression algorithm applied to the inbound -// message. It is empty string if there is no compression applied. -func (s *Stream) RecvCompress() string { - s.waitOnHeader() - return s.recvCompress -} - -// SetSendCompress sets the compression algorithm to the stream. -func (s *Stream) SetSendCompress(name string) error { - if s.isHeaderSent() || s.getState() == streamDone { - return errors.New("transport: set send compressor called after headers sent or stream done") - } - - s.sendCompress = name - return nil -} - -// SendCompress returns the send compressor name. -func (s *Stream) SendCompress() string { - return s.sendCompress -} - -// ClientAdvertisedCompressors returns the compressor names advertised by the -// client via grpc-accept-encoding header. -func (s *Stream) ClientAdvertisedCompressors() []string { - values := strings.Split(s.clientAdvertisedCompressors, ",") - for i, v := range values { - values[i] = strings.TrimSpace(v) - } - return values -} - -// Done returns a channel which is closed when it receives the final status -// from the server. 
-func (s *Stream) Done() <-chan struct{} { - return s.done -} - -// Header returns the header metadata of the stream. -// -// On client side, it acquires the key-value pairs of header metadata once it is -// available. It blocks until i) the metadata is ready or ii) there is no header -// metadata or iii) the stream is canceled/expired. -// -// On server side, it returns the out header after t.WriteHeader is called. It -// does not block and must not be called until after WriteHeader. -func (s *Stream) Header() (metadata.MD, error) { - if s.headerChan == nil { - // On server side, return the header in stream. It will be the out - // header after t.WriteHeader is called. - return s.header.Copy(), nil - } - s.waitOnHeader() - - if !s.headerValid || s.noHeaders { - return nil, s.status.Err() - } - - return s.header.Copy(), nil -} - -// TrailersOnly blocks until a header or trailers-only frame is received and -// then returns true if the stream was trailers-only. If the stream ends -// before headers are received, returns true, nil. Client-side only. -func (s *Stream) TrailersOnly() bool { - s.waitOnHeader() - return s.noHeaders -} - -// Trailer returns the cached trailer metadata. Note that if it is not called -// after the entire stream is done, it could return an empty MD. Client -// side only. -// It can be safely read only after stream has ended that is either read -// or write have returned io.EOF. -func (s *Stream) Trailer() metadata.MD { - c := s.trailer.Copy() - return c -} - -// ContentSubtype returns the content-subtype for a request. For example, a -// content-subtype of "proto" will result in a content-type of -// "application/grpc+proto". This will always be lowercase. See -// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for -// more details. -func (s *Stream) ContentSubtype() string { - return s.contentSubtype ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Context returns the context of the stream. @@ -545,80 +333,15 @@ func (s *Stream) Context() context.Context { return s.ctx } -<<<<<<< HEAD -======= -// SetContext sets the context of the stream. This will be deleted once the -// stats handler callouts all move to gRPC layer. -func (s *Stream) SetContext(ctx context.Context) { - s.ctx = ctx -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Method returns the method for the stream. func (s *Stream) Method() string { return s.method } -<<<<<<< HEAD -======= -// Status returns the status received from the server. -// Status can be read safely only after the stream has ended, -// that is, after Done() is closed. -func (s *Stream) Status() *status.Status { - return s.status -} - -// HeaderWireLength returns the size of the headers of the stream as received -// from the wire. Valid only on the server. -func (s *Stream) HeaderWireLength() int { - return s.headerWireLength -} - -// SetHeader sets the header metadata. This can be called multiple times. -// Server side only. -// This should not be called in parallel to other data writes. -func (s *Stream) SetHeader(md metadata.MD) error { - if md.Len() == 0 { - return nil - } - if s.isHeaderSent() || s.getState() == streamDone { - return ErrIllegalHeaderWrite - } - s.hdrMu.Lock() - s.header = metadata.Join(s.header, md) - s.hdrMu.Unlock() - return nil -} - -// SendHeader sends the given header metadata. The given metadata is -// combined with any metadata set by previous calls to SetHeader and -// then written to the transport stream. 
-func (s *Stream) SendHeader(md metadata.MD) error { - return s.st.WriteHeader(s, md) -} - -// SetTrailer sets the trailer metadata which will be sent with the RPC status -// by the server. This can be called multiple times. Server side only. -// This should not be called parallel to other data writes. -func (s *Stream) SetTrailer(md metadata.MD) error { - if md.Len() == 0 { - return nil - } - if s.getState() == streamDone { - return ErrIllegalHeaderWrite - } - s.hdrMu.Lock() - s.trailer = metadata.Join(s.trailer, md) - s.hdrMu.Unlock() - return nil -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (s *Stream) write(m recvMsg) { s.buf.put(m) } -<<<<<<< HEAD // ReadMessageHeader reads data into the provided header slice from the stream. // It first checks if there was an error during a previous read operation and // returns it if present. It then requests a read operation for the length of @@ -628,29 +351,13 @@ func (s *Stream) write(m recvMsg) { // unexpected end of the stream. The method returns any error encountered during // the read process or nil if the header was successfully read. func (s *Stream) ReadMessageHeader(header []byte) (err error) { -======= -// ReadHeader reads data into the provided header slice from the stream. It -// first checks if there was an error during a previous read operation and -// returns it if present. It then requests a read operation for the length of -// the header. It continues to read from the stream until the entire header -// slice is filled or an error occurs. If an `io.EOF` error is encountered -// with partially read data, it is converted to `io.ErrUnexpectedEOF` to -// indicate an unexpected end of the stream. The method returns any error -// encountered during the read process or nil if the header was successfully -// read. -func (s *Stream) ReadHeader(header []byte) (err error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Don't request a read if there was an error earlier if er := s.trReader.er; er != nil { return er } s.requestRead(len(header)) for len(header) != 0 { -<<<<<<< HEAD n, err := s.trReader.ReadMessageHeader(header) -======= - n, err := s.trReader.ReadHeader(header) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) header = header[n:] if len(header) == 0 { err = nil @@ -666,11 +373,7 @@ func (s *Stream) ReadHeader(header []byte) (err error) { } // Read reads n bytes from the wire for this stream. -<<<<<<< HEAD func (s *Stream) read(n int) (data mem.BufferSlice, err error) { -======= -func (s *Stream) Read(n int) (data mem.BufferSlice, err error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Don't request a read if there was an error earlier if er := s.trReader.er; er != nil { return nil, er @@ -710,13 +413,8 @@ type transportReader struct { er error } -<<<<<<< HEAD func (t *transportReader) ReadMessageHeader(header []byte) (int, error) { n, err := t.reader.ReadMessageHeader(header) -======= -func (t *transportReader) ReadHeader(header []byte) (int, error) { - n, err := t.reader.ReadHeader(header) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { t.er = err return 0, err @@ -735,20 +433,6 @@ func (t *transportReader) Read(n int) (mem.Buffer, error) { return buf, nil } -<<<<<<< HEAD -======= -// BytesReceived indicates whether any bytes have been received on this stream. -func (s *Stream) BytesReceived() bool { - return atomic.LoadUint32(&s.bytesReceived) == 1 -} - -// Unprocessed indicates whether the server did not process this stream -- -// i.e. 
it sent a refused stream or GOAWAY including this stream ID. -func (s *Stream) Unprocessed() bool { - return atomic.LoadUint32(&s.unprocessed) == 1 -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // GoString is implemented by Stream so context.String() won't // race when printing %#v. func (s *Stream) GoString() string { @@ -824,21 +508,9 @@ type ConnectOptions struct { BufferPool mem.BufferPool } -<<<<<<< HEAD // WriteOptions provides additional hints and information for message // transmission. type WriteOptions struct { -======= -// NewClientTransport establishes the transport with the required ConnectOptions -// and returns it to the caller. -func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, addr, opts, onClose) -} - -// Options provides additional hints and information for message -// transmission. -type Options struct { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Last indicates whether this write is the last piece for // this stream. Last bool @@ -887,23 +559,8 @@ type ClientTransport interface { // It does not block. GracefulClose() -<<<<<<< HEAD // NewStream creates a Stream for an RPC. NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) -======= - // Write sends the data for the given stream. A nil stream indicates - // the write is to be performed on the transport as a whole. - Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error - - // NewStream creates a Stream for an RPC. - NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) - - // CloseStream clears the footprint of a stream when the stream is - // not needed any more. The err indicates the error incurred when - // CloseStream is called. Must be called when a stream is finished - // unless the associated transport is closing. - CloseStream(stream *Stream, err error) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Error returns a channel that is closed when some I/O error // happens. Typically the caller should have a goroutine to monitor @@ -923,15 +580,6 @@ type ClientTransport interface { // RemoteAddr returns the remote network address. RemoteAddr() net.Addr -<<<<<<< HEAD -======= - - // IncrMsgSent increments the number of message sent through this transport. - IncrMsgSent() - - // IncrMsgRecv increments the number of message received through this transport. - IncrMsgRecv() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // ServerTransport is the common interface for all gRPC server-side transport @@ -941,23 +589,7 @@ type ClientTransport interface { // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. -<<<<<<< HEAD HandleStreams(context.Context, func(*ServerStream)) -======= - HandleStreams(context.Context, func(*Stream)) - - // WriteHeader sends the header metadata for the given stream. - // WriteHeader may not be called on all streams. - WriteHeader(s *Stream, md metadata.MD) error - - // Write sends the data for the given stream. - // Write may not be called on all streams. - Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error - - // WriteStatus sends the status of a stream to the client. WriteStatus is - // the final call made on a stream and always occurs. 
- WriteStatus(s *Stream, st *status.Status) error ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their @@ -969,7 +601,6 @@ type ServerTransport interface { // Drain notifies the client this ServerTransport stops accepting new RPCs. Drain(debugData string) -<<<<<<< HEAD } type internalServerTransport interface { @@ -978,14 +609,6 @@ type internalServerTransport interface { write(s *ServerStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error writeStatus(s *ServerStream, st *status.Status) error incrMsgRecv() -======= - - // IncrMsgSent increments the number of message sent through this transport. - IncrMsgSent() - - // IncrMsgRecv increments the number of message received through this transport. - IncrMsgRecv() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // connectionErrorf creates an ConnectionError with the specified error description. diff --git a/vendor/google.golang.org/grpc/internal/wrr/random.go b/vendor/google.golang.org/grpc/internal/wrr/random.go index f0c4e060c6..0913ed6764 100644 --- a/vendor/google.golang.org/grpc/internal/wrr/random.go +++ b/vendor/google.golang.org/grpc/internal/wrr/random.go @@ -19,11 +19,7 @@ package wrr import ( "fmt" -<<<<<<< HEAD rand "math/rand/v2" -======= - "math/rand" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "sort" ) @@ -50,31 +46,19 @@ func NewRandom() WRR { return &randomWRR{} } -<<<<<<< HEAD var randInt64n = rand.Int64N -======= -var randInt63n = rand.Int63n ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (rw *randomWRR) Next() (item any) { if len(rw.items) == 0 { return nil } if rw.equalWeights { -<<<<<<< HEAD return rw.items[randInt64n(int64(len(rw.items)))].item -======= - return rw.items[randInt63n(int64(len(rw.items)))].item ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } sumOfWeights := rw.items[len(rw.items)-1].accumulatedWeight // Random number in [0, sumOfWeights). -<<<<<<< HEAD randomWeight := randInt64n(sumOfWeights) -======= - randomWeight := randInt63n(sumOfWeights) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Item's accumulated weights are in ascending order, because item's weight >= 0. // Binary search rw.items to find first item whose accumulatedWeight > randomWeight // The return i is guaranteed to be in range [0, len(rw.items)) because randomWeight < last item's accumulatedWeight diff --git a/vendor/google.golang.org/grpc/internal/xds/matcher/matcher_header.go b/vendor/google.golang.org/grpc/internal/xds/matcher/matcher_header.go index 1c288cec99..780257ec33 100644 --- a/vendor/google.golang.org/grpc/internal/xds/matcher/matcher_header.go +++ b/vendor/google.golang.org/grpc/internal/xds/matcher/matcher_header.go @@ -36,19 +36,11 @@ type HeaderMatcher interface { String() string } -<<<<<<< HEAD // valueFromMD retrieves metadata from context. If there are // multiple values, the values are concatenated with "," (comma and no space). // // All header matchers only match against the comma-concatenated string. func valueFromMD(md metadata.MD, key string) (string, bool) { -======= -// mdValuesFromOutgoingCtx retrieves metadata from context. If there are -// multiple values, the values are concatenated with "," (comma and no space). -// -// All header matchers only match against the comma-concatenated string. 
-func mdValuesFromOutgoingCtx(md metadata.MD, key string) (string, bool) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) vs, ok := md[key] if !ok { return "", false @@ -71,11 +63,7 @@ func NewHeaderExactMatcher(key, exact string, invert bool) *HeaderExactMatcher { // Match returns whether the passed in HTTP Headers match according to the // HeaderExactMatcher. func (hem *HeaderExactMatcher) Match(md metadata.MD) bool { -<<<<<<< HEAD v, ok := valueFromMD(md, hem.key) -======= - v, ok := mdValuesFromOutgoingCtx(md, hem.key) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !ok { return false } @@ -102,11 +90,7 @@ func NewHeaderRegexMatcher(key string, re *regexp.Regexp, invert bool) *HeaderRe // Match returns whether the passed in HTTP Headers match according to the // HeaderRegexMatcher. func (hrm *HeaderRegexMatcher) Match(md metadata.MD) bool { -<<<<<<< HEAD v, ok := valueFromMD(md, hrm.key) -======= - v, ok := mdValuesFromOutgoingCtx(md, hrm.key) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !ok { return false } @@ -133,11 +117,7 @@ func NewHeaderRangeMatcher(key string, start, end int64, invert bool) *HeaderRan // Match returns whether the passed in HTTP Headers match according to the // HeaderRangeMatcher. func (hrm *HeaderRangeMatcher) Match(md metadata.MD) bool { -<<<<<<< HEAD v, ok := valueFromMD(md, hrm.key) -======= - v, ok := mdValuesFromOutgoingCtx(md, hrm.key) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !ok { return false } @@ -169,11 +149,7 @@ func NewHeaderPresentMatcher(key string, present bool, invert bool) *HeaderPrese // Match returns whether the passed in HTTP Headers match according to the // HeaderPresentMatcher. func (hpm *HeaderPresentMatcher) Match(md metadata.MD) bool { -<<<<<<< HEAD vs, ok := valueFromMD(md, hpm.key) -======= - vs, ok := mdValuesFromOutgoingCtx(md, hpm.key) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) present := ok && len(vs) > 0 // TODO: Are we sure we need this len(vs) > 0? return present == hpm.present } @@ -198,11 +174,7 @@ func NewHeaderPrefixMatcher(key string, prefix string, invert bool) *HeaderPrefi // Match returns whether the passed in HTTP Headers match according to the // HeaderPrefixMatcher. func (hpm *HeaderPrefixMatcher) Match(md metadata.MD) bool { -<<<<<<< HEAD v, ok := valueFromMD(md, hpm.key) -======= - v, ok := mdValuesFromOutgoingCtx(md, hpm.key) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !ok { return false } @@ -229,11 +201,7 @@ func NewHeaderSuffixMatcher(key string, suffix string, invert bool) *HeaderSuffi // Match returns whether the passed in HTTP Headers match according to the // HeaderSuffixMatcher. func (hsm *HeaderSuffixMatcher) Match(md metadata.MD) bool { -<<<<<<< HEAD v, ok := valueFromMD(md, hsm.key) -======= - v, ok := mdValuesFromOutgoingCtx(md, hsm.key) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !ok { return false } @@ -263,11 +231,7 @@ func NewHeaderContainsMatcher(key string, contains string, invert bool) *HeaderC // Match returns whether the passed in HTTP Headers match according to the // HeaderContainsMatcher. 
func (hcm *HeaderContainsMatcher) Match(md metadata.MD) bool { -<<<<<<< HEAD v, ok := valueFromMD(md, hcm.key) -======= - v, ok := mdValuesFromOutgoingCtx(md, hcm.key) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !ok { return false } @@ -298,11 +262,7 @@ func NewHeaderStringMatcher(key string, sm StringMatcher, invert bool) *HeaderSt // Match returns whether the passed in HTTP Headers match according to the // specified StringMatcher. func (hsm *HeaderStringMatcher) Match(md metadata.MD) bool { -<<<<<<< HEAD v, ok := valueFromMD(md, hsm.key) -======= - v, ok := mdValuesFromOutgoingCtx(md, hsm.key) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !ok { return false } diff --git a/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go b/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go index 1416a5ff7d..07bb59cee5 100644 --- a/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go +++ b/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go @@ -20,10 +20,7 @@ import ( "errors" "fmt" "net" -<<<<<<< HEAD "net/netip" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "regexp" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" @@ -348,12 +345,8 @@ func newRemoteIPMatcher(cidrRange *v3corepb.CidrRange) (*remoteIPMatcher, error) } func (sim *remoteIPMatcher) match(data *rpcData) bool { -<<<<<<< HEAD ip, _ := netip.ParseAddr(data.peerInfo.Addr.String()) return sim.ipNet.Contains(net.IP(ip.AsSlice())) -======= - return sim.ipNet.Contains(net.IP(net.ParseIP(data.peerInfo.Addr.String()))) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } type localIPMatcher struct { @@ -370,12 +363,8 @@ func newLocalIPMatcher(cidrRange *v3corepb.CidrRange) (*localIPMatcher, error) { } func (dim *localIPMatcher) match(data *rpcData) bool { -<<<<<<< HEAD ip, _ := netip.ParseAddr(data.localAddr.String()) return dim.ipNet.Contains(net.IP(ip.AsSlice())) -======= - return dim.ipNet.Contains(net.IP(net.ParseIP(data.localAddr.String()))) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // portMatcher matches on whether the destination port of the RPC matches the diff --git a/vendor/google.golang.org/grpc/internal/xds/rbac/rbac_engine.go b/vendor/google.golang.org/grpc/internal/xds/rbac/rbac_engine.go index fb33537791..8f376c4003 100644 --- a/vendor/google.golang.org/grpc/internal/xds/rbac/rbac_engine.go +++ b/vendor/google.golang.org/grpc/internal/xds/rbac/rbac_engine.go @@ -219,12 +219,9 @@ func newRPCData(ctx context.Context) (*rpcData, error) { if !ok { return nil, errors.New("missing method in incoming context") } -<<<<<<< HEAD // gRPC-Go strips :path from the headers given to the application, but RBAC should be // able to match against it. md[":path"] = []string{mn} -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The connection is needed in order to find the destination address and // port of the incoming RPC Call. diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go index ab92111273..65002e2cc8 100644 --- a/vendor/google.golang.org/grpc/mem/buffer_slice.go +++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go @@ -22,14 +22,11 @@ import ( "io" ) -<<<<<<< HEAD const ( // 32 KiB is what io.Copy uses. readAllBufSize = 32 * 1024 ) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // BufferSlice offers a means to represent data that spans one or more Buffer // instances. 
A BufferSlice is meant to be immutable after creation, and methods // like Ref create and return copies of the slice. This is why all methods have @@ -227,7 +224,6 @@ func (w *writer) Write(p []byte) (n int, err error) { // NewWriter wraps the given BufferSlice and BufferPool to implement the // io.Writer interface. Every call to Write copies the contents of the given -<<<<<<< HEAD // buffer into a new Buffer pulled from the given pool and the Buffer is // added to the given BufferSlice. func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer { @@ -283,10 +279,3 @@ nextBuffer: } } } -======= -// buffer into a new Buffer pulled from the given pool and the Buffer is added to -// the given BufferSlice. -func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer { - return &writer{buffers: buffers, pool: pool} -} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index 02de738099..ee0ff969af 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -62,11 +62,7 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { materializedData := data.Materialize() data.Free() -<<<<<<< HEAD p.encodedData = mem.BufferSlice{mem.SliceBuffer(materializedData)} -======= - p.encodedData = mem.BufferSlice{mem.NewBuffer(&materializedData, nil)} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // TODO: it should be possible to grab the bufferPool from the underlying // stream implementation with a type cast to its actual type (such as @@ -80,11 +76,7 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { if p.pf.isCompressed() { materializedCompData := compData.Materialize() compData.Free() -<<<<<<< HEAD compData = mem.BufferSlice{mem.SliceBuffer(materializedCompData)} -======= - compData = mem.BufferSlice{mem.NewBuffer(&materializedCompData, nil)} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf) diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index ecede0885b..8eb1cf3bcf 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -22,10 +22,7 @@ package resolver import ( "context" -<<<<<<< HEAD "errors" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "fmt" "net" "net/url" @@ -241,13 +238,8 @@ type ClientConn interface { // UpdateState can be omitted. UpdateState(State) error // ReportError notifies the ClientConn that the Resolver encountered an -<<<<<<< HEAD // error. The ClientConn then forwards this error to the load balancing // policy. -======= - // error. The ClientConn will notify the load balancer and begin calling - // ResolveNow on the Resolver with exponential backoff. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ReportError(error) // NewAddress is called by resolver to notify ClientConn a new list // of resolved addresses. @@ -339,7 +331,6 @@ type AuthorityOverrider interface { // typically in line, and must keep it unchanged. OverrideAuthority(Target) string } -<<<<<<< HEAD // ValidateEndpoints validates endpoints from a petiole policy's perspective. // Petiole policies should call this before calling into their children. 
See @@ -357,5 +348,3 @@ func ValidateEndpoints(endpoints []Endpoint) error { } return errors.New("endpoints list contains no addresses") } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 8701787699..9fac2b08b4 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -622,11 +622,7 @@ func (pf payloadFormat) isCompressed() bool { } type streamReader interface { -<<<<<<< HEAD ReadMessageHeader(header []byte) error -======= - ReadHeader(header []byte) error ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Read(n int) (mem.BufferSlice, error) } @@ -660,11 +656,7 @@ type parser struct { // that the underlying streamReader must not return an incompatible // error. func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) { -<<<<<<< HEAD err := p.r.ReadMessageHeader(p.header[:]) -======= - err := p.r.ReadHeader(p.header[:]) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return 0, nil, err } @@ -672,12 +664,6 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSl pf := payloadFormat(p.header[0]) length := binary.BigEndian.Uint32(p.header[1:]) -<<<<<<< HEAD -======= - if length == 0 { - return pf, nil, nil - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if int64(length) > int64(maxInt) { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt) } @@ -828,11 +814,7 @@ func (p *payloadInfo) free() { // the buffer is no longer needed. // TODO: Refactor this function to reduce the number of arguments. // See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists -<<<<<<< HEAD func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, -======= -func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) (out mem.BufferSlice, err error) { pf, compressed, err := p.recvMsg(maxReceiveMessageSize) if err != nil { @@ -856,11 +838,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei var uncompressedBuf []byte uncompressedBuf, err = dc.Do(compressed.Reader()) if err == nil { -<<<<<<< HEAD out = mem.BufferSlice{mem.SliceBuffer(uncompressedBuf)} -======= - out = mem.BufferSlice{mem.NewBuffer(&uncompressedBuf, nil)} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } size = len(uncompressedBuf) } else { @@ -896,34 +874,7 @@ func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMes return nil, 0, err } -<<<<<<< HEAD out, err := mem.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1), pool) -======= - // TODO: Can/should this still be preserved with the new BufferSlice API? Are - // there any actual benefits to allocating a single large buffer instead of - // multiple smaller ones? 
- //if sizer, ok := compressor.(interface { - // DecompressedSize(compressedBytes []byte) int - //}); ok { - // if size := sizer.DecompressedSize(d); size >= 0 { - // if size > maxReceiveMessageSize { - // return nil, size, nil - // } - // // size is used as an estimate to size the buffer, but we - // // will read more data if available. - // // +MinRead so ReadFrom will not reallocate if size is correct. - // // - // // TODO: If we ensure that the buffer size is the same as the DecompressedSize, - // // we can also utilize the recv buffer pool here. - // buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) - // bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - // return buf.Bytes(), int(bytesRead), err - // } - //} - - var out mem.BufferSlice - _, err = io.Copy(mem.NewWriter(&out, pool), io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { out.Free() return nil, 0, err @@ -931,7 +882,6 @@ func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMes return out, out.Len(), nil } -<<<<<<< HEAD type recvCompressor interface { RecvCompress() string } @@ -940,12 +890,6 @@ type recvCompressor interface { // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? func recv(p *parser, c baseCodec, s recvCompressor, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { -======= -// For the two compressor parameters, both should not be set, but if they are, -// dc takes precedence over compressor. -// TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer) if err != nil { return err diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index d56822a7b5..9d5b2884d1 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -87,21 +87,13 @@ func init() { var statusOK = status.New(codes.OK, "") var logger = grpclog.Component("core") -<<<<<<< HEAD // MethodHandler is a function type that processes a unary RPC method call. type MethodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) -======= -type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // MethodDesc represents an RPC service's method specification. type MethodDesc struct { MethodName string -<<<<<<< HEAD Handler MethodHandler -======= - Handler methodHandler ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // ServiceDesc represents an RPC service's specification. @@ -630,13 +622,8 @@ func bufferPool(bufferPool mem.BufferPool) ServerOption { // workload (assuming a QPS of a few thousand requests/sec). const serverWorkerResetThreshold = 1 << 16 -<<<<<<< HEAD // serverWorker blocks on a *transport.ServerStream channel forever and waits // for data to be fed by serveStreams. 
This allows multiple requests to be -======= -// serverWorker blocks on a *transport.Stream channel forever and waits for -// data to be fed by serveStreams. This allows multiple requests to be ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // processed by the same goroutine, removing the need for expensive stack // re-allocations (see the runtime.morestack problem [1]). // @@ -1034,11 +1021,7 @@ func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, }() streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) -<<<<<<< HEAD st.HandleStreams(ctx, func(stream *transport.ServerStream) { -======= - st.HandleStreams(ctx, func(stream *transport.Stream) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) s.handlersWG.Add(1) streamQuota.acquire() f := func() { @@ -1154,11 +1137,7 @@ func (s *Server) incrCallsFailed() { s.channelz.ServerMetrics.CallsFailed.Add(1) } -<<<<<<< HEAD func (s *Server) sendResponse(ctx context.Context, stream *transport.ServerStream, msg any, cp Compressor, opts *transport.WriteOptions, comp encoding.Compressor) error { -======= -func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err) @@ -1187,11 +1166,7 @@ func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, if payloadLen > s.opts.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize) } -<<<<<<< HEAD err = stream.Write(hdr, payload, opts) -======= - err = t.Write(stream, hdr, payload, opts) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err == nil { if len(s.opts.statsHandlers) != 0 { for _, sh := range s.opts.statsHandlers { @@ -1238,11 +1213,7 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info } } -<<<<<<< HEAD func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { -======= -func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) shs := s.opts.statsHandlers if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { @@ -1350,11 +1321,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor decomp = encoding.GetCompressor(rc) if decomp == nil { st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) -<<<<<<< HEAD stream.WriteStatus(st) -======= - t.WriteStatus(stream, st) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return st.Err() } } @@ -1388,16 +1355,11 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true) if err != nil { -<<<<<<< HEAD if e := stream.WriteStatus(status.Convert(err)); e != nil { -======= - if e := t.WriteStatus(stream, status.Convert(err)); e != nil { ->>>>>>> 70e0318b1 ([WIP] add 
archivista storage backend) channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } -<<<<<<< HEAD freed := false dataFree := func() { if !freed { @@ -1408,13 +1370,6 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor defer dataFree() df := func(v any) error { defer dataFree() -======= - defer d.Free() - if channelz.IsOn() { - t.IncrMsgRecv() - } - df := func(v any) error { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } @@ -1455,11 +1410,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor trInfo.tr.LazyLog(stringer(appStatus.Message()), true) trInfo.tr.SetError() } -<<<<<<< HEAD if e := stream.WriteStatus(appStatus); e != nil { -======= - if e := t.WriteStatus(stream, appStatus); e != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } if len(binlogs) != 0 { @@ -1486,32 +1437,20 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor if trInfo != nil { trInfo.tr.LazyLog(stringer("OK"), false) } -<<<<<<< HEAD opts := &transport.WriteOptions{Last: true} -======= - opts := &transport.Options{Last: true} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Server handler could have set new compressor by calling SetSendCompressor. // In case it is set, we need to use it for compressing outbound message. if stream.SendCompress() != sendCompressorName { comp = encoding.GetCompressor(stream.SendCompress()) } -<<<<<<< HEAD if err := s.sendResponse(ctx, stream, reply, cp, opts, comp); err != nil { -======= - if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err == io.EOF { // The entire stream is done (for unary RPC only). return err } if sts, ok := status.FromError(err); ok { -<<<<<<< HEAD if e := stream.WriteStatus(sts); e != nil { -======= - if e := t.WriteStatus(stream, sts); e != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } } else { @@ -1551,12 +1490,6 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor binlog.Log(ctx, sm) } } -<<<<<<< HEAD -======= - if channelz.IsOn() { - t.IncrMsgSent() - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if trInfo != nil { trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) } @@ -1572,11 +1505,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor binlog.Log(ctx, st) } } -<<<<<<< HEAD return stream.WriteStatus(statusOK) -======= - return t.WriteStatus(stream, statusOK) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // chainStreamServerInterceptors chains all stream server interceptors into one. 
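Note for reviewers wading through the vendored churn above: every server.go conflict resolves the same way, keeping the newer grpc-go side (v1.70.0 per the version.go hunk later in this patch), in which per-stream operations are methods on *transport.ServerStream (stream.Write(...), stream.WriteStatus(...)) rather than transport-level calls that take the stream as an argument (t.Write(stream, ...), t.WriteStatus(stream, ...)), and the explicit channelz IncrMsgSent/IncrMsgRecv bookkeeping is gone. A minimal sketch of that call-shape change, using made-up types in place of the real internal ones:

package main

import "fmt"

// Old shape: transport-level writes that are handed the stream.
type transportOld struct{}

type stream struct{ id int }

func (transportOld) WriteStatus(s *stream, code int) {
	fmt.Printf("old: t.WriteStatus(stream %d, code %d)\n", s.id, code)
}

// New shape: the stream performs its own writes.
func (s *stream) WriteStatus(code int) {
	fmt.Printf("new: stream.WriteStatus(code %d) on stream %d\n", code, s.id)
}

func main() {
	s := &stream{id: 7}
	transportOld{}.WriteStatus(s, 0) // removed side's call shape
	s.WriteStatus(0)                 // call shape kept by this patch
}

Moving the operations onto the stream removes the need to thread the transport through processUnaryRPC, processStreamingRPC and sendResponse, which is exactly the signature churn visible in the hunks above.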
@@ -1615,11 +1544,7 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf } } -<<<<<<< HEAD func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { -======= -func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if channelz.IsOn() { s.incrCallsStarted() } @@ -1639,10 +1564,6 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran ctx = NewContextWithServerTransportStream(ctx, stream) ss := &serverStream{ ctx: ctx, -<<<<<<< HEAD -======= - t: t, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) s: stream, p: &parser{r: stream, bufferPool: s.opts.bufferPool}, codec: s.getCodec(stream.ContentSubtype()), @@ -1729,11 +1650,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran ss.decomp = encoding.GetCompressor(rc) if ss.decomp == nil { st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) -<<<<<<< HEAD ss.s.WriteStatus(st) -======= - t.WriteStatus(ss.s, st) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return st.Err() } } @@ -1802,11 +1719,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran binlog.Log(ctx, st) } } -<<<<<<< HEAD ss.s.WriteStatus(appStatus) -======= - t.WriteStatus(ss.s, appStatus) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // TODO: Should we log an error from WriteStatus here and below? return appErr } @@ -1824,17 +1737,10 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran binlog.Log(ctx, st) } } -<<<<<<< HEAD return ss.s.WriteStatus(statusOK) } func (s *Server) handleStream(t transport.ServerTransport, stream *transport.ServerStream) { -======= - return t.WriteStatus(ss.s, statusOK) -} - -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ctx := stream.Context() ctx = contextWithServer(ctx, s) var ti *traceInfo @@ -1864,11 +1770,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str ti.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) -<<<<<<< HEAD if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil { -======= - if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if ti != nil { ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ti.tr.SetError() @@ -1883,7 +1785,6 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str service := sm[:pos] method := sm[pos+1:] -<<<<<<< HEAD // FromIncomingContext is expensive: skip if there are no statsHandlers if len(s.opts.statsHandlers) > 0 { md, _ := metadata.FromIncomingContext(ctx) @@ -1898,19 +1799,6 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str Header: md, }) } -======= - md, _ := metadata.FromIncomingContext(ctx) - for _, sh := range s.opts.statsHandlers { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) - sh.HandleRPC(ctx, &stats.InHeader{ - FullMethod: stream.Method(), - RemoteAddr: t.Peer().Addr, - LocalAddr: t.Peer().LocalAddr, - Compression: 
stream.RecvCompress(), - WireLength: stream.HeaderWireLength(), - Header: md, - }) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // To have calls in stream callouts work. Will delete once all stats handler // calls come from the gRPC layer. @@ -1919,29 +1807,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str srv, knownService := s.services[service] if knownService { if md, ok := srv.methods[method]; ok { -<<<<<<< HEAD s.processUnaryRPC(ctx, stream, srv, md, ti) return } if sd, ok := srv.streams[method]; ok { s.processStreamingRPC(ctx, stream, srv, sd, ti) -======= - s.processUnaryRPC(ctx, t, stream, srv, md, ti) - return - } - if sd, ok := srv.streams[method]; ok { - s.processStreamingRPC(ctx, t, stream, srv, sd, ti) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return } } // Unknown service, or known server unknown method. if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { -<<<<<<< HEAD s.processStreamingRPC(ctx, stream, nil, unknownDesc, ti) -======= - s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return } var errDesc string @@ -1954,11 +1830,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str ti.tr.LazyPrintf("%s", errDesc) ti.tr.SetError() } -<<<<<<< HEAD if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil { -======= - if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if ti != nil { ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ti.tr.SetError() @@ -2233,11 +2105,7 @@ func SendHeader(ctx context.Context, md metadata.MD) error { // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. func SetSendCompressor(ctx context.Context, name string) error { -<<<<<<< HEAD stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream) -======= - stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !ok || stream == nil { return fmt.Errorf("failed to fetch the stream from the given context") } @@ -2259,11 +2127,7 @@ func SetSendCompressor(ctx context.Context, name string) error { // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. 
func ClientSupportedCompressors(ctx context.Context) ([]string, error) { -<<<<<<< HEAD stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream) -======= - stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !ok || stream == nil { return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) } diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 7b6ff9c85c..8d451e07c7 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -168,10 +168,7 @@ func init() { return parseServiceConfig(js, defaultMaxCallAttempts) } } -<<<<<<< HEAD -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult { if len(js) == 0 { return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} @@ -271,7 +268,6 @@ func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult { return &serviceconfig.ParseResult{Config: &sc} } -<<<<<<< HEAD func isValidRetryPolicy(jrp *jsonRetryPolicy) bool { return jrp.MaxAttempts > 1 && jrp.InitialBackoff > 0 && @@ -280,25 +276,13 @@ func isValidRetryPolicy(jrp *jsonRetryPolicy) bool { len(jrp.RetryableStatusCodes) > 0 } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalserviceconfig.RetryPolicy, err error) { if jrp == nil { return nil, nil } -<<<<<<< HEAD if !isValidRetryPolicy(jrp) { return nil, fmt.Errorf("invalid retry policy (%+v): ", jrp) -======= - if jrp.MaxAttempts <= 1 || - jrp.InitialBackoff <= 0 || - jrp.MaxBackoff <= 0 || - jrp.BackoffMultiplier <= 0 || - len(jrp.RetryableStatusCodes) == 0 { - logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) - return nil, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if jrp.MaxAttempts < maxAttempts { @@ -317,11 +301,7 @@ func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalservi return rp, nil } -<<<<<<< HEAD func minPointers(a, b *int) *int { -======= -func min(a, b *int) *int { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if *a < *b { return a } @@ -333,11 +313,7 @@ func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { return &defaultVal } if mcMax != nil && doptMax != nil { -<<<<<<< HEAD return minPointers(mcMax, doptMax) -======= - return min(mcMax, doptMax) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if mcMax != nil { return mcMax diff --git a/vendor/google.golang.org/grpc/stats/opentelemetry/LICENSE b/vendor/google.golang.org/grpc/stats/opentelemetry/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/google.golang.org/grpc/stats/opentelemetry/LICENSE +++ /dev/null @@ -1,202 +0,0 @@
[202 deleted lines: the verbatim Apache License, Version 2.0 text, removed along with the file]
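Stepping back to the service_config.go hunk just before the deleted LICENSE: the kept side extracts isValidRetryPolicy, and convertRetryPolicy now returns an error for an invalid retryPolicy (maxAttempts > 1; initialBackoff, maxBackoff and backoffMultiplier all positive; retryableStatusCodes non-empty), where the discarded side merely logged a warning and dropped the policy. A sketch of a client-side service config that satisfies those checks; the target address is a placeholder:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// This policy passes isValidRetryPolicy: maxAttempts > 1, positive
// backoffs and multiplier, and a non-empty retryableStatusCodes list.
const serviceConfig = `{
  "methodConfig": [{
    "name": [{}],
    "retryPolicy": {
      "maxAttempts": 3,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }]
}`

func main() {
	// "localhost:50051" is a placeholder target for illustration.
	cc, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(serviceConfig))
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()
}

Relatedly, the stream.go hunk further down swaps the old fully randomized retry delay (rand.Int63n up to the backoff cap) for the capped backoff with +/-20% jitter: cur *= 0.8 + 0.4*rand.Float64().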
diff --git a/vendor/google.golang.org/grpc/stats/opentelemetry/client_metrics.go b/vendor/google.golang.org/grpc/stats/opentelemetry/client_metrics.go index 7c58f534e0..265791e5a2 100644 --- a/vendor/google.golang.org/grpc/stats/opentelemetry/client_metrics.go +++ b/vendor/google.golang.org/grpc/stats/opentelemetry/client_metrics.go @@ -260,7 +260,6 @@ func (h *clientStatsHandler) processRPCEnd(ctx context.Context, ai *attemptInfo, } const ( -<<<<<<< HEAD // ClientAttemptStartedMetricName is the number of client call attempts // started. ClientAttemptStartedMetricName string = "grpc.client.attempt.started" @@ -276,20 +275,4 @@ const ( // ClientCallDurationMetricName is the time taken by gRPC to complete an RPC // from application's perspective. ClientCallDurationMetricName string = "grpc.client.call.duration" -======= - // ClientAttemptStarted is the number of client call attempts started. - ClientAttemptStarted estats.Metric = "grpc.client.attempt.started" - // ClientAttemptDuration is the end-to-end time taken to complete a client - // call attempt. - ClientAttemptDuration estats.Metric = "grpc.client.attempt.duration" - // ClientAttemptSentCompressedTotalMessageSize is the compressed message - // bytes sent per client call attempt. - ClientAttemptSentCompressedTotalMessageSize estats.Metric = "grpc.client.attempt.sent_total_compressed_message_size" - // ClientAttemptRcvdCompressedTotalMessageSize is the compressed message - // bytes received per call attempt. - ClientAttemptRcvdCompressedTotalMessageSize estats.Metric = "grpc.client.attempt.rcvd_total_compressed_message_size" - // ClientCallDuration is the time taken by gRPC to complete an RPC from - // application's perspective. - ClientCallDuration estats.Metric = "grpc.client.call.duration" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) diff --git a/vendor/google.golang.org/grpc/stats/opentelemetry/opentelemetry.go b/vendor/google.golang.org/grpc/stats/opentelemetry/opentelemetry.go index c78c1f958a..dcc424775f 100644 --- a/vendor/google.golang.org/grpc/stats/opentelemetry/opentelemetry.go +++ b/vendor/google.golang.org/grpc/stats/opentelemetry/opentelemetry.go @@ -16,13 +16,10 @@ // Package opentelemetry implements opentelemetry instrumentation code for // gRPC-Go clients and servers. -<<<<<<< HEAD // // For details on configuring opentelemetry and various instruments that this // package creates, see // [gRPC OpenTelemetry Metrics](https://grpc.io/docs/guides/opentelemetry-metrics/). -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) package opentelemetry import ( @@ -35,10 +32,7 @@ import ( estats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" -<<<<<<< HEAD "google.golang.org/grpc/stats" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) otelinternal "google.golang.org/grpc/stats/opentelemetry/internal" otelattribute "go.opentelemetry.io/otel/attribute" @@ -68,20 +62,13 @@ type Options struct { type MetricsOptions struct { // MeterProvider is the MeterProvider instance that will be used to create // instruments. To enable metrics collection, set a meter provider. If -<<<<<<< HEAD // unset, no metrics will be recorded. -======= - // unset, no metrics will be recorded. Any implementation knobs (i.e. views, - // bounds) set in the MeterProvider take precedence over the API calls from - // this interface. (i.e. it will create default views for unset views). 
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) MeterProvider otelmetric.MeterProvider // Metrics are the metrics to instrument. Will create instrument and record telemetry // for corresponding metric supported by the client and server // instrumentation components if applicable. If not set, the default metrics // will be recorded. -<<<<<<< HEAD Metrics *stats.MetricSet // MethodAttributeFilter is a function that determines whether to record the @@ -97,20 +84,6 @@ type MetricsOptions struct { // OptionalLabels specifies a list of optional labels to enable on any // metrics that support them. -======= - Metrics *estats.Metrics - - // MethodAttributeFilter is to record the method name of RPCs handled by - // grpc.UnknownServiceHandler, but take care to limit the values allowed, as - // allowing too many will increase cardinality and could cause severe memory - // or performance problems. On Client Side, pass a - // grpc.StaticMethodCallOption as a call option into Invoke or NewStream. - // This only applies for server side metrics. - MethodAttributeFilter func(string) bool - - // OptionalLabels are labels received from LB Policies that this component - // should add to metrics that record after receiving incoming metadata. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) OptionalLabels []string // pluginOption is used to get labels to attach to certain metrics, if set. @@ -240,11 +213,7 @@ type serverMetrics struct { callDuration otelmetric.Float64Histogram } -<<<<<<< HEAD func createInt64Counter(setOfMetrics map[string]bool, metricName string, meter otelmetric.Meter, options ...otelmetric.Int64CounterOption) otelmetric.Int64Counter { -======= -func createInt64Counter(setOfMetrics map[estats.Metric]bool, metricName estats.Metric, meter otelmetric.Meter, options ...otelmetric.Int64CounterOption) otelmetric.Int64Counter { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if _, ok := setOfMetrics[metricName]; !ok { return noop.Int64Counter{} } @@ -256,11 +225,7 @@ func createInt64Counter(setOfMetrics map[estats.Metric]bool, metricName estats.M return ret } -<<<<<<< HEAD func createFloat64Counter(setOfMetrics map[string]bool, metricName string, meter otelmetric.Meter, options ...otelmetric.Float64CounterOption) otelmetric.Float64Counter { -======= -func createFloat64Counter(setOfMetrics map[estats.Metric]bool, metricName estats.Metric, meter otelmetric.Meter, options ...otelmetric.Float64CounterOption) otelmetric.Float64Counter { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if _, ok := setOfMetrics[metricName]; !ok { return noop.Float64Counter{} } @@ -272,11 +237,7 @@ func createFloat64Counter(setOfMetrics map[estats.Metric]bool, metricName estats return ret } -<<<<<<< HEAD func createInt64Histogram(setOfMetrics map[string]bool, metricName string, meter otelmetric.Meter, options ...otelmetric.Int64HistogramOption) otelmetric.Int64Histogram { -======= -func createInt64Histogram(setOfMetrics map[estats.Metric]bool, metricName estats.Metric, meter otelmetric.Meter, options ...otelmetric.Int64HistogramOption) otelmetric.Int64Histogram { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if _, ok := setOfMetrics[metricName]; !ok { return noop.Int64Histogram{} } @@ -288,11 +249,7 @@ func createInt64Histogram(setOfMetrics map[estats.Metric]bool, metricName estats return ret } -<<<<<<< HEAD func createFloat64Histogram(setOfMetrics map[string]bool, metricName string, meter otelmetric.Meter, options ...otelmetric.Float64HistogramOption) 
otelmetric.Float64Histogram { -======= -func createFloat64Histogram(setOfMetrics map[estats.Metric]bool, metricName estats.Metric, meter otelmetric.Meter, options ...otelmetric.Float64HistogramOption) otelmetric.Float64Histogram { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if _, ok := setOfMetrics[metricName]; !ok { return noop.Float64Histogram{} } @@ -304,11 +261,7 @@ func createFloat64Histogram(setOfMetrics map[estats.Metric]bool, metricName esta return ret } -<<<<<<< HEAD func createInt64Gauge(setOfMetrics map[string]bool, metricName string, meter otelmetric.Meter, options ...otelmetric.Int64GaugeOption) otelmetric.Int64Gauge { -======= -func createInt64Gauge(setOfMetrics map[estats.Metric]bool, metricName estats.Metric, meter otelmetric.Meter, options ...otelmetric.Int64GaugeOption) otelmetric.Int64Gauge { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if _, ok := setOfMetrics[metricName]; !ok { return noop.Int64Gauge{} } @@ -351,11 +304,7 @@ type registryMetrics struct { optionalLabels []string } -<<<<<<< HEAD func (rm *registryMetrics) registerMetrics(metrics *stats.MetricSet, meter otelmetric.Meter) { -======= -func (rm *registryMetrics) registerMetrics(metrics *estats.Metrics, meter otelmetric.Meter) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) rm.intCounts = make(map[*estats.MetricDescriptor]otelmetric.Int64Counter) rm.floatCounts = make(map[*estats.MetricDescriptor]otelmetric.Float64Counter) rm.intHistos = make(map[*estats.MetricDescriptor]otelmetric.Int64Histogram) @@ -436,20 +385,12 @@ var ( // DefaultSizeBounds are the default bounds for metrics which record size. DefaultSizeBounds = []float64{0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296} // defaultPerCallMetrics are the default metrics provided by this module. -<<<<<<< HEAD defaultPerCallMetrics = stats.NewMetricSet(ClientAttemptStartedMetricName, ClientAttemptDurationMetricName, ClientAttemptSentCompressedTotalMessageSizeMetricName, ClientAttemptRcvdCompressedTotalMessageSizeMetricName, ClientCallDurationMetricName, ServerCallStartedMetricName, ServerCallSentCompressedTotalMessageSizeMetricName, ServerCallRcvdCompressedTotalMessageSizeMetricName, ServerCallDurationMetricName) -======= - defaultPerCallMetrics = estats.NewMetrics(ClientAttemptStarted, ClientAttemptDuration, ClientAttemptSentCompressedTotalMessageSize, ClientAttemptRcvdCompressedTotalMessageSize, ClientCallDuration, ServerCallStarted, ServerCallSentCompressedTotalMessageSize, ServerCallRcvdCompressedTotalMessageSize, ServerCallDuration) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) // DefaultMetrics returns a set of default OpenTelemetry metrics. // // This should only be invoked after init time. -<<<<<<< HEAD func DefaultMetrics() *stats.MetricSet { -======= -func DefaultMetrics() *estats.Metrics { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return defaultPerCallMetrics.Join(estats.DefaultMetrics) } diff --git a/vendor/google.golang.org/grpc/stats/opentelemetry/server_metrics.go b/vendor/google.golang.org/grpc/stats/opentelemetry/server_metrics.go index 05e3958101..4765afa8ed 100644 --- a/vendor/google.golang.org/grpc/stats/opentelemetry/server_metrics.go +++ b/vendor/google.golang.org/grpc/stats/opentelemetry/server_metrics.go @@ -264,7 +264,6 @@ func (h *serverStatsHandler) processRPCEnd(ctx context.Context, ai *attemptInfo, } const ( -<<<<<<< HEAD // ServerCallStartedMetricName is the number of server calls started. 
ServerCallStartedMetricName string = "grpc.server.call.started" // ServerCallSentCompressedTotalMessageSizeMetricName is the compressed @@ -276,17 +275,4 @@ const ( // ServerCallDurationMetricName is the end-to-end time taken to complete a // call from server transport's perspective. ServerCallDurationMetricName string = "grpc.server.call.duration" -======= - // ServerCallStarted is the number of server calls started. - ServerCallStarted estats.Metric = "grpc.server.call.started" - // ServerCallSentCompressedTotalMessageSize is the compressed message bytes - // sent per server call. - ServerCallSentCompressedTotalMessageSize estats.Metric = "grpc.server.call.sent_total_compressed_message_size" - // ServerCallRcvdCompressedTotalMessageSize is the compressed message bytes - // received per server call. - ServerCallRcvdCompressedTotalMessageSize estats.Metric = "grpc.server.call.rcvd_total_compressed_message_size" - // ServerCallDuration is the end-to-end time taken to complete a call from - // server transport's perspective. - ServerCallDuration estats.Metric = "grpc.server.call.duration" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index f5ee6595c5..6f20d2d548 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -260,33 +260,17 @@ func (s *ConnEnd) IsClient() bool { return s.Client } func (s *ConnEnd) isConnStats() {} -<<<<<<< HEAD -======= -type incomingTagsKey struct{} -type outgoingTagsKey struct{} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // SetTags attaches stats tagging data to the context, which will be sent in // the outgoing RPC with the header grpc-tags-bin. Subsequent calls to // SetTags will overwrite the values from earlier calls. // -<<<<<<< HEAD // Deprecated: set the `grpc-tags-bin` header in the metadata instead. func SetTags(ctx context.Context, b []byte) context.Context { return metadata.AppendToOutgoingContext(ctx, "grpc-tags-bin", string(b)) -======= -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. -func SetTags(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, outgoingTagsKey{}, b) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Tags returns the tags from the context for the inbound RPC. // -<<<<<<< HEAD // Deprecated: obtain the `grpc-tags-bin` header from metadata instead. func Tags(ctx context.Context) []byte { traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-tags-bin") @@ -296,57 +280,17 @@ func Tags(ctx context.Context) []byte { return []byte(traceValues[len(traceValues)-1]) } -======= -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. -func Tags(ctx context.Context) []byte { - b, _ := ctx.Value(incomingTagsKey{}).([]byte) - return b -} - -// SetIncomingTags attaches stats tagging data to the context, to be read by -// the application (not sent in outgoing RPCs). -// -// This is intended for gRPC-internal use ONLY. 
-func SetIncomingTags(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, incomingTagsKey{}, b) -} - -// OutgoingTags returns the tags from the context for the outbound RPC. -// -// This is intended for gRPC-internal use ONLY. -func OutgoingTags(ctx context.Context) []byte { - b, _ := ctx.Value(outgoingTagsKey{}).([]byte) - return b -} - -type incomingTraceKey struct{} -type outgoingTraceKey struct{} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // SetTrace attaches stats tagging data to the context, which will be sent in // the outgoing RPC with the header grpc-trace-bin. Subsequent calls to // SetTrace will overwrite the values from earlier calls. // -<<<<<<< HEAD // Deprecated: set the `grpc-trace-bin` header in the metadata instead. func SetTrace(ctx context.Context, b []byte) context.Context { return metadata.AppendToOutgoingContext(ctx, "grpc-trace-bin", string(b)) -======= -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. -func SetTrace(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, outgoingTraceKey{}, b) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Trace returns the trace from the context for the inbound RPC. // -<<<<<<< HEAD // Deprecated: obtain the `grpc-trace-bin` header from metadata instead. func Trace(ctx context.Context) []byte { traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-trace-bin") @@ -354,27 +298,4 @@ func Trace(ctx context.Context) []byte { return nil } return []byte(traceValues[len(traceValues)-1]) -======= -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. -func Trace(ctx context.Context) []byte { - b, _ := ctx.Value(incomingTraceKey{}).([]byte) - return b -} - -// SetIncomingTrace attaches stats tagging data to the context, to be read by -// the application (not sent in outgoing RPCs). It is intended for -// gRPC-internal use. -func SetIncomingTrace(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, incomingTraceKey{}, b) -} - -// OutgoingTrace returns the trace from the context for the outbound RPC. It is -// intended for gRPC-internal use. -func OutgoingTrace(ctx context.Context) []byte { - b, _ := ctx.Value(outgoingTraceKey{}).([]byte) - return b ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 0bdadc3e1c..54adbbced7 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -23,11 +23,7 @@ import ( "errors" "io" "math" -<<<<<<< HEAD rand "math/rand/v2" -======= - "math/rand" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "strconv" "sync" "time" @@ -117,13 +113,9 @@ type ClientStream interface { // SendMsg is generally called by generated code. On error, SendMsg aborts // the stream. If the error was generated by the client, the status is // returned directly; otherwise, io.EOF is returned and the status of -<<<<<<< HEAD // the stream may be discovered using RecvMsg. 
For unary or server-streaming // RPCs (StreamDesc.ClientStreams is false), a nil error is returned // unconditionally. -======= - // the stream may be discovered using RecvMsg. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // // SendMsg blocks until: // - There is sufficient flow control to schedule m with the transport, or @@ -226,11 +218,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth var mc serviceconfig.MethodConfig var onCommit func() -<<<<<<< HEAD newStream := func(ctx context.Context, done func()) (iresolver.ClientStream, error) { -======= - var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...) } @@ -598,11 +586,7 @@ type csAttempt struct { ctx context.Context cs *clientStream t transport.ClientTransport -<<<<<<< HEAD s *transport.ClientStream -======= - s *transport.Stream ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p *parser pickResult balancer.PickResult @@ -724,18 +708,10 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { cs.numRetriesSincePushback = 0 } else { fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback)) -<<<<<<< HEAD cur := min(float64(rp.InitialBackoff)*fact, float64(rp.MaxBackoff)) // Apply jitter by multiplying with a random factor between 0.8 and 1.2 cur *= 0.8 + 0.4*rand.Float64() dur = time.Duration(int64(cur)) -======= - cur := float64(rp.InitialBackoff) * fact - if max := float64(rp.MaxBackoff); cur > max { - cur = max - } - dur = time.Duration(rand.Int63n(int64(cur))) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) cs.numRetriesSincePushback++ } @@ -1016,11 +992,7 @@ func (cs *clientStream) CloseSend() error { } cs.sentLast = true op := func(a *csAttempt) error { -<<<<<<< HEAD a.s.Write(nil, nil, &transport.WriteOptions{Last: true}) -======= - a.t.Write(a.s, nil, nil, &transport.Options{Last: true}) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Always return nil; io.EOF is the only error that might make sense // instead, but there is no need to signal the client to call RecvMsg // as the only use left for the stream after CloseSend is to call @@ -1112,11 +1084,7 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength } a.mu.Unlock() } -<<<<<<< HEAD if err := a.s.Write(hdr, payld, &transport.WriteOptions{Last: !cs.desc.ClientStreams}); err != nil { -======= - if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !cs.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. 
finish is not called; RecvMsg() @@ -1130,12 +1098,6 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now())) } } -<<<<<<< HEAD -======= - if channelz.IsOn() { - a.t.IncrMsgSent() - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } @@ -1189,12 +1151,6 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { Length: payInfo.uncompressedBytes.Len(), }) } -<<<<<<< HEAD -======= - if channelz.IsOn() { - a.t.IncrMsgRecv() - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if cs.desc.ServerStreams { // Subsequent messages should be received by subsequent RecvMsg calls. return nil @@ -1222,11 +1178,7 @@ func (a *csAttempt) finish(err error) { } var tr metadata.MD if a.s != nil { -<<<<<<< HEAD a.s.Close(err) -======= - a.t.CloseStream(a.s, err) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) tr = a.s.Trailer() } @@ -1383,11 +1335,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin } type addrConnStream struct { -<<<<<<< HEAD s *transport.ClientStream -======= - s *transport.Stream ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ac *addrConn callHdr *transport.CallHdr cancel context.CancelFunc @@ -1427,11 +1375,7 @@ func (as *addrConnStream) CloseSend() error { } as.sentLast = true -<<<<<<< HEAD as.s.Write(nil, nil, &transport.WriteOptions{Last: true}) -======= - as.t.Write(as.s, nil, nil, &transport.Options{Last: true}) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Always return nil; io.EOF is the only error that might make sense // instead, but there is no need to signal the client to call RecvMsg // as the only use left for the stream after CloseSend is to call @@ -1481,11 +1425,7 @@ func (as *addrConnStream) SendMsg(m any) (err error) { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize) } -<<<<<<< HEAD if err := as.s.Write(hdr, payload, &transport.WriteOptions{Last: !as.desc.ClientStreams}); err != nil { -======= - if err := as.t.Write(as.s, hdr, payload, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if !as.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() @@ -1495,12 +1435,6 @@ func (as *addrConnStream) SendMsg(m any) (err error) { return io.EOF } -<<<<<<< HEAD -======= - if channelz.IsOn() { - as.t.IncrMsgSent() - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return nil } @@ -1538,12 +1472,6 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { return toRPCErr(err) } -<<<<<<< HEAD -======= - if channelz.IsOn() { - as.t.IncrMsgRecv() - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if as.desc.ServerStreams { // Subsequent messages should be received by subsequent RecvMsg calls. return nil @@ -1571,11 +1499,7 @@ func (as *addrConnStream) finish(err error) { err = nil } if as.s != nil { -<<<<<<< HEAD as.s.Close(err) -======= - as.t.CloseStream(as.s, err) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if err != nil { @@ -1642,12 +1566,7 @@ type ServerStream interface { // serverStream implements a server side Stream. 
type serverStream struct { ctx context.Context -<<<<<<< HEAD s *transport.ServerStream -======= - t transport.ServerTransport - s *transport.Stream ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) p *parser codec baseCodec @@ -1697,11 +1616,7 @@ func (ss *serverStream) SendHeader(md metadata.MD) error { return status.Error(codes.Internal, err.Error()) } -<<<<<<< HEAD err = ss.s.SendHeader(md) -======= - err = ss.t.WriteHeader(ss.s, md) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged { h, _ := ss.s.Header() sh := &binarylog.ServerHeader{ @@ -1741,11 +1656,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { } if err != nil && err != io.EOF { st, _ := status.FromError(toRPCErr(err)) -<<<<<<< HEAD ss.s.WriteStatus(st) -======= - ss.t.WriteStatus(ss.s, st) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Non-user specified status was sent out. This should be an error // case (as a server side Cancel maybe). // @@ -1753,12 +1664,6 @@ func (ss *serverStream) SendMsg(m any) (err error) { // status from the service handler, we will log that error instead. // This behavior is similar to an interceptor. } -<<<<<<< HEAD -======= - if channelz.IsOn() && err == nil { - ss.t.IncrMsgSent() - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }() // Server handler could have set new compressor by calling SetSendCompressor. @@ -1790,11 +1695,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { if payloadLen > ss.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, ss.maxSendMessageSize) } -<<<<<<< HEAD if err := ss.s.Write(hdr, payload, &transport.WriteOptions{Last: false}); err != nil { -======= - if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return toRPCErr(err) } @@ -1840,11 +1741,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { } if err != nil && err != io.EOF { st, _ := status.FromError(toRPCErr(err)) -<<<<<<< HEAD ss.s.WriteStatus(st) -======= - ss.t.WriteStatus(ss.s, st) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Non-user specified status was sent out. This should be an error // case (as a server side Cancel maybe). // @@ -1852,12 +1749,6 @@ func (ss *serverStream) RecvMsg(m any) (err error) { // status from the service handler, we will log that error instead. // This behavior is similar to an interceptor. } -<<<<<<< HEAD -======= - if channelz.IsOn() && err == nil { - ss.t.IncrMsgRecv() - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) }() var payInfo *payloadInfo if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { @@ -1875,11 +1766,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { return err } if err == io.ErrUnexpectedEOF { -<<<<<<< HEAD err = status.Error(codes.Internal, io.ErrUnexpectedEOF.Error()) -======= - err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return toRPCErr(err) } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 1af9328caf..0e03fa4d4f 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,8 +19,4 @@ package grpc // Version is the current grpc version. 
-<<<<<<< HEAD const Version = "1.70.0" -======= -const Version = "1.68.1" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go b/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go index 5f249cc0e3..0f9cb52b7e 100644 --- a/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go +++ b/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go @@ -28,14 +28,9 @@ package googledirectpath import ( "encoding/json" "fmt" -<<<<<<< HEAD rand "math/rand/v2" "net/url" "sync" -======= - "math/rand" - "net/url" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "time" "google.golang.org/grpc/grpclog" @@ -52,11 +47,7 @@ const ( c2pScheme = "google-c2p" c2pAuthority = "traffic-director-c2p.xds.googleapis.com" -<<<<<<< HEAD defaultUniverseDomain = "googleapis.com" -======= - tdURL = "dns:///directpath-pa.googleapis.com" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) zoneURL = "http://metadata.google.internal/computeMetadata/v1/instance/zone" ipv6URL = "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ipv6s" ipv6CapableMetadataName = "TRAFFICDIRECTOR_DIRECTPATH_C2P_IPV6_CAPABLE" @@ -66,7 +57,6 @@ const ( dnsName, xdsName = "dns", "xds" ) -<<<<<<< HEAD var ( logger = internalgrpclog.NewPrefixLogger(grpclog.Component("directpath"), logPrefix) universeDomainMu sync.Mutex @@ -74,20 +64,12 @@ var ( // For overriding in unittests. onGCE = googlecloud.OnGCE randInt = rand.Int -======= -// For overriding in unittests. -var ( - onGCE = googlecloud.OnGCE - randInt = rand.Int - logger = internalgrpclog.NewPrefixLogger(grpclog.Component("directpath"), logPrefix) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) func init() { resolver.Register(c2pResolverBuilder{}) } -<<<<<<< HEAD // SetUniverseDomain informs the gRPC library of the universe domain // in which the process is running (for example, "googleapis.com"). // It is the caller's responsibility to ensure that the domain is correct. 
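The googledirectpath hunks around here replace the hard-coded tdURL ("dns:///directpath-pa.googleapis.com") with a mutex-guarded, settable universe domain defaulting to "googleapis.com", from which getXdsServerURI derives the Traffic Director target. A reduced sketch of that derivation, with simplified stand-ins for the vendored names:

package main

import (
	"fmt"
	"sync"
)

const defaultUniverseDomain = "googleapis.com"

var (
	universeDomainMu sync.Mutex
	universeDomain   = ""
)

// xdsServerURI roughly mirrors the kept getXdsServerURI: fall back to
// the default universe domain, then derive the Traffic Director target.
func xdsServerURI() string {
	universeDomainMu.Lock()
	defer universeDomainMu.Unlock()
	if universeDomain == "" {
		universeDomain = defaultUniverseDomain
	}
	return fmt.Sprintf("dns:///directpath-pa.%s", universeDomain)
}

func main() {
	fmt.Println(xdsServerURI()) // dns:///directpath-pa.googleapis.com
}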
@@ -135,8 +117,6 @@ func getXdsServerURI() string { return fmt.Sprintf("dns:///directpath-pa.%s", universeDomain) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type c2pResolverBuilder struct{} func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { @@ -160,15 +140,7 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts go func() { zoneCh <- getZone(httpReqTimeout) }() go func() { ipv6CapableCh <- getIPv6Capable(httpReqTimeout) }() -<<<<<<< HEAD xdsServerURI := getXdsServerURI() -======= - xdsServerURI := envconfig.C2PResolverTestOnlyTrafficDirectorURI - if xdsServerURI == "" { - xdsServerURI = tdURL - } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) nodeCfg := newNodeConfig(<-zoneCh, <-ipv6CapableCh) xdsServerCfg := newXdsServerConfig(xdsServerURI) authoritiesCfg := newAuthoritiesConfig(xdsServerCfg) diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go index 5790ba13ae..71a4c9c9da 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -24,10 +24,6 @@ package clusterimpl import ( -<<<<<<< HEAD -======= - "context" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "encoding/json" "fmt" "sync" @@ -38,10 +34,6 @@ import ( "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/grpclog" -<<<<<<< HEAD -======= - "google.golang.org/grpc/internal/grpcsync" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/xds" "google.golang.org/grpc/internal/xds/bootstrap" @@ -60,16 +52,11 @@ const ( ) var ( -<<<<<<< HEAD connectedAddress = internal.ConnectedAddress.(func(balancer.SubConnState) resolver.Address) // Below function is no-op in actual code, but can be overridden in // tests to give tests visibility into exactly when certain events happen. clientConnUpdateHook = func() {} pickerUpdateHook = func() {} -======= - connectedAddress = internal.ConnectedAddress.(func(balancer.SubConnState) resolver.Address) - errBalancerClosed = fmt.Errorf("%s LB policy is closed", Name) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) func init() { @@ -79,21 +66,10 @@ func init() { type bb struct{} func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { -<<<<<<< HEAD b := &clusterImplBalancer{ ClientConn: cc, loadWrapper: loadstore.NewWrapper(), requestCountMax: defaultRequestCountMax, -======= - ctx, cancel := context.WithCancel(context.Background()) - b := &clusterImplBalancer{ - ClientConn: cc, - bOpts: bOpts, - loadWrapper: loadstore.NewWrapper(), - requestCountMax: defaultRequestCountMax, - serializer: grpcsync.NewCallbackSerializer(ctx), - serializerCancel: cancel, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } b.logger = prefixLogger(b) b.child = gracefulswitch.NewBalancer(b, bOpts) @@ -112,7 +88,6 @@ func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, err type clusterImplBalancer struct { balancer.ClientConn -<<<<<<< HEAD // The following fields are set at creation time, and are read-only after // that, and therefore need not be protected by a mutex. 
logger *grpclog.PrefixLogger @@ -183,35 +158,6 @@ func (b *clusterImplBalancer) newPickerLocked() *picker { countMax: b.requestCountMax, telemetryLabels: b.telemetryLabels, } -======= - bOpts balancer.BuildOptions - logger *grpclog.PrefixLogger - xdsClient xdsclient.XDSClient - - config *LBConfig - child *gracefulswitch.Balancer - cancelLoadReport func() - edsServiceName string - lrsServer *bootstrap.ServerConfig - loadWrapper *loadstore.Wrapper - - clusterNameMu sync.Mutex - clusterName string - - serializer *grpcsync.CallbackSerializer - serializerCancel context.CancelFunc - - // childState/drops/requestCounter keeps the state used by the most recently - // generated picker. - childState balancer.State - dropCategories []DropConfig // The categories for drops. - drops []*dropper - requestCounterCluster string // The cluster name for the request counter. - requestCounterService string // The service name for the request counter. - requestCounter *xdsclient.ClusterRequestsCounter - requestCountMax uint32 - telemetryLabels map[string]string ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // updateLoadStore checks the config for load store, and decides whether it @@ -292,16 +238,12 @@ func (b *clusterImplBalancer) updateLoadStore(newConfig *LBConfig) error { return nil } -<<<<<<< HEAD func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState) error { defer clientConnUpdateHook() b.mu.Lock() b.inhibitPickerUpdates = true b.mu.Unlock() -======= -func (b *clusterImplBalancer) updateClientConnState(s balancer.ClientConnState) error { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if b.logger.V(2) { b.logger.Infof("Received configuration: %s", pretty.ToJSON(s.BalancerConfig)) } @@ -344,7 +286,6 @@ func (b *clusterImplBalancer) updateClientConnState(s balancer.ClientConnState) return err } -<<<<<<< HEAD // Addresses and sub-balancer config are sent to sub-balancer. err = b.child.UpdateClientConnState(balancer.ClientConnState{ ResolverState: s.ResolverState, @@ -375,45 +316,6 @@ func (b *clusterImplBalancer) updateClientConnState(s balancer.ClientConnState) func (b *clusterImplBalancer) ResolverError(err error) { b.child.ResolverError(err) -======= - b.config = newConfig - - b.telemetryLabels = newConfig.TelemetryLabels - dc := b.handleDropAndRequestCount(newConfig) - if dc != nil && b.childState.Picker != nil { - b.ClientConn.UpdateState(balancer.State{ - ConnectivityState: b.childState.ConnectivityState, - Picker: b.newPicker(dc), - }) - } - - // Addresses and sub-balancer config are sent to sub-balancer. - return b.child.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: s.ResolverState, - BalancerConfig: parsedCfg, - }) -} - -func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState) error { - // Handle the update in a blocking fashion. - errCh := make(chan error, 1) - callback := func(context.Context) { - errCh <- b.updateClientConnState(s) - } - onFailure := func() { - // An attempt to schedule callback fails only when an update is received - // after Close(). 
- errCh <- errBalancerClosed - } - b.serializer.ScheduleOr(callback, onFailure) - return <-errCh -} - -func (b *clusterImplBalancer) ResolverError(err error) { - b.serializer.TrySchedule(func(context.Context) { - b.child.ResolverError(err) - }) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (b *clusterImplBalancer) updateSubConnState(_ balancer.SubConn, s balancer.SubConnState, cb func(balancer.SubConnState)) { @@ -438,7 +340,6 @@ func (b *clusterImplBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer } func (b *clusterImplBalancer) Close() { -<<<<<<< HEAD b.child.Close() b.childState = balancer.State{} @@ -451,32 +352,11 @@ func (b *clusterImplBalancer) Close() { func (b *clusterImplBalancer) ExitIdle() { b.child.ExitIdle() -======= - b.serializer.TrySchedule(func(_ context.Context) { - b.child.Close() - b.childState = balancer.State{} - - if b.cancelLoadReport != nil { - b.cancelLoadReport() - b.cancelLoadReport = nil - } - b.logger.Infof("Shutdown") - }) - b.serializerCancel() - <-b.serializer.Done() -} - -func (b *clusterImplBalancer) ExitIdle() { - b.serializer.TrySchedule(func(context.Context) { - b.child.ExitIdle() - }) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // Override methods to accept updates from the child LB. func (b *clusterImplBalancer) UpdateState(state balancer.State) { -<<<<<<< HEAD b.mu.Lock() defer b.mu.Unlock() @@ -502,35 +382,12 @@ func (b *clusterImplBalancer) UpdateState(state balancer.State) { func (b *clusterImplBalancer) setClusterName(n string) { b.mu.Lock() defer b.mu.Unlock() -======= - b.serializer.TrySchedule(func(context.Context) { - b.childState = state - b.ClientConn.UpdateState(balancer.State{ - ConnectivityState: b.childState.ConnectivityState, - Picker: b.newPicker(&dropConfigs{ - drops: b.drops, - requestCounter: b.requestCounter, - requestCountMax: b.requestCountMax, - }), - }) - }) -} - -func (b *clusterImplBalancer) setClusterName(n string) { - b.clusterNameMu.Lock() - defer b.clusterNameMu.Unlock() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) b.clusterName = n } func (b *clusterImplBalancer) getClusterName() string { -<<<<<<< HEAD b.mu.Lock() defer b.mu.Unlock() -======= - b.clusterNameMu.Lock() - defer b.clusterNameMu.Unlock() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return b.clusterName } @@ -571,7 +428,6 @@ func (b *clusterImplBalancer) NewSubConn(addrs []resolver.Address, opts balancer scw := &scWrapper{} oldListener := opts.StateListener opts.StateListener = func(state balancer.SubConnState) { -<<<<<<< HEAD b.updateSubConnState(sc, state, oldListener) if state.ConnectivityState != connectivity.Ready { return @@ -587,25 +443,6 @@ func (b *clusterImplBalancer) NewSubConn(addrs []resolver.Address, opts balancer return } scw.updateLocalityID(lID) -======= - b.serializer.TrySchedule(func(context.Context) { - b.updateSubConnState(sc, state, oldListener) - if state.ConnectivityState != connectivity.Ready { - return - } - // Read connected address and call updateLocalityID() based on the connected - // address's locality. 
https://github.com/grpc/grpc-go/issues/7339 - addr := connectedAddress(state) - lID := xdsinternal.GetLocalityID(addr) - if lID.Empty() { - if b.logger.V(2) { - b.logger.Infof("Locality ID for %s unexpectedly empty", addr) - } - return - } - scw.updateLocalityID(lID) - }) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } sc, err := b.ClientConn.NewSubConn(newAddrs, opts) if err != nil { @@ -635,56 +472,3 @@ func (b *clusterImplBalancer) UpdateAddresses(sc balancer.SubConn, addrs []resol } b.ClientConn.UpdateAddresses(sc, newAddrs) } -<<<<<<< HEAD -======= - -type dropConfigs struct { - drops []*dropper - requestCounter *xdsclient.ClusterRequestsCounter - requestCountMax uint32 -} - -// handleDropAndRequestCount compares drop and request counter in newConfig with -// the one currently used by picker. It returns a new dropConfigs if a new -// picker needs to be generated, otherwise it returns nil. -func (b *clusterImplBalancer) handleDropAndRequestCount(newConfig *LBConfig) *dropConfigs { - // Compare new drop config. And update picker if it's changed. - var updatePicker bool - if !equalDropCategories(b.dropCategories, newConfig.DropCategories) { - b.dropCategories = newConfig.DropCategories - b.drops = make([]*dropper, 0, len(newConfig.DropCategories)) - for _, c := range newConfig.DropCategories { - b.drops = append(b.drops, newDropper(c)) - } - updatePicker = true - } - - // Compare cluster name. And update picker if it's changed, because circuit - // breaking's stream counter will be different. - if b.requestCounterCluster != newConfig.Cluster || b.requestCounterService != newConfig.EDSServiceName { - b.requestCounterCluster = newConfig.Cluster - b.requestCounterService = newConfig.EDSServiceName - b.requestCounter = xdsclient.GetClusterRequestsCounter(newConfig.Cluster, newConfig.EDSServiceName) - updatePicker = true - } - // Compare upper bound of stream count. And update picker if it's changed. - // This is also for circuit breaking. 
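[Editor's illustration] The deleted handleDropAndRequestCount above regenerates the picker whenever the drop categories, the cluster/EDS service pair identifying the shared request counter, or the stream-count upper bound change, with the bound defaulting to 1024. A rough sketch of the in-flight counter such circuit breaking relies on; requestsCounter and StartRequest are invented names, not the real xdsclient.ClusterRequestsCounter API:

    package main

    import (
    	"errors"
    	"fmt"
    	"sync/atomic"
    )

    var errMaxRequests = errors.New("in-flight request limit exceeded")

    // requestsCounter caps concurrent requests for one cluster/service pair,
    // mirroring the check a cluster_impl picker performs per pick.
    type requestsCounter struct {
    	inFlight atomic.Uint32
    }

    // StartRequest reserves a slot, or fails once countMax is reached.
    func (c *requestsCounter) StartRequest(countMax uint32) error {
    	for {
    		cur := c.inFlight.Load()
    		if cur >= countMax {
    			return errMaxRequests
    		}
    		if c.inFlight.CompareAndSwap(cur, cur+1) {
    			return nil
    		}
    	}
    }

    func (c *requestsCounter) EndRequest() { c.inFlight.Add(^uint32(0)) } // atomic -1

    func main() {
    	var c requestsCounter
    	const countMax = 2 // stands in for MaxConcurrentRequests; 1024 by default above
    	fmt.Println(c.StartRequest(countMax)) // <nil>
    	fmt.Println(c.StartRequest(countMax)) // <nil>
    	fmt.Println(c.StartRequest(countMax)) // limit exceeded
    	c.EndRequest()
    	fmt.Println(c.StartRequest(countMax)) // <nil> again
    }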
- var newRequestCountMax uint32 = 1024 - if newConfig.MaxConcurrentRequests != nil { - newRequestCountMax = *newConfig.MaxConcurrentRequests - } - if b.requestCountMax != newRequestCountMax { - b.requestCountMax = newRequestCountMax - updatePicker = true - } - - if !updatePicker { - return nil - } - return &dropConfigs{ - drops: b.drops, - requestCounter: b.requestCounter, - requestCountMax: b.requestCountMax, - } -} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go index 05c8711d1b..cd94182fa7 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go @@ -87,20 +87,6 @@ type picker struct { telemetryLabels map[string]string } -<<<<<<< HEAD -======= -func (b *clusterImplBalancer) newPicker(config *dropConfigs) *picker { - return &picker{ - drops: config.drops, - s: b.childState, - loadStore: b.loadWrapper, - counter: config.requestCounter, - countMax: config.requestCountMax, - telemetryLabels: b.telemetryLabels, - } -} - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func telemetryLabels(ctx context.Context) map[string]string { if ctx == nil { return nil @@ -143,11 +129,7 @@ func (d *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { if d.loadStore != nil { d.loadStore.CallDropped("") } -<<<<<<< HEAD return balancer.PickResult{}, status.Error(codes.Unavailable, err.Error()) -======= - return balancer.PickResult{}, status.Errorf(codes.Unavailable, err.Error()) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/clustermanager.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/clustermanager.go index 149b081706..24ad2399dd 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/clustermanager.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/clustermanager.go @@ -87,10 +87,7 @@ func (b *bal) updateChildren(s balancer.ClientConnState, newConfig *lbConfig) er // TODO: Get rid of handling hierarchy in addresses. This LB policy never // gets addresses from the resolver. addressesSplit := hierarchy.Group(s.ResolverState.Addresses) -<<<<<<< HEAD endpointsSplit := hierarchy.GroupEndpoints(s.ResolverState.Endpoints) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Remove sub-balancers that are not in the new list from the aggregator and // balancergroup. 
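[Editor's illustration] Both here and in the priority balancer later in this diff, the incoming address and endpoint lists are split per child balancer using hierarchical path attributes (hierarchy.Group and, on the HEAD side, the newly used hierarchy.GroupEndpoints). A standalone sketch of the grouping idea using its own toy types rather than gRPC's internal/hierarchy package:

    package main

    import "fmt"

    // endpoint carries a hierarchy path; path[0] names the child balancer that
    // should receive it, and the remaining path is handed down one level.
    type endpoint struct {
    	addr string
    	path []string
    }

    // group buckets endpoints by the first path element and strips that
    // element, which is what hierarchy.Group/GroupEndpoints do via attributes.
    func group(endpoints []endpoint) map[string][]endpoint {
    	out := make(map[string][]endpoint)
    	for _, e := range endpoints {
    		if len(e.path) == 0 {
    			continue // no hierarchy info: not destined for any child
    		}
    		child := e.path[0]
    		e.path = e.path[1:]
    		out[child] = append(out[child], e)
    	}
    	return out
    }

    func main() {
    	eps := []endpoint{
    		{addr: "10.0.0.1:443", path: []string{"priority-0", "locality-a"}},
    		{addr: "10.0.0.2:443", path: []string{"priority-1", "locality-b"}},
    	}
    	for child, members := range group(eps) {
    		fmt.Println(child, members)
    	}
    }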
@@ -143,10 +140,7 @@ func (b *bal) updateChildren(s balancer.ClientConnState, newConfig *lbConfig) er if err := b.bg.UpdateClientConnState(childName, balancer.ClientConnState{ ResolverState: resolver.State{ Addresses: addressesSplit[childName], -<<<<<<< HEAD Endpoints: endpointsSplit[childName], -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ServiceConfig: s.ResolverState.ServiceConfig, Attributes: s.ResolverState.Attributes, }, @@ -175,21 +169,14 @@ func (b *bal) updateChildren(s balancer.ClientConnState, newConfig *lbConfig) er } func (b *bal) UpdateClientConnState(s balancer.ClientConnState) error { -<<<<<<< HEAD if b.logger.V(2) { b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) newConfig, ok := s.BalancerConfig.(*lbConfig) if !ok { return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) } -<<<<<<< HEAD -======= - b.logger.Infof("Update with config %+v, resolver state %+v", pretty.ToJSON(s.BalancerConfig), s.ResolverState) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) b.stateAggregator.pauseStateUpdates() defer b.stateAggregator.resumeStateUpdates() diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go index 3f8222c996..f0a8905d37 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go @@ -184,14 +184,10 @@ func (b *clusterResolverBalancer) handleClientConnUpdate(update *ccUpdate) { return } -<<<<<<< HEAD if b.logger.V(2) { b.logger.Infof("Received new balancer config: %v", pretty.ToJSON(update.state.BalancerConfig)) } -======= - b.logger.Infof("Received new balancer config: %v", pretty.ToJSON(update.state.BalancerConfig)) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) cfg, _ := update.state.BalancerConfig.(*LBConfig) if cfg == nil { b.logger.Warningf("Ignoring unsupported balancer configuration of type: %T", update.state.BalancerConfig) @@ -238,11 +234,7 @@ func (b *clusterResolverBalancer) updateChildConfig() { b.child = newChildBalancer(b.priorityBuilder, b.cc, b.bOpts) } -<<<<<<< HEAD childCfgBytes, endpoints, err := buildPriorityConfigJSON(b.priorities, &b.config.xdsLBPolicy) -======= - childCfgBytes, addrs, err := buildPriorityConfigJSON(b.priorities, &b.config.xdsLBPolicy) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { b.logger.Warningf("Failed to build child policy config: %v", err) return @@ -256,7 +248,6 @@ func (b *clusterResolverBalancer) updateChildConfig() { b.logger.Infof("Built child policy config: %s", pretty.ToJSON(childCfg)) } -<<<<<<< HEAD flattenedAddrs := make([]resolver.Address, len(endpoints)) for i := range endpoints { for j := range endpoints[i].Addresses { @@ -279,22 +270,11 @@ func (b *clusterResolverBalancer) updateChildConfig() { // See https://github.com/grpc/grpc-go/issues/7339 endpoints[i].Addresses[j] = addr } -======= - endpoints := make([]resolver.Endpoint, len(addrs)) - for i, a := range addrs { - endpoints[i].Attributes = a.BalancerAttributes - endpoints[i].Addresses = []resolver.Address{a} - endpoints[i].Addresses[0].BalancerAttributes = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } if err := b.child.UpdateClientConnState(balancer.ClientConnState{ 
ResolverState: resolver.State{ Endpoints: endpoints, -<<<<<<< HEAD Addresses: flattenedAddrs, -======= - Addresses: addrs, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ServiceConfig: b.configRaw, Attributes: b.attrsWithClient, }, diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go index 2aab56addf..9a3a71c2e5 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go @@ -48,13 +48,8 @@ type priorityConfig struct { mechanism DiscoveryMechanism // edsResp is set only if type is EDS. edsResp xdsresource.EndpointsUpdate -<<<<<<< HEAD // endpoints is set only if type is DNS. endpoints []resolver.Endpoint -======= - // addresses is set only if type is DNS. - addresses []string ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Each discovery mechanism has a name generator so that the child policies // can reuse names between updates (EDS updates for example). childNameGen *nameGenerator @@ -76,13 +71,8 @@ type priorityConfig struct { // ┌──────▼─────┐ ┌─────▼──────┐ // │xDSLBPolicy │ │xDSLBPolicy │ (Locality and Endpoint picking layer) // └────────────┘ └────────────┘ -<<<<<<< HEAD func buildPriorityConfigJSON(priorities []priorityConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]byte, []resolver.Endpoint, error) { pc, endpoints, err := buildPriorityConfig(priorities, xdsLBPolicy) -======= -func buildPriorityConfigJSON(priorities []priorityConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]byte, []resolver.Address, error) { - pc, addrs, err := buildPriorityConfig(priorities, xdsLBPolicy) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, nil, fmt.Errorf("failed to build priority config: %v", err) } @@ -90,7 +80,6 @@ func buildPriorityConfigJSON(priorities []priorityConfig, xdsLBPolicy *internals if err != nil { return nil, nil, fmt.Errorf("failed to marshal built priority config struct into json: %v", err) } -<<<<<<< HEAD return ret, endpoints, nil } @@ -98,33 +87,16 @@ func buildPriorityConfig(priorities []priorityConfig, xdsLBPolicy *internalservi var ( retConfig = &priority.LBConfig{Children: make(map[string]*priority.Child)} retEndpoints []resolver.Endpoint -======= - return ret, addrs, nil -} - -func buildPriorityConfig(priorities []priorityConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) (*priority.LBConfig, []resolver.Address, error) { - var ( - retConfig = &priority.LBConfig{Children: make(map[string]*priority.Child)} - retAddrs []resolver.Address ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) for _, p := range priorities { switch p.mechanism.Type { case DiscoveryMechanismTypeEDS: -<<<<<<< HEAD names, configs, endpoints, err := buildClusterImplConfigForEDS(p.childNameGen, p.edsResp, p.mechanism, xdsLBPolicy) -======= - names, configs, addrs, err := buildClusterImplConfigForEDS(p.childNameGen, p.edsResp, p.mechanism, xdsLBPolicy) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, nil, err } retConfig.Priorities = append(retConfig.Priorities, names...) -<<<<<<< HEAD retEndpoints = append(retEndpoints, endpoints...) -======= - retAddrs = append(retAddrs, addrs...) 
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) odCfgs := convertClusterImplMapToOutlierDetection(configs, p.mechanism.outlierDetection) for n, c := range odCfgs { retConfig.Children[n] = &priority.Child{ @@ -135,15 +107,9 @@ func buildPriorityConfig(priorities []priorityConfig, xdsLBPolicy *internalservi } continue case DiscoveryMechanismTypeLogicalDNS: -<<<<<<< HEAD name, config, endpoints := buildClusterImplConfigForDNS(p.childNameGen, p.endpoints, p.mechanism) retConfig.Priorities = append(retConfig.Priorities, name) retEndpoints = append(retEndpoints, endpoints...) -======= - name, config, addrs := buildClusterImplConfigForDNS(p.childNameGen, p.addresses, p.mechanism) - retConfig.Priorities = append(retConfig.Priorities, name) - retAddrs = append(retAddrs, addrs...) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) odCfg := makeClusterImplOutlierDetectionChild(config, p.mechanism.outlierDetection) retConfig.Children[name] = &priority.Child{ Config: &internalserviceconfig.BalancerConfig{Name: outlierdetection.Name, Config: odCfg}, @@ -154,11 +120,7 @@ func buildPriorityConfig(priorities []priorityConfig, xdsLBPolicy *internalservi continue } } -<<<<<<< HEAD return retConfig, retEndpoints, nil -======= - return retConfig, retAddrs, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func convertClusterImplMapToOutlierDetection(ciCfgs map[string]*clusterimpl.LBConfig, odCfg outlierdetection.LBConfig) map[string]*outlierdetection.LBConfig { @@ -175,7 +137,6 @@ func makeClusterImplOutlierDetectionChild(ciCfg *clusterimpl.LBConfig, odCfg out return &odCfgRet } -<<<<<<< HEAD func buildClusterImplConfigForDNS(g *nameGenerator, endpoints []resolver.Endpoint, mechanism DiscoveryMechanism) (string, *clusterimpl.LBConfig, []resolver.Endpoint) { // Endpoint picking policy for DNS is hardcoded to pick_first. const childPolicy = "pick_first" @@ -186,25 +147,12 @@ func buildClusterImplConfigForDNS(g *nameGenerator, endpoints []resolver.Endpoin // Copy the nested address field as slice fields are shared by the // iteration variable and the original slice. retEndpoints[i].Addresses = append([]resolver.Address{}, e.Addresses...) -======= -func buildClusterImplConfigForDNS(g *nameGenerator, addrStrs []string, mechanism DiscoveryMechanism) (string, *clusterimpl.LBConfig, []resolver.Address) { - // Endpoint picking policy for DNS is hardcoded to pick_first. 
- const childPolicy = "pick_first" - retAddrs := make([]resolver.Address, 0, len(addrStrs)) - pName := fmt.Sprintf("priority-%v", g.prefix) - for _, addrStr := range addrStrs { - retAddrs = append(retAddrs, hierarchy.Set(resolver.Address{Addr: addrStr}, []string{pName})) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } return pName, &clusterimpl.LBConfig{ Cluster: mechanism.Cluster, TelemetryLabels: mechanism.TelemetryLabels, ChildPolicy: &internalserviceconfig.BalancerConfig{Name: childPolicy}, -<<<<<<< HEAD }, retEndpoints -======= - }, retAddrs ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // buildClusterImplConfigForEDS returns a list of cluster_impl configs, one for @@ -216,11 +164,7 @@ func buildClusterImplConfigForDNS(g *nameGenerator, addrStrs []string, mechanism // - map{"p0":p0_config, "p1":p1_config} // - [p0_address_0, p0_address_1, p1_address_0, p1_address_1] // - p0 addresses' hierarchy attributes are set to p0 -<<<<<<< HEAD func buildClusterImplConfigForEDS(g *nameGenerator, edsResp xdsresource.EndpointsUpdate, mechanism DiscoveryMechanism, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]string, map[string]*clusterimpl.LBConfig, []resolver.Endpoint, error) { -======= -func buildClusterImplConfigForEDS(g *nameGenerator, edsResp xdsresource.EndpointsUpdate, mechanism DiscoveryMechanism, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]string, map[string]*clusterimpl.LBConfig, []resolver.Address, error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) drops := make([]clusterimpl.DropConfig, 0, len(edsResp.Drops)) for _, d := range edsResp.Drops { drops = append(drops, clusterimpl.DropConfig{ @@ -242,30 +186,17 @@ func buildClusterImplConfigForEDS(g *nameGenerator, edsResp xdsresource.Endpoint } retNames := g.generate(priorities) retConfigs := make(map[string]*clusterimpl.LBConfig, len(retNames)) -<<<<<<< HEAD var retEndpoints []resolver.Endpoint for i, pName := range retNames { priorityLocalities := priorities[i] cfg, endpoints, err := priorityLocalitiesToClusterImpl(priorityLocalities, pName, mechanism, drops, xdsLBPolicy) -======= - var retAddrs []resolver.Address - for i, pName := range retNames { - priorityLocalities := priorities[i] - cfg, addrs, err := priorityLocalitiesToClusterImpl(priorityLocalities, pName, mechanism, drops, xdsLBPolicy) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, nil, nil, err } retConfigs[pName] = cfg -<<<<<<< HEAD retEndpoints = append(retEndpoints, endpoints...) } return retNames, retConfigs, retEndpoints, nil -======= - retAddrs = append(retAddrs, addrs...) - } - return retNames, retConfigs, retAddrs, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // groupLocalitiesByPriority returns the localities grouped by priority. @@ -316,13 +247,8 @@ func dedupSortedIntSlice(a []int) []int { // priority), and generates a cluster impl policy config, and a list of // addresses with their path hierarchy set to [priority-name, locality-name], so // priority and the xDS LB Policy know which child policy each address is for. 
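[Editor's illustration] In the function that follows, each endpoint's effective weight is its locality weight multiplied by its endpoint weight (lw * ew, per the A52 text quoted inside it), and either factor defaults to 1 when the EDS update leaves it unset. A tiny illustration of just that computation; the helper name is invented:

    package main

    import "fmt"

    // effectiveWeight mirrors the lw*ew computation in
    // priorityLocalitiesToClusterImpl: zero means "unset" and defaults to 1.
    func effectiveWeight(localityWeight, endpointWeight uint32) uint32 {
    	lw, ew := uint32(1), uint32(1)
    	if localityWeight != 0 {
    		lw = localityWeight
    	}
    	if endpointWeight != 0 {
    		ew = endpointWeight
    	}
    	return lw * ew
    }

    func main() {
    	fmt.Println(effectiveWeight(3, 2)) // 6
    	fmt.Println(effectiveWeight(0, 5)) // 5: locality weight defaults to 1
    	fmt.Println(effectiveWeight(4, 0)) // 4: endpoint weight defaults to 1
    }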
-<<<<<<< HEAD func priorityLocalitiesToClusterImpl(localities []xdsresource.Locality, priorityName string, mechanism DiscoveryMechanism, drops []clusterimpl.DropConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) (*clusterimpl.LBConfig, []resolver.Endpoint, error) { var retEndpoints []resolver.Endpoint -======= -func priorityLocalitiesToClusterImpl(localities []xdsresource.Locality, priorityName string, mechanism DiscoveryMechanism, drops []clusterimpl.DropConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) (*clusterimpl.LBConfig, []resolver.Address, error) { - var addrs []resolver.Address ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, locality := range localities { var lw uint32 = 1 if locality.Weight != 0 { @@ -339,39 +265,24 @@ func priorityLocalitiesToClusterImpl(localities []xdsresource.Locality, priority if endpoint.HealthStatus != xdsresource.EndpointHealthStatusHealthy && endpoint.HealthStatus != xdsresource.EndpointHealthStatusUnknown { continue } -<<<<<<< HEAD resolverEndpoint := resolver.Endpoint{} for _, as := range endpoint.Addresses { resolverEndpoint.Addresses = append(resolverEndpoint.Addresses, resolver.Address{Addr: as}) } resolverEndpoint = hierarchy.SetInEndpoint(resolverEndpoint, []string{priorityName, localityStr}) resolverEndpoint = internal.SetLocalityIDInEndpoint(resolverEndpoint, locality.ID) -======= - addr := resolver.Address{Addr: endpoint.Address} - addr = hierarchy.Set(addr, []string{priorityName, localityStr}) - addr = internal.SetLocalityID(addr, locality.ID) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // "To provide the xds_wrr_locality load balancer information about // locality weights received from EDS, the cluster resolver will // populate a new locality weight attribute for each address The // attribute will have the weight (as an integer) of the locality // the address is part of." 
- A52 -<<<<<<< HEAD resolverEndpoint = wrrlocality.SetAddrInfoInEndpoint(resolverEndpoint, wrrlocality.AddrInfo{LocalityWeight: lw}) -======= - addr = wrrlocality.SetAddrInfo(addr, wrrlocality.AddrInfo{LocalityWeight: lw}) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var ew uint32 = 1 if endpoint.Weight != 0 { ew = endpoint.Weight } -<<<<<<< HEAD resolverEndpoint = weightedroundrobin.SetAddrInfoInEndpoint(resolverEndpoint, weightedroundrobin.AddrInfo{Weight: lw * ew}) retEndpoints = append(retEndpoints, resolverEndpoint) -======= - addr = weightedroundrobin.SetAddrInfo(addr, weightedroundrobin.AddrInfo{Weight: lw * ew}) - addrs = append(addrs, addr) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } return &clusterimpl.LBConfig{ @@ -382,9 +293,5 @@ func priorityLocalitiesToClusterImpl(localities []xdsresource.Locality, priority TelemetryLabels: mechanism.TelemetryLabels, DropCategories: drops, ChildPolicy: xdsLBPolicy, -<<<<<<< HEAD }, retEndpoints, nil -======= - }, addrs, nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go index bb8a3a2ab4..d9315c3ace 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go @@ -24,10 +24,7 @@ import ( "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" -<<<<<<< HEAD "google.golang.org/grpc/resolver" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -298,13 +295,8 @@ func (rr *resourceResolver) generateLocked(onDone xdsresource.OnDoneFunc) { switch uu := u.(type) { case xdsresource.EndpointsUpdate: ret = append(ret, priorityConfig{mechanism: rDM.dm, edsResp: uu, childNameGen: rDM.childNameGen}) -<<<<<<< HEAD case []resolver.Endpoint: ret = append(ret, priorityConfig{mechanism: rDM.dm, endpoints: uu, childNameGen: rDM.childNameGen}) -======= - case []string: - ret = append(ret, priorityConfig{mechanism: rDM.dm, addresses: uu, childNameGen: rDM.childNameGen}) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } } select { diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go index 554f1a32f7..5f7a211530 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go @@ -47,11 +47,7 @@ type dnsDiscoveryMechanism struct { logger *grpclog.PrefixLogger mu sync.Mutex -<<<<<<< HEAD endpoints []resolver.Endpoint -======= - addrs []string ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) updateReceived bool } @@ -107,11 +103,7 @@ func (dr *dnsDiscoveryMechanism) lastUpdate() (any, bool) { if !dr.updateReceived { return nil, false } -<<<<<<< HEAD return dr.endpoints, true -======= - return dr.addrs, true ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (dr *dnsDiscoveryMechanism) resolveNow() { @@ -141,7 +133,6 @@ func (dr *dnsDiscoveryMechanism) UpdateState(state resolver.State) error { } dr.mu.Lock() -<<<<<<< HEAD var endpoints = state.Endpoints if len(endpoints) == 0 { 
endpoints = make([]resolver.Endpoint, len(state.Addresses)) @@ -151,25 +142,6 @@ func (dr *dnsDiscoveryMechanism) UpdateState(state resolver.State) error { } } dr.endpoints = endpoints -======= - var addrs []string - if len(state.Endpoints) > 0 { - // Assume 1 address per endpoint, which is how DNS is expected to - // behave. The slice will grow as needed, however. - addrs = make([]string, 0, len(state.Endpoints)) - for _, e := range state.Endpoints { - for _, a := range e.Addresses { - addrs = append(addrs, a.Addr) - } - } - } else { - addrs = make([]string, len(state.Addresses)) - for i, a := range state.Addresses { - addrs[i] = a.Addr - } - } - dr.addrs = addrs ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) dr.updateReceived = true dr.mu.Unlock() @@ -192,11 +164,7 @@ func (dr *dnsDiscoveryMechanism) ReportError(err error) { dr.mu.Unlock() return } -<<<<<<< HEAD dr.endpoints = nil -======= - dr.addrs = nil ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) dr.updateReceived = true dr.mu.Unlock() diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go index 159ff51192..8f58c00303 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go @@ -25,11 +25,7 @@ import ( "encoding/json" "fmt" "math" -<<<<<<< HEAD rand "math/rand/v2" -======= - "math/rand" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "strings" "sync" "sync/atomic" @@ -37,10 +33,7 @@ import ( "unsafe" "google.golang.org/grpc/balancer" -<<<<<<< HEAD "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/buffer" @@ -80,11 +73,7 @@ func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Ba } b.logger = prefixLogger(b) b.logger.Infof("Created") -<<<<<<< HEAD b.child = synchronizingBalancerWrapper{lb: gracefulswitch.NewBalancer(b, bOpts)} -======= - b.child = gracefulswitch.NewBalancer(b, bOpts) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) go b.run() return b } @@ -164,14 +153,11 @@ type lbCfgUpdate struct { done chan struct{} } -<<<<<<< HEAD type scHealthUpdate struct { scw *subConnWrapper state balancer.SubConnState } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) type outlierDetectionBalancer struct { // These fields are safe to be accessed without holding any mutex because // they are synchronized in run(), which makes these field accesses happen @@ -190,14 +176,7 @@ type outlierDetectionBalancer struct { logger *grpclog.PrefixLogger channelzParent channelz.Identifier -<<<<<<< HEAD child synchronizingBalancerWrapper -======= - // childMu guards calls into child (to uphold the balancer.Balancer API - // guarantee of synchronous calls). - childMu sync.Mutex - child *gracefulswitch.Balancer ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // mu guards access to the following fields. It also helps to synchronize // behaviors of the following events: config updates, firing of the interval @@ -214,13 +193,8 @@ type outlierDetectionBalancer struct { // which uses addrs. This balancer waits for the interval timer algorithm to // finish before making the update to the addrs map. 
// -<<<<<<< HEAD // This mutex is never held when calling methods on the child policy // (within the context of a single goroutine). -======= - // This mutex is never held at the same time as childMu (within the context - // of a single goroutine). ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) mu sync.Mutex addrs map[string]*addressInfo cfg *LBConfig @@ -305,19 +279,9 @@ func (b *outlierDetectionBalancer) UpdateClientConnState(s balancer.ClientConnSt // the balancer.Balancer API, so it is guaranteed to be called in a // synchronous manner, so it cannot race with this read. if b.cfg == nil || b.cfg.ChildPolicy.Name != lbCfg.ChildPolicy.Name { -<<<<<<< HEAD if err := b.child.switchTo(bb); err != nil { return fmt.Errorf("outlier detection: error switching to child of type %q: %v", lbCfg.ChildPolicy.Name, err) } -======= - b.childMu.Lock() - err := b.child.SwitchTo(bb) - if err != nil { - b.childMu.Unlock() - return fmt.Errorf("outlier detection: error switching to child of type %q: %v", lbCfg.ChildPolicy.Name, err) - } - b.childMu.Unlock() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } b.mu.Lock() @@ -354,19 +318,10 @@ func (b *outlierDetectionBalancer) UpdateClientConnState(s balancer.ClientConnSt } b.mu.Unlock() -<<<<<<< HEAD err := b.child.updateClientConnState(balancer.ClientConnState{ ResolverState: s.ResolverState, BalancerConfig: b.cfg.ChildPolicy.Config, }) -======= - b.childMu.Lock() - err := b.child.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: s.ResolverState, - BalancerConfig: b.cfg.ChildPolicy.Config, - }) - b.childMu.Unlock() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) done := make(chan struct{}) b.pickerUpdateCh.Put(lbCfgUpdate{ @@ -379,13 +334,7 @@ func (b *outlierDetectionBalancer) UpdateClientConnState(s balancer.ClientConnSt } func (b *outlierDetectionBalancer) ResolverError(err error) { -<<<<<<< HEAD b.child.resolverError(err) -======= - b.childMu.Lock() - defer b.childMu.Unlock() - b.child.ResolverError(err) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func (b *outlierDetectionBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { @@ -401,10 +350,7 @@ func (b *outlierDetectionBalancer) updateSubConnState(sc balancer.SubConn, state if state.ConnectivityState == connectivity.Shutdown { delete(b.scWrappers, scw.SubConn) } -<<<<<<< HEAD scw.setLatestConnectivityState(state.ConnectivityState) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) b.scUpdateCh.Put(&scUpdate{ scw: scw, state: state, @@ -418,13 +364,7 @@ func (b *outlierDetectionBalancer) UpdateSubConnState(sc balancer.SubConn, state func (b *outlierDetectionBalancer) Close() { b.closed.Fire() <-b.done.Done() -<<<<<<< HEAD b.child.closeLB() -======= - b.childMu.Lock() - b.child.Close() - b.childMu.Unlock() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) b.scUpdateCh.Close() b.pickerUpdateCh.Close() @@ -437,13 +377,7 @@ func (b *outlierDetectionBalancer) Close() { } func (b *outlierDetectionBalancer) ExitIdle() { -<<<<<<< HEAD b.child.exitIdle() -======= - b.childMu.Lock() - defer b.childMu.Unlock() - b.child.ExitIdle() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // wrappedPicker delegates to the child policy's picker, and when the request @@ -533,7 +467,6 @@ func (b *outlierDetectionBalancer) NewSubConn(addrs []resolver.Address, opts bal return nil, err } scw := &subConnWrapper{ -<<<<<<< HEAD SubConn: sc, addresses: addrs, scUpdateCh: b.scUpdateCh, @@ -541,12 +474,6 @@ func (b 
*outlierDetectionBalancer) NewSubConn(addrs []resolver.Address, opts bal latestRawConnectivityState: balancer.SubConnState{ConnectivityState: connectivity.Idle}, latestHealthState: balancer.SubConnState{ConnectivityState: connectivity.Connecting}, healthListenerEnabled: len(addrs) == 1 && pickfirstleaf.IsManagedByPickfirst(addrs[0]), -======= - SubConn: sc, - addresses: addrs, - scUpdateCh: b.scUpdateCh, - listener: oldListener, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } b.mu.Lock() defer b.mu.Unlock() @@ -664,47 +591,18 @@ func (b *outlierDetectionBalancer) Target() string { // if the SubConn is not ejected. func (b *outlierDetectionBalancer) handleSubConnUpdate(u *scUpdate) { scw := u.scw -<<<<<<< HEAD scw.clearHealthListener() b.child.updateSubConnState(scw, u.state) } func (b *outlierDetectionBalancer) handleSubConnHealthUpdate(u *scHealthUpdate) { b.child.updateSubConnHealthState(u.scw, u.state) -======= - scw.latestState = u.state - if !scw.ejected { - if scw.listener != nil { - b.childMu.Lock() - scw.listener(u.state) - b.childMu.Unlock() - } - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // handleEjectedUpdate handles any SubConns that get ejected/unejected, and // forwards the appropriate corresponding subConnState to the child policy. func (b *outlierDetectionBalancer) handleEjectedUpdate(u *ejectionUpdate) { -<<<<<<< HEAD b.child.handleEjectionUpdate(u) -======= - scw := u.scw - scw.ejected = u.isEjected - // If scw.latestState has never been written to will default to connectivity - // IDLE, which is fine. - stateToUpdate := scw.latestState - if u.isEjected { - stateToUpdate = balancer.SubConnState{ - ConnectivityState: connectivity.TransientFailure, - } - } - if scw.listener != nil { - b.childMu.Lock() - scw.listener(stateToUpdate) - b.childMu.Unlock() - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // handleChildStateUpdate forwards the picker update wrapped in a wrapped picker @@ -777,11 +675,8 @@ func (b *outlierDetectionBalancer) run() { b.handleSubConnUpdate(u) case *ejectionUpdate: b.handleEjectedUpdate(u) -<<<<<<< HEAD case *scHealthUpdate: b.handleSubConnHealthUpdate(u) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } case update, ok := <-b.pickerUpdateCh.Get(): if !ok { @@ -910,11 +805,7 @@ func (b *outlierDetectionBalancer) successRateAlgorithm() { requiredSuccessRate := mean - stddev*(float64(ejectionCfg.StdevFactor)/1000) if successRate < requiredSuccessRate { channelz.Infof(logger, b.channelzParent, "SuccessRate algorithm detected outlier: %s. 
Parameters: successRate=%f, mean=%f, stddev=%f, requiredSuccessRate=%f", addrInfo, successRate, mean, stddev, requiredSuccessRate) -<<<<<<< HEAD if uint32(rand.Int32N(100)) < ejectionCfg.EnforcementPercentage { -======= - if uint32(rand.Int31n(100)) < ejectionCfg.EnforcementPercentage { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) b.ejectAddress(addrInfo) } } @@ -941,11 +832,7 @@ func (b *outlierDetectionBalancer) failurePercentageAlgorithm() { failurePercentage := (float64(bucket.numFailures) / float64(bucket.numSuccesses+bucket.numFailures)) * 100 if failurePercentage > float64(b.cfg.FailurePercentageEjection.Threshold) { channelz.Infof(logger, b.channelzParent, "FailurePercentage algorithm detected outlier: %s, failurePercentage=%f", addrInfo, failurePercentage) -<<<<<<< HEAD if uint32(rand.Int32N(100)) < ejectionCfg.EnforcementPercentage { -======= - if uint32(rand.Int31n(100)) < ejectionCfg.EnforcementPercentage { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) b.ejectAddress(addrInfo) } } @@ -974,7 +861,6 @@ func (b *outlierDetectionBalancer) unejectAddress(addrInfo *addressInfo) { } } -<<<<<<< HEAD // synchronizingBalancerWrapper serializes calls into balancer (to uphold the // balancer.Balancer API guarantee of synchronous calls). It also ensures a // consistent order of locking mutexes when using SubConn listeners to avoid @@ -1038,8 +924,6 @@ func (sbw *synchronizingBalancerWrapper) handleEjectionUpdate(u *ejectionUpdate) } } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // addressInfo contains the runtime information about an address that pertains // to Outlier Detection. This struct and all of its fields is protected by // outlierDetectionBalancer.mu in the case where it is accessed through the diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/subconn_wrapper.go b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/subconn_wrapper.go index 3ba2583790..7d710fde1b 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/subconn_wrapper.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/subconn_wrapper.go @@ -19,17 +19,11 @@ package outlierdetection import ( "fmt" -<<<<<<< HEAD "sync" "unsafe" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" -======= - "unsafe" - - "google.golang.org/grpc/balancer" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/resolver" ) @@ -39,7 +33,6 @@ import ( // whether or not this SubConn is ejected. type subConnWrapper struct { balancer.SubConn -<<<<<<< HEAD // addressInfo is a pointer to the subConnWrapper's corresponding address // map entry, if the map entry exists. It is accessed atomically. addressInfo unsafe.Pointer // *addressInfo @@ -67,26 +60,10 @@ type subConnWrapper struct { // run(), and child will always have the correctly updated SubConnState. ejected bool -======= - listener func(balancer.SubConnState) - - // addressInfo is a pointer to the subConnWrapper's corresponding address - // map entry, if the map entry exists. - addressInfo unsafe.Pointer // *addressInfo - // These two pieces of state will reach eventual consistency due to sync in - // run(), and child will always have the correctly updated SubConnState. - // latestState is the latest state update from the underlying SubConn. This - // is used whenever a SubConn gets unejected. 
- latestState balancer.SubConnState - ejected bool - - scUpdateCh *buffer.Unbounded ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // addresses is the list of address(es) this SubConn was created with to // help support any change in address(es) addresses []resolver.Address -<<<<<<< HEAD // latestHealthState is tracked to update the child policy during // unejection. latestHealthState balancer.SubConnState @@ -104,8 +81,6 @@ type subConnWrapper struct { // used to ensure a health listener is registered with the SubConn only when // the SubConn is READY. latestReceivedConnectivityState connectivity.State -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // eject causes the wrapper to report a state update with the TRANSIENT_FAILURE @@ -130,7 +105,6 @@ func (scw *subConnWrapper) uneject() { func (scw *subConnWrapper) String() string { return fmt.Sprintf("%+v", scw.addresses) } -<<<<<<< HEAD func (scw *subConnWrapper) RegisterHealthListener(listener func(balancer.SubConnState)) { // gRPC currently supports two mechanisms that provide a health signal for @@ -236,5 +210,3 @@ func (scw *subConnWrapper) setLatestConnectivityState(state connectivity.State) defer scw.mu.Unlock() scw.latestReceivedConnectivityState = state } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go index 5cfd764e53..ba3fe52e5c 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go @@ -123,10 +123,7 @@ func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) err return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) } addressesSplit := hierarchy.Group(s.ResolverState.Addresses) -<<<<<<< HEAD endpointsSplit := hierarchy.GroupEndpoints(s.ResolverState.Endpoints) -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) b.mu.Lock() // Create and remove children, since we know all children from the config @@ -146,10 +143,7 @@ func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) err cb := newChildBalancer(name, b, bb.Name(), b.cc) cb.updateConfig(newSubConfig, resolver.State{ Addresses: addressesSplit[name], -<<<<<<< HEAD Endpoints: endpointsSplit[name], -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ServiceConfig: s.ResolverState.ServiceConfig, Attributes: s.ResolverState.Attributes, }) @@ -171,10 +165,7 @@ func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) err // be built, if it's a low priority). 
 		currentChild.updateConfig(newSubConfig, resolver.State{
 			Addresses:     addressesSplit[name],
-<<<<<<< HEAD
 			Endpoints:     endpointsSplit[name],
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 			ServiceConfig: s.ResolverState.ServiceConfig,
 			Attributes:    s.ResolverState.Attributes,
 		})
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go
index 923e3f8c0a..8c44f19c3b 100644
--- a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go
@@ -285,14 +285,10 @@ func (b *ringhashBalancer) updateAddresses(addrs []resolver.Address) bool {
 }
 
 func (b *ringhashBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
-<<<<<<< HEAD
 	if b.logger.V(2) {
 		b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig))
 	}
-=======
-	b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig))
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	newConfig, ok := s.BalancerConfig.(*LBConfig)
 	if !ok {
 		return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig)
 	}
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/balancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/balancer.go
index c016ca77ba..2b289a8114 100644
--- a/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/balancer.go
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/wrrlocality/balancer.go
@@ -120,7 +120,6 @@ func SetAddrInfo(addr resolver.Address, addrInfo AddrInfo) resolver.Address {
 	return addr
 }
 
-<<<<<<< HEAD
 // SetAddrInfoInEndpoint returns a copy of endpoint in which the Attributes
 // field is updated with AddrInfo.
func SetAddrInfoInEndpoint(endpoint resolver.Endpoint, addrInfo AddrInfo) resolver.Endpoint { @@ -128,8 +127,6 @@ func SetAddrInfoInEndpoint(endpoint resolver.Endpoint, addrInfo AddrInfo) resolv return endpoint } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (a AddrInfo) String() string { return fmt.Sprintf("Locality Weight: %d", a.LocalityWeight) } diff --git a/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go b/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go index 986d1e96fb..0ffa9c8272 100644 --- a/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go +++ b/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go @@ -24,11 +24,7 @@ import ( "errors" "fmt" "io" -<<<<<<< HEAD rand "math/rand/v2" -======= - "math/rand" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "strconv" "sync/atomic" "time" @@ -166,11 +162,7 @@ func (i *interceptor) NewStream(ctx context.Context, _ iresolver.RPCInfo, done f } // For overriding in tests -<<<<<<< HEAD var randIntn = rand.IntN -======= -var randIntn = rand.Intn ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var newTimer = time.NewTimer func injectDelay(ctx context.Context, delayCfg *cpb.FaultDelay) error { diff --git a/vendor/google.golang.org/grpc/xds/internal/internal.go b/vendor/google.golang.org/grpc/xds/internal/internal.go index 6bffe2eecd..74c9195215 100644 --- a/vendor/google.golang.org/grpc/xds/internal/internal.go +++ b/vendor/google.golang.org/grpc/xds/internal/internal.go @@ -86,15 +86,12 @@ func SetLocalityID(addr resolver.Address, l LocalityID) resolver.Address { return addr } -<<<<<<< HEAD // SetLocalityIDInEndpoint sets locality ID in endpoint to l. func SetLocalityIDInEndpoint(endpoint resolver.Endpoint, l LocalityID) resolver.Endpoint { endpoint.Attributes = endpoint.Attributes.WithValue(localityKey, l) return endpoint } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // ResourceTypeMapForTesting maps TypeUrl to corresponding ResourceType. var ResourceTypeMapForTesting map[string]any diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go b/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go index 19def17a7c..7df75465ac 100644 --- a/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go @@ -23,11 +23,7 @@ import ( "encoding/json" "fmt" "math/bits" -<<<<<<< HEAD rand "math/rand/v2" -======= - "math/rand" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "strings" "sync/atomic" "time" diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go b/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go index 087184622e..1ba6c001d9 100644 --- a/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go @@ -22,11 +22,7 @@ package resolver import ( "context" "fmt" -<<<<<<< HEAD rand "math/rand/v2" -======= - "math/rand" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "sync/atomic" "google.golang.org/grpc/internal" @@ -48,17 +44,10 @@ import ( // xdsresolver.Scheme const Scheme = "xds" -<<<<<<< HEAD // newBuilderWithConfigForTesting creates a new xds resolver builder using a // specific xds bootstrap config, so tests can use multiple xds clients in // different ClientConns at the same time. 
func newBuilderWithConfigForTesting(config []byte) (resolver.Builder, error) { -======= -// newBuilderForTesting creates a new xds resolver builder using a specific xds -// bootstrap config, so tests can use multiple xds clients in different -// ClientConns at the same time. -func newBuilderForTesting(config []byte) (resolver.Builder, error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return &xdsResolverBuilder{ newXDSClient: func(name string) (xdsclient.XDSClient, func(), error) { return xdsclient.NewForTesting(xdsclient.OptionsForTesting{Name: name, Contents: config}) @@ -66,7 +55,6 @@ func newBuilderForTesting(config []byte) (resolver.Builder, error) { }, nil } -<<<<<<< HEAD // newBuilderWithClientForTesting creates a new xds resolver builder using the // specific xDS client, so that tests have complete control over the exact // specific xDS client being used. @@ -84,11 +72,6 @@ func init() { resolver.Register(&xdsResolverBuilder{}) internal.NewXDSResolverWithConfigForTesting = newBuilderWithConfigForTesting internal.NewXDSResolverWithClientForTesting = newBuilderWithClientForTesting -======= -func init() { - resolver.Register(&xdsResolverBuilder{}) - internal.NewXDSResolverWithConfigForTesting = newBuilderForTesting ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) rinternal.NewWRR = wrr.NewRandom rinternal.NewXDSClient = xdsclient.New diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go index 0f40119160..f81685a45e 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go @@ -19,7 +19,6 @@ package xdsclient import ( "context" -<<<<<<< HEAD "fmt" "sync/atomic" @@ -28,20 +27,6 @@ import ( "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/xds/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/transport/ads" -======= - "errors" - "fmt" - "strings" - "sync" - "sync/atomic" - "time" - - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/xds/bootstrap" - "google.golang.org/grpc/xds/internal/xdsclient/load" - "google.golang.org/grpc/xds/internal/xdsclient/transport" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/types/known/anypb" "google.golang.org/protobuf/types/known/timestamppb" @@ -50,7 +35,6 @@ import ( v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" ) -<<<<<<< HEAD type resourceState struct { watchers map[xdsresource.ResourceWatcher]bool // Set of watchers for this resource. cache xdsresource.ResourceData // Most recent ACKed update for this resource. @@ -105,46 +89,6 @@ type authority struct { // The below defined fields must only be accessed in the context of the // serializer callback, owned by this authority. -======= -type watchState int - -const ( - watchStateStarted watchState = iota // Watch started, request not yet set. - watchStateRequested // Request sent for resource being watched. - watchStateReceived // Response received for resource being watched. - watchStateTimeout // Watch timer expired, no response. - watchStateCanceled // Watch cancelled. 
-) - -type resourceState struct { - watchers map[xdsresource.ResourceWatcher]bool // Set of watchers for this resource - cache xdsresource.ResourceData // Most recent ACKed update for this resource - md xdsresource.UpdateMetadata // Metadata for the most recent update - deletionIgnored bool // True if resource deletion was ignored for a prior update - - // Common watch state for all watchers of this resource. - wTimer *time.Timer // Expiry timer - wState watchState // State of the watch -} - -// authority wraps all state associated with a single management server. It -// contains the transport used to communicate with the management server and a -// cache of resource state for resources requested from the management server. -// -// Bootstrap configuration could contain multiple entries in the authorities map -// that share the same server config (server address and credentials to use). We -// share the same authority instance amongst these entries, and the reference -// counting is taken care of by the `clientImpl` type. -type authority struct { - serverCfg *bootstrap.ServerConfig // Server config for this authority - bootstrapCfg *bootstrap.Config // Full bootstrap configuration - refCount int // Reference count of watches referring to this authority - serializer *grpcsync.CallbackSerializer // Callback serializer for invoking watch callbacks - resourceTypeGetter func(string) xdsresource.Type // ResourceType registry lookup - transport *transport.Transport // Underlying xDS transport to the management server - watchExpiryTimeout time.Duration // Resource watch expiry timeout - logger *grpclog.PrefixLogger ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // A two level map containing the state of all the resources being watched. // @@ -154,7 +98,6 @@ type authority struct { // // The second level map key is the resource name, with the value being the // actual state of the resource. -<<<<<<< HEAD resources map[xdsresource.Type]map[string]*resourceState // An ordered list of xdsChannels corresponding to the list of server @@ -377,91 +320,6 @@ func (a *authority) adsResourceUpdate(serverConfig *bootstrap.ServerConfig, rTyp // Only executed in the context of a serializer callback. func (a *authority) handleADSResourceUpdate(serverConfig *bootstrap.ServerConfig, rType xdsresource.Type, updates map[string]ads.DataAndErrTuple, md xdsresource.UpdateMetadata, onDone func()) { a.handleRevertingToPrimaryOnUpdate(serverConfig) -======= - resourcesMu sync.Mutex - resources map[xdsresource.Type]map[string]*resourceState - closed bool -} - -// authorityArgs is a convenience struct to wrap arguments required to create a -// new authority. All fields here correspond directly to appropriate fields -// stored in the authority struct. -type authorityArgs struct { - // The reason for passing server config and bootstrap config separately - // (although the former is part of the latter) is because authorities in the - // bootstrap config might contain an empty server config, and in this case, - // the top-level server config is to be used. - serverCfg *bootstrap.ServerConfig - bootstrapCfg *bootstrap.Config - serializer *grpcsync.CallbackSerializer - resourceTypeGetter func(string) xdsresource.Type - watchExpiryTimeout time.Duration - backoff func(int) time.Duration // Backoff for ADS and LRS stream failures. 
- logger *grpclog.PrefixLogger -} - -func newAuthority(args authorityArgs) (*authority, error) { - ret := &authority{ - serverCfg: args.serverCfg, - bootstrapCfg: args.bootstrapCfg, - serializer: args.serializer, - resourceTypeGetter: args.resourceTypeGetter, - watchExpiryTimeout: args.watchExpiryTimeout, - logger: args.logger, - resources: make(map[xdsresource.Type]map[string]*resourceState), - } - - tr, err := transport.New(transport.Options{ - ServerCfg: args.serverCfg, - OnRecvHandler: ret.handleResourceUpdate, - OnErrorHandler: ret.newConnectionError, - OnSendHandler: ret.transportOnSendHandler, - Backoff: args.backoff, - Logger: args.logger, - NodeProto: args.bootstrapCfg.Node(), - }) - if err != nil { - return nil, fmt.Errorf("creating new transport to %q: %v", args.serverCfg, err) - } - ret.transport = tr - return ret, nil -} - -// transportOnSendHandler is called by the underlying transport when it sends a -// resource request successfully. Timers are activated for resources waiting for -// a response. -func (a *authority) transportOnSendHandler(u *transport.ResourceSendInfo) { - rType := a.resourceTypeGetter(u.URL) - // Resource type not found is not expected under normal circumstances, since - // the resource type url passed to the transport is determined by the authority. - if rType == nil { - a.logger.Warningf("Unknown resource type url: %s.", u.URL) - return - } - a.resourcesMu.Lock() - defer a.resourcesMu.Unlock() - a.startWatchTimersLocked(rType, u.ResourceNames) -} - -func (a *authority) handleResourceUpdate(resourceUpdate transport.ResourceUpdate, onDone func()) error { - rType := a.resourceTypeGetter(resourceUpdate.URL) - if rType == nil { - return xdsresource.NewErrorf(xdsresource.ErrorTypeResourceTypeUnsupported, "Resource URL %v unknown in response from server", resourceUpdate.URL) - } - - opts := &xdsresource.DecodeOptions{ - BootstrapConfig: a.bootstrapCfg, - ServerConfig: a.serverCfg, - } - updates, md, err := decodeAllResources(opts, rType, resourceUpdate) - a.updateResourceStateAndScheduleCallbacks(rType, updates, md, onDone) - return err -} - -func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Type, updates map[string]resourceDataErrTuple, md xdsresource.UpdateMetadata, onDone func()) { - a.resourcesMu.Lock() - defer a.resourcesMu.Unlock() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // We build a list of callback funcs to invoke, and invoke them at the end // of this method instead of inline (when handling the update for a @@ -471,12 +329,7 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty // callbacks are invoked, and the watchers have processed the update. watcherCnt := new(atomic.Int64) done := func() { -<<<<<<< HEAD if watcherCnt.Add(-1) == 0 { -======= - watcherCnt.Add(-1) - if watcherCnt.Load() == 0 { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) onDone() } } @@ -487,22 +340,15 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty // this update, invoke onDone explicitly to unblock the next read on // the ADS stream. 
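[Editor's illustration] In the HEAD version above, the last-watcher check is folded into the atomic decrement, if watcherCnt.Add(-1) == 0, whereas the replaced two-step form (Add, then a separate Load) lets two watchers that finish at the same time both observe zero and fire onDone twice. A small self-contained sketch of the fixed countdown; notifyAll is an invented name:

    package main

    import (
    	"fmt"
    	"sync"
    	"sync/atomic"
    )

    // notifyAll fans an update out to n watchers and calls onDone exactly once,
    // after the last watcher finishes. Using the return value of Add(-1) makes
    // the "last one out" decision atomic; a separate Load after the Add is a
    // window in which another goroutine can also read zero.
    func notifyAll(n int64, onDone func()) {
    	watcherCnt := new(atomic.Int64)
    	watcherCnt.Store(n)
    	var wg sync.WaitGroup
    	for i := int64(0); i < n; i++ {
    		wg.Add(1)
    		go func() {
    			defer wg.Done()
    			// ... deliver the update to one watcher ...
    			if watcherCnt.Add(-1) == 0 { // atomic decrement-and-test
    				onDone()
    			}
    		}()
    	}
    	wg.Wait()
    }

    func main() {
    	notifyAll(3, func() { fmt.Println("all watchers done; unblock ADS read") })
    }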
onDone() -<<<<<<< HEAD return } for _, f := range funcsToSchedule { a.watcherCallbackSerializer.ScheduleOr(f, onDone) -======= - } - for _, f := range funcsToSchedule { - a.serializer.ScheduleOr(f, onDone) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } }() resourceStates := a.resources[rType] for name, uErr := range updates { -<<<<<<< HEAD state, ok := resourceStates[name] if !ok { continue @@ -555,87 +401,6 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty if md.ErrState != nil { state.md.Version = md.ErrState.Version } -======= - if state, ok := resourceStates[name]; ok { - // Cancel the expiry timer associated with the resource once a - // response is received, irrespective of whether the update is a - // good one or not. - // - // We check for watch states `started` and `requested` here to - // accommodate for a race which can happen in the following - // scenario: - // - When a watch is registered, it is possible that the ADS stream - // is not yet created. In this case, the request for the resource - // is not sent out immediately. An entry in the `resourceStates` - // map is created with a watch state of `started`. - // - Once the stream is created, it is possible that the management - // server might respond with the requested resource before we send - // out request for the same. If we don't check for `started` here, - // and move the state to `received`, we will end up starting the - // timer when the request gets sent out. And since the management - // server already sent us the resource, there is a good chance - // that it will not send it again. This would eventually lead to - // the timer firing, even though we have the resource in the - // cache. - if state.wState == watchStateStarted || state.wState == watchStateRequested { - // It is OK to ignore the return value from Stop() here because - // if the timer has already fired, it means that the timer watch - // expiry callback is blocked on the same lock that we currently - // hold. Since we move the state to `received` here, the timer - // callback will be a no-op. - if state.wTimer != nil { - state.wTimer.Stop() - } - state.wState = watchStateReceived - } - - if uErr.err != nil { - // On error, keep previous version of the resource. But update - // status and error. - state.md.ErrState = md.ErrState - state.md.Status = md.Status - for watcher := range state.watchers { - watcher := watcher - err := uErr.err - watcherCnt.Add(1) - funcsToSchedule = append(funcsToSchedule, func(context.Context) { watcher.OnError(err, done) }) - } - continue - } - - if state.deletionIgnored { - state.deletionIgnored = false - a.logger.Infof("A valid update was received for resource %q of type %q after previously ignoring a deletion", name, rType.TypeName()) - } - // Notify watchers if any of these conditions are met: - // - this is the first update for this resource - // - this update is different from the one currently cached - // - the previous update for this resource was NACKed, but the update - // before that was the same as this update. - if state.cache == nil || !state.cache.Equal(uErr.resource) || state.md.ErrState != nil { - for watcher := range state.watchers { - watcher := watcher - resource := uErr.resource - watcherCnt.Add(1) - funcsToSchedule = append(funcsToSchedule, func(context.Context) { watcher.OnUpdate(resource, done) }) - } - } - // Sync cache. 
- if a.logger.V(2) { - a.logger.Infof("Resource type %q with name %q added to cache", rType.TypeName(), name) - } - state.cache = uErr.resource - // Set status to ACK, and clear error state. The metadata might be a - // NACK metadata because some other resources in the same response - // are invalid. - state.md = md - state.md.ErrState = nil - state.md.Status = xdsresource.ServiceStatusACKed - if md.ErrState != nil { - state.md.Version = md.ErrState.Version - } - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // If this resource type requires that all resources be present in every @@ -665,20 +430,15 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty // resource. So, there is no need to generate another one. continue } -<<<<<<< HEAD if _, ok := updates[name]; ok { // If the resource was present in the response, move on. continue } if state.md.Status == xdsresource.ServiceStatusNotExist { -======= - if _, ok := updates[name]; !ok { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // The metadata status is set to "ServiceStatusNotExist" if a // previous update deleted this resource, in which case we do not // want to repeatedly call the watch callbacks with a // "resource-not-found" error. -<<<<<<< HEAD continue } if serverConfig.ServerFeaturesIgnoreResourceDeletion() { @@ -743,281 +503,10 @@ func (a *authority) handleADSResourceDoesNotExist(rType xdsresource.Type, resour return } -======= - if state.md.Status == xdsresource.ServiceStatusNotExist { - continue - } - // Per A53, resource deletions are ignored if the `ignore_resource_deletion` - // server feature is enabled through the bootstrap configuration. If the - // resource deletion is to be ignored, the resource is not removed from - // the cache and the corresponding OnResourceDoesNotExist() callback is - // not invoked on the watchers. - if a.serverCfg.ServerFeaturesIgnoreResourceDeletion() { - if !state.deletionIgnored { - state.deletionIgnored = true - a.logger.Warningf("Ignoring resource deletion for resource %q of type %q", name, rType.TypeName()) - } - continue - } - // If resource exists in cache, but not in the new update, delete - // the resource from cache, and also send a resource not found error - // to indicate resource removed. Metadata for the resource is still - // maintained, as this is required by CSDS. - state.cache = nil - state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} - for watcher := range state.watchers { - watcher := watcher - watcherCnt.Add(1) - funcsToSchedule = append(funcsToSchedule, func(context.Context) { watcher.OnResourceDoesNotExist(done) }) - } - } - } -} - -type resourceDataErrTuple struct { - resource xdsresource.ResourceData - err error -} - -func decodeAllResources(opts *xdsresource.DecodeOptions, rType xdsresource.Type, update transport.ResourceUpdate) (map[string]resourceDataErrTuple, xdsresource.UpdateMetadata, error) { - timestamp := time.Now() - md := xdsresource.UpdateMetadata{ - Version: update.Version, - Timestamp: timestamp, - } - - topLevelErrors := make([]error, 0) // Tracks deserialization errors, where we don't have a resource name. - perResourceErrors := make(map[string]error) // Tracks resource validation errors, where we have a resource name. - ret := make(map[string]resourceDataErrTuple) // Return result, a map from resource name to either resource data or error. 
- for _, r := range update.Resources { - result, err := rType.Decode(opts, r) - - // Name field of the result is left unpopulated only when resource - // deserialization fails. - name := "" - if result != nil { - name = xdsresource.ParseName(result.Name).String() - } - if err == nil { - ret[name] = resourceDataErrTuple{resource: result.Resource} - continue - } - if name == "" { - topLevelErrors = append(topLevelErrors, err) - continue - } - perResourceErrors[name] = err - // Add place holder in the map so we know this resource name was in - // the response. - ret[name] = resourceDataErrTuple{err: err} - } - - if len(topLevelErrors) == 0 && len(perResourceErrors) == 0 { - md.Status = xdsresource.ServiceStatusACKed - return ret, md, nil - } - - md.Status = xdsresource.ServiceStatusNACKed - errRet := combineErrors(rType.TypeName(), topLevelErrors, perResourceErrors) - md.ErrState = &xdsresource.UpdateErrorMetadata{ - Version: update.Version, - Err: errRet, - Timestamp: timestamp, - } - return ret, md, errRet -} - -// startWatchTimersLocked is invoked upon transport.OnSend() callback with resources -// requested on the underlying ADS stream. This satisfies the conditions to start -// watch timers per A57 [https://github.com/grpc/proposal/blob/master/A57-xds-client-failure-mode-behavior.md#handling-resources-that-do-not-exist] -// -// Caller must hold a.resourcesMu. -func (a *authority) startWatchTimersLocked(rType xdsresource.Type, resourceNames []string) { - resourceStates := a.resources[rType] - for _, resourceName := range resourceNames { - if state, ok := resourceStates[resourceName]; ok { - if state.wState != watchStateStarted { - continue - } - state.wTimer = time.AfterFunc(a.watchExpiryTimeout, func() { - a.resourcesMu.Lock() - a.handleWatchTimerExpiryLocked(rType, resourceName, state) - a.resourcesMu.Unlock() - }) - state.wState = watchStateRequested - } - } -} - -// stopWatchTimersLocked is invoked upon connection errors to stops watch timers -// for resources that have been requested, but not yet responded to by the management -// server. -// -// Caller must hold a.resourcesMu. -func (a *authority) stopWatchTimersLocked() { - for _, rType := range a.resources { - for resourceName, state := range rType { - if state.wState != watchStateRequested { - continue - } - if !state.wTimer.Stop() { - // If the timer has already fired, it means that the timer watch expiry - // callback is blocked on the same lock that we currently hold. Don't change - // the watch state and instead let the watch expiry callback handle it. - a.logger.Warningf("Watch timer for resource %v already fired. Ignoring here.", resourceName) - continue - } - state.wTimer = nil - state.wState = watchStateStarted - } - } -} - -// newConnectionError is called by the underlying transport when it receives a -// connection error. The error will be forwarded to all the resource watchers. -func (a *authority) newConnectionError(err error) { - a.resourcesMu.Lock() - defer a.resourcesMu.Unlock() - - a.stopWatchTimersLocked() - - // We do not consider it an error if the ADS stream was closed after having received - // a response on the stream. This is because there are legitimate reasons why the server - // may need to close the stream during normal operations, such as needing to rebalance - // load or the underlying connection hitting its max connection age limit. - // See gRFC A57 for more details. 
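The watch-expiry machinery being deleted above (startWatchTimersLocked / stopWatchTimersLocked) implements the gRFC A57 failure-mode behavior: a timer is armed only once the request is actually sent, and a false return from Timer.Stop means the expiry callback is already blocked on the same lock, so a state transition turns that callback into a no-op. A simplified, self-contained sketch of that state machine (hypothetical names, not the vendored code):

package main

import (
	"fmt"
	"sync"
	"time"
)

type watchState int

const (
	watchStateStarted   watchState = iota // watch registered, request not yet sent
	watchStateRequested                   // request sent, expiry timer armed
	watchStateReceived                    // response received, timer cancelled
	watchStateTimeout                     // timer fired before any response
)

type watch struct {
	mu    sync.Mutex
	state watchState
	timer *time.Timer
}

// onRequestSent arms the expiry timer, mirroring startWatchTimersLocked:
// a resource the server never answers for is eventually reported as
// "does not exist" (gRFC A57).
func (w *watch) onRequestSent(timeout time.Duration) {
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.state != watchStateStarted {
		return
	}
	w.state = watchStateRequested
	w.timer = time.AfterFunc(timeout, func() {
		w.mu.Lock()
		defer w.mu.Unlock()
		if w.state != watchStateRequested {
			return // a response won the race; nothing to report
		}
		w.state = watchStateTimeout
		fmt.Println("watch expired: resource does not exist")
	})
}

// onResponse cancels the timer, mirroring the update path above: Stop may
// return false when the callback is already blocked on mu, in which case
// the state change below makes that callback a no-op.
func (w *watch) onResponse() {
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.state != watchStateRequested {
		return
	}
	w.timer.Stop()
	w.state = watchStateReceived
}

func main() {
	w := &watch{}
	w.onRequestSent(50 * time.Millisecond)
	time.Sleep(100 * time.Millisecond) // no response arrives: timer fires
	w.onResponse()                     // late response is now a no-op
}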
- if xdsresource.ErrType(err) == xdsresource.ErrTypeStreamFailedAfterRecv { - a.logger.Warningf("Watchers not notified since ADS stream failed after having received at least one response: %v", err) - return - } - - for _, rType := range a.resources { - for _, state := range rType { - // Propagate the connection error from the transport layer to all watchers. - for watcher := range state.watchers { - watcher := watcher - a.serializer.TrySchedule(func(context.Context) { - watcher.OnError(xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, "xds: error received from xDS stream: %v", err), func() {}) - }) - } - } - } -} - -// Increments the reference count. Caller must hold parent's authorityMu. -func (a *authority) refLocked() { - a.refCount++ -} - -// Decrements the reference count. Caller must hold parent's authorityMu. -func (a *authority) unrefLocked() int { - a.refCount-- - return a.refCount -} - -func (a *authority) close() { - a.transport.Close() - - a.resourcesMu.Lock() - a.closed = true - a.resourcesMu.Unlock() -} - -func (a *authority) watchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) func() { - if a.logger.V(2) { - a.logger.Infof("New watch for type %q, resource name %q", rType.TypeName(), resourceName) - } - a.resourcesMu.Lock() - defer a.resourcesMu.Unlock() - - // Lookup the ResourceType specific resources from the top-level map. If - // there is no entry for this ResourceType, create one. - resources := a.resources[rType] - if resources == nil { - resources = make(map[string]*resourceState) - a.resources[rType] = resources - } - - // Lookup the resourceState for the particular resource that the watch is - // being registered for. If this is the first watch for this resource, - // instruct the transport layer to send a DiscoveryRequest for the same. - state := resources[resourceName] - if state == nil { - if a.logger.V(2) { - a.logger.Infof("First watch for type %q, resource name %q", rType.TypeName(), resourceName) - } - state = &resourceState{ - watchers: make(map[xdsresource.ResourceWatcher]bool), - md: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}, - wState: watchStateStarted, - } - resources[resourceName] = state - a.sendDiscoveryRequestLocked(rType, resources) - } - // Always add the new watcher to the set of watchers. - state.watchers[watcher] = true - - // If we have a cached copy of the resource, notify the new watcher. - if state.cache != nil { - if a.logger.V(2) { - a.logger.Infof("Resource type %q with resource name %q found in cache: %s", rType.TypeName(), resourceName, state.cache.ToJSON()) - } - resource := state.cache - a.serializer.TrySchedule(func(context.Context) { watcher.OnUpdate(resource, func() {}) }) - } - - return func() { - a.resourcesMu.Lock() - defer a.resourcesMu.Unlock() - - // We already have a reference to the resourceState for this particular - // resource. Avoid indexing into the two-level map to figure this out. - - // Delete this particular watcher from the list of watchers, so that its - // callback will not be invoked in the future. - state.wState = watchStateCanceled - delete(state.watchers, watcher) - if len(state.watchers) > 0 { - return - } - - // There are no more watchers for this resource, delete the state - // associated with it, and instruct the transport to send a request - // which does not include this resource name. 
- if a.logger.V(2) { - a.logger.Infof("Removing last watch for type %q, resource name %q", rType.TypeName(), resourceName) - } - delete(resources, resourceName) - a.sendDiscoveryRequestLocked(rType, resources) - } -} - -func (a *authority) handleWatchTimerExpiryLocked(rType xdsresource.Type, resourceName string, state *resourceState) { - if a.closed { - return - } - a.logger.Warningf("Watch for resource %q of type %s timed out", resourceName, rType.TypeName()) - - switch state.wState { - case watchStateRequested: - // This is the only state where we need to handle the timer expiry by - // invoking appropriate watch callbacks. This is handled outside the switch. - case watchStateCanceled: - return - default: - a.logger.Warningf("Unexpected watch state %q for resource %q.", state.wState, resourceName) - return - } - - state.wState = watchStateTimeout - // With the watch timer firing, it is safe to assume that the resource does - // not exist on the management server. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) state.cache = nil state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} for watcher := range state.watchers { watcher := watcher -<<<<<<< HEAD a.watcherCallbackSerializer.TrySchedule(func(context.Context) { watcher.OnResourceDoesNotExist(func() {}) }) } } @@ -1315,61 +804,6 @@ func (a *authority) dumpResources() []*v3statuspb.ClientConfig_GenericXdsConfig // // Only executed in the context of a serializer callback. func (a *authority) resourceConfig() []*v3statuspb.ClientConfig_GenericXdsConfig { -======= - a.serializer.TrySchedule(func(context.Context) { watcher.OnResourceDoesNotExist(func() {}) }) - } -} - -func (a *authority) triggerResourceNotFoundForTesting(rType xdsresource.Type, resourceName string) { - a.resourcesMu.Lock() - defer a.resourcesMu.Unlock() - - if a.closed { - return - } - resourceStates := a.resources[rType] - state, ok := resourceStates[resourceName] - if !ok { - return - } - // if watchStateTimeout already triggered resource not found above from - // normal watch expiry. - if state.wState == watchStateCanceled || state.wState == watchStateTimeout { - return - } - state.wState = watchStateTimeout - state.cache = nil - state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} - for watcher := range state.watchers { - watcher := watcher - a.serializer.TrySchedule(func(context.Context) { watcher.OnResourceDoesNotExist(func() {}) }) - } -} - -// sendDiscoveryRequestLocked sends a discovery request for the specified -// resource type and resource names. Even though this method does not directly -// access the resource cache, it is important that `resourcesMu` be held when -// calling this method to ensure that a consistent snapshot of resource names is -// being requested. 
-func (a *authority) sendDiscoveryRequestLocked(rType xdsresource.Type, resources map[string]*resourceState) {
-	resourcesToRequest := make([]string, len(resources))
-	i := 0
-	for name := range resources {
-		resourcesToRequest[i] = name
-		i++
-	}
-	a.transport.SendRequest(rType.TypeURL(), resourcesToRequest)
-}
-
-func (a *authority) reportLoad() (*load.Store, func()) {
-	return a.transport.ReportLoad()
-}
-
-func (a *authority) dumpResources() []*v3statuspb.ClientConfig_GenericXdsConfig {
-	a.resourcesMu.Lock()
-	defer a.resourcesMu.Unlock()
-
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	var ret []*v3statuspb.ClientConfig_GenericXdsConfig
 	for rType, resourceStates := range a.resources {
 		typeURL := rType.TypeURL()
@@ -1399,7 +833,6 @@ func (a *authority) dumpResources() []*v3statuspb.ClientConfig_GenericXdsConfig
 	return ret
 }
 
-<<<<<<< HEAD
 func (a *authority) close() {
 	a.xdsClientSerializerClose()
 	<-a.xdsClientSerializer.Done()
@@ -1408,8 +841,6 @@ func (a *authority) close() {
 	}
 }
 
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 func serviceStatusToProto(serviceStatus xdsresource.ServiceStatus) v3adminpb.ClientResourceStatus {
 	switch serviceStatus {
 	case xdsresource.ServiceStatusUnknown:
@@ -1426,31 +857,3 @@ func serviceStatusToProto(serviceStatus xdsresource.ServiceStatus) v3adminpb.Cli
 		return v3adminpb.ClientResourceStatus_UNKNOWN
 	}
 }
-<<<<<<< HEAD
-=======
-
-func combineErrors(rType string, topLevelErrors []error, perResourceErrors map[string]error) error {
-	var errStrB strings.Builder
-	errStrB.WriteString(fmt.Sprintf("error parsing %q response: ", rType))
-	if len(topLevelErrors) > 0 {
-		errStrB.WriteString("top level errors: ")
-		for i, err := range topLevelErrors {
-			if i != 0 {
-				errStrB.WriteString(";\n")
-			}
-			errStrB.WriteString(err.Error())
-		}
-	}
-	if len(perResourceErrors) > 0 {
-		var i int
-		for name, err := range perResourceErrors {
-			if i != 0 {
-				errStrB.WriteString(";\n")
-			}
-			i++
-			errStrB.WriteString(fmt.Sprintf("resource %q: %v", name, err.Error()))
-		}
-	}
-	return errors.New(errStrB.String())
-}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go
index b95dc74f16..eba00907dc 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go
@@ -32,15 +32,9 @@ import (
 type XDSClient interface {
 	// WatchResource uses xDS to discover the resource associated with the
 	// provided resource name. The resource type implementation determines how
-<<<<<<< HEAD
 	// xDS responses are deserialized and validated, as received from the
 	// xDS management server. Upon receipt of a response from the management
 	// server, an appropriate callback on the watcher is invoked.
-=======
-	// xDS requests are sent out and how responses are deserialized and
-	// validated. Upon receipt of a response from the management server, an
-	// appropriate callback on the watcher is invoked.
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	//
 	// Most callers will not have a need to use this API directly.
They will // instead use a resource-type-specific wrapper API provided by the relevant diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go index 526e2070e4..55299c457b 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go @@ -26,17 +26,11 @@ import ( "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/backoff" -<<<<<<< HEAD "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/xds/bootstrap" xdsclientinternal "google.golang.org/grpc/xds/internal/xdsclient/internal" "google.golang.org/grpc/xds/internal/xdsclient/transport/ads" "google.golang.org/grpc/xds/internal/xdsclient/transport/grpctransport" -======= - "google.golang.org/grpc/internal/cache" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/xds/bootstrap" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -62,7 +56,6 @@ const NameForServer = "#server" // only when all references are released, and it is safe for the caller to // invoke this close function multiple times. func New(name string) (XDSClient, func(), error) { -<<<<<<< HEAD config, err := bootstrap.GetConfiguration() if err != nil { return nil, nil, fmt.Errorf("xds: failed to get xDS bootstrap config: %v", err) @@ -76,22 +69,11 @@ func newClientImpl(config *bootstrap.Config, watchExpiryTimeout time.Duration, s c := &clientImpl{ done: grpcsync.NewEvent(), authorities: make(map[string]*authority), -======= - return newRefCounted(name, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout, backoff.DefaultExponential.Backoff) -} - -// newClientImpl returns a new xdsClient with the given config. -func newClientImpl(config *bootstrap.Config, watchExpiryTimeout time.Duration, idleAuthorityDeleteTimeout time.Duration, streamBackoff func(int) time.Duration) (*clientImpl, error) { - ctx, cancel := context.WithCancel(context.Background()) - c := &clientImpl{ - done: grpcsync.NewEvent(), ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) config: config, watchExpiryTimeout: watchExpiryTimeout, backoff: streamBackoff, serializer: grpcsync.NewCallbackSerializer(ctx), serializerClose: cancel, -<<<<<<< HEAD transportBuilder: &grpctransport.Builder{}, resourceTypes: newResourceTypeRegistry(), xdsActiveChannels: make(map[string]*channelState), @@ -119,13 +101,6 @@ func newClientImpl(config *bootstrap.Config, watchExpiryTimeout time.Duration, i getChannelForADS: c.getChannelForADS, logPrefix: clientPrefix(c), }) -======= - resourceTypes: newResourceTypeRegistry(), - authorities: make(map[string]*authority), - idleAuthorities: cache.NewTimeoutCache(idleAuthorityDeleteTimeout), - } - ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) c.logger = prefixLogger(c) return c, nil } @@ -135,10 +110,7 @@ func newClientImpl(config *bootstrap.Config, watchExpiryTimeout time.Duration, i type OptionsForTesting struct { // Name is a unique name for this xDS client. Name string -<<<<<<< HEAD -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // Contents contain a JSON representation of the bootstrap configuration to // be used when creating the xDS client. Contents []byte @@ -147,19 +119,9 @@ type OptionsForTesting struct { // unspecified, uses the default value used in non-test code. 
WatchExpiryTimeout time.Duration -<<<<<<< HEAD // StreamBackoffAfterFailure is the backoff function used to determine the // backoff duration after stream failures. // If unspecified, uses the default value used in non-test code. -======= - // AuthorityIdleTimeout is the timeout before idle authorities are deleted. - // If unspecified, uses the default value used in non-test code. - AuthorityIdleTimeout time.Duration - - // StreamBackoffAfterFailure is the backoff function used to determine the - // backoff duration after stream failures. If unspecified, uses the default - // value used in non-test code. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) StreamBackoffAfterFailure func(int) time.Duration } @@ -179,29 +141,15 @@ func NewForTesting(opts OptionsForTesting) (XDSClient, func(), error) { if opts.WatchExpiryTimeout == 0 { opts.WatchExpiryTimeout = defaultWatchExpiryTimeout } -<<<<<<< HEAD -======= - if opts.AuthorityIdleTimeout == 0 { - opts.AuthorityIdleTimeout = defaultIdleAuthorityDeleteTimeout - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if opts.StreamBackoffAfterFailure == nil { opts.StreamBackoffAfterFailure = defaultStreamBackoffFunc } -<<<<<<< HEAD config, err := bootstrap.NewConfigForTesting(opts.Contents) if err != nil { return nil, nil, err } return newRefCounted(opts.Name, config, opts.WatchExpiryTimeout, opts.StreamBackoffAfterFailure) -======= - if err := bootstrap.SetFallbackBootstrapConfig(opts.Contents); err != nil { - return nil, nil, err - } - client, cancel, err := newRefCounted(opts.Name, opts.WatchExpiryTimeout, opts.AuthorityIdleTimeout, opts.StreamBackoffAfterFailure) - return client, func() { bootstrap.UnsetFallbackBootstrapConfigForTesting(); cancel() }, err ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // GetForTesting returns an xDS client created earlier using the given name. 
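For orientation, a hedged usage sketch of the test-only constructor shown above. The bootstrap JSON follows the standard gRPC xDS bootstrap shape but is illustrative only, and because the xdsclient package is internal to grpc-go this would only compile from within the grpc module:

package xdsclient_test // hypothetical placement inside the grpc module

import (
	"testing"
	"time"

	"google.golang.org/grpc/xds/internal/xdsclient"
)

func TestClientCreation(t *testing.T) {
	// Illustrative bootstrap configuration for a single management server.
	bootstrapContents := []byte(`{
	  "xds_servers": [{
	    "server_uri": "xds-server.example.com:443",
	    "channel_creds": [{"type": "insecure"}]
	  }],
	  "node": {"id": "test-node"}
	}`)

	client, release, err := xdsclient.NewForTesting(xdsclient.OptionsForTesting{
		Name:               t.Name(),
		Contents:           bootstrapContents,
		WatchExpiryTimeout: 500 * time.Millisecond, // shortened for tests
	})
	if err != nil {
		t.Fatalf("NewForTesting() failed: %v", err)
	}
	defer release() // drops this ref; the client closes when the count hits zero

	_ = client // watches would be registered via resource-type-specific wrappers
}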
@@ -227,10 +175,7 @@ func GetForTesting(name string) (XDSClient, func(), error) { func init() { internal.TriggerXDSResourceNotFoundForTesting = triggerXDSResourceNotFoundForTesting -<<<<<<< HEAD xdsclientinternal.ResourceWatchStateForTesting = resourceWatchStateForTesting -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } func triggerXDSResourceNotFoundForTesting(client XDSClient, typ xdsresource.Type, name string) error { @@ -241,7 +186,6 @@ func triggerXDSResourceNotFoundForTesting(client XDSClient, typ xdsresource.Type return crc.clientImpl.triggerResourceNotFoundForTesting(typ, name) } -<<<<<<< HEAD func resourceWatchStateForTesting(client XDSClient, typ xdsresource.Type, name string) (ads.ResourceWatchState, error) { crc, ok := client.(*clientRefCounted) if !ok { @@ -250,8 +194,6 @@ func resourceWatchStateForTesting(client XDSClient, typ xdsresource.Type, name s return crc.clientImpl.resourceWatchStateForTesting(typ, name) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var ( clients = map[string]*clientRefCounted{} clientsMu sync.Mutex diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go index fdcef4a1c1..f5fc76d8a7 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go @@ -19,10 +19,6 @@ package xdsclient import ( -<<<<<<< HEAD -======= - "fmt" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "sync/atomic" "time" @@ -31,14 +27,7 @@ import ( "google.golang.org/grpc/internal/xds/bootstrap" ) -<<<<<<< HEAD const defaultWatchExpiryTimeout = 15 * time.Second -======= -const ( - defaultWatchExpiryTimeout = 15 * time.Second - defaultIdleAuthorityDeleteTimeout = 5 * time.Minute -) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) var ( // The following functions are no-ops in the actual code, but can be @@ -51,7 +40,6 @@ var ( func clientRefCountedClose(name string) { clientsMu.Lock() -<<<<<<< HEAD client, ok := clients[name] if !ok { logger.Errorf("Attempt to close a non-existent xDS client with name %s", name) @@ -70,32 +58,13 @@ func clientRefCountedClose(name string) { // Hence, this needs to be called without holding the lock. client.clientImpl.close() xdsClientImplCloseHook(name) -======= - defer clientsMu.Unlock() - - client, ok := clients[name] - if !ok { - logger.Errorf("Attempt to close a non-existent xDS client with name %s", name) - return - } - if client.decrRef() != 0 { - return - } - client.clientImpl.close() - xdsClientImplCloseHook(name) - delete(clients, name) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // newRefCounted creates a new reference counted xDS client implementation for // name, if one does not exist already. If an xDS client for the given name // exists, it gets a reference to it and returns it. -<<<<<<< HEAD func newRefCounted(name string, config *bootstrap.Config, watchExpiryTimeout time.Duration, streamBackoff func(int) time.Duration) (XDSClient, func(), error) { -======= -func newRefCounted(name string, watchExpiryTimeout, idleAuthorityTimeout time.Duration, streamBackoff func(int) time.Duration) (XDSClient, func(), error) { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) clientsMu.Lock() defer clientsMu.Unlock() @@ -105,15 +74,7 @@ func newRefCounted(name string, watchExpiryTimeout, idleAuthorityTimeout time.Du } // Create the new client implementation. 
-<<<<<<< HEAD c, err := newClientImpl(config, watchExpiryTimeout, streamBackoff) -======= - config, err := bootstrap.GetConfiguration() - if err != nil { - return nil, nil, fmt.Errorf("xds: failed to get xDS bootstrap config: %v", err) - } - c, err := newClientImpl(config, watchExpiryTimeout, idleAuthorityTimeout, streamBackoff) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if err != nil { return nil, nil, err } diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go index f0509d210e..bb8d904002 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go @@ -19,7 +19,6 @@ package xdsclient import ( -<<<<<<< HEAD "errors" "fmt" "sync" @@ -32,20 +31,10 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient/transport" "google.golang.org/grpc/xds/internal/xdsclient/transport/ads" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" -======= - "sync" - "time" - - "google.golang.org/grpc/internal/cache" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/xds/bootstrap" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) var _ XDSClient = &clientImpl{} -<<<<<<< HEAD // ErrClientClosed is returned when the xDS client is closed. var ErrClientClosed = errors.New("xds: the xDS client is closed") @@ -142,39 +131,6 @@ func (cs *channelState) adsResourceDoesNotExist(typ xdsresource.Type, resourceNa for authority := range cs.interestedAuthorities { authority.adsResourceDoesNotExist(typ, resourceName) } -======= -// clientImpl is the real implementation of the xds client. The exported Client -// is a wrapper of this struct with a ref count. -type clientImpl struct { - done *grpcsync.Event - config *bootstrap.Config - logger *grpclog.PrefixLogger - watchExpiryTimeout time.Duration - backoff func(int) time.Duration // Backoff for ADS and LRS stream failures. - serializer *grpcsync.CallbackSerializer - serializerClose func() - resourceTypes *resourceTypeRegistry - - // authorityMu protects the authority fields. It's necessary because an - // authority is created when it's used. - authorityMu sync.Mutex - // authorities is a map from ServerConfig to authority. So that - // different authorities sharing the same ServerConfig can share the - // authority. - // - // The key is **ServerConfig.String()**, not the authority name. - // - // An authority is either in authorities, or idleAuthorities, - // never both. - authorities map[string]*authority - // idleAuthorities keeps the authorities that are not used (the last - // watch on it was canceled). They are kept in the cache and will be deleted - // after a timeout. The key is ServerConfig.String(). - // - // An authority is either in authorities, or idleAuthorities, - // never both. - idleAuthorities *cache.TimeoutCache ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // BootstrapConfig returns the configuration read from the bootstrap file. @@ -183,17 +139,12 @@ func (c *clientImpl) BootstrapConfig() *bootstrap.Config { return c.config } -<<<<<<< HEAD // close closes the xDS client and releases all resources. -======= -// close closes the gRPC connection to the management server. 
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (c *clientImpl) close() { if c.done.HasFired() { return } c.done.Fire() -<<<<<<< HEAD c.topLevelAuthority.close() for _, a := range c.authorities { @@ -218,18 +169,6 @@ func (c *clientImpl) close() { c.serializerClose() <-c.serializer.Done() -======= - // TODO: Should we invoke the registered callbacks here with an error that - // the client is closed? - - c.authorityMu.Lock() - for _, a := range c.authorities { - a.close() - } - c.idleAuthorities.Clear(true) - c.authorityMu.Unlock() - c.serializerClose() ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) for _, s := range c.config.XDSServers() { for _, f := range s.Cleanups() { @@ -245,7 +184,6 @@ func (c *clientImpl) close() { } c.logger.Infof("Shutdown") } -<<<<<<< HEAD // getChannelForADS returns an xdsChannel for the given server configuration. // @@ -412,5 +350,3 @@ func (c *clientImpl) releaseChannel(serverConfig *bootstrap.ServerConfig, state channelToClose.close() }) } -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go deleted file mode 100644 index 56c26b8175..0000000000 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go +++ /dev/null @@ -1,146 +0,0 @@ -/* - * - * Copyright 2022 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package xdsclient - -import ( - "errors" - "fmt" - - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/xds/bootstrap" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" -) - -// findAuthority returns the authority for this name. If it doesn't already -// exist, one will be created. -// -// Note that this doesn't always create new authority. authorities with the same -// config but different names are shared. -// -// The returned unref function must be called when the caller is done using this -// authority, without holding c.authorityMu. -// -// Caller must not hold c.authorityMu. 
-func (c *clientImpl) findAuthority(n *xdsresource.Name) (*authority, func(), error) { - scheme, authority := n.Scheme, n.Authority - - c.authorityMu.Lock() - defer c.authorityMu.Unlock() - if c.done.HasFired() { - return nil, nil, errors.New("the xds-client is closed") - } - - config := c.config.XDSServers()[0] - if scheme == xdsresource.FederationScheme { - authorities := c.config.Authorities() - if authorities == nil { - return nil, nil, fmt.Errorf("xds: failed to find authority %q", authority) - } - cfg, ok := authorities[authority] - if !ok { - return nil, nil, fmt.Errorf("xds: failed to find authority %q", authority) - } - if len(cfg.XDSServers) >= 1 { - config = cfg.XDSServers[0] - } - } - - a, err := c.newAuthorityLocked(config) - if err != nil { - return nil, nil, fmt.Errorf("xds: failed to connect to the control plane for authority %q: %v", authority, err) - } - // All returned authority from this function will be used by a watch, - // holding the ref here. - // - // Note that this must be done while c.authorityMu is held, to avoid the - // race that an authority is returned, but before the watch starts, the - // old last watch is canceled (in another goroutine), causing this - // authority to be removed, and then a watch will start on a removed - // authority. - // - // unref() will be done when the watch is canceled. - a.refLocked() - return a, func() { c.unrefAuthority(a) }, nil -} - -// newAuthorityLocked creates a new authority for the given config. If an -// authority for the given config exists in the cache, it is returned instead of -// creating a new one. -// -// The caller must take a reference of the returned authority before using, and -// unref afterwards. -// -// caller must hold c.authorityMu -func (c *clientImpl) newAuthorityLocked(config *bootstrap.ServerConfig) (_ *authority, retErr error) { - // First check if there's already an authority for this config. If found, it - // means this authority is used by other watches (could be the same - // authority name, or a different authority name but the same server - // config). Return it. - configStr := config.String() - if a, ok := c.authorities[configStr]; ok { - return a, nil - } - // Second check if there's an authority in the idle cache. If found, it - // means this authority was created, but moved to the idle cache because the - // watch was canceled. Move it from idle cache to the authority cache, and - // return. - if old, ok := c.idleAuthorities.Remove(configStr); ok { - oldA, _ := old.(*authority) - if oldA != nil { - c.authorities[configStr] = oldA - return oldA, nil - } - } - - // Make a new authority since there's no existing authority for this config. - ret, err := newAuthority(authorityArgs{ - serverCfg: config, - bootstrapCfg: c.config, - serializer: c.serializer, - resourceTypeGetter: c.resourceTypes.get, - watchExpiryTimeout: c.watchExpiryTimeout, - backoff: c.backoff, - logger: grpclog.NewPrefixLogger(logger, authorityPrefix(c, config.ServerURI())), - }) - if err != nil { - return nil, fmt.Errorf("creating new authority for config %q: %v", config.String(), err) - } - // Add it to the cache, so it will be reused. - c.authorities[configStr] = ret - return ret, nil -} - -// unrefAuthority unrefs the authority. It also moves the authority to idle -// cache if it's ref count is 0. -// -// This function doesn't need to called explicitly. It's called by the returned -// unref from findAuthority(). -// -// Caller must not hold c.authorityMu. 
-func (c *clientImpl) unrefAuthority(a *authority) {
-	c.authorityMu.Lock()
-	defer c.authorityMu.Unlock()
-	if a.unrefLocked() > 0 {
-		return
-	}
-	configStr := a.serverCfg.String()
-	delete(c.authorities, configStr)
-	c.idleAuthorities.Add(configStr, a, func() {
-		a.close()
-	})
-}
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go
index abf0c1a14f..9d75867730 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go
@@ -24,14 +24,7 @@ import (
 
 // dumpResources returns the status and contents of all xDS resources.
 func (c *clientImpl) dumpResources() *v3statuspb.ClientConfig {
-<<<<<<< HEAD
 	retCfg := c.topLevelAuthority.dumpResources()
-=======
-	c.authorityMu.Lock()
-	defer c.authorityMu.Unlock()
-
-	var retCfg []*v3statuspb.ClientConfig_GenericXdsConfig
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	for _, a := range c.authorities {
 		retCfg = append(retCfg, a.dumpResources()...)
 	}
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go
index b7f836b8c9..efb41b87db 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go
@@ -28,7 +28,6 @@ import (
 // It returns a Store for the user to report loads, a function to cancel the
 // load reporting stream.
 func (c *clientImpl) ReportLoad(server *bootstrap.ServerConfig) (*load.Store, func()) {
-<<<<<<< HEAD
 	xc, releaseChannelRef, err := c.getChannelForLRS(server)
 	if err != nil {
 		c.logger.Warningf("Failed to create a channel to the management server %q to report load: %v", server, err)
@@ -38,22 +37,5 @@ func (c *clientImpl) ReportLoad(server *bootstrap.ServerConfig) (*load.Store, fu
 	return load, func() {
 		stopLoadReporting()
 		releaseChannelRef()
-=======
-	c.authorityMu.Lock()
-	a, err := c.newAuthorityLocked(server)
-	if err != nil {
-		c.authorityMu.Unlock()
-		c.logger.Warningf("Failed to connect to the management server to report load for authority %q: %v", server, err)
-		return nil, func() {}
-	}
-	// Hold the ref before starting load reporting.
-	a.refLocked()
-	c.authorityMu.Unlock()
-
-	store, cancelF := a.reportLoad()
-	return store, func() {
-		cancelF()
-		c.unrefAuthority(a)
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	}
 }
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go
index f8a2b5ee34..ed4ee360fb 100644
--- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go
+++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go
@@ -22,25 +22,15 @@ import (
 	"fmt"
 	"sync"
 
-<<<<<<< HEAD
 	"google.golang.org/grpc/xds/internal/xdsclient/transport/ads"
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
 )
 
 // WatchResource uses xDS to discover the resource associated with the provided
-<<<<<<< HEAD
 // resource name. The resource type implementation determines how xDS responses
 // are deserialized and validated, as received from the xDS management
 // server.
Upon receipt of a response from the management server, an // appropriate callback on the watcher is invoked. -======= -// resource name. The resource type implementation determines how xDS requests -// are sent out and how responses are deserialized and validated. Upon receipt -// of a response from the management server, an appropriate callback on the -// watcher is invoked. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) func (c *clientImpl) WatchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) (cancel func()) { // Return early if the client is already closed. // @@ -59,7 +49,6 @@ func (c *clientImpl) WatchResource(rType xdsresource.Type, resourceName string, return func() {} } -<<<<<<< HEAD n := xdsresource.ParseName(resourceName) a := c.getAuthorityForResource(n) if a == nil { @@ -95,22 +84,6 @@ func (c *clientImpl) getAuthorityForResource(name *xdsresource.Name) *authority return c.topLevelAuthority } return c.authorities[name.Authority] -======= - // TODO: Make ParseName return an error if parsing fails, and - // schedule the OnError callback in that case. - n := xdsresource.ParseName(resourceName) - a, unref, err := c.findAuthority(n) - if err != nil { - logger.Warningf("Watch registered for name %q of type %q, authority %q is not found", rType.TypeName(), resourceName, n.Authority) - c.serializer.TrySchedule(func(context.Context) { watcher.OnError(err, func() {}) }) - return func() {} - } - cancelF := a.watchResource(rType, n.String(), watcher) - return func() { - cancelF() - unref() - } ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) } // A registry of xdsresource.Type implementations indexed by their corresponding @@ -145,7 +118,6 @@ func (r *resourceTypeRegistry) maybeRegister(rType xdsresource.Type) error { } func (c *clientImpl) triggerResourceNotFoundForTesting(rType xdsresource.Type, resourceName string) error { -<<<<<<< HEAD c.channelsMu.Lock() defer c.channelsMu.Unlock() @@ -172,18 +144,3 @@ func (c *clientImpl) resourceWatchStateForTesting(rType xdsresource.Type, resour } return ads.ResourceWatchState{}, fmt.Errorf("unable to find watch state for resource type %q and name %q", rType.TypeName(), resourceName) } -======= - if c == nil || c.done.HasFired() { - return fmt.Errorf("attempt to trigger resource-not-found-error for resource %q of type %q, but client is closed", rType.TypeName(), resourceName) - } - - n := xdsresource.ParseName(resourceName) - a, unref, err := c.findAuthority(n) - if err != nil { - return fmt.Errorf("attempt to trigger resource-not-found-error for resource %q of type %q, but authority %q is not found", rType.TypeName(), resourceName, n.Authority) - } - defer unref() - a.triggerResourceNotFoundForTesting(rType, n.String()) - return nil -} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/internal/internal.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/internal/internal.go index f4edfc612f..cdbb86db82 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/internal/internal.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/internal/internal.go @@ -20,7 +20,6 @@ package internal // The following vars can be overridden by tests. var ( -<<<<<<< HEAD // GRPCNewClient returns a new gRPC Client. GRPCNewClient any // func(string, ...grpc.DialOption) (*grpc.ClientConn, error) @@ -31,8 +30,4 @@ var ( // identified by the given resource type and resource name. 
Returns a // non-nil error if there is no such resource being watched. ResourceWatchStateForTesting any // func(xdsclient.XDSClient, xdsresource.Type, string) error -======= - // NewADSStream is a function that returns a new ADS stream. - NewADSStream any // func(context.Context, *grpc.ClientConn) (v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient, error) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ) diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/logging.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/logging.go index 978b9f8e4b..00b6392d6a 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/logging.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/logging.go @@ -34,10 +34,3 @@ func prefixLogger(p *clientImpl) *internalgrpclog.PrefixLogger { func clientPrefix(p *clientImpl) string { return fmt.Sprintf("[xds-client %p] ", p) } -<<<<<<< HEAD -======= - -func authorityPrefix(p *clientImpl, serverURI string) string { - return fmt.Sprintf("%s[%s] ", clientPrefix(p), serverURI) -} ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/internal/internal.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/internal/internal.go deleted file mode 100644 index 9acc33cbbf..0000000000 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/internal/internal.go +++ /dev/null @@ -1,25 +0,0 @@ -/* - * - * Copyright 2024 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package internal contains functionality internal to the transport package. -package internal - -// The following vars can be overridden by tests. -var ( - // GRPCNewClient creates a new gRPC Client. - GRPCNewClient any // func(string, ...grpc.DialOption) (*grpc.ClientConn, error) -) diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go deleted file mode 100644 index e47fdd9846..0000000000 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go +++ /dev/null @@ -1,259 +0,0 @@ -/* - * - * Copyright 2022 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package transport - -import ( - "context" - "errors" - "fmt" - "io" - "time" - - "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/xds/internal" - "google.golang.org/grpc/xds/internal/xdsclient/load" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/durationpb" - - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" - v3lrsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" - v3lrspb "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" -) - -type lrsStream = v3lrsgrpc.LoadReportingService_StreamLoadStatsClient - -// ReportLoad starts reporting loads to the management server the transport is -// configured to use. -// -// It returns a Store for the user to report loads and a function to cancel the -// load reporting. -func (t *Transport) ReportLoad() (*load.Store, func()) { - t.lrsStartStream() - return t.lrsStore, grpcsync.OnceFunc(func() { t.lrsStopStream() }) -} - -// lrsStartStream starts an LRS stream to the server, if none exists. -func (t *Transport) lrsStartStream() { - t.lrsMu.Lock() - defer t.lrsMu.Unlock() - - t.lrsRefCount++ - if t.lrsRefCount != 1 { - // Return early if the stream has already been started. - return - } - - ctx, cancel := context.WithCancel(context.Background()) - t.lrsCancelStream = cancel - - // Create a new done channel everytime a new stream is created. This ensures - // that we don't close the same channel multiple times (from lrsRunner() - // goroutine) when multiple streams are created and closed. - t.lrsRunnerDoneCh = make(chan struct{}) - go t.lrsRunner(ctx) -} - -// lrsStopStream closes the LRS stream, if this is the last user of the stream. -func (t *Transport) lrsStopStream() { - t.lrsMu.Lock() - defer t.lrsMu.Unlock() - - t.lrsRefCount-- - if t.lrsRefCount != 0 { - // Return early if the stream has other references. - return - } - - t.lrsCancelStream() - t.logger.Infof("Stopping LRS stream") - - // Wait for the runner goroutine to exit. The done channel will be - // recreated when a new stream is created. - <-t.lrsRunnerDoneCh -} - -// lrsRunner starts an LRS stream to report load data to the management server. -// It reports load at constant intervals (as configured by the management -// server) until the context is cancelled. -func (t *Transport) lrsRunner(ctx context.Context) { - defer close(t.lrsRunnerDoneCh) - - // This feature indicates that the client supports the - // LoadStatsResponse.send_all_clusters field in the LRS response. - node := proto.Clone(t.nodeProto).(*v3corepb.Node) - node.ClientFeatures = append(node.ClientFeatures, "envoy.lrs.supports_send_all_clusters") - - runLoadReportStream := func() error { - // streamCtx is created and canceled in case we terminate the stream - // early for any reason, to avoid gRPC-Go leaking the RPC's monitoring - // goroutine. 
- streamCtx, cancel := context.WithCancel(ctx) - defer cancel() - stream, err := v3lrsgrpc.NewLoadReportingServiceClient(t.cc).StreamLoadStats(streamCtx) - if err != nil { - t.logger.Warningf("Creating LRS stream to server %q failed: %v", t.serverURI, err) - return nil - } - t.logger.Infof("Created LRS stream to server %q", t.serverURI) - - if err := t.sendFirstLoadStatsRequest(stream, node); err != nil { - t.logger.Warningf("Sending first LRS request failed: %v", err) - return nil - } - - clusters, interval, err := t.recvFirstLoadStatsResponse(stream) - if err != nil { - t.logger.Warningf("Reading from LRS stream failed: %v", err) - return nil - } - - // We reset backoff state when we successfully receive at least one - // message from the server. - t.sendLoads(streamCtx, stream, clusters, interval) - return backoff.ErrResetBackoff - } - backoff.RunF(ctx, runLoadReportStream, t.backoff) -} - -func (t *Transport) sendLoads(ctx context.Context, stream lrsStream, clusterNames []string, interval time.Duration) { - tick := time.NewTicker(interval) - defer tick.Stop() - for { - select { - case <-tick.C: - case <-ctx.Done(): - return - } - if err := t.sendLoadStatsRequest(stream, t.lrsStore.Stats(clusterNames)); err != nil { - t.logger.Warningf("Writing to LRS stream failed: %v", err) - return - } - } -} - -func (t *Transport) sendFirstLoadStatsRequest(stream lrsStream, node *v3corepb.Node) error { - req := &v3lrspb.LoadStatsRequest{Node: node} - if t.logger.V(perRPCVerbosityLevel) { - t.logger.Infof("Sending initial LoadStatsRequest: %s", pretty.ToJSON(req)) - } - err := stream.Send(req) - if err == io.EOF { - return getStreamError(stream) - } - return err -} - -func (t *Transport) recvFirstLoadStatsResponse(stream lrsStream) ([]string, time.Duration, error) { - resp, err := stream.Recv() - if err != nil { - return nil, 0, fmt.Errorf("failed to receive first LoadStatsResponse: %v", err) - } - if t.logger.V(perRPCVerbosityLevel) { - t.logger.Infof("Received first LoadStatsResponse: %s", pretty.ToJSON(resp)) - } - - rInterval := resp.GetLoadReportingInterval() - if rInterval.CheckValid() != nil { - return nil, 0, fmt.Errorf("invalid load_reporting_interval: %v", err) - } - interval := rInterval.AsDuration() - - if resp.ReportEndpointGranularity { - // TODO(easwars): Support per endpoint loads. - return nil, 0, errors.New("lrs: endpoint loads requested, but not supported by current implementation") - } - - clusters := resp.Clusters - if resp.SendAllClusters { - // Return nil to send stats for all clusters. 
- clusters = nil - } - - return clusters, interval, nil -} - -func (t *Transport) sendLoadStatsRequest(stream lrsStream, loads []*load.Data) error { - clusterStats := make([]*v3endpointpb.ClusterStats, 0, len(loads)) - for _, sd := range loads { - droppedReqs := make([]*v3endpointpb.ClusterStats_DroppedRequests, 0, len(sd.Drops)) - for category, count := range sd.Drops { - droppedReqs = append(droppedReqs, &v3endpointpb.ClusterStats_DroppedRequests{ - Category: category, - DroppedCount: count, - }) - } - localityStats := make([]*v3endpointpb.UpstreamLocalityStats, 0, len(sd.LocalityStats)) - for l, localityData := range sd.LocalityStats { - lid, err := internal.LocalityIDFromString(l) - if err != nil { - return err - } - loadMetricStats := make([]*v3endpointpb.EndpointLoadMetricStats, 0, len(localityData.LoadStats)) - for name, loadData := range localityData.LoadStats { - loadMetricStats = append(loadMetricStats, &v3endpointpb.EndpointLoadMetricStats{ - MetricName: name, - NumRequestsFinishedWithMetric: loadData.Count, - TotalMetricValue: loadData.Sum, - }) - } - localityStats = append(localityStats, &v3endpointpb.UpstreamLocalityStats{ - Locality: &v3corepb.Locality{ - Region: lid.Region, - Zone: lid.Zone, - SubZone: lid.SubZone, - }, - TotalSuccessfulRequests: localityData.RequestStats.Succeeded, - TotalRequestsInProgress: localityData.RequestStats.InProgress, - TotalErrorRequests: localityData.RequestStats.Errored, - TotalIssuedRequests: localityData.RequestStats.Issued, - LoadMetricStats: loadMetricStats, - UpstreamEndpointStats: nil, // TODO: populate for per endpoint loads. - }) - } - - clusterStats = append(clusterStats, &v3endpointpb.ClusterStats{ - ClusterName: sd.Cluster, - ClusterServiceName: sd.Service, - UpstreamLocalityStats: localityStats, - TotalDroppedRequests: sd.TotalDrops, - DroppedRequests: droppedReqs, - LoadReportInterval: durationpb.New(sd.ReportInterval), - }) - } - - req := &v3lrspb.LoadStatsRequest{ClusterStats: clusterStats} - if t.logger.V(perRPCVerbosityLevel) { - t.logger.Infof("Sending LRS loads: %s", pretty.ToJSON(req)) - } - err := stream.Send(req) - if err == io.EOF { - return getStreamError(stream) - } - return err -} - -func getStreamError(stream lrsStream) error { - for { - if _, err := stream.Recv(); err != nil { - return err - } - } -} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go deleted file mode 100644 index 59b221727a..0000000000 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go +++ /dev/null @@ -1,702 +0,0 @@ -/* - * - * Copyright 2022 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package transport implements the xDS transport protocol functionality -// required by the xdsclient. 
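The deleted lrsRunner above wraps each stream attempt in backoff.RunF and returns backoff.ErrResetBackoff once at least one message has been received, so a healthy-then-broken stream reconnects quickly instead of inheriting a long backoff. A simplified, self-contained approximation of that loop (names and exact reset semantics are assumptions, not the vendored implementation):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// errResetBackoff signals that the last attempt made progress, so the
// next failure should start again from the smallest backoff.
var errResetBackoff = errors.New("reset backoff")

func runWithBackoff(ctx context.Context, f func() error, backoff func(retries int) time.Duration) {
	retries := 0
	for ctx.Err() == nil {
		if err := f(); errors.Is(err, errResetBackoff) {
			retries = 0
			continue // progress was made: reconnect immediately
		}
		select {
		case <-time.After(backoff(retries)):
			retries++
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	attempt := 0
	runWithBackoff(ctx, func() error {
		attempt++
		fmt.Println("LRS stream attempt", attempt)
		return nil // stream ended without progress: retry after backoff
	}, func(retries int) time.Duration {
		return time.Duration(retries+1) * 100 * time.Millisecond // toy linear backoff
	})
}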
-package transport - -import ( - "context" - "errors" - "fmt" - "sync" - "sync/atomic" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/buffer" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/internal/xds/bootstrap" - "google.golang.org/grpc/keepalive" - xdsclientinternal "google.golang.org/grpc/xds/internal/xdsclient/internal" - "google.golang.org/grpc/xds/internal/xdsclient/load" - transportinternal "google.golang.org/grpc/xds/internal/xdsclient/transport/internal" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - "google.golang.org/protobuf/types/known/anypb" - - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v3adsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" - v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" - statuspb "google.golang.org/genproto/googleapis/rpc/status" -) - -type adsStream = v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient - -func init() { - transportinternal.GRPCNewClient = grpc.NewClient - xdsclientinternal.NewADSStream = func(ctx context.Context, cc *grpc.ClientConn) (adsStream, error) { - return v3adsgrpc.NewAggregatedDiscoveryServiceClient(cc).StreamAggregatedResources(ctx) - } -} - -// Any per-RPC level logs which print complete request or response messages -// should be gated at this verbosity level. Other per-RPC level logs which print -// terse output should be at `INFO` and verbosity 2. -const perRPCVerbosityLevel = 9 - -// Transport provides a resource-type agnostic implementation of the xDS -// transport protocol. At this layer, resource contents are supposed to be -// opaque blobs which should be meaningful only to the xDS data model layer -// which is implemented by the `xdsresource` package. -// -// Under the hood, it owns the gRPC connection to a single management server and -// manages the lifecycle of ADS/LRS streams. It uses the xDS v3 transport -// protocol version. -type Transport struct { - // These fields are initialized at creation time and are read-only afterwards. - cc *grpc.ClientConn // ClientConn to the management server. - serverURI string // URI of the management server. - onRecvHandler OnRecvHandlerFunc // Resource update handler. xDS data model layer. - onErrorHandler func(error) // To report underlying stream errors. - onSendHandler OnSendHandlerFunc // To report resources requested on ADS stream. - lrsStore *load.Store // Store returned to user for pushing loads. - backoff func(int) time.Duration // Backoff after stream failures. - nodeProto *v3corepb.Node // Identifies the gRPC application. - logger *grpclog.PrefixLogger // Prefix logger for transport logs. - adsRunnerCancel context.CancelFunc // CancelFunc for the ADS goroutine. - adsRunnerDoneCh chan struct{} // To notify exit of ADS goroutine. - lrsRunnerDoneCh chan struct{} // To notify exit of LRS goroutine. - - // These channels enable synchronization amongst the different goroutines - // spawned by the transport, and between asynchronous events resulting from - // receipt of responses from the management server. - adsStreamCh chan adsStream // New ADS streams are pushed here. - adsRequestCh *buffer.Unbounded // Resource and ack requests are pushed here. - - // mu guards the following runtime state maintained by the transport. 
- mu sync.Mutex - // resources is map from resource type URL to the set of resource names - // being requested for that type. When the ADS stream is restarted, the - // transport requests all these resources again from the management server. - resources map[string]map[string]bool - // versions is a map from resource type URL to the most recently ACKed - // version for that resource. Resource versions are a property of the - // resource type and not the stream, and will not be reset upon stream - // restarts. - versions map[string]string - // nonces is a map from resource type URL to the most recently received - // nonce for that resource type. Nonces are a property of the ADS stream and - // will be reset upon stream restarts. - nonces map[string]string - - lrsMu sync.Mutex // Protects all LRS state. - lrsCancelStream context.CancelFunc // CancelFunc for the LRS stream. - lrsRefCount int // Reference count on the load store. -} - -// OnRecvHandlerFunc is the implementation at the xDS data model layer, which -// determines if the configuration received from the management server can be -// applied locally or not. -// -// A nil error is returned from this function when the data model layer believes -// that the received configuration is good and can be applied locally. This will -// cause the transport layer to send an ACK to the management server. A non-nil -// error is returned from this function when the data model layer believes -// otherwise, and this will cause the transport layer to send a NACK. -// -// The implementation is expected to invoke onDone when local processing of the -// update is complete, i.e. it is consumed by all watchers. -type OnRecvHandlerFunc func(update ResourceUpdate, onDone func()) error - -// OnSendHandlerFunc is the implementation at the authority, which handles state -// changes for the resource watch and stop watch timers accordingly. -type OnSendHandlerFunc func(update *ResourceSendInfo) - -// ResourceUpdate is a representation of the configuration update received from -// the management server. It only contains fields which are useful to the data -// model layer, and layers above it. -type ResourceUpdate struct { - // Resources is the list of resources received from the management server. - Resources []*anypb.Any - // URL is the resource type URL for the above resources. - URL string - // Version is the resource version, for the above resources, as specified by - // the management server. - Version string -} - -// Options specifies configuration knobs used when creating a new Transport. -type Options struct { - // ServerCfg contains all the configuration required to connect to the xDS - // management server. - ServerCfg *bootstrap.ServerConfig - // OnRecvHandler is the component which makes ACK/NACK decisions based on - // the received resources. - // - // Invoked inline and implementations must not block. - OnRecvHandler OnRecvHandlerFunc - // OnErrorHandler provides a way for the transport layer to report - // underlying stream errors. These can be bubbled all the way up to the user - // of the xdsClient. - // - // Invoked inline and implementations must not block. - OnErrorHandler func(error) - // OnSendHandler provides a way for the transport layer to report underlying - // resource requests sent on the stream. However, Send() on the ADS stream will - // return successfully as long as: - // 1. there is enough flow control quota to send the message. - // 2. the message is added to the send buffer. 
- // However, the connection may fail after the callback is invoked and before - // the message is actually sent on the wire. This is accepted. - // - // Invoked inline and implementations must not block. - OnSendHandler func(*ResourceSendInfo) - // Backoff controls the amount of time to backoff before recreating failed - // ADS streams. If unspecified, a default exponential backoff implementation - // is used. For more details, see: - // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. - Backoff func(retries int) time.Duration - // Logger does logging with a prefix. - Logger *grpclog.PrefixLogger - // NodeProto contains the Node proto to be used in xDS requests. This will be - // of type *v3corepb.Node. - NodeProto *v3corepb.Node -} - -// New creates a new Transport. -func New(opts Options) (*Transport, error) { - switch { - case opts.OnRecvHandler == nil: - return nil, errors.New("missing OnRecv callback handler when creating a new transport") - case opts.OnErrorHandler == nil: - return nil, errors.New("missing OnError callback handler when creating a new transport") - case opts.OnSendHandler == nil: - return nil, errors.New("missing OnSend callback handler when creating a new transport") - } - - // Dial the xDS management server with dial options specified by the server - // configuration and a static keepalive configuration that is common across - // gRPC language implementations. - kpCfg := grpc.WithKeepaliveParams(keepalive.ClientParameters{ - Time: 5 * time.Minute, - Timeout: 20 * time.Second, - }) - dopts := append([]grpc.DialOption{kpCfg}, opts.ServerCfg.DialOptions()...) - grpcNewClient := transportinternal.GRPCNewClient.(func(string, ...grpc.DialOption) (*grpc.ClientConn, error)) - cc, err := grpcNewClient(opts.ServerCfg.ServerURI(), dopts...) - if err != nil { - // An error from a non-blocking dial indicates something serious. - return nil, fmt.Errorf("failed to create a transport to the management server %q: %v", opts.ServerCfg.ServerURI(), err) - } - cc.Connect() - - boff := opts.Backoff - if boff == nil { - boff = backoff.DefaultExponential.Backoff - } - ret := &Transport{ - cc: cc, - serverURI: opts.ServerCfg.ServerURI(), - onRecvHandler: opts.OnRecvHandler, - onErrorHandler: opts.OnErrorHandler, - onSendHandler: opts.OnSendHandler, - lrsStore: load.NewStore(), - backoff: boff, - nodeProto: opts.NodeProto, - logger: opts.Logger, - - adsStreamCh: make(chan adsStream, 1), - adsRequestCh: buffer.NewUnbounded(), - resources: make(map[string]map[string]bool), - versions: make(map[string]string), - nonces: make(map[string]string), - adsRunnerDoneCh: make(chan struct{}), - } - - // This context is used for sending and receiving RPC requests and - // responses. It is also used by all the goroutines spawned by this - // Transport. Therefore, cancelling this context when the transport is - // closed will essentially cancel any pending RPCs, and cause the goroutines - // to terminate. - ctx, cancel := context.WithCancel(context.Background()) - ret.adsRunnerCancel = cancel - go ret.adsRunner(ctx) - - ret.logger.Infof("Created transport to server %q", ret.serverURI) - return ret, nil -} - -// resourceRequest wraps the resource type url and the resource names requested -// by the user of this transport. -type resourceRequest struct { - resources []string - url string -} - -// SendRequest sends out an ADS request for the provided resources of the -// specified resource type. -// -// The request is sent out asynchronously. 
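New above dials the management server with a fixed keepalive policy (a ping every 5 minutes with a 20 second timeout) that is common across gRPC language implementations. A small sketch of applying the same parameters to an ordinary channel, assuming a plaintext localhost target:

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/keepalive"
)

func main() {
	cc, err := grpc.NewClient("localhost:9000",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:    5 * time.Minute,
			Timeout: 20 * time.Second,
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()
	cc.Connect() // non-blocking; the connection is established lazily
}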
If no valid stream exists at the time -// of processing this request, it is queued and will be sent out once a valid -// stream exists. -// -// If a successful response is received, the update handler callback provided at -// creation time is invoked. If an error is encountered, the stream error -// handler callback provided at creation time is invoked. -func (t *Transport) SendRequest(url string, resources []string) { - t.adsRequestCh.Put(&resourceRequest{ - url: url, - resources: resources, - }) -} - -// ResourceSendInfo wraps the names and url of resources sent to the management -// server. This is used by the `authority` type to start/stop the watch timer -// associated with every resource in the update. -type ResourceSendInfo struct { - ResourceNames []string - URL string -} - -func (t *Transport) sendAggregatedDiscoveryServiceRequest(stream adsStream, sendNodeProto bool, resourceNames []string, resourceURL, version, nonce string, nackErr error) error { - req := &v3discoverypb.DiscoveryRequest{ - TypeUrl: resourceURL, - ResourceNames: resourceNames, - VersionInfo: version, - ResponseNonce: nonce, - } - if sendNodeProto { - req.Node = t.nodeProto - } - if nackErr != nil { - req.ErrorDetail = &statuspb.Status{ - Code: int32(codes.InvalidArgument), Message: nackErr.Error(), - } - } - if err := stream.Send(req); err != nil { - return err - } - if t.logger.V(perRPCVerbosityLevel) { - t.logger.Infof("ADS request sent: %v", pretty.ToJSON(req)) - } else { - if t.logger.V(2) { - t.logger.Infof("ADS request sent for type %q, resources: %v, version %q, nonce %q", resourceURL, resourceNames, version, nonce) - } - } - t.onSendHandler(&ResourceSendInfo{URL: resourceURL, ResourceNames: resourceNames}) - return nil -} - -func (t *Transport) recvAggregatedDiscoveryServiceResponse(stream adsStream) (resources []*anypb.Any, resourceURL, version, nonce string, err error) { - resp, err := stream.Recv() - if err != nil { - return nil, "", "", "", err - } - if t.logger.V(perRPCVerbosityLevel) { - t.logger.Infof("ADS response received: %v", pretty.ToJSON(resp)) - } else if t.logger.V(2) { - t.logger.Infof("ADS response received for type %q, version %q, nonce %q", resp.GetTypeUrl(), resp.GetVersionInfo(), resp.GetNonce()) - } - return resp.GetResources(), resp.GetTypeUrl(), resp.GetVersionInfo(), resp.GetNonce(), nil -} - -// adsRunner starts an ADS stream (and backs off exponentially, if the previous -// stream failed without receiving a single reply) and runs the sender and -// receiver routines to send and receive data from the stream respectively. -func (t *Transport) adsRunner(ctx context.Context) { - defer close(t.adsRunnerDoneCh) - - go t.send(ctx) - - // We reset backoff state when we successfully receive at least one - // message from the server. - runStreamWithBackoff := func() error { - newStream := xdsclientinternal.NewADSStream.(func(context.Context, *grpc.ClientConn) (adsStream, error)) - stream, err := newStream(ctx, t.cc) - if err != nil { - t.onErrorHandler(err) - t.logger.Warningf("Creating new ADS stream failed: %v", err) - return nil - } - t.logger.Infof("ADS stream created") - - select { - case <-t.adsStreamCh: - default: - } - t.adsStreamCh <- stream - msgReceived := t.recv(ctx, stream) - if msgReceived { - return backoff.ErrResetBackoff - } - return nil - } - backoff.RunF(ctx, runStreamWithBackoff, t.backoff) -} - -// send is a separate goroutine for sending resource requests on the ADS stream. 
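sendAggregatedDiscoveryServiceRequest above shows that ACK and NACK share one DiscoveryRequest shape: both echo the response nonce, and a NACK keeps the previously ACKed version while attaching an ErrorDetail. A hedged sketch of the NACK case, where nackRequest and its inputs are assumptions for illustration:

package main

import (
	"fmt"

	v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
	statuspb "google.golang.org/genproto/googleapis/rpc/status"
	"google.golang.org/grpc/codes"
)

// nackRequest (hypothetical helper) builds the request sent when the data
// model layer rejects an update: VersionInfo stays at the previously
// accepted version and ErrorDetail carries the reason.
func nackRequest(typeURL string, names []string, ackedVersion, nonce string, nackErr error) *v3discoverypb.DiscoveryRequest {
	return &v3discoverypb.DiscoveryRequest{
		TypeUrl:       typeURL,
		ResourceNames: names,
		VersionInfo:   ackedVersion,
		ResponseNonce: nonce,
		ErrorDetail: &statuspb.Status{
			Code:    int32(codes.InvalidArgument),
			Message: nackErr.Error(),
		},
	}
}

func main() {
	req := nackRequest("type.googleapis.com/envoy.config.listener.v3.Listener",
		[]string{"example-listener"}, "41", "nonce-7", fmt.Errorf("validation failed"))
	fmt.Println(req.GetResponseNonce(), req.GetErrorDetail().GetMessage())
}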
-// -// For every new stream received on the stream channel, all existing resources -// are re-requested from the management server. -// -// For every new resource request received on the resources channel, the -// resources map is updated (this ensures that resend will pick them up when -// there are new streams) and the appropriate request is sent out. -func (t *Transport) send(ctx context.Context) { - var stream adsStream - // The xDS protocol only requires that we send the node proto in the first - // discovery request on every stream. Sending the node proto in every - // request message wastes CPU resources on the client and the server. - sentNodeProto := false - for { - select { - case <-ctx.Done(): - return - case stream = <-t.adsStreamCh: - // We have a new stream and we've to ensure that the node proto gets - // sent out in the first request on the stream. - var err error - if sentNodeProto, err = t.sendExisting(stream); err != nil { - // Send failed, clear the current stream. Attempt to resend will - // only be made after a new stream is created. - stream = nil - continue - } - case u, ok := <-t.adsRequestCh.Get(): - if !ok { - // No requests will be sent after the adsRequestCh buffer is closed. - return - } - t.adsRequestCh.Load() - - var ( - resources []string - url, version, nonce string - send bool - nackErr error - ) - switch update := u.(type) { - case *resourceRequest: - resources, url, version, nonce = t.processResourceRequest(update) - case *ackRequest: - resources, url, version, nonce, send = t.processAckRequest(update, stream) - if !send { - continue - } - nackErr = update.nackErr - } - if stream == nil { - // There's no stream yet. Skip the request. This request - // will be resent to the new streams. If no stream is - // created, the watcher will timeout (same as server not - // sending response back). - continue - } - if err := t.sendAggregatedDiscoveryServiceRequest(stream, !sentNodeProto, resources, url, version, nonce, nackErr); err != nil { - t.logger.Warningf("Sending ADS request for resources: %q, url: %q, version: %q, nonce: %q failed: %v", resources, url, version, nonce, err) - // Send failed, clear the current stream. - stream = nil - } - sentNodeProto = true - } - } -} - -// sendExisting sends out xDS requests for existing resources when recovering -// from a broken stream. -// -// We call stream.Send() here with the lock being held. It should be OK to do -// that here because the stream has just started and Send() usually returns -// quickly (once it pushes the message onto the transport layer) and is only -// ever blocked if we don't have enough flow control quota. -// -// Returns true if the node proto was sent. -func (t *Transport) sendExisting(stream adsStream) (sentNodeProto bool, err error) { - t.mu.Lock() - defer t.mu.Unlock() - - // Reset only the nonces map when the stream restarts. - // - // xDS spec says the following. See section: - // https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol#ack-nack-and-resource-type-instance-version - // - // Note that the version for a resource type is not a property of an - // individual xDS stream but rather a property of the resources themselves. If - // the stream becomes broken and the client creates a new stream, the client’s - // initial request on the new stream should indicate the most recent version - // seen by the client on the previous stream - t.nonces = make(map[string]string) - - // Send node proto only in the first request on the stream. 
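sendExisting above resets only the nonce map on a stream restart, because versions are a property of the resources themselves while nonces are a property of the stream. A condensed sketch of that bookkeeping, with a hypothetical streamState type:

package main

import "fmt"

type streamState struct {
	versions map[string]string // type URL -> last ACKed version; survives restarts
	nonces   map[string]string // type URL -> last nonce; per stream only
}

func (s *streamState) onStreamRestart() {
	s.nonces = make(map[string]string) // nonce belongs to the broken stream
	// s.versions is deliberately left untouched, per the xDS spec note above.
}

func main() {
	s := &streamState{
		versions: map[string]string{"type.googleapis.com/envoy.config.cluster.v3.Cluster": "7"},
		nonces:   map[string]string{"type.googleapis.com/envoy.config.cluster.v3.Cluster": "n-3"},
	}
	s.onStreamRestart()
	fmt.Println(len(s.versions), len(s.nonces)) // 1 0
}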
- for url, resources := range t.resources { - if len(resources) == 0 { - continue - } - if err := t.sendAggregatedDiscoveryServiceRequest(stream, !sentNodeProto, mapToSlice(resources), url, t.versions[url], "", nil); err != nil { - t.logger.Warningf("Sending ADS request for resources: %q, url: %q, version: %q, nonce: %q failed: %v", resources, url, t.versions[url], "", err) - return false, err - } - sentNodeProto = true - } - - return sentNodeProto, nil -} - -// recv receives xDS responses on the provided ADS stream and branches out to -// message specific handlers. Returns true if at least one message was -// successfully received. -func (t *Transport) recv(ctx context.Context, stream adsStream) bool { - // Initialize the flow control quota for the stream. This helps to block the - // next read until the previous one is consumed by all watchers. - fc := newADSFlowControl() - - msgReceived := false - for { - // Wait for ADS stream level flow control to be available. - if !fc.wait(ctx) { - if t.logger.V(2) { - t.logger.Infof("ADS stream context canceled") - } - return msgReceived - } - - resources, url, rVersion, nonce, err := t.recvAggregatedDiscoveryServiceResponse(stream) - if err != nil { - // Note that we do not consider it an error if the ADS stream was closed - // after having received a response on the stream. This is because there - // are legitimate reasons why the server may need to close the stream during - // normal operations, such as needing to rebalance load or the underlying - // connection hitting its max connection age limit. - // (see [gRFC A9](https://github.com/grpc/proposal/blob/master/A9-server-side-conn-mgt.md)). - if msgReceived { - err = xdsresource.NewErrorf(xdsresource.ErrTypeStreamFailedAfterRecv, err.Error()) - } - t.onErrorHandler(err) - t.logger.Warningf("ADS stream closed: %v", err) - return msgReceived - } - msgReceived = true - - u := ResourceUpdate{ - Resources: resources, - URL: url, - Version: rVersion, - } - fc.setPending() - if err = t.onRecvHandler(u, fc.onDone); xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceTypeUnsupported { - t.logger.Warningf("%v", err) - continue - } - // If the data model layer returned an error, we need to NACK the - // response in which case we need to set the version to the most - // recently accepted version of this resource type. - if err != nil { - t.mu.Lock() - t.adsRequestCh.Put(&ackRequest{ - url: url, - nonce: nonce, - stream: stream, - version: t.versions[url], - nackErr: err, - }) - t.mu.Unlock() - t.logger.Warningf("Sending NACK for resource type: %q, version: %q, nonce: %q, reason: %v", url, rVersion, nonce, err) - continue - } - t.adsRequestCh.Put(&ackRequest{ - url: url, - nonce: nonce, - stream: stream, - version: rVersion, - }) - if t.logger.V(2) { - t.logger.Infof("Sending ACK for resource type: %q, version: %q, nonce: %q", url, rVersion, nonce) - } - } -} - -func mapToSlice(m map[string]bool) []string { - ret := make([]string, 0, len(m)) - for i := range m { - ret = append(ret, i) - } - return ret -} - -func sliceToMap(ss []string) map[string]bool { - ret := make(map[string]bool, len(ss)) - for _, s := range ss { - ret[s] = true - } - return ret -} - -// processResourceRequest pulls the fields needed to send out an ADS request. -// The resource type and the list of resources to request are provided by the -// user, while the version and nonce are maintained internally. -// -// The resources map, which keeps track of the resources being requested, is -// updated here. 
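recv above gates every Recv on ADS stream-level flow control (newADSFlowControl, fc.wait, fc.setPending, fc.onDone); the real helper appears near the end of this file. A condensed, runnable sketch of the same mechanism, a pending flag plus a one-slot ready channel, under a hypothetical flowControl name:

package main

import (
	"context"
	"fmt"
	"sync/atomic"
	"time"
)

type flowControl struct {
	pending atomic.Bool   // set when an update awaits consumption
	readyCh chan struct{} // capacity 1; signals "all watchers done"
}

func newFlowControl() *flowControl { return &flowControl{readyCh: make(chan struct{}, 1)} }

func (fc *flowControl) setPending() { fc.pending.Store(true) }

// wait blocks until the previous update is consumed, or ctx expires.
func (fc *flowControl) wait(ctx context.Context) bool {
	if !fc.pending.Load() {
		select { // drain a stale signal so the next wait cannot pass early
		case <-fc.readyCh:
		default:
		}
		return true
	}
	select {
	case <-ctx.Done():
		return false
	case <-fc.readyCh:
		return true
	}
}

func (fc *flowControl) onDone() {
	fc.pending.Store(false)
	select {
	case fc.readyCh <- struct{}{}:
	default:
	}
}

func main() {
	fc := newFlowControl()
	fc.setPending()
	go func() { time.Sleep(10 * time.Millisecond); fc.onDone() }()
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println(fc.wait(ctx)) // true once onDone fires
}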
Any subsequent stream failure will re-request resources stored -// in this map. -// -// Returns the list of resources, resource type url, version and nonce. -func (t *Transport) processResourceRequest(req *resourceRequest) ([]string, string, string, string) { - t.mu.Lock() - defer t.mu.Unlock() - - resources := sliceToMap(req.resources) - t.resources[req.url] = resources - return req.resources, req.url, t.versions[req.url], t.nonces[req.url] -} - -type ackRequest struct { - url string // Resource type URL. - version string // NACK if version is an empty string. - nonce string - nackErr error // nil for ACK, non-nil for NACK. - // ACK/NACK are tagged with the stream it's for. When the stream is down, - // all the ACK/NACK for this stream will be dropped, and the version/nonce - // won't be updated. - stream grpc.ClientStream -} - -// processAckRequest pulls the fields needed to send out an ADS ACK. The nonces -// and versions map is updated. -// -// Returns the list of resources, resource type url, version, nonce, and an -// indication of whether an ACK should be sent on the wire or not. -func (t *Transport) processAckRequest(ack *ackRequest, stream grpc.ClientStream) ([]string, string, string, string, bool) { - if ack.stream != stream { - // If ACK's stream isn't the current sending stream, this means the ACK - // was pushed to queue before the old stream broke, and a new stream has - // been started since. Return immediately here so we don't update the - // nonce for the new stream. - return nil, "", "", "", false - } - - t.mu.Lock() - defer t.mu.Unlock() - - // Update the nonce irrespective of whether we send the ACK request on wire. - // An up-to-date nonce is required for the next request. - nonce := ack.nonce - t.nonces[ack.url] = nonce - - s, ok := t.resources[ack.url] - if !ok || len(s) == 0 { - // We don't send the ACK request if there are no resources of this type - // in our resources map. This can be either when the server sends - // responses before any request, or the resources are removed while the - // ackRequest was in queue). If we send a request with an empty - // resource name list, the server may treat it as a wild card and send - // us everything. - return nil, "", "", "", false - } - resources := mapToSlice(s) - - // Update the versions map only when we plan to send an ACK. - if ack.nackErr == nil { - t.versions[ack.url] = ack.version - } - - return resources, ack.url, ack.version, nonce, true -} - -// Close closes the Transport and frees any associated resources. -func (t *Transport) Close() { - t.adsRunnerCancel() - <-t.adsRunnerDoneCh - t.adsRequestCh.Close() - t.cc.Close() -} - -// ChannelConnectivityStateForTesting returns the connectivity state of the gRPC -// channel to the management server. -// -// Only for testing purposes. -func (t *Transport) ChannelConnectivityStateForTesting() connectivity.State { - return t.cc.GetState() -} - -// adsFlowControl implements ADS stream level flow control that enables the -// transport to block the reading of the next message off of the stream until -// the previous update is consumed by all watchers. -// -// The lifetime of the flow control is tied to the lifetime of the stream. -type adsFlowControl struct { - logger *grpclog.PrefixLogger - - // Whether the most recent update is pending consumption by all watchers. - pending atomic.Bool - // Channel used to notify when all the watchers have consumed the most - // recent update. Wait() blocks on reading a value from this channel. 
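processAckRequest above drops any ACK or NACK that was queued for a stream other than the one currently sending, so state for a new stream is never updated from a dead one. A minimal sketch of that guard with stand-in types:

package main

import "fmt"

type stream struct{ id int }

type ack struct {
	stream *stream // the stream this ACK was produced on
	nonce  string
}

// process applies the ACK only if it belongs to the current stream,
// mirroring the identity check at the top of processAckRequest.
func process(a ack, current *stream, nonces map[string]string, url string) bool {
	if a.stream != current {
		return false // ACK for a dead stream; drop without updating state
	}
	nonces[url] = a.nonce
	return true
}

func main() {
	old, cur := &stream{1}, &stream{2}
	nonces := map[string]string{}
	fmt.Println(process(ack{old, "n1"}, cur, nonces, "t")) // false: dropped
	fmt.Println(process(ack{cur, "n2"}, cur, nonces, "t")) // true
}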
- readyCh chan struct{} -} - -// newADSFlowControl returns a new adsFlowControl. -func newADSFlowControl() *adsFlowControl { - return &adsFlowControl{readyCh: make(chan struct{}, 1)} -} - -// setPending changes the internal state to indicate that there is an update -// pending consumption by all watchers. -func (fc *adsFlowControl) setPending() { - fc.pending.Store(true) -} - -// wait blocks until all the watchers have consumed the most recent update and -// returns true. If the context expires before that, it returns false. -func (fc *adsFlowControl) wait(ctx context.Context) bool { - // If there is no pending update, there is no need to block. - if !fc.pending.Load() { - // If all watchers finished processing the most recent update before the - // `recv` goroutine made the next call to `Wait()`, there would be an - // entry in the readyCh channel that needs to be drained to ensure that - // the next call to `Wait()` doesn't unblock before it actually should. - select { - case <-fc.readyCh: - default: - } - return true - } - - select { - case <-ctx.Done(): - return false - case <-fc.readyCh: - return true - } -} - -// onDone indicates that all watchers have consumed the most recent update. -func (fc *adsFlowControl) onDone() { - fc.pending.Store(false) - - select { - // Writes to the readyCh channel should not block ideally. The default - // branch here is to appease the paranoid mind. - case fc.readyCh <- struct{}{}: - default: - if fc.logger.V(2) { - fc.logger.Infof("ADS stream flow control readyCh is full") - } - } -} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go index aff9de57df..8e9375fcbb 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go @@ -86,13 +86,8 @@ type ClusterResourceData struct { Resource ClusterUpdate } -<<<<<<< HEAD // RawEqual returns true if other is equal to r. func (c *ClusterResourceData) RawEqual(other ResourceData) bool { -======= -// Equal returns true if other is equal to r. -func (c *ClusterResourceData) Equal(other ResourceData) bool { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if c == nil && other == nil { return true } diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go index a438f05081..94c03d0c52 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go @@ -81,13 +81,8 @@ type EndpointsResourceData struct { Resource EndpointsUpdate } -<<<<<<< HEAD // RawEqual returns true if other is equal to r. func (e *EndpointsResourceData) RawEqual(other ResourceData) bool { -======= -// Equal returns true if other is equal to r. 
-func (e *EndpointsResourceData) Equal(other ResourceData) bool { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if e == nil && other == nil { return true } diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go index 84163ed38f..e3ca1134a0 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go @@ -118,13 +118,8 @@ type ListenerResourceData struct { Resource ListenerUpdate } -<<<<<<< HEAD // RawEqual returns true if other is equal to l. func (l *ListenerResourceData) RawEqual(other ResourceData) bool { -======= -// Equal returns true if other is equal to l. -func (l *ListenerResourceData) Equal(other ResourceData) bool { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if l == nil && other == nil { return true } diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/matcher.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/matcher.go index a595eda6f4..798f618849 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/matcher.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/matcher.go @@ -19,11 +19,7 @@ package xdsresource import ( "fmt" -<<<<<<< HEAD rand "math/rand/v2" -======= - "math/rand" ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "strings" "google.golang.org/grpc/internal/grpcutil" @@ -146,19 +142,11 @@ func newFractionMatcher(fraction uint32) *fractionMatcher { return &fractionMatcher{fraction: int64(fraction)} } -<<<<<<< HEAD // RandInt64n overwrites rand for control in tests. var RandInt64n = rand.Int64N func (fm *fractionMatcher) match() bool { t := RandInt64n(1000000) -======= -// RandInt63n overwrites rand for control in tests. -var RandInt63n = rand.Int63n - -func (fm *fractionMatcher) match() bool { - t := RandInt63n(1000000) ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) return t <= fm.fraction } diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go index 50c002d4ce..e14f56f781 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go @@ -46,15 +46,9 @@ func init() { type Producer interface { // WatchResource uses xDS to discover the resource associated with the // provided resource name. The resource type implementation determines how -<<<<<<< HEAD // xDS responses are are deserialized and validated, as received from the // xDS management server. Upon receipt of a response from the management // server, an appropriate callback on the watcher is invoked. -======= - // xDS requests are sent out and how responses are deserialized and - // validated. Upon receipt of a response from the management server, an - // appropriate callback on the watcher is invoked. ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) WatchResource(rType Type, resourceName string, watcher ResourceWatcher) (cancel func()) } @@ -125,17 +119,9 @@ type Type interface { // provide an implementation of this interface to represent the configuration // received from the xDS management server. 
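The matcher.go hunk above moves fraction matching to math/rand/v2 and keeps the draw behind a package-level variable so tests can pin it. A small sketch of the per-million check; randInt64n and fractionMatch are hypothetical names:

package main

import (
	"fmt"
	rand "math/rand/v2"
)

// randInt64n is the test seam, the same role RandInt64n plays in matcher.go.
var randInt64n = rand.Int64N

// fractionMatch succeeds with probability fractionPerMillion / 1,000,000.
func fractionMatch(fractionPerMillion int64) bool {
	return randInt64n(1000000) <= fractionPerMillion
}

func main() {
	hits := 0
	for i := 0; i < 1000000; i++ {
		if fractionMatch(500000) { // ~50%
			hits++
		}
	}
	fmt.Println(hits) // roughly 500000
}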
type ResourceData interface { -<<<<<<< HEAD // RawEqual returns true if the passed in resource data is equal to that of // the receiver, based on the underlying raw protobuf message. RawEqual(ResourceData) bool -======= - isResourceData() - - // Equal returns true if the passed in resource data is equal to that of the - // receiver. - Equal(ResourceData) bool ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) // ToJSON returns a JSON string representation of the resource data. ToJSON() string diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go index ee9b1069a5..98ac313288 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go @@ -81,13 +81,8 @@ type RouteConfigResourceData struct { Resource RouteConfigUpdate } -<<<<<<< HEAD // RawEqual returns true if other is equal to r. func (r *RouteConfigResourceData) RawEqual(other ResourceData) bool { -======= -// Equal returns true if other is equal to r. -func (r *RouteConfigResourceData) Equal(other ResourceData) bool { ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) if r == nil && other == nil { return true } diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_eds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_eds.go index 15a2e37921..f94a17e7c6 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_eds.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/type_eds.go @@ -49,11 +49,7 @@ const ( // Endpoint contains information of an endpoint. 
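The type_eds.go and unmarshal_eds.go hunks just below replace the single Address field with an Addresses list: the primary socket address always comes first, and additional addresses are appended only when the dual-stack env flag is set. A sketch of that construction; the endpoint type and envDualstack flag are stand-ins for the vendored Endpoint and envconfig.XDSDualstackEndpointsEnabled:

package main

import "fmt"

type endpoint struct {
	Addresses []string // primary address first, then any additional ones
	Weight    uint32
}

var envDualstack = true // stand-in for envconfig.XDSDualstackEndpointsEnabled

func newEndpoint(primary string, additional []string, weight uint32) endpoint {
	addrs := []string{primary}
	if envDualstack {
		addrs = append(addrs, additional...)
	}
	return endpoint{Addresses: addrs, Weight: weight}
}

func main() {
	e := newEndpoint("10.0.0.1:80", []string{"[2001:db8::1]:80"}, 1)
	fmt.Println(e.Addresses) // [10.0.0.1:80 [2001:db8::1]:80]
}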
type Endpoint struct { -<<<<<<< HEAD Addresses []string -======= - Address string ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) HealthStatus EndpointHealthStatus Weight uint32 } diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_eds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_eds.go index b2e87d31d6..fd780d6632 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_eds.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_eds.go @@ -26,10 +26,7 @@ import ( v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" -<<<<<<< HEAD "google.golang.org/grpc/internal/envconfig" -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/xds/internal" "google.golang.org/protobuf/proto" @@ -97,7 +94,6 @@ func parseEndpoints(lbEndpoints []*v3endpointpb.LbEndpoint, uniqueEndpointAddrs } weight = w.GetValue() } -<<<<<<< HEAD addrs := []string{parseAddress(lbEndpoint.GetEndpoint().GetAddress().GetSocketAddress())} if envconfig.XDSDualstackEndpointsEnabled { for _, sa := range lbEndpoint.GetEndpoint().GetAdditionalAddresses() { @@ -114,16 +110,6 @@ func parseEndpoints(lbEndpoints []*v3endpointpb.LbEndpoint, uniqueEndpointAddrs endpoints = append(endpoints, Endpoint{ HealthStatus: EndpointHealthStatus(lbEndpoint.GetHealthStatus()), Addresses: addrs, -======= - addr := parseAddress(lbEndpoint.GetEndpoint().GetAddress().GetSocketAddress()) - if uniqueEndpointAddrs[addr] { - return nil, fmt.Errorf("duplicate endpoint with the same address %s", addr) - } - uniqueEndpointAddrs[addr] = true - endpoints = append(endpoints, Endpoint{ - HealthStatus: EndpointHealthStatus(lbEndpoint.GetHealthStatus()), - Address: addr, ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) Weight: weight, }) } diff --git a/vendor/modules.txt b/vendor/modules.txt index ccf190ee41..01e3567f55 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -4,13 +4,8 @@ # 4d63.com/gochecknoglobals v0.2.1 ## explicit; go 1.15 4d63.com/gochecknoglobals/checknoglobals -<<<<<<< HEAD # cel.dev/expr v0.19.1 ## explicit; go 1.21.1 -======= -# cel.dev/expr v0.16.1 -## explicit; go 1.18 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) cel.dev/expr # cloud.google.com/go v0.116.0 ## explicit; go 1.21 @@ -19,13 +14,8 @@ cloud.google.com/go/internal cloud.google.com/go/internal/optional cloud.google.com/go/internal/trace cloud.google.com/go/internal/version -<<<<<<< HEAD # cloud.google.com/go/auth v0.14.0 ## explicit; go 1.22 -======= -# cloud.google.com/go/auth v0.11.0 -## explicit; go 1.21 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) cloud.google.com/go/auth cloud.google.com/go/auth/credentials cloud.google.com/go/auth/credentials/idtoken @@ -43,17 +33,10 @@ cloud.google.com/go/auth/internal/credsfile cloud.google.com/go/auth/internal/jwt cloud.google.com/go/auth/internal/transport cloud.google.com/go/auth/internal/transport/cert -<<<<<<< HEAD # cloud.google.com/go/auth/oauth2adapt v0.2.7 ## explicit; go 1.22 cloud.google.com/go/auth/oauth2adapt # cloud.google.com/go/compute/metadata v0.6.0 -======= -# cloud.google.com/go/auth/oauth2adapt v0.2.6 -## explicit; go 1.21 -cloud.google.com/go/auth/oauth2adapt -# cloud.google.com/go/compute/metadata v0.5.2 
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.21 cloud.google.com/go/compute/metadata # cloud.google.com/go/firestore v1.17.0 @@ -65,11 +48,7 @@ cloud.google.com/go/firestore/internal ## explicit; go 1.21 cloud.google.com/go/iam cloud.google.com/go/iam/apiv1/iampb -<<<<<<< HEAD # cloud.google.com/go/kms v1.20.4 -======= -# cloud.google.com/go/kms v1.20.1 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.21 cloud.google.com/go/kms/apiv1 cloud.google.com/go/kms/apiv1/kmspb @@ -84,13 +63,8 @@ cloud.google.com/go/longrunning/autogen/longrunningpb cloud.google.com/go/monitoring/apiv3/v2 cloud.google.com/go/monitoring/apiv3/v2/monitoringpb cloud.google.com/go/monitoring/internal -<<<<<<< HEAD # cloud.google.com/go/storage v1.50.0 ## explicit; go 1.22 -======= -# cloud.google.com/go/storage v1.48.0 -## explicit; go 1.21 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) cloud.google.com/go/storage cloud.google.com/go/storage/experimental cloud.google.com/go/storage/internal @@ -102,19 +76,10 @@ contrib.go.opencensus.io/exporter/ocagent # contrib.go.opencensus.io/exporter/prometheus v0.4.2 ## explicit; go 1.13 contrib.go.opencensus.io/exporter/prometheus -<<<<<<< HEAD # dario.cat/mergo v1.0.1 ## explicit; go 1.13 # github.com/4meepo/tagalign v1.4.1 ## explicit; go 1.21.0 -======= -# filippo.io/edwards25519 v1.1.0 -## explicit; go 1.20 -filippo.io/edwards25519 -filippo.io/edwards25519/field -# github.com/4meepo/tagalign v1.3.4 -## explicit; go 1.19 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) github.com/4meepo/tagalign # github.com/Abirdcfly/dupword v0.1.3 ## explicit; go 1.22.0 @@ -125,11 +90,7 @@ github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider # github.com/Antonboom/errname v1.0.0 ## explicit; go 1.22.1 github.com/Antonboom/errname/pkg/analyzer -<<<<<<< HEAD # github.com/Antonboom/nilnil v1.0.1 -======= -# github.com/Antonboom/nilnil v1.0.0 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.22.0 github.com/Antonboom/nilnil/pkg/analyzer # github.com/Antonboom/testifylint v1.5.2 @@ -144,11 +105,7 @@ github.com/Antonboom/testifylint/internal/testify ## explicit github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry github.com/Azure/azure-sdk-for-go/version -<<<<<<< HEAD # github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 -======= -# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azcore github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource @@ -170,11 +127,7 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming github.com/Azure/azure-sdk-for-go/sdk/azcore/to github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing -<<<<<<< HEAD # github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1 -======= -# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azidentity github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal @@ -187,17 +140,10 @@ github.com/Azure/azure-sdk-for-go/sdk/internal/log github.com/Azure/azure-sdk-for-go/sdk/internal/poller github.com/Azure/azure-sdk-for-go/sdk/internal/temporal github.com/Azure/azure-sdk-for-go/sdk/internal/uuid -<<<<<<< HEAD # 
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.3.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys # github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.1.0 -======= -# github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.1.0 -## explicit; go 1.18 -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys -# github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal # github.com/Azure/go-autorest v14.2.0+incompatible @@ -225,11 +171,7 @@ github.com/Azure/go-autorest/logger # github.com/Azure/go-autorest/tracing v0.6.0 ## explicit; go 1.12 github.com/Azure/go-autorest/tracing -<<<<<<< HEAD # github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 -======= -# github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.18 github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential @@ -268,11 +210,7 @@ github.com/GaijinEntertainment/go-exhaustruct/v3/analyzer github.com/GaijinEntertainment/go-exhaustruct/v3/internal/comment github.com/GaijinEntertainment/go-exhaustruct/v3/internal/pattern github.com/GaijinEntertainment/go-exhaustruct/v3/internal/structure -<<<<<<< HEAD # github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 -======= -# github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.21 github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp # github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 @@ -284,11 +222,7 @@ github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapp # github.com/IBM/sarama v1.43.3 ## explicit; go 1.19 github.com/IBM/sarama -<<<<<<< HEAD # github.com/Masterminds/semver/v3 v3.3.1 -======= -# github.com/Masterminds/semver/v3 v3.3.0 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.21 github.com/Masterminds/semver/v3 # github.com/Microsoft/go-winio v0.6.2 @@ -302,13 +236,8 @@ github.com/Microsoft/go-winio/pkg/guid ## explicit; go 1.20 github.com/OpenPeeDeeP/depguard/v2 github.com/OpenPeeDeeP/depguard/v2/internal/utils -<<<<<<< HEAD # github.com/ProtonMail/go-crypto v1.1.5 ## explicit; go 1.17 -======= -# github.com/ProtonMail/go-crypto v1.0.0 -## explicit; go 1.13 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) github.com/ProtonMail/go-crypto/bitcurves github.com/ProtonMail/go-crypto/brainpool github.com/ProtonMail/go-crypto/eax @@ -319,11 +248,8 @@ github.com/ProtonMail/go-crypto/openpgp/aes/keywrap github.com/ProtonMail/go-crypto/openpgp/armor github.com/ProtonMail/go-crypto/openpgp/ecdh github.com/ProtonMail/go-crypto/openpgp/ecdsa -<<<<<<< HEAD github.com/ProtonMail/go-crypto/openpgp/ed25519 github.com/ProtonMail/go-crypto/openpgp/ed448 -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) github.com/ProtonMail/go-crypto/openpgp/eddsa github.com/ProtonMail/go-crypto/openpgp/elgamal github.com/ProtonMail/go-crypto/openpgp/errors @@ -332,7 +258,6 @@ github.com/ProtonMail/go-crypto/openpgp/internal/ecc github.com/ProtonMail/go-crypto/openpgp/internal/encoding github.com/ProtonMail/go-crypto/openpgp/packet 
github.com/ProtonMail/go-crypto/openpgp/s2k -<<<<<<< HEAD github.com/ProtonMail/go-crypto/openpgp/x25519 github.com/ProtonMail/go-crypto/openpgp/x448 # github.com/ThalesIgnite/crypto11 v1.2.5 @@ -340,13 +265,6 @@ github.com/ProtonMail/go-crypto/openpgp/x448 github.com/ThalesIgnite/crypto11 # github.com/alecthomas/go-check-sumtype v0.3.1 ## explicit; go 1.22.0 -======= -# github.com/ThalesIgnite/crypto11 v1.2.5 -## explicit; go 1.13 -github.com/ThalesIgnite/crypto11 -# github.com/alecthomas/go-check-sumtype v0.2.0 -## explicit; go 1.18 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) github.com/alecthomas/go-check-sumtype # github.com/alexkohler/nakedret/v2 v2.0.5 ## explicit; go 1.21 @@ -357,11 +275,7 @@ github.com/alexkohler/prealloc/pkg # github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 ## explicit; go 1.14 github.com/alibabacloud-go/alibabacloud-gateway-spi/client -<<<<<<< HEAD # github.com/alibabacloud-go/cr-20160607 v1.0.1 => github.com/vdemeester/cr-20160607 v1.0.1 -======= -# github.com/alibabacloud-go/cr-20160607 v1.0.1 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.15 github.com/alibabacloud-go/cr-20160607/client # github.com/alibabacloud-go/cr-20181201 v1.0.10 @@ -392,13 +306,10 @@ github.com/alibabacloud-go/tea-xml/service # github.com/alingse/asasalint v0.0.11 ## explicit; go 1.18 github.com/alingse/asasalint -<<<<<<< HEAD # github.com/alingse/nilnesserr v0.1.1 ## explicit; go 1.22.0 github.com/alingse/nilnesserr github.com/alingse/nilnesserr/internal/typeparams -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) # github.com/aliyun/credentials-go v1.3.2 ## explicit; go 1.14 github.com/aliyun/credentials-go/credentials @@ -414,17 +325,10 @@ github.com/asaskevich/govalidator # github.com/ashanbrown/forbidigo v1.6.0 ## explicit; go 1.13 github.com/ashanbrown/forbidigo/forbidigo -<<<<<<< HEAD # github.com/ashanbrown/makezero v1.2.0 ## explicit; go 1.12 github.com/ashanbrown/makezero/makezero # github.com/aws/aws-sdk-go v1.55.6 -======= -# github.com/ashanbrown/makezero v1.1.1 -## explicit; go 1.12 -github.com/ashanbrown/makezero/makezero -# github.com/aws/aws-sdk-go v1.55.5 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.19 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/auth/bearer @@ -472,11 +376,7 @@ github.com/aws/aws-sdk-go/service/sso/ssoiface github.com/aws/aws-sdk-go/service/ssooidc github.com/aws/aws-sdk-go/service/sts github.com/aws/aws-sdk-go/service/sts/stsiface -<<<<<<< HEAD # github.com/aws/aws-sdk-go-v2 v1.34.0 -======= -# github.com/aws/aws-sdk-go-v2 v1.32.4 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/aws github.com/aws/aws-sdk-go-v2/aws/defaults @@ -502,17 +402,10 @@ github.com/aws/aws-sdk-go-v2/internal/shareddefaults github.com/aws/aws-sdk-go-v2/internal/strings github.com/aws/aws-sdk-go-v2/internal/sync/singleflight github.com/aws/aws-sdk-go-v2/internal/timeconv -<<<<<<< HEAD # github.com/aws/aws-sdk-go-v2/config v1.29.2 ## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/config # github.com/aws/aws-sdk-go-v2/credentials v1.17.55 -======= -# github.com/aws/aws-sdk-go-v2/config v1.28.3 -## explicit; go 1.21 -github.com/aws/aws-sdk-go-v2/config -# github.com/aws/aws-sdk-go-v2/credentials v1.17.44 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/credentials github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds @@ -521,7 +414,6 @@ 
github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client github.com/aws/aws-sdk-go-v2/credentials/processcreds github.com/aws/aws-sdk-go-v2/credentials/ssocreds github.com/aws/aws-sdk-go-v2/credentials/stscreds -<<<<<<< HEAD # github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.25 ## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/feature/ec2/imds @@ -533,19 +425,6 @@ github.com/aws/aws-sdk-go-v2/internal/configsources ## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 # github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 -======= -# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.19 -## explicit; go 1.21 -github.com/aws/aws-sdk-go-v2/feature/ec2/imds -github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config -# github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.23 -## explicit; go 1.21 -github.com/aws/aws-sdk-go-v2/internal/configsources -# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.23 -## explicit; go 1.21 -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 -# github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/internal/ini # github.com/aws/aws-sdk-go-v2/service/ecr v1.20.2 @@ -558,7 +437,6 @@ github.com/aws/aws-sdk-go-v2/service/ecr/types github.com/aws/aws-sdk-go-v2/service/ecrpublic github.com/aws/aws-sdk-go-v2/service/ecrpublic/internal/endpoints github.com/aws/aws-sdk-go-v2/service/ecrpublic/types -<<<<<<< HEAD # github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 ## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding @@ -566,51 +444,26 @@ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding ## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url # github.com/aws/aws-sdk-go-v2/service/kms v1.37.14 -======= -# github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 -## explicit; go 1.21 -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding -# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.4 -## explicit; go 1.21 -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url -# github.com/aws/aws-sdk-go-v2/service/kms v1.37.5 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/service/kms github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints github.com/aws/aws-sdk-go-v2/service/kms/types -<<<<<<< HEAD # github.com/aws/aws-sdk-go-v2/service/sso v1.24.12 -======= -# github.com/aws/aws-sdk-go-v2/service/sso v1.24.5 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/service/sso github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sso/types -<<<<<<< HEAD # github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.11 -======= -# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.4 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/service/ssooidc github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints github.com/aws/aws-sdk-go-v2/service/ssooidc/types -<<<<<<< HEAD # github.com/aws/aws-sdk-go-v2/service/sts v1.33.10 -======= -# github.com/aws/aws-sdk-go-v2/service/sts v1.32.4 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/service/sts github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints 
github.com/aws/aws-sdk-go-v2/service/sts/types -<<<<<<< HEAD # github.com/aws/smithy-go v1.22.2 -======= -# github.com/aws/smithy-go v1.22.0 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.21 github.com/aws/smithy-go github.com/aws/smithy-go/auth @@ -663,13 +516,8 @@ github.com/blizzy78/varnamelen # github.com/bmatcuk/doublestar/v4 v4.0.2 ## explicit; go 1.16 github.com/bmatcuk/doublestar/v4 -<<<<<<< HEAD # github.com/bombsimon/wsl/v4 v4.5.0 ## explicit; go 1.22 -======= -# github.com/bombsimon/wsl/v4 v4.4.1 -## explicit; go 1.21 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) github.com/bombsimon/wsl/v4 # github.com/breml/bidichk v0.3.2 ## explicit; go 1.22.0 @@ -677,7 +525,6 @@ github.com/breml/bidichk/pkg/bidichk # github.com/breml/errchkjson v0.4.0 ## explicit; go 1.22.0 github.com/breml/errchkjson -<<<<<<< HEAD # github.com/buildkite/agent/v3 v3.91.0 ## explicit; go 1.22.7 github.com/buildkite/agent/v3/api @@ -685,20 +532,11 @@ github.com/buildkite/agent/v3/internal/agenthttp github.com/buildkite/agent/v3/logger github.com/buildkite/agent/v3/version # github.com/buildkite/go-pipeline v0.13.3 -======= -# github.com/buildkite/agent/v3 v3.81.0 -## explicit; go 1.22.6 -github.com/buildkite/agent/v3/api -github.com/buildkite/agent/v3/logger -github.com/buildkite/agent/v3/version -# github.com/buildkite/go-pipeline v0.13.1 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.22.6 github.com/buildkite/go-pipeline github.com/buildkite/go-pipeline/internal/env github.com/buildkite/go-pipeline/ordered github.com/buildkite/go-pipeline/warning -<<<<<<< HEAD # github.com/buildkite/interpolate v0.1.5 ## explicit; go 1.22 github.com/buildkite/interpolate @@ -706,24 +544,11 @@ github.com/buildkite/interpolate ## explicit; go 1.18 github.com/buildkite/roko # github.com/butuzov/ireturn v0.3.1 -======= -# github.com/buildkite/interpolate v0.1.3 -## explicit; go 1.22 -github.com/buildkite/interpolate -# github.com/buildkite/roko v1.2.0 -## explicit; go 1.18 -github.com/buildkite/roko -# github.com/butuzov/ireturn v0.3.0 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.18 github.com/butuzov/ireturn/analyzer github.com/butuzov/ireturn/analyzer/internal/config github.com/butuzov/ireturn/analyzer/internal/types -<<<<<<< HEAD # github.com/butuzov/mirror v1.3.0 -======= -# github.com/butuzov/mirror v1.2.0 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.19 github.com/butuzov/mirror github.com/butuzov/mirror/internal/checker @@ -766,11 +591,7 @@ github.com/chavacava/garif github.com/chrismellard/docker-credential-acr-env/pkg/credhelper github.com/chrismellard/docker-credential-acr-env/pkg/registry github.com/chrismellard/docker-credential-acr-env/pkg/token -<<<<<<< HEAD # github.com/ckaznocha/intrange v0.3.0 -======= -# github.com/ckaznocha/intrange v0.2.1 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.22 github.com/ckaznocha/intrange # github.com/clbanning/mxj/v2 v2.7.0 @@ -819,7 +640,6 @@ github.com/cncf/xds/go/xds/type/v3 # github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be ## explicit github.com/common-nighthawk/go-figure -<<<<<<< HEAD # github.com/containerd/stargz-snapshotter/estargz v0.16.3 ## explicit; go 1.22.0 github.com/containerd/stargz-snapshotter/estargz @@ -829,17 +649,6 @@ github.com/containerd/stargz-snapshotter/estargz/errorutil github.com/coreos/go-oidc/v3/oidc # github.com/curioswitch/go-reassign v0.3.0 ## explicit; go 1.21 
-======= -# github.com/containerd/stargz-snapshotter/estargz v0.14.3 -## explicit; go 1.19 -github.com/containerd/stargz-snapshotter/estargz -github.com/containerd/stargz-snapshotter/estargz/errorutil -# github.com/coreos/go-oidc/v3 v3.11.0 -## explicit; go 1.21 -github.com/coreos/go-oidc/v3/oidc -# github.com/curioswitch/go-reassign v0.2.0 -## explicit; go 1.18 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) github.com/curioswitch/go-reassign github.com/curioswitch/go-reassign/internal/analyzer # github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 @@ -871,11 +680,7 @@ github.com/digitorus/timestamp # github.com/dimchansky/utfbom v1.1.1 ## explicit github.com/dimchansky/utfbom -<<<<<<< HEAD # github.com/docker/cli v27.5.0+incompatible -======= -# github.com/docker/cli v27.1.1+incompatible ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit github.com/docker/cli/cli/config github.com/docker/cli/cli/config/configfile @@ -884,11 +689,7 @@ github.com/docker/cli/cli/config/types # github.com/docker/distribution v2.8.3+incompatible ## explicit github.com/docker/distribution/registry/client/auth/challenge -<<<<<<< HEAD # github.com/docker/docker-credential-helpers v0.8.2 -======= -# github.com/docker/docker-credential-helpers v0.8.0 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.19 github.com/docker/docker-credential-helpers/client github.com/docker/docker-credential-helpers/credentials @@ -919,11 +720,7 @@ github.com/emirpasic/gods/lists/arraylist github.com/emirpasic/gods/trees github.com/emirpasic/gods/trees/binaryheap github.com/emirpasic/gods/utils -<<<<<<< HEAD # github.com/envoyproxy/go-control-plane v0.13.1 -======= -# github.com/envoyproxy/go-control-plane v0.13.0 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.21 github.com/envoyproxy/go-control-plane/envoy/admin/v3 github.com/envoyproxy/go-control-plane/envoy/annotations @@ -1013,10 +810,6 @@ github.com/go-critic/go-critic/linter ## explicit; go 1.12 github.com/go-jose/go-jose/v3 github.com/go-jose/go-jose/v3/cipher -<<<<<<< HEAD -======= -github.com/go-jose/go-jose/v3/cryptosigner ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) github.com/go-jose/go-jose/v3/json github.com/go-jose/go-jose/v3/jwt # github.com/go-jose/go-jose/v4 v4.0.4 @@ -1109,11 +902,7 @@ github.com/go-toolsmith/typep ## explicit; go 1.18 github.com/go-viper/mapstructure/v2 github.com/go-viper/mapstructure/v2/internal/errors -<<<<<<< HEAD # github.com/go-xmlfmt/xmlfmt v1.1.3 -======= -# github.com/go-xmlfmt/xmlfmt v1.1.2 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit github.com/go-xmlfmt/xmlfmt # github.com/gobwas/glob v0.2.3 @@ -1163,21 +952,12 @@ github.com/golangci/dupl/syntax/golang # github.com/golangci/go-printf-func-name v0.1.0 ## explicit; go 1.22.0 github.com/golangci/go-printf-func-name/pkg/analyzer -<<<<<<< HEAD # github.com/golangci/gofmt v0.0.0-20241223200906-057b0627d9b9 ## explicit; go 1.22.0 github.com/golangci/gofmt/gofmt github.com/golangci/gofmt/gofmt/internal/diff github.com/golangci/gofmt/goimports # github.com/golangci/golangci-lint v1.63.4 -======= -# github.com/golangci/gofmt v0.0.0-20240816233607-d8596aa466a9 -## explicit; go 1.22 -github.com/golangci/gofmt/gofmt -github.com/golangci/gofmt/gofmt/internal/diff -github.com/golangci/gofmt/goimports -# github.com/golangci/golangci-lint v1.62.2 ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) ## explicit; go 1.22.1 
github.com/golangci/golangci-lint/cmd/golangci-lint github.com/golangci/golangci-lint/internal/cache @@ -1186,13 +966,10 @@ github.com/golangci/golangci-lint/internal/go/cache github.com/golangci/golangci-lint/internal/go/mmap github.com/golangci/golangci-lint/internal/go/quoted github.com/golangci/golangci-lint/internal/go/robustio -<<<<<<< HEAD github.com/golangci/golangci-lint/internal/x/tools/analysisflags github.com/golangci/golangci-lint/internal/x/tools/analysisinternal github.com/golangci/golangci-lint/internal/x/tools/diff github.com/golangci/golangci-lint/internal/x/tools/diff/lcs -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) github.com/golangci/golangci-lint/pkg/commands github.com/golangci/golangci-lint/pkg/commands/internal github.com/golangci/golangci-lint/pkg/config @@ -1201,14 +978,11 @@ github.com/golangci/golangci-lint/pkg/fsutils github.com/golangci/golangci-lint/pkg/goanalysis github.com/golangci/golangci-lint/pkg/goanalysis/load github.com/golangci/golangci-lint/pkg/goanalysis/pkgerrors -<<<<<<< HEAD github.com/golangci/golangci-lint/pkg/goformatters github.com/golangci/golangci-lint/pkg/goformatters/gci github.com/golangci/golangci-lint/pkg/goformatters/gofmt github.com/golangci/golangci-lint/pkg/goformatters/gofumpt github.com/golangci/golangci-lint/pkg/goformatters/goimports -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) github.com/golangci/golangci-lint/pkg/golinters github.com/golangci/golangci-lint/pkg/golinters/asasalint github.com/golangci/golangci-lint/pkg/golinters/asciicheck @@ -1233,10 +1007,7 @@ github.com/golangci/golangci-lint/pkg/golinters/errorlint github.com/golangci/golangci-lint/pkg/golinters/exhaustive github.com/golangci/golangci-lint/pkg/golinters/exhaustruct github.com/golangci/golangci-lint/pkg/golinters/exportloopref -<<<<<<< HEAD github.com/golangci/golangci-lint/pkg/golinters/exptostd -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) github.com/golangci/golangci-lint/pkg/golinters/fatcontext github.com/golangci/golangci-lint/pkg/golinters/forbidigo github.com/golangci/golangci-lint/pkg/golinters/forcetypeassert @@ -1284,10 +1055,7 @@ github.com/golangci/golangci-lint/pkg/golinters/musttag github.com/golangci/golangci-lint/pkg/golinters/nakedret github.com/golangci/golangci-lint/pkg/golinters/nestif github.com/golangci/golangci-lint/pkg/golinters/nilerr -<<<<<<< HEAD github.com/golangci/golangci-lint/pkg/golinters/nilnesserr -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) github.com/golangci/golangci-lint/pkg/golinters/nilnil github.com/golangci/golangci-lint/pkg/golinters/nlreturn github.com/golangci/golangci-lint/pkg/golinters/noctx @@ -1322,10 +1090,7 @@ github.com/golangci/golangci-lint/pkg/golinters/unconvert github.com/golangci/golangci-lint/pkg/golinters/unparam github.com/golangci/golangci-lint/pkg/golinters/unused github.com/golangci/golangci-lint/pkg/golinters/usestdlibvars -<<<<<<< HEAD github.com/golangci/golangci-lint/pkg/golinters/usetesting -======= ->>>>>>> 70e0318b1 ([WIP] add archivista storage backend) github.com/golangci/golangci-lint/pkg/golinters/varnamelen github.com/golangci/golangci-lint/pkg/golinters/wastedassign github.com/golangci/golangci-lint/pkg/golinters/whitespace @@ -1345,12 +1110,6 @@ github.com/golangci/golangci-lint/pkg/timeutils # github.com/golangci/misspell v0.6.0 ## explicit; go 1.21 github.com/golangci/misspell -<<<<<<< HEAD -======= -# github.com/golangci/modinfo v0.3.4 -## explicit; go 1.21 -github.com/golangci/modinfo 
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 # github.com/golangci/plugin-module-register v0.1.1
 ## explicit; go 1.21
 github.com/golangci/plugin-module-register/register
@@ -1363,13 +1122,8 @@ github.com/golangci/unconvert
 # github.com/google/addlicense v1.1.1
 ## explicit; go 1.13
 github.com/google/addlicense
-<<<<<<< HEAD
 # github.com/google/cel-go v0.23.1
 ## explicit; go 1.21.1
-=======
-# github.com/google/cel-go v0.20.1
-## explicit; go 1.18
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 github.com/google/cel-go/cel
 github.com/google/cel-go/checker
 github.com/google/cel-go/checker/decls
@@ -1390,13 +1144,8 @@ github.com/google/cel-go/common/types/traits
 github.com/google/cel-go/interpreter
 github.com/google/cel-go/parser
 github.com/google/cel-go/parser/gen
-<<<<<<< HEAD
 # github.com/google/certificate-transparency-go v1.3.1
 ## explicit; go 1.22.0
-=======
-# github.com/google/certificate-transparency-go v1.2.1
-## explicit; go 1.21.0
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 github.com/google/certificate-transparency-go
 github.com/google/certificate-transparency-go/asn1
 github.com/google/certificate-transparency-go/gossip/minimal/x509ext
@@ -1419,13 +1168,8 @@ github.com/google/go-cmp/cmp/internal/diff
 github.com/google/go-cmp/cmp/internal/flags
 github.com/google/go-cmp/cmp/internal/function
 github.com/google/go-cmp/cmp/internal/value
-<<<<<<< HEAD
 # github.com/google/go-containerregistry v0.20.3
 ## explicit; go 1.23.0
-=======
-# github.com/google/go-containerregistry v0.20.2
-## explicit; go 1.18
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 github.com/google/go-containerregistry/internal/and
 github.com/google/go-containerregistry/internal/compression
 github.com/google/go-containerregistry/internal/estargz
@@ -1487,11 +1231,7 @@ github.com/google/licenseclassifier/stringclassifier
 github.com/google/licenseclassifier/stringclassifier/internal/pq
 github.com/google/licenseclassifier/stringclassifier/searchset
 github.com/google/licenseclassifier/stringclassifier/searchset/tokenizer
-<<<<<<< HEAD
 # github.com/google/s2a-go v0.1.9
-=======
-# github.com/google/s2a-go v0.1.8
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 ## explicit; go 1.20
 github.com/google/s2a-go
 github.com/google/s2a-go/fallback
@@ -1524,11 +1264,7 @@ github.com/google/wire
 ## explicit; go 1.19
 github.com/googleapis/enterprise-certificate-proxy/client
 github.com/googleapis/enterprise-certificate-proxy/client/util
-<<<<<<< HEAD
 # github.com/googleapis/gax-go/v2 v2.14.1
-=======
-# github.com/googleapis/gax-go/v2 v2.14.0
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 ## explicit; go 1.21
 github.com/googleapis/gax-go/v2
 github.com/googleapis/gax-go/v2/apierror
@@ -1536,10 +1272,7 @@ github.com/googleapis/gax-go/v2/apierror/internal/proto
 github.com/googleapis/gax-go/v2/callctx
 github.com/googleapis/gax-go/v2/internal
 github.com/googleapis/gax-go/v2/internallog
-<<<<<<< HEAD
 github.com/googleapis/gax-go/v2/internallog/grpclog
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 github.com/googleapis/gax-go/v2/internallog/internal
 github.com/googleapis/gax-go/v2/iterator
 # github.com/gordonklaus/ineffassign v0.1.0
@@ -1562,13 +1295,8 @@ github.com/gostaticanalysis/nilerr
 ## explicit; go 1.20
 github.com/grafeas/grafeas/go/utils/intoto
 github.com/grafeas/grafeas/proto/v1/grafeas_go_proto
-<<<<<<< HEAD
 # github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1
 ## explicit; go 1.22.0
-=======
-# github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0
-## explicit; go 1.21
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule
 github.com/grpc-ecosystem/grpc-gateway/v2/runtime
 github.com/grpc-ecosystem/grpc-gateway/v2/utilities
@@ -1578,12 +1306,9 @@ github.com/hashicorp/errwrap
 # github.com/hashicorp/go-cleanhttp v0.5.2
 ## explicit; go 1.13
 github.com/hashicorp/go-cleanhttp
-<<<<<<< HEAD
 # github.com/hashicorp/go-immutable-radix/v2 v2.1.0
 ## explicit; go 1.18
 github.com/hashicorp/go-immutable-radix/v2
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 # github.com/hashicorp/go-multierror v1.1.1
 ## explicit; go 1.13
 github.com/hashicorp/go-multierror
@@ -1612,13 +1337,10 @@ github.com/hashicorp/go-version
 ## explicit; go 1.12
 github.com/hashicorp/golang-lru
 github.com/hashicorp/golang-lru/simplelru
-<<<<<<< HEAD
 # github.com/hashicorp/golang-lru/v2 v2.0.7
 ## explicit; go 1.18
 github.com/hashicorp/golang-lru/v2/internal
 github.com/hashicorp/golang-lru/v2/simplelru
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 # github.com/hashicorp/hcl v1.0.1-vault-5
 ## explicit; go 1.15
 github.com/hashicorp/hcl
@@ -1639,16 +1361,10 @@ github.com/hashicorp/vault/api
 github.com/hexops/gotextdiff
 github.com/hexops/gotextdiff/myers
 github.com/hexops/gotextdiff/span
-<<<<<<< HEAD
-# github.com/in-toto/archivista v0.8.0
-## explicit; go 1.23.0
-github.com/in-toto/archivista/pkg/api
-=======
-# github.com/in-toto/archivista v0.5.4 => ../archivista
+# github.com/in-toto/archivista v0.9.0
 ## explicit; go 1.23.0
 github.com/in-toto/archivista/pkg/api
 github.com/in-toto/archivista/pkg/http-client
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 # github.com/in-toto/attestation v1.1.0
 ## explicit; go 1.20
 github.com/in-toto/attestation/go/predicates/provenance/v1
@@ -1730,13 +1446,8 @@ github.com/jgautheron/goconst
 # github.com/jingyugao/rowserrcheck v1.1.1
 ## explicit; go 1.13
 github.com/jingyugao/rowserrcheck/passes/rowserr
-<<<<<<< HEAD
 # github.com/jjti/go-spancheck v0.6.4
 ## explicit; go 1.22.1
-=======
-# github.com/jjti/go-spancheck v0.6.2
-## explicit; go 1.20
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 github.com/jjti/go-spancheck
 # github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24
 ## explicit; go 1.14
@@ -1747,13 +1458,8 @@ github.com/josharian/intern
 # github.com/json-iterator/go v1.1.12
 ## explicit; go 1.12
 github.com/json-iterator/go
-<<<<<<< HEAD
 # github.com/julz/importas v0.2.0
 ## explicit; go 1.20
-=======
-# github.com/julz/importas v0.1.0
-## explicit; go 1.15
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 github.com/julz/importas
 # github.com/karamaru-alpha/copyloopvar v1.1.0
 ## explicit; go 1.21
@@ -1797,7 +1503,6 @@ github.com/kyoh86/exportloopref
 # github.com/lasiar/canonicalheader v1.1.2
 ## explicit; go 1.22.0
 github.com/lasiar/canonicalheader
-<<<<<<< HEAD
 # github.com/ldez/exptostd v0.3.1
 ## explicit; go 1.22.0
 github.com/ldez/exptostd
@@ -1813,14 +1518,6 @@ github.com/ldez/tagliatelle
 # github.com/ldez/usetesting v0.4.2
 ## explicit; go 1.22.0
 github.com/ldez/usetesting
-=======
-# github.com/ldez/gomoddirectives v0.2.4
-## explicit; go 1.21
-github.com/ldez/gomoddirectives
-# github.com/ldez/tagliatelle v0.5.0
-## explicit; go 1.19
-github.com/ldez/tagliatelle
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 # github.com/leonklingele/grouper v1.1.2
 ## explicit; go 1.18
 github.com/leonklingele/grouper/pkg/analyzer
@@ -1840,11 +1537,7 @@ github.com/letsencrypt/boulder/strictyaml
 # github.com/macabu/inamedparam v0.1.3
 ## explicit; go 1.20
 github.com/macabu/inamedparam
-<<<<<<< HEAD
 # github.com/magiconair/properties v1.8.9
-=======
-# github.com/magiconair/properties v1.8.7
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 ## explicit; go 1.19
 github.com/magiconair/properties
 # github.com/mailru/easyjson v0.7.7
@@ -1884,11 +1577,7 @@ github.com/miekg/pkcs11
 # github.com/mitchellh/go-homedir v1.1.0
 ## explicit
 github.com/mitchellh/go-homedir
-<<<<<<< HEAD
 # github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c
-=======
-# github.com/mitchellh/mapstructure v1.5.0
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 ## explicit; go 1.14
 github.com/mitchellh/mapstructure
 # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd
@@ -1925,11 +1614,7 @@ github.com/nishanths/predeclared/passes/predeclared
 # github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481
 ## explicit
 github.com/nozzle/throttler
-<<<<<<< HEAD
 # github.com/nunnatsa/ginkgolinter v0.18.4
-=======
-# github.com/nunnatsa/ginkgolinter v0.18.3
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 ## explicit; go 1.22.0
 github.com/nunnatsa/ginkgolinter
 github.com/nunnatsa/ginkgolinter/internal/expression
@@ -2022,13 +1707,8 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint/validations
 # github.com/prometheus/client_model v0.6.1
 ## explicit; go 1.19
 github.com/prometheus/client_model/go
-<<<<<<< HEAD
 # github.com/prometheus/common v0.62.0
 ## explicit; go 1.21
-=======
-# github.com/prometheus/common v0.55.0
-## explicit; go 1.20
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 github.com/prometheus/common/expfmt
 github.com/prometheus/common/model
 # github.com/prometheus/procfs v0.15.1
@@ -2073,11 +1753,7 @@ github.com/quasilyte/regex/syntax
 # github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567
 ## explicit; go 1.17
 github.com/quasilyte/stdinfo
-<<<<<<< HEAD
 # github.com/raeperd/recvcheck v0.2.0
-=======
-# github.com/raeperd/recvcheck v0.1.2
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 ## explicit; go 1.22.0
 github.com/raeperd/recvcheck
 # github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
@@ -2086,13 +1762,8 @@ github.com/rcrowley/go-metrics
 # github.com/rivo/uniseg v0.4.7
 ## explicit; go 1.18
 github.com/rivo/uniseg
-<<<<<<< HEAD
 # github.com/rogpeppe/go-internal v1.13.2-0.20241226121412-a5dc8ff20d0a
 ## explicit; go 1.22.0
-=======
-# github.com/rogpeppe/go-internal v1.13.1
-## explicit; go 1.22
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 github.com/rogpeppe/go-internal/internal/syscall/windows
 github.com/rogpeppe/go-internal/internal/syscall/windows/sysdll
 github.com/rogpeppe/go-internal/lockedfile
@@ -2112,7 +1783,6 @@ github.com/sagikazarmark/locafero
 # github.com/sagikazarmark/slog-shim v0.1.0
 ## explicit; go 1.20
 github.com/sagikazarmark/slog-shim
-<<<<<<< HEAD
 # github.com/sanposhiho/wastedassign/v2 v2.1.0
 ## explicit; go 1.18
 github.com/sanposhiho/wastedassign/v2
@@ -2124,19 +1794,6 @@ github.com/santhosh-tekuri/jsonschema/v6/kind
 ## explicit; go 1.18
 github.com/sashamelentyev/interfacebloat/pkg/analyzer
 # github.com/sashamelentyev/usestdlibvars v1.28.0
-=======
-# github.com/sanposhiho/wastedassign/v2 v2.0.7
-## explicit; go 1.14
-github.com/sanposhiho/wastedassign/v2
-# github.com/santhosh-tekuri/jsonschema/v5 v5.3.1
-## explicit; go 1.19
-github.com/santhosh-tekuri/jsonschema/v5
-github.com/santhosh-tekuri/jsonschema/v5/httploader
-# github.com/sashamelentyev/interfacebloat v1.1.0
-## explicit; go 1.18
-github.com/sashamelentyev/interfacebloat/pkg/analyzer
-# github.com/sashamelentyev/usestdlibvars v1.27.0
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 ## explicit; go 1.20
 github.com/sashamelentyev/usestdlibvars/pkg/analyzer
 github.com/sashamelentyev/usestdlibvars/pkg/analyzer/internal/mapping
@@ -2144,11 +1801,7 @@ github.com/sashamelentyev/usestdlibvars/pkg/analyzer/internal/mapping
 ## explicit
 github.com/sassoftware/relic/lib/pkcs7
 github.com/sassoftware/relic/lib/x509tools
-<<<<<<< HEAD
 # github.com/secure-systems-lab/go-securesystemslib v0.9.0
-=======
-# github.com/secure-systems-lab/go-securesystemslib v0.8.0
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 ## explicit; go 1.20
 github.com/secure-systems-lab/go-securesystemslib/cjson
 github.com/secure-systems-lab/go-securesystemslib/dsse
@@ -2173,13 +1826,8 @@ github.com/shazow/go-diff/difflib
 # github.com/shibumi/go-pathspec v1.3.0
 ## explicit; go 1.17
 github.com/shibumi/go-pathspec
-<<<<<<< HEAD
 # github.com/sigstore/cosign/v2 v2.4.2
 ## explicit; go 1.23.4
-=======
-# github.com/sigstore/cosign/v2 v2.4.1
-## explicit; go 1.22.7
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 github.com/sigstore/cosign/v2/cmd/cosign/cli/fulcio
 github.com/sigstore/cosign/v2/cmd/cosign/cli/options
 github.com/sigstore/cosign/v2/cmd/cosign/cli/sign/privacy
@@ -2219,30 +1867,17 @@ github.com/sigstore/cosign/v2/pkg/providers/google
 github.com/sigstore/cosign/v2/pkg/providers/spiffe
 github.com/sigstore/cosign/v2/pkg/signature
 github.com/sigstore/cosign/v2/pkg/types
-<<<<<<< HEAD
 # github.com/sigstore/fulcio v1.6.6
 ## explicit; go 1.23.3
 github.com/sigstore/fulcio/pkg/api
 # github.com/sigstore/protobuf-specs v0.4.0
 ## explicit; go 1.22.0
-=======
-# github.com/sigstore/fulcio v1.6.3
-## explicit; go 1.22.5
-github.com/sigstore/fulcio/pkg/api
-# github.com/sigstore/protobuf-specs v0.3.2
-## explicit; go 1.18
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1
 github.com/sigstore/protobuf-specs/gen/pb-go/common/v1
 github.com/sigstore/protobuf-specs/gen/pb-go/dsse
 github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1
-<<<<<<< HEAD
 # github.com/sigstore/rekor v1.3.9
 ## explicit; go 1.22.0
-=======
-# github.com/sigstore/rekor v1.3.7
-## explicit; go 1.23.2
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 github.com/sigstore/rekor/pkg/client
 github.com/sigstore/rekor/pkg/generated/client
 github.com/sigstore/rekor/pkg/generated/client/entries
@@ -2271,13 +1906,8 @@ github.com/sigstore/rekor/pkg/types/intoto/v0.0.2
 github.com/sigstore/rekor/pkg/types/rekord
 github.com/sigstore/rekor/pkg/types/rekord/v0.0.1
 github.com/sigstore/rekor/pkg/util
-<<<<<<< HEAD
 # github.com/sigstore/sigstore v1.8.15
 ## explicit; go 1.22.0
-=======
-# github.com/sigstore/sigstore v1.8.10
-## explicit; go 1.22.8
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 github.com/sigstore/sigstore/pkg/cryptoutils
 github.com/sigstore/sigstore/pkg/fulcioroots
 github.com/sigstore/sigstore/pkg/oauth
@@ -2285,7 +1915,6 @@ github.com/sigstore/sigstore/pkg/oauthflow
 github.com/sigstore/sigstore/pkg/signature
 github.com/sigstore/sigstore/pkg/signature/dsse
 github.com/sigstore/sigstore/pkg/signature/kms
-<<<<<<< HEAD
 github.com/sigstore/sigstore/pkg/signature/kms/cliplugin
 github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/common
 github.com/sigstore/sigstore/pkg/signature/kms/cliplugin/encoding
@@ -2307,25 +1936,6 @@ github.com/sigstore/sigstore/pkg/signature/kms/gcp
 github.com/sigstore/sigstore/pkg/signature/kms/hashivault
 # github.com/sigstore/timestamp-authority v1.2.4
 ## explicit; go 1.22.0
-=======
-github.com/sigstore/sigstore/pkg/signature/options
-github.com/sigstore/sigstore/pkg/signature/payload
-github.com/sigstore/sigstore/pkg/tuf
-# github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.10
-## explicit; go 1.22.8
-github.com/sigstore/sigstore/pkg/signature/kms/aws
-# github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.10
-## explicit; go 1.22.8
-github.com/sigstore/sigstore/pkg/signature/kms/azure
-# github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.10
-## explicit; go 1.22.8
-github.com/sigstore/sigstore/pkg/signature/kms/gcp
-# github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.10
-## explicit; go 1.22.8
-github.com/sigstore/sigstore/pkg/signature/kms/hashivault
-# github.com/sigstore/timestamp-authority v1.2.2
-## explicit; go 1.21
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 github.com/sigstore/timestamp-authority/pkg/verification
 # github.com/sirupsen/logrus v1.9.3
 ## explicit; go 1.13
@@ -2364,11 +1974,7 @@ github.com/spf13/cast
 # github.com/spf13/cobra v1.8.1
 ## explicit; go 1.15
 github.com/spf13/cobra
-<<<<<<< HEAD
 # github.com/spf13/pflag v1.0.6
-=======
-# github.com/spf13/pflag v1.0.5
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 ## explicit; go 1.12
 github.com/spf13/pflag
 # github.com/spf13/viper v1.19.0
@@ -2383,13 +1989,8 @@ github.com/spf13/viper/internal/encoding/json
 github.com/spf13/viper/internal/encoding/toml
 github.com/spf13/viper/internal/encoding/yaml
 github.com/spf13/viper/internal/features
-<<<<<<< HEAD
 # github.com/spiffe/go-spiffe/v2 v2.5.0
 ## explicit; go 1.22.11
-=======
-# github.com/spiffe/go-spiffe/v2 v2.4.0
-## explicit; go 1.21
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 github.com/spiffe/go-spiffe/v2/bundle/jwtbundle
 github.com/spiffe/go-spiffe/v2/bundle/spiffebundle
 github.com/spiffe/go-spiffe/v2/bundle/x509bundle
@@ -2412,13 +2013,8 @@ github.com/src-d/gcfg/types
 # github.com/ssgreg/nlreturn/v2 v2.2.1
 ## explicit; go 1.13
 github.com/ssgreg/nlreturn/v2/pkg/nlreturn
-<<<<<<< HEAD
 # github.com/stbenjam/no-sprintf-host-port v0.2.0
 ## explicit; go 1.18
-=======
-# github.com/stbenjam/no-sprintf-host-port v0.1.1
-## explicit; go 1.16
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 github.com/stbenjam/no-sprintf-host-port/pkg/analyzer
 # github.com/stoewer/go-strcase v1.2.0
 ## explicit; go 1.11
@@ -2448,19 +2044,11 @@ github.com/syndtr/goleveldb/leveldb/opt
 github.com/syndtr/goleveldb/leveldb/storage
 github.com/syndtr/goleveldb/leveldb/table
 github.com/syndtr/goleveldb/leveldb/util
-<<<<<<< HEAD
 # github.com/tdakkota/asciicheck v0.3.0
 ## explicit; go 1.18
 github.com/tdakkota/asciicheck
 # github.com/tektoncd/pipeline v0.68.0
 ## explicit; go 1.22.3
-=======
-# github.com/tdakkota/asciicheck v0.2.0
-## explicit; go 1.18
-github.com/tdakkota/asciicheck
-# github.com/tektoncd/pipeline v0.66.0
-## explicit; go 1.22
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 github.com/tektoncd/pipeline/internal/artifactref
 github.com/tektoncd/pipeline/pkg/apis/config
 github.com/tektoncd/pipeline/pkg/apis/config/resolver
@@ -2552,19 +2140,11 @@ github.com/tektoncd/pipeline/pkg/result
 github.com/tektoncd/pipeline/pkg/spire/config
 github.com/tektoncd/pipeline/pkg/substitution
 github.com/tektoncd/pipeline/test
-<<<<<<< HEAD
 # github.com/tektoncd/plumbing v0.0.0-20250115133002-f515628dffea
 ## explicit; go 1.22
 github.com/tektoncd/plumbing
 github.com/tektoncd/plumbing/scripts
 # github.com/tetafro/godot v1.4.20
-=======
-# github.com/tektoncd/plumbing v0.0.0-20230907180608-5625252a2de1
-## explicit; go 1.19
-github.com/tektoncd/plumbing
-github.com/tektoncd/plumbing/scripts
-# github.com/tetafro/godot v1.4.18
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 ## explicit; go 1.20
 github.com/tetafro/godot
 # github.com/thales-e-security/pool v0.0.2
@@ -2585,11 +2165,7 @@ github.com/theupdateframework/go-tuf/pkg/targets
 github.com/theupdateframework/go-tuf/sign
 github.com/theupdateframework/go-tuf/util
 github.com/theupdateframework/go-tuf/verify
-<<<<<<< HEAD
 # github.com/timakin/bodyclose v0.0.0-20241017074812-ed6a65f985e3
-=======
-# github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 ## explicit; go 1.12
 github.com/timakin/bodyclose/passes/bodyclose
 # github.com/timonwong/loggercheck v0.10.1
@@ -2606,11 +2182,7 @@ github.com/titanous/rocacheck
 # github.com/tjfoc/gmsm v1.4.1
 ## explicit; go 1.14
 github.com/tjfoc/gmsm/sm3
-<<<<<<< HEAD
 # github.com/tomarrell/wrapcheck/v2 v2.10.0
-=======
-# github.com/tomarrell/wrapcheck/v2 v2.9.0
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 ## explicit; go 1.21
 github.com/tomarrell/wrapcheck/v2/wrapcheck
 # github.com/tommy-muehle/go-mnd/v2 v2.5.1
@@ -2624,7 +2196,6 @@ github.com/transparency-dev/merkle
 github.com/transparency-dev/merkle/compact
 github.com/transparency-dev/merkle/proof
 github.com/transparency-dev/merkle/rfc6962
-<<<<<<< HEAD
 # github.com/ultraware/funlen v0.2.0
 ## explicit; go 1.22.0
 github.com/ultraware/funlen
@@ -2635,39 +2206,17 @@ github.com/ultraware/whitespace
 ## explicit; go 1.19
 github.com/uudashr/gocognit
 # github.com/uudashr/iface v1.3.0
-=======
-# github.com/ultraware/funlen v0.1.0
-## explicit; go 1.20
-github.com/ultraware/funlen
-# github.com/ultraware/whitespace v0.1.1
-## explicit; go 1.20
-github.com/ultraware/whitespace
-# github.com/uudashr/gocognit v1.1.3
-## explicit; go 1.18
-github.com/uudashr/gocognit
-# github.com/uudashr/iface v1.2.1
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 ## explicit; go 1.22.1
 github.com/uudashr/iface/identical
 github.com/uudashr/iface/internal/directive
 github.com/uudashr/iface/opaque
 github.com/uudashr/iface/unused
-<<<<<<< HEAD
 # github.com/vbatts/tar-split v0.11.6
-=======
-# github.com/vbatts/tar-split v0.11.5
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 ## explicit; go 1.17
 github.com/vbatts/tar-split/archive/tar
 # github.com/x448/float16 v0.8.4
 ## explicit; go 1.11
 github.com/x448/float16
-<<<<<<< HEAD
-=======
-# github.com/xanzy/go-gitlab v0.109.0
-## explicit; go 1.19
-github.com/xanzy/go-gitlab
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 # github.com/xanzy/ssh-agent v0.3.3
 ## explicit; go 1.16
 github.com/xanzy/ssh-agent
@@ -2697,22 +2246,15 @@ github.com/ykadowak/zerologlint
 # github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78
 ## explicit; go 1.17
 github.com/youmark/pkcs8
-<<<<<<< HEAD
 # github.com/zeebo/errs v1.4.0
-=======
-# github.com/zeebo/errs v1.3.0
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 ## explicit; go 1.12
 github.com/zeebo/errs
 # gitlab.com/bosi/decorder v0.4.2
 ## explicit; go 1.20
 gitlab.com/bosi/decorder
-<<<<<<< HEAD
 # gitlab.com/gitlab-org/api/client-go v0.121.0
 ## explicit; go 1.22
 gitlab.com/gitlab-org/api/client-go
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 # go-simpler.org/musttag v0.13.0
 ## explicit; go 1.20
 go-simpler.org/musttag
@@ -2789,7 +2331,6 @@ go.opencensus.io/trace
 go.opencensus.io/trace/internal
 go.opencensus.io/trace/propagation
 go.opencensus.io/trace/tracestate
-<<<<<<< HEAD
 # go.opentelemetry.io/auto/sdk v1.1.0
 ## explicit; go 1.22.0
 go.opentelemetry.io/auto/sdk
@@ -2803,28 +2344,12 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc
 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal
 # go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0
 ## explicit; go 1.22.0
-=======
-# go.opentelemetry.io/contrib/detectors/gcp v1.29.0
-## explicit; go 1.21
-go.opentelemetry.io/contrib/detectors/gcp
-# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0
-## explicit; go 1.21
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal
-# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0
-## explicit; go 1.21
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil
-<<<<<<< HEAD
 # go.opentelemetry.io/otel v1.34.0
 ## explicit; go 1.22.0
-=======
-# go.opentelemetry.io/otel v1.29.0
-## explicit; go 1.21
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 go.opentelemetry.io/otel
 go.opentelemetry.io/otel/attribute
 go.opentelemetry.io/otel/baggage
@@ -2840,7 +2365,6 @@ go.opentelemetry.io/otel/semconv/v1.17.0/httpconv
 go.opentelemetry.io/otel/semconv/v1.20.0
 go.opentelemetry.io/otel/semconv/v1.24.0
 go.opentelemetry.io/otel/semconv/v1.26.0
-<<<<<<< HEAD
 # go.opentelemetry.io/otel/metric v1.34.0
 ## explicit; go 1.22.0
 go.opentelemetry.io/otel/metric
@@ -2848,20 +2372,10 @@ go.opentelemetry.io/otel/metric/embedded
 go.opentelemetry.io/otel/metric/noop
 # go.opentelemetry.io/otel/sdk v1.34.0
 ## explicit; go 1.22.0
-=======
-# go.opentelemetry.io/otel/metric v1.29.0
-## explicit; go 1.21
-go.opentelemetry.io/otel/metric
-go.opentelemetry.io/otel/metric/embedded
-go.opentelemetry.io/otel/metric/noop
-# go.opentelemetry.io/otel/sdk v1.29.0
-## explicit; go 1.21
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 go.opentelemetry.io/otel/sdk
 go.opentelemetry.io/otel/sdk/instrumentation
 go.opentelemetry.io/otel/sdk/internal/x
 go.opentelemetry.io/otel/sdk/resource
-<<<<<<< HEAD
 # go.opentelemetry.io/otel/sdk/metric v1.32.0
 ## explicit; go 1.22
 go.opentelemetry.io/otel/sdk/metric
@@ -2875,32 +2389,6 @@ go.opentelemetry.io/otel/sdk/metric/metricdata
 go.opentelemetry.io/otel/trace
 go.opentelemetry.io/otel/trace/embedded
 go.opentelemetry.io/otel/trace/noop
-=======
-# go.opentelemetry.io/otel/sdk/metric v1.29.0
-## explicit; go 1.21
-go.opentelemetry.io/otel/sdk/metric
-go.opentelemetry.io/otel/sdk/metric/internal
-go.opentelemetry.io/otel/sdk/metric/internal/aggregate
-go.opentelemetry.io/otel/sdk/metric/internal/exemplar
-go.opentelemetry.io/otel/sdk/metric/internal/x
-go.opentelemetry.io/otel/sdk/metric/metricdata
-# go.opentelemetry.io/otel/trace v1.29.0
-## explicit; go 1.21
-go.opentelemetry.io/otel/trace
-go.opentelemetry.io/otel/trace/embedded
-# go.step.sm/crypto v0.54.2
-## explicit; go 1.22
-go.step.sm/crypto/fingerprint
-go.step.sm/crypto/internal/bcrypt_pbkdf
-go.step.sm/crypto/internal/emoji
-go.step.sm/crypto/internal/utils
-go.step.sm/crypto/internal/utils/utfbom
-go.step.sm/crypto/jose
-go.step.sm/crypto/keyutil
-go.step.sm/crypto/pemutil
-go.step.sm/crypto/randutil
-go.step.sm/crypto/x25519
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 # go.uber.org/automaxprocs v1.6.0
 ## explicit; go 1.20
 go.uber.org/automaxprocs/internal/cgroups
@@ -2948,11 +2436,7 @@ gocloud.dev/docstore/mongodocstore
 # gocloud.dev/pubsub/kafkapubsub v0.40.0
 ## explicit; go 1.21.0
 gocloud.dev/pubsub/kafkapubsub
-<<<<<<< HEAD
 # golang.org/x/crypto v0.32.0
-=======
-# golang.org/x/crypto v0.31.0
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 ## explicit; go 1.20
 golang.org/x/crypto/argon2
 golang.org/x/crypto/blake2b
@@ -2988,11 +2472,7 @@ golang.org/x/crypto/ssh/agent
 golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
 golang.org/x/crypto/ssh/knownhosts
 golang.org/x/crypto/ssh/terminal
-<<<<<<< HEAD
 # golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f
-=======
-# golang.org/x/exp v0.0.0-20240909161429-701f63a606c0
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 ## explicit; go 1.22.0
 golang.org/x/exp/constraints
 golang.org/x/exp/maps
@@ -3011,11 +2491,7 @@ golang.org/x/mod/module
 golang.org/x/mod/semver
 golang.org/x/mod/sumdb/dirhash
 golang.org/x/mod/sumdb/note
-<<<<<<< HEAD
 # golang.org/x/net v0.34.0
-=======
-# golang.org/x/net v0.33.0
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 ## explicit; go 1.18
 golang.org/x/net/context
 golang.org/x/net/context/ctxhttp
@@ -3028,11 +2504,7 @@ golang.org/x/net/internal/socks
 golang.org/x/net/internal/timeseries
 golang.org/x/net/proxy
 golang.org/x/net/trace
-<<<<<<< HEAD
 # golang.org/x/oauth2 v0.26.0
-=======
-# golang.org/x/oauth2 v0.24.0
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 ## explicit; go 1.18
 golang.org/x/oauth2
 golang.org/x/oauth2/authhandler
@@ -3056,11 +2528,7 @@ golang.org/x/sys/plan9
 golang.org/x/sys/unix
 golang.org/x/sys/windows
 golang.org/x/sys/windows/registry
-<<<<<<< HEAD
 # golang.org/x/term v0.28.0
-=======
-# golang.org/x/term v0.27.0
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 ## explicit; go 1.18
 golang.org/x/term
 # golang.org/x/text v0.21.0
@@ -3069,7 +2537,6 @@ golang.org/x/text/encoding
 golang.org/x/text/encoding/internal
 golang.org/x/text/encoding/internal/identifier
 golang.org/x/text/encoding/unicode
-<<<<<<< HEAD
 golang.org/x/text/feature/plural
 golang.org/x/text/internal
 golang.org/x/text/internal/catmsg
@@ -3083,26 +2550,16 @@ golang.org/x/text/internal/utf8internal
 golang.org/x/text/language
 golang.org/x/text/message
 golang.org/x/text/message/catalog
-=======
-golang.org/x/text/internal/utf8internal
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 golang.org/x/text/runes
 golang.org/x/text/secure/bidirule
 golang.org/x/text/transform
 golang.org/x/text/unicode/bidi
 golang.org/x/text/unicode/norm
 golang.org/x/text/width
-<<<<<<< HEAD
 # golang.org/x/time v0.9.0
 ## explicit; go 1.18
 golang.org/x/time/rate
 # golang.org/x/tools v0.29.0
-=======
-# golang.org/x/time v0.8.0
-## explicit; go 1.18
-golang.org/x/time/rate
-# golang.org/x/tools v0.27.0
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 ## explicit; go 1.22.0
 golang.org/x/tools/go/analysis
 golang.org/x/tools/go/analysis/passes/appends
@@ -3141,10 +2598,7 @@ golang.org/x/tools/go/analysis/passes/sigchanyzer
 golang.org/x/tools/go/analysis/passes/slog
 golang.org/x/tools/go/analysis/passes/sortslice
 golang.org/x/tools/go/analysis/passes/stdmethods
-<<<<<<< HEAD
 golang.org/x/tools/go/analysis/passes/stdversion
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 golang.org/x/tools/go/analysis/passes/stringintconv
 golang.org/x/tools/go/analysis/passes/structtag
 golang.org/x/tools/go/analysis/passes/testinggoroutine
@@ -3155,10 +2609,7 @@ golang.org/x/tools/go/analysis/passes/unreachable
 golang.org/x/tools/go/analysis/passes/unsafeptr
 golang.org/x/tools/go/analysis/passes/unusedresult
 golang.org/x/tools/go/analysis/passes/unusedwrite
-<<<<<<< HEAD
 golang.org/x/tools/go/analysis/passes/waitgroup
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 golang.org/x/tools/go/ast/astutil
 golang.org/x/tools/go/ast/inspector
 golang.org/x/tools/go/buildutil
@@ -3182,10 +2633,7 @@ golang.org/x/tools/internal/gcimporter
 golang.org/x/tools/internal/gocommand
 golang.org/x/tools/internal/gopathwalk
 golang.org/x/tools/internal/imports
-<<<<<<< HEAD
 golang.org/x/tools/internal/modindex
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 golang.org/x/tools/internal/packagesinternal
 golang.org/x/tools/internal/pkgbits
 golang.org/x/tools/internal/stdlib
@@ -3199,13 +2647,8 @@ golang.org/x/xerrors/internal
 # gomodules.xyz/jsonpatch/v2 v2.4.0
 ## explicit; go 1.20
 gomodules.xyz/jsonpatch/v2
-<<<<<<< HEAD
 # google.golang.org/api v0.219.0
 ## explicit; go 1.22
-=======
-# google.golang.org/api v0.210.0
-## explicit; go 1.21
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 google.golang.org/api/googleapi
 google.golang.org/api/googleapi/transport
 google.golang.org/api/iamcredentials/v1
@@ -3224,10 +2667,6 @@ google.golang.org/api/support/bundler
 google.golang.org/api/transport
 google.golang.org/api/transport/grpc
 google.golang.org/api/transport/http
-<<<<<<< HEAD
-=======
-google.golang.org/api/transport/http/internal/propagation
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 # google.golang.org/genproto v0.0.0-20241118233622-e639e219e697
 ## explicit; go 1.21
 google.golang.org/genproto/googleapis/cloud/location
@@ -3236,13 +2675,8 @@ google.golang.org/genproto/googleapis/type/date
 google.golang.org/genproto/googleapis/type/expr
 google.golang.org/genproto/googleapis/type/latlng
 google.golang.org/genproto/protobuf/field_mask
-<<<<<<< HEAD
 # google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f
 ## explicit; go 1.22
-=======
-# google.golang.org/genproto/googleapis/api v0.0.0-20241113202542-65e8d215514f
-## explicit; go 1.21
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 google.golang.org/genproto/googleapis/api
 google.golang.org/genproto/googleapis/api/annotations
 google.golang.org/genproto/googleapis/api/distribution
@@ -3251,21 +2685,12 @@ google.golang.org/genproto/googleapis/api/httpbody
 google.golang.org/genproto/googleapis/api/label
 google.golang.org/genproto/googleapis/api/metric
 google.golang.org/genproto/googleapis/api/monitoredres
-<<<<<<< HEAD
 # google.golang.org/genproto/googleapis/rpc v0.0.0-20250124145028-65684f501c47
 ## explicit; go 1.22
 google.golang.org/genproto/googleapis/rpc/code
 google.golang.org/genproto/googleapis/rpc/errdetails
 google.golang.org/genproto/googleapis/rpc/status
 # google.golang.org/grpc v1.70.0
-=======
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697
-## explicit; go 1.21
-google.golang.org/genproto/googleapis/rpc/code
-google.golang.org/genproto/googleapis/rpc/errdetails
-google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.68.1
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 ## explicit; go 1.22
 google.golang.org/grpc
 google.golang.org/grpc/attributes
@@ -3274,10 +2699,7 @@ google.golang.org/grpc/authz/audit/stdout
 google.golang.org/grpc/backoff
 google.golang.org/grpc/balancer
 google.golang.org/grpc/balancer/base
-<<<<<<< HEAD
 google.golang.org/grpc/balancer/endpointsharding
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 google.golang.org/grpc/balancer/grpclb
 google.golang.org/grpc/balancer/grpclb/grpc_lb_v1
 google.golang.org/grpc/balancer/grpclb/state
@@ -3368,11 +2790,8 @@ google.golang.org/grpc/resolver/dns
 google.golang.org/grpc/resolver/manual
 google.golang.org/grpc/serviceconfig
 google.golang.org/grpc/stats
-<<<<<<< HEAD
 google.golang.org/grpc/stats/opentelemetry
 google.golang.org/grpc/stats/opentelemetry/internal
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 google.golang.org/grpc/status
 google.golang.org/grpc/tap
 google.golang.org/grpc/xds
@@ -3403,24 +2822,13 @@ google.golang.org/grpc/xds/internal/xdsclient
 google.golang.org/grpc/xds/internal/xdsclient/internal
 google.golang.org/grpc/xds/internal/xdsclient/load
 google.golang.org/grpc/xds/internal/xdsclient/transport
-<<<<<<< HEAD
 google.golang.org/grpc/xds/internal/xdsclient/transport/ads
 google.golang.org/grpc/xds/internal/xdsclient/transport/grpctransport
 google.golang.org/grpc/xds/internal/xdsclient/transport/lrs
-=======
-google.golang.org/grpc/xds/internal/xdsclient/transport/internal
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry
 google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/converter
 google.golang.org/grpc/xds/internal/xdsclient/xdsresource
 google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version
-<<<<<<< HEAD
-=======
-# google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a
-## explicit; go 1.21
-google.golang.org/grpc/stats/opentelemetry
-google.golang.org/grpc/stats/opentelemetry/internal
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 # google.golang.org/protobuf v1.36.4
 ## explicit; go 1.21
 google.golang.org/protobuf/encoding/protodelim
@@ -4297,11 +3705,7 @@ mvdan.cc/unparam/check
 ## explicit; go 1.21
 sigs.k8s.io/json
 sigs.k8s.io/json/internal/golang/encoding/json
-<<<<<<< HEAD
 # sigs.k8s.io/release-utils v0.11.0
-=======
-# sigs.k8s.io/release-utils v0.8.5
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
 ## explicit; go 1.23
 sigs.k8s.io/release-utils/version
 # sigs.k8s.io/structured-merge-diff/v4 v4.4.2
@@ -4315,10 +3719,5 @@ sigs.k8s.io/structured-merge-diff/v4/value
 ## explicit; go 1.12
 sigs.k8s.io/yaml
 sigs.k8s.io/yaml/goyaml.v2
-<<<<<<< HEAD
 # github.com/alibabacloud-go/cr-20160607 => github.com/vdemeester/cr-20160607 v1.0.1
 # github.com/golang/glog => github.com/jdolitsky/glog v0.0.0-20220729172235-78744e90d087
-=======
-# github.com/golang/glog => github.com/jdolitsky/glog v0.0.0-20220729172235-78744e90d087
-# github.com/in-toto/archivista => ../archivista
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)
diff --git a/vendor/sigs.k8s.io/release-utils/version/command.go b/vendor/sigs.k8s.io/release-utils/version/command.go
index 0f1068eafd..0e26ea78b2 100644
--- a/vendor/sigs.k8s.io/release-utils/version/command.go
+++ b/vendor/sigs.k8s.io/release-utils/version/command.go
@@ -44,10 +44,7 @@ func WithFont(fontName string) *cobra.Command {
 func version(fontName string) *cobra.Command {
 	var outputJSON bool
-<<<<<<< HEAD
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 	cmd := &cobra.Command{
 		Use:   "version",
 		Short: "Prints the version",
@@ -71,10 +68,7 @@ func version(fontName string) *cobra.Command {
 			} else {
 				cmd.Println(v.String())
 			}
-<<<<<<< HEAD
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 			return nil
 		},
 	}
diff --git a/vendor/sigs.k8s.io/release-utils/version/version.go b/vendor/sigs.k8s.io/release-utils/version/version.go
index 847dd1e120..c1a6907b32 100644
--- a/vendor/sigs.k8s.io/release-utils/version/version.go
+++ b/vendor/sigs.k8s.io/release-utils/version/version.go
@@ -79,10 +79,7 @@ func getBuildInfo() *debug.BuildInfo {
 	if !ok {
 		return nil
 	}
-<<<<<<< HEAD
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 	return bi
 }
@@ -108,34 +105,22 @@ func getDirty(bi *debug.BuildInfo) string {
 	if modified == "true" {
 		return "dirty"
 	}
-<<<<<<< HEAD
 	if modified == "false" {
 		return "clean"
 	}
-=======
-	if modified == "false" {
-		return "clean"
-	}
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 	return unknown
 }

 func getBuildDate(bi *debug.BuildInfo) string {
 	buildTime := getKey(bi, "vcs.time")
-<<<<<<< HEAD
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 	t, err := time.Parse("2006-01-02T15:04:05Z", buildTime)
 	if err != nil {
 		return unknown
 	}
-<<<<<<< HEAD
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 	return t.Format("2006-01-02T15:04:05")
 }
@@ -143,19 +128,13 @@ func getKey(bi *debug.BuildInfo, key string) string {
 	if bi == nil {
 		return unknown
 	}
-<<<<<<< HEAD
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 	for _, iter := range bi.Settings {
 		if iter.Key == key {
 			return iter.Value
 		}
 	}
-<<<<<<< HEAD
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 	return unknown
 }
@@ -164,10 +143,7 @@ func GetVersionInfo() Info {
 	once.Do(func() {
 		buildInfo := getBuildInfo()
 		gitVersion = getGitVersion(buildInfo)
-<<<<<<< HEAD
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 		if gitCommit == unknown {
 			gitCommit = getCommit(buildInfo)
 		}
@@ -218,18 +194,12 @@ func (i *Info) String() string {
 		f := figure.NewFigure(strings.ToUpper(i.Name), i.FontName, true)
 		_, _ = fmt.Fprint(w, f.String())
 	}
-<<<<<<< HEAD
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 	_, _ = fmt.Fprint(w, i.Name)
 	if i.Description != "" {
 		_, _ = fmt.Fprintf(w, ": %s", i.Description)
 	}
-<<<<<<< HEAD
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 	_, _ = fmt.Fprint(w, "\n\n")
 }
@@ -242,10 +212,7 @@ func (i *Info) String() string {
 	_, _ = fmt.Fprintf(w, "Platform:\t%s\n", i.Platform)

 	_ = w.Flush()
-<<<<<<< HEAD
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 	return b.String()
 }
@@ -269,9 +236,6 @@ func (i *Info) CheckFontName(fontName string) bool {
 	}

 	fmt.Fprintln(os.Stderr, "font not valid, using default")
-<<<<<<< HEAD
-=======
->>>>>>> 70e0318b1 ([WIP] add archivista storage backend)

 	return false
 }